From 49328bb25e7ffdf94ae045b0adb3cf759f577526 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 15 Sep 2020 12:28:28 -0400 Subject: [PATCH 001/225] codebase2 wip; temporarily deleted old packages to improve hls startup --- .travis.yml | 4 +- codebase1/codebase/Unison/Codebase/V1/ABT.hs | 705 ++++ .../Unison/Codebase/V1/Branch/NameSegment.hs | 13 + .../codebase/Unison/Codebase/V1/Branch/Raw.hs | 34 + .../codebase/Unison/Codebase/V1/Causal/Raw.hs | 26 + .../Unison/Codebase/V1}/ConstructorType.hs | 4 +- .../Unison/Codebase/V1/DataDeclaration.hs | 72 + .../Unison/Codebase/V1/FileCodebase.hs | 242 ++ .../Unison/Codebase/V1}/LabeledDependency.hs | 14 +- .../codebase/Unison/Codebase/V1/Patch.hs | 60 + .../Unison/Codebase/V1/Patch}/TermEdit.hs | 23 +- .../Unison/Codebase/V1/Patch/TypeEdit.hs | 14 + .../codebase/Unison/Codebase/V1/Reference.hs | 61 + .../codebase/Unison/Codebase/V1/Referent.hs | 54 + .../V1/Serialization}/Serialization.hs | 2 +- .../Unison/Codebase/V1/Serialization/V1.hs | 377 +++ .../codebase/Unison/Codebase/V1}/Star3.hs | 12 +- .../codebase/Unison/Codebase/V1/Symbol.hs | 13 + codebase1/codebase/Unison/Codebase/V1/Term.hs | 167 + .../Unison/Codebase/V1/Term/Pattern.hs | 102 + codebase1/codebase/Unison/Codebase/V1/Type.hs | 55 + .../codebase/Unison/Codebase/V1/Type/Kind.hs | 3 + codebase1/codebase/unison-codebase1.cabal | 59 + codebase2/CHANGELOG.md | 5 + codebase2/LICENSE | 20 + .../U/Codebase/Sqlite/Branch/Diff.hs | 23 + .../U/Codebase/Sqlite/Branch/Format.hs | 8 + .../U/Codebase/Sqlite/Branch/Full.hs | 16 + .../U/Codebase/Sqlite/Branch/MetadataSet.hs | 6 + .../U/Codebase/Sqlite/Causal.hs | 7 + .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 23 + .../U/Codebase/Sqlite/Decl/Format.hs | 32 + .../U/Codebase/Sqlite/LocalIds.hs | 11 + .../U/Codebase/Sqlite/Patch/Diff.hs | 16 + .../U/Codebase/Sqlite/Patch/Format.hs | 1 + .../U/Codebase/Sqlite/Patch/Full.hs | 11 + .../U/Codebase/Sqlite/Patch/TermEdit.hs | 12 + .../U/Codebase/Sqlite/Patch/TypeEdit.hs | 6 + 
.../U/Codebase/Sqlite/Queries.hs | 9 + .../U/Codebase/Sqlite/Reference.hs | 13 + .../U/Codebase/Sqlite/Referent.hs | 11 + .../U/Codebase/Sqlite/Serialization.hs | 303 ++ .../U/Codebase/Sqlite/Term/Format.hs | 36 + .../U/Codebase/Sqlite/Types.hs | 8 + .../codebase-sqlite/sql/create-index.sql | 122 + codebase2/codebase-sqlite/sql/create.sql | 104 + .../unison-codebase-sqlite.cabal | 56 + codebase2/codebase/U/Codebase/Branch.hs | 29 + codebase2/codebase/U/Codebase/Causal.hs | 11 + codebase2/codebase/U/Codebase/Codebase.hs | 81 + codebase2/codebase/U/Codebase/Decl.hs | 27 + .../codebase/U/Codebase}/Kind.hs | 9 +- codebase2/codebase/U/Codebase/Reference.hs | 31 + codebase2/codebase/U/Codebase/Referent.hs | 33 + codebase2/codebase/U/Codebase/Reflog.hs | 21 + codebase2/codebase/U/Codebase/ShortHash.hs | 15 + codebase2/codebase/U/Codebase/Term.hs | 131 + codebase2/codebase/U/Codebase/TermEdit.hs | 24 + codebase2/codebase/U/Codebase/Type.hs | 49 + .../codebase/U}/Codebase/TypeEdit.hs | 8 +- codebase2/codebase/U/Codebase/WatchKind.hs | 3 + codebase2/codebase/unison-codebase.cabal | 41 + codebase2/core/U/Core/ABT.hs | 53 + codebase2/core/unison-core.cabal | 31 + codebase2/editor/U/Editor/Codebase.hs | 57 + codebase2/editor/unison-editor.cabal | 27 + .../language/U/Language}/Blank.hs | 5 +- codebase2/language/unison-language.cabal | 27 + codebase2/notes.txt | 3 + codebase2/runtime/U/Runtime/CodeLookup.hs | 1 + codebase2/runtime/unison-runtime.cabal | 28 + codebase2/syntax/unison-syntax.cabal | 25 + .../U/Util/Serialization.hs | 191 ++ .../unison-util-serialization.cabal | 33 + codebase2/util/U/Util/Base32Hex.hs | 63 + codebase2/util/U/Util/Hash.hs | 59 + .../util/U/Util}/Hashable.hs | 8 +- .../util/U}/Util/Relation.hs | 12 +- codebase2/util/unison-util.cabal | 33 + hie.yaml | 66 +- parser-typechecker/LICENSE | 19 - parser-typechecker/benchmarks/runtime/Main.hs | 286 -- parser-typechecker/prettyprintdemo/Main.hs | 68 - parser-typechecker/src/Unison/Builtin.hs | 518 --- 
.../src/Unison/Builtin/Decls.hs | 310 -- parser-typechecker/src/Unison/Codebase.hs | 294 -- .../src/Unison/Codebase/Branch.hs | 900 ----- .../Unison/Codebase/Branch/Dependencies.hs | 89 - .../src/Unison/Codebase/BranchDiff.hs | 166 - .../src/Unison/Codebase/BranchUtil.hs | 135 - .../src/Unison/Codebase/Causal.hs | 373 --- .../src/Unison/Codebase/Classes.hs | 40 - .../src/Unison/Codebase/CodeLookup.hs | 57 - .../src/Unison/Codebase/Editor/AuthorInfo.hs | 59 - .../src/Unison/Codebase/Editor/Command.hs | 191 -- .../Unison/Codebase/Editor/DisplayThing.hs | 12 - .../src/Unison/Codebase/Editor/Git.hs | 249 -- .../Unison/Codebase/Editor/HandleCommand.hs | 272 -- .../src/Unison/Codebase/Editor/HandleInput.hs | 2898 ----------------- .../src/Unison/Codebase/Editor/Input.hs | 144 - .../src/Unison/Codebase/Editor/Output.hs | 359 -- .../Codebase/Editor/Output/BranchDiff.hs | 338 -- .../src/Unison/Codebase/Editor/Propagate.hs | 522 --- .../src/Unison/Codebase/Editor/RemoteRepo.hs | 32 - .../Unison/Codebase/Editor/SearchResult'.hs | 52 - .../Unison/Codebase/Editor/SlurpComponent.hs | 87 - .../src/Unison/Codebase/Editor/SlurpResult.hs | 391 --- .../src/Unison/Codebase/Editor/TodoOutput.hs | 63 - .../src/Unison/Codebase/Editor/UriParser.hs | 166 - .../Unison/Codebase/Editor/VersionParser.hs | 27 - .../src/Unison/Codebase/Execute.hs | 56 - .../src/Unison/Codebase/FileCodebase.hs | 282 -- .../Unison/Codebase/FileCodebase/Common.hs | 590 ---- .../FileCodebase/SlimCopyRegenerateIndex.hs | 321 -- .../src/Unison/Codebase/GitError.hs | 23 - .../src/Unison/Codebase/MainTerm.hs | 72 - .../src/Unison/Codebase/Metadata.hs | 72 - .../src/Unison/Codebase/NameEdit.hs | 15 - .../src/Unison/Codebase/Patch.hs | 136 - .../src/Unison/Codebase/Path.hs | 440 --- .../src/Unison/Codebase/Reflog.hs | 30 - .../src/Unison/Codebase/Runtime.hs | 132 - .../src/Unison/Codebase/SearchResult.hs | 83 - .../src/Unison/Codebase/Serialization/PutT.hs | 57 - .../src/Unison/Codebase/Serialization/V1.hs | 811 ----- 
.../src/Unison/Codebase/ShortBranchHash.hs | 35 - .../src/Unison/Codebase/SyncMode.hs | 3 - .../src/Unison/Codebase/TranscriptParser.hs | 427 --- .../src/Unison/Codebase/Watch.hs | 151 - parser-typechecker/src/Unison/Codecs.hs | 340 -- parser-typechecker/src/Unison/CommandLine.hs | 221 -- .../src/Unison/CommandLine/DisplayValues.hs | 96 - .../src/Unison/CommandLine/InputPattern.hs | 92 - .../src/Unison/CommandLine/InputPatterns.hs | 1549 --------- .../src/Unison/CommandLine/Main.hs | 258 -- .../src/Unison/CommandLine/OutputMessages.hs | 1977 ----------- parser-typechecker/src/Unison/DeclPrinter.hs | 187 -- parser-typechecker/src/Unison/FileParser.hs | 289 -- parser-typechecker/src/Unison/FileParsers.hs | 196 -- parser-typechecker/src/Unison/Lexer.hs | 777 ----- parser-typechecker/src/Unison/NamePrinter.hs | 81 - parser-typechecker/src/Unison/Parser.hs | 443 --- parser-typechecker/src/Unison/Parsers.hs | 88 - parser-typechecker/src/Unison/Path.hs | 54 - .../src/Unison/PrettyPrintEnv.hs | 142 - .../src/Unison/PrettyTerminal.hs | 51 - parser-typechecker/src/Unison/PrintError.hs | 1243 ------- parser-typechecker/src/Unison/Result.hs | 90 - parser-typechecker/src/Unison/Runtime/ANF.hs | 1408 -------- .../src/Unison/Runtime/Builtin.hs | 1200 ------- .../src/Unison/Runtime/Debug.hs | 52 - .../src/Unison/Runtime/Decompile.hs | 120 - .../src/Unison/Runtime/Foreign.hs | 205 -- .../src/Unison/Runtime/IOSource.hs | 559 ---- parser-typechecker/src/Unison/Runtime/IR.hs | 1196 ------- .../src/Unison/Runtime/Interface.hs | 225 -- .../src/Unison/Runtime/MCode.hs | 1401 -------- .../src/Unison/Runtime/Machine.hs | 1330 -------- .../src/Unison/Runtime/Pattern.hs | 664 ---- parser-typechecker/src/Unison/Runtime/Rt1.hs | 868 ----- .../src/Unison/Runtime/Rt1IO.hs | 529 --- .../src/Unison/Runtime/SparseVector.hs | 128 - .../src/Unison/Runtime/Stack.hs | 642 ---- .../src/Unison/Runtime/Vector.hs | 54 - .../src/Unison/Runtime/docs.markdown | 240 -- 
parser-typechecker/src/Unison/TermParser.hs | 901 ----- parser-typechecker/src/Unison/TermPrinter.hs | 1029 ------ parser-typechecker/src/Unison/TypeParser.hs | 115 - parser-typechecker/src/Unison/TypePrinter.hs | 185 -- parser-typechecker/src/Unison/Typechecker.hs | 351 -- .../src/Unison/Typechecker/Components.hs | 88 - .../src/Unison/Typechecker/Context.hs | 1801 ---------- .../src/Unison/Typechecker/Extractor.hs | 343 -- .../src/Unison/Typechecker/TypeError.hs | 298 -- .../src/Unison/Typechecker/TypeLookup.hs | 66 - .../src/Unison/Typechecker/TypeVar.hs | 53 - parser-typechecker/src/Unison/UnisonFile.hs | 369 --- .../src/Unison/Util/AnnotatedText.hs | 199 -- parser-typechecker/src/Unison/Util/Bytes.hs | 96 - parser-typechecker/src/Unison/Util/Cache.hs | 90 - .../src/Unison/Util/ColorText.hs | 129 - .../src/Unison/Util/CycleTable.hs | 39 - .../src/Unison/Util/CyclicEq.hs | 60 - .../src/Unison/Util/CyclicOrd.hs | 54 - .../src/Unison/Util/EnumContainers.hs | 115 - .../src/Unison/Util/Exception.hs | 16 - parser-typechecker/src/Unison/Util/Find.hs | 188 -- parser-typechecker/src/Unison/Util/Free.hs | 68 - parser-typechecker/src/Unison/Util/Less.hs | 25 - parser-typechecker/src/Unison/Util/Logger.hs | 109 - parser-typechecker/src/Unison/Util/Map.hs | 16 - parser-typechecker/src/Unison/Util/Menu.hs | 286 -- .../src/Unison/Util/PinBoard.hs | 143 - parser-typechecker/src/Unison/Util/Pretty.hs | 903 ----- parser-typechecker/src/Unison/Util/Range.hs | 27 - .../src/Unison/Util/SyntaxText.hs | 62 - parser-typechecker/src/Unison/Util/TQueue.hs | 89 - parser-typechecker/src/Unison/Util/Timing.hs | 41 - .../src/Unison/Util/TransitiveClosure.hs | 31 - parser-typechecker/tests/Suite.hs | 87 - .../tests/Unison/Core/Test/Name.hs | 29 - parser-typechecker/tests/Unison/Test/ABT.hs | 44 - parser-typechecker/tests/Unison/Test/ANF.hs | 199 -- parser-typechecker/tests/Unison/Test/Cache.hs | 80 - .../tests/Unison/Test/Codebase.hs | 40 - .../tests/Unison/Test/Codebase/Causal.hs | 318 -- 
.../Unison/Test/Codebase/FileCodebase.hs | 48 - .../tests/Unison/Test/Codebase/Path.hs | 66 - .../tests/Unison/Test/ColorText.hs | 84 - .../tests/Unison/Test/Common.hs | 75 - .../tests/Unison/Test/DataDeclaration.hs | 121 - .../tests/Unison/Test/FileParser.hs | 136 - parser-typechecker/tests/Unison/Test/Git.hs | 523 --- parser-typechecker/tests/Unison/Test/IO.hs | 114 - parser-typechecker/tests/Unison/Test/Lexer.hs | 207 -- parser-typechecker/tests/Unison/Test/MCode.hs | 111 - parser-typechecker/tests/Unison/Test/Range.hs | 33 - .../tests/Unison/Test/Referent.hs | 82 - parser-typechecker/tests/Unison/Test/Term.hs | 53 - .../tests/Unison/Test/TermParser.hs | 227 -- .../tests/Unison/Test/TermPrinter.hs | 586 ---- parser-typechecker/tests/Unison/Test/Type.hs | 33 - .../tests/Unison/Test/TypePrinter.hs | 170 - .../tests/Unison/Test/Typechecker.hs | 33 - .../Unison/Test/Typechecker/Components.hs | 35 - .../tests/Unison/Test/Typechecker/Context.hs | 41 - .../Unison/Test/Typechecker/TypeError.hs | 57 - .../tests/Unison/Test/UnisonSources.hs | 195 -- .../tests/Unison/Test/UriParser.hs | 84 - .../tests/Unison/Test/Util/Bytes.hs | 62 - .../tests/Unison/Test/Util/PinBoard.hs | 52 - .../tests/Unison/Test/Util/Pretty.hs | 33 - parser-typechecker/tests/Unison/Test/Var.hs | 23 - .../tests/Unison/Test/VersionParser.hs | 26 - parser-typechecker/transcripts/Transcripts.hs | 87 - .../unison-parser-typechecker.cabal | 382 --- parser-typechecker/unison/Main.hs | 317 -- parser-typechecker/unison/System/Path.hs | 106 - parser-typechecker/unison/Version.hs | 16 - stack.yaml | 20 +- unison-core/LICENSE | 19 - unison-core/src/Unison/ABT.hs | 715 ---- unison-core/src/Unison/ABT/Normalized.hs | 134 - unison-core/src/Unison/DataDeclaration.hs | 413 --- unison-core/src/Unison/Hash.hs | 109 - unison-core/src/Unison/HashQualified'.hs | 126 - unison-core/src/Unison/HashQualified.hs | 169 - unison-core/src/Unison/Name.hs | 173 - unison-core/src/Unison/NameSegment.hs | 41 - 
unison-core/src/Unison/Names2.hs | 334 -- unison-core/src/Unison/Names3.hs | 240 -- unison-core/src/Unison/Paths.hs | 204 -- unison-core/src/Unison/Pattern.hs | 165 - unison-core/src/Unison/PatternCompat.hs | 30 - unison-core/src/Unison/Prelude.hs | 62 - unison-core/src/Unison/Reference.hs | 179 - unison-core/src/Unison/Reference/Util.hs | 22 - unison-core/src/Unison/Referent.hs | 128 - unison-core/src/Unison/Settings.hs | 18 - unison-core/src/Unison/ShortHash.hs | 92 - unison-core/src/Unison/Symbol.hs | 35 - unison-core/src/Unison/Term.hs | 1123 ------- unison-core/src/Unison/Type.hs | 645 ---- unison-core/src/Unison/Util/Components.hs | 48 - unison-core/src/Unison/Util/List.hs | 65 - unison-core/src/Unison/Util/Monoid.hs | 27 - unison-core/src/Unison/Util/Relation3.hs | 120 - unison-core/src/Unison/Util/Relation4.hs | 122 - unison-core/src/Unison/Util/Set.hs | 10 - unison-core/src/Unison/Var.hs | 168 - unison-core/unison-core.cabal | 117 - unison-src/Base.u | 444 --- unison-src/Cofree.u | 20 - unison-src/EasyTest.u | 263 -- unison-src/Trie.u | 39 - unison-src/WeightedSearch.u | 69 - unison-src/base58.u | 60 - unison-src/basics.u | 72 - unison-src/demo/1.u | 6 - unison-src/demo/2.u | 46 - unison-src/demo/3.u | 115 - unison-src/errors/407.u | 8 - unison-src/errors/X-array.u | 6 - .../abort-ability-checks-against-pure.u | 9 - unison-src/errors/all-errors.u | 43 - unison-src/errors/check-for-regressions/and.u | 7 - .../check-for-regressions/app-polymorphic.u | 4 - unison-src/errors/check-for-regressions/app.u | 4 - .../applying-non-function.u | 4 - .../errors/check-for-regressions/casebody.u | 3 - .../errors/check-for-regressions/caseguard.u | 2 - .../check-for-regressions/casepattern.u | 3 - .../errors/check-for-regressions/ifcond.u | 1 - .../errors/check-for-regressions/ifelse.u | 1 - .../errors/check-for-regressions/lens.u | 9 - .../errors/check-for-regressions/not-and.u | 14 - .../errors/check-for-regressions/not-and0.u | 9 - 
.../check-for-regressions/not-caseguard.u | 4 - .../check-for-regressions/not-caseguard2.u | 2 - .../errors/check-for-regressions/not-or.u | 1 - .../errors/check-for-regressions/not-vector.u | 1 - unison-src/errors/check-for-regressions/or.u | 1 - .../errors/check-for-regressions/vector.u | 1 - unison-src/errors/compiler-bug.u | 5 - unison-src/errors/console.u | 19 - unison-src/errors/console2.u | 29 - unison-src/errors/cyclic-unguarded.u | 8 - unison-src/errors/effect-inference1.u | 12 - unison-src/errors/effect_unknown_type.uu | 20 - unison-src/errors/empty-block.u | 1 - unison-src/errors/ex1.u | 49 - unison-src/errors/fix745.u | 19 - unison-src/errors/handle-inference.u | 22 - .../errors/handler-coverage-checking.uu | 29 - unison-src/errors/id.u | 3 - unison-src/errors/io-effect.u | 9 - unison-src/errors/io-state1.u | 17 - unison-src/errors/map-reduce.u | 102 - unison-src/errors/map-traverse3.u | 26 - unison-src/errors/mismatched-braces.u | 4 - unison-src/errors/need-nominal-type.uu | 7 - ...ity-check-fail-by-calling-wrong-function.u | 27 - .../errors/poor-error-message/consoleh.u | 57 - .../doesnt-match-annotation.u | 5 - .../poor-error-message/function-calls.u | 11 - .../poor-error-message/function-calls1.u | 16 - .../poor-error-message/function-calls2.u | 19 - .../poor-error-message/function-calls3.u | 26 - unison-src/errors/poor-error-message/handle.u | 40 - .../errors/poor-error-message/handler-ex.u | 24 - .../mismatched-case-result-types.u | 20 - .../errors/poor-error-message/notaguard.u | 21 - .../overapplied-data-constructor-loc.u | 17 - .../pattern-case-location.u | 10 - .../poor-error-message/pattern-matching-1.u | 28 - .../errors/poor-error-message/tdnr-demo.u | 55 - .../poor-error-message/token-printing.u | 25 - unison-src/errors/rank2a.u | 8 - .../errors/seq-concat-constant-length.u | 3 - unison-src/errors/state4.u | 13 - unison-src/errors/tdnr.u | 3 - unison-src/errors/tdnr2.u | 1 - unison-src/errors/tdnr3.u | 10 - 
.../errors/term-functor-inspired/effect1.u | 9 - .../term-functor-inspired/if-body-mismatch.u | 3 - .../term-functor-inspired/if-cond-not-bool.u | 1 - .../mismatched-case-result-types.u | 5 - unison-src/errors/type-apply.u | 15 - .../errors/type-functor-inspired/app2.u | 4 - .../errors/type-functor-inspired/arrow1.u | 3 - .../errors/type-functor-inspired/effect2.u | 11 - .../type-functor-inspired/forall-arrow.u | 3 - .../type-functor-inspired/forall-arrow2.u | 4 - .../type-functor-inspired/forall-arrow3.u | 4 - .../need-nonstructural-types.uu | 12 - .../errors/type-functor-inspired/parens.u | 4 - .../errors/type-functor-inspired/subtuple.u | 5 - .../type-functor-inspired/synthesizeApp.u | 4 - .../errors/type-functor-inspired/tuple.u | 4 - .../errors/type-functor-inspired/tuple2.u | 3 - .../errors/type-functor-inspired/unit.u | 3 - unison-src/errors/unexpected-loop.u | 11 - unison-src/errors/unresolved-symbol-1.u | 6 - unison-src/errors/unsound-cont.u | 12 - unison-src/example-errors.u | 181 - unison-src/parser-tests/GenerateErrors.hs | 48 - .../parser-tests/empty-match-list.message.txt | 3 - unison-src/parser-tests/empty-match-list.u | 3 - .../if-without-condition.message.txt | 3 - .../parser-tests/if-without-condition.u | 1 - unison-src/remote-api.u | 95 - unison-src/remote.u | 67 - unison-src/sheepshead.u | 39 - unison-src/tests/324.u | 7 - unison-src/tests/344.uu | 5 - unison-src/tests/514.u | 13 - unison-src/tests/595.u | 13 - unison-src/tests/868.u | 8 - unison-src/tests/868.ur | 1 - unison-src/tests/a-tale-of-two-optionals.u | 13 - unison-src/tests/ability-inference-fail.uu | 35 - unison-src/tests/ability-keyword.u | 7 - unison-src/tests/abort.u | 13 - unison-src/tests/ask-inferred.u | 23 - unison-src/tests/boolean-ops-in-sequence.u | 1 - unison-src/tests/builtin-arity-0-evaluation.u | 3 - .../tests/builtin-arity-0-evaluation.ur | 1 - unison-src/tests/caseguard.u | 15 - unison-src/tests/cce.u | 116 - unison-src/tests/cce.ur | 1 - 
unison-src/tests/compose-inference.u | 4 - unison-src/tests/console.u | 54 - unison-src/tests/console1.u | 45 - unison-src/tests/data-references-builtins.u | 4 - unison-src/tests/delay.u | 37 - unison-src/tests/delay_parse.u | 20 - unison-src/tests/effect-instantiation.u | 10 - unison-src/tests/effect-instantiation2.u | 8 - unison-src/tests/effect1.u | 8 - unison-src/tests/empty-above-the-fold.u | 6 - unison-src/tests/fib4.ur | 1 - unison-src/tests/fix1640.u | 25 - unison-src/tests/fix528.u | 12 - unison-src/tests/fix528.ur | 1 - unison-src/tests/fix739.u | 4 - unison-src/tests/force.u | 9 - unison-src/tests/guard-boolean-operators.u | 11 - unison-src/tests/handler-stacking.u | 34 - unison-src/tests/hang.u | 88 - unison-src/tests/id.u | 5 - unison-src/tests/if.u | 2 - unison-src/tests/imports.u | 22 - unison-src/tests/imports2.u | 12 - unison-src/tests/inner-lambda1.u | 15 - unison-src/tests/inner-lambda2.u | 16 - unison-src/tests/io-state2.u | 23 - unison-src/tests/io-state3.u | 10 - unison-src/tests/keyword-parse.u | 4 - .../tests/lambda-closing-over-effectful-fn.u | 10 - .../tests/lambda-closing-over-effectful-fn.ur | 1 - unison-src/tests/links.u | 13 - unison-src/tests/links.ur | 1 - unison-src/tests/map-traverse.u | 30 - unison-src/tests/map-traverse2.u | 32 - unison-src/tests/mergesort.u | 26 - unison-src/tests/methodical/abilities.u | 18 - unison-src/tests/methodical/abilities.ur | 1 - .../tests/methodical/apply-constructor.u | 29 - .../tests/methodical/apply-constructor.ur | 1 - unison-src/tests/methodical/apply.u | 43 - unison-src/tests/methodical/apply.ur | 1 - .../tests/methodical/builtin-nat-to-float.u | 1 - .../tests/methodical/builtin-nat-to-float.ur | 1 - unison-src/tests/methodical/builtins.u | 14 - unison-src/tests/methodical/cycle-minimize.u | 11 - unison-src/tests/methodical/dots.u | 28 - unison-src/tests/methodical/dots.ur | 1 - unison-src/tests/methodical/empty.u | 0 unison-src/tests/methodical/empty2.u | 1 - 
unison-src/tests/methodical/empty3.u | 3 - unison-src/tests/methodical/exponential.u | 5 - unison-src/tests/methodical/exponential.ur | 3 - unison-src/tests/methodical/float.u | 15 - unison-src/tests/methodical/float.ur | 7 - unison-src/tests/methodical/hyperbolic.u | 8 - unison-src/tests/methodical/hyperbolic.ur | 6 - unison-src/tests/methodical/int.u | 24 - unison-src/tests/methodical/int.ur | 17 - unison-src/tests/methodical/let.u | 12 - unison-src/tests/methodical/let.ur | 1 - unison-src/tests/methodical/literals.u | 12 - unison-src/tests/methodical/literals.ur | 1 - unison-src/tests/methodical/loop.u | 8 - unison-src/tests/methodical/nat.u | 31 - unison-src/tests/methodical/nat.ur | 23 - .../tests/methodical/overapply-ability.u | 47 - .../tests/methodical/overapply-ability.ur | 1 - unison-src/tests/methodical/parens.u | 27 - .../tests/methodical/pattern-matching.u | 28 - .../tests/methodical/pattern-matching.ur | 7 - unison-src/tests/methodical/power.u | 4 - unison-src/tests/methodical/power.ur | 2 - unison-src/tests/methodical/rank2.u | 9 - unison-src/tests/methodical/rounding.u | 8 - unison-src/tests/methodical/rounding.ur | 6 - unison-src/tests/methodical/scopedtypevars.u | 29 - unison-src/tests/methodical/semis.u | 13 - unison-src/tests/methodical/semis.ur | 1 - unison-src/tests/methodical/trig.u | 9 - unison-src/tests/methodical/trig.ur | 7 - unison-src/tests/methodical/universals.u | 20 - unison-src/tests/methodical/universals.ur | 17 - unison-src/tests/methodical/wildcardimports.u | 6 - unison-src/tests/multiple-effects.u | 17 - unison-src/tests/one-liners.uu | 2 - unison-src/tests/parenthesized-blocks.u | 5 - unison-src/tests/parenthesized-blocks.ur | 1 - unison-src/tests/pattern-match-seq.u | 86 - unison-src/tests/pattern-match-seq.ur | 20 - unison-src/tests/pattern-matching.u | 36 - unison-src/tests/pattern-matching2.u | 21 - unison-src/tests/pattern-typing-bug.u | 9 - unison-src/tests/pattern-typing-bug.ur | 1 - unison-src/tests/pattern-weirdness.u 
| 16 - unison-src/tests/pattern-weirdness.ur | 1 - unison-src/tests/quote-parse-bug.uu | 6 - unison-src/tests/r0.u | 5 - unison-src/tests/r1.u | 6 - unison-src/tests/r10.u | 5 - unison-src/tests/r11.u | 7 - unison-src/tests/r12.u | 4 - unison-src/tests/r13.u | 5 - unison-src/tests/r14.u | 4 - unison-src/tests/r2.u | 6 - unison-src/tests/r3.u | 6 - unison-src/tests/r4negate.u | 5 - unison-src/tests/r4x.u | 3 - unison-src/tests/r5.u | 6 - unison-src/tests/r6.u | 4 - unison-src/tests/r7.0.u | 6 - unison-src/tests/r7.1.u | 5 - unison-src/tests/r7.2.u | 4 - unison-src/tests/r8.u | 5 - unison-src/tests/r9.u | 11 - unison-src/tests/rainbow.u | 32 - unison-src/tests/records.u | 12 - unison-src/tests/runtime-crash.uu | 13 - unison-src/tests/sequence-at-0.u | 2 - .../tests/sequence-literal-argument-parsing.u | 5 - unison-src/tests/sequence-literal.u | 19 - unison-src/tests/soe.u | 124 - unison-src/tests/soe2.u | 47 - .../tests/spurious-ability-fail-underapply.u | 8 - unison-src/tests/spurious-ability-fail.u | 16 - unison-src/tests/state1.u | 15 - unison-src/tests/state1a.u | 11 - unison-src/tests/state2.u | 11 - unison-src/tests/state2a-min.u | 17 - unison-src/tests/state2a-min.ur | 1 - unison-src/tests/state2a.u | 50 - unison-src/tests/state2a.uu | 30 - unison-src/tests/state2b-min.u | 15 - unison-src/tests/state2b-min.ur | 1 - unison-src/tests/state2b.u | 39 - unison-src/tests/state3.u | 30 - unison-src/tests/state4.u | 26 - unison-src/tests/state4.ur | 1 - unison-src/tests/state4a.u | 26 - unison-src/tests/state4a.ur | 1 - unison-src/tests/stream.u | 72 - unison-src/tests/stream2.uu | 81 - unison-src/tests/stream3.uu | 71 - unison-src/tests/suffix-resolve.u | 23 - unison-src/tests/tdnr.u | 4 - unison-src/tests/tdnr2.u | 13 - unison-src/tests/tdnr3.u | 6 - unison-src/tests/tdnr4.u | 4 - unison-src/tests/text-escaping.u | 10 - unison-src/tests/text-escaping.ur | 1 - unison-src/tests/text-pattern.u | 6 - unison-src/tests/text-pattern.ur | 1 - unison-src/tests/tictactoe.u | 
43 - unison-src/tests/tictactoe0-array-oob1.u | 12 - unison-src/tests/tictactoe0-npe.u | 17 - unison-src/tests/tictactoe0.u | 43 - unison-src/tests/tictactoe2.u | 63 - unison-src/tests/tuple.u | 4 - unison-src/tests/tuple.ur | 1 - unison-src/tests/type-application.u | 11 - unison-src/tests/underscore-parsing.u | 7 - unison-src/tests/ungeneralize-bug.uu | 22 - unison-src/tests/unique.u | 28 - unison-src/tests/void.u | 3 - unison-src/transcripts/addupdatemessages.md | 63 - .../transcripts/addupdatemessages.output.md | 159 - unison-src/transcripts/alias-many.md | 130 - unison-src/transcripts/alias-many.output.md | 379 --- unison-src/transcripts/ambiguous-metadata.md | 17 - .../transcripts/ambiguous-metadata.output.md | 42 - unison-src/transcripts/blocks.md | 177 - unison-src/transcripts/blocks.output.md | 339 -- unison-src/transcripts/builtins-merge.md | 6 - .../transcripts/builtins-merge.output.md | 50 - unison-src/transcripts/bytesFromList.md | 11 - .../transcripts/bytesFromList.output.md | 21 - unison-src/transcripts/cd-back.md | 46 - unison-src/transcripts/cd-back.output.md | 40 - unison-src/transcripts/check763.md | 17 - unison-src/transcripts/check763.output.md | 34 - unison-src/transcripts/check873.md | 17 - unison-src/transcripts/check873.output.md | 40 - unison-src/transcripts/copy-patch.md | 39 - unison-src/transcripts/copy-patch.output.md | 91 - unison-src/transcripts/create-author.md | 17 - .../transcripts/create-author.output.md | 44 - unison-src/transcripts/delete.md | 100 - unison-src/transcripts/delete.output.md | 237 -- unison-src/transcripts/deleteReplacements.md | 46 - .../transcripts/deleteReplacements.output.md | 132 - .../dependents-dependencies-debugfile.md | 38 - ...ependents-dependencies-debugfile.output.md | 93 - unison-src/transcripts/diff.md | 201 -- unison-src/transcripts/diff.output.md | 733 ----- unison-src/transcripts/doc-formatting.md | 254 -- .../transcripts/doc-formatting.output.md | 512 --- unison-src/transcripts/docs.md | 95 - 
unison-src/transcripts/docs.output.md | 211 -- unison-src/transcripts/emptyCodebase.md | 27 - .../transcripts/emptyCodebase.output.md | 41 - .../transcripts/errors/ucm-hide-all-error.md | 12 - .../errors/ucm-hide-all-error.output.md | 17 - unison-src/transcripts/errors/ucm-hide-all.md | 12 - .../transcripts/errors/ucm-hide-all.output.md | 17 - .../transcripts/errors/ucm-hide-error.md | 12 - .../errors/ucm-hide-error.output.md | 17 - unison-src/transcripts/errors/ucm-hide.md | 12 - .../transcripts/errors/ucm-hide.output.md | 17 - .../errors/unison-hide-all-error.md | 10 - .../errors/unison-hide-all-error.output.md | 16 - .../transcripts/errors/unison-hide-all.md | 10 - .../errors/unison-hide-all.output.md | 16 - .../transcripts/errors/unison-hide-error.md | 10 - .../errors/unison-hide-error.output.md | 16 - unison-src/transcripts/errors/unison-hide.md | 10 - .../transcripts/errors/unison-hide.output.md | 16 - unison-src/transcripts/escape-sequences.md | 5 - .../transcripts/escape-sequences.output.md | 28 - unison-src/transcripts/find-patch.md | 28 - unison-src/transcripts/find-patch.output.md | 81 - .../transcripts/fix-1381-excess-propagate.md | 28 - .../fix-1381-excess-propagate.output.md | 55 - unison-src/transcripts/fix-big-list-crash.md | 13 - .../transcripts/fix-big-list-crash.output.md | 22 - unison-src/transcripts/fix1063.md | 17 - unison-src/transcripts/fix1063.output.md | 35 - unison-src/transcripts/fix1334.md | 36 - unison-src/transcripts/fix1334.output.md | 101 - unison-src/transcripts/fix1356.md | 41 - unison-src/transcripts/fix1356.output.md | 94 - unison-src/transcripts/fix689.md | 13 - unison-src/transcripts/fix689.output.md | 21 - unison-src/transcripts/fix849.md | 12 - unison-src/transcripts/fix849.output.md | 27 - unison-src/transcripts/fix942.md | 37 - unison-src/transcripts/fix942.output.md | 114 - unison-src/transcripts/fix987.md | 37 - unison-src/transcripts/fix987.output.md | 65 - unison-src/transcripts/hello.md | 69 - 
unison-src/transcripts/hello.output.md | 92 - unison-src/transcripts/link.md | 70 - unison-src/transcripts/link.output.md | 202 -- unison-src/transcripts/merge.md | 101 - unison-src/transcripts/merge.output.md | 227 -- unison-src/transcripts/mergeloop.md | 51 - unison-src/transcripts/mergeloop.output.md | 143 - unison-src/transcripts/merges.md | 119 - unison-src/transcripts/merges.output.md | 410 --- unison-src/transcripts/names.md | 20 - unison-src/transcripts/names.output.md | 32 - unison-src/transcripts/numbered-args.md | 56 - .../transcripts/numbered-args.output.md | 162 - unison-src/transcripts/propagate.md | 134 - unison-src/transcripts/propagate.output.md | 280 -- unison-src/transcripts/redundant.output.md | 45 - unison-src/transcripts/reflog.md | 31 - unison-src/transcripts/reflog.output.md | 90 - unison-src/transcripts/resolve.md | 115 - unison-src/transcripts/resolve.output.md | 259 -- unison-src/transcripts/squash.md | 132 - unison-src/transcripts/squash.output.md | 472 --- unison-src/transcripts/suffixes.md | 40 - unison-src/transcripts/suffixes.output.md | 57 - unison-src/transcripts/todo-bug-builtins.md | 27 - .../transcripts/todo-bug-builtins.output.md | 89 - .../transcripts/transcript-parser-commands.md | 41 - .../transcript-parser-commands.output.md | 72 - unison-src/transcripts/unitnamespace.md | 9 - .../transcripts/unitnamespace.output.md | 35 - yaks/easytest/LICENSE | 19 - yaks/easytest/README.markdown | 264 -- yaks/easytest/easytest.cabal | 95 - yaks/easytest/src/EasyTest.hs | 458 --- yaks/easytest/tests/Suite.hs | 34 - 663 files changed, 4133 insertions(+), 66897 deletions(-) create mode 100644 codebase1/codebase/Unison/Codebase/V1/ABT.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs rename {unison-core/src/Unison => 
codebase1/codebase/Unison/Codebase/V1}/ConstructorType.hs (60%) create mode 100644 codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs rename {unison-core/src/Unison => codebase1/codebase/Unison/Codebase/V1}/LabeledDependency.hs (78%) create mode 100644 codebase1/codebase/Unison/Codebase/V1/Patch.hs rename {parser-typechecker/src/Unison/Codebase => codebase1/codebase/Unison/Codebase/V1/Patch}/TermEdit.hs (54%) create mode 100644 codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Reference.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Referent.hs rename {parser-typechecker/src/Unison/Codebase => codebase1/codebase/Unison/Codebase/V1/Serialization}/Serialization.hs (95%) create mode 100644 codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs rename {parser-typechecker/src/Unison/Util => codebase1/codebase/Unison/Codebase/V1}/Star3.hs (97%) create mode 100644 codebase1/codebase/Unison/Codebase/V1/Symbol.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Term.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Type.hs create mode 100644 codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs create mode 100644 codebase1/codebase/unison-codebase1.cabal create mode 100644 codebase2/CHANGELOG.md create mode 100644 codebase2/LICENSE create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs create mode 100644 
codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs create mode 100644 codebase2/codebase-sqlite/sql/create-index.sql create mode 100644 codebase2/codebase-sqlite/sql/create.sql create mode 100644 codebase2/codebase-sqlite/unison-codebase-sqlite.cabal create mode 100644 codebase2/codebase/U/Codebase/Branch.hs create mode 100644 codebase2/codebase/U/Codebase/Causal.hs create mode 100644 codebase2/codebase/U/Codebase/Codebase.hs create mode 100644 codebase2/codebase/U/Codebase/Decl.hs rename {unison-core/src/Unison => codebase2/codebase/U/Codebase}/Kind.hs (65%) create mode 100644 codebase2/codebase/U/Codebase/Reference.hs create mode 100644 codebase2/codebase/U/Codebase/Referent.hs create mode 100644 codebase2/codebase/U/Codebase/Reflog.hs create mode 100644 codebase2/codebase/U/Codebase/ShortHash.hs create mode 100644 codebase2/codebase/U/Codebase/Term.hs create mode 100644 codebase2/codebase/U/Codebase/TermEdit.hs create mode 100644 codebase2/codebase/U/Codebase/Type.hs rename {parser-typechecker/src/Unison => 
codebase2/codebase/U}/Codebase/TypeEdit.hs (72%) create mode 100644 codebase2/codebase/U/Codebase/WatchKind.hs create mode 100644 codebase2/codebase/unison-codebase.cabal create mode 100644 codebase2/core/U/Core/ABT.hs create mode 100644 codebase2/core/unison-core.cabal create mode 100644 codebase2/editor/U/Editor/Codebase.hs create mode 100644 codebase2/editor/unison-editor.cabal rename {unison-core/src/Unison => codebase2/language/U/Language}/Blank.hs (76%) create mode 100644 codebase2/language/unison-language.cabal create mode 100644 codebase2/notes.txt create mode 100644 codebase2/runtime/U/Runtime/CodeLookup.hs create mode 100644 codebase2/runtime/unison-runtime.cabal create mode 100644 codebase2/syntax/unison-syntax.cabal create mode 100644 codebase2/util-serialization/U/Util/Serialization.hs create mode 100644 codebase2/util-serialization/unison-util-serialization.cabal create mode 100644 codebase2/util/U/Util/Base32Hex.hs create mode 100644 codebase2/util/U/Util/Hash.hs rename {unison-core/src/Unison => codebase2/util/U/Util}/Hashable.hs (95%) rename {unison-core/src/Unison => codebase2/util/U}/Util/Relation.hs (98%) create mode 100644 codebase2/util/unison-util.cabal delete mode 100644 parser-typechecker/LICENSE delete mode 100644 parser-typechecker/benchmarks/runtime/Main.hs delete mode 100644 parser-typechecker/prettyprintdemo/Main.hs delete mode 100644 parser-typechecker/src/Unison/Builtin.hs delete mode 100644 parser-typechecker/src/Unison/Builtin/Decls.hs delete mode 100644 parser-typechecker/src/Unison/Codebase.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Branch.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/BranchDiff.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/BranchUtil.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Causal.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Classes.hs delete mode 
100644 parser-typechecker/src/Unison/Codebase/CodeLookup.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Command.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Git.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Input.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Output.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Execute.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/GitError.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/MainTerm.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Metadata.hs delete mode 100644 
parser-typechecker/src/Unison/Codebase/NameEdit.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Patch.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Path.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Reflog.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Runtime.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/SearchResult.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Serialization/V1.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/SyncMode.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/TranscriptParser.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/Watch.hs delete mode 100644 parser-typechecker/src/Unison/Codecs.hs delete mode 100644 parser-typechecker/src/Unison/CommandLine.hs delete mode 100644 parser-typechecker/src/Unison/CommandLine/DisplayValues.hs delete mode 100644 parser-typechecker/src/Unison/CommandLine/InputPattern.hs delete mode 100644 parser-typechecker/src/Unison/CommandLine/InputPatterns.hs delete mode 100644 parser-typechecker/src/Unison/CommandLine/Main.hs delete mode 100644 parser-typechecker/src/Unison/CommandLine/OutputMessages.hs delete mode 100644 parser-typechecker/src/Unison/DeclPrinter.hs delete mode 100644 parser-typechecker/src/Unison/FileParser.hs delete mode 100644 parser-typechecker/src/Unison/FileParsers.hs delete mode 100644 parser-typechecker/src/Unison/Lexer.hs delete mode 100644 parser-typechecker/src/Unison/NamePrinter.hs delete mode 100644 parser-typechecker/src/Unison/Parser.hs delete mode 100644 parser-typechecker/src/Unison/Parsers.hs delete mode 100644 parser-typechecker/src/Unison/Path.hs delete mode 100644 parser-typechecker/src/Unison/PrettyPrintEnv.hs delete mode 100644 parser-typechecker/src/Unison/PrettyTerminal.hs delete mode 100644 
parser-typechecker/src/Unison/PrintError.hs delete mode 100644 parser-typechecker/src/Unison/Result.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/ANF.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Builtin.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Debug.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Decompile.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Foreign.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/IOSource.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/IR.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Interface.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/MCode.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Machine.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Pattern.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Rt1.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Rt1IO.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/SparseVector.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Stack.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/Vector.hs delete mode 100644 parser-typechecker/src/Unison/Runtime/docs.markdown delete mode 100644 parser-typechecker/src/Unison/TermParser.hs delete mode 100644 parser-typechecker/src/Unison/TermPrinter.hs delete mode 100644 parser-typechecker/src/Unison/TypeParser.hs delete mode 100644 parser-typechecker/src/Unison/TypePrinter.hs delete mode 100644 parser-typechecker/src/Unison/Typechecker.hs delete mode 100644 parser-typechecker/src/Unison/Typechecker/Components.hs delete mode 100644 parser-typechecker/src/Unison/Typechecker/Context.hs delete mode 100644 parser-typechecker/src/Unison/Typechecker/Extractor.hs delete mode 100644 parser-typechecker/src/Unison/Typechecker/TypeError.hs delete mode 100644 parser-typechecker/src/Unison/Typechecker/TypeLookup.hs delete mode 100644 
parser-typechecker/src/Unison/Typechecker/TypeVar.hs delete mode 100644 parser-typechecker/src/Unison/UnisonFile.hs delete mode 100644 parser-typechecker/src/Unison/Util/AnnotatedText.hs delete mode 100644 parser-typechecker/src/Unison/Util/Bytes.hs delete mode 100644 parser-typechecker/src/Unison/Util/Cache.hs delete mode 100644 parser-typechecker/src/Unison/Util/ColorText.hs delete mode 100644 parser-typechecker/src/Unison/Util/CycleTable.hs delete mode 100644 parser-typechecker/src/Unison/Util/CyclicEq.hs delete mode 100644 parser-typechecker/src/Unison/Util/CyclicOrd.hs delete mode 100644 parser-typechecker/src/Unison/Util/EnumContainers.hs delete mode 100644 parser-typechecker/src/Unison/Util/Exception.hs delete mode 100644 parser-typechecker/src/Unison/Util/Find.hs delete mode 100644 parser-typechecker/src/Unison/Util/Free.hs delete mode 100644 parser-typechecker/src/Unison/Util/Less.hs delete mode 100644 parser-typechecker/src/Unison/Util/Logger.hs delete mode 100644 parser-typechecker/src/Unison/Util/Map.hs delete mode 100644 parser-typechecker/src/Unison/Util/Menu.hs delete mode 100644 parser-typechecker/src/Unison/Util/PinBoard.hs delete mode 100644 parser-typechecker/src/Unison/Util/Pretty.hs delete mode 100644 parser-typechecker/src/Unison/Util/Range.hs delete mode 100644 parser-typechecker/src/Unison/Util/SyntaxText.hs delete mode 100644 parser-typechecker/src/Unison/Util/TQueue.hs delete mode 100644 parser-typechecker/src/Unison/Util/Timing.hs delete mode 100644 parser-typechecker/src/Unison/Util/TransitiveClosure.hs delete mode 100644 parser-typechecker/tests/Suite.hs delete mode 100644 parser-typechecker/tests/Unison/Core/Test/Name.hs delete mode 100644 parser-typechecker/tests/Unison/Test/ABT.hs delete mode 100644 parser-typechecker/tests/Unison/Test/ANF.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Cache.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Codebase.hs delete mode 100644 
parser-typechecker/tests/Unison/Test/Codebase/Causal.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Codebase/Path.hs delete mode 100644 parser-typechecker/tests/Unison/Test/ColorText.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Common.hs delete mode 100644 parser-typechecker/tests/Unison/Test/DataDeclaration.hs delete mode 100644 parser-typechecker/tests/Unison/Test/FileParser.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Git.hs delete mode 100644 parser-typechecker/tests/Unison/Test/IO.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Lexer.hs delete mode 100644 parser-typechecker/tests/Unison/Test/MCode.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Range.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Referent.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Term.hs delete mode 100644 parser-typechecker/tests/Unison/Test/TermParser.hs delete mode 100755 parser-typechecker/tests/Unison/Test/TermPrinter.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Type.hs delete mode 100755 parser-typechecker/tests/Unison/Test/TypePrinter.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Typechecker.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Typechecker/Components.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Typechecker/Context.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs delete mode 100644 parser-typechecker/tests/Unison/Test/UnisonSources.hs delete mode 100644 parser-typechecker/tests/Unison/Test/UriParser.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Util/Bytes.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Util/PinBoard.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Util/Pretty.hs delete mode 100644 parser-typechecker/tests/Unison/Test/Var.hs delete mode 100644 
parser-typechecker/tests/Unison/Test/VersionParser.hs delete mode 100644 parser-typechecker/transcripts/Transcripts.hs delete mode 100644 parser-typechecker/unison-parser-typechecker.cabal delete mode 100644 parser-typechecker/unison/Main.hs delete mode 100644 parser-typechecker/unison/System/Path.hs delete mode 100644 parser-typechecker/unison/Version.hs delete mode 100644 unison-core/LICENSE delete mode 100644 unison-core/src/Unison/ABT.hs delete mode 100644 unison-core/src/Unison/ABT/Normalized.hs delete mode 100644 unison-core/src/Unison/DataDeclaration.hs delete mode 100644 unison-core/src/Unison/Hash.hs delete mode 100644 unison-core/src/Unison/HashQualified'.hs delete mode 100644 unison-core/src/Unison/HashQualified.hs delete mode 100644 unison-core/src/Unison/Name.hs delete mode 100644 unison-core/src/Unison/NameSegment.hs delete mode 100644 unison-core/src/Unison/Names2.hs delete mode 100644 unison-core/src/Unison/Names3.hs delete mode 100644 unison-core/src/Unison/Paths.hs delete mode 100644 unison-core/src/Unison/Pattern.hs delete mode 100644 unison-core/src/Unison/PatternCompat.hs delete mode 100644 unison-core/src/Unison/Prelude.hs delete mode 100644 unison-core/src/Unison/Reference.hs delete mode 100644 unison-core/src/Unison/Reference/Util.hs delete mode 100644 unison-core/src/Unison/Referent.hs delete mode 100644 unison-core/src/Unison/Settings.hs delete mode 100644 unison-core/src/Unison/ShortHash.hs delete mode 100644 unison-core/src/Unison/Symbol.hs delete mode 100644 unison-core/src/Unison/Term.hs delete mode 100644 unison-core/src/Unison/Type.hs delete mode 100644 unison-core/src/Unison/Util/Components.hs delete mode 100644 unison-core/src/Unison/Util/List.hs delete mode 100644 unison-core/src/Unison/Util/Monoid.hs delete mode 100644 unison-core/src/Unison/Util/Relation3.hs delete mode 100644 unison-core/src/Unison/Util/Relation4.hs delete mode 100644 unison-core/src/Unison/Util/Set.hs delete mode 100644 unison-core/src/Unison/Var.hs delete 
mode 100644 unison-core/unison-core.cabal delete mode 100644 unison-src/Base.u delete mode 100644 unison-src/Cofree.u delete mode 100644 unison-src/EasyTest.u delete mode 100644 unison-src/Trie.u delete mode 100644 unison-src/WeightedSearch.u delete mode 100644 unison-src/base58.u delete mode 100644 unison-src/basics.u delete mode 100644 unison-src/demo/1.u delete mode 100644 unison-src/demo/2.u delete mode 100644 unison-src/demo/3.u delete mode 100644 unison-src/errors/407.u delete mode 100644 unison-src/errors/X-array.u delete mode 100644 unison-src/errors/abort-ability-checks-against-pure.u delete mode 100644 unison-src/errors/all-errors.u delete mode 100644 unison-src/errors/check-for-regressions/and.u delete mode 100644 unison-src/errors/check-for-regressions/app-polymorphic.u delete mode 100644 unison-src/errors/check-for-regressions/app.u delete mode 100644 unison-src/errors/check-for-regressions/applying-non-function.u delete mode 100644 unison-src/errors/check-for-regressions/casebody.u delete mode 100644 unison-src/errors/check-for-regressions/caseguard.u delete mode 100644 unison-src/errors/check-for-regressions/casepattern.u delete mode 100644 unison-src/errors/check-for-regressions/ifcond.u delete mode 100644 unison-src/errors/check-for-regressions/ifelse.u delete mode 100644 unison-src/errors/check-for-regressions/lens.u delete mode 100644 unison-src/errors/check-for-regressions/not-and.u delete mode 100644 unison-src/errors/check-for-regressions/not-and0.u delete mode 100644 unison-src/errors/check-for-regressions/not-caseguard.u delete mode 100644 unison-src/errors/check-for-regressions/not-caseguard2.u delete mode 100644 unison-src/errors/check-for-regressions/not-or.u delete mode 100644 unison-src/errors/check-for-regressions/not-vector.u delete mode 100644 unison-src/errors/check-for-regressions/or.u delete mode 100644 unison-src/errors/check-for-regressions/vector.u delete mode 100644 unison-src/errors/compiler-bug.u delete mode 100644 
unison-src/errors/console.u delete mode 100644 unison-src/errors/console2.u delete mode 100644 unison-src/errors/cyclic-unguarded.u delete mode 100644 unison-src/errors/effect-inference1.u delete mode 100755 unison-src/errors/effect_unknown_type.uu delete mode 100644 unison-src/errors/empty-block.u delete mode 100644 unison-src/errors/ex1.u delete mode 100644 unison-src/errors/fix745.u delete mode 100644 unison-src/errors/handle-inference.u delete mode 100644 unison-src/errors/handler-coverage-checking.uu delete mode 100644 unison-src/errors/id.u delete mode 100644 unison-src/errors/io-effect.u delete mode 100644 unison-src/errors/io-state1.u delete mode 100644 unison-src/errors/map-reduce.u delete mode 100644 unison-src/errors/map-traverse3.u delete mode 100644 unison-src/errors/mismatched-braces.u delete mode 100644 unison-src/errors/need-nominal-type.uu delete mode 100644 unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u delete mode 100644 unison-src/errors/poor-error-message/consoleh.u delete mode 100644 unison-src/errors/poor-error-message/doesnt-match-annotation.u delete mode 100644 unison-src/errors/poor-error-message/function-calls.u delete mode 100644 unison-src/errors/poor-error-message/function-calls1.u delete mode 100644 unison-src/errors/poor-error-message/function-calls2.u delete mode 100644 unison-src/errors/poor-error-message/function-calls3.u delete mode 100644 unison-src/errors/poor-error-message/handle.u delete mode 100644 unison-src/errors/poor-error-message/handler-ex.u delete mode 100644 unison-src/errors/poor-error-message/mismatched-case-result-types.u delete mode 100644 unison-src/errors/poor-error-message/notaguard.u delete mode 100644 unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u delete mode 100644 unison-src/errors/poor-error-message/pattern-case-location.u delete mode 100644 unison-src/errors/poor-error-message/pattern-matching-1.u delete mode 100644 
unison-src/errors/poor-error-message/tdnr-demo.u delete mode 100644 unison-src/errors/poor-error-message/token-printing.u delete mode 100644 unison-src/errors/rank2a.u delete mode 100644 unison-src/errors/seq-concat-constant-length.u delete mode 100644 unison-src/errors/state4.u delete mode 100644 unison-src/errors/tdnr.u delete mode 100644 unison-src/errors/tdnr2.u delete mode 100644 unison-src/errors/tdnr3.u delete mode 100644 unison-src/errors/term-functor-inspired/effect1.u delete mode 100644 unison-src/errors/term-functor-inspired/if-body-mismatch.u delete mode 100644 unison-src/errors/term-functor-inspired/if-cond-not-bool.u delete mode 100644 unison-src/errors/term-functor-inspired/mismatched-case-result-types.u delete mode 100644 unison-src/errors/type-apply.u delete mode 100644 unison-src/errors/type-functor-inspired/app2.u delete mode 100644 unison-src/errors/type-functor-inspired/arrow1.u delete mode 100644 unison-src/errors/type-functor-inspired/effect2.u delete mode 100644 unison-src/errors/type-functor-inspired/forall-arrow.u delete mode 100644 unison-src/errors/type-functor-inspired/forall-arrow2.u delete mode 100644 unison-src/errors/type-functor-inspired/forall-arrow3.u delete mode 100644 unison-src/errors/type-functor-inspired/need-nonstructural-types.uu delete mode 100644 unison-src/errors/type-functor-inspired/parens.u delete mode 100644 unison-src/errors/type-functor-inspired/subtuple.u delete mode 100644 unison-src/errors/type-functor-inspired/synthesizeApp.u delete mode 100644 unison-src/errors/type-functor-inspired/tuple.u delete mode 100644 unison-src/errors/type-functor-inspired/tuple2.u delete mode 100644 unison-src/errors/type-functor-inspired/unit.u delete mode 100644 unison-src/errors/unexpected-loop.u delete mode 100644 unison-src/errors/unresolved-symbol-1.u delete mode 100644 unison-src/errors/unsound-cont.u delete mode 100644 unison-src/example-errors.u delete mode 100644 unison-src/parser-tests/GenerateErrors.hs delete mode 100644 
unison-src/parser-tests/empty-match-list.message.txt delete mode 100644 unison-src/parser-tests/empty-match-list.u delete mode 100644 unison-src/parser-tests/if-without-condition.message.txt delete mode 100644 unison-src/parser-tests/if-without-condition.u delete mode 100644 unison-src/remote-api.u delete mode 100644 unison-src/remote.u delete mode 100644 unison-src/sheepshead.u delete mode 100644 unison-src/tests/324.u delete mode 100644 unison-src/tests/344.uu delete mode 100644 unison-src/tests/514.u delete mode 100644 unison-src/tests/595.u delete mode 100644 unison-src/tests/868.u delete mode 100644 unison-src/tests/868.ur delete mode 100644 unison-src/tests/a-tale-of-two-optionals.u delete mode 100644 unison-src/tests/ability-inference-fail.uu delete mode 100644 unison-src/tests/ability-keyword.u delete mode 100644 unison-src/tests/abort.u delete mode 100644 unison-src/tests/ask-inferred.u delete mode 100644 unison-src/tests/boolean-ops-in-sequence.u delete mode 100644 unison-src/tests/builtin-arity-0-evaluation.u delete mode 100644 unison-src/tests/builtin-arity-0-evaluation.ur delete mode 100644 unison-src/tests/caseguard.u delete mode 100644 unison-src/tests/cce.u delete mode 100644 unison-src/tests/cce.ur delete mode 100644 unison-src/tests/compose-inference.u delete mode 100644 unison-src/tests/console.u delete mode 100644 unison-src/tests/console1.u delete mode 100644 unison-src/tests/data-references-builtins.u delete mode 100644 unison-src/tests/delay.u delete mode 100644 unison-src/tests/delay_parse.u delete mode 100644 unison-src/tests/effect-instantiation.u delete mode 100644 unison-src/tests/effect-instantiation2.u delete mode 100644 unison-src/tests/effect1.u delete mode 100644 unison-src/tests/empty-above-the-fold.u delete mode 100644 unison-src/tests/fib4.ur delete mode 100644 unison-src/tests/fix1640.u delete mode 100644 unison-src/tests/fix528.u delete mode 100644 unison-src/tests/fix528.ur delete mode 100644 unison-src/tests/fix739.u delete 
mode 100644 unison-src/tests/force.u delete mode 100644 unison-src/tests/guard-boolean-operators.u delete mode 100644 unison-src/tests/handler-stacking.u delete mode 100644 unison-src/tests/hang.u delete mode 100644 unison-src/tests/id.u delete mode 100644 unison-src/tests/if.u delete mode 100644 unison-src/tests/imports.u delete mode 100644 unison-src/tests/imports2.u delete mode 100644 unison-src/tests/inner-lambda1.u delete mode 100644 unison-src/tests/inner-lambda2.u delete mode 100644 unison-src/tests/io-state2.u delete mode 100644 unison-src/tests/io-state3.u delete mode 100644 unison-src/tests/keyword-parse.u delete mode 100644 unison-src/tests/lambda-closing-over-effectful-fn.u delete mode 100644 unison-src/tests/lambda-closing-over-effectful-fn.ur delete mode 100644 unison-src/tests/links.u delete mode 100644 unison-src/tests/links.ur delete mode 100644 unison-src/tests/map-traverse.u delete mode 100644 unison-src/tests/map-traverse2.u delete mode 100644 unison-src/tests/mergesort.u delete mode 100644 unison-src/tests/methodical/abilities.u delete mode 100644 unison-src/tests/methodical/abilities.ur delete mode 100644 unison-src/tests/methodical/apply-constructor.u delete mode 100644 unison-src/tests/methodical/apply-constructor.ur delete mode 100644 unison-src/tests/methodical/apply.u delete mode 100644 unison-src/tests/methodical/apply.ur delete mode 100644 unison-src/tests/methodical/builtin-nat-to-float.u delete mode 100644 unison-src/tests/methodical/builtin-nat-to-float.ur delete mode 100644 unison-src/tests/methodical/builtins.u delete mode 100644 unison-src/tests/methodical/cycle-minimize.u delete mode 100644 unison-src/tests/methodical/dots.u delete mode 100644 unison-src/tests/methodical/dots.ur delete mode 100644 unison-src/tests/methodical/empty.u delete mode 100644 unison-src/tests/methodical/empty2.u delete mode 100644 unison-src/tests/methodical/empty3.u delete mode 100644 unison-src/tests/methodical/exponential.u delete mode 100644 
unison-src/tests/methodical/exponential.ur delete mode 100644 unison-src/tests/methodical/float.u delete mode 100644 unison-src/tests/methodical/float.ur delete mode 100644 unison-src/tests/methodical/hyperbolic.u delete mode 100644 unison-src/tests/methodical/hyperbolic.ur delete mode 100644 unison-src/tests/methodical/int.u delete mode 100644 unison-src/tests/methodical/int.ur delete mode 100644 unison-src/tests/methodical/let.u delete mode 100644 unison-src/tests/methodical/let.ur delete mode 100644 unison-src/tests/methodical/literals.u delete mode 100644 unison-src/tests/methodical/literals.ur delete mode 100644 unison-src/tests/methodical/loop.u delete mode 100644 unison-src/tests/methodical/nat.u delete mode 100644 unison-src/tests/methodical/nat.ur delete mode 100644 unison-src/tests/methodical/overapply-ability.u delete mode 100644 unison-src/tests/methodical/overapply-ability.ur delete mode 100644 unison-src/tests/methodical/parens.u delete mode 100644 unison-src/tests/methodical/pattern-matching.u delete mode 100644 unison-src/tests/methodical/pattern-matching.ur delete mode 100644 unison-src/tests/methodical/power.u delete mode 100644 unison-src/tests/methodical/power.ur delete mode 100644 unison-src/tests/methodical/rank2.u delete mode 100644 unison-src/tests/methodical/rounding.u delete mode 100644 unison-src/tests/methodical/rounding.ur delete mode 100644 unison-src/tests/methodical/scopedtypevars.u delete mode 100644 unison-src/tests/methodical/semis.u delete mode 100644 unison-src/tests/methodical/semis.ur delete mode 100644 unison-src/tests/methodical/trig.u delete mode 100644 unison-src/tests/methodical/trig.ur delete mode 100644 unison-src/tests/methodical/universals.u delete mode 100644 unison-src/tests/methodical/universals.ur delete mode 100644 unison-src/tests/methodical/wildcardimports.u delete mode 100644 unison-src/tests/multiple-effects.u delete mode 100644 unison-src/tests/one-liners.uu delete mode 100644 
unison-src/tests/parenthesized-blocks.u delete mode 100644 unison-src/tests/parenthesized-blocks.ur delete mode 100644 unison-src/tests/pattern-match-seq.u delete mode 100644 unison-src/tests/pattern-match-seq.ur delete mode 100644 unison-src/tests/pattern-matching.u delete mode 100644 unison-src/tests/pattern-matching2.u delete mode 100644 unison-src/tests/pattern-typing-bug.u delete mode 100644 unison-src/tests/pattern-typing-bug.ur delete mode 100644 unison-src/tests/pattern-weirdness.u delete mode 100644 unison-src/tests/pattern-weirdness.ur delete mode 100644 unison-src/tests/quote-parse-bug.uu delete mode 100644 unison-src/tests/r0.u delete mode 100644 unison-src/tests/r1.u delete mode 100644 unison-src/tests/r10.u delete mode 100644 unison-src/tests/r11.u delete mode 100644 unison-src/tests/r12.u delete mode 100644 unison-src/tests/r13.u delete mode 100644 unison-src/tests/r14.u delete mode 100644 unison-src/tests/r2.u delete mode 100644 unison-src/tests/r3.u delete mode 100644 unison-src/tests/r4negate.u delete mode 100644 unison-src/tests/r4x.u delete mode 100644 unison-src/tests/r5.u delete mode 100644 unison-src/tests/r6.u delete mode 100644 unison-src/tests/r7.0.u delete mode 100644 unison-src/tests/r7.1.u delete mode 100644 unison-src/tests/r7.2.u delete mode 100644 unison-src/tests/r8.u delete mode 100644 unison-src/tests/r9.u delete mode 100644 unison-src/tests/rainbow.u delete mode 100644 unison-src/tests/records.u delete mode 100644 unison-src/tests/runtime-crash.uu delete mode 100644 unison-src/tests/sequence-at-0.u delete mode 100644 unison-src/tests/sequence-literal-argument-parsing.u delete mode 100644 unison-src/tests/sequence-literal.u delete mode 100644 unison-src/tests/soe.u delete mode 100644 unison-src/tests/soe2.u delete mode 100644 unison-src/tests/spurious-ability-fail-underapply.u delete mode 100644 unison-src/tests/spurious-ability-fail.u delete mode 100644 unison-src/tests/state1.u delete mode 100644 unison-src/tests/state1a.u 
delete mode 100644 unison-src/tests/state2.u delete mode 100644 unison-src/tests/state2a-min.u delete mode 100644 unison-src/tests/state2a-min.ur delete mode 100644 unison-src/tests/state2a.u delete mode 100644 unison-src/tests/state2a.uu delete mode 100644 unison-src/tests/state2b-min.u delete mode 100644 unison-src/tests/state2b-min.ur delete mode 100644 unison-src/tests/state2b.u delete mode 100644 unison-src/tests/state3.u delete mode 100644 unison-src/tests/state4.u delete mode 100644 unison-src/tests/state4.ur delete mode 100644 unison-src/tests/state4a.u delete mode 100644 unison-src/tests/state4a.ur delete mode 100644 unison-src/tests/stream.u delete mode 100644 unison-src/tests/stream2.uu delete mode 100644 unison-src/tests/stream3.uu delete mode 100644 unison-src/tests/suffix-resolve.u delete mode 100644 unison-src/tests/tdnr.u delete mode 100644 unison-src/tests/tdnr2.u delete mode 100644 unison-src/tests/tdnr3.u delete mode 100644 unison-src/tests/tdnr4.u delete mode 100644 unison-src/tests/text-escaping.u delete mode 100644 unison-src/tests/text-escaping.ur delete mode 100644 unison-src/tests/text-pattern.u delete mode 100644 unison-src/tests/text-pattern.ur delete mode 100644 unison-src/tests/tictactoe.u delete mode 100644 unison-src/tests/tictactoe0-array-oob1.u delete mode 100644 unison-src/tests/tictactoe0-npe.u delete mode 100644 unison-src/tests/tictactoe0.u delete mode 100644 unison-src/tests/tictactoe2.u delete mode 100644 unison-src/tests/tuple.u delete mode 100644 unison-src/tests/tuple.ur delete mode 100644 unison-src/tests/type-application.u delete mode 100644 unison-src/tests/underscore-parsing.u delete mode 100644 unison-src/tests/ungeneralize-bug.uu delete mode 100644 unison-src/tests/unique.u delete mode 100644 unison-src/tests/void.u delete mode 100644 unison-src/transcripts/addupdatemessages.md delete mode 100644 unison-src/transcripts/addupdatemessages.output.md delete mode 100644 unison-src/transcripts/alias-many.md delete mode 
100644 unison-src/transcripts/alias-many.output.md delete mode 100644 unison-src/transcripts/ambiguous-metadata.md delete mode 100644 unison-src/transcripts/ambiguous-metadata.output.md delete mode 100644 unison-src/transcripts/blocks.md delete mode 100644 unison-src/transcripts/blocks.output.md delete mode 100644 unison-src/transcripts/builtins-merge.md delete mode 100644 unison-src/transcripts/builtins-merge.output.md delete mode 100644 unison-src/transcripts/bytesFromList.md delete mode 100644 unison-src/transcripts/bytesFromList.output.md delete mode 100644 unison-src/transcripts/cd-back.md delete mode 100644 unison-src/transcripts/cd-back.output.md delete mode 100644 unison-src/transcripts/check763.md delete mode 100644 unison-src/transcripts/check763.output.md delete mode 100644 unison-src/transcripts/check873.md delete mode 100644 unison-src/transcripts/check873.output.md delete mode 100644 unison-src/transcripts/copy-patch.md delete mode 100644 unison-src/transcripts/copy-patch.output.md delete mode 100644 unison-src/transcripts/create-author.md delete mode 100644 unison-src/transcripts/create-author.output.md delete mode 100644 unison-src/transcripts/delete.md delete mode 100644 unison-src/transcripts/delete.output.md delete mode 100644 unison-src/transcripts/deleteReplacements.md delete mode 100644 unison-src/transcripts/deleteReplacements.output.md delete mode 100644 unison-src/transcripts/dependents-dependencies-debugfile.md delete mode 100644 unison-src/transcripts/dependents-dependencies-debugfile.output.md delete mode 100644 unison-src/transcripts/diff.md delete mode 100644 unison-src/transcripts/diff.output.md delete mode 100644 unison-src/transcripts/doc-formatting.md delete mode 100644 unison-src/transcripts/doc-formatting.output.md delete mode 100644 unison-src/transcripts/docs.md delete mode 100644 unison-src/transcripts/docs.output.md delete mode 100644 unison-src/transcripts/emptyCodebase.md delete mode 100644 
unison-src/transcripts/emptyCodebase.output.md delete mode 100644 unison-src/transcripts/errors/ucm-hide-all-error.md delete mode 100644 unison-src/transcripts/errors/ucm-hide-all-error.output.md delete mode 100644 unison-src/transcripts/errors/ucm-hide-all.md delete mode 100644 unison-src/transcripts/errors/ucm-hide-all.output.md delete mode 100644 unison-src/transcripts/errors/ucm-hide-error.md delete mode 100644 unison-src/transcripts/errors/ucm-hide-error.output.md delete mode 100644 unison-src/transcripts/errors/ucm-hide.md delete mode 100644 unison-src/transcripts/errors/ucm-hide.output.md delete mode 100644 unison-src/transcripts/errors/unison-hide-all-error.md delete mode 100644 unison-src/transcripts/errors/unison-hide-all-error.output.md delete mode 100644 unison-src/transcripts/errors/unison-hide-all.md delete mode 100644 unison-src/transcripts/errors/unison-hide-all.output.md delete mode 100644 unison-src/transcripts/errors/unison-hide-error.md delete mode 100644 unison-src/transcripts/errors/unison-hide-error.output.md delete mode 100644 unison-src/transcripts/errors/unison-hide.md delete mode 100644 unison-src/transcripts/errors/unison-hide.output.md delete mode 100644 unison-src/transcripts/escape-sequences.md delete mode 100644 unison-src/transcripts/escape-sequences.output.md delete mode 100644 unison-src/transcripts/find-patch.md delete mode 100644 unison-src/transcripts/find-patch.output.md delete mode 100644 unison-src/transcripts/fix-1381-excess-propagate.md delete mode 100644 unison-src/transcripts/fix-1381-excess-propagate.output.md delete mode 100644 unison-src/transcripts/fix-big-list-crash.md delete mode 100644 unison-src/transcripts/fix-big-list-crash.output.md delete mode 100644 unison-src/transcripts/fix1063.md delete mode 100644 unison-src/transcripts/fix1063.output.md delete mode 100644 unison-src/transcripts/fix1334.md delete mode 100644 unison-src/transcripts/fix1334.output.md delete mode 100644 unison-src/transcripts/fix1356.md 
delete mode 100644 unison-src/transcripts/fix1356.output.md delete mode 100644 unison-src/transcripts/fix689.md delete mode 100644 unison-src/transcripts/fix689.output.md delete mode 100644 unison-src/transcripts/fix849.md delete mode 100644 unison-src/transcripts/fix849.output.md delete mode 100644 unison-src/transcripts/fix942.md delete mode 100644 unison-src/transcripts/fix942.output.md delete mode 100644 unison-src/transcripts/fix987.md delete mode 100644 unison-src/transcripts/fix987.output.md delete mode 100644 unison-src/transcripts/hello.md delete mode 100644 unison-src/transcripts/hello.output.md delete mode 100644 unison-src/transcripts/link.md delete mode 100644 unison-src/transcripts/link.output.md delete mode 100644 unison-src/transcripts/merge.md delete mode 100644 unison-src/transcripts/merge.output.md delete mode 100644 unison-src/transcripts/mergeloop.md delete mode 100644 unison-src/transcripts/mergeloop.output.md delete mode 100644 unison-src/transcripts/merges.md delete mode 100644 unison-src/transcripts/merges.output.md delete mode 100644 unison-src/transcripts/names.md delete mode 100644 unison-src/transcripts/names.output.md delete mode 100644 unison-src/transcripts/numbered-args.md delete mode 100644 unison-src/transcripts/numbered-args.output.md delete mode 100644 unison-src/transcripts/propagate.md delete mode 100644 unison-src/transcripts/propagate.output.md delete mode 100644 unison-src/transcripts/redundant.output.md delete mode 100644 unison-src/transcripts/reflog.md delete mode 100644 unison-src/transcripts/reflog.output.md delete mode 100644 unison-src/transcripts/resolve.md delete mode 100644 unison-src/transcripts/resolve.output.md delete mode 100644 unison-src/transcripts/squash.md delete mode 100644 unison-src/transcripts/squash.output.md delete mode 100644 unison-src/transcripts/suffixes.md delete mode 100644 unison-src/transcripts/suffixes.output.md delete mode 100644 unison-src/transcripts/todo-bug-builtins.md delete mode 
100644 unison-src/transcripts/todo-bug-builtins.output.md delete mode 100644 unison-src/transcripts/transcript-parser-commands.md delete mode 100644 unison-src/transcripts/transcript-parser-commands.output.md delete mode 100644 unison-src/transcripts/unitnamespace.md delete mode 100644 unison-src/transcripts/unitnamespace.output.md delete mode 100644 yaks/easytest/LICENSE delete mode 100644 yaks/easytest/README.markdown delete mode 100644 yaks/easytest/easytest.cabal delete mode 100644 yaks/easytest/src/EasyTest.hs delete mode 100644 yaks/easytest/tests/Suite.hs diff --git a/.travis.yml b/.travis.yml index 633d8347eb..6b710dee4d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,8 +33,8 @@ before_install: install: - stack ghc -- --version - stack --no-terminal build -- stack --no-terminal exec tests -- stack --no-terminal exec transcripts +# - stack --no-terminal exec tests +# - stack --no-terminal exec transcripts # fail if running transcripts modified any versioned files - git diff - x=`git status --porcelain -uno` bash -c 'if [[ -n $x ]]; then echo "$x" && false; fi' diff --git a/codebase1/codebase/Unison/Codebase/V1/ABT.hs b/codebase1/codebase/Unison/Codebase/V1/ABT.hs new file mode 100644 index 0000000000..7b51fe8741 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/ABT.hs @@ -0,0 +1,705 @@ +-- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.V1.ABT where + +import Data.Maybe (fromMaybe) +import qualified Data.Foldable as Foldable +import qualified Data.Set as Set +import Data.Set (Set) +import Prelude hiding (abs, cycle) + +data ABT f v r + = Var v + | Cycle r + | Abs v r + | Tm (f r) + deriving (Functor, Foldable, Traversable) + +-- | At each level in the tree, we store the set of free variables 
and +-- a value of type `a`. Variables are of type `v`. +data Term f v a = Term {freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a)} + +-- | A class for variables. +-- +-- * `Set.notMember (freshIn vs v) vs`: +-- `freshIn` returns a variable not used in the `Set` +class Ord v => Var v where + freshIn :: Set v -> v -> v + +-- data V v = Free v | Bound v deriving (Eq,Ord,Show,Functor) + +-- unvar :: V v -> v +-- unvar (Free v) = v +-- unvar (Bound v) = v + +-- instance Var v => Var (V v) where +-- freshIn s v = freshIn (Set.map unvar s) <$> v + +-- newtype Path s t a b m = Path { focus :: s -> Maybe (a, b -> Maybe t, m) } + +-- here :: Monoid m => Path s t s t m +-- here = Path $ \s -> Just (s, Just, mempty) + +-- instance Semigroup (Path s t a b m) where +-- (<>) = mappend + +-- instance Monoid (Path s t a b m) where +-- mempty = Path (const Nothing) +-- mappend (Path p1) (Path p2) = Path p3 where +-- p3 s = p1 s <|> p2 s + +-- type Path' f g m = forall a v . Var v => Path (Term f v a) (Term f (V v) a) (Term g v a) (Term g (V v) a) m + +-- compose :: Monoid m => Path s t a b m -> Path a b a' b' m -> Path s t a' b' m +-- compose (Path p1) (Path p2) = Path p3 where +-- p3 s = do +-- (get1,set1,m1) <- p1 s +-- (get2,set2,m2) <- p2 get1 +-- pure (get2, set2 >=> set1, m1 `mappend` m2) + +-- at :: Path s t a b m -> s -> Maybe a +-- at p s = (\(a,_,_) -> a) <$> focus p s + +-- modify' :: Path s t a b m -> (m -> a -> b) -> s -> Maybe t +-- modify' p f s = focus p s >>= \(get,set,m) -> set (f m get) + +-- wrap :: (Functor f, Foldable f, Var v) => v -> Term f (V v) a -> (V v, Term f (V v) a) +-- wrap v t = +-- if Set.member (Free v) (freeVars t) +-- then let v' = fresh t (Bound v) in (v', rename (Bound v) v' t) +-- else (Bound v, t) + +-- wrap' :: (Functor f, Foldable f, Var v) +-- => v -> Term f (V v) a -> (V v -> Term f (V v) a -> c) -> c +-- wrap' v t f = uncurry f (wrap v t) + +-- -- | Return the list of all variables bound by this ABT +-- bound' :: Foldable 
f => Term f v a -> [v] +-- bound' t = case out t of +-- Abs v t -> v : bound' t +-- Cycle t -> bound' t +-- Tm f -> Foldable.toList f >>= bound' +-- _ -> [] + +-- annotateBound' :: (Ord v, Functor f, Foldable f) => Term f v a0 -> Term f v [v] +-- annotateBound' t = snd <$> annotateBound'' t + +-- -- Annotate the tree with the set of bound variables at each node. +-- annotateBound :: (Ord v, Foldable f, Functor f) => Term f v a -> Term f v (a, Set v) +-- annotateBound = go Set.empty where +-- go bound t = let a = (annotation t, bound) in case out t of +-- Var v -> annotatedVar a v +-- Cycle body -> cycle' a (go bound body) +-- Abs x body -> abs' a x (go (Set.insert x bound) body) +-- Tm body -> tm' a (go bound <$> body) + +-- annotateBound'' :: (Ord v, Functor f, Foldable f) => Term f v a -> Term f v (a, [v]) +-- annotateBound'' = go [] where +-- go env t = let a = (annotation t, env) in case out t of +-- Abs v body -> abs' a v (go (v : env) body) +-- Cycle body -> cycle' a (go env body) +-- Tm f -> tm' a (go env <$> f) +-- Var v -> annotatedVar a v + +-- -- | Return the set of all variables bound by this ABT +-- bound :: (Ord v, Foldable f) => Term f v a -> Set v +-- bound t = Set.fromList (bound' t) + +-- -- | `True` if the term has no free variables, `False` otherwise +-- isClosed :: Term f v a -> Bool +-- isClosed t = Set.null (freeVars t) + +-- -- | `True` if `v` is a member of the set of free variables of `t` +-- isFreeIn :: Ord v => v -> Term f v a -> Bool +-- isFreeIn v t = Set.member v (freeVars t) + +-- -- | Replace the annotation with the given argument. 
+-- annotate :: a -> Term f v a -> Term f v a +-- annotate a (Term fvs _ out) = Term fvs a out + +-- vmap :: (Functor f, Foldable f, Ord v2) => (v -> v2) -> Term f v a -> Term f v2 a +-- vmap f (Term _ a out) = case out of +-- Var v -> annotatedVar a (f v) +-- Tm fa -> tm' a (fmap (vmap f) fa) +-- Cycle r -> cycle' a (vmap f r) +-- Abs v body -> abs' a (f v) (vmap f body) + +-- amap :: (Functor f, Foldable f, Ord v) => (a -> a2) -> Term f v a -> Term f v a2 +-- amap = amap' . const + +-- amap' :: (Functor f, Foldable f, Ord v) => (Term f v a -> a -> a2) -> Term f v a -> Term f v a2 +-- amap' f t@(Term _ a out) = case out of +-- Var v -> annotatedVar (f t a) v +-- Tm fa -> tm' (f t a) (fmap (amap' f) fa) +-- Cycle r -> cycle' (f t a) (amap' f r) +-- Abs v body -> abs' (f t a) v (amap' f body) + +-- -- | Modifies the annotations in this tree +-- instance Functor f => Functor (Term f v) where +-- fmap f (Term fvs a sub) = Term fvs (f a) (fmap (fmap f) sub) + +-- extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a +-- extraMap p (Term fvs a sub) = Term fvs a (go p sub) where +-- go :: Functor g => (forall k . 
f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) +-- go p = \case +-- Var v -> Var v +-- Cycle r -> Cycle (extraMap p r) +-- Abs v r -> Abs v (extraMap p r) +-- Tm x -> Tm (fmap (extraMap p) (p x)) + +-- pattern Var' v <- Term _ _ (Var v) +-- pattern Cycle' vs t <- Term _ _ (Cycle (AbsN' vs t)) +-- -- pattern Abs' v body <- Term _ _ (Abs v body) +-- pattern Abs' subst <- (unabs1 -> Just subst) +-- pattern AbsN' vs body <- (unabs -> (vs, body)) +-- pattern Tm' f <- Term _ _ (Tm f) +-- pattern CycleA' a avs t <- Term _ a (Cycle (AbsNA' avs t)) +-- pattern AbsNA' avs body <- (unabsA -> (avs, body)) +-- pattern Abs1NA' avs body <- (unabs1A -> Just (avs, body)) + +-- unabsA :: Term f v a -> ([(a,v)], Term f v a) +-- unabsA (Term _ a (Abs hd body)) = +-- let (tl, body') = unabsA body in ((a,hd) : tl, body') +-- unabsA t = ([], t) + +-- unabs1A :: Term f v a -> Maybe ([(a,v)], Term f v a) +-- unabs1A t = case unabsA t of +-- ([], _) -> Nothing +-- x -> Just x + +-- var :: v -> Term f v () +-- var = annotatedVar () + +annotatedVar :: a -> v -> Term f v a +annotatedVar a v = Term (Set.singleton v) a (Var v) + +-- abs :: Ord v => v -> Term f v () -> Term f v () +-- abs = abs' () + +abs' :: Ord v => a -> v -> Term f v a -> Term f v a +abs' a v body = Term (Set.delete v (freeVars body)) a (Abs v body) + +-- absr :: (Functor f, Foldable f, Var v) => v -> Term f (V v) () -> Term f (V v) () +-- absr = absr' () + +-- -- | Rebuild an `abs`, renaming `v` to avoid capturing any `Free v` in `body`. 
+-- absr' :: (Functor f, Foldable f, Var v) => a -> v -> Term f (V v) a -> Term f (V v) a +-- absr' a v body = wrap' v body $ \v body -> abs' a v body + +-- absChain :: Ord v => [v] -> Term f v () -> Term f v () +-- absChain vs t = foldr abs t vs + +-- absCycle :: Ord v => [v] -> Term f v () -> Term f v () +-- absCycle vs t = cycle $ absChain vs t + +-- absChain' :: Ord v => [(a, v)] -> Term f v a -> Term f v a +-- absChain' vs t = foldr (\(a,v) t -> abs' a v t) t vs + +-- tm :: (Foldable f, Ord v) => f (Term f v ()) -> Term f v () +-- tm = tm' () + +tm' :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a +tm' a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) + +-- cycle :: Term f v () -> Term f v () +-- cycle = cycle' () + +cycle' :: a -> Term f v a -> Term f v a +cycle' a t = Term (freeVars t) a (Cycle t) + +-- cycler' :: (Functor f, Foldable f, Var v) => a -> [v] -> Term f (V v) a -> Term f (V v) a +-- cycler' a vs t = cycle' a $ foldr (absr' a) t vs + +-- cycler :: (Functor f, Foldable f, Var v) => [v] -> Term f (V v) () -> Term f (V v) () +-- cycler = cycler' () + +-- into :: (Foldable f, Ord v) => ABT f v (Term f v ()) -> Term f v () +-- into = into' () + +-- into' :: (Foldable f, Ord v) => a -> ABT f v (Term f v a) -> Term f v a +-- into' a abt = case abt of +-- Var x -> annotatedVar a x +-- Cycle t -> cycle' a t +-- Abs v r -> abs' a v r +-- Tm t -> tm' a t + +-- -- | renames `old` to `new` in the given term, ignoring subtrees that bind `old` +-- rename :: (Foldable f, Functor f, Var v) => v -> v -> Term f v a -> Term f v a +-- rename old new t0@(Term fvs ann t) = +-- if Set.notMember old fvs then t0 +-- else case t of +-- Var v -> if v == old then annotatedVar ann new else t0 +-- Cycle body -> cycle' ann (rename old new body) +-- Abs v body -> +-- -- v shadows old, so skip this subtree +-- if v == old then abs' ann v body + +-- -- the rename would capture new, freshen this Abs +-- -- to make that no longer true, then proceed with 
+-- -- renaming `old` to `new` +-- else if v == new then +-- let v' = freshIn (Set.fromList [new,old] <> freeVars body) v +-- in abs' ann v' (rename old new (rename v v' body)) + +-- -- nothing special, just rename inside body of Abs +-- else abs' ann v (rename old new body) +-- Tm v -> tm' ann (fmap (rename old new) v) + +-- changeVars :: (Foldable f, Functor f, Var v) => Map v v -> Term f v a -> Term f v a +-- changeVars m t = case out t of +-- Abs v body -> case Map.lookup v m of +-- Nothing -> abs' (annotation t) v (changeVars m body) +-- Just v' -> abs' (annotation t) v' (changeVars m body) +-- Cycle body -> cycle' (annotation t) (changeVars m body) +-- Var v -> case Map.lookup v m of +-- Nothing -> t +-- Just v -> annotatedVar (annotation t) v +-- Tm v -> tm' (annotation t) (changeVars m <$> v) + +-- -- | Produce a variable which is free in both terms +-- freshInBoth :: Var v => Term f v a -> Term f v a -> v -> v +-- freshInBoth t1 t2 = freshIn $ Set.union (freeVars t1) (freeVars t2) + +-- fresh :: Var v => Term f v a -> v -> v +-- fresh t = freshIn (freeVars t) + +-- freshEverywhere :: (Foldable f, Var v) => Term f v a -> v -> v +-- freshEverywhere t = freshIn . Set.fromList $ allVars t + +-- allVars :: Foldable f => Term f v a -> [v] +-- allVars t = case out t of +-- Var v -> [v] +-- Cycle body -> allVars body +-- Abs v body -> v : allVars body +-- Tm v -> Foldable.toList v >>= allVars + +-- freshes :: Var v => Term f v a -> [v] -> [v] +-- freshes = freshes' . freeVars + +-- freshes' :: Var v => Set v -> [v] -> [v] +-- freshes' used vs = evalState (traverse freshenS vs) used + +-- -- | Freshens the given variable wrt. the set of used variables +-- -- tracked by state. Adds the result to the set of used variables. +-- freshenS :: (Var v, MonadState (Set v) m) => v -> m v +-- freshenS = freshenS' id + +-- -- | A more general version of `freshenS` that uses a lens +-- -- to focus on used variables inside state. 
+-- freshenS' :: (Var v, MonadState s m) => Lens' s (Set v) -> v -> m v +-- freshenS' uvLens v = do +-- usedVars <- use uvLens +-- let v' = freshIn usedVars v +-- uvLens .= Set.insert v' usedVars +-- pure v' + +-- -- | `subst v e body` substitutes `e` for `v` in `body`, avoiding capture by +-- -- renaming abstractions in `body` +-- subst +-- :: (Foldable f, Functor f, Var v) +-- => v +-- -> Term f v a +-- -> Term f v a +-- -> Term f v a +-- subst v r = subst' (const r) v (freeVars r) + +-- -- Slightly generalized version of `subst`, the replacement action is handled +-- -- by the function `replace`, which is given the annotation `a` at the point +-- -- of replacement. `r` should be the set of free variables contained in the +-- -- term returned by `replace`. See `substInheritAnnotation` for an example usage. +-- subst' :: (Foldable f, Functor f, Var v) => (a -> Term f v a) -> v -> Set v -> Term f v a -> Term f v a +-- subst' replace v r t2@(Term fvs ann body) +-- | Set.notMember v fvs = t2 -- subtrees not containing the var can be skipped +-- | otherwise = case body of +-- Var v' | v == v' -> replace ann -- var match; perform replacement +-- | otherwise -> t2 -- var did not match one being substituted; ignore +-- Cycle body -> cycle' ann (subst' replace v r body) +-- Abs x _ | x == v -> t2 -- x shadows v; ignore subtree +-- Abs x e -> abs' ann x' e' +-- where x' = freshIn (fvs `Set.union` r) x +-- -- rename x to something that cannot be captured by `r` +-- e' = if x /= x' then subst' replace v r (rename x x' e) +-- else subst' replace v r e +-- Tm body -> tm' ann (fmap (subst' replace v r) body) + +-- -- Like `subst`, but the annotation of the replacement is inherited from +-- -- the previous annotation at each replacement point. 
+-- substInheritAnnotation :: (Foldable f, Functor f, Var v) +-- => v -> Term f v b -> Term f v a -> Term f v a +-- substInheritAnnotation v r = +-- subst' (\ann -> const ann <$> r) v (freeVars r) + +-- substsInheritAnnotation +-- :: (Foldable f, Functor f, Var v) +-- => [(v, Term f v b)] +-- -> Term f v a +-- -> Term f v a +-- substsInheritAnnotation replacements body = +-- foldr (uncurry substInheritAnnotation) body (reverse replacements) + +-- -- | `substs [(t1,v1), (t2,v2), ...] body` performs multiple simultaneous +-- -- substitutions, avoiding capture +-- substs +-- :: (Foldable f, Functor f, Var v) +-- => [(v, Term f v a)] +-- -> Term f v a +-- -> Term f v a +-- substs replacements body = foldr (uncurry subst) body (reverse replacements) + +-- -- Count the number times the given variable appears free in the term +-- occurrences :: (Foldable f, Var v) => v -> Term f v a -> Int +-- occurrences v t | not (v `isFreeIn` t) = 0 +-- occurrences v t = case out t of +-- Var v2 -> if v == v2 then 1 else 0 +-- Cycle t -> occurrences v t +-- Abs v2 t -> if v == v2 then 0 else occurrences v t +-- Tm t -> foldl' (\s t -> s + occurrences v t) 0 $ Foldable.toList t + +-- rebuildUp :: (Ord v, Foldable f, Functor f) +-- => (f (Term f v a) -> f (Term f v a)) +-- -> Term f v a +-- -> Term f v a +-- rebuildUp f (Term _ ann body) = case body of +-- Var v -> annotatedVar ann v +-- Cycle body -> cycle' ann (rebuildUp f body) +-- Abs x e -> abs' ann x (rebuildUp f e) +-- Tm body -> tm' ann (f $ fmap (rebuildUp f) body) + +-- rebuildUp' :: (Ord v, Foldable f, Functor f) +-- => (Term f v a -> Term f v a) +-- -> Term f v a +-- -> Term f v a +-- rebuildUp' f (Term _ ann body) = case body of +-- Var v -> f (annotatedVar ann v) +-- Cycle body -> f $ cycle' ann (rebuildUp' f body) +-- Abs x e -> f $ abs' ann x (rebuildUp' f e) +-- Tm body -> f $ tm' ann (fmap (rebuildUp' f) body) + +-- freeVarOccurrences :: (Traversable f, Ord v) => Set v -> Term f v a -> [(v, a)] +-- freeVarOccurrences 
except t = +-- [ (v, a) | (v,a) <- go $ annotateBound t, not (Set.member v except) ] +-- where +-- go e = case out e of +-- Var v -> if Set.member v (snd $ annotation e) +-- then [] +-- else [(v, fst $ annotation e)] +-- Cycle body -> go body +-- Abs _ body -> go body +-- Tm body -> foldMap go body + +-- foreachSubterm +-- :: (Traversable f, Applicative g, Ord v) +-- => (Term f v a -> g b) +-- -> Term f v a +-- -> g [b] +-- foreachSubterm f e = case out e of +-- Var _ -> pure <$> f e +-- Cycle body -> (:) <$> f e <*> foreachSubterm f body +-- Abs _ body -> (:) <$> f e <*> foreachSubterm f body +-- Tm body -> +-- (:) +-- <$> f e +-- <*> (join . Foldable.toList <$> traverse (foreachSubterm f) body) + +-- subterms :: (Ord v, Traversable f) => Term f v a -> [Term f v a] +-- subterms t = runIdentity $ foreachSubterm pure t + +-- | `visit f t` applies an effectful function to each subtree of +-- `t` and sequences the results. When `f` returns `Nothing`, `visit` +-- descends into the children of the current subtree. When `f` returns +-- `Just t2`, `visit` replaces the current subtree with `t2`. Thus: +-- `visit (const Nothing) t == pure t` and +-- `visit (const (Just (pure t2))) t == pure t2` +visit :: + (Traversable f, Applicative g, Ord v) => + (Term f v a -> Maybe (g (Term f v a))) -> + Term f v a -> + g (Term f v a) +visit f t = flip fromMaybe (f t) $ case out t of + Var _ -> pure t + Cycle body -> cycle' (annotation t) <$> visit f body + Abs x e -> abs' (annotation t) x <$> visit f e + Tm body -> tm' (annotation t) <$> traverse (visit f) body + +-- | Apply an effectful function to an ABT tree top down, sequencing the results. +visit' :: + (Traversable f, Applicative g, Monad g, Ord v) => + (f (Term f v a) -> g (f (Term f v a))) -> + Term f v a -> + g (Term f v a) +visit' f t = case out t of + Var _ -> pure t + Cycle body -> cycle' (annotation t) <$> visit' f body + Abs x e -> abs' (annotation t) x <$> visit' f e + Tm body -> f body >>= (fmap (tm' (annotation t)) . 
traverse (visit' f)) + +-- -- | `visit` specialized to the `Identity` effect. +-- visitPure :: (Traversable f, Ord v) +-- => (Term f v a -> Maybe (Term f v a)) -> Term f v a -> Term f v a +-- visitPure f = runIdentity . visit (fmap pure . f) + +-- rewriteDown :: (Traversable f, Ord v) +-- => (Term f v a -> Term f v a) +-- -> Term f v a +-- -> Term f v a +-- rewriteDown f t = let t' = f t in case out t' of +-- Var _ -> t' +-- Cycle body -> cycle' (annotation t) (rewriteDown f body) +-- Abs x e -> abs' (annotation t) x (rewriteDown f e) +-- Tm body -> tm' (annotation t) (rewriteDown f `fmap` body) + +-- data Subst f v a = +-- Subst { freshen :: forall m v' . Monad m => (v -> m v') -> m v' +-- , bind :: Term f v a -> Term f v a +-- , bindInheritAnnotation :: forall b . Term f v b -> Term f v a +-- , variable :: v } + +-- unabs1 :: (Foldable f, Functor f, Var v) => Term f v a -> Maybe (Subst f v a) +-- unabs1 (Term _ _ (Abs v body)) = Just (Subst freshen bind bindInheritAnnotation v) where +-- freshen f = f v +-- bind x = subst v x body +-- bindInheritAnnotation x = substInheritAnnotation v x body +-- unabs1 _ = Nothing + +-- unabs :: Term f v a -> ([v], Term f v a) +-- unabs (Term _ _ (Abs hd body)) = +-- let (tl, body') = unabs body in (hd : tl, body') +-- unabs t = ([], t) + +-- reabs :: Ord v => [v] -> Term f v () -> Term f v () +-- reabs vs t = foldr abs t vs + +-- transform :: (Ord v, Foldable g, Functor f) +-- => (forall a. 
f a -> g a) -> Term f v a -> Term g v a +-- transform f tm = case out tm of +-- Var v -> annotatedVar (annotation tm) v +-- Abs v body -> abs' (annotation tm) v (transform f body) +-- Tm subterms -> +-- let subterms' = fmap (transform f) subterms +-- in tm' (annotation tm) (f subterms') +-- Cycle body -> cycle' (annotation tm) (transform f body) + +-- -- Rebuild the tree annotations upward, starting from the leaves, +-- -- using the Monoid to choose the annotation at intermediate nodes +-- reannotateUp :: (Ord v, Foldable f, Functor f, Monoid b) +-- => (Term f v a -> b) +-- -> Term f v a +-- -> Term f v (a, b) +-- reannotateUp g t = case out t of +-- Var v -> annotatedVar (annotation t, g t) v +-- Cycle body -> +-- let body' = reannotateUp g body +-- in cycle' (annotation t, snd (annotation body')) body' +-- Abs v body -> +-- let body' = reannotateUp g body +-- in abs' (annotation t, snd (annotation body')) v body' +-- Tm body -> +-- let +-- body' = reannotateUp g <$> body +-- ann = g t <> foldMap (snd . annotation) body' +-- in tm' (annotation t, ann) body' + +-- -- Find all subterms that match a predicate. Prune the search for speed. +-- -- (Some patterns of pruning can cut the complexity of the search.) 
+-- data FindAction x = Found x | Prune | Continue deriving Show +-- find :: (Ord v, Foldable f, Functor f) +-- => (Term f v a -> FindAction x) +-- -> Term f v a +-- -> [x] +-- find p t = case p t of +-- Found x -> x : go +-- Prune -> [] +-- Continue -> go +-- where go = case out t of +-- Var _ -> [] +-- Cycle body -> Unison.Codebase.V1.ABT.find p body +-- Abs _ body -> Unison.Codebase.V1.ABT.find p body +-- Tm body -> Foldable.concat (Unison.Codebase.V1.ABT.find p <$> body) + +-- find' :: (Ord v, Foldable f, Functor f) +-- => (Term f v a -> Bool) +-- -> Term f v a +-- -> [Term f v a] +-- find' p = Unison.Codebase.V1.ABT.find (\t -> if p t then Found t else Continue) + +-- instance (Foldable f, Functor f, Eq1 f, Var v) => Eq (Term f v a) where +-- -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable +-- t1 == t2 = go (out t1) (out t2) where +-- go (Var v) (Var v2) | v == v2 = True +-- go (Cycle t1) (Cycle t2) = t1 == t2 +-- go (Abs v1 body1) (Abs v2 body2) = +-- if v1 == v2 then body1 == body2 +-- else let v3 = freshInBoth body1 body2 v1 +-- in rename v1 v3 body1 == rename v2 v3 body2 +-- go (Tm f1) (Tm f2) = f1 ==# f2 +-- go _ _ = False + +-- instance (Foldable f, Functor f, Ord1 f, Var v) => Ord (Term f v a) where +-- -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable +-- t1 `compare` t2 = go (out t1) (out t2) where +-- go (Var v) (Var v2) = v `compare` v2 +-- go (Cycle t1) (Cycle t2) = t1 `compare` t2 +-- go (Abs v1 body1) (Abs v2 body2) = +-- if v1 == v2 then body1 `compare` body2 +-- else let v3 = freshInBoth body1 body2 v1 +-- in rename v1 v3 body1 `compare` rename v2 v3 body2 +-- go (Tm f1) (Tm f2) = compare1 f1 f2 +-- go t1 t2 = tag t1 `compare` tag t2 +-- tag (Var _) = 0 :: Word +-- tag (Tm _) = 1 +-- tag (Abs _ _) = 2 +-- tag (Cycle _) = 3 + +-- components :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] +-- components = Components.components freeVars + +-- -- Converts 
to strongly connected components while preserving the +-- -- order of definitions. Satisfies `join (orderedComponents bs) == bs`. +-- orderedComponents' :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] +-- orderedComponents' tms = go [] Set.empty tms +-- where +-- go [] _ [] = [] +-- go [] deps (hd:rem) = go [hd] (deps <> freeVars (snd hd)) rem +-- go cur deps rem = case findIndex isDep rem of +-- Nothing -> reverse cur : let (hd,tl) = splitAt 1 rem +-- in go hd (depsFor hd) tl +-- Just i -> go (reverse newMembers ++ cur) deps' (drop (i+1) rem) +-- where deps' = deps <> depsFor newMembers +-- newMembers = take (i+1) rem +-- where +-- depsFor = foldMap (freeVars . snd) +-- isDep (v, _) = Set.member v deps + +-- -- Like `orderedComponents'`, but further break up cycles and move +-- -- cyclic subcycles before other components in the same cycle. +-- -- Tweak suggested by @aryairani. +-- -- +-- -- Example: given `[[x],[ping,r,s,pong]]`, where `ping` and `pong` +-- -- are mutually recursive but `r` and `s` are uninvolved, this produces: +-- -- `[[x], [ping,pong], [r], [s]]`. +-- orderedComponents :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] +-- orderedComponents bs0 = tweak =<< orderedComponents' bs0 where +-- tweak :: Var v => [(v,Term f v a)] -> [[(v,Term f v a)]] +-- tweak bs@(_:_:_) = case takeWhile isCyclic (components bs) of +-- [] -> [bs] +-- cycles -> cycles <> orderedComponents rest +-- where +-- rest = [ (v,b) | (v,b) <- bs, Set.notMember v cycleVars ] +-- cycleVars = Set.fromList (fst <$> join cycles) +-- tweak bs = [bs] -- any cycle with < 2 bindings is left alone +-- isCyclic [(v,b)] = Set.member v (freeVars b) +-- isCyclic bs = length bs > 1 + +-- -- Hash a strongly connected component and sort its definitions into a canonical order. 
+-- hashComponent :: +-- (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) +-- => Map.Map v (Term f v a) -> (h, [(v, Term f v a)]) +-- hashComponent byName = let +-- ts = Map.toList byName +-- embeds = [ (v, void (transform Embed t)) | (v,t) <- ts ] +-- vs = fst <$> ts +-- tms = [ (v, absCycle vs (tm $ Component (snd <$> embeds) (var v))) | v <- vs ] +-- hashed = [ ((v,t), hash t) | (v,t) <- tms ] +-- sortedHashed = sortOn snd hashed +-- overallHash = Hashable.accumulate (Hashable.Hashed . snd <$> sortedHashed) +-- in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) + +-- -- Group the definitions into strongly connected components and hash +-- -- each component. Substitute the hash of each component into subsequent +-- -- components (using the `termFromHash` function). Requires that the +-- -- overall component has no free variables. +-- hashComponents +-- :: (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) +-- => (h -> Word64 -> Word64 -> Term f v ()) +-- -> Map.Map v (Term f v a) +-- -> [(h, [(v, Term f v a)])] +-- hashComponents termFromHash termsByName = let +-- bound = Set.fromList (Map.keys termsByName) +-- escapedVars = Set.unions (freeVars <$> Map.elems termsByName) `Set.difference` bound +-- sccs = components (Map.toList termsByName) +-- go _ [] = [] +-- go prevHashes (component : rest) = let +-- sub = substsInheritAnnotation (Map.toList prevHashes) +-- (h, sortedComponent) = hashComponent $ Map.fromList [ (v, sub t) | (v, t) <- component ] +-- size = fromIntegral (length sortedComponent) +-- curHashes = Map.fromList [ (v, termFromHash h i size) | ((v, _),i) <- sortedComponent `zip` [0..]] +-- newHashes = prevHashes `Map.union` curHashes +-- newHashesL = Map.toList newHashes +-- sortedComponent' = [ (v, substsInheritAnnotation newHashesL t) | (v, t) <- sortedComponent ] +-- in (h, sortedComponent') : go newHashes rest +-- in if Set.null escapedVars then go 
Map.empty sccs +-- else error $ "can't hashComponents if bindings have free variables:\n " +-- ++ show (map show (Set.toList escapedVars)) +-- ++ "\n " ++ show (map show (Map.keys termsByName)) + +-- -- Implementation detail of hashComponent +-- data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) + +-- instance (Hashable1 f, Functor f) => Hashable1 (Component f) where +-- hash1 hashCycle hash c = case c of +-- Component as a -> let +-- (hs, hash) = hashCycle as +-- toks = Hashable.Hashed <$> hs +-- in Hashable.accumulate $ (Hashable.Tag 1 : toks) ++ [Hashable.Hashed (hash a)] +-- Embed fa -> Hashable.hash1 hashCycle hash fa + +-- -- | We ignore annotations in the `Term`, as these should never affect the +-- -- meaning of the term. +-- hash :: forall f v a h . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) +-- => Term f v a -> h +-- hash = hash' [] where +-- hash' :: [Either [v] v] -> Term f v a -> h +-- hash' env (Term _ _ t) = case t of +-- Var v -> maybe die hashInt ind +-- where lookup (Left cycle) = v `elem` cycle +-- lookup (Right v') = v == v' +-- ind = findIndex lookup env +-- hashInt :: Int -> h +-- hashInt i = Hashable.accumulate [Hashable.Nat $ fromIntegral i] +-- die = error $ "unknown var in environment: " ++ show v +-- ++ " environment = " ++ show env +-- Cycle (AbsN' vs t) -> hash' (Left vs : env) t +-- Cycle t -> hash' env t +-- Abs v t -> hash' (Right v : env) t +-- Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t + +-- hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) +-- hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = +-- let +-- permute p xs = case Vector.fromList xs of xs -> map (xs !) p +-- hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] 
ts) +-- pt = fst <$> sortOn snd hashed +-- (p,ts') = unzip pt +-- in case map Right (permute p cycle) ++ envTl of +-- env -> (map (hash' env) ts', hash' env) +-- hashCycle env ts = (map (hash' env) ts, hash' env) + +-- -- | Use the `hash` function to efficiently remove duplicates from the list, preserving order. +-- distinct :: forall f v h a proxy . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) +-- => proxy h +-- -> [Term f v a] -> [Term f v a] +-- distinct _ ts = fst <$> sortOn snd m +-- where m = Map.elems (Map.fromList (hashes `zip` (ts `zip` [0 :: Int .. 1]))) +-- hashes = map hash ts :: [h] + +-- -- | Use the `hash` function to remove elements from `t1s` that exist in `t2s`, preserving order. +-- subtract :: forall f v h a proxy . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) +-- => proxy h +-- -> [Term f v a] -> [Term f v a] -> [Term f v a] +-- subtract _ t1s t2s = +-- let skips = Set.fromList (map hash t2s :: [h]) +-- in filter (\t -> Set.notMember (hash t) skips) t1s + +-- instance (Show1 f, Show v) => Show (Term f v a) where +-- -- annotations not shown +-- showsPrec p (Term _ _ out) = case out of +-- Var v -> \x -> "Var " ++ show v ++ x +-- Cycle body -> ("Cycle " ++) . showsPrec p body +-- Abs v body -> showParen True $ (show v ++) . showString ". " . 
showsPrec p body +-- Tm f -> showsPrec1 p f diff --git a/codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs b/codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs new file mode 100644 index 0000000000..82d895c248 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs @@ -0,0 +1,13 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.V1.Branch.NameSegment where + +import qualified Data.Text as Text +import Data.String (IsString(..)) +import Data.Text (Text) + +-- Represents the parts of a name between the `.`s +newtype NameSegment = NameSegment {toText :: Text} deriving (Show, Eq, Ord) + +instance IsString NameSegment where + fromString = NameSegment . Text.pack diff --git a/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs b/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs new file mode 100644 index 0000000000..c3585d1460 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs @@ -0,0 +1,34 @@ +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE RankNTypes #-} + +module Unison.Codebase.V1.Branch.Raw where + +import Data.Map (Map) +import qualified U.Util.Hash as Hash +import Unison.Codebase.V1.Star3 +import Unison.Codebase.V1.Reference +import Unison.Codebase.V1.Referent +import Unison.Codebase.V1.Branch.NameSegment (NameSegment) + +type MetadataType = Reference +type MetadataValue = Reference + +-- `a` is generally the type of references or hashes +-- `n` is generally the the type of name associated with the references +-- `Type` is the type of metadata. Duplicate info to speed up certain queries. +-- `(Type, Value)` is the metadata value itself along with its type. 
+type Star r n = Star3 r n MetadataType (MetadataType, MetadataValue) + +newtype EditHash = EditHash Hash.Hash +newtype BranchHash = BranchHash Hash.Hash deriving Show + +-- The raw Branch +data Raw = Raw + { terms :: Star Referent NameSegment + , types :: Star Reference NameSegment + , children :: Map NameSegment BranchHash + , edits :: Map NameSegment EditHash + } \ No newline at end of file diff --git a/codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs b/codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs new file mode 100644 index 0000000000..397bd7b9dc --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs @@ -0,0 +1,26 @@ +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE RankNTypes #-} +module Unison.Codebase.V1.Causal.Raw where + +import U.Util.Hashable (Hashable) +import U.Util.Hash (Hash) +import Data.Set (Set) +import Data.Foldable (Foldable(toList)) + +newtype RawHash h = RawHash { unRawHash :: Hash } + deriving (Eq, Ord, Hashable) via Hash + +data Raw h e + = RawOne e + | RawCons e (RawHash h) + | RawMerge e (Set (RawHash h)) + +rawHead :: Raw h e -> e +rawHead (RawOne e ) = e +rawHead (RawCons e _) = e +rawHead (RawMerge e _) = e + +rawTails :: Raw h e -> [RawHash h] +rawTails (RawOne _) = [] +rawTails (RawCons _ h) = [h] +rawTails (RawMerge _ hs) = toList hs diff --git a/unison-core/src/Unison/ConstructorType.hs b/codebase1/codebase/Unison/Codebase/V1/ConstructorType.hs similarity index 60% rename from unison-core/src/Unison/ConstructorType.hs rename to codebase1/codebase/Unison/Codebase/V1/ConstructorType.hs index 4a1796bc31..21a2b4b2ae 100644 --- a/unison-core/src/Unison/ConstructorType.hs +++ b/codebase1/codebase/Unison/Codebase/V1/ConstructorType.hs @@ -1,6 +1,6 @@ -module Unison.ConstructorType where +module Unison.Codebase.V1.ConstructorType where -import Unison.Hashable (Hashable, Token(Tag), tokens) +import U.Util.Hashable (Hashable, Token(Tag), tokens) data ConstructorType = Data | Effect deriving (Eq, Ord, Show, Enum) diff --git 
a/codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs b/codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs new file mode 100644 index 0000000000..13ee496050 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs @@ -0,0 +1,72 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# Language DeriveFoldable #-} +{-# Language DeriveTraversable #-} +{-# Language OverloadedStrings #-} +{-# Language PatternSynonyms #-} +{-# Language ViewPatterns #-} + +module Unison.Codebase.V1.DataDeclaration where + + +import qualified Data.Set as Set +import Prelude hiding ( cycle ) +import Unison.Codebase.V1.Reference ( Reference ) +import Unison.Codebase.V1.Type ( Type ) +import qualified Unison.Codebase.V1.Type as Type +import qualified Unison.Codebase.V1.ConstructorType as CT +import Data.Text (Text) +import Data.Set (Set) + +type Decl v a = Either (EffectDeclaration v a) (DataDeclaration v a) + +data DeclOrBuiltin v a = + Builtin CT.ConstructorType | Decl (Decl v a) + +asDataDecl :: Decl v a -> DataDeclaration v a +asDataDecl = either toDataDecl id + +declDependencies :: Ord v => Decl v a -> Set Reference +declDependencies = either (dependencies . toDataDecl) dependencies + +constructorType :: Decl v a -> CT.ConstructorType +constructorType = \case + Left{} -> CT.Effect + Right{} -> CT.Data + +data Modifier = Structural | Unique Text + deriving (Eq, Ord, Show) + +data DataDeclaration v a = DataDeclaration { + modifier :: Modifier, + annotation :: a, + bound :: [v], + constructors' :: [(a, v, Type v a)] +} + +newtype EffectDeclaration v a = EffectDeclaration { + toDataDecl :: DataDeclaration v a +} + +withEffectDecl + :: (DataDeclaration v a -> DataDeclaration v' a') + -> (EffectDeclaration v a -> EffectDeclaration v' a') +withEffectDecl f e = EffectDeclaration (f . 
toDataDecl $ e) + +withEffectDeclM :: Functor f + => (DataDeclaration v a -> f (DataDeclaration v' a')) + -> EffectDeclaration v a + -> f (EffectDeclaration v' a') +withEffectDeclM f = fmap EffectDeclaration . f . toDataDecl + +constructors :: DataDeclaration v a -> [(v, Type v a)] +constructors (DataDeclaration _ _ _ ctors) = [(v,t) | (_,v,t) <- ctors ] + +constructorTypes :: DataDeclaration v a -> [Type v a] +constructorTypes = (snd <$>) . constructors + +dependencies :: Ord v => DataDeclaration v a -> Set Reference +dependencies dd = + Set.unions (Type.dependencies <$> constructorTypes dd) diff --git a/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs b/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs new file mode 100644 index 0000000000..d2282ce602 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs @@ -0,0 +1,242 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.V1.FileCodebase + ( getRootBranch, -- used by Git module + codebaseExists, -- used by Main + getCodebaseDir, + termsDir, + reflogPath, + getTerm, + getTypeOfTerm, + getDecl, + getWatch, + deserializeEdits, + ) +where + +import Control.Error (ExceptT (..), runExceptT) +import Control.Monad.Catch (catch) +import Control.Monad.Extra (ifM) +import Data.Either.Extra (maybeToEither) +import Data.Functor ((<&>)) +import qualified Data.Text as Text +import System.Directory (getHomeDirectory) +import qualified System.Directory +import System.FilePath (()) +import qualified U.Util.Base32Hex as Base32Hex +import qualified U.Util.Hash as Hash +import qualified Unison.Codebase.V1.Branch.Raw as Branch +import Unison.Codebase.V1.Branch.Raw (BranchHash (..), EditHash (..)) +import qualified Unison.Codebase.V1.Causal.Raw as Causal +import qualified Unison.Codebase.V1.DataDeclaration as DD +import 
Unison.Codebase.V1.Patch (Patch (..)) +import Unison.Codebase.V1.Reference (Reference) +import qualified Unison.Codebase.V1.Reference as Reference +import qualified Unison.Codebase.V1.Serialization.Serialization as S +import qualified Unison.Codebase.V1.Serialization.V1 as V1 +import Unison.Codebase.V1.Term (Term) +import Unison.Codebase.V1.Type (Type) +import UnliftIO (MonadIO) +import UnliftIO (IOException) +import UnliftIO (MonadIO (liftIO)) +import UnliftIO.Directory (createDirectoryIfMissing, doesDirectoryExist, doesFileExist) + +newtype CodebasePath = CodebasePath FilePath + +data WatchKind = RegularWatch | TestWatch deriving (Eq, Ord, Show) + +getCodebaseDir :: Maybe FilePath -> IO FilePath +getCodebaseDir = maybe getHomeDirectory pure + +data Err + = InvalidBranchFile FilePath String + | InvalidEditsFile FilePath String + | NoBranchHead FilePath + | CantParseBranchHead FilePath + | AmbiguouslyTypeAndTerm Reference.Id + | UnknownTypeOrTerm Reference + deriving (Show) + +codebasePath :: FilePath +codebasePath = ".unison" "v1" + +termsDir, typesDir, branchesDir, branchHeadDir, editsDir :: CodebasePath -> FilePath +termsDir (CodebasePath root) = root codebasePath "terms" +typesDir (CodebasePath root) = root codebasePath "types" +branchesDir (CodebasePath root) = root codebasePath "paths" +branchHeadDir root = branchesDir root "_head" +editsDir (CodebasePath root) = root codebasePath "patches" + +termDir, declDir :: CodebasePath -> Reference.Id -> FilePath +termDir root r = termsDir root componentIdToString r +declDir root r = typesDir root componentIdToString r + +watchesDir :: CodebasePath -> WatchKind -> FilePath +watchesDir (CodebasePath root) k = + root codebasePath "watches" case k of + RegularWatch -> "_cache" + TestWatch -> "test" + +watchPath :: CodebasePath -> WatchKind -> Reference.Id -> FilePath +watchPath root kind id = + watchesDir root kind componentIdToString id <> ".ub" + +termPath, typePath, declPath :: CodebasePath -> Reference.Id -> FilePath 
+termPath path r = termDir path r "compiled.ub" +typePath path r = termDir path r "type.ub" +declPath path r = declDir path r "compiled.ub" + +branchPath :: CodebasePath -> BranchHash -> FilePath +branchPath root (BranchHash h) = branchesDir root hashToString h ++ ".ub" + +editsPath :: CodebasePath -> EditHash -> FilePath +editsPath root (EditHash h) = editsDir root hashToString h ++ ".up" + +reflogPath :: CodebasePath -> FilePath +reflogPath (CodebasePath root) = root codebasePath "reflog" + +-- checks if `path` looks like a unison codebase +minimalCodebaseStructure :: CodebasePath -> [FilePath] +minimalCodebaseStructure root = [branchHeadDir root] + +-- checks if a minimal codebase structure exists at `path` +codebaseExists :: MonadIO m => CodebasePath -> m Bool +codebaseExists root = + and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) + +deserializeEdits :: MonadIO m => CodebasePath -> Branch.EditHash -> m Patch +deserializeEdits root h = + let file = editsPath root h + in S.getFromFile' V1.getEdits file >>= \case + Left err -> failWith $ InvalidEditsFile file err + Right edits -> pure edits + +data GetRootBranchError + = NoRootBranch + | CouldntParseRootBranch FilePath + | CouldntLoadRootBranch BranchHash + | ConflictedRootBranch [FilePath] + deriving (Show) + +getRootBranch :: + forall m. 
+ MonadIO m => + CodebasePath -> + m (Either GetRootBranchError (Causal.Raw Branch.Raw Branch.Raw)) +getRootBranch root = + ifM + (codebaseExists root) + (listDirectory (branchHeadDir root) >>= filesToBranch) + (pure $ Left NoRootBranch) + where + filesToBranch :: [FilePath] -> m (Either GetRootBranchError (Causal.Raw Branch.Raw Branch.Raw)) + filesToBranch = \case + [] -> pure $ Left NoRootBranch + [single] -> runExceptT $ fileToBranch single + conflict -> pure $ Left $ ConflictedRootBranch conflict + fileToBranch :: FilePath -> ExceptT GetRootBranchError m (Causal.Raw Branch.Raw Branch.Raw) + fileToBranch single = ExceptT $ case hashFromString single of + Nothing -> pure . Left $ CouldntParseRootBranch single + Just (BranchHash -> h) -> + branchFromFiles root h + <&> maybeToEither (CouldntLoadRootBranch h) + branchFromFiles :: MonadIO m => CodebasePath -> BranchHash -> m (Maybe (Causal.Raw Branch.Raw Branch.Raw)) + branchFromFiles rootDir h = do + fileExists <- doesFileExist (branchPath rootDir h) + if fileExists + then Just <$> deserializeRawBranch rootDir h + else pure Nothing + where + deserializeRawBranch :: + MonadIO m => CodebasePath -> BranchHash -> m (Causal.Raw Branch.Raw Branch.Raw) + deserializeRawBranch root h = do + let ubf = branchPath root h + S.getFromFile' (V1.getCausal0 V1.getRawBranch) ubf >>= \case + Left err -> failWith $ InvalidBranchFile ubf err + Right c0 -> pure c0 + +-- here +hashFromString :: String -> Maybe Hash.Hash +hashFromString = fmap (Hash.fromBase32Hex . Base32Hex.fromByteString) . Base32Hex.textToByteString . Text.pack + +-- here +hashToString :: Hash.Hash -> String +hashToString = Text.unpack . Base32Hex.toText . Hash.toBase32Hex + +-- hashFromFilePath :: FilePath -> Maybe Hash.Hash +-- hashFromFilePath = hashFromString . takeBaseName + +-- here +componentIdToString :: Reference.Id -> String +componentIdToString = Text.unpack . Reference.toText . 
Reference.DerivedId + +-- here +-- componentIdFromString :: String -> Maybe Reference.Id +-- componentIdFromString = Reference.idFromText . Text.pack + +-- here +-- referentFromString :: String -> Maybe Referent +-- referentFromString = Referent.fromText . Text.pack + +-- referentIdFromString :: String -> Maybe Referent.Id +-- referentIdFromString s = referentFromString s >>= \case +-- Referent.Ref (Reference.DerivedId r) -> Just $ Referent.Ref' r +-- Referent.Con (Reference.DerivedId r) i t -> Just $ Referent.Con' r i t +-- _ -> Nothing + +-- here +-- referentToString :: Referent -> String +-- referentToString = Text.unpack . Referent.toText + +getTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Term v a)) +getTerm getV getA path h = S.getFromFile (V1.getTerm getV getA) (termPath path h) + +getTypeOfTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Type v a)) +getTypeOfTerm getV getA path h = S.getFromFile (V1.getType getV getA) (typePath path h) + +getDecl :: + (MonadIO m, Ord v) => + S.Get v -> + S.Get a -> + CodebasePath -> + Reference.Id -> + m (Maybe (DD.Decl v a)) +getDecl getV getA root h = + S.getFromFile + ( V1.getEither + (V1.getEffectDeclaration getV getA) + (V1.getDataDeclaration getV getA) + ) + (declPath root h) + +getWatch :: + (MonadIO m, Ord v) => + S.Get v -> + S.Get a -> + CodebasePath -> + WatchKind -> + Reference.Id -> + m (Maybe (Term v a)) +getWatch getV getA path k id = do + let wp = watchesDir path k + createDirectoryIfMissing True wp + S.getFromFile (V1.getTerm getV getA) (watchPath path k id) + +failWith :: MonadIO m => Err -> m a +failWith = liftIO . fail . 
show + +-- | A version of listDirectory that returns mempty if the directory doesn't exist +listDirectory :: MonadIO m => FilePath -> m [FilePath] +listDirectory dir = + liftIO $ + System.Directory.listDirectory dir `catch` (\(_ :: IOException) -> pure mempty) + +-- -- | delete all the elements of a given reference component from a set +-- deleteComponent :: Reference.Id -> Set Reference -> Set Reference +-- deleteComponent r rs = Set.difference rs +-- (Reference.members . Reference.componentFor . Reference.DerivedId $ r) diff --git a/unison-core/src/Unison/LabeledDependency.hs b/codebase1/codebase/Unison/Codebase/V1/LabeledDependency.hs similarity index 78% rename from unison-core/src/Unison/LabeledDependency.hs rename to codebase1/codebase/Unison/Codebase/V1/LabeledDependency.hs index 13f5a858a1..f9ae793d52 100644 --- a/unison-core/src/Unison/LabeledDependency.hs +++ b/codebase1/codebase/Unison/Codebase/V1/LabeledDependency.hs @@ -1,6 +1,7 @@ +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} -module Unison.LabeledDependency +module Unison.Codebase.V1.LabeledDependency ( derivedTerm , derivedType , termRef @@ -15,12 +16,13 @@ module Unison.LabeledDependency , partition ) where -import Unison.Prelude hiding (fold) - -import Unison.ConstructorType (ConstructorType(Data, Effect)) -import Unison.Reference (Reference(DerivedId), Id) -import Unison.Referent (Referent, pattern Ref, pattern Con, Referent'(Ref', Con')) +import Unison.Codebase.V1.ConstructorType (ConstructorType(Data, Effect)) +import Unison.Codebase.V1.Reference (Reference(DerivedId), Id) +import Unison.Codebase.V1.Referent (Referent, pattern Ref, pattern Con, Referent'(Ref', Con')) import qualified Data.Set as Set +import Data.Set (Set) +import Data.Foldable (Foldable(toList)) +import Data.Either (partitionEithers) -- dumb constructor name is private newtype LabeledDependency = X (Either Reference Referent) deriving (Eq, Ord, Show) diff --git a/codebase1/codebase/Unison/Codebase/V1/Patch.hs 
b/codebase1/codebase/Unison/Codebase/V1/Patch.hs new file mode 100644 index 0000000000..92e7f5495f --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Patch.hs @@ -0,0 +1,60 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Codebase.V1.Patch where + +import Control.Lens hiding (children, cons, transform) +import Data.Foldable (Foldable (toList)) +import qualified Data.Set as Set +import Data.Set (Set) +import qualified U.Util.Relation as R +import U.Util.Relation (Relation) +import qualified Unison.Codebase.V1.LabeledDependency as LD +import Unison.Codebase.V1.LabeledDependency (LabeledDependency) +import Unison.Codebase.V1.Patch.TermEdit (TermEdit) +import qualified Unison.Codebase.V1.Patch.TermEdit as TermEdit +import Unison.Codebase.V1.Patch.TypeEdit (TypeEdit) +import qualified Unison.Codebase.V1.Patch.TypeEdit as TypeEdit +import Unison.Codebase.V1.Reference (Reference) + +data Patch = Patch + { _termEdits :: Relation Reference TermEdit, + _typeEdits :: Relation Reference TypeEdit + } + deriving (Eq, Ord, Show) + +makeLenses ''Patch + +labeledDependencies :: Patch -> Set LabeledDependency +labeledDependencies Patch {..} = + Set.map LD.termRef (R.dom _termEdits) + <> Set.fromList + (fmap LD.termRef $ TermEdit.references =<< toList (R.ran _termEdits)) + <> Set.map LD.typeRef (R.dom _typeEdits) + <> Set.fromList + (fmap LD.typeRef $ TypeEdit.references =<< toList (R.ran _typeEdits)) + +allReferences :: Patch -> Set Reference +allReferences p = typeReferences p <> termReferences p + where + typeReferences p = + Set.fromList + [ r | (old, TypeEdit.Replace new) <- R.toList (_typeEdits p), r <- [old, new] + ] + termReferences p = + Set.fromList + [ r | (old, TermEdit.Replace new _) <- R.toList (_termEdits p), r <- [old, new] + ] + +-- | Returns the set of references which are the target of an arrow in the patch +allReferenceTargets :: Patch -> Set Reference 
+allReferenceTargets p = typeReferences p <> termReferences p + where + typeReferences p = + Set.fromList + [new | (_, TypeEdit.Replace new) <- R.toList (_typeEdits p)] + termReferences p = + Set.fromList + [new | (_, TermEdit.Replace new _) <- R.toList (_termEdits p)] diff --git a/parser-typechecker/src/Unison/Codebase/TermEdit.hs b/codebase1/codebase/Unison/Codebase/V1/Patch/TermEdit.hs similarity index 54% rename from parser-typechecker/src/Unison/Codebase/TermEdit.hs rename to codebase1/codebase/Unison/Codebase/V1/Patch/TermEdit.hs index 7e2239024f..f3955ab09b 100644 --- a/parser-typechecker/src/Unison/Codebase/TermEdit.hs +++ b/codebase1/codebase/Unison/Codebase/V1/Patch/TermEdit.hs @@ -1,11 +1,6 @@ -module Unison.Codebase.TermEdit where +module Unison.Codebase.V1.Patch.TermEdit where -import Unison.Hashable (Hashable) -import qualified Unison.Hashable as H -import Unison.Reference (Reference) -import qualified Unison.Typechecker as Typechecker -import Unison.Type (Type) -import Unison.Var (Var) +import Unison.Codebase.V1.Reference (Reference) data TermEdit = Replace Reference Typing | Deprecate deriving (Eq, Ord, Show) @@ -20,15 +15,6 @@ references Deprecate = [] data Typing = Same | Subtype | Different deriving (Eq, Ord, Show) -instance Hashable Typing where - tokens Same = [H.Tag 0] - tokens Subtype = [H.Tag 1] - tokens Different = [H.Tag 2] - -instance Hashable TermEdit where - tokens (Replace r t) = [H.Tag 0] ++ H.tokens r ++ H.tokens t - tokens Deprecate = [H.Tag 1] - toReference :: TermEdit -> Maybe Reference toReference (Replace r _) = Just r toReference Deprecate = Nothing @@ -44,8 +30,3 @@ isSame e = case e of Replace _ Same -> True _ -> False -typing :: Var v => Type v loc -> Type v loc -> Typing -typing newType oldType | Typechecker.isEqual newType oldType = Same - | Typechecker.isSubtype newType oldType = Subtype - | otherwise = Different - diff --git a/codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs 
b/codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs new file mode 100644 index 0000000000..afb911cd8d --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs @@ -0,0 +1,14 @@ +module Unison.Codebase.V1.Patch.TypeEdit where + +import Unison.Codebase.V1.Reference (Reference) + +data TypeEdit = Replace Reference | Deprecate + deriving (Eq, Ord, Show) + +references :: TypeEdit -> [Reference] +references (Replace r) = [r] +references Deprecate = [] + +toReference :: TypeEdit -> Maybe Reference +toReference (Replace r) = Just r +toReference Deprecate = Nothing diff --git a/codebase1/codebase/Unison/Codebase/V1/Reference.hs b/codebase1/codebase/Unison/Codebase/V1/Reference.hs new file mode 100644 index 0000000000..6303d2fd76 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Reference.hs @@ -0,0 +1,61 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.V1.Reference where + +import Data.Char (isDigit) +import qualified Data.Text as Text +import Data.Word (Word64) +import Data.Text (Text) +import U.Util.Base32Hex (Base32Hex) +import qualified U.Util.Hash as H +import qualified U.Util.Base32Hex as Base32Hex +import qualified Data.Set as Set +import Data.Set (Set) + +data Reference + = Builtin Text.Text + | -- `Derived` can be part of a strongly connected component. + -- The `Pos` refers to a particular element of the component + -- and the `Size` is the number of elements in the component. 
+ -- Using an ugly name so no one tempted to use this + DerivedId Id + deriving (Eq, Ord, Show) + +pattern Derived :: H.Hash -> Pos -> Size -> Reference +pattern Derived h i n = DerivedId (Id h i n) + +type Pos = Word64 +type Size = Word64 + +-- todo: don't read or return size; must also update showSuffix and fromText +data Id = Id H.Hash Pos Size deriving (Eq, Ord, Show) + +readSuffix :: Text -> Either String (Pos, Size) +readSuffix t = case Text.breakOn "c" t of + (pos, Text.drop 1 -> size) + | Text.all isDigit pos && Text.all isDigit size -> + Right (read (Text.unpack pos), read (Text.unpack size)) + _ -> Left "suffix decoding error" + +toText :: Reference -> Text +toText (Builtin b) = "##" <> b +toText (DerivedId (Id h i n)) = + "#" <> (Base32Hex.toText . H.toBase32Hex) h + <> "." + <> (Text.pack . show) i + <> "c" + <> (Text.pack . show) n + +newtype Component = Component {members :: Set Reference} + +-- Gives the component (dependency cycle) that the reference is a part of +componentFor :: Reference -> Component +componentFor b@(Builtin _) = Component (Set.singleton b) +componentFor (DerivedId (Id h _ n)) = + Component (Set.fromList [DerivedId (Id h i n) | i <- take (fromIntegral n) [0 ..]]) + +derivedBase32Hex :: Base32Hex -> Pos -> Size -> Reference +derivedBase32Hex h i n = DerivedId (Id (H.fromBase32Hex h) i n) diff --git a/codebase1/codebase/Unison/Codebase/V1/Referent.hs b/codebase1/codebase/Unison/Codebase/V1/Referent.hs new file mode 100644 index 0000000000..07c338790a --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Referent.hs @@ -0,0 +1,54 @@ +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codebase.V1.Referent where + +import Unison.Codebase.V1.Reference (Reference) +import qualified Unison.Codebase.V1.Reference as R + +import Unison.Codebase.V1.ConstructorType (ConstructorType) +import Data.Word (Word64) + +-- Slightly odd naming. 
This is the "referent of term name in the codebase", +-- rather than the target of a Reference. +type Referent = Referent' Reference +pattern Ref :: Reference -> Referent +pattern Ref r = Ref' r +pattern Con :: Reference -> Int -> ConstructorType -> Referent +pattern Con r i t = Con' r i t +{-# COMPLETE Ref, Con #-} + +type Id = Referent' R.Id + +data Referent' r = Ref' r | Con' r Int ConstructorType + deriving (Show, Ord, Eq, Functor) + +type Pos = Word64 +type Size = Word64 + +isConstructor :: Referent -> Bool +isConstructor Con{} = True +isConstructor _ = False + +toTermReference :: Referent -> Maybe Reference +toTermReference = \case + Ref r -> Just r + _ -> Nothing + +toReference :: Referent -> Reference +toReference = toReference' + +toReference' :: Referent' r -> r +toReference' = \case + Ref' r -> r + Con' r _i _t -> r + +fromId :: Id -> Referent +fromId = fmap R.DerivedId + +toTypeReference :: Referent -> Maybe Reference +toTypeReference = \case + Con r _i _t -> Just r + _ -> Nothing diff --git a/parser-typechecker/src/Unison/Codebase/Serialization.hs b/codebase1/codebase/Unison/Codebase/V1/Serialization/Serialization.hs similarity index 95% rename from parser-typechecker/src/Unison/Codebase/Serialization.hs rename to codebase1/codebase/Unison/Codebase/V1/Serialization/Serialization.hs index edade8cc1c..f2a8f7a19a 100644 --- a/parser-typechecker/src/Unison/Codebase/Serialization.hs +++ b/codebase1/codebase/Unison/Codebase/V1/Serialization/Serialization.hs @@ -1,6 +1,6 @@ {-# LANGUAGE RankNTypes #-} -module Unison.Codebase.Serialization where +module Unison.Codebase.V1.Serialization.Serialization where import Data.Bytes.Get (MonadGet, runGetS) import Data.Bytes.Put (MonadPut, runPutS) diff --git a/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs b/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs new file mode 100644 index 0000000000..5e467b6033 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs @@ -0,0 +1,377 @@ 
+{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE Strict #-} +{-# LANGUAGE RankNTypes #-} + +module Unison.Codebase.V1.Serialization.V1 where + +import Control.Applicative (Applicative (liftA2), liftA3) +import Control.Monad (replicateM) +import Data.Bits (Bits) +import qualified Data.ByteString as B +import Data.Bytes.Get +import Data.Bytes.Serial (deserialize, deserializeBE) +import Data.Bytes.Signed (Unsigned) +import Data.Bytes.VarInt (VarInt (..)) +import Data.Int (Int64) +import qualified Data.Map as Map +import Data.Map (Map) +import qualified Data.Sequence as Sequence +import qualified Data.Set as Set +import Data.Text (Text) +import Data.Text.Encoding (decodeUtf8) +import Data.Word (Word64) +import U.Util.Hash (Hash) +import qualified U.Util.Hash as Hash +import U.Util.Relation (Relation) +import qualified U.Util.Relation as Relation +import qualified Unison.Codebase.V1.ABT as ABT +import Unison.Codebase.V1.Branch.NameSegment as NameSegment +import qualified Unison.Codebase.V1.Branch.Raw as Branch +import Unison.Codebase.V1.Causal.Raw (Raw (..), RawHash (..)) +import qualified Unison.Codebase.V1.Causal.Raw as Causal +import qualified Unison.Codebase.V1.ConstructorType as CT +import qualified Unison.Codebase.V1.DataDeclaration as DataDeclaration +import Unison.Codebase.V1.DataDeclaration (DataDeclaration, EffectDeclaration) +import Unison.Codebase.V1.Patch (Patch (..)) +import Unison.Codebase.V1.Patch.TermEdit (TermEdit) +import qualified Unison.Codebase.V1.Patch.TermEdit as TermEdit +import Unison.Codebase.V1.Patch.TypeEdit (TypeEdit) +import qualified Unison.Codebase.V1.Patch.TypeEdit as TypeEdit +import Unison.Codebase.V1.Reference (Reference) +import qualified Unison.Codebase.V1.Reference as Reference +import Unison.Codebase.V1.Referent (Referent) +import qualified Unison.Codebase.V1.Referent as Referent +import Unison.Codebase.V1.Star3 (Star3) +import qualified Unison.Codebase.V1.Star3 as Star3 +import 
Unison.Codebase.V1.Symbol (Symbol (..)) +import Unison.Codebase.V1.Term (Term) +import qualified Unison.Codebase.V1.Term as Term +import qualified Unison.Codebase.V1.Term.Pattern as Pattern +import Unison.Codebase.V1.Term.Pattern (Pattern, SeqOp) +import qualified Unison.Codebase.V1.Type as Type +import Unison.Codebase.V1.Type (Type) +import Unison.Codebase.V1.Type.Kind (Kind) +import qualified Unison.Codebase.V1.Type.Kind as Kind +import Prelude hiding (getChar, putChar) + +-- ABOUT THIS FORMAT: +-- +-- A serialization format for uncompiled Unison syntax trees. +-- +-- Finalized: No +-- +-- If Finalized: Yes, don't modify this file in a way that affects serialized form. +-- Instead, create a new file, V(n + 1). +-- This ensures that we have a well-defined serialized form and can read +-- and write old versions. + +unknownTag :: (MonadGet m, Show a) => String -> a -> m x +unknownTag msg tag = + fail $ + "unknown tag " ++ show tag + ++ " while deserializing: " + ++ msg + +getCausal0 :: MonadGet m => m a -> m (Causal.Raw h a) +getCausal0 getA = getWord8 >>= \case + 0 -> RawOne <$> getA + 1 -> flip RawCons <$> (RawHash <$> getHash) <*> getA + 2 -> flip RawMerge . 
Set.fromList <$> getList (RawHash <$> getHash) <*> getA + x -> unknownTag "Causal0" x + +getLength :: + ( MonadGet m, + Integral n, + Integral (Unsigned n), + Bits n, + Bits (Unsigned n) + ) => + m n +getLength = unVarInt <$> deserialize + +getText :: MonadGet m => m Text +getText = do + len <- getLength + bs <- B.copy <$> getBytes len + pure $ decodeUtf8 bs + +getFloat :: MonadGet m => m Double +getFloat = deserializeBE + +getNat :: MonadGet m => m Word64 +getNat = getWord64be + +getInt :: MonadGet m => m Int64 +getInt = deserializeBE + +getBoolean :: MonadGet m => m Bool +getBoolean = go =<< getWord8 + where + go 0 = pure False + go 1 = pure True + go t = unknownTag "Boolean" t + +getHash :: MonadGet m => m Hash +getHash = do + len <- getLength + bs <- B.copy <$> getBytes len + pure $ Hash.fromBytes bs + +getReference :: MonadGet m => m Reference +getReference = do + tag <- getWord8 + case tag of + 0 -> Reference.Builtin <$> getText + 1 -> Reference.DerivedId <$> (Reference.Id <$> getHash <*> getLength <*> getLength) + _ -> unknownTag "Reference" tag + +getReferent :: MonadGet m => m Referent +getReferent = do + tag <- getWord8 + case tag of + 0 -> Referent.Ref <$> getReference + 1 -> Referent.Con <$> getReference <*> getLength <*> getConstructorType + _ -> unknownTag "getReferent" tag + +getConstructorType :: MonadGet m => m CT.ConstructorType +getConstructorType = getWord8 >>= \case + 0 -> pure CT.Data + 1 -> pure CT.Effect + t -> unknownTag "getConstructorType" t + +getMaybe :: MonadGet m => m a -> m (Maybe a) +getMaybe getA = getWord8 >>= \tag -> case tag of + 0 -> pure Nothing + 1 -> Just <$> getA + _ -> unknownTag "Maybe" tag + +-- getFolded :: MonadGet m => (b -> a -> b) -> b -> m a -> m b +-- getFolded f z a = +-- foldl' f z <$> getList a + +getList :: MonadGet m => m a -> m [a] +getList a = getLength >>= (`replicateM` a) + +getABT :: + (MonadGet m, Foldable f, Functor f, Ord v) => + m v -> + m a -> + (forall x. 
m x -> m (f x)) -> + m (ABT.Term f v a) +getABT getVar getA getF = getList getVar >>= go [] + where + go env fvs = do + a <- getA + tag <- getWord8 + case tag of + 0 -> do + tag <- getWord8 + case tag of + 0 -> ABT.annotatedVar a . (env !!) <$> getLength + 1 -> ABT.annotatedVar a . (fvs !!) <$> getLength + _ -> unknownTag "getABT.Var" tag + 1 -> ABT.tm' a <$> getF (go env fvs) + 2 -> do + v <- getVar + body <- go (v : env) fvs + pure $ ABT.abs' a v body + 3 -> ABT.cycle' a <$> go env fvs + _ -> unknownTag "getABT" tag + +getKind :: MonadGet m => m Kind +getKind = getWord8 >>= \tag -> case tag of + 0 -> pure Kind.Star + 1 -> Kind.Arrow <$> getKind <*> getKind + _ -> unknownTag "getKind" tag + +getType :: + (MonadGet m, Ord v) => + m v -> + m a -> + m (Type v a) +getType getVar getA = getABT getVar getA go + where + go getChild = getWord8 >>= \tag -> case tag of + 0 -> Type.Ref <$> getReference + 1 -> Type.Arrow <$> getChild <*> getChild + 2 -> Type.Ann <$> getChild <*> getKind + 3 -> Type.App <$> getChild <*> getChild + 4 -> Type.Effect <$> getChild <*> getChild + 5 -> Type.Effects <$> getList getChild + 6 -> Type.Forall <$> getChild + 7 -> Type.IntroOuter <$> getChild + _ -> unknownTag "getType" tag + +getSymbol :: MonadGet m => m Symbol +getSymbol = Symbol <$> getLength <*> getText + +getSeqOp :: MonadGet m => m SeqOp +getSeqOp = getWord8 >>= \case + 0 -> pure Pattern.Cons + 1 -> pure Pattern.Snoc + 2 -> pure Pattern.Concat + tag -> unknownTag "SeqOp" tag + +getPattern :: MonadGet m => m a -> m Pattern +getPattern getA = getWord8 >>= \tag -> case tag of + 0 -> Pattern.Unbound <$ getA + 1 -> Pattern.Var <$ getA + 2 -> Pattern.Boolean <$ getA <*> getBoolean + 3 -> Pattern.Int <$ getA <*> getInt + 4 -> Pattern.Nat <$ getA <*> getNat + 5 -> Pattern.Float <$ getA <*> getFloat + 6 -> + Pattern.Constructor <$ getA <*> getReference <*> getLength + <*> getList + (getPattern getA) + 7 -> Pattern.As <$ getA <*> getPattern getA + 8 -> Pattern.EffectPure <$ getA <*> getPattern 
getA + 9 -> + Pattern.EffectBind + <$ getA + <*> getReference + <*> getLength + <*> getList (getPattern getA) + <*> getPattern getA + 10 -> Pattern.SequenceLiteral <$ getA <*> getList (getPattern getA) + 11 -> + Pattern.SequenceOp + <$ getA + <*> getPattern getA + <*> getSeqOp + <*> getPattern getA + 12 -> Pattern.Text <$ getA <*> getText + 13 -> Pattern.Char <$ getA <*> getChar + _ -> unknownTag "Pattern" tag + +getTerm :: + (MonadGet m, Ord v) => + m v -> + m a -> + m (Term v a) +getTerm getVar getA = getABT getVar getA go + where + go getChild = getWord8 >>= \tag -> case tag of + 0 -> Term.Int <$> getInt + 1 -> Term.Nat <$> getNat + 2 -> Term.Float <$> getFloat + 3 -> Term.Boolean <$> getBoolean + 4 -> Term.Text <$> getText + 5 -> Term.Ref <$> getReference + 6 -> Term.Constructor <$> getReference <*> getLength + 7 -> Term.Request <$> getReference <*> getLength + 8 -> Term.Handle <$> getChild <*> getChild + 9 -> Term.App <$> getChild <*> getChild + 10 -> Term.Ann <$> getChild <*> getType getVar getA + 11 -> Term.Sequence . 
Sequence.fromList <$> getList getChild + 12 -> Term.If <$> getChild <*> getChild <*> getChild + 13 -> Term.And <$> getChild <*> getChild + 14 -> Term.Or <$> getChild <*> getChild + 15 -> Term.Lam <$> getChild + 16 -> Term.LetRec False <$> getList getChild <*> getChild + 17 -> Term.Let False <$> getChild <*> getChild + 18 -> + Term.Match <$> getChild + <*> getList (Term.MatchCase <$> getPattern getA <*> getMaybe getChild <*> getChild) + 19 -> Term.Char <$> getChar + 20 -> Term.TermLink <$> getReferent + 21 -> Term.TypeLink <$> getReference + _ -> unknownTag "getTerm" tag + +getPair :: MonadGet m => m a -> m b -> m (a, b) +getPair = liftA2 (,) + +getTuple3 :: MonadGet m => m a -> m b -> m c -> m (a, b, c) +getTuple3 = liftA3 (,,) + +getRelation :: (MonadGet m, Ord a, Ord b) => m a -> m b -> m (Relation a b) +getRelation getA getB = Relation.fromList <$> getList (getPair getA getB) + +getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) +getMap getA getB = Map.fromList <$> getList (getPair getA getB) + +getTermEdit :: MonadGet m => m TermEdit +getTermEdit = getWord8 >>= \case + 1 -> + TermEdit.Replace <$> getReference + <*> ( getWord8 >>= \case + 1 -> pure TermEdit.Same + 2 -> pure TermEdit.Subtype + 3 -> pure TermEdit.Different + t -> unknownTag "TermEdit.Replace" t + ) + 2 -> pure TermEdit.Deprecate + t -> unknownTag "TermEdit" t + +getTypeEdit :: MonadGet m => m TypeEdit +getTypeEdit = getWord8 >>= \case + 1 -> TypeEdit.Replace <$> getReference + 2 -> pure TypeEdit.Deprecate + t -> unknownTag "TypeEdit" t + +getStar3 :: + (MonadGet m, Ord fact, Ord d1, Ord d2, Ord d3) => + m fact -> + m d1 -> + m d2 -> + m d3 -> + m (Star3 fact d1 d2 d3) +getStar3 getF getD1 getD2 getD3 = + Star3.Star3 + <$> (Set.fromList <$> getList getF) + <*> getRelation getF getD1 + <*> getRelation getF getD2 + <*> getRelation getF getD3 + +getBranchStar :: (Ord a, Ord n, MonadGet m) => m a -> m n -> m (Branch.Star a n) +getBranchStar getA getN = getStar3 getA getN getMetadataType 
(getPair getMetadataType getMetadataValue) + +getChar :: MonadGet m => m Char +getChar = toEnum . unVarInt <$> deserialize + +getNameSegment :: MonadGet m => m NameSegment +getNameSegment = NameSegment <$> getText + +getMetadataType :: MonadGet m => m Branch.MetadataType +getMetadataType = getReference + +getMetadataValue :: MonadGet m => m Branch.MetadataValue +getMetadataValue = getReference + +getRawBranch :: MonadGet m => m Branch.Raw +getRawBranch = + Branch.Raw + <$> getBranchStar getReferent getNameSegment + <*> getBranchStar getReference getNameSegment + <*> getMap getNameSegment (Branch.BranchHash <$> getHash) + <*> getMap getNameSegment (Branch.EditHash <$> getHash) + +getDataDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (DataDeclaration v a) +getDataDeclaration getV getA = + DataDeclaration.DataDeclaration + <$> getModifier + <*> getA + <*> getList getV + <*> getList (getTuple3 getA getV (getType getV getA)) + +getModifier :: MonadGet m => m DataDeclaration.Modifier +getModifier = getWord8 >>= \case + 0 -> pure DataDeclaration.Structural + 1 -> DataDeclaration.Unique <$> getText + tag -> unknownTag "DataDeclaration.Modifier" tag + +getEffectDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (EffectDeclaration v a) +getEffectDeclaration getV getA = + DataDeclaration.EffectDeclaration <$> getDataDeclaration getV getA + +getEither :: MonadGet m => m a -> m b -> m (Either a b) +getEither getL getR = getWord8 >>= \case + 0 -> Left <$> getL + 1 -> Right <$> getR + tag -> unknownTag "Either" tag + +getEdits :: MonadGet m => m Patch +getEdits = + Patch <$> getRelation getReference getTermEdit + <*> getRelation getReference getTypeEdit diff --git a/parser-typechecker/src/Unison/Util/Star3.hs b/codebase1/codebase/Unison/Codebase/V1/Star3.hs similarity index 97% rename from parser-typechecker/src/Unison/Util/Star3.hs rename to codebase1/codebase/Unison/Codebase/V1/Star3.hs index 491b4bfb59..9b7a994033 100644 --- 
a/parser-typechecker/src/Unison/Util/Star3.hs +++ b/codebase1/codebase/Unison/Codebase/V1/Star3.hs @@ -1,13 +1,13 @@ {-# LANGUAGE RecordWildCards #-} -module Unison.Util.Star3 where +module Unison.Codebase.V1.Star3 where -import Unison.Prelude - -import Unison.Util.Relation (Relation) +import U.Util.Relation (Relation) import qualified Data.Set as Set -import qualified Unison.Hashable as H -import qualified Unison.Util.Relation as R +import Data.Set (Set) +import Data.Foldable (Foldable(foldl')) +import qualified U.Util.Hashable as H +import qualified U.Util.Relation as R -- Represents a set of (fact, d1, d2, d3), but indexed using a star schema so -- it can be efficiently queried from any of the dimensions. diff --git a/codebase1/codebase/Unison/Codebase/V1/Symbol.hs b/codebase1/codebase/Unison/Codebase/V1/Symbol.hs new file mode 100644 index 0000000000..e2169c53a3 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Symbol.hs @@ -0,0 +1,13 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.V1.Symbol where + +import Data.Word (Word64) +import Data.Text (Text) + +data Symbol = Symbol !Word64 !Text deriving (Eq, Ord) + +-- instance Show Symbol where +-- show (Symbol 0 n) = show n +-- show (Symbol id n) = show n ++ "-" ++ show id diff --git a/codebase1/codebase/Unison/Codebase/V1/Term.hs b/codebase1/codebase/Unison/Codebase/V1/Term.hs new file mode 100644 index 0000000000..9e41438e4b --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Term.hs @@ -0,0 +1,167 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE UnicodeSyntax #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.V1.Term where + +import qualified Control.Monad.Writer.Strict as Writer +import Data.Foldable (Foldable 
(toList), traverse_) +import Data.Functor (($>)) +import Data.Int (Int64) +import Data.Maybe (mapMaybe) +import qualified Data.Set as Set +import Data.Sequence (Seq) +import Data.Set (Set) +import Data.Text (Text) +import Data.Word (Word64) +import qualified Unison.Codebase.V1.ABT as ABT +import qualified Unison.Codebase.V1.ConstructorType as CT +import qualified Unison.Codebase.V1.LabeledDependency as LD +import Unison.Codebase.V1.LabeledDependency (LabeledDependency) +import Unison.Codebase.V1.Reference (Reference) +import Unison.Codebase.V1.Referent (Referent) +import qualified Unison.Codebase.V1.Referent as Referent +import Unison.Codebase.V1.Term.Pattern (Pattern) +import qualified Unison.Codebase.V1.Term.Pattern as Pattern +import Unison.Codebase.V1.Type (Type) +import qualified Unison.Codebase.V1.Type as Type + +-- This gets reexported; should maybe live somewhere other than Pattern, though. +type ConstructorId = Pattern.ConstructorId + +data MatchCase a = MatchCase Pattern (Maybe a) a + deriving (Foldable, Functor, Traversable) + +-- | Base functor for terms in the Unison language +-- We need `typeVar` because the term and type variables may differ. +data F typeVar typeAnn a + = Int Int64 + | Nat Word64 + | Float Double + | Boolean Bool + | Text Text + | Char Char + | Ref Reference + | -- First argument identifies the data type, + -- second argument identifies the constructor + Constructor Reference ConstructorId + | Request Reference ConstructorId + | Handle a a + | App a a + | Ann a (Type typeVar typeAnn) + | Sequence (Seq a) + | If a a a + | And a a + | Or a a + | Lam a + | -- Note: let rec blocks have an outer ABT.Cycle which introduces as many + -- variables as there are bindings + LetRec IsTop [a] a + | -- Note: first parameter is the binding, second is the expression which may refer + -- to this let bound variable. 
Constructed as `Let b (abs v e)` + Let IsTop a a + | -- Pattern matching / eliminating data types, example: + -- case x of + -- Just n -> rhs1 + -- Nothing -> rhs2 + -- + -- translates to + -- + -- Match x + -- [ (Constructor 0 [Var], ABT.abs n rhs1) + -- , (Constructor 1 [], rhs2) ] + Match a [MatchCase a] + | TermLink Referent + | TypeLink Reference + deriving (Foldable, Functor, Traversable) + +type IsTop = Bool + +-- | Like `Term v`, but with an annotation of type `a` at every level in the tree +type Term v a = ABT.Term (F v a) v a + +-- Dependencies including referenced data and effect decls +dependencies :: Ord v => Term v a -> Set Reference +dependencies t = Set.map (LD.fold id Referent.toReference) (labeledDependencies t) + +typeDependencies :: Ord v => Term v a -> Set Reference +typeDependencies = + Set.fromList . mapMaybe (LD.fold Just (const Nothing)) . toList . labeledDependencies + +-- Gets the types to which this term contains references via patterns and +-- data constructors. +constructorDependencies :: + Ord v => Term v a -> Set Reference +constructorDependencies = + Set.unions + . generalizedDependencies + (const mempty) + (const mempty) + Set.singleton + (const . Set.singleton) + Set.singleton + (const . Set.singleton) + Set.singleton + +generalizedDependencies :: + (Ord v, Ord r) => + (Reference -> r) -> + (Reference -> r) -> + (Reference -> r) -> + (Reference -> ConstructorId -> r) -> + (Reference -> r) -> + (Reference -> ConstructorId -> r) -> + (Reference -> r) -> + Term v a -> + Set r +generalizedDependencies termRef typeRef literalType dataConstructor dataType effectConstructor effectType = + Set.fromList . Writer.execWriter . 
ABT.visit' f + where + f t@(Ref r) = Writer.tell [termRef r] $> t + f t@(TermLink r) = case r of + Referent.Ref r -> Writer.tell [termRef r] $> t + Referent.Con r id CT.Data -> Writer.tell [dataConstructor r id] $> t + Referent.Con r id CT.Effect -> Writer.tell [effectConstructor r id] $> t + f t@(TypeLink r) = Writer.tell [typeRef r] $> t + f t@(Ann _ typ) = + Writer.tell (map typeRef . toList $ Type.dependencies typ) $> t + f t@(Nat _) = Writer.tell [literalType Type.natRef] $> t + f t@(Int _) = Writer.tell [literalType Type.intRef] $> t + f t@(Float _) = Writer.tell [literalType Type.floatRef] $> t + f t@(Boolean _) = Writer.tell [literalType Type.booleanRef] $> t + f t@(Text _) = Writer.tell [literalType Type.textRef] $> t + f t@(Sequence _) = Writer.tell [literalType Type.vectorRef] $> t + f t@(Constructor r cid) = + Writer.tell [dataType r, dataConstructor r cid] $> t + f t@(Request r cid) = + Writer.tell [effectType r, effectConstructor r cid] $> t + f t@(Match _ cases) = traverse_ goPat cases $> t + f t = pure t + goPat (MatchCase pat _ _) = + Writer.tell . 
toList $ + Pattern.generalizedDependencies + literalType + dataConstructor + dataType + effectConstructor + effectType + pat + +labeledDependencies :: + Ord v => Term v a -> Set LabeledDependency +labeledDependencies = + generalizedDependencies + LD.termRef + LD.typeRef + LD.typeRef + LD.dataConstructor + LD.typeRef + LD.effectConstructor + LD.typeRef diff --git a/codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs b/codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs new file mode 100644 index 0000000000..f907e8b86d --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs @@ -0,0 +1,102 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codebase.V1.Term.Pattern where + +import Data.Int (Int64) +import qualified Data.Set as Set +import qualified Unison.Codebase.V1.LabeledDependency as LD +import Unison.Codebase.V1.LabeledDependency (LabeledDependency) +import Data.Set (Set) +import Data.Text (Text) +import Data.Word (Word64) +import Unison.Codebase.V1.Reference (Reference) +import qualified Unison.Codebase.V1.Type as Type + +type ConstructorId = Int + +data Pattern + = Unbound + | Var + | Boolean !Bool + | Int !Int64 + | Nat !Word64 + | Float !Double + | Text !Text + | Char !Char + | Constructor !Reference !Int [Pattern] + | As Pattern + | EffectPure Pattern + | EffectBind !Reference !Int [Pattern] Pattern + | SequenceLiteral [Pattern] + | SequenceOp Pattern !SeqOp Pattern + deriving (Eq, Ord, Show) + +data SeqOp + = Cons + | Snoc + | Concat + deriving (Eq, Ord, Show) + +application :: Pattern -> Bool +application (Constructor _ _ (_ : _)) = True +application _ = False + +foldMap' :: Monoid m => (Pattern -> m) -> Pattern -> m +foldMap' f p = case p of + Unbound -> f p + Var -> f p + Boolean _ -> f p + Int _ -> f p + Nat _ -> f p + Float _ -> f p + Text _ -> f p + Char _ -> f p + Constructor _ _ ps -> f p <> 
foldMap (foldMap' f) ps + As p' -> f p <> foldMap' f p' + EffectPure p' -> f p <> foldMap' f p' + EffectBind _ _ ps p' -> f p <> foldMap (foldMap' f) ps <> foldMap' f p' + SequenceLiteral ps -> f p <> foldMap (foldMap' f) ps + SequenceOp p1 _ p2 -> f p <> foldMap' f p1 <> foldMap' f p2 + +generalizedDependencies :: + Ord r => + (Reference -> r) -> + (Reference -> ConstructorId -> r) -> + (Reference -> r) -> + (Reference -> ConstructorId -> r) -> + (Reference -> r) -> + Pattern -> + Set r +generalizedDependencies literalType dataConstructor dataType effectConstructor effectType = + Set.fromList + . foldMap' + ( \case + Unbound -> mempty + Var -> mempty + As _ -> mempty + Constructor r cid _ -> [dataType r, dataConstructor r cid] + EffectPure _ -> [effectType Type.effectRef] + EffectBind r cid _ _ -> + [effectType Type.effectRef, effectType r, effectConstructor r cid] + SequenceLiteral _ -> [literalType Type.vectorRef] + SequenceOp {} -> [literalType Type.vectorRef] + Boolean _ -> [literalType Type.booleanRef] + Int _ -> [literalType Type.intRef] + Nat _ -> [literalType Type.natRef] + Float _ -> [literalType Type.floatRef] + Text _ -> [literalType Type.textRef] + Char _ -> [literalType Type.charRef] + ) + +labeledDependencies :: Pattern -> Set LabeledDependency +labeledDependencies = + generalizedDependencies + LD.typeRef + LD.dataConstructor + LD.typeRef + LD.effectConstructor + LD.typeRef diff --git a/codebase1/codebase/Unison/Codebase/V1/Type.hs b/codebase1/codebase/Unison/Codebase/V1/Type.hs new file mode 100644 index 0000000000..21e0615e1c --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Type.hs @@ -0,0 +1,55 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.V1.Type where + +import qualified Control.Monad.Writer 
as Writer +import Data.Functor (($>)) +import Data.Set (Set) +import qualified Data.Set as Set +import qualified Unison.Codebase.V1.ABT as ABT +import Unison.Codebase.V1.Reference (Reference) +import qualified Unison.Codebase.V1.Reference as Reference +import qualified Unison.Codebase.V1.Type.Kind as K + +-- | Base functor for types in the Unison language +data F a + = Ref Reference + | Arrow a a + | Ann a K.Kind + | App a a + | Effect a a + | Effects [a] + | Forall a + | IntroOuter a -- binder like ∀, used to introduce variables that are + -- bound by outer type signatures, to support scoped type + -- variables + deriving (Foldable, Functor, Traversable) + +-- | Types are represented as ABTs over the base functor F, with variables in `v` +type Type v a = ABT.Term F v a + +dependencies :: Ord v => Type v a -> Set Reference +dependencies t = Set.fromList . Writer.execWriter $ ABT.visit' f t + where + f t@(Ref r) = Writer.tell [r] $> t + f t = pure t + +intRef, natRef, floatRef, booleanRef, textRef, charRef, vectorRef, bytesRef, effectRef, termLinkRef, typeLinkRef :: Reference +intRef = Reference.Builtin "Int" +natRef = Reference.Builtin "Nat" +floatRef = Reference.Builtin "Float" +booleanRef = Reference.Builtin "Boolean" +textRef = Reference.Builtin "Text" +charRef = Reference.Builtin "Char" +vectorRef = Reference.Builtin "Sequence" +bytesRef = Reference.Builtin "Bytes" +effectRef = Reference.Builtin "Effect" +termLinkRef = Reference.Builtin "Link.Term" +typeLinkRef = Reference.Builtin "Link.Type" diff --git a/codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs b/codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs new file mode 100644 index 0000000000..fbddc81306 --- /dev/null +++ b/codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs @@ -0,0 +1,3 @@ +module Unison.Codebase.V1.Type.Kind where + +data Kind = Star | Arrow Kind Kind deriving (Eq,Ord,Show) \ No newline at end of file diff --git a/codebase1/codebase/unison-codebase1.cabal 
b/codebase1/codebase/unison-codebase1.cabal new file mode 100644 index 0000000000..b8d9f5c57e --- /dev/null +++ b/codebase1/codebase/unison-codebase1.cabal @@ -0,0 +1,59 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-codebase1 +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + Unison.Codebase.V1.ABT + Unison.Codebase.V1.Branch.Raw + Unison.Codebase.V1.Branch.NameSegment + Unison.Codebase.V1.Causal.Raw + Unison.Codebase.V1.ConstructorType + Unison.Codebase.V1.DataDeclaration + Unison.Codebase.V1.FileCodebase + Unison.Codebase.V1.LabeledDependency + Unison.Codebase.V1.Patch + Unison.Codebase.V1.Patch.TermEdit + Unison.Codebase.V1.Patch.TypeEdit + Unison.Codebase.V1.Term + Unison.Codebase.V1.Term.Pattern + Unison.Codebase.V1.Type + Unison.Codebase.V1.Type.Kind + Unison.Codebase.V1.Reference + Unison.Codebase.V1.Referent + Unison.Codebase.V1.Serialization.Serialization + Unison.Codebase.V1.Serialization.V1 + Unison.Codebase.V1.Symbol + Unison.Codebase.V1.Star3 + -- other-modules: + -- other-extensions: + build-depends: + base, + base16, + bytes, + bytestring, + containers, + directory, + errors, + exceptions, + extra, + filepath, + lens, + mtl, + text, + unliftio, + unison-core, + unison-util + hs-source-dirs: . + default-language: Haskell2010 diff --git a/codebase2/CHANGELOG.md b/codebase2/CHANGELOG.md new file mode 100644 index 0000000000..ce1c7792ea --- /dev/null +++ b/codebase2/CHANGELOG.md @@ -0,0 +1,5 @@ +# Revision history for unison-codebase2-core + +## 0.1.0.0 -- YYYY-mm-dd + +* First version. Released on an unsuspecting world. 
diff --git a/codebase2/LICENSE b/codebase2/LICENSE new file mode 100644 index 0000000000..089d71dc86 --- /dev/null +++ b/codebase2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2020 Unison Computing, PBC + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs new file mode 100644 index 0000000000..325c35be73 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs @@ -0,0 +1,23 @@ +module U.Codebase.Sqlite.Branch.Diff where + +import Data.Map (Map) +import Data.Set (Set) +import U.Codebase.Sqlite.Branch.MetadataSet +import U.Codebase.Sqlite.DbId +import U.Codebase.Sqlite.Reference +import U.Codebase.Sqlite.Referent + +data Diff = Diff + { reference :: BranchId, + add :: DiffSlice, + remove :: DiffSlice + } + +type NameSegment = TextId + +data DiffSlice = DiffSlice + { terms :: Map NameSegment (Set Referent), + types :: Map NameSegment (Set Reference), + termMetadata :: Map NameSegment (Map Referent MetadataSetFormat), + typeMetadata :: Map NameSegment (Map Reference MetadataSetFormat) + } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs new file mode 100644 index 0000000000..2c86fede8c --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs @@ -0,0 +1,8 @@ +module U.Codebase.Sqlite.Branch.Format where + +import U.Codebase.Sqlite.Branch.Full +import U.Codebase.Sqlite.Branch.Diff + +data BranchFormat + = Full Branch + | Diff Diff \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs new file mode 100644 index 0000000000..9e3d1be211 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs @@ -0,0 +1,16 @@ +module U.Codebase.Sqlite.Branch.Full where + +import Data.Map (Map) +import U.Codebase.Sqlite.Referent +import U.Codebase.Sqlite.Reference +import U.Codebase.Sqlite.DbId +import U.Codebase.Sqlite.Branch.MetadataSet + +type NameSegment = TextId + +data Branch = Branch + { terms :: Map NameSegment (Map Referent MetadataSetFormat), + 
types :: Map NameSegment (Map Reference MetadataSetFormat), + patches :: Map NameSegment PatchId, + children :: Map NameSegment BranchId + } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs new file mode 100644 index 0000000000..50cc8ff5d7 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs @@ -0,0 +1,6 @@ +module U.Codebase.Sqlite.Branch.MetadataSet where + +import Data.Set (Set) +import U.Codebase.Sqlite.Reference (Reference) + +data MetadataSetFormat = Inline (Set Reference) \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs new file mode 100644 index 0000000000..c6deaddc3b --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs @@ -0,0 +1,7 @@ +module U.Codebase.Sqlite.Causal where + + +data Causal hc he = RawCausal { + valueHash :: he, + parentHashes :: [hc] +} \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs new file mode 100644 index 0000000000..05e7b7339c --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -0,0 +1,23 @@ +{-# LANGUAGE UndecidableInstances #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE PatternSynonyms #-} + +module U.Codebase.Sqlite.DbId where + +import Database.SQLite.Simple.FromField +import Database.SQLite.Simple.ToField +import Data.Word (Word64) +import U.Util.Hashable (Hashable) +import Data.Bits (Bits) + +newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 +newtype TextId = TextId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, 
ToField) via Word64 + +newtype TermId = TermId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId +newtype DeclId = DeclId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId +newtype PatchId = PatchId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId + +newtype BranchId = BranchId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via Word64 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs new file mode 100644 index 0000000000..b99fc86c67 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs @@ -0,0 +1,32 @@ +module U.Codebase.Sqlite.Decl.Format where + +import U.Codebase.Decl (DeclType, Modifier) +import U.Codebase.Reference (Reference') +-- import U.Codebase.Sqlite.DbId +import qualified U.Codebase.Type as Type +import qualified U.Core.ABT as ABT +import U.Codebase.Sqlite.LocalIds + +-- | Add new formats here +data TermFormat v = Term (LocallyIndexedComponent v) + +-- | V1: Decls included `Hash`es inline +-- V2: Instead of `Hash`, we use a smaller index. +data LocallyIndexedComponent v = LocallyIndexedComponent + { lookup :: LocalIds, + component :: [Decl v] + } + +data Decl v = DataDeclaration + { declType :: DeclType, + modifier :: Modifier, + bound :: [v], + constructors' :: [Type v] + } + +type Type v = ABT.Term (Type.F' TypeRef) v () +type TypeRef = Reference' LocalTextId (Maybe LocalTypeId) + +-- Int, because that's what Data.Vector.(!) 
takes +newtype LocalTextId = LocalTextId Int +newtype LocalTypeId = LocalTypeId Int diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs new file mode 100644 index 0000000000..afa044aae7 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -0,0 +1,11 @@ +module U.Codebase.Sqlite.LocalIds where + +import Data.Vector (Vector) +import U.Codebase.Sqlite.DbId + +-- |A mapping between index ids that are local to an object and the ids in the database +data LocalIds = LocalIds + { textLookup :: Vector TextId, + objectLookup :: Vector ObjectId + } + diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs new file mode 100644 index 0000000000..fe3f56f639 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -0,0 +1,16 @@ +module U.Codebase.Sqlite.Patch.Diff where + +import Data.Map (Map) +import U.Codebase.Sqlite.Referent +import U.Codebase.Sqlite.Reference +import U.Codebase.Sqlite.Patch.TermEdit +import U.Codebase.Sqlite.Patch.TypeEdit +import U.Codebase.Sqlite.DbId + +data PatchDiff = PatchDiff + { reference :: PatchId + , addedTermEdits :: Map Referent TermEdit + , addedTypeEdits :: Map Reference TypeEdit + , removedTermEdits :: Map Referent TermEdit + , removedTypeEdits :: Map Reference TypeEdit + } deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs new file mode 100644 index 0000000000..ba4a0b6f14 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs @@ -0,0 +1 @@ +module U.Codebase.Sqlite.Patch.Format where \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs new file mode 100644 index 0000000000..6d5c70033d --- /dev/null +++ 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs @@ -0,0 +1,11 @@ +module U.Codebase.Sqlite.Patch.Full where + +import Data.Map (Map) +import U.Codebase.Sqlite.Types +import U.Codebase.Sqlite.Patch.TermEdit +import U.Codebase.Sqlite.Patch.TypeEdit + +data Patch = Patch { + termEdits :: Map Referent TermEdit, + typeEdits :: Map Reference TypeEdit +} diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs new file mode 100644 index 0000000000..12d2fd39ef --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs @@ -0,0 +1,12 @@ +module U.Codebase.Sqlite.Patch.TermEdit where + +import U.Codebase.Sqlite.Reference (Reference) + +data TermEdit = Replace Reference Typing | Deprecate + deriving (Eq, Ord, Show) + +-- Replacements with the Same type can be automatically propagated. +-- Replacements with a Subtype can be automatically propagated but may result in dependents getting more general types, so requires re-inference. +-- Replacements of a Different type need to be manually propagated by the programmer. 
+data Typing = Same | Subtype | Different + deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs new file mode 100644 index 0000000000..cd3e3035ce --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs @@ -0,0 +1,6 @@ +module U.Codebase.Sqlite.Patch.TypeEdit where + +import U.Codebase.Sqlite.Reference (Reference) + +data TypeEdit = Replace Reference | Deprecate + deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs new file mode 100644 index 0000000000..f024bbd94f --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -0,0 +1,9 @@ +module U.Codebase.Sqlite.Queries where + +-- import Data.String.Here.Uninterpolated (here) +-- import qualified Database.SQLite.Simple as SQLite +-- import Database.SQLite.Simple (FromRow, Connection, Only(..), ToRow(..), SQLData(SQLNull,SQLText)) +-- import Database.SQLite.Simple.FromField +-- import Database.SQLite.Simple.ToField +-- import Data.Maybe (fromJust) + diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs new file mode 100644 index 0000000000..aa42fc12fc --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -0,0 +1,13 @@ +module U.Codebase.Sqlite.Reference where + +import U.Codebase.Sqlite.DbId +import Data.Word (Word64) + +data Reference = Builtin TextId | Derived Id + deriving (Eq, Ord, Show) + +data Id = Id ObjectId ComponentIndex + deriving (Eq, Ord, Show) + +newtype ComponentIndex = ComponentIndex Word64 + deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs new file mode 100644 index 0000000000..738da2ae27 --- /dev/null +++ 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -0,0 +1,11 @@ +module U.Codebase.Sqlite.Referent where + +import Data.Word (Word64) +import U.Codebase.Sqlite.Reference (Reference) + +data Referent = Ref Reference | Con Reference ConstructorIndex + deriving (Eq, Ord, Show) + +newtype ConstructorIndex = ConstructorIndex Word64 + deriving (Eq, Ord, Show) + diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs new file mode 100644 index 0000000000..babaa6ba67 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -0,0 +1,303 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE RecordWildCards #-} + +module U.Codebase.Sqlite.Serialization where + +import Data.Bits (Bits) +import Data.Bytes.Get (MonadGet, getWord8) +import Data.Bytes.Put (MonadPut, putWord8) +import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, serialize) +import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) +import Data.Int (Int64) +import Data.List (elemIndex) +import qualified Data.Set as Set +import U.Codebase.Kind (Kind) +import Data.Word (Word64) +import qualified U.Codebase.Kind as Kind +import U.Codebase.Reference (Reference' (ReferenceBuiltin, ReferenceDerived)) +import qualified U.Codebase.Reference as Reference +import U.Codebase.Referent (Referent') +import qualified U.Codebase.Referent as Referent +import U.Codebase.Sqlite.LocalIds +import qualified U.Codebase.Sqlite.Term.Format as TermFormat +import qualified U.Codebase.Term as Term +import qualified U.Codebase.Type as Type +import qualified U.Core.ABT as ABT +import U.Util.Serialization +import Prelude hiding (getChar, putChar) + +putABT :: + (MonadPut m, Foldable f, Functor f, Ord v) => + (v -> m ()) -> + (a -> m ()) -> + (forall x. 
(x -> m ()) -> f x -> m ()) -> + ABT.Term f v a -> + m () +putABT putVar putA putF abt = + putFoldable putVar fvs *> go (annotateBound abt) + where + fvs = Set.toList $ ABT.freeVars abt + go (ABT.Term _ (a, env) abt) = putA a *> case abt of + ABT.Var v -> putWord8 0 *> putVarRef env v + ABT.Tm f -> putWord8 1 *> putF go f + ABT.Abs v body -> putWord8 2 *> putVar v *> go body + ABT.Cycle body -> putWord8 3 *> go body + annotateBound :: (Ord v, Functor f, Foldable f) => ABT.Term f v a -> ABT.Term f v (a, [v]) + annotateBound = go [] + where + go env t = + let a = (ABT.annotation t, env) + in case ABT.out t of + ABT.Abs v body -> ABT.abs a v (go (v : env) body) + ABT.Cycle body -> ABT.cycle a (go env body) + ABT.Tm f -> ABT.tm a (go env <$> f) + ABT.Var v -> ABT.annotatedVar a v + putVarRef env v = case v `elemIndex` env of + Just i -> putWord8 0 *> putVarInt i + Nothing -> case v `elemIndex` fvs of + Just i -> putWord8 1 *> putVarInt i + Nothing -> error "impossible: var not free or bound" + +getABT :: + (MonadGet m, Foldable f, Functor f, Ord v) => + m v -> + m a -> + (forall x. m x -> m (f x)) -> + m (ABT.Term f v a) +getABT getVar getA getF = getList getVar >>= go [] + where + go env fvs = do + a <- getA + tag <- getWord8 + case tag of + 0 -> do + tag <- getWord8 + case tag of + 0 -> ABT.annotatedVar a . (env !!) <$> getVarInt + 1 -> ABT.annotatedVar a . (fvs !!) 
<$> getVarInt + _ -> unknownTag "getABT.Var" tag + 1 -> ABT.tm a <$> getF (go env fvs) + 2 -> do + v <- getVar + body <- go (v : env) fvs + pure $ ABT.abs a v body + 3 -> ABT.cycle a <$> go env fvs + _ -> unknownTag "getABT" tag + +{- +Write +- [ ] term component +- [ ] types of terms +- [ ] decl component +- [ ] causal +- [ ] full branch +- [ ] diff branch +- [ ] full patch +- [ ] diff patch + +- [ ] add to dependents index +- [ ] add to type index +- [ ] add to type mentions index +-} + +putLocalIds :: MonadPut m => LocalIds -> m () +putLocalIds LocalIds {..} = do + putFoldable putVarInt textLookup + putFoldable putVarInt objectLookup + +putUnit :: Applicative m => () -> m () +putUnit _ = pure () + +getUnit :: Applicative m => m () +getUnit = pure () + +putTermComponent :: + MonadPut m => + TermFormat.LocallyIndexedComponent -> + m () +putTermComponent TermFormat.LocallyIndexedComponent {..} = do + putWord8 0 -- this format + putLocalIds lookup + putFramedArray putTermElement component + where + go :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () + go putChild = \case + Term.Int n -> + putWord8 0 *> putInt n + Term.Nat n -> + putWord8 1 *> putNat n + Term.Float n -> + putWord8 2 *> putFloat n + Term.Boolean b -> + putWord8 3 *> putBoolean b + Term.Text t -> + putWord8 4 *> putVarInt t + Term.Ref r -> + putWord8 5 *> putRecursiveReference r + Term.Constructor r cid -> + putWord8 6 *> putReference r *> putVarInt cid + Term.Request r cid -> + putWord8 7 *> putReference r *> putVarInt cid + Term.Handle h a -> + putWord8 8 *> putChild h *> putChild a + Term.App f arg -> + putWord8 9 *> putChild f *> putChild arg + Term.Ann e t -> + putWord8 10 *> putChild e *> putType putReference putSymbol t + Term.Sequence vs -> + putWord8 11 *> putFoldable putChild vs + Term.If cond t f -> + putWord8 12 *> putChild cond *> putChild t *> putChild f + Term.And x y -> + putWord8 13 *> putChild x *> putChild y + Term.Or x y -> + putWord8 14 *> putChild x *> putChild y + Term.Lam 
body -> + putWord8 15 *> putChild body + Term.LetRec bs body -> + putWord8 16 *> putFoldable putChild bs *> putChild body + Term.Let b body -> + putWord8 17 *> putChild b *> putChild body + Term.Match s cases -> + putWord8 18 *> putChild s *> putFoldable (putMatchCase putChild) cases + Term.Char c -> + putWord8 19 *> putChar c + Term.TermLink r -> + putWord8 20 *> putReferent r + Term.TypeLink r -> + putWord8 21 *> putReference r + putTermElement :: MonadPut m => TermFormat.Term -> m () + putTermElement = putABT putSymbol putUnit go + putSymbol :: MonadPut m => TermFormat.Symbol -> m () + putSymbol (TermFormat.Symbol n t) = putVarInt n >> putText t + putReferent :: MonadPut m => Referent' TermFormat.TermRef TermFormat.TypeRef -> m () + putReferent = \case + Referent.Ref r -> do + putWord8 0 + putRecursiveReference r + Referent.Con r i -> do + putWord8 1 + putReference r + putVarInt i + putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase TermFormat.TypeRef a -> m () + putMatchCase putChild (Term.MatchCase pat guard body) = + putPattern pat *> putMaybe putChild guard *> putChild body + where + putPattern :: MonadPut m => Term.Pattern TermFormat.TypeRef -> m () + putPattern p = case p of + Term.PUnbound -> putWord8 0 + Term.PVar -> putWord8 1 + Term.PBoolean b -> putWord8 2 *> putBoolean b + Term.PInt n -> putWord8 3 *> putInt n + Term.PNat n -> putWord8 4 *> putNat n + Term.PFloat n -> putWord8 5 *> putFloat n + Term.PConstructor r cid ps -> + putWord8 6 + *> putReference r + *> putVarInt cid + *> putFoldable putPattern ps + Term.PAs p -> putWord8 7 *> putPattern p + Term.PEffectPure p -> putWord8 8 *> putPattern p + Term.PEffectBind r cid args k -> + putWord8 9 + *> putReference r + *> putVarInt cid + *> putFoldable putPattern args + *> putPattern k + Term.PSequenceLiteral ps -> + putWord8 10 *> putFoldable putPattern ps + Term.PSequenceOp l op r -> + putWord8 11 + *> putPattern l + *> putSeqOp op + *> putPattern r + Term.PText t -> putWord8 12 *> putText t 
+ Term.PChar c -> putWord8 13 *> putChar c + where + putSeqOp :: MonadPut m => Term.SeqOp -> m () + putSeqOp Term.PCons = putWord8 0 + putSeqOp Term.PSnoc = putWord8 1 + putSeqOp Term.PConcat = putWord8 2 + +putReference :: + (MonadPut m, Integral t, Bits t, Integral r, Bits r) => + Reference' t r -> + m () +putReference = \case + ReferenceBuiltin t -> + putWord8 0 *> putVarInt t + ReferenceDerived (Reference.Id r index) -> + putWord8 1 *> putVarInt r *> putVarInt index + +putRecursiveReference :: + (MonadPut m, Integral t, Bits t, Integral r, Bits r) => + Reference' t (Maybe r) -> + m () +putRecursiveReference = \case + ReferenceBuiltin t -> + putWord8 0 *> putVarInt t + ReferenceDerived (Reference.Id r index) -> + putWord8 1 *> putMaybe putVarInt r *> putVarInt index + +putInt :: MonadPut m => Int64 -> m () +putInt = serializeBE + +putNat :: MonadPut m => Word64 -> m () +putNat = serializeBE + +putFloat = serializeBE +putFloat :: MonadPut m => Double -> m () + +putBoolean :: MonadPut m => Bool -> m () +putBoolean False = putWord8 0 +putBoolean True = putWord8 1 + +putType :: + (MonadPut m, Ord v) => + (r -> m ()) -> + (v -> m ()) -> + Type.TypeR r v -> + m () +putType putReference putVar = putABT putVar putUnit go + where + go putChild t = case t of + Type.Ref r -> putWord8 0 *> putReference r + Type.Arrow i o -> putWord8 1 *> putChild i *> putChild o + Type.Ann t k -> putWord8 2 *> putChild t *> putKind k + Type.App f x -> putWord8 3 *> putChild f *> putChild x + Type.Effect e t -> putWord8 4 *> putChild e *> putChild t + Type.Effects es -> putWord8 5 *> putFoldable putChild es + Type.Forall body -> putWord8 6 *> putChild body + Type.IntroOuter body -> putWord8 7 *> putChild body + putKind :: MonadPut m => Kind -> m () + putKind k = case k of + Kind.Star -> putWord8 0 + Kind.Arrow i o -> putWord8 1 *> putKind i *> putKind o + +putChar :: MonadPut m => Char -> m () +putChar = serialize . VarInt . fromEnum + +getChar :: MonadGet m => m Char +getChar = toEnum . 
unVarInt <$> deserialize + +putMaybe :: MonadPut m => (a -> m ()) -> Maybe a -> m () +putMaybe putA = \case + Nothing -> putWord8 0 + Just a -> putWord8 1 *> putA a + +getMaybe :: MonadGet m => m a -> m (Maybe a) +getMaybe getA = getWord8 >>= \tag -> case tag of + 0 -> pure Nothing + 1 -> Just <$> getA + _ -> unknownTag "Maybe" tag + +unknownTag :: (MonadGet m, Show a) => String -> a -> m x +unknownTag msg tag = + fail $ + "unknown tag " ++ show tag + ++ " while deserializing: " + ++ msg + +-- putSymbol :: MonadPut m => Symbol -> m () +-- putSymbol (Symbol id typ) = putLength id *> putText (Var.rawName typ) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs new file mode 100644 index 0000000000..0e759464d1 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -0,0 +1,36 @@ +{-# LANGUAGE DerivingVia #-} +module U.Codebase.Sqlite.Term.Format where + +import Data.Text (Text) +import U.Codebase.Reference (Reference') +import Data.Word (Word64) +import U.Codebase.Referent (Referent') +-- import U.Codebase.Sqlite.DbId +import U.Codebase.Sqlite.LocalIds +import qualified U.Codebase.Term as Term +import qualified U.Core.ABT as ABT +import Data.Bits (Bits) + +-- Int, because that's what Data.Vector.(!) 
takes +newtype LocalTermId = LocalTermId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalTypeId = LocalTypeId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + +type TermRef = Reference' LocalTextId (Maybe LocalTermId) + +type TypeRef = Reference' LocalTextId LocalTypeId + +data LocallyIndexedComponent = LocallyIndexedComponent + { lookup :: LocalIds, + component :: [Term] + } + +type F = + Term.F' LocalTextId TermRef TypeRef (Referent' TermRef TypeRef) TypeRef Symbol + +type Term = ABT.Term F Symbol () + +data TermFormat + = Term LocallyIndexedComponent + +data Symbol = Symbol !Word64 !Text deriving (Eq, Ord, Show) \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs new file mode 100644 index 0000000000..5cbc509c60 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs @@ -0,0 +1,8 @@ +module U.Codebase.Sqlite.Types where + +import U.Codebase.Sqlite.DbId +import U.Codebase.Referent (Referent') +import U.Codebase.Reference (Reference') + +type Reference = Reference' TextId ObjectId +type Referent = Referent' Reference Reference diff --git a/codebase2/codebase-sqlite/sql/create-index.sql b/codebase2/codebase-sqlite/sql/create-index.sql new file mode 100644 index 0000000000..db2cc5a779 --- /dev/null +++ b/codebase2/codebase-sqlite/sql/create-index.sql @@ -0,0 +1,122 @@ +--CREATE TABLE reference_derived ( +-- id INTEGER NOT NULL PRIMARY KEY, +-- hash_id INTEGER NOT NULL REFERENCES hash(id), +-- component_index INTEGER NOT NULL, +-- UNIQUE (hash_id, component_index) +--); +--CREATE INDEX reference_derived_hash_id ON reference_derived(hash_id); +-- +--CREATE TABLE reference ( +-- id INTEGER NOT NULL PRIMARY KEY, +-- builtin TEXT, -- a builtin name, or null +-- reference_derived_id INTEGER 
REFERENCES reference_derived(id), +-- UNIQUE(builtin, reference_derived_id), +-- -- exactly one should be null +-- CHECK (builtin IS NULL <> reference_derived_id IS NULL) +--); +-- +---- `Referent' ReferenceDerivedId` but without `ConstructorType`, +---- which is linked to the object. +--CREATE TABLE referent_derived ( +-- id INTEGER NOT NULL PRIMARY KEY, +-- reference_derived_id INTEGER NOT NULL REFERENCES reference_derived(id), +-- constructor_id INTEGER, +-- UNIQUE(reference_derived_id, constructor_id) +--); +-- +---- just using rowid since we don't need joins +---- index terms by types +--CREATE TABLE find_type_index ( +-- type_reference_id INTEGER NOT NULL REFERENCES reference(id), +-- referent_derived_id INTEGER NOT NULL REFERENCES referent_derived(id) +--); +--CREATE INDEX find_type_index_reference ON find_type_index(type_reference_id); +--CREATE INDEX find_type_index_referent ON find_type_index(referent_derived_id); +-- +--CREATE TABLE find_type_mentions_index ( +-- type_reference_id INTEGER NOT NULL REFERENCES reference(id), +-- referent_id INTEGER NOT NULL REFERENCES referent_derived(id) +--); +--CREATE INDEX find_type_mentions_index_reference ON find_type_mentions_index(type_reference_id); +--CREATE INDEX find_type_mentions_index_referent ON find_type_mentions_index(referent_id); +-- +--CREATE TABLE dependents_index ( +-- dependency_id INTEGER NOT NULL REFERENCES reference(id), +-- dependent_id INTEGER NOT NULL REFERENCES reference_derived(id) +--); +--CREATE INDEX dependents_index_dependency ON dependents_index(dependency_id); +--CREATE INDEX dependents_index_dependent ON dependents_index(dependent_id); + +CREATE TABLE find_type_index ( + type_reference_builtin TEXT NULL, + type_reference_hash_id INTEGER NULL REFERENCES hash(id), + type_reference_component_index INTEGER NULL, + term_referent_object_id INTEGER NOT NULL REFERENCES hash(id), + term_referent_component_index INTEGER NOT NULL, + term_referent_constructor_index INTEGER NULL, + PRIMARY KEY( + 
type_reference_builtin,
+    -- fixed: constraints below must name the declared columns
+    -- (type_reference_hash_id / type_reference_component_index), not the
+    -- nonexistent *_derived_* variants
+    type_reference_hash_id,
+    type_reference_component_index
+  ),
+  UNIQUE (
+    term_referent_object_id,
+    term_referent_component_index,
+    term_referent_constructor_index
+  ),
+  -- exactly one of builtin / derived reference must be present
+  CHECK (
+    type_reference_builtin IS NULL =
+    type_reference_hash_id IS NOT NULL
+  ),
+  -- hash id and component index are present or absent together
+  CHECK (
+    type_reference_hash_id IS NULL =
+    type_reference_component_index IS NULL
+  )
+);
+
+CREATE TABLE find_type_mentions_index (
+  type_reference_builtin TEXT NULL,
+  type_reference_hash_id INTEGER NULL REFERENCES hash(id),
+  type_reference_component_index INTEGER NULL,
+  -- NOTE(review): named *_object_id but REFERENCES hash(id) — confirm intended
+  term_referent_object_id INTEGER NOT NULL REFERENCES hash(id),
+  -- renamed from term_referent_derived_component_index for consistency with
+  -- find_type_index above
+  term_referent_component_index INTEGER NOT NULL,
+  term_referent_constructor_index INTEGER NULL,
+  PRIMARY KEY(
+    type_reference_builtin,
+    type_reference_hash_id,
+    type_reference_component_index
+  ),
+  CHECK (
+    type_reference_builtin IS NULL =
+    type_reference_hash_id IS NOT NULL
+  ),
+  CHECK (
+    type_reference_hash_id IS NULL =
+    type_reference_component_index IS NULL
+  )
+);
+
+CREATE TABLE dependents_index (
+  dependency_builtin TEXT NULL,
+  dependency_object_id INTEGER NULL REFERENCES hash(id),
+  -- fixed: missing comma here was a syntax error
+  dependency_component_index INTEGER NULL,
+  dependent_object_id INTEGER NOT NULL REFERENCES hash(id),
+  dependent_component_index INTEGER NOT NULL,
+  -- fixed: CHECKs referenced type_reference_* columns from another table;
+  -- they must constrain this table's dependency_* columns
+  CHECK (
+    dependency_builtin IS NULL =
+    dependency_object_id IS NOT NULL
+  ),
+  CHECK (
+    dependency_object_id IS NULL =
+    dependency_component_index IS NULL
+  )
+);
+CREATE INDEX dependents_by_dependency ON dependents_index (
+  dependency_builtin,
+  dependency_object_id,
+  dependency_component_index
+);
+CREATE INDEX dependencies_by_dependent ON dependents_index (
+  dependent_object_id,
+  dependent_component_index
+);
diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql
new file mode 100644
index 
0000000000..48ecca5a52 --- /dev/null +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -0,0 +1,104 @@ +-- actually stores the 512-byte hashes +CREATE TABLE hash ( + id INTEGER PRIMARY KEY, + -- this would be the full hash, represented as base32 instead of bytes, + -- to optimize for looking them up by prefix. + base32 TEXT UNIQUE NOT NULL +); +CREATE INDEX hash_base32 ON hash(base32); + +-- just came up with this, a layer of indirection to allow multiple hash_ids to +-- reference the same object. +-- so: SELECT object.id, bytes FROM object +-- INNER JOIN hash_object ON object_id = object.id +-- INNER JOIN hash ON hash_id = hash.id +-- WHERE base32 LIKE 'a1b2c3%' +CREATE TABLE hash_object ( + -- hashes are UNIQUE; many hashes correspond to one object + -- (causal nodes are not considered objects atm) + hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), + object_id INTEGER NOT NULL REFERENCES object(id), + hash_version INTEGER NOT NULL +); +CREATE INDEX hash_object_hash_id ON hash_object(hash_id); +CREATE INDEX hash_object_object_id ON hash_object(object_id); + +-- this table is just documentation, it won't be used for joins. +CREATE TABLE object_type_description ( + id INTEGER UNIQUE NOT NULL, + description TEXT UNIQUE NOT NULL +); +INSERT INTO object_type_description (id, description) VALUES + (0, "Term Component"), -- foo x = x + 1 + (1, "Types of Term Component"), -- [Nat -> Nat] + (2, "Decl Component"), -- unique type Animal = Cat | Dog | Mouse + (3, "Namespace"), -- a one-level slice + (4, "Patch"), -- replace term #abc with term #def + (5, "Local Text/Object Lookup") + ; + +-- How should objects be linked to hashes? (and old hashes) +-- And which id should be linked into blobs? 
+-- a) object.id -- no: I ran into an issue in serializing a type annotation
+--               within a term; there wasn't enough info here to properly
+--               ser/des a type annotation that includes a self-ref, and I
+--               couldn't convince myself that the situation wouldn't come up
+-- b) hash.id -- ~~no: multiple hashes may refer to one object~~
+--            -- though, I guess that's true even when they are represented as
+--            -- inline bytestrings. so I'm going with this option.
+--
+CREATE TABLE object (
+  id INTEGER PRIMARY KEY,
+  -- fixed: the type-name must precede column constraints
+  -- ("UNIQUE INTEGER" is a syntax error; constraints follow the type)
+  primary_hash_id INTEGER UNIQUE NOT NULL REFERENCES hash(id),
+  type_id INTEGER NOT NULL REFERENCES object_type_description(id),
+  bytes BLOB NOT NULL
+);
+CREATE INDEX object_hash_id ON object(primary_hash_id);
+CREATE INDEX object_type_id ON object(type_id);
+
+-- `causal` references value hash ids instead of value ids, in case you want
+-- to be able to drop values and keep just the causal spine
+-- (i.e. drop the value objects while keeping their hashes).
+-- This implementation keeps the hash of the dropped values, although I could
+-- see an argument to drop them too and just use NULL, but I thought it better
+-- to not lose their identities.
+CREATE TABLE causal (
+  self_hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id),
+  -- intentionally not object_id, see above
+  value_hash_id INTEGER NOT NULL REFERENCES hash(id)
+);
+
+-- valueHash : Hash = hash(value)
+-- db.saveValue(valueHash, value)
+-- causalHash : Hash = hash(new Causal(valueHash, parentCausalHashes))
+-- db.saveCausal(selfHash = causalHash, valueHash, parentCausalHashes)
+
+CREATE TABLE causal_parent (
+  id INTEGER PRIMARY KEY NOT NULL,
+  causal_id INTEGER NOT NULL REFERENCES causal(self_hash_id),
+  parent_id INTEGER NOT NULL REFERENCES causal(self_hash_id),
+  UNIQUE(causal_id, parent_id)
+);
+CREATE INDEX causal_parent_causal_id ON causal_parent(causal_id);
+CREATE INDEX causal_parent_parent_id ON causal_parent(parent_id);
+
+-- associate old (e.g. 
v1) causal hashes with new causal hashes +CREATE TABLE causal_old ( + old_hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), + new_hash_id INTEGER NOT NULL REFERENCES hash(id) +); + +-- |Links a referent to its type's object +CREATE TABLE type_of_referent ( + object_id INTEGER NOT NULL REFERENCES object(id), + component_index INTEGER NOT NULL, + constructor_index INTEGER NULL, + bytes BLOB NOT NULL, + PRIMARY KEY (object_id, component_index, constructor_index) +); + +--CREATE TABLE type_of_referent ( +-- referent_derived_id INTEGER NOT NULL PRIMARY KEY REFERENCES referent_derived(id), +-- type_object_id INTEGER NOT NULL REFERENCES object(id) +--); +--CREATE INDEX type_of_referent_object_id ON type_of_referent(type_object_id); diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal new file mode 100644 index 0000000000..49e3b1fb7b --- /dev/null +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -0,0 +1,56 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. 
For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-codebase-sqlite +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Codebase.Sqlite.Branch.Format + U.Codebase.Sqlite.Branch.Full + U.Codebase.Sqlite.Branch.Diff + U.Codebase.Sqlite.Branch.MetadataSet + U.Codebase.Sqlite.Decl.Format + U.Codebase.Sqlite.Causal + U.Codebase.Sqlite.DbId + U.Codebase.Sqlite.LocalIds + U.Codebase.Sqlite.Patch.Format + U.Codebase.Sqlite.Patch.Full + U.Codebase.Sqlite.Patch.Diff + U.Codebase.Sqlite.Patch.TermEdit + U.Codebase.Sqlite.Patch.TypeEdit + U.Codebase.Sqlite.Reference + U.Codebase.Sqlite.Referent + U.Codebase.Sqlite.Serialization + U.Codebase.Sqlite.Term.Format + U.Codebase.Sqlite.Types + + -- other-modules: + -- other-extensions: + build-depends: + base, + bytes, + bytestring, + containers, + here, + lens, + mtl, + sqlite-simple, + text, + unliftio, + vector, + unison-codebase, + unison-core, + unison-util, + unison-util-serialization + hs-source-dirs: . 
+ default-language: Haskell2010 diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs new file mode 100644 index 0000000000..6149a95a73 --- /dev/null +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -0,0 +1,29 @@ +module U.Codebase.Branch where + +import Data.Map (Map) +import Data.Set (Set) +import Data.Text (Text) +import U.Codebase.Reference (Reference) +import U.Codebase.Referent (Referent) +import U.Codebase.TermEdit (TermEdit) +import U.Codebase.TypeEdit (TypeEdit) +import U.Util.Hash (Hash) + +newtype NameSegment = NameSegment Text +newtype EditHash = EditHash Hash +newtype CausalHash = CausalHash Hash +newtype BranchHash = BranchHash Hash +newtype MdValues = MdValues (Set Reference) +newtype PatchHash = PatchHash Hash + +data Branch m = Branch + { terms :: Map NameSegment (Map Referent (m MdValues)), + types :: Map NameSegment (Map Reference (m MdValues)), + patches :: Map NameSegment (PatchHash, m Patch), + children :: Map NameSegment (m (Branch m)) + } + +data Patch = Patch + { termEdits :: Map Referent TermEdit, + typeEdits :: Map Reference TypeEdit + } \ No newline at end of file diff --git a/codebase2/codebase/U/Codebase/Causal.hs b/codebase2/codebase/U/Codebase/Causal.hs new file mode 100644 index 0000000000..c4c0875d94 --- /dev/null +++ b/codebase2/codebase/U/Codebase/Causal.hs @@ -0,0 +1,11 @@ +module U.Codebase.Causal where + +import Data.Map (Map) + +-- | Causal doesn't necessarily pre-load anything other than some hashes. 
+data Causal m hc he e = Causal + { causalHash :: hc, + valueHash :: he, + parents :: Map hc (m (Causal m hc he e)), + value :: m (Maybe e) + } diff --git a/codebase2/codebase/U/Codebase/Codebase.hs b/codebase2/codebase/U/Codebase/Codebase.hs new file mode 100644 index 0000000000..f4d9fc000a --- /dev/null +++ b/codebase2/codebase/U/Codebase/Codebase.hs @@ -0,0 +1,81 @@ +{-# LANGUAGE RankNTypes #-} +module U.Codebase.Codebase where + +import qualified U.Codebase.Reference as Reference +import qualified U.Codebase.Referent as Referent +import U.Codebase.Causal (Causal) +import U.Codebase.Reference (Reference) +import U.Codebase.Term (Term) +import U.Codebase.Type (TypeT) +import U.Codebase.Decl (Decl) +import U.Codebase.WatchKind (WatchKind) +import U.Codebase.Branch (Branch) +import qualified U.Codebase.Reflog as Reflog +import U.Codebase.ShortHash (ShortBranchHash, ShortHash) +import U.Util.Hash (Hash) +import Data.Text (Text) +import Data.Set (Set) + +newtype BranchHash = BranchHash Hash +newtype CausalHash = CausalHash Hash +newtype CodebasePath = CodebasePath FilePath +data SyncMode = SyncShortCircuit | SyncComplete + +data Codebase m v = Codebase { + getTerm :: Reference.Id -> m (Maybe (Term v)), + getTypeOfTerm :: Reference.Id -> m (Maybe (TypeT v)), + getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v)), + + putTerm :: Reference.Id -> Term v -> TypeT v -> m (), + putTypeDeclaration :: Reference.Id -> Decl v -> m (), + + getBranch :: BranchHash -> m (Maybe (Branch m)), + getRootBranch :: m (Either GetRootBranchError (Branch m)), + putRootBranch :: Branch m -> m (), + + getBranchForCausal :: CausalHash -> m (Maybe (Branch m)), + + -- |Supports syncing from a current or older codebase format + syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), + -- |Only writes the latest codebase format + syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), + -- ^ maybe return type needs to reflect failure if remote codebase has an old version + 
+ -- |Watch expressions are part of the codebase, the `Reference.Id` is + -- the hash of the source of the watch expression, and the `Term v a` + -- is the evaluated result of the expression, decompiled to a term. + watches :: WatchKind -> m [Reference.Id], + getWatch :: WatchKind -> Reference.Id -> m (Maybe (Term v)), + putWatch :: WatchKind -> Reference.Id -> Term v -> m (), + + getReflog :: m [Reflog.Entry], + appendReflog :: Text -> Branch m -> Branch m -> m (), + + -- |the nicely-named versions will utilize these, and add builtins to the result set + termsHavingType :: Reference -> m (Set Referent.Id), + termsMentioningType :: Reference -> m (Set Referent.Id), + + -- |number of base58 characters needed to distinguish any two hashes in the codebase; + -- we don't have to compute it separately for different namespaces + hashLength :: m Int, + termReferencesByPrefix :: ShortHash -> m (Set Reference.Id), + typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id), + termReferentsByPrefix :: ShortHash -> m (Set Referent.Id), + branchHashesByPrefix :: ShortBranchHash -> m (Set BranchHash), + + -- + lca :: (forall he e. 
[Causal m CausalHash he e] -> m (Maybe BranchHash)), + dependents :: Reference -> m (Maybe (Set Reference.Id)), + termDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), + declDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)) --, + -- -- |terms, types, patches, and branches + -- branchDependencies :: + -- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), + -- -- |the "new" terms and types mentioned in a patch + -- patchDependencies :: EditHash -> m (Set Reference, Set Reference) +} + +data GetRootBranchError + = NoRootBranch + | CouldntLoadRootBranch Hash + deriving Show diff --git a/codebase2/codebase/U/Codebase/Decl.hs b/codebase2/codebase/U/Codebase/Decl.hs new file mode 100644 index 0000000000..1c662c7b04 --- /dev/null +++ b/codebase2/codebase/U/Codebase/Decl.hs @@ -0,0 +1,27 @@ +module U.Codebase.Decl where + +import Data.Word (Word64) +import U.Codebase.Reference (Reference') +import Data.Text (Text) +import U.Util.Hash (Hash) +import U.Codebase.Type (TypeR) + +type ConstructorId = Word64 + +data DeclType = Data | Effect + deriving (Eq, Ord, Show, Enum) + +type Decl v = DeclR (Reference' Text Hash) v + +data Modifier = Structural | Unique Text -- | Opaque (Set Reference) + deriving (Eq, Ord, Show) + +data DeclR r v = DataDeclaration { + declType :: DeclType, + modifier :: Modifier, + bound :: [v], + constructors' :: [TypeR r v] +} + +-- instance Hashable ConstructorType where +-- tokens b = [Tag . 
fromIntegral $ fromEnum b] diff --git a/unison-core/src/Unison/Kind.hs b/codebase2/codebase/U/Codebase/Kind.hs similarity index 65% rename from unison-core/src/Unison/Kind.hs rename to codebase2/codebase/U/Codebase/Kind.hs index 531ff42268..0b201b3765 100644 --- a/unison-core/src/Unison/Kind.hs +++ b/codebase2/codebase/U/Codebase/Kind.hs @@ -1,11 +1,10 @@ {-# LANGUAGE DeriveGeneric #-} -module Unison.Kind where +module U.Codebase.Kind where -import Unison.Prelude - -import Unison.Hashable (Hashable) -import qualified Unison.Hashable as Hashable +import U.Util.Hashable (Hashable) +import qualified U.Util.Hashable as Hashable +import GHC.Generics (Generic) data Kind = Star | Arrow Kind Kind deriving (Eq,Ord,Read,Show,Generic) diff --git a/codebase2/codebase/U/Codebase/Reference.hs b/codebase2/codebase/U/Codebase/Reference.hs new file mode 100644 index 0000000000..461c420e0e --- /dev/null +++ b/codebase2/codebase/U/Codebase/Reference.hs @@ -0,0 +1,31 @@ +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeSynonymInstances #-} + +module U.Codebase.Reference where + +import Data.Text (Text) +import Data.Word (Word64) +import qualified U.Util.Hash as Hash +import U.Util.Hash (Hash) +import U.Util.Hashable (Hashable (..)) +import qualified U.Util.Hashable as Hashable + +-- |This is the canonical representation of Reference +type Reference = Reference' Text Hash +type Id = Id' Hash + +data Reference' t h + = ReferenceBuiltin t + | ReferenceDerived (Id' h) + deriving (Eq, Ord, Show, Functor) + +type ComponentIndex = Word64 +data Id' h = Id h ComponentIndex + deriving (Eq, Ord, Show, Functor) + +instance Hashable Reference where + tokens (ReferenceBuiltin txt) = + [Hashable.Tag 0, Hashable.Text txt] + tokens (ReferenceDerived (Id h i)) = + [Hashable.Tag 1, Hashable.Bytes (Hash.toBytes h), Hashable.Nat i] diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs new file mode 100644 index 
0000000000..705bb6164c --- /dev/null +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -0,0 +1,33 @@ +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeSynonymInstances #-} + +module U.Codebase.Referent where + +import Data.Text (Text) +import U.Codebase.Reference (Reference') +import qualified U.Codebase.Reference as Reference +import U.Util.Hash (Hash) +import U.Util.Hashable (Hashable (..)) +import Data.Word (Word64) +import qualified U.Util.Hashable as Hashable + +type Referent = Referent' (Reference' Text Hash) (Reference' Text Hash) +type ConstructorIndex = Word64 + + +data Referent' rTm rTp + = Ref rTm + | Con rTp ConstructorIndex + deriving (Eq, Ord) + +instance Hashable Referent where + tokens (Ref r) = Hashable.Tag 0 : Hashable.tokens r + tokens (Con r i) = [Hashable.Tag 1] ++ Hashable.tokens r ++ [Hashable.Nat (fromIntegral i)] + +type Id = Id' Hash Hash +data Id' hTm hTp + = RefId (Reference.Id' hTm) + | ConId (Reference.Id' hTp) ConstructorIndex + deriving (Eq, Ord, Functor) + diff --git a/codebase2/codebase/U/Codebase/Reflog.hs b/codebase2/codebase/U/Codebase/Reflog.hs new file mode 100644 index 0000000000..e70567ba8a --- /dev/null +++ b/codebase2/codebase/U/Codebase/Reflog.hs @@ -0,0 +1,21 @@ +{-# LANGUAGE ViewPatterns #-} + +module U.Codebase.Reflog where + +import Data.Text (Text) +import U.Codebase.Branch (BranchHash) + +data Entry = Entry {from :: BranchHash, to :: BranchHash, reason :: Text} + +-- fromText :: Text -> Maybe Entry +-- fromText t = +-- case Text.words t of +-- (Hash.fromBase32Hex -> Just old) : (Hash.fromBase32Hex -> Just new) : (Text.unwords -> reason) -> +-- Just $ Entry (Causal.RawHash old) (Causal.RawHash new) reason +-- _ -> Nothing + +-- toText :: Entry -> Text +-- toText (Entry old new reason) = +-- Text.unwords [ Hash.base32Hex . Causal.unRawHash $ old +-- , Hash.base32Hex . 
Causal.unRawHash $ new +-- , reason ] diff --git a/codebase2/codebase/U/Codebase/ShortHash.hs b/codebase2/codebase/U/Codebase/ShortHash.hs new file mode 100644 index 0000000000..548c6d9166 --- /dev/null +++ b/codebase2/codebase/U/Codebase/ShortHash.hs @@ -0,0 +1,15 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RecordWildCards #-} + +module U.Codebase.ShortHash where + +import Data.Text (Text) +import Data.Word (Word64) + +data ShortHash + = Builtin Text + | ShortHash { prefix :: Text, cycle :: Maybe Word64, cid :: Maybe Word64 } + deriving (Eq, Ord, Show) + +data ShortBranchHash = ShortBranchHash { toText :: Text } deriving (Eq, Ord, Show) diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs new file mode 100644 index 0000000000..d03fcfe976 --- /dev/null +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -0,0 +1,131 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE UnicodeSyntax #-} +{-# LANGUAGE ViewPatterns #-} + +module U.Codebase.Term where + +import Data.Int (Int64) +import Data.Sequence (Seq) +import Data.Text (Text) +import Data.Word (Word64) +import U.Util.Hash (Hash) +import GHC.Generics (Generic, Generic1) +import U.Codebase.Reference (Reference, Reference') +import U.Codebase.Referent (Referent') +import U.Codebase.Type (TypeR) +import qualified U.Core.ABT as ABT +import qualified U.Util.Hashable as H + +type ConstructorId = Word64 + +type Term v = ABT.Term (F v) v () + +-- | Base functor for terms in the Unison codebase +type F vt = + F' + Text -- text + (Reference' Text (Maybe Hash)) -- termRef + Reference -- typeRef + (Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash)) -- termLink + Reference -- typeLink + vt + +-- | Generalized version. 
We could generalize further to allow sharing within +-- terms. +data F' text termRef typeRef termLink typeLink vt a + = Int Int64 + | Nat Word64 + | Float Double + | Boolean Bool + | Text text + | Char Char + | Ref termRef + | -- First argument identifies the data type, + -- second argument identifies the constructor + Constructor typeRef ConstructorId + | Request typeRef ConstructorId + | Handle a a + | App a a + | Ann a (TypeR typeRef vt) + | Sequence (Seq a) + | If a a a + | And a a + | Or a a + | Lam a + | -- Note: let rec blocks have an outer ABT.Cycle which introduces as many + -- variables as there are bindings + LetRec [a] a + | -- Note: first parameter is the binding, second is the expression which may refer + -- to this let bound variable. Constructed as `Let b (abs v e)` + Let a a + | -- Pattern matching / eliminating data types, example: + -- case x of + -- Just n -> rhs1 + -- Nothing -> rhs2 + -- + -- translates to + -- + -- Match x + -- [ (Constructor 0 [Var], ABT.abs n rhs1) + -- , (Constructor 1 [], rhs2) ] + Match a [MatchCase typeRef a] + | TermLink termLink + | TypeLink typeLink + deriving (Foldable, Functor, Traversable) + +data MatchCase r a = MatchCase (Pattern r) (Maybe a) a + deriving (Foldable, Functor, Generic, Generic1, Traversable) + +data Pattern r + = PUnbound + | PVar + | PBoolean !Bool + | PInt !Int64 + | PNat !Word64 + | PFloat !Double + | PText !Text + | PChar !Char + | PConstructor !r !Int [Pattern r] + | PAs (Pattern r) + | PEffectPure (Pattern r) + | PEffectBind !r !Int [Pattern r] (Pattern r) + | PSequenceLiteral [Pattern r] + | PSequenceOp (Pattern r) !SeqOp (Pattern r) + deriving (Generic, Functor, Foldable, Traversable) + +data SeqOp + = PCons + | PSnoc + | PConcat + deriving (Eq, Show) + +-- rmap :: +-- (termRef -> termRef') -> +-- (typeRef -> typeRef') -> +-- (termLink -> termLink') -> +-- TermR termRef typeRef termLink typeRef (TypeR typeRef vt at) blankRepr ap v a -> +-- TermR termRef' typeRef' termLink' typeRef' (TypeR 
typeRef' vt at) blankRepr ap v a +-- rmap fTermRef fTypeRef fTermLink t = +-- extraMap fTermRef fTypeRef fTermLink fTypeRef (Type.rmap fTypeRef) undefined id t + +-- rmapPattern :: (r -> r') -> Pattern r loc -> Pattern r' loc +-- rmapPattern f = \case +-- PConstructor loc r i ps -> PConstructor loc (f r) i (rmap f <$> ps) +-- PAs loc p -> PAs loc (rmap f p) +-- PEffectPure loc p -> PEffectPure loc (rmap f p) +-- PEffectBind loc r i ps p -> PEffectBind loc (f r) i (rmap f <$> ps) (rmap f p) +-- PSequenceLiteral loc ps -> PSequenceLiteral loc (rmap f <$> ps) +-- PSequenceOp loc p1 op p2 -> PSequenceOp loc (rmap f p1) op (rmap f p2) +-- -- cover all cases having references or subpatterns above; the rest are fine +-- x -> unsafeCoerce x + +instance H.Hashable SeqOp where + tokens PCons = [H.Tag 0] + tokens PSnoc = [H.Tag 1] + tokens PConcat = [H.Tag 2] \ No newline at end of file diff --git a/codebase2/codebase/U/Codebase/TermEdit.hs b/codebase2/codebase/U/Codebase/TermEdit.hs new file mode 100644 index 0000000000..625cb4cb72 --- /dev/null +++ b/codebase2/codebase/U/Codebase/TermEdit.hs @@ -0,0 +1,24 @@ +module U.Codebase.TermEdit where + +import U.Util.Hashable (Hashable) +import U.Codebase.Reference (Reference) +import qualified U.Util.Hashable as H + +data TermEdit = Replace Reference Typing | Deprecate + deriving (Eq, Ord, Show) + +-- Replacements with the Same type can be automatically propagated. +-- Replacements with a Subtype can be automatically propagated but may result in dependents getting more general types, so requires re-inference. +-- Replacements of a Different type need to be manually propagated by the programmer. 
+data Typing = Same | Subtype | Different + deriving (Eq, Ord, Show) + +instance Hashable Typing where + tokens Same = [H.Tag 0] + tokens Subtype = [H.Tag 1] + tokens Different = [H.Tag 2] + +instance Hashable TermEdit where + tokens (Replace r t) = [H.Tag 0] ++ H.tokens r ++ H.tokens t + tokens Deprecate = [H.Tag 1] + diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs new file mode 100644 index 0000000000..5651ca5c12 --- /dev/null +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -0,0 +1,49 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ViewPatterns #-} + +module U.Codebase.Type where + +import qualified U.Core.ABT as ABT +import U.Codebase.Reference (Reference, Reference') +import Data.Text (Text) +import U.Util.Hash (Hash) +import U.Codebase.Kind (Kind) +import Unsafe.Coerce (unsafeCoerce) + +-- | For standalone types, like those in Term.Ann +type FT = F' Reference + +-- | For potentially recursive types, like those in DataDeclaration +type FD = F' (Reference' Text (Maybe Hash)) + +data F' r a + = Ref r + | Arrow a a + | Ann a Kind + | App a a + | Effect a a + | Effects [a] + | Forall a + | IntroOuter a -- binder like ∀, used to introduce variables that are + -- bound by outer type signatures, to support scoped type + -- variables + deriving (Foldable, Functor, Eq, Ord, Traversable) + +-- | Types are represented as ABTs over the base functor F, with variables in `v` +type TypeT v = ABT.Term FT v () + +type TypeD v = ABT.Term FD v () + +type TypeR r v = ABT.Term (F' r) v () + +rmap :: (r -> r') -> ABT.Term (F' r) v a -> ABT.Term (F' r') v a +rmap f = ABT.extraMap $ \case + Ref r -> Ref (f r) + x -> unsafeCoerce x \ No newline at end of file diff --git 
a/parser-typechecker/src/Unison/Codebase/TypeEdit.hs b/codebase2/codebase/U/Codebase/TypeEdit.hs similarity index 72% rename from parser-typechecker/src/Unison/Codebase/TypeEdit.hs rename to codebase2/codebase/U/Codebase/TypeEdit.hs index a6d2cd665c..2ca0889a04 100644 --- a/parser-typechecker/src/Unison/Codebase/TypeEdit.hs +++ b/codebase2/codebase/U/Codebase/TypeEdit.hs @@ -1,8 +1,8 @@ -module Unison.Codebase.TypeEdit where +module U.Codebase.TypeEdit where -import Unison.Reference (Reference) -import Unison.Hashable (Hashable) -import qualified Unison.Hashable as H +import U.Codebase.Reference (Reference) +import U.Util.Hashable (Hashable) +import qualified U.Util.Hashable as H data TypeEdit = Replace Reference | Deprecate deriving (Eq, Ord, Show) diff --git a/codebase2/codebase/U/Codebase/WatchKind.hs b/codebase2/codebase/U/Codebase/WatchKind.hs new file mode 100644 index 0000000000..13df429b13 --- /dev/null +++ b/codebase2/codebase/U/Codebase/WatchKind.hs @@ -0,0 +1,3 @@ +module U.Codebase.WatchKind where + +data WatchKind = RegularWatch | TestWatch deriving (Eq, Ord, Show) \ No newline at end of file diff --git a/codebase2/codebase/unison-codebase.cabal b/codebase2/codebase/unison-codebase.cabal new file mode 100644 index 0000000000..c13c3a09fe --- /dev/null +++ b/codebase2/codebase/unison-codebase.cabal @@ -0,0 +1,41 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. 
For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-codebase +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Codebase.Branch + U.Codebase.Causal + U.Codebase.Codebase + U.Codebase.Decl + U.Codebase.Kind + U.Codebase.Reference + U.Codebase.Referent + U.Codebase.Reflog + U.Codebase.ShortHash + U.Codebase.Term + U.Codebase.TermEdit + U.Codebase.Type + U.Codebase.TypeEdit + U.Codebase.WatchKind + -- other-modules: + -- other-extensions: + build-depends: + base, + containers, + text, + unison-core, + unison-util + hs-source-dirs: . + default-language: Haskell2010 diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs new file mode 100644 index 0000000000..58de733038 --- /dev/null +++ b/codebase2/core/U/Core/ABT.hs @@ -0,0 +1,53 @@ +{-# LANGUAGE LambdaCase #-} +-- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ViewPatterns #-} + +module U.Core.ABT where + +import Data.Set (Set) +import qualified Data.Set as Set +import qualified Data.Foldable as Foldable + +data ABT f v r + = Var v + | Cycle r + | Abs v r + | Tm (f r) deriving (Show, Functor, Foldable, Traversable) + +-- | At each level in the tree, we store the set of free variables and +-- a value of type `a`. Variables are of type `v`. +data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a) } + +-- instance (Show1 f, Show v) => Show (Term f v a) where +-- -- annotations not shown +-- showsPrec p (Term _ _ out) = case out of +-- Var v -> \x -> "Var " ++ show v ++ x +-- Cycle body -> ("Cycle " ++) . 
showsPrec p body +-- Abs v body -> showParen True $ (show v ++) . showString ". " . showsPrec p body +-- Tm f -> showsPrec1 p f + +extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a +extraMap p (Term fvs a sub) = Term fvs a (go p sub) where + go :: Functor g => (forall k . f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) + go p = \case + Var v -> Var v + Cycle r -> Cycle (extraMap p r) + Abs v r -> Abs v (extraMap p r) + Tm x -> Tm (fmap (extraMap p) (p x)) + +abs :: Ord v => a -> v -> Term f v a -> Term f v a +abs a v body = Term (Set.delete v (freeVars body)) a (Abs v body) + +annotatedVar :: a -> v -> Term f v a +annotatedVar a v = Term (Set.singleton v) a (Var v) + +cycle :: a -> Term f v a -> Term f v a +cycle a t = Term (freeVars t) a (Cycle t) + +tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a +tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) diff --git a/codebase2/core/unison-core.cabal b/codebase2/core/unison-core.cabal new file mode 100644 index 0000000000..49e5d4660f --- /dev/null +++ b/codebase2/core/unison-core.cabal @@ -0,0 +1,31 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-core +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Core.ABT + -- U.Core.Reference + -- U.Core.Referent + -- U.Core.Term + -- U.Core2.Referent.Short + -- other-modules: + -- other-extensions: + build-depends: + base, + containers, + text, + unison-util + hs-source-dirs: . 
+ default-language: Haskell2010 diff --git a/codebase2/editor/U/Editor/Codebase.hs b/codebase2/editor/U/Editor/Codebase.hs new file mode 100644 index 0000000000..7c9ec5a6f1 --- /dev/null +++ b/codebase2/editor/U/Editor/Codebase.hs @@ -0,0 +1,57 @@ +module U.Editor.Codebase where + +-- data Codebase m v a = Codebase { +-- getTerm :: Reference.Id -> m (Maybe (Term v a)), +-- getTypeOfTermImpl :: Reference.Id -> m (Maybe (Type v a)), +-- getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v a)), + +-- putTerm :: Reference.Id -> Term v a -> Type v a -> m (), +-- putTypeDeclaration :: Reference.Id -> Decl v a -> m (), + +-- getBranch :: Branch.Hash -> m (Maybe (Branch m)), +-- getRootBranch :: m (Either GetRootBranchError (Branch m)), +-- putRootBranch :: Branch m -> m (), + +-- rootBranchUpdates :: m (m (), m (Set Branch.Hash), +-- getBranchForCausal :: Branch.CausalHash -> m (Maybe (Branch m)), + +-- -- --|Supports syncing from a current or older codebase format +-- syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), +-- -- -- |Only writes the latest codebase format +-- syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), +-- -- -- ^ maybe return type needs to reflect failure if remote codebase has an old version + +-- -- -- |Watch expressions are part of the codebase, the `Reference.Id` is +-- -- the hash of the source of the watch expression, and the `Term v a` +-- -- is the evaluated result of the expression, decompiled to a term. 
+-- watches :: UF.WatchKind -> m [Reference.Id], +-- getWatch :: UF.WatchKind -> Reference.Id -> m (Maybe (Term v a)), +-- putWatch :: UF.WatchKind -> Reference.Id -> Term v a -> m (), + +-- getReflog :: m [Reflog.Entry], +-- appendReflog :: Text -> Branch m -> Branch m -> m (), + +-- -- -- |the nicely-named versions will utilize these, and add builtins to the result set +-- termsHavingType_impl :: Reference -> m (Set Referent.Id), +-- termsMentioningType_impl :: Reference -> m (Set Referent.Id), + +-- -- -- |number of base58 characters needed to distinguish any two hashes in the codebase; +-- -- we don't have to compute it separately for different namespaces +-- hashLength :: m Int, +-- termReferencesByPrefix :: ShortHash -> m (Set Reference.Id), +-- typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id), +-- termReferentsByPrefix :: ShortHash -> m (Set Referent.Id), +-- branchHashesByPrefix :: ShortBranchHash -> m (Set Branch.Hash), + +-- -- +-- lca :: [Causal m Branch.Raw e] -> m (Maybe Branch.Hash), +-- dependentsImpl :: Reference -> m (Maybe (Set Reference.Id)), +-- termDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), +-- declDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), +-- -- -- |terms, types, patches, and branches +-- branchDependencies :: +-- Branch.Hash -> m (Maybe (Branch.CausalHash, BD.Dependencies)), +-- -- -- |the "new" terms and types mentioned in a patch +-- patchDependencies :: EditHash -> m (Set Reference, Set Reference) + +-- } \ No newline at end of file diff --git a/codebase2/editor/unison-editor.cabal b/codebase2/editor/unison-editor.cabal new file mode 100644 index 0000000000..cac5476005 --- /dev/null +++ b/codebase2/editor/unison-editor.cabal @@ -0,0 +1,27 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. 
For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-editor +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + -- U.Editor.Codebase + -- other-modules: + -- other-extensions: + build-depends: + base, + unison-codebase, + unison-language, + unison-runtime + hs-source-dirs: . + default-language: Haskell2010 diff --git a/unison-core/src/Unison/Blank.hs b/codebase2/language/U/Language/Blank.hs similarity index 76% rename from unison-core/src/Unison/Blank.hs rename to codebase2/language/U/Language/Blank.hs index d10199db95..0832dda35b 100644 --- a/unison-core/src/Unison/Blank.hs +++ b/codebase2/language/U/Language/Blank.hs @@ -1,4 +1,7 @@ -module Unison.Blank where +{-# LANGUAGE DeriveFunctor #-} +-- |This is clearly an aspect of type-checking only, but included as a +-- dependency of Term.F +module U.Language.Blank where loc :: Recorded loc -> loc loc (Placeholder loc _) = loc diff --git a/codebase2/language/unison-language.cabal b/codebase2/language/unison-language.cabal new file mode 100644 index 0000000000..28d319759f --- /dev/null +++ b/codebase2/language/unison-language.cabal @@ -0,0 +1,27 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-language +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Language.Blank + -- U.Core.Referent.Short + -- other-modules: + -- other-extensions: + build-depends: + base, + unison-core, + unison-syntax + hs-source-dirs: . 
+ default-language: Haskell2010 diff --git a/codebase2/notes.txt b/codebase2/notes.txt new file mode 100644 index 0000000000..83d3d50993 --- /dev/null +++ b/codebase2/notes.txt @@ -0,0 +1,3 @@ +Decision: + Try to reuse ABT.hashComponent stuff as-is? + Or rejigger it to be more general/lovely/etc.? <-- choosing \ No newline at end of file diff --git a/codebase2/runtime/U/Runtime/CodeLookup.hs b/codebase2/runtime/U/Runtime/CodeLookup.hs new file mode 100644 index 0000000000..a0878ffb72 --- /dev/null +++ b/codebase2/runtime/U/Runtime/CodeLookup.hs @@ -0,0 +1 @@ +module U.Runtime.CodeLookup where \ No newline at end of file diff --git a/codebase2/runtime/unison-runtime.cabal b/codebase2/runtime/unison-runtime.cabal new file mode 100644 index 0000000000..5d6572c756 --- /dev/null +++ b/codebase2/runtime/unison-runtime.cabal @@ -0,0 +1,28 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-runtime +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Runtime.CodeLookup + -- other-modules: + -- other-extensions: + build-depends: + base, + containers, + text, + unison-core + -- unison-util + hs-source-dirs: . + default-language: Haskell2010 diff --git a/codebase2/syntax/unison-syntax.cabal b/codebase2/syntax/unison-syntax.cabal new file mode 100644 index 0000000000..bd05aa8040 --- /dev/null +++ b/codebase2/syntax/unison-syntax.cabal @@ -0,0 +1,25 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. 
For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-syntax +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + -- U.Syntax2 + -- other-modules: + -- other-extensions: + build-depends: + base, + unison-core + hs-source-dirs: . + default-language: Haskell2010 diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs new file mode 100644 index 0000000000..7d107a68cd --- /dev/null +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -0,0 +1,191 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE ViewPatterns #-} + +module U.Util.Serialization where + +import Control.Monad (replicateM) +import Data.Bits (setBit, shiftR, clearBit, (.|.), shiftL, testBit, Bits) +import Data.ByteString (ByteString, readFile, writeFile) +import qualified Data.ByteString as BS +import qualified Data.ByteString.Short as BSS +import Data.ByteString.Short (ShortByteString) +import Data.Bytes.Get (getWord8, MonadGet, getByteString, getBytes, runGetS, skip) +import Data.Bytes.Put (putWord8, MonadPut, putByteString, runPutS) +import Data.Bytes.VarInt (VarInt(VarInt)) +import Data.Foldable (Foldable (toList), traverse_) +import Data.List.Extra (dropEnd) +import Data.Text (Text) +import Data.Text.Encoding (decodeUtf8, encodeUtf8) +import Data.Vector (Vector) +import qualified Data.Vector as Vector +import Data.Text.Short (ShortText) +import qualified Data.Text.Short as TS +import qualified Data.Text.Short.Unsafe as TSU +import System.FilePath (takeDirectory) +import UnliftIO (MonadIO, liftIO) +import UnliftIO.Directory (createDirectoryIfMissing, doesFileExist) +import Prelude hiding (readFile, 
writeFile) +import Data.Word (Word8) + +type Get a = forall m. MonadGet m => m a +type Put a = forall m. MonadPut m => a -> m () + + +-- todo: do we use this? +data Format a = Format + { get :: Get a, + put :: Put a + } + +getFromBytes :: Get a -> ByteString -> Maybe a +getFromBytes getA bytes = + case runGetS getA bytes of Left _ -> Nothing; Right a -> Just a + +getFromFile :: MonadIO m => Get a -> FilePath -> m (Maybe a) +getFromFile getA file = do + b <- doesFileExist file + if b then getFromBytes getA <$> liftIO (readFile file) else pure Nothing + +getFromFile' :: MonadIO m => Get a -> FilePath -> m (Either String a) +getFromFile' getA file = do + b <- doesFileExist file + if b + then runGetS getA <$> liftIO (readFile file) + else pure . Left $ "No such file: " ++ file + +putBytes :: Put a -> a -> ByteString +putBytes put a = runPutS (put a) + +putWithParentDirs :: MonadIO m => Put a -> FilePath -> a -> m () +putWithParentDirs putA file a = do + createDirectoryIfMissing True (takeDirectory file) + liftIO . writeFile file $ putBytes putA a + +putVarInt :: (MonadPut m, Integral a, Bits a) => a -> m () +putVarInt n + | n < 0x80 = putWord8 $ fromIntegral n + | otherwise = do + putWord8 $ setBit (fromIntegral n) 7 + putVarInt $ shiftR n 7 +{-# INLINE putVarInt #-} + +getVarInt :: (MonadGet m, Num b, Bits b) => m b +getVarInt = getWord8 >>= getVarInt + where + getVarInt :: (MonadGet m, Num b, Bits b) => Word8 -> m b + getVarInt n + | testBit n 7 = do + VarInt m <- getWord8 >>= getVarInt + return $ shiftL m 7 .|. 
clearBit (fromIntegral n) 7 + | otherwise = return $ fromIntegral n + {-# INLINE getVarInt #-} +{-# INLINE getVarInt #-} + +putText :: MonadPut m => Text -> m () +putText text = do + let bs = encodeUtf8 text + putVarInt $ BS.length bs + putByteString bs + +getText :: MonadGet m => m Text +getText = do + len <- getVarInt + bs <- BS.copy <$> getBytes len + pure $ decodeUtf8 bs + +skipText :: MonadGet m => m () +skipText = skip =<< getVarInt + +putShortText :: MonadPut m => ShortText -> m () +putShortText text = do + let sbs = TS.toShortByteString text + putVarInt $ BSS.length sbs + putShortByteString sbs + +getShortText :: MonadGet m => m ShortText +getShortText = do + len <- getVarInt + sbs <- getShortByteString len + pure $ TSU.fromShortByteStringUnsafe sbs + +-- | the `binary` package has a native version of this, +-- which may be more efficient by a constant factor +putShortByteString :: MonadPut m => ShortByteString -> m () +putShortByteString = putByteString . BSS.fromShort + +-- | the `binary` package has a native version of this, +-- which may be more efficient by a constant factor +getShortByteString :: MonadGet m => Int -> m ShortByteString +getShortByteString len = BSS.toShort <$> getByteString len + +putFoldable :: + (Foldable f, MonadPut m) => (a -> m ()) -> f a -> m () +putFoldable putA as = do + putVarInt (length as) + traverse_ putA as + +getList :: MonadGet m => m a -> m [a] +getList getA = do + length <- getVarInt + replicateM length getA + +getVector :: MonadGet m => m a -> m (Vector a) +getVector getA = do + length <- getVarInt + Vector.replicateM length getA + +getFramed :: MonadGet m => Get a -> m (Maybe a) +getFramed get = do + size <- getVarInt + bytes <- getByteString size + pure $ getFromBytes get bytes + +putFramed :: MonadPut m => Put a -> a -> m () +putFramed put a = do + -- 1. figure out the length `len` of serialized `a` + -- 2. Put the length `len` + -- 3. 
Put `a` + let bs = putBytes put a + putVarInt (BS.length bs) + putByteString bs + +skipFramed :: MonadGet m => m () +skipFramed = do + len <- getVarInt + skip len + +putFramedArray :: (MonadPut m, Foldable f) => Put a -> f a -> m () +putFramedArray put (toList -> as) = do + let bss = fmap (putBytes put) as + let lengths = fmap BS.length bss + let offsets = scanl (+) 0 (dropEnd 1 lengths) + putFoldable putVarInt offsets + traverse_ putByteString bss + +getFramedArray :: MonadGet m => m a -> m (Vector a) +getFramedArray getA = do + offsets :: [Int] <- getList getVarInt + let count = length offsets - 1 + Vector.replicateM count getA + +-- | Look up a 0-based index in a framed array, O(num array elements), +-- because it reads the start indices for all elements first. +-- This could be skipped if the indices had a fixed size instead of varint +framedArrayLookup :: MonadGet m => Get a -> Int -> m (Maybe a) +framedArrayLookup getA index = do + offsets <- getVector getVarInt + if index > Vector.length offsets + then pure Nothing + else do + skip (Vector.unsafeIndex offsets index) + Just <$> getA + +unsafeFramedArrayLookup :: MonadGet m => Get a -> Int -> m a +unsafeFramedArrayLookup getA index = do + offsets <- getVector getVarInt + skip (Vector.unsafeIndex offsets index) + getA \ No newline at end of file diff --git a/codebase2/util-serialization/unison-util-serialization.cabal b/codebase2/util-serialization/unison-util-serialization.cabal new file mode 100644 index 0000000000..45c782ac26 --- /dev/null +++ b/codebase2/util-serialization/unison-util-serialization.cabal @@ -0,0 +1,33 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. 
For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-util-serialization +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Util.Serialization + -- other-modules: + -- other-extensions: + build-depends: + base, + bytes, + bytestring, + containers, + extra, + filepath, + text, + text-short, + unliftio, + vector + hs-source-dirs: . + default-language: Haskell2010 diff --git a/codebase2/util/U/Util/Base32Hex.hs b/codebase2/util/U/Util/Base32Hex.hs new file mode 100644 index 0000000000..3e1110a996 --- /dev/null +++ b/codebase2/util/U/Util/Base32Hex.hs @@ -0,0 +1,63 @@ +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE OverloadedStrings #-} + + +module U.Util.Base32Hex + (Base32Hex, fromByteString, toByteString, toText, textToByteString) +where + +import Data.Text (Text) +import qualified Codec.Binary.Base32Hex +import Data.ByteString (ByteString) +import qualified Data.Text as Text +import Data.Text.Encoding (encodeUtf8, decodeUtf8) +import Data.Maybe (fromJust) + +newtype Base32Hex = Base32Hex { toText :: Text } + deriving (Eq, Ord, Show) + +-- | Return the lowercase unpadded base32Hex encoding of this 'ByteString'. +-- Multibase prefix would be 'v', see https://github.com/multiformats/multibase +fromByteString :: ByteString -> Base32Hex +fromByteString bs = + -- we're using an uppercase encoder that adds padding, so we drop the + -- padding and convert it to lowercase + Base32Hex . Text.toLower . Text.dropWhileEnd (== '=') . decodeUtf8 $ + Codec.Binary.Base32Hex.encode bs + +-- by not exporting the Base32Hex constructor, we can trust that it's valid +toByteString :: Base32Hex -> ByteString +toByteString = fromJust . textToByteString . 
toText + +-- | Produce a 'Hash' from a base32hex-encoded version of its binary representation +textToByteString :: Text -> Maybe ByteString +textToByteString txt = + case Codec.Binary.Base32Hex.decode (encodeUtf8 $ Text.toUpper txt <> paddingChars) of + Left (_, _rem) -> Nothing + Right h -> pure h + where + -- The decoder we're using is a base32 uppercase decoder that expects padding, + -- so we provide it with the appropriate number of padding characters for the + -- expected hash length. + -- + -- The decoder requires 40 bit (8 5-bit characters) chunks, so if the number + -- of characters of the input is not a multiple of 8, we add '=' padding chars + -- until it is. + -- + -- See https://tools.ietf.org/html/rfc4648#page-8 + paddingChars :: Text + paddingChars = case Text.length txt `mod` 8 of + 0 -> "" + n -> Text.replicate (8 - n) "=" + + hashLength :: Int + hashLength = 512 + + _paddingChars :: Text + _paddingChars = case hashLength `mod` 40 of + 0 -> "" + 8 -> "======" + 16 -> "====" + 24 -> "===" + 32 -> "=" + i -> error $ "impossible hash length `mod` 40 not in {0,8,16,24,32}: " <> show i diff --git a/codebase2/util/U/Util/Hash.hs b/codebase2/util/U/Util/Hash.hs new file mode 100644 index 0000000000..5e1bcaa9f5 --- /dev/null +++ b/codebase2/util/U/Util/Hash.hs @@ -0,0 +1,59 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE OverloadedStrings #-} + +module U.Util.Hash where + +-- (Hash, toBytes, base32Hex, base32Hexs, fromBase32Hex, fromBytes, unsafeFromBase32Hex, showBase32Hex, validBase32HexChars) where + +-- import Unison.Prelude + +import qualified Data.ByteArray as BA +import Data.ByteString (ByteString) +import qualified Crypto.Hash as CH +import qualified Data.ByteString as B +import Data.ByteString.Builder (doubleBE, int64BE, toLazyByteString, word64BE) +import qualified Data.ByteString.Lazy as BL +import qualified Data.ByteString.Short as B.Short +import Data.Text.Encoding (encodeUtf8) +import GHC.Generics (Generic) +import qualified U.Util.Hashable 
as H +import Data.ByteString.Short (fromShort, ShortByteString) +import qualified U.Util.Base32Hex as Base32Hex +import U.Util.Base32Hex (Base32Hex) + +-- | Hash which uniquely identifies a Unison type or term +newtype Hash = Hash {toShort :: ShortByteString} deriving (Eq, Ord, Generic, Show) + +toBase32Hex :: Hash -> Base32Hex +toBase32Hex = Base32Hex.fromByteString . toBytes + +fromBase32Hex :: Base32Hex -> Hash +fromBase32Hex = Hash . B.Short.toShort . Base32Hex.toByteString + +toBytes :: Hash -> ByteString +toBytes = fromShort . toShort + +instance H.Hashable Hash where + tokens h = [H.Bytes (toBytes h)] + +instance H.Accumulate Hash where + accumulate = fromBytes . BA.convert . CH.hashFinalize . go CH.hashInit + where + go :: CH.Context CH.SHA3_512 -> [H.Token Hash] -> CH.Context CH.SHA3_512 + go acc tokens = CH.hashUpdates acc (tokens >>= toBS) + toBS (H.Tag b) = [B.singleton b] + toBS (H.Bytes bs) = [encodeLength $ B.length bs, bs] + toBS (H.Int i) = BL.toChunks . toLazyByteString . int64BE $ i + toBS (H.Nat i) = BL.toChunks . toLazyByteString . word64BE $ i + toBS (H.Double d) = BL.toChunks . toLazyByteString . doubleBE $ d + toBS (H.Text txt) = + let tbytes = encodeUtf8 txt + in [encodeLength (B.length tbytes), tbytes] + toBS (H.Hashed h) = [toBytes h] + encodeLength :: Integral n => n -> B.ByteString + encodeLength = BL.toStrict . toLazyByteString . word64BE . fromIntegral + fromBytes = U.Util.Hash.fromBytes + toBytes = U.Util.Hash.toBytes + +fromBytes :: ByteString -> Hash +fromBytes = Hash . 
B.Short.toShort diff --git a/unison-core/src/Unison/Hashable.hs b/codebase2/util/U/Util/Hashable.hs similarity index 95% rename from unison-core/src/Unison/Hashable.hs rename to codebase2/util/U/Util/Hashable.hs index 45b06a05ea..165933a866 100644 --- a/unison-core/src/Unison/Hashable.hs +++ b/codebase2/util/U/Util/Hashable.hs @@ -1,9 +1,11 @@ -module Unison.Hashable where - -import Unison.Prelude +module U.Util.Hashable where import qualified Data.Map as Map import qualified Data.Set as Set +import Data.Word (Word64, Word8) +import Data.Int (Int64) +import Data.ByteString (ByteString) +import Data.Text (Text) data Token h = Tag !Word8 diff --git a/unison-core/src/Unison/Util/Relation.hs b/codebase2/util/U/Util/Relation.hs similarity index 98% rename from unison-core/src/Unison/Util/Relation.hs rename to codebase2/util/U/Util/Relation.hs index 48d2a3c91c..0ef86b1122 100644 --- a/unison-core/src/Unison/Util/Relation.hs +++ b/codebase2/util/U/Util/Relation.hs @@ -1,7 +1,5 @@ {-# LANGUAGE ViewPatterns #-} -module Unison.Util.Relation where - -import Unison.Prelude hiding (empty, toList) +module U.Util.Relation where import Prelude hiding ( null, map, filter ) import Data.Bifunctor ( first, second ) @@ -9,7 +7,11 @@ import qualified Data.List as List import qualified Data.Map as M import qualified Data.Set as S import qualified Data.Map as Map -import qualified Unison.Hashable as H +import qualified U.Util.Hashable as H +import Data.Set (Set) +import Data.Map (Map) +import Data.Maybe (fromMaybe, isJust) +import Data.Foldable (Foldable(foldl')) -- | -- This implementation avoids using @"Set (a,b)"@ because @@ -284,8 +286,6 @@ ran r = M.keysSet (range r) compactSet :: Ord a => Set (Maybe (Set a)) -> Set a compactSet = S.fold (S.union . fromMaybe S.empty) S.empty - - -- $selectops -- -- Primitive implementation for the /right selection/ and /left selection/ operators. 
diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal new file mode 100644 index 0000000000..c872883dc2 --- /dev/null +++ b/codebase2/util/unison-util.cabal @@ -0,0 +1,33 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-util +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Util.Base32Hex + U.Util.Hash + U.Util.Hashable + U.Util.Relation + -- other-modules: + -- other-extensions: + build-depends: + base, + bytestring, + containers, + cryptonite, + memory, + text, + sandi + hs-source-dirs: . + default-language: Haskell2010 diff --git a/hie.yaml b/hie.yaml index 0907429e1c..451395ac90 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,37 +1,31 @@ cradle: - multi: - - path: "./" - config: - cradle: - stack: - - path: "./parser-typechecker/src" - component: "unison-parser-typechecker:lib" - - path: "./parser-typechecker/unison" - component: "unison-parser-typechecker:exe:unison" - - path: "./parser-typechecker/prettyprintdemo" - component: "unison-parser-typechecker:exe:prettyprintdemo" - - path: "./parser-typechecker/tests" - component: "unison-parser-typechecker:exe:tests" - - path: "./parser-typechecker/transcripts" - component: "unison-parser-typechecker:exe:transcripts" - - - path: "./unison-core/src" - component: "unison-core:lib" - - - path: "./yaks/easytest/src" - component: "easytest:lib" - - path: "./yaks/easytest/tests" - component: "easytest:exe:runtests" - - # Attempt to skip ./unison-src/parser-tests/GenerateErrors.hs - # which doesn't have a corresponding cabal file. 
- # - # This is the skipping strategy suggested by: - # https://github.com/mpickering/hie-bios/tree/7f298424e30e0453dc21062ffa543998a2145ab6#ignoring-directories - # but it isn't working for some reason. - # - # Until it does you can expect to see "1 file failed" in the ghcide output. - - path: "./unison-src" - config: - cradle: - none: + stack: + - path: "codebase1/codebase/." + component: "unison-codebase1:lib" + + - path: "codebase2/codebase/." + component: "unison-codebase:lib" + + - path: "codebase2/codebase-sqlite/." + component: "unison-codebase-sqlite:lib" + + - path: "codebase2/core/." + component: "unison-core:lib" + + - path: "codebase2/editor/." + component: "unison-editor:lib" + + - path: "codebase2/language/." + component: "unison-language:lib" + + - path: "codebase2/runtime/." + component: "unison-runtime:lib" + + - path: "codebase2/syntax/." + component: "unison-syntax:lib" + + - path: "codebase2/util/." + component: "unison-util:lib" + + - path: "codebase2/util-serialization/." + component: "unison-util-serialization:lib" diff --git a/parser-typechecker/LICENSE b/parser-typechecker/LICENSE deleted file mode 100644 index cca9c4376c..0000000000 --- a/parser-typechecker/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013, Paul Chiusano and contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/parser-typechecker/benchmarks/runtime/Main.hs b/parser-typechecker/benchmarks/runtime/Main.hs deleted file mode 100644 index 3970fa518d..0000000000 --- a/parser-typechecker/benchmarks/runtime/Main.hs +++ /dev/null @@ -1,286 +0,0 @@ -{-# language PatternSynonyms #-} - -module Main(main) where - -import Criterion.Main - -import Data.Word - -import Unison.Runtime.MCode -import Unison.Runtime.Machine - -import Unison.Util.EnumContainers - -infixr 0 $$ -($$) :: Instr -> Section -> Section -($$) = Ins - -loop :: Section -loop = Match 0 $ Test1 0 (Yield $ UArg1 1) rec - where - rec = Prim2 ADDI 0 1 - $$ Prim1 DECI 1 - $$ App False (Env 0) (UArg2 0 1) - --- Boxed version of loop to see how fast we are without --- worker/wrapper. 
-sloop :: Section -sloop = Unpack 1 $$ Unpack 0 $$ body - where - body = Match 1 $ Test1 - 0 (Pack 0 (UArg1 3) $$ Yield (BArg1 0)) - {-else-} rec - rec = Prim2 ADDI 1 3 - $$ Prim1 DECI 2 - $$ Pack 0 (UArg1 1) - $$ Pack 0 (UArg1 0) - $$ App False (Env 1) (BArg2 0 1) - --- loop with fast path optimization -oloop :: Section -oloop = Match 0 $ Test1 0 (Yield $ UArg1 1) rec - where - rec = Prim2 ADDI 0 1 - $$ Prim1 DECI 1 - $$ Call False 7 (UArg2 0 1) - --- sloop with fast path optimization -soloop :: Section -soloop = Unpack 1 $$ Unpack 0 $$ body - where - body = Match 1 $ Test1 - 0 (Pack 0 (UArg1 3) $$ Yield (BArg1 0)) - {-else-} rec - rec = Prim2 ADDI 1 3 - $$ Prim1 DECI 2 - $$ Pack 0 (UArg1 1) - $$ Pack 0 (UArg1 0) - $$ Call False 8 (BArg2 0 1) - -konst :: Section -konst = Yield (BArg1 0) - -add :: Section -add = Unpack 1 - $$ Unpack 0 - $$ Prim2 ADDI 1 3 - $$ Pack 0 (UArg1 0) - $$ Yield (BArg1 0) - --- get = shift $ \k s -> k s s --- put s = shift $ \k _ -> k () s --- loop :: Int -> Int -> Int --- loop n s0 = reset (body n) s0 --- where --- body m | m == 0 = x = get ; f _ = x ; f --- | otherwise = x = get ; put (x+m) ; body (m-1) - --- k s => (k s) s -- k continuation -diag :: Section -diag = Let (Reset (setSingleton 0) $$ Jump 0 (BArg1 1)) - $ App False (Stk 0) (BArg1 2) - --- => shift k. diag k -get :: Section -get = Capture 0 - $$ App False (Env 12) (BArg1 0) - --- k s _ => (k) s -kid :: Section -kid = Let (Reset (setSingleton 0) $$ Jump 0 ZArgs) - $ App False (Stk 0) (BArg1 2) - --- s => shift k. kid k s -put :: Section -put = Capture 0 - $$ App False (Env 15) (BArg2 0 1) - --- m => ... 
-kloopb :: Section -kloopb = - Match 0 $ Test1 - 0 (Let (App False (Env 13) ZArgs) $ App False (Env 10) (BArg1 0)) - {-else-} $ rec - where - rec = Let (App False (Env 13) ZArgs) -- get - $ Pack 0 (UArg1 0) - $$ Let (App False (Env 11) (BArg2 0 1)) -- add - $ Let (App False (Env 14) (BArg1 0)) -- put - $ Prim1 DECI 0 - $$ App False (Env 5) (UArg1 0) - --- m a => f = reset (kloopb m) ; y = f (I# a) ; print y -kloop :: Section -kloop = Let (Reset (setSingleton 0) $$ App False (Env 5) (UArg1 0)) - $ Pack 0 (UArg1 1) - $$ App False (Stk 1) (BArg1 0) - --- s0 0 => s0 --- s0 1 s => tinst s setDyn 0 (teff s) -teff :: Section -teff - = Match 0 $ Test1 - 0 (Yield $ BArg1 0) - $ {-else-} Call True 21 ZArgs - --- s => setDyn 0 (teff s) -tinst :: Section -tinst - = Name 20 (BArg1 0) - $$ SetDyn 0 0 - $$ Yield ZArgs - --- m => ... -tloopb :: Section -tloopb = - Match 0 $ Test1 - 0 (Lit 0 $$ App True (Dyn 0) (UArg1 0)) -- get - {-else-} rec - where - rec = Let (Lit 0 $$ App False (Dyn 0) (UArg1 0)) -- get - $ Pack 0 (UArg1 0) -- I# m - $$ Let (App False (Env 11) (BArg2 0 1)) -- add - $ Let (Lit 1 $$ App False (Dyn 0) (UArg1 0)) -- put - $ Prim1 DECI 0 - $$ Call False 25 (UArg1 0) - --- m s => reset (tinst (I# s) ; tloopb m) -tloop :: Section -tloop = Reset (setSingleton 0) - $$ Pack 0 (UArg1 1) - $$ Let (Call True 21 $ BArg1 0) - $ Call True 25 $ UArg1 0 - -fib :: Section -fib = Match 0 $ Test2 - 0 (Lit 0 $$ Yield $ UArg1 0) - 1 (Lit 1 $$ Yield $ UArg1 0) - {-else-} rec - where - rec = Prim1 DECI 0 - $$ Prim1 DECI 0 - $$ Let (App False (Env 2) (UArg1 1)) - $ Let (App False (Env 2) (UArg1 1)) - $ Prim2 ADDI 0 1 $$ Yield (UArg1 0) - -ofib :: Section -ofib = Match 0 $ Test2 - 0 (Lit 0 $$ Yield $ UArg1 0) - 1 (Lit 1 $$ Yield $ UArg1 0) - {-else-} rec - where - rec = Prim1 DECI 0 - $$ Prim1 DECI 0 - $$ Let (Call True 9 (UArg1 1)) - $ Let (Call True 9 (UArg1 1)) - $ Prim2 ADDI 0 1 $$ Yield (UArg1 0) - -stackEater :: Section -stackEater - = Match 0 $ Test1 - 0 (Yield ZArgs) - $ Prim1 
DECI 0 - $$ Let (App False (Env 4) (UArg1 0)) - $ Yield ZArgs - -testEnv :: Word64 -> Comb -testEnv 0 = Lam 2 0 4 0 loop -testEnv 1 = Lam 0 2 6 4 sloop -testEnv 2 = Lam 1 0 6 0 fib -testEnv 4 = Lam 1 0 1 0 stackEater -testEnv 5 = Lam 1 0 2 3 kloopb -testEnv 6 = Lam 2 0 2 2 kloop -testEnv 7 = Lam 2 0 4 0 oloop -testEnv 8 = Lam 0 2 6 4 soloop -testEnv 9 = Lam 1 0 6 0 ofib -testEnv 10 = Lam 0 2 0 2 konst -testEnv 11 = Lam 0 2 5 3 add -testEnv 12 = Lam 0 2 0 2 diag -testEnv 13 = Lam 0 0 0 1 get -testEnv 14 = Lam 0 1 0 2 put -testEnv 15 = Lam 0 3 0 3 kid -testEnv 20 = Lam 1 1 1 2 teff -testEnv 21 = Lam 0 1 0 2 tinst -testEnv 25 = Lam 1 0 4 3 tloopb -testEnv 26 = Lam 1 0 4 3 tloop -testEnv _ = error "testEnv" - -setupu1 :: Word64 -> Int -> Section -setupu1 f n = Lit n $$ App False (Env f) (UArg1 0) - -setupu2 :: Word64 -> Int -> Int -> Section -setupu2 f m n = Lit m $$ Lit n $$ App False (Env f) (UArg2 0 1) - -setupb2 :: Word64 -> Int -> Int -> Section -setupb2 f m n - = Lit m $$ Pack 0 (UArg1 0) - $$ Lit n $$ Pack 0 (UArg1 0) - $$ App False (Env f) (BArgR 0 2) - -benchEv :: String -> Section -> Benchmark -benchEv str code = bench str . whnfIO . 
eval0 testEnv $ code - -main = defaultMain - [ bgroup "loop" - [ benchEv "2500" $ setupu2 0 0 2500 - , benchEv "5000" $ setupu2 0 0 5000 - , benchEv "10000" $ setupu2 0 0 10000 - , benchEv "100000" $ setupu2 0 0 100000 - , benchEv "1000000" $ setupu2 0 0 1000000 - ] - , bgroup "oloop" - [ benchEv "2500" $ setupu2 7 0 2500 - , benchEv "5000" $ setupu2 7 0 5000 - , benchEv "10000" $ setupu2 7 0 10000 - , benchEv "100000" $ setupu2 7 0 100000 - , benchEv "1000000" $ setupu2 7 0 1000000 - ] - , bgroup "sloop" - [ benchEv "2500" $ setupb2 1 0 2500 - , benchEv "5000" $ setupb2 1 0 5000 - , benchEv "10000" $ setupb2 1 0 10000 - , benchEv "100000" $ setupb2 1 0 100000 - , benchEv "1000000" $ setupb2 1 0 1000000 - ] - , bgroup "soloop" - [ benchEv "2500" $ setupb2 8 0 2500 - , benchEv "5000" $ setupb2 8 0 5000 - , benchEv "10000" $ setupb2 8 0 10000 - , benchEv "100000" $ setupb2 8 0 100000 - , benchEv "1000000" $ setupb2 8 0 1000000 - ] - , bgroup "kloop" - [ benchEv "2500" $ setupu2 6 0 2500 - , benchEv "5000" $ setupu2 6 0 5000 - , benchEv "10000" $ setupu2 6 0 10000 - , benchEv "100000" $ setupu2 6 0 100000 - , benchEv "1000000" $ setupu2 6 0 1000000 - ] - , bgroup "tloop" - [ benchEv "2500" $ setupu2 26 0 2500 - , benchEv "5000" $ setupu2 26 0 5000 - , benchEv "10000" $ setupu2 26 0 10000 - , benchEv "100000" $ setupu2 26 0 100000 - , benchEv "1000000" $ setupu2 26 0 1000000 - ] - , bgroup "fib" - [ benchEv "10" $ setupu1 2 10 - , benchEv "15" $ setupu1 2 15 - , benchEv "20" $ setupu1 2 20 - , benchEv "25" $ setupu1 2 25 - , benchEv "30" $ setupu1 2 30 - ] - , bgroup "ofib" - [ benchEv "10" $ setupu1 9 10 - , benchEv "15" $ setupu1 9 15 - , benchEv "20" $ setupu1 9 20 - , benchEv "25" $ setupu1 9 25 - , benchEv "30" $ setupu1 9 30 - ] - , bgroup "stackEater" - [ benchEv "100" $ setupu1 4 100 - , benchEv "1000" $ setupu1 4 1000 - , benchEv "10000" $ setupu1 4 10000 - , benchEv "100000" $ setupu1 4 100000 - ] - ] diff --git a/parser-typechecker/prettyprintdemo/Main.hs 
b/parser-typechecker/prettyprintdemo/Main.hs deleted file mode 100644 index 9e3402cb0e..0000000000 --- a/parser-typechecker/prettyprintdemo/Main.hs +++ /dev/null @@ -1,68 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Main where - -import Data.String (fromString) -import Unison.Util.Pretty as PP -import Data.Text (Text) - -main :: IO () -main = do - -- putStrLn . PP.toANSI 60 $ ex1 - -- print $ examples - putStrLn . PP.toANSI 25 $ examples - where - -- ex1 = PP.linesSpaced [PP.red "hi", PP.blue "blue"] - examples = PP.linesSpaced [ - PP.bold "Creating `Pretty`s", - - "Use `OverloadedStrings`, `lit`, and `text` to get values into `Pretty`", - "Here's an overloaded string", - PP.lit "Here's a call to `lit`", -- works for any `IsString` - PP.text ("No need to Text.unpack, just `PP.text` directly" :: Text), - - PP.bold "Use the `Monoid` and/or `Semigroup` to combine strings", - "Hello, " <> PP.red "world!", - - PP.yellow "`wrap` does automatic line wrapping", - PP.wrap $ loremIpsum, - PP.wrapString "Can also call `wrapString` directly if you have a String value.", - - PP.bold "Indentation: can indent by n spaces, or by another `Pretty`", - PP.indentN 2 (PP.wrap loremIpsum), - PP.indent (PP.red ">> ") (PP.wrap loremIpsum), - - PP.bold "Other handy functions", - - PP.bulleted [ - PP.sep ", " (replicate 10 "a"), - PP.lines ["Alice", PP.hiBlue "Bob", "Carol"], - PP.blue "foo bar baz" - ], - - PP.indentN 4 $ PP.bulleted ["Alice", "Bob", "Carol"], - PP.dashed ["Alice", PP.red "Bob", "Carol"], - PP.column2 [ - (PP.bold "Name", PP.bold "Favorite color"), - ("Alice" , PP.red "Red"), - ("Bob" , PP.blue "Blue"), - ("Carolina" , PP.green "Green"), - ("Dave" , PP.black "Black") - ], - PP.numbered (fromString . show) [ - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], - -- Feel free to start the numbering wherever you like - PP.numbered (fromString . show . 
(10 +)) ["uno", "dos", "tres"], - - PP.bold "Grouping and breaking", - PP.wrap "The orElse function chooses between two `Pretty`, preferring the first if it fits, and using the second otherwise.", - - PP.wrap "The `group` function introduces a level of breaking. The renderer will try to avoid breaking up a `group` unless it's needed. Groups are broken \"outside in\".", - - -- question - I think this group shouldn't be needed - PP.group (PP.orElse "This fits." "So this won't be used."), - "This is a very long string which won't fit." - `PP.orElse` "This is a very...(truncated)" - ] - loremIpsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit." - -- loremIpsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas sem nisi, venenatis viverra ex eu, tristique dapibus justo. Ut lobortis mattis rutrum. Vivamus mattis eros diam, a egestas mi venenatis vel. Nunc felis dui, consectetur ac volutpat vitae, molestie in augue. Cras nec aliquet ex. In et sem vel sapien auctor euismod. Pellentesque eu aliquam dolor. Cras porttitor mi velit, dapibus vulputate odio pharetra non. Etiam iaculis nulla eu nisl euismod ultricies." 
diff --git a/parser-typechecker/src/Unison/Builtin.hs b/parser-typechecker/src/Unison/Builtin.hs deleted file mode 100644 index f51d4345be..0000000000 --- a/parser-typechecker/src/Unison/Builtin.hs +++ /dev/null @@ -1,518 +0,0 @@ -{-# LANGUAGE ExplicitForAll #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE Rank2Types #-} - -module Unison.Builtin - (codeLookup - ,constructorType - ,names - ,names0 - ,builtinDataDecls - ,builtinEffectDecls - ,builtinConstructorType - ,builtinTypeDependents - ,builtinTermsByType - ,builtinTermsByTypeMention - ,intrinsicTermReferences - ,intrinsicTypeReferences - ,isBuiltinType - ,typeLookup - ,termRefTypes - ) where - -import Unison.Prelude - -import Data.Bifunctor ( second, first ) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Unison.ConstructorType as CT -import Unison.Codebase.CodeLookup ( CodeLookup(..) ) -import qualified Unison.Builtin.Decls as DD -import qualified Unison.DataDeclaration as DD -import Unison.Parser ( Ann(..) 
) -import qualified Unison.Reference as R -import qualified Unison.Referent as Referent -import Unison.Symbol ( Symbol ) -import qualified Unison.Type as Type -import Unison.Var ( Var ) -import qualified Unison.Var as Var -import Unison.Name ( Name ) -import qualified Unison.Name as Name -import Unison.Names3 (Names(Names), Names0) -import qualified Unison.Names3 as Names3 -import qualified Unison.Typechecker.TypeLookup as TL -import qualified Unison.Util.Relation as Rel - -type DataDeclaration v = DD.DataDeclaration v Ann -type EffectDeclaration v = DD.EffectDeclaration v Ann -type Type v = Type.Type v () - -names :: Names -names = Names names0 mempty - -names0 :: Names0 -names0 = Names3.names0 terms types where - terms = Rel.mapRan Referent.Ref (Rel.fromMap termNameRefs) <> - Rel.fromList [ (Name.fromVar vc, Referent.Con (R.DerivedId r) cid ct) - | (ct, (_,(r,decl))) <- ((CT.Data,) <$> builtinDataDecls @Symbol) <> - ((CT.Effect,) . (second . second) DD.toDataDecl <$> builtinEffectDecls) - , ((_,vc,_), cid) <- DD.constructors' decl `zip` [0..]] - types = Rel.fromList builtinTypes <> - Rel.fromList [ (Name.fromVar v, R.DerivedId r) - | (v,(r,_)) <- builtinDataDecls @Symbol ] <> - Rel.fromList [ (Name.fromVar v, R.DerivedId r) - | (v,(r,_)) <- builtinEffectDecls @Symbol ] - --- note: this function is really for deciding whether `r` is a term or type, --- but it can only answer correctly for Builtins. -isBuiltinType :: R.Reference -> Bool -isBuiltinType r = elem r . fmap snd $ builtinTypes - -typeLookup :: Var v => TL.TypeLookup v Ann -typeLookup = - TL.TypeLookup - (fmap (const Intrinsic) <$> termRefTypes) - (Map.fromList . map (first R.DerivedId) $ map snd builtinDataDecls) - (Map.fromList . 
map (first R.DerivedId) $ map snd builtinEffectDecls) - -constructorType :: R.Reference -> Maybe CT.ConstructorType -constructorType r = TL.constructorType (typeLookup @Symbol) r - <|> Map.lookup r builtinConstructorType - -builtinDataDecls :: Var v => [(v, (R.Id, DataDeclaration v))] -builtinDataDecls = - [ (v, (r, Intrinsic <$ d)) | (v, r, d) <- DD.builtinDataDecls ] - -builtinEffectDecls :: Var v => [(v, (R.Id, EffectDeclaration v))] -builtinEffectDecls = [ (v, (r, Intrinsic <$ d)) | (v, r, d) <- DD.builtinEffectDecls ] - -codeLookup :: (Applicative m, Var v) => CodeLookup v m Ann -codeLookup = CodeLookup (const $ pure Nothing) $ \r -> - pure - $ lookup r [ (r, Right x) | (r, x) <- snd <$> builtinDataDecls ] - <|> lookup r [ (r, Left x) | (r, x) <- snd <$> builtinEffectDecls ] - --- Relation predicate: Domain depends on range. -builtinDependencies :: Rel.Relation R.Reference R.Reference -builtinDependencies = - Rel.fromMultimap (Type.dependencies <$> termRefTypes @Symbol) - --- a relation whose domain is types and whose range is builtin terms with that type -builtinTermsByType :: Rel.Relation R.Reference Referent.Referent -builtinTermsByType = - Rel.fromList [ (Type.toReference ty, Referent.Ref r) - | (r, ty) <- Map.toList (termRefTypes @Symbol) ] - --- a relation whose domain is types and whose range is builtin terms that mention that type --- example: Nat.+ mentions the type `Nat` -builtinTermsByTypeMention :: Rel.Relation R.Reference Referent.Referent -builtinTermsByTypeMention = - Rel.fromList [ (m, Referent.Ref r) | (r, ty) <- Map.toList (termRefTypes @Symbol) - , m <- toList $ Type.toReferenceMentions ty ] - --- The dependents of a builtin type is the set of builtin terms which --- mention that type. -builtinTypeDependents :: R.Reference -> Set R.Reference -builtinTypeDependents r = Rel.lookupRan r builtinDependencies - --- WARNING: --- As with the terms, we should avoid changing these references, even --- if we decide to change their names. 
-builtinTypes :: [(Name, R.Reference)] -builtinTypes = Map.toList . Map.mapKeys Name.unsafeFromText - $ foldl' go mempty builtinTypesSrc where - go m = \case - B' r _ -> Map.insert r (R.Builtin r) m - D' r -> Map.insert r (R.Builtin r) m - Rename' r name -> case Map.lookup name m of - Just _ -> error . Text.unpack $ - "tried to rename `" <> r <> "` to `" <> name <> "`, " <> - "which already exists." - Nothing -> case Map.lookup r m of - Nothing -> error . Text.unpack $ - "tried to rename `" <> r <> "` before it was declared." - Just t -> Map.insert name t . Map.delete r $ m - Alias' r name -> case Map.lookup name m of - Just _ -> error . Text.unpack $ - "tried to alias `" <> r <> "` to `" <> name <> "`, " <> - "which already exists." - Nothing -> case Map.lookup r m of - Nothing -> error . Text.unpack $ - "tried to alias `" <> r <> "` before it was declared." - Just t -> Map.insert name t m - --- WARNING: Don't delete any of these lines, only add corrections. -builtinTypesSrc :: [BuiltinTypeDSL] -builtinTypesSrc = - [ B' "Int" CT.Data - , B' "Nat" CT.Data - , B' "Float" CT.Data - , B' "Boolean" CT.Data - , B' "Sequence" CT.Data, Rename' "Sequence" "List" - , B' "Text" CT.Data - , B' "Char" CT.Data - , B' "Effect" CT.Data, Rename' "Effect" "Request" - , B' "Bytes" CT.Data - , B' "Link.Term" CT.Data - , B' "Link.Type" CT.Data - , B' "IO" CT.Effect, Rename' "IO" "io2.IO" - , B' "Handle" CT.Data, Rename' "Handle" "io2.Handle" - , B' "Socket" CT.Data, Rename' "Socket" "io2.Socket" - , B' "ThreadId" CT.Data, Rename' "ThreadId" "io2.ThreadId" - ] - --- rename these to "builtin" later, when builtin means intrinsic as opposed to --- stuff that intrinsics depend on. 
-intrinsicTypeReferences :: Set R.Reference -intrinsicTypeReferences = foldl' go mempty builtinTypesSrc where - go acc = \case - B' r _ -> Set.insert (R.Builtin r) acc - D' r -> Set.insert (R.Builtin r) acc - _ -> acc - -intrinsicTermReferences :: Set R.Reference -intrinsicTermReferences = Map.keysSet (termRefTypes @Symbol) - -builtinConstructorType :: Map R.Reference CT.ConstructorType -builtinConstructorType = Map.fromList [ (R.Builtin r, ct) | B' r ct <- builtinTypesSrc ] - -data BuiltinTypeDSL = B' Text CT.ConstructorType | D' Text | Rename' Text Text | Alias' Text Text - - -data BuiltinDSL v - -- simple builtin: name=ref, type - = B Text (Type v) - -- deprecated builtin: name=ref, type (TBD) - | D Text (Type v) - -- rename builtin: refname, newname - -- must not appear before corresponding B/D - -- will overwrite newname - | Rename Text Text - -- alias builtin: refname, newname - -- must not appear before corresponding B/D - -- will overwrite newname - | Alias Text Text - -termNameRefs :: Map Name R.Reference -termNameRefs = Map.mapKeys Name.unsafeFromText $ foldl' go mempty (builtinsSrc @Symbol) where - go m = \case - B r _tp -> Map.insert r (R.Builtin r) m - D r _tp -> Map.insert r (R.Builtin r) m - Rename r name -> case Map.lookup name m of - Just _ -> error . Text.unpack $ - "tried to rename `" <> r <> "` to `" <> name <> "`, " <> - "which already exists." - Nothing -> case Map.lookup r m of - Nothing -> error . Text.unpack $ - "tried to rename `" <> r <> "` before it was declared." - Just t -> Map.insert name t . Map.delete r $ m - Alias r name -> case Map.lookup name m of - Just _ -> error . Text.unpack $ - "tried to alias `" <> r <> "` to `" <> name <> "`, " <> - "which already exists." - Nothing -> case Map.lookup r m of - Nothing -> error . Text.unpack $ - "tried to alias `" <> r <> "` before it was declared." 
- Just t -> Map.insert name t m - -termRefTypes :: Var v => Map R.Reference (Type v) -termRefTypes = foldl' go mempty builtinsSrc where - go m = \case - B r t -> Map.insert (R.Builtin r) t m - D r t -> Map.insert (R.Builtin r) t m - _ -> m - -builtinsSrc :: Var v => [BuiltinDSL v] -builtinsSrc = - [ B "Int.+" $ int --> int --> int - , B "Int.-" $ int --> int --> int - , B "Int.*" $ int --> int --> int - , B "Int./" $ int --> int --> int - , B "Int.<" $ int --> int --> boolean - , B "Int.>" $ int --> int --> boolean - , B "Int.<=" $ int --> int --> boolean - , B "Int.>=" $ int --> int --> boolean - , B "Int.==" $ int --> int --> boolean - , B "Int.and" $ int --> int --> int - , B "Int.or" $ int --> int --> int - , B "Int.xor" $ int --> int --> int - , B "Int.complement" $ int --> int - , B "Int.increment" $ int --> int - , B "Int.isEven" $ int --> boolean - , B "Int.isOdd" $ int --> boolean - , B "Int.signum" $ int --> int - , B "Int.leadingZeros" $ int --> nat - , B "Int.negate" $ int --> int - , B "Int.negate" $ int --> int - , B "Int.mod" $ int --> int --> int - , B "Int.pow" $ int --> nat --> int - , B "Int.shiftLeft" $ int --> nat --> int - , B "Int.shiftRight" $ int --> nat --> int - , B "Int.truncate0" $ int --> nat - , B "Int.toText" $ int --> text - , B "Int.fromText" $ text --> optional int - , B "Int.toFloat" $ int --> float - , B "Int.trailingZeros" $ int --> nat - - , B "Nat.*" $ nat --> nat --> nat - , B "Nat.+" $ nat --> nat --> nat - , B "Nat./" $ nat --> nat --> nat - , B "Nat.<" $ nat --> nat --> boolean - , B "Nat.<=" $ nat --> nat --> boolean - , B "Nat.==" $ nat --> nat --> boolean - , B "Nat.>" $ nat --> nat --> boolean - , B "Nat.>=" $ nat --> nat --> boolean - , B "Nat.and" $ nat --> nat --> nat - , B "Nat.or" $ nat --> nat --> nat - , B "Nat.xor" $ nat --> nat --> nat - , B "Nat.complement" $ nat --> nat - , B "Nat.drop" $ nat --> nat --> nat - , B "Nat.fromText" $ text --> optional nat - , B "Nat.increment" $ nat --> nat - , B "Nat.isEven" 
$ nat --> boolean - , B "Nat.isOdd" $ nat --> boolean - , B "Nat.leadingZeros" $ nat --> nat - , B "Nat.mod" $ nat --> nat --> nat - , B "Nat.pow" $ nat --> nat --> nat - , B "Nat.shiftLeft" $ nat --> nat --> nat - , B "Nat.shiftRight" $ nat --> nat --> nat - , B "Nat.sub" $ nat --> nat --> int - , B "Nat.toFloat" $ nat --> float - , B "Nat.toInt" $ nat --> int - , B "Nat.toText" $ nat --> text - , B "Nat.trailingZeros" $ nat --> nat - - , B "Float.+" $ float --> float --> float - , B "Float.-" $ float --> float --> float - , B "Float.*" $ float --> float --> float - , B "Float./" $ float --> float --> float - , B "Float.<" $ float --> float --> boolean - , B "Float.>" $ float --> float --> boolean - , B "Float.<=" $ float --> float --> boolean - , B "Float.>=" $ float --> float --> boolean - , B "Float.==" $ float --> float --> boolean - - -- Trigonmetric Functions - , B "Float.acos" $ float --> float - , B "Float.asin" $ float --> float - , B "Float.atan" $ float --> float - , B "Float.atan2" $ float --> float --> float - , B "Float.cos" $ float --> float - , B "Float.sin" $ float --> float - , B "Float.tan" $ float --> float - - -- Hyperbolic Functions - , B "Float.acosh" $ float --> float - , B "Float.asinh" $ float --> float - , B "Float.atanh" $ float --> float - , B "Float.cosh" $ float --> float - , B "Float.sinh" $ float --> float - , B "Float.tanh" $ float --> float - - -- Exponential Functions - , B "Float.exp" $ float --> float - , B "Float.log" $ float --> float - , B "Float.logBase" $ float --> float --> float - - -- Power Functions - , B "Float.pow" $ float --> float --> float - , B "Float.sqrt" $ float --> float - - -- Rounding and Remainder Functions - , B "Float.ceiling" $ float --> int - , B "Float.floor" $ float --> int - , B "Float.round" $ float --> int - , B "Float.truncate" $ float --> int - - -- Float Utils - , B "Float.abs" $ float --> float - , B "Float.max" $ float --> float --> float - , B "Float.min" $ float --> float --> float - , B 
"Float.toText" $ float --> text - , B "Float.fromText" $ text --> optional float - - , B "Universal.==" $ forall1 "a" (\a -> a --> a --> boolean) - -- Don't we want a Universal.!= ? - - -- Universal.compare intended as a low level function that just returns - -- `Int` rather than some Ordering data type. If we want, later, - -- could provide a pure Unison wrapper for Universal.compare that - -- returns a proper data type. - -- - -- 0 is equal, < 0 is less than, > 0 is greater than - , B "Universal.compare" $ forall1 "a" (\a -> a --> a --> int) - , B "Universal.>" $ forall1 "a" (\a -> a --> a --> boolean) - , B "Universal.<" $ forall1 "a" (\a -> a --> a --> boolean) - , B "Universal.>=" $ forall1 "a" (\a -> a --> a --> boolean) - , B "Universal.<=" $ forall1 "a" (\a -> a --> a --> boolean) - - , B "bug" $ forall1 "a" (\a -> forall1 "b" (\b -> a --> b)) - , B "todo" $ forall1 "a" (\a -> forall1 "b" (\b -> a --> b)) - - , B "Boolean.not" $ boolean --> boolean - - , B "Text.empty" text - , B "Text.++" $ text --> text --> text - , B "Text.take" $ nat --> text --> text - , B "Text.drop" $ nat --> text --> text - , B "Text.size" $ text --> nat - , B "Text.==" $ text --> text --> boolean - , D "Text.!=" $ text --> text --> boolean - , B "Text.<=" $ text --> text --> boolean - , B "Text.>=" $ text --> text --> boolean - , B "Text.<" $ text --> text --> boolean - , B "Text.>" $ text --> text --> boolean - , B "Text.uncons" $ text --> optional (tuple [char, text]) - , B "Text.unsnoc" $ text --> optional (tuple [text, char]) - - , B "Text.toCharList" $ text --> list char - , B "Text.fromCharList" $ list char --> text - - , B "Char.toNat" $ char --> nat - , B "Char.fromNat" $ nat --> char - - , B "Bytes.empty" bytes - , B "Bytes.fromList" $ list nat --> bytes - , B "Bytes.++" $ bytes --> bytes --> bytes - , B "Bytes.take" $ nat --> bytes --> bytes - , B "Bytes.drop" $ nat --> bytes --> bytes - , B "Bytes.at" $ nat --> bytes --> optional nat - , B "Bytes.toList" $ bytes --> list 
nat - , B "Bytes.size" $ bytes --> nat - , B "Bytes.flatten" $ bytes --> bytes - - , B "List.empty" $ forall1 "a" list - , B "List.cons" $ forall1 "a" (\a -> a --> list a --> list a) - , Alias "List.cons" "List.+:" - , B "List.snoc" $ forall1 "a" (\a -> list a --> a --> list a) - , Alias "List.snoc" "List.:+" - , B "List.take" $ forall1 "a" (\a -> nat --> list a --> list a) - , B "List.drop" $ forall1 "a" (\a -> nat --> list a --> list a) - , B "List.++" $ forall1 "a" (\a -> list a --> list a --> list a) - , B "List.size" $ forall1 "a" (\a -> list a --> nat) - , B "List.at" $ forall1 "a" (\a -> nat --> list a --> optional a) - - , B "Debug.watch" $ forall1 "a" (\a -> text --> a --> a) - ] ++ - -- avoid name conflicts with Universal == < > <= >= - [ Rename (t <> "." <> old) (t <> "." <> new) - | t <- ["Int", "Nat", "Float", "Text"] - , (old, new) <- [("==", "eq") - ,("<" , "lt") - ,("<=", "lteq") - ,(">" , "gt") - ,(">=", "gteq")] - ] ++ - (ioBuiltins >>= \(n,ty) -> [B n ty, Rename n ("io2." 
<> n)]) - where - int = Type.int () - nat = Type.nat () - boolean = Type.boolean () - float = Type.float () - text = Type.text () - bytes = Type.bytes () - char = Type.char () - - (-->) :: Ord v => Type v -> Type v -> Type v - a --> b = Type.arrow () a b - - infixr --> - - forall1 :: Var v => Text -> (Type v -> Type v) -> Type v - forall1 name body = - let - a = Var.named name - in Type.forall () a (body $ Type.var () a) - - app :: Ord v => Type v -> Type v -> Type v - app = Type.app () - - list :: Ord v => Type v -> Type v - list arg = Type.vector () `app` arg - - optional :: Ord v => Type v -> Type v - optional arg = DD.optionalType () `app` arg - - tuple :: Ord v => [Type v] -> Type v - tuple [t] = t - tuple ts = foldr pair (DD.unitType ()) ts - - pair :: Ord v => Type v -> Type v -> Type v - pair l r = DD.pairType () `app` l `app` r - -ioBuiltins :: Var v => [(Text, Type v)] -ioBuiltins = - [ ("IO.openFile", text --> ioe handle) - , ("IO.closeFile", handle --> ioe unit) - , ("IO.isFileEOF", handle --> ioe boolean) - , ("IO.isFileOpen", handle --> ioe boolean) - , ("IO.isSeekable", handle --> ioe boolean) - , ("IO.seekHandle", handle --> fmode --> int --> ioe unit) - , ("IO.handlePosition", handle --> ioe int) - , ("IO.getBuffering", handle --> ioe bmode) - , ("IO.setBuffering", handle --> bmode --> ioe unit) - , ("IO.getLine", handle --> ioe text) - , ("IO.getText", handle --> ioe text) - , ("IO.putText", handle --> text --> ioe unit) - , ("IO.systemTime", unit --> ioe nat) - , ("IO.getTempDirectory", unit --> ioe text) - , ("IO.getCurrentDirectory", unit --> ioe text) - , ("IO.setCurrentDirectory", text --> ioe unit) - , ("IO.fileExists", text --> ioe boolean) - , ("IO.isDirectory", text --> ioe boolean) - , ("IO.createDirectory", text --> ioe unit) - , ("IO.removeDirectory", text --> ioe unit) - , ("IO.renameDirectory", text --> text --> ioe unit) - , ("IO.removeFile", text --> ioe unit) - , ("IO.renameFile", text --> text --> ioe unit) - , 
("IO.getFileTimestamp", text --> ioe nat) - , ("IO.getFileSize", text --> ioe nat) - , ("IO.serverSocket", text --> text --> ioe socket) - , ("IO.listen", socket --> ioe unit) - , ("IO.clientSocket", text --> text --> ioe socket) - , ("IO.closeSocket", socket --> ioe unit) - , ("IO.socketAccept", socket --> ioe socket) - , ("IO.socketSend", socket --> bytes --> ioe unit) - , ("IO.socketReceive", socket --> nat --> ioe bytes) - , ("IO.forkComp" - , forall1 "a" $ \a -> (unit --> ioe a) --> ioe threadId) - , ("IO.stdHandle", nat --> optional handle) - ] - where - (-->) :: Ord v => Type v -> Type v -> Type v - a --> b = Type.arrow () a b - infixr --> - - forall1 :: Var v => Text -> (Type v -> Type v) -> Type v - forall1 name body = - let - a = Var.named name - in Type.forall () a (body $ Type.var () a) - - - either :: Ord v => Type v -> Type v -> Type v - either l r = DD.eitherType () `app` l `app` r - - ioe = Type.effect1 () (Type.builtinIO ()) - . either (DD.ioErrorType ()) - - socket = Type.socket () - threadId = Type.threadId () - handle = Type.fileHandle () - unit = DD.unitType () - - fmode = DD.fileModeType () - bmode = DD.bufferModeType () - - app :: Ord v => Type v -> Type v -> Type v - app = Type.app () - - int = Type.int () - nat = Type.nat () - bytes = Type.bytes () - text = Type.text () - boolean = Type.boolean () - - optional :: Ord v => Type v -> Type v - optional arg = DD.optionalType () `app` arg diff --git a/parser-typechecker/src/Unison/Builtin/Decls.hs b/parser-typechecker/src/Unison/Builtin/Decls.hs deleted file mode 100644 index 6ce8c3f25a..0000000000 --- a/parser-typechecker/src/Unison/Builtin/Decls.hs +++ /dev/null @@ -1,310 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Builtin.Decls where - -import Data.List ( elemIndex, find ) -import qualified Data.Map as Map -import Data.Text (Text) -import qualified Unison.ABT as ABT -import qualified Unison.ConstructorType as CT 
-import qualified Unison.DataDeclaration as DD -import Unison.DataDeclaration ( DataDeclaration(..) - , Modifier(Structural, Unique) - , hashDecls ) -import qualified Unison.Pattern as Pattern -import Unison.Reference (Reference) -import qualified Unison.Reference as Reference -import Unison.Referent (Referent) -import qualified Unison.Referent as Referent -import Unison.Symbol (Symbol) -import Unison.Term (ConstructorId, Term, Term2) -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import Unison.Type (Type) -import qualified Unison.Var as Var -import Unison.Var (Var) - - -unitRef, pairRef, optionalRef, eitherRef :: Reference -testResultRef, linkRef, docRef, ioErrorRef :: Reference -fileModeRef, bufferModeRef, seqViewRef :: Reference -(unitRef, pairRef, optionalRef, testResultRef, linkRef, docRef, eitherRef, ioErrorRef, fileModeRef, bufferModeRef, seqViewRef) = - let decls = builtinDataDecls @Symbol - [(_, unit, _)] = filter (\(v, _, _) -> v == Var.named "Unit") decls - [(_, pair, _)] = filter (\(v, _, _) -> v == Var.named "Tuple") decls - [(_, opt , _)] = filter (\(v, _, _) -> v == Var.named "Optional") decls - [(_, testResult, _)] = - filter (\(v, _, _) -> v == Var.named "Test.Result") decls - [(_, link , _)] = filter (\(v, _, _) -> v == Var.named "Link") decls - [(_, doc , _)] = filter (\(v, _, _) -> v == Var.named "Doc") decls - - [(_,ethr,_)] = filter (\(v,_,_) -> v == Var.named "Either") decls - [(_,ioerr,_)] = filter (\(v,_,_) -> v == Var.named "io2.IOError") decls - [(_,fmode,_)] = filter (\(v,_,_) -> v == Var.named "io2.FileMode") decls - [(_,bmode,_)] = filter (\(v,_,_) -> v == Var.named "io2.BufferMode") decls - [(_,seqv,_)] = filter (\(v,_,_) -> v == Var.named "SeqView") decls - r = Reference.DerivedId - in (r unit, r pair, r opt, r testResult, r link, r doc, r ethr, r ioerr, r fmode, r bmode, r seqv) - -pairCtorRef, unitCtorRef :: Referent -pairCtorRef = Referent.Con pairRef 0 CT.Data -unitCtorRef = Referent.Con unitRef 0 
CT.Data - -constructorId :: Reference -> Text -> Maybe Int -constructorId ref name = do - (_,_,dd) <- find (\(_,r,_) -> Reference.DerivedId r == ref) (builtinDataDecls @Symbol) - elemIndex name $ DD.constructorNames dd - -okConstructorId, failConstructorId, docBlobId, docLinkId, docSignatureId, docSourceId, docEvaluateId, docJoinId, linkTermId, linkTypeId :: ConstructorId -Just okConstructorId = constructorId testResultRef "Test.Result.Ok" -Just failConstructorId = constructorId testResultRef "Test.Result.Fail" -Just docBlobId = constructorId docRef "Doc.Blob" -Just docLinkId = constructorId docRef "Doc.Link" -Just docSignatureId = constructorId docRef "Doc.Signature" -Just docSourceId = constructorId docRef "Doc.Source" -Just docEvaluateId = constructorId docRef "Doc.Evaluate" -Just docJoinId = constructorId docRef "Doc.Join" -Just linkTermId = constructorId linkRef "Link.Term" -Just linkTypeId = constructorId linkRef "Link.Type" - -okConstructorReferent, failConstructorReferent :: Referent.Referent -okConstructorReferent = Referent.Con testResultRef okConstructorId CT.Data -failConstructorReferent = Referent.Con testResultRef failConstructorId CT.Data - --- | parse some builtin data types, and resolve their free variables using --- | builtinTypes' and those types defined herein -builtinDataDecls :: Var v => [(v, Reference.Id, DataDeclaration v ())] -builtinDataDecls = rs1 ++ rs - where - rs1 = case hashDecls $ Map.fromList - [ (v "Link" , link) - ] of Right a -> a; Left e -> error $ "builtinDataDecls: " <> show e - rs = case hashDecls $ Map.fromList - [ (v "Unit" , unit) - , (v "Tuple" , tuple) - , (v "Optional" , opt) - , (v "Either" , eith) - , (v "Test.Result" , tr) - , (v "Doc" , doc) - , (v "io2.FileMode" , fmode) - , (v "io2.BufferMode" , bmode) - , (v "SeqView" , seqview) - - , (v "io2.IOError" , ioerr) - ] of Right a -> a; Left e -> error $ "builtinDataDecls: " <> show e - [(_, linkRef, _)] = rs1 - v = Var.named - var name = Type.var () (v name) - arr = 
Type.arrow' - -- see note on `hashDecls` above for why ctor must be called `Unit.Unit`. - unit = DataDeclaration Structural () [] [((), v "Unit.Unit", var "Unit")] - tuple = DataDeclaration - Structural - () - [v "a", v "b"] - [ ( () - , v "Tuple.Cons" - , Type.foralls - () - [v "a", v "b"] - ( var "a" - `arr` (var "b" `arr` Type.apps' (var "Tuple") [var "a", var "b"]) - ) - ) - ] - opt = DataDeclaration - Structural - () - [v "a"] - [ ( () - , v "Optional.None" - , Type.foralls () [v "a"] (Type.app' (var "Optional") (var "a")) - ) - , ( () - , v "Optional.Some" - , Type.foralls () - [v "a"] - (var "a" `arr` Type.app' (var "Optional") (var "a")) - ) - ] - eith = DataDeclaration - Structural - () - [v "a", v "b"] - [ ( () - , v "Either.Left" - , Type.foralls () [v "a", v "b"] - (var "a" `arr` Type.apps' (var "Either") [var "a", var "b"]) - ) - , ( () - , v "Either.Right" - , Type.foralls () [v "a", v "b"] - (var "b" `arr` Type.apps' (var "Either") [var "a", var "b"]) - ) - ] - fmode = DataDeclaration - (Unique "3c11ba4f0a5d8fedd427b476cdd2d7673197d11e") - () - [] - [ ((), v "io2.FileMode.Read", var "io2.FileMode") - , ((), v "io2.FileMode.Write", var "io2.FileMode") - , ((), v "io2.FileMode.Append", var "io2.FileMode") - , ((), v "io2.FileMode.ReadWrite", var "io2.FileMode") - ] - bmode = DataDeclaration - (Unique "7dd9560d3826c21e5e6a7e08f575b61adcddf849") - () - [] - [ ((), v "io2.BufferMode.NoBuffering", var "io2.BufferMode") - , ((), v "io2.BufferMode.LineBuffering", var "io2.BufferMode") - , ((), v "io2.BufferMode.BlockBuffering", var "io2.BufferMode") - , ((), v "io2.BufferMode.SizedBlockBuffering" - , Type.nat () `arr` var "io2.BufferMode") - ] - ioerr = DataDeclaration - (Unique "5915e25ac83205f7885395cc6c6c988bc5ec69a1") - () - [] - [ ((), v "io2.IOError.AlreadyExists", var "io2.IOError") - , ((), v "io2.IOError.NoSuchThing", var "io2.IOError") - , ((), v "io2.IOError.ResourceBusy", var "io2.IOError") - , ((), v "io2.IOError.ResourceExhausted", var 
"io2.IOError") - , ((), v "io2.IOError.EOF", var "io2.IOError") - , ((), v "io2.IOError.IllegalOperation", var "io2.IOError") - , ((), v "io2.IOError.PermissionDenied", var "io2.IOError") - , ((), v "io2.IOError.UserError", var "io2.IOError") - ] - seqview = DataDeclaration - Structural - () - [v "a", v "b"] - [ ( () - , v "SeqView.VEmpty" - , Type.foralls () [v "a", v "b"] - (Type.apps' (var "SeqView") [var "a", var "b"]) - ) - , ( () - , v "SeqView.VElem" - , let sv = Type.apps' (var "SeqView") [var "a", var "b"] - in Type.foralls () [v "a", v "b"] - (var "a" `arr` (var "b" `arr` sv)) - ) - ] - tr = DataDeclaration - (Unique "70621e539cd802b2ad53105697800930411a3ebc") - () - [] - [ ((), v "Test.Result.Fail", Type.text () `arr` var "Test.Result") - , ((), v "Test.Result.Ok" , Type.text () `arr` var "Test.Result") - ] - doc = DataDeclaration - (Unique "c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004") - () - [] - [ ((), v "Doc.Blob", Type.text () `arr` var "Doc") - , ((), v "Doc.Link", Type.refId () linkRef `arr` var "Doc") - , ((), v "Doc.Signature", Type.termLink () `arr` var "Doc") - , ((), v "Doc.Source", Type.refId () linkRef `arr` var "Doc") - , ((), v "Doc.Evaluate", Type.termLink () `arr` var "Doc") - , ((), v "Doc.Join", Type.app () (Type.vector()) (var "Doc") `arr` var "Doc") - ] - link = DataDeclaration - (Unique "a5803524366ead2d7f3780871d48771e8142a3b48802f34a96120e230939c46bd5e182fcbe1fa64e9bff9bf741f3c04") - () - [] - [ ((), v "Link.Term", Type.termLink () `arr` var "Link") - , ((), v "Link.Type", Type.typeLink () `arr` var "Link") - ] - -builtinEffectDecls :: [(v, Reference.Id, DD.EffectDeclaration v ())] -builtinEffectDecls = [] - -pattern UnitRef <- (unUnitRef -> True) -pattern PairRef <- (unPairRef -> True) -pattern OptionalRef <- (unOptionalRef -> True) -pattern TupleType' ts <- (unTupleType -> Just ts) -pattern TupleTerm' xs <- (unTupleTerm -> Just xs) -pattern TuplePattern ps <- (unTuplePattern 
-> Just ps) - --- some pattern synonyms to make pattern matching on some of these constants more pleasant -pattern DocRef <- ((== docRef) -> True) -pattern DocJoin segs <- Term.App' (Term.Constructor' DocRef DocJoinId) (Term.Sequence' segs) -pattern DocBlob txt <- Term.App' (Term.Constructor' DocRef DocBlobId) (Term.Text' txt) -pattern DocLink link <- Term.App' (Term.Constructor' DocRef DocLinkId) link -pattern DocSource link <- Term.App' (Term.Constructor' DocRef DocSourceId) link -pattern DocSignature link <- Term.App' (Term.Constructor' DocRef DocSignatureId) link -pattern DocEvaluate link <- Term.App' (Term.Constructor' DocRef DocEvaluateId) link -pattern Doc <- Term.App' (Term.Constructor' DocRef _) _ -pattern DocSignatureId <- ((== docSignatureId) -> True) -pattern DocBlobId <- ((== docBlobId) -> True) -pattern DocLinkId <- ((== docLinkId) -> True) -pattern DocSourceId <- ((== docSourceId) -> True) -pattern DocEvaluateId <- ((== docEvaluateId) -> True) -pattern DocJoinId <- ((== docJoinId) -> True) -pattern LinkTermId <- ((== linkTermId) -> True) -pattern LinkTypeId <- ((== linkTypeId) -> True) -pattern LinkRef <- ((== linkRef) -> True) -pattern LinkTerm tm <- Term.App' (Term.Constructor' LinkRef LinkTermId) tm -pattern LinkType ty <- Term.App' (Term.Constructor' LinkRef LinkTypeId) ty - -unitType, pairType, optionalType, testResultType, - eitherType, ioErrorType, fileModeType, bufferModeType - :: Ord v => a -> Type v a -unitType a = Type.ref a unitRef -pairType a = Type.ref a pairRef -testResultType a = Type.app a (Type.vector a) (Type.ref a testResultRef) -optionalType a = Type.ref a optionalRef -eitherType a = Type.ref a eitherRef -ioErrorType a = Type.ref a ioErrorRef -fileModeType a = Type.ref a fileModeRef -bufferModeType a = Type.ref a bufferModeRef - -unitTerm :: Var v => a -> Term v a -unitTerm ann = Term.constructor ann unitRef 0 - -tupleConsTerm :: (Ord v, Semigroup a) - => Term2 vt at ap v a - -> Term2 vt at ap v a - -> Term2 vt at ap v a 
-tupleConsTerm hd tl = - Term.apps' (Term.constructor (ABT.annotation hd) pairRef 0) [hd, tl] - -tupleTerm :: (Var v, Monoid a) => [Term v a] -> Term v a -tupleTerm = foldr tupleConsTerm (unitTerm mempty) - --- delayed terms are just lambdas that take a single `()` arg --- `force` calls the function -forceTerm :: Var v => a -> a -> Term v a -> Term v a -forceTerm a au e = Term.app a e (unitTerm au) - -delayTerm :: Var v => a -> Term v a -> Term v a -delayTerm a = Term.lam a $ Var.named "()" - -unTupleTerm - :: Term.Term2 vt at ap v a - -> Maybe [Term.Term2 vt at ap v a] -unTupleTerm t = case t of - Term.Apps' (Term.Constructor' PairRef 0) [fst, snd] -> - (fst :) <$> unTupleTerm snd - Term.Constructor' UnitRef 0 -> Just [] - _ -> Nothing - -unTupleType :: Var v => Type v a -> Maybe [Type v a] -unTupleType t = case t of - Type.Apps' (Type.Ref' PairRef) [fst, snd] -> (fst :) <$> unTupleType snd - Type.Ref' UnitRef -> Just [] - _ -> Nothing - -unTuplePattern :: Pattern.Pattern loc -> Maybe [Pattern.Pattern loc] -unTuplePattern p = case p of - Pattern.Constructor _ PairRef 0 [fst, snd] -> (fst : ) <$> unTuplePattern snd - Pattern.Constructor _ UnitRef 0 [] -> Just [] - _ -> Nothing - -unUnitRef,unPairRef,unOptionalRef:: Reference -> Bool -unUnitRef = (== unitRef) -unPairRef = (== pairRef) -unOptionalRef = (== optionalRef) - diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs deleted file mode 100644 index b815b8f9a7..0000000000 --- a/parser-typechecker/src/Unison/Codebase.hs +++ /dev/null @@ -1,294 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase where - -import Unison.Prelude - -import Control.Lens ( _1, _2, (%=) ) -import Control.Monad.State ( State, evalState, get ) -import Data.Bifunctor ( bimap ) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import qualified Unison.Builtin as Builtin -import Unison.Codebase.Branch ( Branch ) -import 
qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.CodeLookup as CL -import qualified Unison.Codebase.Reflog as Reflog -import Unison.Codebase.SyncMode ( SyncMode ) -import qualified Unison.DataDeclaration as DD -import qualified Unison.Names2 as Names -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import Unison.Typechecker.TypeLookup (TypeLookup(TypeLookup)) -import qualified Unison.Typechecker.TypeLookup as TL -import qualified Unison.Parser as Parser -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Relation as Rel -import qualified Unison.Util.Set as Set -import qualified Unison.Var as Var -import Unison.Var ( Var ) -import qualified Unison.Runtime.IOSource as IOSource -import Unison.Symbol ( Symbol ) -import Unison.DataDeclaration (Decl) -import Unison.Term (Term) -import Unison.Type (Type) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import Unison.ShortHash (ShortHash) - -type DataDeclaration v a = DD.DataDeclaration v a -type EffectDeclaration v a = DD.EffectDeclaration v a - --- | this FileCodebase detail lives here, because the interface depends on it 🙃 -type CodebasePath = FilePath - -data Codebase m v a = - Codebase { getTerm :: Reference.Id -> m (Maybe (Term v a)) - , getTypeOfTermImpl :: Reference.Id -> m (Maybe (Type v a)) - , getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v a)) - - , putTerm :: Reference.Id -> Term v a -> Type v a -> m () - , putTypeDeclaration :: Reference.Id -> Decl v a -> m () - - , getRootBranch :: m (Either GetRootBranchError (Branch m)) - , putRootBranch :: Branch m -> m () - , rootBranchUpdates :: m (m (), m (Set Branch.Hash)) - , getBranchForHash :: Branch.Hash -> m (Maybe (Branch m)) - - , dependentsImpl :: Reference -> m (Set Reference.Id) - -- This copies all the dependencies of `b` from the 
specified - -- FileCodebase into this Codebase, and sets our root branch to `b` - , syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m () - -- This copies all the dependencies of `b` from the this Codebase - -- into the specified FileCodebase, and sets its _head to `b` - , syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m () - - -- Watch expressions are part of the codebase, the `Reference.Id` is - -- the hash of the source of the watch expression, and the `Term v a` - -- is the evaluated result of the expression, decompiled to a term. - , watches :: UF.WatchKind -> m [Reference.Id] - , getWatch :: UF.WatchKind -> Reference.Id -> m (Maybe (Term v a)) - , putWatch :: UF.WatchKind -> Reference.Id -> Term v a -> m () - - , getReflog :: m [Reflog.Entry] - , appendReflog :: Text -> Branch m -> Branch m -> m () - - -- list of terms of the given type - , termsOfTypeImpl :: Reference -> m (Set Referent.Id) - -- list of terms that mention the given type anywhere in their signature - , termsMentioningTypeImpl :: Reference -> m (Set Referent.Id) - -- number of base58 characters needed to distinguish any two references in the codebase - , hashLength :: m Int - , termReferencesByPrefix :: ShortHash -> m (Set Reference.Id) - , typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id) - , termReferentsByPrefix :: ShortHash -> m (Set Referent.Id) - - , branchHashLength :: m Int - , branchHashesByPrefix :: ShortBranchHash -> m (Set Branch.Hash) - } - -data GetRootBranchError - = NoRootBranch - | CouldntParseRootBranch String - | CouldntLoadRootBranch Branch.Hash - deriving Show - -data SyncFileCodebaseResult = SyncOk | UnknownDestinationRootBranch Branch.Hash | NotFastForward - -bootstrapNames :: Names.Names0 -bootstrapNames = - Builtin.names0 <> UF.typecheckedToNames0 IOSource.typecheckedFile - --- | Write all of the builtins types into the codebase and create empty namespace -initializeCodebase :: forall m. 
Monad m => Codebase m Symbol Parser.Ann -> m () -initializeCodebase c = do - let uf = (UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) - (Map.fromList Builtin.builtinEffectDecls) - mempty mempty) - addDefsToCodebase c uf - putRootBranch c (Branch.one Branch.empty0) - --- Feel free to refactor this to use some other type than TypecheckedUnisonFile --- if it makes sense to later. -addDefsToCodebase :: forall m v a. (Monad m, Var v) - => Codebase m v a -> UF.TypecheckedUnisonFile v a -> m () -addDefsToCodebase c uf = do - traverse_ (goType Right) (UF.dataDeclarationsId' uf) - traverse_ (goType Left) (UF.effectDeclarationsId' uf) - -- put terms - traverse_ goTerm (UF.hashTermsId uf) - where - goTerm (r, tm, tp) = putTerm c r tm tp - goType :: (t -> Decl v a) -> (Reference.Id, t) -> m () - goType f (ref, decl) = putTypeDeclaration c ref (f decl) - -getTypeOfConstructor :: - (Monad m, Ord v) => Codebase m v a -> Reference -> Int -> m (Maybe (Type v a)) -getTypeOfConstructor codebase (Reference.DerivedId r) cid = do - maybeDecl <- getTypeDeclaration codebase r - pure $ case maybeDecl of - Nothing -> Nothing - Just decl -> DD.typeOfConstructor (either DD.toDataDecl id decl) cid -getTypeOfConstructor _ r cid = - error $ "Don't know how to getTypeOfConstructor " ++ show r ++ " " ++ show cid - -typeLookupForDependencies - :: (Monad m, Var v, BuiltinAnnotation a) - => Codebase m v a -> Set Reference -> m (TL.TypeLookup v a) -typeLookupForDependencies codebase = foldM go mempty - where - go tl ref@(Reference.DerivedId id) = fmap (tl <>) $ - getTypeOfTerm codebase ref >>= \case - Just typ -> pure $ TypeLookup (Map.singleton ref typ) mempty mempty - Nothing -> getTypeDeclaration codebase id >>= \case - Just (Left ed) -> - pure $ TypeLookup mempty mempty (Map.singleton ref ed) - Just (Right dd) -> - pure $ TypeLookup mempty (Map.singleton ref dd) mempty - Nothing -> pure mempty - go tl Reference.Builtin{} = pure tl -- codebase isn't consulted for builtins - --- 
todo: can this be implemented in terms of TransitiveClosure.transitiveClosure? --- todo: add some tests on this guy? -transitiveDependencies - :: (Monad m, Var v) - => CL.CodeLookup v m a - -> Set Reference.Id - -> Reference.Id - -> m (Set Reference.Id) -transitiveDependencies code seen0 rid = if Set.member rid seen0 - then pure seen0 - else - let seen = Set.insert rid seen0 - getIds = Set.mapMaybe Reference.toId - in CL.getTerm code rid >>= \case - Just t -> - foldM (transitiveDependencies code) seen (getIds $ Term.dependencies t) - Nothing -> - CL.getTypeDeclaration code rid >>= \case - Nothing -> pure seen - Just (Left ed) -> foldM (transitiveDependencies code) - seen - (getIds $ DD.dependencies (DD.toDataDecl ed)) - Just (Right dd) -> foldM (transitiveDependencies code) - seen - (getIds $ DD.dependencies dd) - -toCodeLookup :: Codebase m v a -> CL.CodeLookup v m a -toCodeLookup c = CL.CodeLookup (getTerm c) (getTypeDeclaration c) - --- Like the other `makeSelfContained`, but takes and returns a `UnisonFile`. --- Any watches in the input `UnisonFile` will be watches in the returned --- `UnisonFile`. -makeSelfContained' - :: forall m v a . (Monad m, Monoid a, Var v) - => CL.CodeLookup v m a - -> UF.UnisonFile v a - -> m (UF.UnisonFile v a) -makeSelfContained' code uf = do - let UF.UnisonFileId ds0 es0 bs0 ws0 = uf - deps0 = getIds . Term.dependencies . snd <$> (UF.allWatches uf <> bs0) - where getIds = Set.mapMaybe Reference.toId - -- transitive dependencies (from codebase) of all terms (including watches) in the UF - deps <- foldM (transitiveDependencies code) Set.empty (Set.unions deps0) - -- load all decls from deps list - decls <- fmap catMaybes - . 
forM (toList deps) - $ \rid -> fmap (rid, ) <$> CL.getTypeDeclaration code rid - -- partition the decls into effects and data - let es1 :: [(Reference.Id, DD.EffectDeclaration v a)] - ds1 :: [(Reference.Id, DD.DataDeclaration v a)] - (es1, ds1) = partitionEithers [ bimap (r,) (r,) d | (r, d) <- decls ] - -- load all terms from deps list - bs1 <- fmap catMaybes - . forM (toList deps) - $ \rid -> fmap (rid, ) <$> CL.getTerm code rid - let - allVars :: Set v - allVars = Set.unions - [ UF.allVars uf - , Set.unions [ DD.allVars dd | (_, dd) <- ds1 ] - , Set.unions [ DD.allVars (DD.toDataDecl ed) | (_, ed) <- es1 ] - , Set.unions [ Term.allVars tm | (_, tm) <- bs1 ] - ] - refVar :: Reference.Id -> State (Set v, Map Reference.Id v) v - refVar r = do - m <- snd <$> get - case Map.lookup r m of - Just v -> pure v - Nothing -> do - v <- ABT.freshenS' _1 (Var.refNamed (Reference.DerivedId r)) - _2 %= Map.insert r v - pure v - assignVars :: [(Reference.Id, b)] -> State (Set v, Map Reference.Id v) [(v, (Reference.Id, b))] - assignVars = traverse (\e@(r, _) -> (,e) <$> refVar r) - unref :: Term v a -> State (Set v, Map Reference.Id v) (Term v a) - unref = ABT.visit go where - go t@(Term.Ref' (Reference.DerivedId r)) = - Just (Term.var (ABT.annotation t) <$> refVar r) - go _ = Nothing - unrefb = traverse (\(v, tm) -> (v,) <$> unref tm) - pair :: forall f a b. Applicative f => f a -> f b -> f (a,b) - pair = liftA2 (,) - uf' = flip evalState (allVars, Map.empty) $ do - datas' <- Map.union ds0 . Map.fromList <$> assignVars ds1 - effects' <- Map.union es0 . 
Map.fromList <$> assignVars es1 - -- bs0 is terms from the input file - bs0' <- unrefb bs0 - ws0' <- traverse unrefb ws0 - -- bs1 is dependency terms - bs1' <- traverse (\(r, tm) -> refVar r `pair` unref tm) bs1 - pure $ UF.UnisonFileId datas' effects' (bs1' ++ bs0') ws0' - pure uf' - -getTypeOfTerm :: (Applicative m, Var v, BuiltinAnnotation a) => - Codebase m v a -> Reference -> m (Maybe (Type v a)) -getTypeOfTerm c = \case - Reference.DerivedId h -> getTypeOfTermImpl c h - r@Reference.Builtin{} -> - pure $ fmap (const builtinAnnotation) - <$> Map.lookup r Builtin.termRefTypes - - --- The dependents of a builtin type is the set of builtin terms which --- mention that type. -dependents :: Functor m => Codebase m v a -> Reference -> m (Set Reference) -dependents c r - = Set.union (Builtin.builtinTypeDependents r) - . Set.map Reference.DerivedId - <$> dependentsImpl c r - -termsOfType :: (Var v, Functor m) => Codebase m v a -> Type v a -> m (Set Referent.Referent) -termsOfType c ty = - Set.union (Rel.lookupDom r Builtin.builtinTermsByType) - . Set.map (fmap Reference.DerivedId) - <$> termsOfTypeImpl c r - where - r = Type.toReference ty - -termsMentioningType :: (Var v, Functor m) => Codebase m v a -> Type v a -> m (Set Referent.Referent) -termsMentioningType c ty = - Set.union (Rel.lookupDom r Builtin.builtinTermsByTypeMention) - . Set.map (fmap Reference.DerivedId) - <$> termsMentioningTypeImpl c r - where - r = Type.toReference ty - --- todo: could have a way to look this up just by checking for a file rather than loading it -isTerm :: (Applicative m, Var v, BuiltinAnnotation a) - => Codebase m v a -> Reference -> m Bool -isTerm code = fmap isJust . 
getTypeOfTerm code - -isType :: Applicative m => Codebase m v a -> Reference -> m Bool -isType c r = case r of - Reference.Builtin{} -> pure $ Builtin.isBuiltinType r - Reference.DerivedId r -> isJust <$> getTypeDeclaration c r - -class BuiltinAnnotation a where - builtinAnnotation :: a - -instance BuiltinAnnotation Parser.Ann where - builtinAnnotation = Parser.Intrinsic diff --git a/parser-typechecker/src/Unison/Codebase/Branch.hs b/parser-typechecker/src/Unison/Codebase/Branch.hs deleted file mode 100644 index 0568038acd..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Branch.hs +++ /dev/null @@ -1,900 +0,0 @@ -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE RankNTypes #-} - -module Unison.Codebase.Branch - ( -- * Branch types - Branch(..) - , Branch0(..) - , MergeMode(..) - , Raw(..) - , Star - , Hash - , EditHash - , pattern Hash - - -- * Branch construction - , empty - , empty0 - , branch0 - , one - , toCausalRaw - , transform - - -- * Branch history - -- ** History queries - , isEmpty - , isEmpty0 - , isOne - , head - , headHash - , before - , findHistoricalHQs - , findHistoricalRefs - , findHistoricalRefs' - , namesDiff - -- ** History updates - , step - , stepEverywhere - , uncons - , merge - , merge' - - -- * Branch children - -- ** Children lenses - , children - -- ** Children queries - , toList0 - , getAt - , getAt' - , getAt0 - -- ** Children updates - , setChildBranch - , stepManyAt - , stepManyAt0 - , stepManyAtM - , modifyAtM - - -- * Branch terms/types - -- ** Term/type lenses - , terms - , types - -- ** Term/type queries - , deepReferents - , deepTypeReferences - , toNames0 - -- ** Term/type updates - , addTermName - , addTypeName - , deleteTermName - , deleteTypeName - - - -- * Branch patches - -- ** Patch queries - , deepEdits' - , getPatch - , getMaybePatch - -- ** Patch updates - , replacePatch - , deletePatch - , modifyPatches - - -- * 
Branch serialization - , cachedRead - , boundedCache - , Cache - , sync - - -- * Unused - , childrenR - , debugPaths - , editedPatchRemoved - , editsR - , findHistoricalSHs - , fork - , lca - , move - , numHashChars - , printDebugPaths - , removedPatchEdited - , stepAt - , stepAtM - , termsR - , typesR - ) where - -import Unison.Prelude hiding (empty) - -import Prelude hiding (head,read,subtract) - -import Control.Lens hiding ( children, cons, transform, uncons ) -import qualified Control.Monad.State as State -import Control.Monad.State ( StateT ) -import Data.Bifunctor ( second ) -import qualified Data.Map as Map -import qualified Data.Map.Merge.Lazy as Map -import qualified Data.Set as Set -import qualified Unison.Codebase.Patch as Patch -import Unison.Codebase.Patch ( Patch ) -import qualified Unison.Codebase.Causal as Causal -import Unison.Codebase.Causal ( Causal - , pattern RawOne - , pattern RawCons - , pattern RawMerge - ) -import Unison.Codebase.Path ( Path(..) ) -import qualified Unison.Codebase.Path as Path -import Unison.NameSegment ( NameSegment ) -import qualified Unison.NameSegment as NameSegment -import qualified Unison.Codebase.Metadata as Metadata -import qualified Unison.Hash as Hash -import Unison.Hashable ( Hashable ) -import qualified Unison.Hashable as H -import Unison.Name ( Name(..) 
) -import qualified Unison.Name as Name -import qualified Unison.Names2 as Names -import qualified Unison.Names3 as Names -import Unison.Names2 ( Names'(Names), Names0 ) -import Unison.Reference ( Reference ) -import Unison.Referent ( Referent ) -import qualified Unison.Referent as Referent -import qualified Unison.Reference as Reference - -import qualified Unison.Util.Cache as Cache -import qualified Unison.Util.Relation as R -import Unison.Util.Relation ( Relation ) -import qualified Unison.Util.Relation4 as R4 -import qualified Unison.Util.List as List -import Unison.Util.Map ( unionWithM ) -import qualified Unison.Util.Star3 as Star3 -import Unison.ShortHash (ShortHash) -import qualified Unison.ShortHash as SH -import qualified Unison.HashQualified as HQ -import Unison.HashQualified (HashQualified) -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency (LabeledDependency) - -newtype Branch m = Branch { _history :: Causal m Raw (Branch0 m) } - deriving (Eq, Ord) - -type Hash = Causal.RawHash Raw -type EditHash = Hash.Hash - --- Star3 r n Metadata.Type (Metadata.Type, Metadata.Value) -type Star r n = Metadata.Star r n - -data Branch0 m = Branch0 - { _terms :: Star Referent NameSegment - , _types :: Star Reference NameSegment - , _children :: Map NameSegment (Branch m) - , _edits :: Map NameSegment (EditHash, m Patch) - -- names and metadata for this branch and its children - -- (ref, (name, value)) iff ref has metadata `value` at name `name` - , deepTerms :: Relation Referent Name - , deepTypes :: Relation Reference Name - , deepTermMetadata :: Metadata.R4 Referent Name - , deepTypeMetadata :: Metadata.R4 Reference Name - , deepPaths :: Set Path - , deepEdits :: Map Name EditHash - } - --- Represents a shallow diff of a Branch0. --- Each of these `Star`s contain metadata as well, so an entry in --- `added` or `removed` could be an update to the metadata. 
-data BranchDiff = BranchDiff - { addedTerms :: Star Referent NameSegment - , removedTerms :: Star Referent NameSegment - , addedTypes :: Star Reference NameSegment - , removedTypes :: Star Reference NameSegment - , changedPatches :: Map NameSegment Patch.PatchDiff - } deriving (Eq, Ord, Show) - -instance Semigroup BranchDiff where - left <> right = BranchDiff - { addedTerms = addedTerms left <> addedTerms right - , removedTerms = removedTerms left <> removedTerms right - , addedTypes = addedTypes left <> addedTypes right - , removedTypes = removedTypes left <> removedTypes right - , changedPatches = - Map.unionWith (<>) (changedPatches left) (changedPatches right) - } - -instance Monoid BranchDiff where - mappend = (<>) - mempty = BranchDiff mempty mempty mempty mempty mempty - --- The raw Branch -data Raw = Raw - { _termsR :: Star Referent NameSegment - , _typesR :: Star Reference NameSegment - , _childrenR :: Map NameSegment Hash - , _editsR :: Map NameSegment EditHash - } - -makeLenses ''Branch -makeLensesFor [("_edits", "edits")] ''Branch0 -makeLenses ''Raw - -toNames0 :: Branch0 m -> Names0 -toNames0 b = Names (R.swap . deepTerms $ b) - (R.swap . deepTypes $ b) - --- This stops searching for a given ShortHash once it encounters --- any term or type in any Branch0 that satisfies that ShortHash. -findHistoricalSHs - :: Monad m => Set ShortHash -> Branch m -> m (Set ShortHash, Names0) -findHistoricalSHs = findInHistory - (\sh r _n -> sh `SH.isPrefixOf` Referent.toShortHash r) - (\sh r _n -> sh `SH.isPrefixOf` Reference.toShortHash r) - --- This stops searching for a given HashQualified once it encounters --- any term or type in any Branch0 that satisfies that HashQualified. 
-findHistoricalHQs :: Monad m - => Set HashQualified - -> Branch m - -> m (Set HashQualified, Names0) -findHistoricalHQs = findInHistory - (\hq r n -> HQ.matchesNamedReferent n r hq) - (\hq r n -> HQ.matchesNamedReference n r hq) - -findHistoricalRefs :: Monad m => Set LabeledDependency -> Branch m - -> m (Set LabeledDependency, Names0) -findHistoricalRefs = findInHistory - (\query r _n -> LD.fold (const False) (==r) query) - (\query r _n -> LD.fold (==r) (const False) query) - -findHistoricalRefs' :: Monad m => Set Reference -> Branch m - -> m (Set Reference, Names0) -findHistoricalRefs' = findInHistory - (\queryRef r _n -> r == Referent.Ref queryRef) - (\queryRef r _n -> r == queryRef) - -findInHistory :: forall m q. (Monad m, Ord q) - => (q -> Referent -> Name -> Bool) - -> (q -> Reference -> Name -> Bool) - -> Set q -> Branch m -> m (Set q, Names0) -findInHistory termMatches typeMatches queries b = - (Causal.foldHistoryUntil f (queries, mempty) . _history) b <&> \case - -- could do something more sophisticated here later to report that some SH - -- couldn't be found anywhere in the history. but for now, I assume that - -- the normal thing will happen when it doesn't show up in the namespace. - Causal.Satisfied (_, names) -> (mempty, names) - Causal.Unsatisfied (missing, names) -> (missing, names) - where - -- in order to not favor terms over types, we iterate through the ShortHashes, - -- for each `remainingQueries`, if we find a matching Referent or Reference, - -- we remove `q` from the accumulated `remainingQueries`, and add the Ref* to - -- the accumulated `names0`. 
- f acc@(remainingQueries, _) b0 = (acc', null remainingQueries') - where - acc'@(remainingQueries', _) = foldl' findQ acc remainingQueries - findQ :: (Set q, Names0) -> q -> (Set q, Names0) - findQ acc sh = - foldl' (doType sh) (foldl' (doTerm sh) acc - (R.toList $ deepTerms b0)) - (R.toList $ deepTypes b0) - doTerm q acc@(remainingSHs, names0) (r, n) = if termMatches q r n - then (Set.delete q remainingSHs, Names.addTerm n r names0) else acc - doType q acc@(remainingSHs, names0) (r, n) = if typeMatches q r n - then (Set.delete q remainingSHs, Names.addType n r names0) else acc - -deepReferents :: Branch0 m -> Set Referent -deepReferents = R.dom . deepTerms - -deepTypeReferences :: Branch0 m -> Set Reference -deepTypeReferences = R.dom . deepTypes - -terms :: Lens' (Branch0 m) (Star Referent NameSegment) -terms = lens _terms (\Branch0{..} x -> branch0 x _types _children _edits) - -types :: Lens' (Branch0 m) (Star Reference NameSegment) -types = lens _types (\Branch0{..} x -> branch0 _terms x _children _edits) - -children :: Lens' (Branch0 m) (Map NameSegment (Branch m)) -children = lens _children (\Branch0{..} x -> branch0 _terms _types x _edits) - --- creates a Branch0 from the primary fields and derives the others. -branch0 :: Metadata.Star Referent NameSegment - -> Metadata.Star Reference NameSegment - -> Map NameSegment (Branch m) - -> Map NameSegment (EditHash, m Patch) - -> Branch0 m -branch0 terms types children edits = - Branch0 terms types children edits - deepTerms' deepTypes' - deepTermMetadata' deepTypeMetadata' - deepPaths' deepEdits' - where - nameSegToName = Name.unsafeFromText . NameSegment.toText - deepTerms' = (R.mapRan nameSegToName . Star3.d1) terms - <> foldMap go (Map.toList children) - where - go (nameSegToName -> n, b) = - R.mapRan (Name.joinDot n) (deepTerms $ head b) -- could use mapKeysMonotonic - deepTypes' = (R.mapRan nameSegToName . 
Star3.d1) types - <> foldMap go (Map.toList children) - where - go (nameSegToName -> n, b) = - R.mapRan (Name.joinDot n) (deepTypes $ head b) -- could use mapKeysMonotonic - deepTermMetadata' = R4.mapD2 nameSegToName (Metadata.starToR4 terms) - <> foldMap go (Map.toList children) - where - go (nameSegToName -> n, b) = - R4.mapD2 (Name.joinDot n) (deepTermMetadata $ head b) - deepTypeMetadata' = R4.mapD2 nameSegToName (Metadata.starToR4 types) - <> foldMap go (Map.toList children) - where - go (nameSegToName -> n, b) = - R4.mapD2 (Name.joinDot n) (deepTypeMetadata $ head b) - deepPaths' = Set.map Path.singleton (Map.keysSet children) - <> foldMap go (Map.toList children) - where go (nameSeg, b) = Set.map (Path.cons nameSeg) (deepPaths $ head b) - deepEdits' = Map.mapKeys nameSegToName (Map.map fst edits) - <> foldMap go (Map.toList children) - where - go (nameSeg, b) = - Map.mapKeys (nameSegToName nameSeg `Name.joinDot`) . deepEdits $ head b - -head :: Branch m -> Branch0 m -head (Branch c) = Causal.head c - -headHash :: Branch m -> Hash -headHash (Branch c) = Causal.currentHash c - -deepEdits' :: Branch0 m -> Map Name (EditHash, m Patch) -deepEdits' b = go id b where - -- can change this to an actual prefix once Name is a [NameSegment] - go :: (Name -> Name) -> Branch0 m -> Map Name (EditHash, m Patch) - go addPrefix Branch0{..} = - Map.mapKeysMonotonic (addPrefix . Name.fromSegment) _edits - <> foldMap f (Map.toList _children) - where - f :: (NameSegment, Branch m) -> Map Name (EditHash, m Patch) - f (c, b) = go (addPrefix . Name.joinDot (Name.fromSegment c)) (head b) - -data MergeMode = RegularMerge | SquashMerge deriving (Eq,Ord,Show) - -merge :: forall m . 
Monad m => Branch m -> Branch m -> m (Branch m) -merge = merge' RegularMerge - --- Discards the history of a Branch0's children, recursively -discardHistory0 :: Applicative m => Branch0 m -> Branch0 m -discardHistory0 = over children (fmap tweak) where - tweak b = cons (discardHistory0 (head b)) empty - -merge' :: forall m . Monad m => MergeMode -> Branch m -> Branch m -> m (Branch m) -merge' _ b1 b2 | isEmpty b1 = pure b2 -merge' mode b1 b2 | isEmpty b2 = case mode of - RegularMerge -> pure b1 - SquashMerge -> pure $ cons (discardHistory0 (head b1)) b2 -merge' mode (Branch x) (Branch y) = - Branch <$> case mode of - RegularMerge -> Causal.threeWayMerge combine x y - SquashMerge -> Causal.squashMerge combine x y - where - combine :: Maybe (Branch0 m) -> Branch0 m -> Branch0 m -> m (Branch0 m) - combine Nothing l r = merge0 mode l r - combine (Just ca) l r = do - dl <- diff0 ca l - dr <- diff0 ca r - head0 <- apply ca (dl <> dr) - children <- Map.mergeA - (Map.traverseMaybeMissing $ combineMissing ca) - (Map.traverseMaybeMissing $ combineMissing ca) - (Map.zipWithAMatched $ const (merge' mode)) - (_children l) (_children r) - pure $ branch0 (_terms head0) (_types head0) children (_edits head0) - - combineMissing ca k cur = - case Map.lookup k (_children ca) of - Nothing -> pure $ Just cur - Just old -> do - nw <- merge' mode (cons empty0 old) cur - if isEmpty0 $ head nw - then pure Nothing - else pure $ Just nw - - apply :: Branch0 m -> BranchDiff -> m (Branch0 m) - apply b0 BranchDiff {..} = do - patches <- sequenceA - $ Map.differenceWith patchMerge (pure @m <$> _edits b0) changedPatches - let newPatches = makePatch <$> Map.difference changedPatches (_edits b0) - makePatch Patch.PatchDiff {..} = - let p = Patch.Patch _addedTermEdits _addedTypeEdits - in (H.accumulate' p, pure p) - pure $ branch0 (Star3.difference (_terms b0) removedTerms <> addedTerms) - (Star3.difference (_types b0) removedTypes <> addedTypes) - (_children b0) - (patches <> newPatches) - 
patchMerge mhp Patch.PatchDiff {..} = Just $ do - (_, mp) <- mhp - p <- mp - let np = Patch.Patch - { _termEdits = R.difference (Patch._termEdits p) _removedTermEdits - <> _addedTermEdits - , _typeEdits = R.difference (Patch._typeEdits p) _removedTypeEdits - <> _addedTypeEdits - } - pure (H.accumulate' np, pure np) - --- `before b1 b2` is true if `b2` incorporates all of `b1` -before :: Monad m => Branch m -> Branch m -> m Bool -before (Branch x) (Branch y) = Causal.before x y - -merge0 :: forall m. Monad m => MergeMode -> Branch0 m -> Branch0 m -> m (Branch0 m) -merge0 mode b1 b2 = do - c3 <- unionWithM (merge' mode) (_children b1) (_children b2) - e3 <- unionWithM g (_edits b1) (_edits b2) - pure $ branch0 (_terms b1 <> _terms b2) - (_types b1 <> _types b2) - c3 - e3 - where - g :: (EditHash, m Patch) -> (EditHash, m Patch) -> m (EditHash, m Patch) - g (h1, m1) (h2, _) | h1 == h2 = pure (h1, m1) - g (_, m1) (_, m2) = do - e1 <- m1 - e2 <- m2 - let e3 = e1 <> e2 - pure (H.accumulate' e3, pure e3) - -pattern Hash h = Causal.RawHash h - -toList0 :: Branch0 m -> [(Path, Branch0 m)] -toList0 = go Path.empty where - go p b = (p, b) : (Map.toList (_children b) >>= (\(seg, cb) -> - go (Path.snoc p seg) (head cb) )) - -printDebugPaths :: Branch m -> String -printDebugPaths = unlines . map show . Set.toList . debugPaths - -debugPaths :: Branch m -> Set (Path, Hash) -debugPaths = go Path.empty where - go p b = Set.insert (p, headHash b) . Set.unions $ - [ go (Path.snoc p seg) b | (seg, b) <- Map.toList $ _children (head b) ] - -data Target = TargetType | TargetTerm | TargetBranch - deriving (Eq, Ord, Show) - -instance Eq (Branch0 m) where - a == b = view terms a == view terms b - && view types a == view types b - && view children a == view children b - && (fmap fst . view edits) a == (fmap fst . view edits) b - -data ForkFailure = SrcNotFound | DestExists - --- consider delegating to Names.numHashChars when ready to implement? --- are those enough? 
--- could move this to a read-only field in Branch0 --- could move a Names0 to a read-only field in Branch0 until it gets too big -numHashChars :: Branch m -> Int -numHashChars _b = 3 - --- This type is a little ugly, so we wrap it up with a nice type alias for --- use outside this module. -type Cache m = Cache.Cache m (Causal.RawHash Raw) (Causal m Raw (Branch0 m)) - -boundedCache :: MonadIO m => Word -> m (Cache m) -boundedCache = Cache.semispaceCache - --- Can use `Cache.nullCache` to disable caching if needed -cachedRead :: forall m . Monad m - => Cache m - -> Causal.Deserialize m Raw Raw - -> (EditHash -> m Patch) - -> Hash - -> m (Branch m) -cachedRead cache deserializeRaw deserializeEdits h = - Branch <$> Causal.cachedRead cache d h - where - fromRaw :: Raw -> m (Branch0 m) - fromRaw Raw {..} = do - children <- traverse go _childrenR - edits <- for _editsR $ \hash -> (hash,) . pure <$> deserializeEdits hash - pure $ branch0 _termsR _typesR children edits - go = cachedRead cache deserializeRaw deserializeEdits - d :: Causal.Deserialize m Raw (Branch0 m) - d h = deserializeRaw h >>= \case - RawOne raw -> RawOne <$> fromRaw raw - RawCons raw h -> flip RawCons h <$> fromRaw raw - RawMerge raw hs -> flip RawMerge hs <$> fromRaw raw - -sync - :: Monad m - => (Hash -> m Bool) - -> Causal.Serialize m Raw Raw - -> (EditHash -> m Patch -> m ()) - -> Branch m - -> m () -sync exists serializeRaw serializeEdits b = do - _written <- State.execStateT (sync' exists serializeRaw serializeEdits b) mempty - -- traceM $ "Branch.sync wrote " <> show (Set.size written) <> " namespace files." - pure () - --- serialize a `Branch m` indexed by the hash of its corresponding Raw -sync' - :: forall m - . 
Monad m - => (Hash -> m Bool) - -> Causal.Serialize m Raw Raw - -> (EditHash -> m Patch -> m ()) - -> Branch m - -> StateT (Set Hash) m () -sync' exists serializeRaw serializeEdits b = Causal.sync exists - serialize0 - (view history b) - where - serialize0 :: Causal.Serialize (StateT (Set Hash) m) Raw (Branch0 m) - serialize0 h b0 = case b0 of - RawOne b0 -> do - writeB0 b0 - lift $ serializeRaw h $ RawOne (toRaw b0) - RawCons b0 ht -> do - writeB0 b0 - lift $ serializeRaw h $ RawCons (toRaw b0) ht - RawMerge b0 hs -> do - writeB0 b0 - lift $ serializeRaw h $ RawMerge (toRaw b0) hs - where - writeB0 :: Branch0 m -> StateT (Set Hash) m () - writeB0 b0 = do - for_ (view children b0) $ \c -> do - queued <- State.get - when (Set.notMember (headHash c) queued) $ - sync' exists serializeRaw serializeEdits c - for_ (view edits b0) (lift . uncurry serializeEdits) - - -- this has to serialize the branch0 and its descendants in the tree, - -- and then serialize the rest of the history of the branch as well - -toRaw :: Branch0 m -> Raw -toRaw Branch0 {..} = - Raw _terms _types (headHash <$> _children) (fst <$> _edits) - -toCausalRaw :: Branch m -> Causal.Raw Raw Raw -toCausalRaw = \case - Branch (Causal.One _h e) -> RawOne (toRaw e) - Branch (Causal.Cons _h e (ht, _m)) -> RawCons (toRaw e) ht - Branch (Causal.Merge _h e tls) -> RawMerge (toRaw e) (Map.keysSet tls) - --- copy a path to another path -fork - :: Applicative m - => Path - -> Path - -> Branch m - -> Either ForkFailure (Branch m) -fork src dest root = case getAt src root of - Nothing -> Left SrcNotFound - Just src' -> case setIfNotExists dest src' root of - Nothing -> Left DestExists - Just root' -> Right root' - --- Move the node at src to dest. --- It's okay if `dest` is inside `src`, just create empty levels. --- Try not to `step` more than once at each node. 
-move :: Applicative m - => Path - -> Path - -> Branch m - -> Either ForkFailure (Branch m) -move src dest root = case getAt src root of - Nothing -> Left SrcNotFound - Just src' -> - -- make sure dest doesn't already exist - case getAt dest root of - Just _destExists -> Left DestExists - Nothing -> - -- find and update common ancestor of `src` and `dest`: - Right $ modifyAt ancestor go root - where - (ancestor, relSrc, relDest) = Path.relativeToAncestor src dest - go = deleteAt relSrc . setAt relDest src' - -setIfNotExists - :: Applicative m => Path -> Branch m -> Branch m -> Maybe (Branch m) -setIfNotExists dest b root = case getAt dest root of - Just _destExists -> Nothing - Nothing -> Just $ setAt dest b root - -setAt :: Applicative m => Path -> Branch m -> Branch m -> Branch m -setAt path b = modifyAt path (const b) - -deleteAt :: Applicative m => Path -> Branch m -> Branch m -deleteAt path = setAt path empty - --- returns `Nothing` if no Branch at `path` or if Branch is empty at `path` -getAt :: Path - -> Branch m - -> Maybe (Branch m) -getAt path root = case Path.uncons path of - Nothing -> if isEmpty root then Nothing else Just root - Just (seg, path) -> case Map.lookup seg (_children $ head root) of - Just b -> getAt path b - Nothing -> Nothing - -getAt' :: Path -> Branch m -> Branch m -getAt' p b = fromMaybe empty $ getAt p b - -getAt0 :: Path -> Branch0 m -> Branch0 m -getAt0 p b = case Path.uncons p of - Nothing -> b - Just (seg, path) -> case Map.lookup seg (_children b) of - Just c -> getAt0 path (head c) - Nothing -> empty0 - -empty :: Branch m -empty = Branch $ Causal.one empty0 - -one :: Branch0 m -> Branch m -one = Branch . 
Causal.one - -empty0 :: Branch0 m -empty0 = - Branch0 mempty mempty mempty mempty mempty mempty mempty mempty mempty mempty - -isEmpty0 :: Branch0 m -> Bool -isEmpty0 = (== empty0) - -isEmpty :: Branch m -> Bool -isEmpty = (== empty) - -step :: Applicative m => (Branch0 m -> Branch0 m) -> Branch m -> Branch m -step f = over history (Causal.stepDistinct f) - -stepM :: (Monad m, Monad n) => (Branch0 m -> n (Branch0 m)) -> Branch m -> n (Branch m) -stepM f = mapMOf history (Causal.stepDistinctM f) - -cons :: Applicative m => Branch0 m -> Branch m -> Branch m -cons = step . const - -isOne :: Branch m -> Bool -isOne (Branch Causal.One{}) = True -isOne _ = False - -uncons :: Applicative m => Branch m -> m (Maybe (Branch0 m, Branch m)) -uncons (Branch b) = go <$> Causal.uncons b where - go = over (_Just . _2) Branch - --- Modify the branch0 at the head of at `path` with `f`, --- after creating it if necessary. Preserves history. -stepAt :: forall m. Applicative m - => Path - -> (Branch0 m -> Branch0 m) - -> Branch m -> Branch m -stepAt p f = modifyAt p g where - g :: Branch m -> Branch m - g (Branch b) = Branch . Causal.consDistinct (f (Causal.head b)) $ b - -stepManyAt :: (Monad m, Foldable f) - => f (Path, Branch0 m -> Branch0 m) -> Branch m -> Branch m -stepManyAt actions = step (stepManyAt0 actions) - --- Modify the branch0 at the head of at `path` with `f`, --- after creating it if necessary. Preserves history. -stepAtM :: forall n m. (Functor n, Applicative m) - => Path -> (Branch0 m -> n (Branch0 m)) -> Branch m -> n (Branch m) -stepAtM p f = modifyAtM p g where - g :: Branch m -> n (Branch m) - g (Branch b) = do - b0' <- f (Causal.head b) - pure $ Branch . Causal.consDistinct b0' $ b - -stepManyAtM :: (Monad m, Monad n, Foldable f) - => f (Path, Branch0 m -> n (Branch0 m)) -> Branch m -> n (Branch m) -stepManyAtM actions = stepM (stepManyAt0M actions) - --- starting at the leaves, apply `f` to every level of the branch. 
-stepEverywhere - :: Applicative m => (Branch0 m -> Branch0 m) -> (Branch0 m -> Branch0 m) -stepEverywhere f Branch0 {..} = f (branch0 _terms _types children _edits) - where children = fmap (step $ stepEverywhere f) _children - --- Creates a function to fix up the children field._1 --- If the action emptied a child, then remove the mapping, --- otherwise update it. --- Todo: Fix this in hashing & serialization instead of here? -getChildBranch :: NameSegment -> Branch0 m -> Branch m -getChildBranch seg b = fromMaybe empty $ Map.lookup seg (_children b) - -setChildBranch :: NameSegment -> Branch m -> Branch0 m -> Branch0 m -setChildBranch seg b = over children (updateChildren seg b) - -getPatch :: Applicative m => NameSegment -> Branch0 m -> m Patch -getPatch seg b = case Map.lookup seg (_edits b) of - Nothing -> pure Patch.empty - Just (_, p) -> p - -getMaybePatch :: Applicative m => NameSegment -> Branch0 m -> m (Maybe Patch) -getMaybePatch seg b = case Map.lookup seg (_edits b) of - Nothing -> pure Nothing - Just (_, p) -> Just <$> p - -modifyPatches - :: Monad m => NameSegment -> (Patch -> Patch) -> Branch0 m -> m (Branch0 m) -modifyPatches seg f = mapMOf edits update - where - update m = do - p' <- case Map.lookup seg m of - Nothing -> pure $ f Patch.empty - Just (_, p) -> f <$> p - let h = H.accumulate' p' - pure $ Map.insert seg (h, pure p') m - -replacePatch :: Applicative m => NameSegment -> Patch -> Branch0 m -> Branch0 m -replacePatch n p = over edits (Map.insert n (H.accumulate' p, pure p)) - -deletePatch :: NameSegment -> Branch0 m -> Branch0 m -deletePatch n = over edits (Map.delete n) - -updateChildren ::NameSegment - -> Branch m - -> Map NameSegment (Branch m) - -> Map NameSegment (Branch m) -updateChildren seg updatedChild = - if isEmpty updatedChild - then Map.delete seg - else Map.insert seg updatedChild - --- Modify the Branch at `path` with `f`, after creating it if necessary. --- Because it's a `Branch`, it overwrites the history at `path`. 
-modifyAt :: Applicative m - => Path -> (Branch m -> Branch m) -> Branch m -> Branch m -modifyAt path f = runIdentity . modifyAtM path (pure . f) - --- Modify the Branch at `path` with `f`, after creating it if necessary. --- Because it's a `Branch`, it overwrites the history at `path`. -modifyAtM - :: forall n m - . Functor n - => Applicative m -- because `Causal.cons` uses `pure` - => Path - -> (Branch m -> n (Branch m)) - -> Branch m - -> n (Branch m) -modifyAtM path f b = case Path.uncons path of - Nothing -> f b - Just (seg, path) -> do -- Functor - let child = getChildBranch seg (head b) - child' <- modifyAtM path f child - -- step the branch by updating its children according to fixup - pure $ step (setChildBranch seg child') b - --- stepManyAt0 consolidates several changes into a single step -stepManyAt0 :: forall f m . (Monad m, Foldable f) - => f (Path, Branch0 m -> Branch0 m) - -> Branch0 m -> Branch0 m -stepManyAt0 actions = - runIdentity . stepManyAt0M [ (p, pure . f) | (p,f) <- toList actions ] - -stepManyAt0M :: forall m n f . 
(Monad m, Monad n, Foldable f) - => f (Path, Branch0 m -> n (Branch0 m)) - -> Branch0 m -> n (Branch0 m) -stepManyAt0M actions b = go (toList actions) b where - go :: [(Path, Branch0 m -> n (Branch0 m))] -> Branch0 m -> n (Branch0 m) - go actions b = let - -- combines the functions that apply to this level of the tree - currentAction b = foldM (\b f -> f b) b [ f | (Path.Empty, f) <- actions ] - - -- groups the actions based on the child they apply to - childActions :: Map NameSegment [(Path, Branch0 m -> n (Branch0 m))] - childActions = - List.multimap [ (seg, (rest,f)) | (seg :< rest, f) <- actions ] - - -- alters the children of `b` based on the `childActions` map - stepChildren :: Map NameSegment (Branch m) -> n (Map NameSegment (Branch m)) - stepChildren children0 = foldM g children0 $ Map.toList childActions - where - g children (seg, actions) = do - -- Recursively applies the relevant actions to the child branch - -- The `findWithDefault` is important - it allows the stepManyAt - -- to create new children at paths that don't previously exist. - child <- stepM (go actions) (Map.findWithDefault empty seg children0) - pure $ updateChildren seg child children - in do - c2 <- stepChildren (view children b) - currentAction (set children c2 b) - -instance Hashable (Branch0 m) where - tokens b = - [ H.accumulateToken (_terms b) - , H.accumulateToken (_types b) - , H.accumulateToken (headHash <$> _children b) - ] - --- getLocalBranch :: Hash -> IO Branch --- getGithubBranch :: RemotePath -> IO Branch --- getLocalEdit :: GUID -> IO Patch - --- todo: consider inlining these into Actions2 -addTermName - :: Referent -> NameSegment -> Metadata.Metadata -> Branch0 m -> Branch0 m -addTermName r new md = - over terms (Metadata.insertWithMetadata (r, md) . Star3.insertD1 (r, new)) - -addTypeName - :: Reference -> NameSegment -> Metadata.Metadata -> Branch0 m -> Branch0 m -addTypeName r new md = - over types (Metadata.insertWithMetadata (r, md) . 
Star3.insertD1 (r, new)) - --- addTermNameAt :: Path.Split -> Referent -> Branch0 m -> Branch0 m --- addTypeNameAt :: Path.Split -> Reference -> Branch0 m -> Branch0 m - -deleteTermName :: Referent -> NameSegment -> Branch0 m -> Branch0 m -deleteTermName r n b | Star3.memberD1 (r,n) (view terms b) - = over terms (Star3.deletePrimaryD1 (r,n)) b -deleteTermName _ _ b = b - -deleteTypeName :: Reference -> NameSegment -> Branch0 m -> Branch0 m -deleteTypeName r n b | Star3.memberD1 (r,n) (view types b) - = over types (Star3.deletePrimaryD1 (r,n)) b -deleteTypeName _ _ b = b - -namesDiff :: Branch m -> Branch m -> Names.Diff -namesDiff b1 b2 = Names.diff0 (toNames0 (head b1)) (toNames0 (head b2)) - -lca :: Monad m => Branch m -> Branch m -> m (Maybe (Branch m)) -lca (Branch a) (Branch b) = fmap Branch <$> Causal.lca a b - -diff0 :: Monad m => Branch0 m -> Branch0 m -> m BranchDiff -diff0 old new = do - newEdits <- sequenceA $ snd <$> _edits new - oldEdits <- sequenceA $ snd <$> _edits old - let diffEdits = Map.merge (Map.mapMissing $ \_ p -> Patch.diff p mempty) - (Map.mapMissing $ \_ p -> Patch.diff mempty p) - (Map.zipWithMatched (const Patch.diff)) - newEdits - oldEdits - pure $ BranchDiff - { addedTerms = Star3.difference (_terms new) (_terms old) - , removedTerms = Star3.difference (_terms old) (_terms new) - , addedTypes = Star3.difference (_types new) (_types old) - , removedTypes = Star3.difference (_types old) (_types new) - , changedPatches = diffEdits - } - -transform :: Functor m => (forall a . m a -> n a) -> Branch m -> Branch n -transform f b = case _history b of - causal -> Branch . Causal.transform f $ transformB0s f causal - where - transformB0 :: Functor m => (forall a . m a -> n a) -> Branch0 m -> Branch0 n - transformB0 f b = - b { _children = transform f <$> _children b - , _edits = second f <$> _edits b - } - - transformB0s :: Functor m => (forall a . 
m a -> n a) - -> Causal m Raw (Branch0 m) - -> Causal m Raw (Branch0 n) - transformB0s f = Causal.unsafeMapHashPreserving (transformB0 f) - -data BranchAttentions = BranchAttentions - { -- Patches that were edited on the right but entirely removed on the left. - removedPatchEdited :: [Name] - -- Patches that were edited on the left but entirely removed on the right. - , editedPatchRemoved :: [Name] - } - -instance Semigroup BranchAttentions where - BranchAttentions edited1 removed1 <> BranchAttentions edited2 removed2 - = BranchAttentions (edited1 <> edited2) (removed1 <> removed2) - -instance Monoid BranchAttentions where - mempty = BranchAttentions [] [] - mappend = (<>) - -data RefCollisions = - RefCollisions { termCollisions :: Relation Name Name - , typeCollisions :: Relation Name Name - } deriving (Eq, Show) - -instance Semigroup RefCollisions where - (<>) = mappend -instance Monoid RefCollisions where - mempty = RefCollisions mempty mempty - mappend r1 r2 = RefCollisions (termCollisions r1 <> termCollisions r2) - (typeCollisions r1 <> typeCollisions r2) diff --git a/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs b/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs deleted file mode 100644 index d54e2ace49..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs +++ /dev/null @@ -1,89 +0,0 @@ -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.Codebase.Branch.Dependencies where - -import Data.Set (Set) -import Data.Foldable (toList) -import qualified Data.Set as Set -import qualified Data.Map as Map -import Unison.Codebase.Branch (Branch(Branch), Branch0, EditHash) -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import GHC.Generics (Generic) -import Data.Monoid.Generic -import Data.Map (Map) -import 
Unison.NameSegment (NameSegment) -import Unison.Referent (Referent) -import Unison.Codebase.Patch (Patch) -import qualified Unison.Util.Star3 as Star3 -import qualified Unison.Util.Relation as R -import Unison.Reference (Reference(DerivedId)) - -type Branches m = [(Branch.Hash, Maybe (m (Branch m)))] - -data Dependencies = Dependencies - { patches :: Set EditHash - , terms :: Set Reference.Id - , decls :: Set Reference.Id - } - deriving Show - deriving Generic - deriving Semigroup via GenericSemigroup Dependencies - deriving Monoid via GenericMonoid Dependencies - -data Dependencies' = Dependencies' - { patches' :: [EditHash] - , terms' :: [Reference.Id] - , decls' :: [Reference.Id] - } deriving Show - -to' :: Dependencies -> Dependencies' -to' Dependencies{..} = Dependencies' (toList patches) (toList terms) (toList decls) - -fromBranch :: Applicative m => Branch m -> (Branches m, Dependencies) -fromBranch (Branch c) = case c of - Causal.One _hh e -> fromBranch0 e - Causal.Cons _hh e (h, m) -> fromBranch0 e <> fromTails (Map.singleton h m) - Causal.Merge _hh e tails -> fromBranch0 e <> fromTails tails - where - fromTails m = ([(h, Just (Branch <$> mc)) | (h, mc) <- Map.toList m], mempty) - -fromRawCausal :: Causal.Raw Branch.Raw (Branches m, Dependencies) - -> (Branches m, Dependencies) -fromRawCausal = \case - Causal.RawOne e -> e - Causal.RawCons e h -> e <> fromTails [h] - Causal.RawMerge e hs -> e <> fromTails (toList hs) - where - fromTails ts = (fmap (,Nothing) ts, mempty) - -fromBranch0 :: Applicative m => Branch0 m -> (Branches m, Dependencies) -fromBranch0 b = - ( fromChildren (Branch._children b) - , fromTermsStar (Branch._terms b) - <> fromTypesStar (Branch._types b) - <> fromEdits (Branch._edits b) ) - where - fromChildren :: Applicative m => Map NameSegment (Branch m) -> Branches m - fromChildren m = [ (Branch.headHash b, Just (pure b)) | b <- toList m ] - references :: Branch.Star r NameSegment -> [r] - references = toList . R.dom . 
Star3.d1 - mdValues :: Branch.Star r NameSegment -> [Reference] - mdValues = fmap snd . toList . R.ran . Star3.d3 - fromTermsStar :: Branch.Star Referent NameSegment -> Dependencies - fromTermsStar s = Dependencies mempty terms decls where - terms = Set.fromList $ - [ i | Referent.Ref (DerivedId i) <- references s] ++ - [ i | DerivedId i <- mdValues s] - decls = Set.fromList $ - [ i | Referent.Con (DerivedId i) _ _ <- references s ] - fromTypesStar :: Branch.Star Reference NameSegment -> Dependencies - fromTypesStar s = Dependencies mempty terms decls where - terms = Set.fromList [ i | DerivedId i <- mdValues s ] - decls = Set.fromList [ i | DerivedId i <- references s ] - fromEdits :: Map NameSegment (EditHash, m Patch) -> Dependencies - fromEdits m = Dependencies (Set.fromList . fmap fst $ toList m) mempty mempty diff --git a/parser-typechecker/src/Unison/Codebase/BranchDiff.hs b/parser-typechecker/src/Unison/Codebase/BranchDiff.hs deleted file mode 100644 index 980ee3da2c..0000000000 --- a/parser-typechecker/src/Unison/Codebase/BranchDiff.hs +++ /dev/null @@ -1,166 +0,0 @@ -module Unison.Codebase.BranchDiff where - -import Unison.Prelude -import qualified Data.Set as Set -import qualified Data.Map as Map -import Unison.Codebase.Branch (Branch0(..)) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Metadata as Metadata -import qualified Unison.Codebase.Patch as Patch -import Unison.Codebase.Patch (Patch, PatchDiff) -import Unison.Name (Name) -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import qualified Unison.Util.Relation as R -import qualified Unison.Util.Relation3 as R3 -import qualified Unison.Util.Relation4 as R4 -import Unison.Util.Relation (Relation) -import Unison.Util.Relation3 (Relation3) -import Unison.Runtime.IOSource (isPropagatedValue) - -data DiffType a = Create a | Delete a | Modify a deriving Show - --- todo: maybe simplify this file using Relation3? 
-data NamespaceSlice r = NamespaceSlice { - names :: Relation r Name, - metadata :: Relation3 r Name Metadata.Value -} deriving Show - -data DiffSlice r = DiffSlice { --- tpatchUpdates :: Relation r r, -- old new - tallnamespaceUpdates :: Map Name (Set r, Set r), - talladds :: Relation r Name, - tallremoves :: Relation r Name, - trenames :: Map r (Set Name, Set Name), -- ref (old, new) - taddedMetadata :: Relation3 r Name Metadata.Value, - tremovedMetadata :: Relation3 r Name Metadata.Value -} deriving Show - -data BranchDiff = BranchDiff - { termsDiff :: DiffSlice Referent - , typesDiff :: DiffSlice Reference - , patchesDiff :: Map Name (DiffType PatchDiff) - } deriving Show - -diff0 :: forall m. Monad m => Branch0 m -> Branch0 m -> m BranchDiff -diff0 old new = BranchDiff terms types <$> patchDiff old new where - (terms, types) = - computeSlices - (deepr4ToSlice (Branch.deepTerms old) (Branch.deepTermMetadata old)) - (deepr4ToSlice (Branch.deepTerms new) (Branch.deepTermMetadata new)) - (deepr4ToSlice (Branch.deepTypes old) (Branch.deepTypeMetadata old)) - (deepr4ToSlice (Branch.deepTypes new) (Branch.deepTypeMetadata new)) - -patchDiff :: forall m. Monad m => Branch0 m -> Branch0 m -> m (Map Name (DiffType PatchDiff)) -patchDiff old new = do - let oldDeepEdits, newDeepEdits :: Map Name (Branch.EditHash, m Patch) - oldDeepEdits = Branch.deepEdits' old - newDeepEdits = Branch.deepEdits' new - added <- do - addedPatches :: Map Name Patch <- - traverse snd $ Map.difference newDeepEdits oldDeepEdits - pure $ fmap (\p -> Create (Patch.diff p mempty)) addedPatches - removed <- do - removedPatches :: Map Name Patch <- - traverse snd $ Map.difference oldDeepEdits newDeepEdits - pure $ fmap (\p -> Delete (Patch.diff mempty p)) removedPatches - - let f acc k = case (Map.lookup k oldDeepEdits, Map.lookup k newDeepEdits) of - (Just (h1,p1), Just (h2,p2)) -> - if h1 == h2 then pure acc - else Map.singleton k . 
Modify <$> (Patch.diff <$> p2 <*> p1) - _ -> error "we've done something very wrong" - modified <- foldM f mempty (Set.intersection (Map.keysSet oldDeepEdits) (Map.keysSet newDeepEdits)) - pure $ added <> removed <> modified - -deepr4ToSlice :: Ord r - => R.Relation r Name - -> Metadata.R4 r Name - -> NamespaceSlice r -deepr4ToSlice deepNames deepMetadata = - NamespaceSlice deepNames (unpackMetadata deepMetadata) - where - unpackMetadata = R3.fromList . fmap (\(r,n,_t,v) -> (r,n,v)) . R4.toList - -computeSlices :: NamespaceSlice Referent - -> NamespaceSlice Referent - -> NamespaceSlice Reference - -> NamespaceSlice Reference - -> (DiffSlice Referent, DiffSlice Reference) -computeSlices oldTerms newTerms oldTypes newTypes = (termsOut, typesOut) where - termsOut = - let nc = allNames oldTerms newTerms - nu = allNamespaceUpdates oldTerms newTerms in - DiffSlice - nu - (allAdds nc nu) - (allRemoves nc nu) - (remainingNameChanges nc) - (addedMetadata oldTerms newTerms) - (removedMetadata oldTerms newTerms) - typesOut = - let nc = allNames oldTypes newTypes - nu = allNamespaceUpdates oldTypes newTypes in - DiffSlice - nu - (allAdds nc nu) - (allRemoves nc nu) - (remainingNameChanges nc) - (addedMetadata oldTypes newTypes) - (removedMetadata oldTypes newTypes) - - allNames :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Map r (Set Name, Set Name) - allNames old new = R.outerJoinDomMultimaps (names old) (names new) - - allAdds, allRemoves :: forall r. Ord r - => Map r (Set Name, Set Name) - -> Map Name (Set r, Set r) - -> Relation r Name - allAdds nc nu = R.fromMultimap . fmap snd . Map.filterWithKey f $ nc where - f r (oldNames, newNames) = null oldNames && any (notInUpdates r) newNames - -- if an add matches RHS of an update, we exclude it from "Adds" - notInUpdates r name = case Map.lookup name nu of - Nothing -> True - Just (_, rs_new) -> Set.notMember r rs_new - - allRemoves nc nu = R.fromMultimap . fmap fst . 
Map.filterWithKey f $ nc where - f r (oldNames, newNames) = null newNames && any (notInUpdates r) oldNames - -- if a remove matches LHS of an update, we exclude it from "Removes" - notInUpdates r name = case Map.lookup name nu of - Nothing -> True - Just (rs_old, _) -> Set.notMember r rs_old - - -- renames and stuff, name changes without a reference change - remainingNameChanges :: forall r. Ord r - => Map r (Set Name, Set Name) -> Map r (Set Name, Set Name) - remainingNameChanges = - Map.filter (\(old, new) -> not (null old) && not (null new) && old /= new) - - allNamespaceUpdates :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Map Name (Set r, Set r) - allNamespaceUpdates old new = - Map.filter f $ R.innerJoinRanMultimaps (names old) (names new) - where f (old, new) = old /= new - - addedMetadata :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Relation3 r Name Metadata.Value - addedMetadata old new = metadata new `R3.difference` metadata old - - removedMetadata :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Relation3 r Name Metadata.Value - removedMetadata old new = metadata old `R3.difference` metadata new - --- the namespace updates that aren't propagated -namespaceUpdates :: Ord r => DiffSlice r -> Map Name (Set r, Set r) -namespaceUpdates s = Map.mapMaybeWithKey f (tallnamespaceUpdates s) - where - f name (olds, news) = let - news' = Set.difference news (Map.findWithDefault mempty name propagated) - in if null news' then Nothing else Just (olds, news') - propagated = propagatedUpdates s - -propagatedUpdates :: Ord r => DiffSlice r -> Map Name (Set r) -propagatedUpdates s = Map.fromList - [ (name, news) - | (name, (_olds0, news0)) <- Map.toList $ tallnamespaceUpdates s - , let news = Set.filter propagated news0 - propagated rnew = R3.member rnew name isPropagatedValue (taddedMetadata s) - , not (null news) - ] diff --git a/parser-typechecker/src/Unison/Codebase/BranchUtil.hs b/parser-typechecker/src/Unison/Codebase/BranchUtil.hs deleted file mode 
100644 index 749f2e75c0..0000000000 --- a/parser-typechecker/src/Unison/Codebase/BranchUtil.hs +++ /dev/null @@ -1,135 +0,0 @@ -module Unison.Codebase.BranchUtil where - -import Unison.Prelude - -import qualified Data.Set as Set -import qualified Data.Map as Map -import Unison.Codebase.Path (Path) -import qualified Unison.Codebase.Path as Path -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.Branch (Branch, Branch0) -import qualified Unison.Names2 as Names -import Unison.Names2 (Names0) -import qualified Unison.Referent as Referent -import qualified Unison.Reference as Reference -import Unison.Referent (Referent) -import Unison.Reference (Reference) -import Unison.HashQualified' (HashQualified'(NameOnly, HashQualified)) -import qualified Unison.HashQualified' as HQ' -import qualified Unison.ShortHash as SH -import qualified Unison.Util.Relation as R -import qualified Unison.Util.Relation4 as R4 -import qualified Unison.Util.Star3 as Star3 -import Unison.Codebase.Metadata (Metadata) -import qualified Unison.Codebase.Metadata as Metadata -import qualified Unison.Util.List as List -import Unison.Codebase.Patch (Patch) -import Unison.NameSegment (NameSegment) -import Control.Lens (view) - -fromNames0 :: Monad m => Names0 -> Branch m -fromNames0 names0 = Branch.one $ addFromNames0 names0 Branch.empty0 - --- can produce a pure value because there's no history to traverse -hashesFromNames0 :: Monad m => Names0 -> Map Branch.Hash (Branch m) -hashesFromNames0 = deepHashes . fromNames0 where - deepHashes :: Branch m -> Map Branch.Hash (Branch m) - deepHashes b = Map.singleton (Branch.headHash b) b - <> (foldMap deepHashes . view Branch.children . Branch.head) b - -addFromNames0 :: Monad m => Names0 -> Branch0 m -> Branch0 m -addFromNames0 names0 = Branch.stepManyAt0 (typeActions <> termActions) - where - typeActions = map doType . R.toList $ Names.types names0 - termActions = map doTerm . 
R.toList $ Names.terms names0 --- doTerm :: (Name, Referent) -> (Path, Branch0 m -> Branch0 m) - doTerm (n, r) = case Path.splitFromName n of - Nothing -> errorEmptyName - Just split -> makeAddTermName split r mempty -- no metadata --- doType :: (Name, Reference) -> (Path, Branch0 m -> Branch0 m) - doType (n, r) = case Path.splitFromName n of - Nothing -> errorEmptyName - Just split -> makeAddTypeName split r mempty -- no metadata - errorEmptyName = error "encountered an empty name" - -getTerm :: Path.HQSplit -> Branch0 m -> Set Referent -getTerm (p, hq) b = case hq of - NameOnly n -> Star3.lookupD1 n terms - HashQualified n sh -> filter sh $ Star3.lookupD1 n terms - where - filter sh = Set.filter (SH.isPrefixOf sh . Referent.toShortHash) - terms = Branch._terms (Branch.getAt0 p b) - -getTermMetadataHQNamed - :: (Path.Path, HQ'.HQSegment) -> Branch0 m -> Metadata.R4 Referent NameSegment -getTermMetadataHQNamed (path, hqseg) b = - R4.filter (\(r,n,_t,_v) -> HQ'.matchesNamedReferent n r hqseg) terms - where terms = Metadata.starToR4 . Branch._terms $ Branch.getAt0 path b - -getTypeMetadataHQNamed - :: (Path.Path, HQ'.HQSegment) - -> Branch0 m - -> Metadata.R4 Reference NameSegment -getTypeMetadataHQNamed (path, hqseg) b = - R4.filter (\(r,n,_t,_v) -> HQ'.matchesNamedReference n r hqseg) types - where types = Metadata.starToR4 . Branch._types $ Branch.getAt0 path b - --- todo: audit usages and maybe eliminate! --- Only returns metadata for the term at the exact level given -getTermMetadataAt :: (Path.Path, a) -> Referent -> Branch0 m -> Metadata -getTermMetadataAt (path,_) r b = Set.fromList <$> List.multimap mdList - where - mdList :: [(Metadata.Type, Metadata.Value)] - mdList = Set.toList . R.ran . Star3.d3 . 
Star3.selectFact (Set.singleton r) $ terms - terms = Branch._terms $ Branch.getAt0 path b - -getType :: Path.HQSplit -> Branch0 m -> Set Reference -getType (p, hq) b = case hq of - NameOnly n -> Star3.lookupD1 n types - HashQualified n sh -> filter sh $ Star3.lookupD1 n types - where - filter sh = Set.filter (SH.isPrefixOf sh . Reference.toShortHash) - types = Branch._types (Branch.getAt0 p b) - -getTypeByShortHash :: SH.ShortHash -> Branch0 m -> Set Reference -getTypeByShortHash sh b = filter sh $ Branch.deepTypeReferences b - where - filter sh = Set.filter (SH.isPrefixOf sh . Reference.toShortHash) - -getTypeMetadataAt :: (Path.Path, a) -> Reference -> Branch0 m -> Metadata -getTypeMetadataAt (path,_) r b = Set.fromList <$> List.multimap mdList - where - mdList :: [(Metadata.Type, Metadata.Value)] - mdList = Set.toList . R.ran . Star3.d3 . Star3.selectFact (Set.singleton r) $ types - types = Branch._types $ Branch.getAt0 path b - -getBranch :: Path.Split -> Branch0 m -> Maybe (Branch m) -getBranch (p, seg) b = case Path.toList p of - [] -> Map.lookup seg (Branch._children b) - h : p -> - (Branch.head <$> Map.lookup h (Branch._children b)) >>= - getBranch (Path.fromList p, seg) - - -makeAddTermName :: Path.Split -> Referent -> Metadata -> (Path, Branch0 m -> Branch0 m) -makeAddTermName (p, name) r md = (p, Branch.addTermName r name md) - -makeDeleteTermName :: Path.Split -> Referent -> (Path, Branch0 m -> Branch0 m) -makeDeleteTermName (p, name) r = (p, Branch.deleteTermName r name) - -makeReplacePatch :: Applicative m => Path.Split -> Patch -> (Path, Branch0 m -> Branch0 m) -makeReplacePatch (p, name) patch = (p, Branch.replacePatch name patch) - -makeDeletePatch :: Path.Split -> (Path, Branch0 m -> Branch0 m) -makeDeletePatch (p, name) = (p, Branch.deletePatch name) - -makeAddTypeName :: Path.Split -> Reference -> Metadata -> (Path, Branch0 m -> Branch0 m) -makeAddTypeName (p, name) r md = (p, Branch.addTypeName r name md) - -makeDeleteTypeName :: Path.Split -> 
Reference -> (Path, Branch0 m -> Branch0 m) -makeDeleteTypeName (p, name) r = (p, Branch.deleteTypeName r name) - --- to delete, just set with Branch.empty -makeSetBranch :: - Path.Split -> Branch m -> (Path, Branch0 m -> Branch0 m) -makeSetBranch (p, name) b = (p, Branch.setChildBranch name b) diff --git a/parser-typechecker/src/Unison/Codebase/Causal.hs b/parser-typechecker/src/Unison/Codebase/Causal.hs deleted file mode 100644 index 46164d168f..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Causal.hs +++ /dev/null @@ -1,373 +0,0 @@ -{-# LANGUAGE RankNTypes #-} -module Unison.Codebase.Causal where - -import Unison.Prelude - -import Prelude hiding ( head - , tail - , read - ) -import qualified Control.Monad.State as State -import Control.Monad.State ( StateT ) -import Data.Sequence ( ViewL(..) ) -import qualified Data.Sequence as Seq -import Unison.Hash ( Hash ) -import qualified Unison.Hashable as Hashable -import Unison.Hashable ( Hashable ) -import qualified Unison.Util.Cache as Cache -import qualified Data.Map as Map -import qualified Data.Set as Set - -{- -`Causal a` has 5 operations, specified algebraically here: - -* `before : Causal m a -> Causal m a -> m Bool` defines a partial order on - `Causal`. -* `head : Causal m a -> a`, which represents the "latest" `a` value in a causal - chain. -* `one : a -> Causal m a`, satisfying `head (one hd) == hd` -* `cons : a -> Causal a -> Causal a`, satisfying `head (cons hd tl) == hd` and - also `before tl (cons hd tl)`. -* `merge : CommutativeSemigroup a => Causal a -> Causal a -> Causal a`, which is - commutative (but not associative) and satisfies: - * `before c1 (merge c1 c2)` - * `before c2 (merge c1 c2)` -* `sequence : Causal a -> Causal a -> Causal a`, which is defined as - `sequence c1 c2 = cons (head c2) (merge c1 c2)`. 
- * `before c1 (sequence c1 c2)` - * `head (sequence c1 c2) == head c2` --} - -newtype RawHash a = RawHash { unRawHash :: Hash } - deriving (Eq, Ord) - -instance Show (RawHash a) where - show = show . unRawHash - -instance Show e => Show (Causal m h e) where - show = \case - One h e -> "One " ++ (take 3 . show) h ++ " " ++ show e - Cons h e t -> "Cons " ++ (take 3 . show) h ++ " " ++ show e ++ " " ++ (take 3 . show) (fst t) - Merge h e ts -> "Merge " ++ (take 3 . show) h ++ " " ++ show e ++ " " ++ (show . fmap (take 3 . show) . toList) (Map.keysSet ts) - --- h is the type of the pure data structure that will be hashed and used as --- an index; e.g. h = Branch00, e = Branch0 m -data Causal m h e - = One { currentHash :: RawHash h - , head :: e - } - | Cons { currentHash :: RawHash h - , head :: e - , tail :: (RawHash h, m (Causal m h e)) - } - -- The merge operation `<>` flattens and normalizes for order - | Merge { currentHash :: RawHash h - , head :: e - , tails :: Map (RawHash h) (m (Causal m h e)) - } - --- Convert the Causal to an adjacency matrix for debugging purposes. -toGraph - :: Monad m - => Set (RawHash h) - -> Causal m h e - -> m (Seq (RawHash h, RawHash h)) -toGraph seen c = case c of - One _ _ -> pure Seq.empty - Cons h1 _ (h2, m) -> if Set.notMember h1 seen - then do - tail <- m - g <- toGraph (Set.insert h1 seen) tail - pure $ (h1, h2) Seq.<| g - else pure Seq.empty - Merge h _ ts -> if Set.notMember h seen - then do - tails <- sequence $ Map.elems ts - gs <- Seq.fromList <$> traverse (toGraph (Set.insert h seen)) tails - pure $ Seq.fromList ((h, ) <$> Set.toList (Map.keysSet ts)) <> join gs - else pure Seq.empty - --- A serializer `Causal m h e`. Nonrecursive -- only responsible for --- writing a single node of the causal structure. 
-data Raw h e - = RawOne e - | RawCons e (RawHash h) - | RawMerge e (Set (RawHash h)) - -rawHead :: Raw h e -> e -rawHead (RawOne e ) = e -rawHead (RawCons e _) = e -rawHead (RawMerge e _) = e - --- Don't need to deserialize the `e` to calculate `before`. -data Tails h - = TailsOne - | TailsCons (RawHash h) - | TailsMerge (Set (RawHash h)) - -type Deserialize m h e = RawHash h -> m (Raw h e) - -cachedRead :: Monad m - => Cache.Cache m (RawHash h) (Causal m h e) - -> Deserialize m h e - -> RawHash h -> m (Causal m h e) -cachedRead cache deserializeRaw h = Cache.lookup cache h >>= \case - Nothing -> do - raw <- deserializeRaw h - causal <- pure $ case raw of - RawOne e -> One h e - RawCons e tailHash -> Cons h e (tailHash, read tailHash) - RawMerge e tailHashes -> Merge h e $ - Map.fromList [(h, read h) | h <- toList tailHashes ] - Cache.insert cache h causal - pure causal - Just causal -> pure causal - where - read = cachedRead cache deserializeRaw - -type Serialize m h e = RawHash h -> Raw h e -> m () - --- Sync a causal to some persistent store, stopping when hitting a Hash which --- has already been written, according to the `exists` function provided. -sync - :: forall m h e - . Monad m - => (RawHash h -> m Bool) - -> Serialize (StateT (Set (RawHash h)) m) h e - -> Causal m h e - -> StateT (Set (RawHash h)) m () -sync exists serialize c = do - queued <- State.get - itExists <- if Set.member (currentHash c) queued then pure True - else lift . 
exists $ currentHash c - unless itExists $ go c - where - go :: Causal m h e -> StateT (Set (RawHash h)) m () - go c = do - queued <- State.get - when (Set.notMember (currentHash c) queued) $ do - State.modify (Set.insert $ currentHash c) - case c of - One currentHash head -> serialize currentHash $ RawOne head - Cons currentHash head (tailHash, tailm) -> do - -- write out the tail first, so what's on disk is always valid - b <- lift $ exists tailHash - unless b $ go =<< lift tailm - serialize currentHash (RawCons head tailHash) - Merge currentHash head tails -> do - for_ (Map.toList tails) $ \(hash, cm) -> do - b <- lift $ exists hash - unless b $ go =<< lift cm - serialize currentHash (RawMerge head (Map.keysSet tails)) - -instance Eq (Causal m h a) where - a == b = currentHash a == currentHash b - -instance Ord (Causal m h a) where - a <= b = currentHash a <= currentHash b - -instance Hashable (RawHash h) where - tokens (RawHash h) = Hashable.tokens h - --- Find the lowest common ancestor of two causals. -lca :: Monad m => Causal m h e -> Causal m h e -> m (Maybe (Causal m h e)) -lca a b = - lca' (Seq.singleton $ pure a) (Seq.singleton $ pure b) - --- `lca' xs ys` finds the lowest common ancestor of any element of `xs` and any --- element of `ys`. --- This is a breadth-first search used in the implementation of `lca a b`. -lca' - :: Monad m - => Seq (m (Causal m h e)) - -> Seq (m (Causal m h e)) - -> m (Maybe (Causal m h e)) -lca' = go Set.empty Set.empty where - go seenLeft seenRight remainingLeft remainingRight = - case Seq.viewl remainingLeft of - Seq.EmptyL -> search seenLeft remainingRight - a :< as -> do - left <- a - if Set.member (currentHash left) seenRight - then pure $ Just left - -- Note: swapping position of left and right when we recurse so that - -- we search each side equally. This avoids having to case on both - -- arguments, and the order shouldn't really matter. 
- else go seenRight - (Set.insert (currentHash left) seenLeft) - remainingRight - (as <> children left) - search seen remaining = case Seq.viewl remaining of - Seq.EmptyL -> pure Nothing - a :< as -> do - current <- a - if Set.member (currentHash current) seen - then pure $ Just current - else search seen (as <> children current) - -children :: Causal m h e -> Seq (m (Causal m h e)) -children (One _ _ ) = Seq.empty -children (Cons _ _ (_, t)) = Seq.singleton t -children (Merge _ _ ts ) = Seq.fromList $ Map.elems ts - --- A `squashMerge combine c1 c2` gives the same resulting `e` --- as a `threeWayMerge`, but doesn't introduce a merge node for the --- result. Instead, the resulting causal is a simple `Cons` onto `c2` --- (or is equal to `c2` if `c1` changes nothing). -squashMerge - :: forall m h e - . (Monad m, Hashable e, Eq e) - => (Maybe e -> e -> e -> m e) - -> Causal m h e - -> Causal m h e - -> m (Causal m h e) -squashMerge combine c1 c2 = do - theLCA <- lca c1 c2 - let done newHead = consDistinct newHead c2 - case theLCA of - Nothing -> done <$> combine Nothing (head c1) (head c2) - Just lca - | lca == c1 -> pure c2 - - -- Pretty subtle: if we were to add this short circuit, then - -- the history of c1's children would still make it into the result - -- Calling `combine` will recursively call into `squashMerge` - -- for the children, discarding their history before calling `done` - -- on the parent. - -- | lca == c2 -> pure $ done c1 - - | otherwise -> done <$> combine (Just $ head lca) (head c1) (head c2) - -threeWayMerge - :: forall m h e - . 
(Monad m, Hashable e) - => (Maybe e -> e -> e -> m e) - -> Causal m h e - -> Causal m h e - -> m (Causal m h e) -threeWayMerge combine c1 c2 = do - theLCA <- lca c1 c2 - case theLCA of - Nothing -> done <$> combine Nothing (head c1) (head c2) - Just lca - | lca == c1 -> pure c2 - | lca == c2 -> pure c1 - | otherwise -> done <$> combine (Just $ head lca) (head c1) (head c2) - where - children = - Map.fromList [(currentHash c1, pure c1), (currentHash c2, pure c2)] - done :: e -> Causal m h e - done newHead = - Merge (RawHash (hash (newHead, Map.keys children))) newHead children - -before :: Monad m => Causal m h e -> Causal m h e -> m Bool -before a b = (== Just a) <$> lca a b - -hash :: Hashable e => e -> Hash -hash = Hashable.accumulate' - -step :: (Applicative m, Hashable e) => (e -> e) -> Causal m h e -> Causal m h e -step f c = f (head c) `cons` c - -stepDistinct :: (Applicative m, Eq e, Hashable e) => (e -> e) -> Causal m h e -> Causal m h e -stepDistinct f c = f (head c) `consDistinct` c - -stepIf - :: (Applicative m, Hashable e) - => (e -> Bool) - -> (e -> e) - -> Causal m h e - -> Causal m h e -stepIf cond f c = if cond (head c) then step f c else c - -stepM - :: (Applicative m, Hashable e) => (e -> m e) -> Causal m h e -> m (Causal m h e) -stepM f c = (`cons` c) <$> f (head c) - -stepDistinctM - :: (Applicative m, Functor n, Eq e, Hashable e) - => (e -> n e) -> Causal m h e -> n (Causal m h e) -stepDistinctM f c = (`consDistinct` c) <$> f (head c) - -one :: Hashable e => e -> Causal m h e -one e = One (RawHash $ hash e) e - -cons :: (Applicative m, Hashable e) => e -> Causal m h e -> Causal m h e -cons e tl = - Cons (RawHash $ hash [hash e, unRawHash . 
currentHash $ tl]) e (currentHash tl, pure tl) - -consDistinct :: (Applicative m, Eq e, Hashable e) => e -> Causal m h e -> Causal m h e -consDistinct e tl = - if head tl == e then tl - else cons e tl - -uncons :: Applicative m => Causal m h e -> m (Maybe (e, Causal m h e)) -uncons c = case c of - Cons _ e (_,tl) -> fmap (e,) . Just <$> tl - _ -> pure Nothing - -transform :: Functor m => (forall a . m a -> n a) -> Causal m h e -> Causal n h e -transform nt c = case c of - One h e -> One h e - Cons h e (ht, tl) -> Cons h e (ht, nt (transform nt <$> tl)) - Merge h e tls -> Merge h e $ Map.map (\mc -> nt (transform nt <$> mc)) tls - -unsafeMapHashPreserving :: Functor m => (e -> e2) -> Causal m h e -> Causal m h e2 -unsafeMapHashPreserving f c = case c of - One h e -> One h (f e) - Cons h e (ht, tl) -> Cons h (f e) (ht, unsafeMapHashPreserving f <$> tl) - Merge h e tls -> Merge h (f e) $ Map.map (fmap $ unsafeMapHashPreserving f) tls - -data FoldHistoryResult a = Satisfied a | Unsatisfied a deriving (Eq,Ord,Show) - --- foldHistoryUntil some condition on the accumulator is met, --- attempting to work backwards fairly through merge nodes --- (rather than following one back all the way to its root before working --- through others). Returns Unsatisfied if the condition was never satisfied, --- otherwise Satisfied. --- --- NOTE by RÓB: this short-circuits immediately and only looks at the first --- entry in the history, since this operation is far too slow to be practical. -foldHistoryUntil - :: forall m h e a - . 
(Monad m) - => (a -> e -> (a, Bool)) - -> a - -> Causal m h e - -> m (FoldHistoryResult a) -foldHistoryUntil f a c = step a mempty (pure c) where - step :: a -> Set (RawHash h) -> Seq (Causal m h e) -> m (FoldHistoryResult a) - step a _seen Seq.Empty = pure (Unsatisfied a) - step a seen (c Seq.:<| rest) | currentHash c `Set.member` seen = - step a seen rest - step a seen (c Seq.:<| rest) = case f a (head c) of - (a, True ) -> pure (Satisfied a) - (a, False) -> do - tails <- case c of - One{} -> pure mempty - Cons{} -> - let (_, t) = tail c - in --if h `Set.member` seen - if not (Set.null seen) then pure mempty else Seq.singleton <$> t - Merge{} -> - fmap Seq.fromList - . traverse snd - . filter (\(_, _) -> not (Set.null seen)) - . Map.toList - $ tails c - step a (Set.insert (currentHash c) seen) (rest <> tails) - -hashToRaw :: - forall m h e. Monad m => Causal m h e -> m (Map (RawHash h) [RawHash h]) -hashToRaw c = go mempty [c] where - go :: Map (RawHash h) [RawHash h] -> [Causal m h e] - -> m (Map (RawHash h) [RawHash h]) - go output [] = pure output - go output (c : queue) = case c of - One h _ -> go (Map.insert h [] output) queue - Cons h _ (htail, mctail) -> do - ctail <- mctail - go (Map.insert h [htail] output) (ctail : queue) - Merge h _ mtails -> do - tails <- sequence mtails - go (Map.insert h (Map.keys tails) output) (toList tails ++ queue) diff --git a/parser-typechecker/src/Unison/Codebase/Classes.hs b/parser-typechecker/src/Unison/Codebase/Classes.hs deleted file mode 100644 index afc6108da0..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Classes.hs +++ /dev/null @@ -1,40 +0,0 @@ - -module Unison.Codebase.Classes where --- ( GetDecls(..) --- , PutDecls(..) --- , GetBranch(..) --- , PutBranch(..) --- , GetDependents(..) 
--- ) where --- ---import Data.Set ( Set ) ---import Unison.Codebase.Branch ( Branch ) ---import Unison.DataDeclaration ( Decl ) ---import qualified Unison.Reference as Reference ---import Unison.Reference ( Reference ) ---import qualified Unison.Term as Term ---import qualified Unison.Type as Type ---import qualified Unison.Typechecker.TypeLookup as TL --- ---type Term v a = Term.AnnotatedTerm v a ---type Type v a = Type.AnnotatedType v a --- ---class GetDecls d m v a | d -> m v a where --- getTerm :: d -> Reference.Id -> m (Maybe (Term v a)) --- getTypeOfTerm :: d -> Reference -> m (Maybe (Type v a)) --- getTypeDeclaration :: d -> Reference.Id -> m (Maybe (Decl v a)) --- hasTerm :: d -> Reference.Id -> m Bool --- hasType :: d -> Reference.Id -> m Bool --- ---class PutDecls d m v a | d -> m v a where --- putTerm :: d -> Reference.Id -> Term v a -> Type v a -> m () --- putTypeDeclarationImpl :: d -> Reference.Id -> Decl v a -> m () --- ---class GetBranch b m | b -> m where --- getRootBranch :: b -> m (Branch m) --- ---class PutBranch b m | b -> m where --- putRootBranch :: b -> Branch m -> m () --- ---class GetDependents d m | d -> m where --- dependentsImpl :: d -> Reference -> m (Set Reference.Id) diff --git a/parser-typechecker/src/Unison/Codebase/CodeLookup.hs b/parser-typechecker/src/Unison/Codebase/CodeLookup.hs deleted file mode 100644 index e283adbe71..0000000000 --- a/parser-typechecker/src/Unison/Codebase/CodeLookup.hs +++ /dev/null @@ -1,57 +0,0 @@ -module Unison.Codebase.CodeLookup where - -import Unison.Prelude - -import Control.Monad.Morph -import qualified Data.Map as Map -import Unison.UnisonFile ( UnisonFile ) -import qualified Unison.UnisonFile as UF -import qualified Unison.Term as Term -import Unison.Term ( Term ) -import Unison.Var ( Var ) -import qualified Unison.Reference as Reference -import Unison.DataDeclaration (Decl) - -fromUnisonFile :: (Var v, Monad m) => UnisonFile v a -> CodeLookup v m a -fromUnisonFile uf = CodeLookup tm ty where - 
tm id = pure $ Map.lookup id termMap - ty id = pure $ Map.lookup id typeMap1 <|> Map.lookup id typeMap2 - typeMap1 = Map.fromList [ (id, Right dd) | - (_, (Reference.DerivedId id, dd)) <- - Map.toList (UF.dataDeclarations uf) ] - typeMap2 = Map.fromList [ (id, Left ad) | - (_, (Reference.DerivedId id, ad)) <- - Map.toList (UF.effectDeclarations uf) ] - tmm = Map.fromList (UF.terms uf) - termMap = Map.fromList [ (id, e) | - (_, (id, e)) <- - Map.toList (Term.hashComponents tmm) ] - -data CodeLookup v m a - = CodeLookup { - getTerm :: Reference.Id -> m (Maybe (Term v a)), - getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v a)) - } - -instance MFunctor (CodeLookup v) where - hoist f (CodeLookup tm tp) = CodeLookup (f . tm) (f . tp) - -instance (Ord v, Functor m) => Functor (CodeLookup v m) where - fmap f cl = CodeLookup tm ty where - tm id = fmap (Term.amap f) <$> getTerm cl id - ty id = fmap md <$> getTypeDeclaration cl id - md (Left e) = Left (f <$> e) - md (Right d) = Right (f <$> d) - -instance Monad m => Semigroup (CodeLookup v m a) where - (<>) = mappend - -instance Monad m => Monoid (CodeLookup v m a) where - mempty = CodeLookup (const $ pure Nothing) (const $ pure Nothing) - c1 `mappend` c2 = CodeLookup tm ty where - tm id = do - o <- getTerm c1 id - case o of Nothing -> getTerm c2 id; Just _ -> pure o - ty id = do - o <- getTypeDeclaration c1 id - case o of Nothing -> getTypeDeclaration c2 id; Just _ -> pure o diff --git a/parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs b/parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs deleted file mode 100644 index 264a491ad2..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs +++ /dev/null @@ -1,59 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.Editor.AuthorInfo where - -import Unison.Term (Term, hashComponents) - -import qualified Unison.Reference as Reference -import Unison.Prelude (MonadIO, Word8) -import Unison.Var (Var) -import 
Data.ByteString (unpack) -import Crypto.Random (getRandomBytes) -import qualified Data.Map as Map -import qualified Unison.Var as Var -import Data.Foldable (toList) -import UnliftIO (liftIO) -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import Unison.Type (Type) -import Data.Text (Text) - -data AuthorInfo v a = AuthorInfo - { guid, author, copyrightHolder :: (Reference.Id, Term v a, Type v a) } - -createAuthorInfo :: forall m v a. MonadIO m => Var v => a -> Text -> m (AuthorInfo v a) -createAuthorInfo a t = createAuthorInfo' . unpack <$> liftIO (getRandomBytes 32) - where - createAuthorInfo' :: [Word8] -> AuthorInfo v a - createAuthorInfo' bytes = let - [(guidRef, guidTerm)] = hashAndWrangle "guid" $ - Term.app a - (Term.constructor a guidTypeRef 0) - (Term.app a - (Term.builtin a "Bytes.fromList") - (Term.seq a (map (Term.nat a . fromIntegral) bytes))) - - [(authorRef, authorTerm)] = hashAndWrangle "author" $ - Term.apps - (Term.constructor a authorTypeRef 0) - [(a, Term.ref a (Reference.DerivedId guidRef)) - ,(a, Term.text a t)] - - [(chRef, chTerm)] = hashAndWrangle "copyrightHolder" $ - Term.apps - (Term.constructor a chTypeRef 0) - [(a, Term.ref a (Reference.DerivedId guidRef)) - ,(a, Term.text a t)] - - in AuthorInfo - (guidRef, guidTerm, guidType) - (authorRef, authorTerm, authorType) - (chRef, chTerm, chType) - hashAndWrangle v tm = toList . hashComponents $ Map.fromList [(Var.named v, tm)] - (chType, chTypeRef) = (Type.ref a chTypeRef, unsafeParse copyrightHolderHash) - (authorType, authorTypeRef) = (Type.ref a authorTypeRef, unsafeParse authorHash) - (guidType, guidTypeRef) = (Type.ref a guidTypeRef, unsafeParse guidHash) - unsafeParse = either error id . 
Reference.fromText - guidHash = "#rc29vdqe019p56kupcgkg07fkib86r3oooatbmsgfbdsgpmjhsh00l307iuts3r973q5etb61vbjkes42b6adb3mkorusvmudiuorno" - copyrightHolderHash = "#aohndsu9bl844vspujp142j5aijv86rifmnrbnjvpv3h3f3aekn45rj5s1uf1ucrrtm5urbc5d1ajtm7lqq1tr8lkgv5fathp6arqug" - authorHash = "#5hi1vvs5t1gmu6vn1kpqmgksou8ie872j31gc294lgqks71di6gm3d4ugnrr4mq8ov0ap1e20lq099d5g6jjf9c6cbp361m9r9n5g50" diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Command.hs b/parser-typechecker/src/Unison/Codebase/Editor/Command.hs deleted file mode 100644 index 4cab12ebb0..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/Command.hs +++ /dev/null @@ -1,191 +0,0 @@ -{-# LANGUAGE GADTs #-} - -module Unison.Codebase.Editor.Command ( - Command(..), - AmbientAbilities, - LexedSource, - Source, - SourceName, - TypecheckingResult, - LoadSourceResult(..) - ) where - -import Unison.Prelude - -import Data.Configurator.Types ( Configured ) - -import Unison.Codebase.Editor.Output -import Unison.Codebase.Editor.RemoteRepo - -import Unison.Codebase.Branch ( Branch ) -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.GitError -import qualified Unison.Codebase.Reflog as Reflog -import Unison.Codebase.SyncMode ( SyncMode ) -import Unison.Names3 ( Names, Names0 ) -import Unison.Parser ( Ann ) -import Unison.Referent ( Referent ) -import Unison.Reference ( Reference ) -import Unison.Result ( Note - , Result) -import Unison.DataDeclaration ( Decl ) -import qualified Unison.Codebase.Runtime as Runtime -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Reference as Reference -import Unison.Term ( Term ) -import qualified Unison.UnisonFile as UF -import qualified Unison.Lexer as L -import qualified Unison.Parser as Parser -import Unison.ShortHash ( ShortHash ) -import Unison.Type ( Type ) -import Unison.Codebase.ShortBranchHash - ( ShortBranchHash ) -import Unison.Codebase.Editor.AuthorInfo (AuthorInfo) - - -type AmbientAbilities v = [Type v Ann] 
-type SourceName = Text -type Source = Text -type LexedSource = (Text, [L.Token L.Lexeme]) - -data LoadSourceResult = InvalidSourceNameError - | LoadError - | LoadSuccess Text - -type TypecheckingResult v = - Result (Seq (Note v Ann)) - (Either Names0 (UF.TypecheckedUnisonFile v Ann)) - -data Command m i v a where - Eval :: m a -> Command m i v a - - ConfigLookup :: Configured a => Text -> Command m i v (Maybe a) - - Input :: Command m i v i - - -- Presents some output to the user - Notify :: Output v -> Command m i v () - NotifyNumbered :: NumberedOutput v -> Command m i v NumberedArgs - - -- literally just write some terms and types .unison/{terms,types} - AddDefsToCodebase :: UF.TypecheckedUnisonFile v Ann -> Command m i v () - - -- the hash length needed to disambiguate any definition in the codebase - CodebaseHashLength :: Command m i v Int - - TypeReferencesByShortHash :: ShortHash -> Command m i v (Set Reference) - TermReferencesByShortHash :: ShortHash -> Command m i v (Set Reference) - TermReferentsByShortHash :: ShortHash -> Command m i v (Set Referent) - - -- the hash length needed to disambiguate any branch in the codebase - BranchHashLength :: Command m i v Int - - BranchHashesByPrefix :: ShortBranchHash -> Command m i v (Set Branch.Hash) - - ParseType :: Names -> LexedSource - -> Command m i v (Either (Parser.Err v) (Type v Ann)) - - LoadSource :: SourceName -> Command m i v LoadSourceResult - - Typecheck :: AmbientAbilities v - -> Names - -> SourceName - -> LexedSource - -> Command m i v (TypecheckingResult v) - - TypecheckFile :: UF.UnisonFile v Ann - -> [Type v Ann] - -> Command m i v (TypecheckingResult v) - - -- Evaluate all watched expressions in a UnisonFile and return - -- their results, keyed by the name of the watch variable. 
The tuple returned - -- has the form: - -- (hash, (ann, sourceTerm, evaluatedTerm, isCacheHit)) - -- - -- where - -- `hash` is the hash of the original watch expression definition - -- `ann` gives the location of the watch expression - -- `sourceTerm` is a closed term (no free vars) for the watch expression - -- `evaluatedTerm` is the result of evaluating that `sourceTerm` - -- `isCacheHit` is True if the result was computed by just looking up - -- in a cache - -- - -- It's expected that the user of this action might add the - -- `(hash, evaluatedTerm)` mapping to a cache to make future evaluations - -- of the same watches instantaneous. - - Evaluate :: PPE.PrettyPrintEnv - -> UF.TypecheckedUnisonFile v Ann - -> Command m i v (Either Runtime.Error - ([(v, Term v ())], Map v - (Ann, UF.WatchKind, Reference, Term v (), Term v (), Runtime.IsCacheHit))) - - -- Evaluate a single closed definition - Evaluate1 :: PPE.PrettyPrintEnv -> Term v Ann -> Command m i v (Either Runtime.Error (Term v Ann)) - - -- Add a cached watch to the codebase - PutWatch :: UF.WatchKind -> Reference.Id -> Term v Ann -> Command m i v () - - -- Loads any cached watches of the given kind - LoadWatches :: UF.WatchKind -> Set Reference -> Command m i v [(Reference, Term v Ann)] - - -- Loads a root branch from some codebase, returning `Nothing` if not found. - -- Any definitions in the head of the requested root that aren't in the local - -- codebase are copied there. - LoadLocalRootBranch :: Command m i v (Branch m) - - -- Like `LoadLocalRootBranch`. - LoadLocalBranch :: Branch.Hash -> Command m i v (Branch m) - - ViewRemoteBranch :: - RemoteNamespace -> Command m i v (Either GitError (Branch m)) - - -- we want to import as little as possible, so we pass the SBH/path as part - -- of the `RemoteNamespace`. - ImportRemoteBranch :: - RemoteNamespace -> SyncMode -> Command m i v (Either GitError (Branch m)) - - -- Syncs the Branch to some codebase and updates the head to the head of this causal. 
- -- Any definitions in the head of the supplied branch that aren't in the target - -- codebase are copied there. - SyncLocalRootBranch :: Branch m -> Command m i v () - - SyncRemoteRootBranch :: - RemoteRepo -> Branch m -> SyncMode -> Command m i v (Either GitError ()) - - AppendToReflog :: Text -> Branch m -> Branch m -> Command m i v () - - -- load the reflog in file (chronological) order - LoadReflog :: Command m i v [Reflog.Entry] - - LoadTerm :: Reference.Id -> Command m i v (Maybe (Term v Ann)) - - -- todo: change this to take Reference and return DeclOrBuiltin - LoadType :: Reference.Id -> Command m i v (Maybe (Decl v Ann)) - - LoadTypeOfTerm :: Reference -> Command m i v (Maybe (Type v Ann)) - - PutTerm :: Reference.Id -> Term v Ann -> Type v Ann -> Command m i v () - - PutDecl :: Reference.Id -> Decl v Ann -> Command m i v () - - -- todo: eliminate these hopefully - -- (why, again? because we can know from the Reference?) - IsTerm :: Reference -> Command m i v Bool - IsType :: Reference -> Command m i v Bool - - -- Get the immediate (not transitive) dependents of the given reference - -- This might include historical definitions not in any current path; these - -- should be filtered by the caller of this command if that's not desired. - GetDependents :: Reference -> Command m i v (Set Reference) - - GetTermsOfType :: Type v Ann -> Command m i v (Set Referent) - GetTermsMentioningType :: Type v Ann -> Command m i v (Set Referent) - - -- Execute a UnisonFile for its IO effects - -- todo: Execute should do some evaluation? 
- Execute :: PPE.PrettyPrintEnv -> UF.TypecheckedUnisonFile v Ann -> Command m i v () - - CreateAuthorInfo :: Text -> Command m i v (AuthorInfo v Ann) - - RuntimeMain :: Command m i v (Type v Ann) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs b/parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs deleted file mode 100644 index 7f47e07797..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs +++ /dev/null @@ -1,12 +0,0 @@ -module Unison.Codebase.Editor.DisplayThing where - -import Unison.Reference as Reference - -data DisplayThing a = BuiltinThing | MissingThing Reference.Id | RegularThing a - deriving (Eq, Ord, Show) - -toMaybe :: DisplayThing a -> Maybe a -toMaybe = \case - RegularThing a -> Just a - _ -> Nothing - diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs deleted file mode 100644 index 2e66e2122d..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs +++ /dev/null @@ -1,249 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.Editor.Git - ( importRemoteBranch - , pushGitRootBranch - , viewRemoteBranch - ) where - -import Unison.Prelude - -import Control.Monad.Except ( MonadError - , throwError - , ExceptT - ) -import Control.Monad.Extra ((||^)) -import qualified Control.Exception -import qualified Data.Text as Text -import Shellmet ( ($?), ($|), ($^)) -import System.FilePath ( () ) -import qualified Unison.Codebase.GitError as GitError -import Unison.Codebase.GitError (GitError) -import qualified Unison.Codebase as Codebase -import Unison.Codebase (Codebase, CodebasePath) -import Unison.Codebase.Editor.RemoteRepo ( RemoteRepo(GitRepo) - , RemoteNamespace - , printRepo - ) -import Unison.Codebase.FileCodebase as FC -import Unison.Codebase.Branch ( Branch - , headHash - ) -import qualified Unison.Codebase.Path as Path -import Unison.Codebase.SyncMode ( SyncMode 
) -import qualified Unison.Util.Exception as Ex -import Unison.Util.Timing (time) -import qualified Unison.Codebase.Branch as Branch -import UnliftIO.IO (hFlush, stdout) -import UnliftIO.Directory (getXdgDirectory, XdgDirectory(XdgCache), doesDirectoryExist, findExecutable, removeDirectoryRecursive) -import Unison.Codebase.FileCodebase.Common (encodeFileName, updateCausalHead, branchHeadDir) - -tempGitDir :: MonadIO m => Text -> m FilePath -tempGitDir url = - getXdgDirectory XdgCache - $ "unisonlanguage" - "gitfiles" - encodeFileName (Text.unpack url) - -withStatus :: MonadIO m => String -> m a -> m a -withStatus str ma = do - flushStr str - a <- ma - flushStr (const ' ' <$> str) - pure a - where - flushStr str = do - liftIO . putStr $ " " ++ str ++ "\r" - hFlush stdout - --- | Given a remote git repo url, and branch/commit hash (currently --- not allowed): checks for git, clones or updates a cached copy of the repo -pullBranch :: (MonadIO m, MonadError GitError m) => RemoteRepo -> m CodebasePath -pullBranch (GitRepo _uri (Just t)) = error $ - "Pulling a specific commit isn't fully implemented or tested yet.\n" ++ - "InputPatterns.parseUri was expected to have prevented you " ++ - "from supplying the git treeish `" ++ Text.unpack t ++ "`!" -pullBranch repo@(GitRepo uri Nothing) = do - checkForGit - localPath <- tempGitDir uri - ifM (doesDirectoryExist localPath) - -- try to update existing directory - (ifM (isGitRepo localPath) - (checkoutExisting localPath) - (throwError (GitError.UnrecognizableCacheDir uri localPath))) - -- directory doesn't exist, so clone anew - (checkOutNew localPath Nothing) - pure localPath - - where - -- | Do a `git clone` (for a not-previously-cached repo). 
- checkOutNew :: (MonadIO m, MonadError GitError m) => CodebasePath -> Maybe Text -> m () - checkOutNew localPath branch = do - withStatus ("Downloading from " ++ Text.unpack uri ++ " ...") $ - (liftIO $ - "git" $^ (["clone", "--quiet"] ++ ["--depth", "1"] - ++ maybe [] (\t -> ["--branch", t]) branch - ++ [uri, Text.pack localPath])) - `withIOError` (throwError . GitError.CloneException repo . show) - isGitDir <- liftIO $ isGitRepo localPath - unless isGitDir . throwError $ GitError.UnrecognizableCheckoutDir uri localPath - - -- | Do a `git pull` on a cached repo. - checkoutExisting :: (MonadIO m, MonadError GitError m) => FilePath -> m () - checkoutExisting localPath = - ifM (isEmptyGitRepo localPath) - -- I don't know how to properly update from an empty remote repo. - -- As a heuristic, if this cached copy is empty, then the remote might - -- be too, so this impl. just wipes the cached copy and starts from scratch. - (do wipeDir localPath; checkOutNew localPath Nothing) - -- Otherwise proceed! - (withStatus ("Updating cached copy of " ++ Text.unpack uri ++ " ...") $ do - gitIn localPath ["reset", "--hard", "--quiet", "HEAD"] - gitIn localPath ["clean", "-d", "--force", "--quiet"] - gitIn localPath ["pull", "--force", "--quiet"]) - - isEmptyGitRepo :: MonadIO m => FilePath -> m Bool - isEmptyGitRepo localPath = liftIO $ - -- if rev-parse succeeds, the repo is _not_ empty, so return False; else True - (gitTextIn localPath ["rev-parse", "--verify", "--quiet", "HEAD"] $> False) - $? pure True - - -- | try removing a cached copy - wipeDir localPath = do - e <- Ex.tryAny . whenM (doesDirectoryExist localPath) $ - removeDirectoryRecursive localPath - case e of - Left e -> throwError (GitError.SomeOtherError (show e)) - Right _ -> pure () - --- | Sync elements as needed from a remote codebase into the local one. --- If `sbh` is supplied, we try to load the specified branch hash; --- otherwise we try to load the root branch. -importRemoteBranch - :: forall m v a - . 
MonadIO m - => Codebase m v a - -> Branch.Cache m - -> RemoteNamespace - -> SyncMode - -> ExceptT GitError m (Branch m) -importRemoteBranch codebase cache ns mode = do - (branch, cacheDir) <- viewRemoteBranch' cache ns - withStatus "Importing downloaded files into local codebase..." $ - time "SyncFromDirectory" $ - lift $ Codebase.syncFromDirectory codebase cacheDir mode branch - pure branch - --- | Pull a git branch and view it from the cache, without syncing into the --- local codebase. -viewRemoteBranch :: forall m. MonadIO m - => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m) -viewRemoteBranch cache = fmap fst . viewRemoteBranch' cache - -viewRemoteBranch' :: forall m. MonadIO m - => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) -viewRemoteBranch' cache (repo, sbh, path) = do - -- set up the cache dir - remotePath <- time "Git fetch" $ pullBranch repo - -- try to load the requested branch from it - branch <- time "Git fetch (sbh)" $ case sbh of - -- load the root branch - Nothing -> lift (FC.getRootBranch cache remotePath) >>= \case - Left Codebase.NoRootBranch -> pure Branch.empty - Left (Codebase.CouldntLoadRootBranch h) -> - throwError $ GitError.CouldntLoadRootBranch repo h - Left (Codebase.CouldntParseRootBranch s) -> - throwError $ GitError.CouldntParseRootBranch repo s - Right b -> pure b - -- load from a specific `ShortBranchHash` - Just sbh -> do - branchCompletions <- lift $ FC.branchHashesByPrefix remotePath sbh - case toList branchCompletions of - [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - [h] -> (lift $ FC.branchFromFiles cache remotePath h) >>= \case - Just b -> pure b - Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions - pure (Branch.getAt' path branch, remotePath) - --- | See if `git` is on the system path. 
-checkForGit :: MonadIO m => MonadError GitError m => m () -checkForGit = do - gitPath <- liftIO $ findExecutable "git" - when (isNothing gitPath) $ throwError GitError.NoGit - --- | Does `git` recognize this directory as being managed by git? -isGitRepo :: MonadIO m => FilePath -> m Bool -isGitRepo dir = liftIO $ - (True <$ gitIn dir ["rev-parse"]) $? pure False - --- | Perform an IO action, passing any IO exception to `handler` -withIOError :: MonadIO m => IO a -> (IOException -> m a) -> m a -withIOError action handler = - liftIO (fmap Right action `Control.Exception.catch` (pure . Left)) >>= - either handler pure - --- | Generate some `git` flags for operating on some arbitary checked out copy -setupGitDir :: FilePath -> [Text] -setupGitDir localPath = - ["--git-dir", Text.pack $ localPath ".git" - ,"--work-tree", Text.pack localPath] - -gitIn :: MonadIO m => FilePath -> [Text] -> m () -gitIn localPath args = liftIO $ "git" $^ (setupGitDir localPath <> args) - -gitTextIn :: MonadIO m => FilePath -> [Text] -> m Text -gitTextIn localPath args = liftIO $ "git" $| setupGitDir localPath <> args - --- Given a branch that is "after" the existing root of a given git repo, --- stage and push the branch (as the new root) + dependencies to the repo. 
-pushGitRootBranch - :: MonadIO m - => Codebase m v a - -> Branch.Cache m - -> Branch m - -> RemoteRepo - -> SyncMode - -> ExceptT GitError m () -pushGitRootBranch codebase cache branch repo syncMode = do - -- Pull the remote repo into a staging directory - (remoteRoot, remotePath) <- viewRemoteBranch' cache (repo, Nothing, Path.empty) - ifM (pure (remoteRoot == Branch.empty) - ||^ lift (remoteRoot `Branch.before` branch)) - -- ours is newer 👍, meaning this is a fast-forward push, - -- so sync branch to staging area - (stageAndPush remotePath) - (throwError $ GitError.PushDestinationHasNewStuff repo) - where - stageAndPush remotePath = do - let repoString = Text.unpack $ printRepo repo - withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ - lift (Codebase.syncToDirectory codebase remotePath syncMode branch) - updateCausalHead (branchHeadDir remotePath) (Branch._history branch) - -- push staging area to remote - withStatus ("Uploading to " ++ repoString ++ " ...") $ - unlessM - (push remotePath repo - `withIOError` (throwError . GitError.PushException repo . show)) - (throwError $ GitError.PushNoOp repo) - -- Commit our changes - push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO - push remotePath (GitRepo url gitbranch) = do - -- has anything changed? - status <- gitTextIn remotePath ["status", "--short"] - if Text.null status then - pure False - else do - gitIn remotePath ["add", "--all", "."] - gitIn remotePath - ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] - -- Push our changes to the repo - case gitbranch of - Nothing -> gitIn remotePath ["push", "--quiet", url] - Just gitbranch -> error $ - "Pushing to a specific branch isn't fully implemented or tested yet.\n" - ++ "InputPatterns.parseUri was expected to have prevented you " - ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
- -- gitIn remotePath ["push", "--quiet", url, gitbranch] - pure True diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs deleted file mode 100644 index a96d1b824d..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs +++ /dev/null @@ -1,272 +0,0 @@ -{-# OPTIONS_GHC -Wno-partial-type-signatures #-} - -{-# LANGUAGE GADTs #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE PartialTypeSignatures #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.Editor.HandleCommand where - -import Unison.Prelude - -import Unison.Codebase.Editor.Output -import Unison.Codebase.Editor.Command - -import qualified Unison.Builtin as B - -import qualified Crypto.Random as Random -import Control.Monad.Except ( runExceptT ) -import qualified Data.Configurator as Config -import Data.Configurator.Types ( Config ) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import Unison.Codebase ( Codebase ) -import qualified Unison.Codebase as Codebase -import Unison.Codebase.Branch ( Branch ) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Editor.Git as Git -import Unison.Parser ( Ann ) -import qualified Unison.Parser as Parser -import qualified Unison.Parsers as Parsers -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.Codebase.Runtime as Runtime -import Unison.Codebase.Runtime (Runtime) -import qualified Unison.Term as Term -import qualified Unison.UnisonFile as UF -import Unison.Util.Free ( Free ) -import qualified Unison.Util.Free as Free -import Unison.Var ( Var ) -import qualified Unison.Result as Result -import Unison.FileParsers ( parseAndSynthesizeFile - , synthesizeFile' - ) -import qualified Unison.PrettyPrintEnv as PPE -import Unison.Term 
(Term) -import Unison.Type (Type) -import qualified Unison.Codebase.Editor.AuthorInfo as AuthorInfo - -typecheck - :: (Monad m, Var v) - => [Type v Ann] - -> Codebase m v Ann - -> Parser.ParsingEnv - -> SourceName - -> LexedSource - -> m (TypecheckingResult v) -typecheck ambient codebase parsingEnv sourceName src = - Result.getResult $ parseAndSynthesizeFile ambient - (((<> B.typeLookup) <$>) . Codebase.typeLookupForDependencies codebase) - parsingEnv - (Text.unpack sourceName) - (fst src) - -typecheck' - :: Monad m - => Var v - => [Type v Ann] - -> Codebase m v Ann - -> UF.UnisonFile v Ann - -> m (TypecheckingResult v) -typecheck' ambient codebase file = do - typeLookup <- (<> B.typeLookup) - <$> Codebase.typeLookupForDependencies codebase (UF.dependencies file) - pure . fmap Right $ synthesizeFile' ambient typeLookup file - -commandLine - :: forall i v a gen - . (Var v, Random.DRG gen) - => Config - -> IO i - -> (Branch IO -> IO ()) - -> Runtime v - -> (Output v -> IO ()) - -> (NumberedOutput v -> IO NumberedArgs) - -> (SourceName -> IO LoadSourceResult) - -> Codebase IO v Ann - -> (Int -> IO gen) - -> Branch.Cache IO - -> Free (Command IO i v) a - -> IO a -commandLine config awaitInput setBranchRef rt notifyUser notifyNumbered loadSource codebase rngGen branchCache = - Free.foldWithIndex go - where - go :: forall x . 
Int -> Command IO i v x -> IO x - go i x = case x of - -- Wait until we get either user input or a unison file update - Eval m -> m - Input -> awaitInput - Notify output -> notifyUser output - NotifyNumbered output -> notifyNumbered output - ConfigLookup name -> - Config.lookup config name - LoadSource sourcePath -> loadSource sourcePath - - Typecheck ambient names sourceName source -> do - -- todo: if guids are being shown to users, - -- not ideal to generate new guid every time - rng <- rngGen i - let namegen = Parser.uniqueBase32Namegen rng - env = Parser.ParsingEnv namegen names - typecheck ambient codebase env sourceName source - TypecheckFile file ambient -> typecheck' ambient codebase file - Evaluate ppe unisonFile -> evalUnisonFile ppe unisonFile - Evaluate1 ppe term -> eval1 ppe term - LoadLocalRootBranch -> either (const Branch.empty) id <$> Codebase.getRootBranch codebase - LoadLocalBranch h -> fromMaybe Branch.empty <$> Codebase.getBranchForHash codebase h - SyncLocalRootBranch branch -> do - setBranchRef branch - Codebase.putRootBranch codebase branch - ViewRemoteBranch ns -> - runExceptT $ Git.viewRemoteBranch branchCache ns - ImportRemoteBranch ns syncMode -> - runExceptT $ Git.importRemoteBranch codebase branchCache ns syncMode - SyncRemoteRootBranch repo branch syncMode -> - runExceptT $ Git.pushGitRootBranch codebase branchCache branch repo syncMode - LoadTerm r -> Codebase.getTerm codebase r - LoadType r -> Codebase.getTypeDeclaration codebase r - LoadTypeOfTerm r -> Codebase.getTypeOfTerm codebase r - PutTerm r tm tp -> Codebase.putTerm codebase r tm tp - PutDecl r decl -> Codebase.putTypeDeclaration codebase r decl - PutWatch kind r e -> Codebase.putWatch codebase kind r e - LoadWatches kind rs -> catMaybes <$> traverse go (toList rs) where - go (Reference.Builtin _) = pure Nothing - go r@(Reference.DerivedId rid) = - fmap (r,) <$> Codebase.getWatch codebase kind rid - IsTerm r -> Codebase.isTerm codebase r - IsType r -> Codebase.isType 
codebase r - GetDependents r -> Codebase.dependents codebase r - AddDefsToCodebase unisonFile -> Codebase.addDefsToCodebase codebase unisonFile - GetTermsOfType ty -> Codebase.termsOfType codebase ty - GetTermsMentioningType ty -> Codebase.termsMentioningType codebase ty - CodebaseHashLength -> Codebase.hashLength codebase - -- all builtin and derived type references - TypeReferencesByShortHash sh -> do - fromCodebase <- Codebase.typeReferencesByPrefix codebase sh - let fromBuiltins = Set.filter (\r -> sh == Reference.toShortHash r) - $ B.intrinsicTypeReferences - pure (fromBuiltins <> Set.map Reference.DerivedId fromCodebase) - -- all builtin and derived term references - TermReferencesByShortHash sh -> do - fromCodebase <- Codebase.termReferencesByPrefix codebase sh - let fromBuiltins = Set.filter (\r -> sh == Reference.toShortHash r) - $ B.intrinsicTermReferences - pure (fromBuiltins <> Set.map Reference.DerivedId fromCodebase) - -- all builtin and derived term references & type constructors - TermReferentsByShortHash sh -> do - fromCodebase <- Codebase.termReferentsByPrefix codebase sh - let fromBuiltins = Set.map Referent.Ref - . 
Set.filter (\r -> sh == Reference.toShortHash r) - $ B.intrinsicTermReferences - pure (fromBuiltins <> Set.map (fmap Reference.DerivedId) fromCodebase) - BranchHashLength -> Codebase.branchHashLength codebase - BranchHashesByPrefix h -> Codebase.branchHashesByPrefix codebase h - ParseType names (src, _) -> pure $ - Parsers.parseType (Text.unpack src) (Parser.ParsingEnv mempty names) - RuntimeMain -> pure $ Runtime.mainType rt - --- Todo b -> doTodo codebase (Branch.head b) --- Propagate b -> do --- b0 <- Codebase.propagate codebase (Branch.head b) --- pure $ Branch.append b0 b - Execute ppe uf -> void $ evalUnisonFile ppe uf - AppendToReflog reason old new -> Codebase.appendReflog codebase reason old new - LoadReflog -> Codebase.getReflog codebase - CreateAuthorInfo t -> AuthorInfo.createAuthorInfo Parser.External t - - eval1 :: PPE.PrettyPrintEnv -> Term v Ann -> _ - eval1 ppe tm = do - let codeLookup = Codebase.toCodeLookup codebase - r <- Runtime.evaluateTerm codeLookup ppe rt tm - pure $ r <&> Term.amap (const Parser.External) - - evalUnisonFile :: PPE.PrettyPrintEnv -> UF.TypecheckedUnisonFile v Ann -> _ - evalUnisonFile ppe (UF.discardTypes -> unisonFile) = do - let codeLookup = Codebase.toCodeLookup codebase - selfContained <- Codebase.makeSelfContained' codeLookup unisonFile - let watchCache (Reference.DerivedId h) = do - m1 <- Codebase.getWatch codebase UF.RegularWatch h - m2 <- maybe (Codebase.getWatch codebase UF.TestWatch h) (pure . 
Just) m1 - pure $ Term.amap (const ()) <$> m2 - watchCache Reference.Builtin{} = pure Nothing - r <- Runtime.evaluateWatches codeLookup ppe watchCache rt selfContained - case r of - Left e -> pure (Left e) - Right rs@(_,map) -> do - forM_ (Map.elems map) $ \(_loc, kind, hash, _src, value, isHit) -> - if isHit then pure () - else case hash of - Reference.DerivedId h -> do - let value' = Term.amap (const Parser.External) value - Codebase.putWatch codebase kind h value' - Reference.Builtin{} -> pure () - pure $ Right rs - --- doTodo :: Monad m => Codebase m v a -> Branch0 -> m (TodoOutput v a) --- doTodo code b = do --- -- traceM $ "edited terms: " ++ show (Branch.editedTerms b) --- f <- Codebase.frontier code b --- let dirty = R.dom f --- frontier = R.ran f --- ppe = Branch.prettyPrintEnv b --- (frontierTerms, frontierTypes) <- loadDefinitions code frontier --- (dirtyTerms, dirtyTypes) <- loadDefinitions code dirty --- -- todo: something more intelligent here? --- scoreFn <- pure $ const 1 --- remainingTransitive <- Codebase.frontierTransitiveDependents code b frontier --- let --- addTermNames terms = [(PPE.termName ppe (Referent.Ref r), r, t) | (r,t) <- terms ] --- addTypeNames types = [(PPE.typeName ppe r, r, d) | (r,d) <- types ] --- frontierTermsNamed = addTermNames frontierTerms --- frontierTypesNamed = addTypeNames frontierTypes --- dirtyTermsNamed = sortOn (\(s,_,_,_) -> s) $ --- [ (scoreFn r, n, r, t) | (n,r,t) <- addTermNames dirtyTerms ] --- dirtyTypesNamed = sortOn (\(s,_,_,_) -> s) $ --- [ (scoreFn r, n, r, t) | (n,r,t) <- addTypeNames dirtyTypes ] --- pure $ --- TodoOutput_ --- (Set.size remainingTransitive) --- (frontierTermsNamed, frontierTypesNamed) --- (dirtyTermsNamed, dirtyTypesNamed) --- (Branch.conflicts' b) - --- loadDefinitions :: Monad m => Codebase m v a -> Set Reference --- -> m ( [(Reference, Maybe (Type v a))], --- [(Reference, DisplayThing (Decl v a))] ) --- loadDefinitions code refs = do --- termRefs <- filterM (Codebase.isTerm code) 
(toList refs) --- terms <- forM termRefs $ \r -> (r,) <$> Codebase.getTypeOfTerm code r --- typeRefs <- filterM (Codebase.isType code) (toList refs) --- types <- forM typeRefs $ \r -> do --- case r of --- Reference.Builtin _ -> pure (r, BuiltinThing) --- Reference.DerivedId id -> do --- decl <- Codebase.getTypeDeclaration code id --- case decl of --- Nothing -> pure (r, MissingThing id) --- Just d -> pure (r, RegularThing d) --- pure (terms, types) --- --- -- | Write all of the builtins into the codebase --- initializeCodebase :: forall m . Monad m => Codebase m Symbol Ann -> m () --- initializeCodebase c = do --- traverse_ (go Right) B.builtinDataDecls --- traverse_ (go Left) B.builtinEffectDecls --- void $ fileToBranch updateCollisionHandler c mempty IOSource.typecheckedFile --- where --- go :: (t -> Decl Symbol Ann) -> (a, (Reference.Reference, t)) -> m () --- go f (_, (ref, decl)) = case ref of --- Reference.DerivedId id -> Codebase.putTypeDeclaration c id (f decl) --- _ -> pure () --- --- -- todo: probably don't use this anywhere --- nameDistance :: Name -> Name -> Maybe Int --- nameDistance (Name.toString -> q) (Name.toString -> n) = --- if q == n then Just 0-- exact match is top choice --- else if map toLower q == map toLower n then Just 1-- ignore case --- else if q `isSuffixOf` n then Just 2-- matching suffix is p.good --- else if q `isPrefixOf` n then Just 3-- matching prefix --- else Nothing diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs deleted file mode 100644 index 5ebcfd754c..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs +++ /dev/null @@ -1,2898 +0,0 @@ -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE ApplicativeDo #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE GADTs #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE PartialTypeSignatures #-} -{-# LANGUAGE 
RecordWildCards #-} -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE EmptyCase #-} - -module Unison.Codebase.Editor.HandleInput - ( loop - , loopState0 - , LoopState(..) - , currentPath - , parseSearchType - ) -where - -import Unison.Prelude - -import Unison.Codebase.MainTerm ( getMainTerm ) -import qualified Unison.Codebase.MainTerm as MainTerm -import Unison.Codebase.Editor.Command -import Unison.Codebase.Editor.Input -import Unison.Codebase.Editor.Output -import Unison.Codebase.Editor.DisplayThing -import qualified Unison.Codebase.Editor.Output as Output -import Unison.Codebase.Editor.SlurpResult (SlurpResult(..)) -import qualified Unison.Codebase.Editor.SlurpResult as Slurp -import Unison.Codebase.Editor.SlurpComponent (SlurpComponent(..)) -import qualified Unison.Codebase.Editor.SlurpComponent as SC -import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, printNamespace) -import qualified Unison.CommandLine.InputPattern as InputPattern -import qualified Unison.CommandLine.InputPatterns as InputPatterns - -import Control.Lens -import Control.Monad.State ( StateT ) -import Control.Monad.Except ( ExceptT(..), runExceptT, withExceptT) -import Data.Bifunctor ( second, first ) -import Data.Configurator () -import qualified Data.List as List -import Data.List ( partition ) -import Data.List.Extra ( nubOrd, sort ) -import qualified Data.Map as Map -import qualified Data.Text as Text -import qualified Text.Megaparsec as P -import qualified Data.Set as Set -import Data.Sequence ( Seq(..) ) -import qualified Unison.ABT as ABT -import qualified Unison.Codebase.BranchDiff as BranchDiff -import qualified Unison.Codebase.Editor.Output.BranchDiff as OBranchDiff -import Unison.Codebase.Branch ( Branch(..) - , Branch0(..) - ) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.BranchUtil as BranchUtil -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Codebase.Metadata as Metadata -import Unison.Codebase.Patch ( Patch(..) 
) -import qualified Unison.Codebase.Patch as Patch -import Unison.Codebase.Path ( Path - , Path'(..) ) -import qualified Unison.Codebase.Path as Path -import qualified Unison.Codebase.Reflog as Reflog -import Unison.Codebase.SearchResult ( SearchResult ) -import qualified Unison.Codebase.SearchResult as SR -import qualified Unison.Codebase.ShortBranchHash as SBH -import qualified Unison.Codebase.SyncMode as SyncMode -import qualified Unison.Builtin.Decls as DD -import qualified Unison.DataDeclaration as DD -import qualified Unison.HashQualified as HQ -import qualified Unison.HashQualified' as HQ' -import qualified Unison.Name as Name -import Unison.Name ( Name ) -import Unison.Names3 ( Names(..), Names0 - , pattern Names0 ) -import qualified Unison.Names2 as Names -import qualified Unison.Names3 as Names3 -import Unison.Parser ( Ann(..) ) -import Unison.Reference ( Reference(..) ) -import qualified Unison.Reference as Reference -import Unison.Referent ( Referent ) -import qualified Unison.Referent as Referent -import Unison.Result ( pattern Result ) -import qualified Unison.ShortHash as SH -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.Result as Result -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Find as Find -import Unison.Util.Free ( Free ) -import qualified Unison.Util.Free as Free -import Unison.Util.List ( uniqueBy ) -import qualified Unison.Util.Relation as R -import qualified Unison.Util.Relation4 as R4 -import Unison.Util.Timing (unsafeTime) -import Unison.Util.TransitiveClosure (transitiveClosure) -import Unison.Var ( Var ) -import qualified Unison.Var as Var -import qualified Unison.Codebase.TypeEdit as TypeEdit -import Unison.Codebase.TermEdit (TermEdit(..)) -import qualified Unison.Codebase.TermEdit as TermEdit -import qualified Unison.Typechecker as Typechecker -import qualified Unison.PrettyPrintEnv as PPE -import Unison.Runtime.IOSource ( isTest ) -import qualified 
Unison.Runtime.IOSource as IOSource -import qualified Unison.Util.Star3 as Star3 -import qualified Unison.Util.Monoid as Monoid -import Unison.UnisonFile (TypecheckedUnisonFile) -import qualified Unison.Codebase.Editor.TodoOutput as TO -import qualified Unison.Lexer as L -import Unison.Codebase.Editor.SearchResult' (SearchResult') -import qualified Unison.Codebase.Editor.SearchResult' as SR' -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency (LabeledDependency) -import Unison.Term (Term) -import Unison.Type (Type) -import qualified Unison.Builtin as Builtin -import Unison.NameSegment (NameSegment(..)) -import qualified Unison.NameSegment as NameSegment -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import qualified Unison.Codebase.Editor.Propagate as Propagate -import qualified Unison.Codebase.Editor.UriParser as UriParser -import Data.Tuple.Extra (uncurry3) -import qualified Unison.CommandLine.DisplayValues as DisplayValues -import qualified Control.Error.Util as ErrorUtil -import Unison.Util.Monoid (intercalateMap) -import Data.List.NonEmpty (NonEmpty) -import qualified Data.List.NonEmpty as Nel -import Unison.Codebase.Editor.AuthorInfo (AuthorInfo(..)) - -type F m i v = Free (Command m i v) - --- type (Action m i v) a -type Action m i v = MaybeT (StateT (LoopState m v) (F m i v)) - -_liftToAction :: m a -> Action m i v a -_liftToAction = lift . lift . Free.eval . 
Eval - -data LoopState m v - = LoopState - { _root :: Branch m - , _lastSavedRoot :: Branch m - -- the current position in the namespace - , _currentPathStack :: NonEmpty Path.Absolute - - -- TBD - -- , _activeEdits :: Set Branch.EditGuid - - -- The file name last modified, and whether to skip the next file - -- change event for that path (we skip file changes if the file has - -- just been modified programmatically) - , _latestFile :: Maybe (FilePath, SkipNextUpdate) - , _latestTypecheckedFile :: Maybe (UF.TypecheckedUnisonFile v Ann) - - -- The previous user input. Used to request confirmation of - -- questionable user commands. - , _lastInput :: Maybe Input - - -- A 1-indexed list of strings that can be referenced by index at the - -- CLI prompt. e.g. Given ["Foo.bat", "Foo.cat"], - -- `rename 2 Foo.foo` will rename `Foo.cat` to `Foo.foo`. - , _numberedArgs :: NumberedArgs - } - -type SkipNextUpdate = Bool -type InputDescription = Text - -makeLenses ''LoopState - --- replacing the old read/write scalar Lens with "peek" Getter for the NonEmpty -currentPath :: Getter (LoopState m v) Path.Absolute -currentPath = currentPathStack . to Nel.head - -loopState0 :: Branch m -> Path.Absolute -> LoopState m v -loopState0 b p = LoopState b b (pure p) Nothing Nothing Nothing [] - -type Action' m v = Action m (Either Event Input) v - -defaultPatchNameSegment :: NameSegment -defaultPatchNameSegment = "patch" - -loop :: forall m v . 
(Monad m, Var v) => Action m (Either Event Input) v () -loop = do - uf <- use latestTypecheckedFile - root' <- use root - currentPath' <- use currentPath - latestFile' <- use latestFile - currentBranch' <- getAt currentPath' - e <- eval Input - hqLength <- eval CodebaseHashLength - sbhLength <- eval BranchHashLength - let - sbh = SBH.fromHash sbhLength - root0 = Branch.head root' - currentBranch0 = Branch.head currentBranch' - defaultPatchPath :: PatchPath - defaultPatchPath = (Path' $ Left currentPath', defaultPatchNameSegment) - resolveSplit' :: (Path', a) -> (Path, a) - resolveSplit' = Path.fromAbsoluteSplit . Path.toAbsoluteSplit currentPath' - resolveToAbsolute :: Path' -> Path.Absolute - resolveToAbsolute = Path.resolve currentPath' - getAtSplit :: Path.Split -> Maybe (Branch m) - getAtSplit p = BranchUtil.getBranch p root0 - getAtSplit' :: Path.Split' -> Maybe (Branch m) - getAtSplit' = getAtSplit . resolveSplit' - getPatchAtSplit' :: Path.Split' -> Action' m v (Maybe Patch) - getPatchAtSplit' s = do - let (p, seg) = Path.toAbsoluteSplit currentPath' s - b <- getAt p - eval . 
Eval $ Branch.getMaybePatch seg (Branch.head b) - getHQ'TermsIncludingHistorical p = - getTermsIncludingHistorical (resolveSplit' p) root0 - - getHQ'Terms :: Path.HQSplit' -> Set Referent - getHQ'Terms p = BranchUtil.getTerm (resolveSplit' p) root0 - getHQ'Types :: Path.HQSplit' -> Set Reference - getHQ'Types p = BranchUtil.getType (resolveSplit' p) root0 - getHQTerms :: HQ.HashQualified -> Action' m v (Set Referent) - getHQTerms hq = case hq of - HQ.NameOnly n -> let - -- absolute-ify the name, then lookup in deepTerms of root - path :: Path.Path' - path = Path.fromName' n - Path.Absolute absPath = resolveToAbsolute path - in pure $ R.lookupRan (Path.toName absPath) (Branch.deepTerms root0) - HQ.HashOnly sh -> hashOnly sh - HQ.HashQualified _ sh -> hashOnly sh - where - hashOnly sh = eval $ TermReferentsByShortHash sh - - resolveHHQS'Types :: HashOrHQSplit' -> Action' m v (Set Reference) - resolveHHQS'Types = either - (eval . TypeReferencesByShortHash) - (pure . getHQ'Types) - -- Term Refs and Cons - resolveHHQS'Referents = either - (eval . TermReferentsByShortHash) - (pure . getHQ'Terms) - getTypes :: Path.Split' -> Set Reference - getTypes = getHQ'Types . fmap HQ'.NameOnly - getTerms :: Path.Split' -> Set Referent - getTerms = getHQ'Terms . fmap HQ'.NameOnly - getPatchAt :: Path.Split' -> Action' m v Patch - getPatchAt patchPath' = do - let (p, seg) = Path.toAbsoluteSplit currentPath' patchPath' - b <- getAt p - eval . Eval $ Branch.getPatch seg (Branch.head b) - withFile ambient sourceName lexed@(text, tokens) k = do - let - getHQ = \case - L.Backticks s (Just sh) -> - Just (HQ.HashQualified (Name.unsafeFromString s) sh) - L.WordyId s (Just sh) -> - Just (HQ.HashQualified (Name.unsafeFromString s) sh) - L.SymbolyId s (Just sh) -> - Just (HQ.HashQualified (Name.unsafeFromString s) sh) - L.Hash sh -> Just (HQ.HashOnly sh) - _ -> Nothing - hqs = Set.fromList . mapMaybe (getHQ . 
L.payload) $ tokens - parseNames :: Names <- makeHistoricalParsingNames hqs - latestFile .= Just (Text.unpack sourceName, False) - latestTypecheckedFile .= Nothing - Result notes r <- eval $ Typecheck ambient parseNames sourceName lexed - case r of - -- Parsing failed - Nothing -> respond $ - ParseErrors text [ err | Result.Parsing err <- toList notes ] - Just (Left errNames) -> do - ppe <- prettyPrintEnv =<< makeShadowedPrintNamesFromHQ hqs errNames - respond $ - TypeErrors text ppe [ err | Result.TypeError err <- toList notes ] - Just (Right uf) -> k uf - loadUnisonFile sourceName text = do - let lexed = L.lexer (Text.unpack sourceName) (Text.unpack text) - withFile [] sourceName (text, lexed) $ \unisonFile -> do - sr <- toSlurpResult currentPath' unisonFile <$> slurpResultNames0 - names <- makeShadowedPrintNamesFromLabeled - (UF.termSignatureExternalLabeledDependencies unisonFile) - (UF.typecheckedToNames0 unisonFile) - ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl names - eval . Notify $ Typechecked sourceName ppe sr unisonFile - unlessError' EvaluationFailure do - (bindings, e) <- ExceptT . eval . Evaluate ppe $ unisonFile - lift do - let e' = Map.map go e - go (ann, kind, _hash, _uneval, eval, isHit) = (ann, kind, eval, isHit) - unless (null e') $ - eval . Notify $ Evaluated text ppe bindings e' - latestTypecheckedFile .= Just unisonFile - - case e of - Left (IncomingRootBranch hashes) -> - eval . Notify $ WarnIncomingRootBranch - (SBH.fromHash sbhLength $ Branch.headHash root') - (Set.map (SBH.fromHash sbhLength) hashes) - Left (UnisonFileChanged sourceName text) -> - -- We skip this update if it was programmatically generated - if maybe False snd latestFile' - then modifying latestFile (fmap (const False) <$>) - else loadUnisonFile sourceName text - Right input -> - let - ifConfirmed = ifM (confirmedCommand input) - branchNotFound = respond . BranchNotFound - branchNotFound' = respond . BranchNotFound . 
Path.unsplit' - patchNotFound :: Path.Split' -> Action' m v () - patchNotFound s = respond $ PatchNotFound s - patchExists :: Path.Split' -> Action' m v () - patchExists s = respond $ PatchAlreadyExists s - typeNotFound = respond . TypeNotFound - typeNotFound' = respond . TypeNotFound' - termNotFound = respond . TermNotFound - termNotFound' = respond . TermNotFound' - nameConflicted src tms tys = respond (DeleteNameAmbiguous hqLength src tms tys) - typeConflicted src = nameConflicted src Set.empty - termConflicted src tms = nameConflicted src tms Set.empty - hashConflicted src = respond . HashAmbiguous src - hqNameQuery' doSuffixify hqs = do - let (hqnames, hashes) = partition (isJust . HQ.toName) hqs - termRefs <- filter (not . Set.null . snd) . zip hashes <$> traverse - (eval . TermReferentsByShortHash) - (catMaybes (HQ.toHash <$> hashes)) - typeRefs <- filter (not . Set.null . snd) . zip hashes <$> traverse - (eval . TypeReferencesByShortHash) - (catMaybes (HQ.toHash <$> hashes)) - parseNames0 <- makeHistoricalParsingNames $ Set.fromList hqnames - let - mkTermResult n r = SR.termResult (HQ'.fromHQ' n) r Set.empty - mkTypeResult n r = SR.typeResult (HQ'.fromHQ' n) r Set.empty - termResults = - (\(n, tms) -> (n, toList $ mkTermResult n <$> toList tms)) <$> termRefs - typeResults = - (\(n, tps) -> (n, toList $ mkTypeResult n <$> toList tps)) <$> typeRefs - parseNames = (if doSuffixify then Names3.suffixify else id) parseNames0 - resultss = searchBranchExact hqLength parseNames hqnames - missingRefs = - [ x - | x <- hashes - , isNothing (lookup x termRefs) && isNothing (lookup x typeRefs) - ] - (misses, hits) = - partition (\(_, results) -> null results) (zip hqs resultss) - results = - List.sort - . 
uniqueBy SR.toReferent - $ (hits ++ termResults ++ typeResults) - >>= snd - pure (missingRefs ++ (fst <$> misses), results) - hqNameQuery = hqNameQuery' False - hqNameQuerySuffixify = hqNameQuery' True - typeReferences :: [SearchResult] -> [Reference] - typeReferences rs - = [ r | SR.Tp (SR.TypeResult _ r _) <- rs ] - termReferences :: [SearchResult] -> [Reference] - termReferences rs = - [ r | SR.Tm (SR.TermResult _ (Referent.Ref r) _) <- rs ] - termResults rs = [ r | SR.Tm r <- rs ] - typeResults rs = [ r | SR.Tp r <- rs ] - doRemoveReplacement from patchPath isTerm = do - let patchPath' = fromMaybe defaultPatchPath patchPath - patch <- getPatchAt patchPath' - (misses', hits) <- hqNameQuery [from] - let tpRefs = Set.fromList $ typeReferences hits - tmRefs = Set.fromList $ termReferences hits - tmMisses = misses' - <> (HQ'.toHQ . SR.termName <$> termResults hits) - tpMisses = misses' - <> (HQ'.toHQ . SR.typeName <$> typeResults hits) - misses = if isTerm then tpMisses else tmMisses - go :: Reference -> Action m (Either Event Input) v () - go fr = do - let termPatch = - over Patch.termEdits (R.deleteDom fr) patch - typePatch = - over Patch.typeEdits (R.deleteDom fr) patch - (patchPath'', patchName) = resolveSplit' patchPath' - -- Save the modified patch - stepAtM inputDescription - (patchPath'', - Branch.modifyPatches - patchName - (const (if isTerm then termPatch else typePatch))) - -- Say something - success - unless (null misses) $ - respond $ SearchTermsNotFound misses - traverse_ go (if isTerm then tmRefs else tpRefs) - branchExists dest _x = respond $ BranchAlreadyExists dest - branchExistsSplit = branchExists . Path.unsplit' - typeExists dest = respond . TypeAlreadyExists dest - termExists dest = respond . 
TermAlreadyExists dest - -- | try to get these as close as possible to the command that caused the change - inputDescription :: InputDescription - inputDescription = case input of - ForkLocalBranchI src dest -> "fork " <> hp' src <> " " <> p' dest - MergeLocalBranchI src dest mode -> case mode of - Branch.RegularMerge -> "merge " <> p' src <> " " <> p' dest - Branch.SquashMerge -> "merge.squash " <> p' src <> " " <> p' dest - ResetRootI src -> "reset-root " <> hp' src - AliasTermI src dest -> "alias.term " <> hhqs' src <> " " <> ps' dest - AliasTypeI src dest -> "alias.type " <> hhqs' src <> " " <> ps' dest - AliasManyI srcs dest -> - "alias.many " <> intercalateMap " " hqs srcs <> " " <> p' dest - MoveTermI src dest -> "move.term " <> hqs' src <> " " <> ps' dest - MoveTypeI src dest -> "move.type " <> hqs' src <> " " <> ps' dest - MoveBranchI src dest -> "move.namespace " <> ops' src <> " " <> ps' dest - MovePatchI src dest -> "move.patch " <> ps' src <> " " <> ps' dest - CopyPatchI src dest -> "copy.patch " <> ps' src <> " " <> ps' dest - DeleteI thing -> "delete " <> hqs' thing - DeleteTermI def -> "delete.term " <> hqs' def - DeleteTypeI def -> "delete.type " <> hqs' def - DeleteBranchI opath -> "delete.namespace " <> ops' opath - DeletePatchI path -> "delete.patch " <> ps' path - ReplaceTermI src target p -> - "replace.term " <> HQ.toText src <> " " - <> HQ.toText target <> " " - <> opatch p - ReplaceTypeI src target p -> - "replace.type " <> HQ.toText src <> " " - <> HQ.toText target <> " " - <> opatch p - ResolveTermNameI path -> "resolve.termName " <> hqs' path - ResolveTypeNameI path -> "resolve.typeName " <> hqs' path - AddI _selection -> "add" - UpdateI p _selection -> "update " <> opatch p - PropagatePatchI p scope -> "patch " <> ps' p <> " " <> p' scope - UndoI{} -> "undo" - ExecuteI s -> "execute " <> Text.pack s - LinkI md defs -> - "link " <> HQ.toText md <> " " <> intercalateMap " " hqs' defs - UnlinkI md defs -> - "unlink " <> HQ.toText md <> " " 
<> intercalateMap " " hqs' defs - UpdateBuiltinsI -> "builtins.update" - MergeBuiltinsI -> "builtins.merge" - MergeIOBuiltinsI -> "builtins.mergeio" - PullRemoteBranchI orepo dest _syncMode -> - (Text.pack . InputPattern.patternName - $ InputPatterns.patternFromInput input) - <> " " - -- todo: show the actual config-loaded namespace - <> maybe "(remote namespace from .unisonConfig)" - (uncurry3 printNamespace) orepo - <> " " - <> p' dest - LoadI{} -> wat - PreviewAddI{} -> wat - PreviewUpdateI{} -> wat - CreateAuthorI (NameSegment id) name -> "create.author " <> id <> " " <> name - CreatePullRequestI{} -> wat - LoadPullRequestI base head dest -> - "pr.load " - <> uncurry3 printNamespace base - <> " " - <> uncurry3 printNamespace head - <> " " - <> p' dest - PushRemoteBranchI{} -> wat - PreviewMergeLocalBranchI{} -> wat - DiffNamespaceI{} -> wat - SwitchBranchI{} -> wat - PopBranchI{} -> wat - NamesI{} -> wat - TodoI{} -> wat - ListEditsI{} -> wat - ListDependenciesI{} -> wat - ListDependentsI{} -> wat - HistoryI{} -> wat - TestI{} -> wat - LinksI{} -> wat - SearchByNameI{} -> wat - FindShallowI{} -> wat - FindPatchI{} -> wat - ShowDefinitionI{} -> wat - DisplayI{} -> wat - DocsI{} -> wat - ShowDefinitionByPrefixI{} -> wat - ShowReflogI{} -> wat - DebugNumberedArgsI{} -> wat - DebugBranchHistoryI{} -> wat - DebugTypecheckedUnisonFileI{} -> wat - QuitI{} -> wat - DeprecateTermI{} -> undefined - DeprecateTypeI{} -> undefined - RemoveTermReplacementI src p -> - "delete.term-replacement" <> HQ.toText src <> " " <> opatch p - RemoveTypeReplacementI src p -> - "delete.type-replacement" <> HQ.toText src <> " " <> opatch p - where - hp' = either (Text.pack . show) p' - p' = Text.pack . show . resolveToAbsolute - ops' = maybe "." ps' - opatch = ps' . fromMaybe defaultPatchPath - wat = error $ show input ++ " is not expected to alter the branch" - hhqs' (Left sh) = SH.toText sh - hhqs' (Right x) = hqs' x - hqs' (p, hq) = - Monoid.unlessM (Path.isRoot' p) (p' p) <> "." 
<> Text.pack (show hq) - hqs (p, hq) = hqs' (Path' . Right . Path.Relative $ p, hq) - ps' = p' . Path.unsplit' - stepAt = Unison.Codebase.Editor.HandleInput.stepAt inputDescription - stepManyAt = Unison.Codebase.Editor.HandleInput.stepManyAt inputDescription - stepManyAtNoSync = - Unison.Codebase.Editor.HandleInput.stepManyAtNoSync - updateRoot = flip Unison.Codebase.Editor.HandleInput.updateRoot inputDescription - syncRoot = use root >>= updateRoot - updateAtM = Unison.Codebase.Editor.HandleInput.updateAtM inputDescription - unlessGitError = unlessError' (Output.GitError input) - importRemoteBranch ns mode = ExceptT . eval $ ImportRemoteBranch ns mode - viewRemoteBranch ns = ExceptT . eval $ ViewRemoteBranch ns - syncRemoteRootBranch repo b mode = - ExceptT . eval $ SyncRemoteRootBranch repo b mode - handleFailedDelete failed failedDependents = do - failed <- loadSearchResults $ SR.fromNames failed - failedDependents <- loadSearchResults $ SR.fromNames failedDependents - ppe <- prettyPrintEnv =<< makePrintNamesFromLabeled' - (foldMap SR'.labeledDependencies $ failed <> failedDependents) - respond $ CantDelete ppe failed failedDependents - saveAndApplyPatch patchPath'' patchName patch' = do - stepAtM (inputDescription <> " (1/2)") - (patchPath'', - Branch.modifyPatches patchName (const patch')) - -- Apply the modified patch to the current path - -- since we might be able to propagate further. 
- void $ propagatePatch inputDescription patch' currentPath' - -- Say something - success - previewResponse sourceName sr uf = do - names <- makeShadowedPrintNamesFromLabeled - (UF.termSignatureExternalLabeledDependencies uf) - (UF.typecheckedToNames0 uf) - ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl names - respond $ Typechecked (Text.pack sourceName) ppe sr uf - - addDefaultMetadata - :: SlurpComponent v - -> Action m (Either Event Input) v () - addDefaultMetadata adds = do - let addedVs = Set.toList $ SC.types adds <> SC.terms adds - addedNs = traverse (Path.hqSplitFromName' . Name.fromVar) addedVs - case addedNs of - Nothing -> - error $ "I couldn't parse a name I just added to the codebase! " - <> "-- Added names: " <> show addedVs - Just addedNames -> do - dm <- resolveDefaultMetadata currentPath' - case toList dm of - [] -> pure () - dm' -> do - let hqs = traverse InputPatterns.parseHashQualifiedName dm' - case hqs of - Left e -> respond $ ConfiguredMetadataParseError - (Path.absoluteToPath' currentPath') - (show dm') - e - Right defaultMeta -> - manageLinks True addedNames defaultMeta Metadata.insert - - -- Add/remove links between definitions and metadata. - -- `silent` controls whether this produces any output to the user. - -- `srcs` is (names of the) definitions to pass to `op` - -- `mdValues` is (names of the) metadata to pass to `op` - -- `op` is the operation to add/remove/alter metadata mappings. - -- e.g. `Metadata.insert` is passed to add metadata links. - manageLinks :: Bool - -> [(Path', HQ'.HQSegment)] - -> [HQ.HashQualified] - -> (forall r. 
Ord r - => (r, Metadata.Type, Metadata.Value) - -> Branch.Star r NameSegment - -> Branch.Star r NameSegment) - -> Action m (Either Event Input) v () - manageLinks silent srcs mdValues op = do - mdValuels <- fmap (first toList) <$> - traverse (\x -> fmap (,x) (getHQTerms x)) mdValues - before <- Branch.head <$> use root - traverse_ go mdValuels - after <- Branch.head <$> use root - (ppe, outputDiff) <- diffHelper before after - if not silent then - if OBranchDiff.isEmpty outputDiff - then respond NoOp - else respondNumbered $ ShowDiffNamespace Path.absoluteEmpty - Path.absoluteEmpty - ppe - outputDiff - else unless (OBranchDiff.isEmpty outputDiff) $ - respond DefaultMetadataNotification - where - go (mdl, hqn) = do - newRoot <- use root - let r0 = Branch.head newRoot - getTerms p = BranchUtil.getTerm (resolveSplit' p) r0 - getTypes p = BranchUtil.getType (resolveSplit' p) r0 - !srcle = toList . getTerms =<< srcs - !srclt = toList . getTypes =<< srcs - names0 <- basicPrettyPrintNames0 - ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl (Names names0 mempty) - case mdl of - [r@(Referent.Ref mdValue)] -> do - mdType <- eval $ LoadTypeOfTerm mdValue - case mdType of - Nothing -> respond $ MetadataMissingType ppe r - Just ty -> do - let steps = - bimap (Path.unabsolute . resolveToAbsolute) - (const . step $ Type.toReference ty) - <$> srcs - stepManyAtNoSync steps - where - step mdType b0 = - let tmUpdates terms = foldl' go terms srcle - where go terms src = op (src, mdType, mdValue) terms - tyUpdates types = foldl' go types srclt - where go types src = op (src, mdType, mdValue) types - in over Branch.terms tmUpdates . 
over Branch.types tyUpdates $ b0 - mdValues -> respond $ MetadataAmbiguous hqn ppe mdValues - delete - :: (Path.HQSplit' -> Set Referent) -- compute matching terms - -> (Path.HQSplit' -> Set Reference) -- compute matching types - -> Path.HQSplit' - -> Action' m v () - delete getHQ'Terms getHQ'Types hq = do - let matchingTerms = toList (getHQ'Terms hq) - let matchingTypes = toList (getHQ'Types hq) - case (matchingTerms, matchingTypes) of - ([], []) -> respond (NameNotFound hq) - (Set.fromList -> tms, Set.fromList -> tys) -> goMany tms tys - where - resolvedPath = resolveSplit' (HQ'.toName <$> hq) - goMany tms tys = do - let rootNames = Branch.toNames0 root0 - name = Path.toName (Path.unsplit resolvedPath) - toRel :: Ord ref => Set ref -> R.Relation Name ref - toRel = R.fromList . fmap (name,) . toList - -- these names are relative to the root - toDelete = Names0 (toRel tms) (toRel tys) - (failed, failedDependents) <- - getEndangeredDependents (eval . GetDependents) toDelete rootNames - if failed == mempty then do - let makeDeleteTermNames = fmap (BranchUtil.makeDeleteTermName resolvedPath) . toList $ tms - let makeDeleteTypeNames = fmap (BranchUtil.makeDeleteTypeName resolvedPath) . toList $ tys - stepManyAt (makeDeleteTermNames ++ makeDeleteTypeNames) - root'' <- use root - diffHelper (Branch.head root') (Branch.head root'') >>= - respondNumbered . uncurry ShowDiffAfterDeleteDefinitions - else handleFailedDelete failed failedDependents - in case input of - ShowReflogI -> do - entries <- convertEntries Nothing [] <$> eval LoadReflog - numberedArgs .= - fmap (('#':) . SBH.toString . Output.hash) entries - respond $ ShowReflog entries - where - -- reverses & formats entries, adds synthetic entries when there is a - -- discontinuity in the reflog. 
- convertEntries :: Maybe Branch.Hash - -> [Output.ReflogEntry] - -> [Reflog.Entry] - -> [Output.ReflogEntry] - convertEntries _ acc [] = acc - convertEntries Nothing acc entries@(Reflog.Entry old _ _ : _) = - convertEntries - (Just old) - (Output.ReflogEntry (SBH.fromHash sbhLength old) "(initial reflogged namespace)" : acc) - entries - convertEntries (Just lastHash) acc entries@(Reflog.Entry old new reason : rest) = - if lastHash /= old then - convertEntries - (Just old) - (Output.ReflogEntry (SBH.fromHash sbhLength old) "(external change)" : acc) - entries - else - convertEntries - (Just new) - (Output.ReflogEntry (SBH.fromHash sbhLength new) reason : acc) - rest - - ResetRootI src0 -> - case src0 of - Left hash -> unlessError do - newRoot <- resolveShortBranchHash hash - lift do - updateRoot newRoot - success - Right path' -> do - newRoot <- getAt $ resolveToAbsolute path' - if Branch.isEmpty newRoot then respond $ BranchNotFound path' - else do - updateRoot newRoot - success - ForkLocalBranchI src0 dest0 -> do - let tryUpdateDest srcb dest0 = do - let dest = resolveToAbsolute dest0 - -- if dest isn't empty: leave dest unchanged, and complain. 
- destb <- getAt dest - if Branch.isEmpty destb then do - ok <- updateAtM dest (const $ pure srcb) - if ok then success else respond $ BranchEmpty src0 - else respond $ BranchAlreadyExists dest0 - case src0 of - Left hash -> unlessError do - srcb <- resolveShortBranchHash hash - lift $ tryUpdateDest srcb dest0 - Right path' -> do - srcb <- getAt $ resolveToAbsolute path' - if Branch.isEmpty srcb then respond $ BranchNotFound path' - else tryUpdateDest srcb dest0 - MergeLocalBranchI src0 dest0 mergeMode -> do - let [src, dest] = resolveToAbsolute <$> [src0, dest0] - srcb <- getAt src - if Branch.isEmpty srcb then branchNotFound src0 - else do - let err = Just $ MergeAlreadyUpToDate src0 dest0 - mergeBranchAndPropagateDefaultPatch mergeMode inputDescription err srcb (Just dest0) dest - - PreviewMergeLocalBranchI src0 dest0 -> do - let [src, dest] = resolveToAbsolute <$> [src0, dest0] - srcb <- getAt src - if Branch.isEmpty srcb then branchNotFound src0 - else do - destb <- getAt dest - merged <- eval . Eval $ Branch.merge srcb destb - if merged == destb - then respond (PreviewMergeAlreadyUpToDate src0 dest0) - else - diffHelper (Branch.head destb) (Branch.head merged) >>= - respondNumbered . uncurry (ShowDiffAfterMergePreview dest0 dest) - - DiffNamespaceI before0 after0 -> do - let [beforep, afterp] = - resolveToAbsolute <$> [before0, after0] - before <- Branch.head <$> getAt beforep - after <- Branch.head <$> getAt afterp - (ppe, outputDiff) <- diffHelper before after - respondNumbered $ ShowDiffNamespace beforep afterp ppe outputDiff - - CreatePullRequestI baseRepo headRepo -> unlessGitError do - baseBranch <- viewRemoteBranch baseRepo - headBranch <- viewRemoteBranch headRepo - lift do - merged <- eval . 
Eval $ Branch.merge baseBranch headBranch - (ppe, diff) <- diffHelper (Branch.head baseBranch) (Branch.head merged) - respondNumbered $ ShowDiffAfterCreatePR baseRepo headRepo ppe diff - - LoadPullRequestI baseRepo headRepo dest0 -> do - let desta = resolveToAbsolute dest0 - let dest = Path.unabsolute desta - destb <- getAt desta - if Branch.isEmpty0 (Branch.head destb) then unlessGitError do - baseb <- importRemoteBranch baseRepo SyncMode.ShortCircuit - headb <- importRemoteBranch headRepo SyncMode.ShortCircuit - lift $ do - mergedb <- eval . Eval $ Branch.merge baseb headb - squashedb <- eval . Eval $ Branch.merge' Branch.SquashMerge headb baseb - stepManyAt - [BranchUtil.makeSetBranch (dest, "base") baseb - ,BranchUtil.makeSetBranch (dest, "head") headb - ,BranchUtil.makeSetBranch (dest, "merged") mergedb - ,BranchUtil.makeSetBranch (dest, "squashed") squashedb] - let base = snoc dest0 "base" - head = snoc dest0 "head" - merged = snoc dest0 "merged" - squashed = snoc dest0 "squashed" - respond $ LoadPullRequest baseRepo headRepo base head merged squashed - loadPropagateDiffDefaultPatch - inputDescription - (Just merged) - (snoc desta "merged") - else - respond . BranchNotEmpty . Path.Path' . 
Left $ currentPath' - - - -- move the root to a sub-branch - MoveBranchI Nothing dest -> do - b <- use root - stepManyAt [ (Path.empty, const Branch.empty0) - , BranchUtil.makeSetBranch (resolveSplit' dest) b ] - success - - MoveBranchI (Just src) dest -> - maybe (branchNotFound' src) srcOk (getAtSplit' src) - where - srcOk b = maybe (destOk b) (branchExistsSplit dest) (getAtSplit' dest) - destOk b = do - stepManyAt - [ BranchUtil.makeSetBranch (resolveSplit' src) Branch.empty - , BranchUtil.makeSetBranch (resolveSplit' dest) b ] - success -- could give rando stats about new defns - - MovePatchI src dest -> do - psrc <- getPatchAtSplit' src - pdest <- getPatchAtSplit' dest - case (psrc, pdest) of - (Nothing, _) -> patchNotFound src - (_, Just _) -> patchExists dest - (Just p, Nothing) -> do - stepManyAt [ - BranchUtil.makeDeletePatch (resolveSplit' src), - BranchUtil.makeReplacePatch (resolveSplit' dest) p ] - success - - CopyPatchI src dest -> do - psrc <- getPatchAtSplit' src - pdest <- getPatchAtSplit' dest - case (psrc, pdest) of - (Nothing, _) -> patchNotFound src - (_, Just _) -> patchExists dest - (Just p, Nothing) -> do - stepAt (BranchUtil.makeReplacePatch (resolveSplit' dest) p) - success - - DeletePatchI src -> do - psrc <- getPatchAtSplit' src - case psrc of - Nothing -> patchNotFound src - Just _ -> do - stepAt (BranchUtil.makeDeletePatch (resolveSplit' src)) - success - - DeleteBranchI Nothing -> - ifConfirmed - (do - stepAt (Path.empty, const Branch.empty0) - respond DeletedEverything) - (respond DeleteEverythingConfirmation) - - DeleteBranchI (Just p) -> - maybe (branchNotFound' p) go $ getAtSplit' p - where - go (Branch.head -> b) = do - (failed, failedDependents) <- - let rootNames = Branch.toNames0 root0 - toDelete = Names.prefix0 - (Path.toName . Path.unsplit . resolveSplit' $ p) -- resolveSplit' incorporates currentPath - (Branch.toNames0 b) - in getEndangeredDependents (eval . 
GetDependents) toDelete rootNames - if failed == mempty then do - stepAt $ BranchUtil.makeSetBranch (resolveSplit' p) Branch.empty - -- Looks similar to the 'toDelete' above... investigate me! ;) - diffHelper b Branch.empty0 >>= - respondNumbered - . uncurry (ShowDiffAfterDeleteBranch - $ resolveToAbsolute (Path.unsplit' p)) - else handleFailedDelete failed failedDependents - SwitchBranchI path' -> do - let path = resolveToAbsolute path' - currentPathStack %= Nel.cons path - branch' <- getAt path - when (Branch.isEmpty branch') (respond $ CreatedNewBranch path) - - PopBranchI -> use (currentPathStack . to Nel.uncons) >>= \case - (_, Nothing) -> respond StartOfCurrentPathHistory - (_, Just t) -> currentPathStack .= t - - HistoryI resultsCap diffCap from -> case from of - Left hash -> unlessError do - b <- resolveShortBranchHash hash - lift $ doHistory 0 b [] - Right path' -> do - let path = resolveToAbsolute path' - branch' <- getAt path - if Branch.isEmpty branch' then respond $ CreatedNewBranch path - else doHistory 0 branch' [] - where - doHistory !n b acc = - if maybe False (n >=) resultsCap then - respond $ History diffCap acc (PageEnd (sbh $ Branch.headHash b) n) - else case Branch._history b of - Causal.One{} -> - respond $ History diffCap acc (EndOfLog . sbh $ Branch.headHash b) - Causal.Merge{..} -> - respond $ History diffCap acc (MergeTail (sbh $ Branch.headHash b) . map sbh $ Map.keys tails) - Causal.Cons{..} -> do - b' <- fmap Branch.Branch . eval . Eval $ snd tail - let elem = (sbh $ Branch.headHash b, Branch.namesDiff b' b) - doHistory (n+1) b' (elem : acc) - - UndoI -> do - prev <- eval . Eval $ Branch.uncons root' - case prev of - Nothing -> - respond . CantUndo $ if Branch.isOne root' then CantUndoPastStart - else CantUndoPastMerge - Just (_, prev) -> do - updateRoot prev - diffHelper (Branch.head prev) (Branch.head root') >>= - respondNumbered . 
uncurry Output.ShowDiffAfterUndo - - AliasTermI src dest -> do - referents <- resolveHHQS'Referents src - case (toList referents, toList (getTerms dest)) of - ([r], []) -> do - stepAt (BranchUtil.makeAddTermName (resolveSplit' dest) r (oldMD r)) - success - ([_], rs@(_:_)) -> termExists dest (Set.fromList rs) - ([], _) -> either termNotFound' termNotFound src - (rs, _) -> - either hashConflicted termConflicted src (Set.fromList rs) - where - oldMD r = either (const mempty) - (\src -> - let p = resolveSplit' src in - BranchUtil.getTermMetadataAt p r root0) - src - - AliasTypeI src dest -> do - refs <- resolveHHQS'Types src - case (toList refs, toList (getTypes dest)) of - ([r], []) -> do - stepAt (BranchUtil.makeAddTypeName (resolveSplit' dest) r (oldMD r)) - success - ([_], rs@(_:_)) -> typeExists dest (Set.fromList rs) - ([], _) -> either typeNotFound' typeNotFound src - (rs, _) -> - either - (\src -> hashConflicted src . Set.map Referent.Ref) - typeConflicted - src - (Set.fromList rs) - - - where - oldMD r = - either (const mempty) - (\src -> - let p = resolveSplit' src in - BranchUtil.getTypeMetadataAt p r root0) - src - - -- this implementation will happily produce name conflicts, - -- but will surface them in a normal diff at the end of the operation. - AliasManyI srcs dest' -> do - let destAbs = resolveToAbsolute dest' - old <- getAt destAbs - let (unknown, actions) = foldl' go mempty srcs - stepManyAt actions - new <- getAt destAbs - diffHelper (Branch.head old) (Branch.head new) >>= - respondNumbered . uncurry (ShowDiffAfterModifyBranch dest' destAbs) - unless (null unknown) $ - respond . SearchTermsNotFound . 
fmap fixupOutput $ unknown - where - -- a list of missing sources (if any) and the actions that do the work - go :: ([Path.HQSplit], [(Path, Branch0 m -> Branch0 m)]) - -> Path.HQSplit - -> ([Path.HQSplit], [(Path, Branch0 m -> Branch0 m)]) - go (missingSrcs, actions) hqsrc = - let - src :: Path.Split - src = second HQ'.toName hqsrc - proposedDest :: Path.Split - proposedDest = second HQ'.toName hqProposedDest - hqProposedDest :: Path.HQSplit - hqProposedDest = first Path.unabsolute $ - Path.resolve (resolveToAbsolute dest') hqsrc - -- `Nothing` if src doesn't exist - doType :: Maybe [(Path, Branch0 m -> Branch0 m)] - doType = case ( BranchUtil.getType hqsrc currentBranch0 - , BranchUtil.getType hqProposedDest root0 - ) of - (null -> True, _) -> Nothing -- missing src - (rsrcs, existing) -> -- happy path - Just . map addAlias . toList $ Set.difference rsrcs existing - where - addAlias r = BranchUtil.makeAddTypeName proposedDest r (oldMD r) - oldMD r = BranchUtil.getTypeMetadataAt src r currentBranch0 - doTerm :: Maybe [(Path, Branch0 m -> Branch0 m)] - doTerm = case ( BranchUtil.getTerm hqsrc currentBranch0 - , BranchUtil.getTerm hqProposedDest root0 - ) of - (null -> True, _) -> Nothing -- missing src - (rsrcs, existing) -> - Just . map addAlias . toList $ Set.difference rsrcs existing - where - addAlias r = BranchUtil.makeAddTermName proposedDest r (oldMD r) - oldMD r = BranchUtil.getTermMetadataAt src r currentBranch0 - in case (doType, doTerm) of - (Nothing, Nothing) -> (missingSrcs :> hqsrc, actions) - (Just as, Nothing) -> (missingSrcs, actions ++ as) - (Nothing, Just as) -> (missingSrcs, actions ++ as) - (Just as1, Just as2) -> (missingSrcs, actions ++ as1 ++ as2) - - fixupOutput :: Path.HQSplit -> HQ.HashQualified - fixupOutput = fmap Path.toName . HQ'.toHQ . 
Path.unsplitHQ - - NamesI thing -> do - parseNames0 <- Names3.suffixify0 <$> basicParseNames0 - let filtered = case thing of - HQ.HashOnly shortHash -> - Names.filterBySHs (Set.singleton shortHash) parseNames0 - HQ.HashQualified n sh -> - Names.filterByHQs (Set.singleton $ HQ'.HashQualified n sh) parseNames0 - HQ.NameOnly n -> - Names.filterByHQs (Set.singleton $ HQ'.NameOnly n) parseNames0 - printNames0 <- basicPrettyPrintNames0 - let printNames = Names printNames0 mempty - let terms' ::Set (Referent, Set HQ'.HashQualified) - terms' = (`Set.map` Names.termReferents filtered) $ - \r -> (r, Names3.termName hqLength r printNames) - types' :: Set (Reference, Set HQ'.HashQualified) - types' = (`Set.map` Names.typeReferences filtered) $ - \r -> (r, Names3.typeName hqLength r printNames) - respond $ ListNames hqLength (toList types') (toList terms') --- let (p, hq) = p0 --- namePortion = HQ'.toName hq --- case hq of --- HQ'.NameOnly _ -> --- respond $ uncurry ListNames (results p namePortion) --- HQ'.HashQualified _ sh -> let --- (terms, types) = results p namePortion --- -- filter terms and types based on `sh : ShortHash` --- terms' = filter (Reference.isPrefixOf sh . Referent.toReference . fst) terms --- types' = filter (Reference.isPrefixOf sh . fst) types --- in respond $ ListNames terms' types' --- where --- results p namePortion = let --- name = Path.toName . Path.unprefix currentPath' . 
Path.snoc' p --- $ namePortion --- ns = prettyPrintNames0 --- terms = [ (r, Names.namesForReferent ns r) --- | r <- toList $ Names.termsNamed ns name ] --- types = [ (r, Names.namesForReference ns r) --- | r <- toList $ Names.typesNamed ns name ] --- in (terms, types) - - LinkI mdValue srcs -> do - manageLinks False srcs [mdValue] Metadata.insert - syncRoot - - UnlinkI mdValue srcs -> do - manageLinks False srcs [mdValue] Metadata.delete - syncRoot - - -- > links List.map (.Docs .English) - -- > links List.map -- give me all the - -- > links Optional License - LinksI src mdTypeStr -> unlessError do - (ppe, out) <- getLinks input src (Right mdTypeStr) - lift do - numberedArgs .= fmap (HQ.toString . view _1) out - respond $ ListOfLinks ppe out - - DocsI src -> unlessError do - (ppe, out) <- getLinks input src (Left $ Set.singleton DD.docRef) - lift case out of - [(_name, ref, _tm)] -> do - names <- basicPrettyPrintNames0 - doDisplay ConsoleLocation (Names3.Names names mempty) (Referent.Ref ref) - out -> do - numberedArgs .= fmap (HQ.toString . view _1) out - respond $ ListOfLinks ppe out - - CreateAuthorI authorNameSegment authorFullName -> do - initialBranch <- getAt currentPath' - AuthorInfo - guid@(guidRef, _, _) - author@(authorRef, _, _) - copyrightHolder@(copyrightHolderRef, _, _) <- - eval $ CreateAuthorInfo authorFullName - -- add the new definitions to the codebase and to the namespace - traverse_ (eval . uncurry3 PutTerm) [guid, author, copyrightHolder] - stepManyAt - [ BranchUtil.makeAddTermName (resolveSplit' authorPath) (d authorRef) mempty - , BranchUtil.makeAddTermName (resolveSplit' copyrightHolderPath) (d copyrightHolderRef) mempty - , BranchUtil.makeAddTermName (resolveSplit' guidPath) (d guidRef) mempty - ] - finalBranch <- getAt currentPath' - -- print some output - diffHelper (Branch.head initialBranch) (Branch.head finalBranch) >>= - respondNumbered - . 
uncurry (ShowDiffAfterCreateAuthor - authorNameSegment - (Path.unsplit' base) - currentPath') - where - d :: Reference.Id -> Referent - d = Referent.Ref . Reference.DerivedId - base :: Path.Split' = (Path.relativeEmpty', "metadata") - authorPath = base |> "authors" |> authorNameSegment - copyrightHolderPath = base |> "copyrightHolders" |> authorNameSegment - guidPath = authorPath |> "guid" - - MoveTermI src dest -> - case (toList (getHQ'Terms src), toList (getTerms dest)) of - ([r], []) -> do - stepManyAt - [ BranchUtil.makeDeleteTermName p r - , BranchUtil.makeAddTermName (resolveSplit' dest) r (mdSrc r)] - success - ([_], rs) -> termExists dest (Set.fromList rs) - ([], _) -> termNotFound src - (rs, _) -> termConflicted src (Set.fromList rs) - where p = resolveSplit' (HQ'.toName <$> src) - mdSrc r = BranchUtil.getTermMetadataAt p r root0 - - MoveTypeI src dest -> - case (toList (getHQ'Types src), toList (getTypes dest)) of - ([r], []) -> do - stepManyAt - [ BranchUtil.makeDeleteTypeName p r - , BranchUtil.makeAddTypeName (resolveSplit' dest) r (mdSrc r) ] - success - ([_], rs) -> typeExists dest (Set.fromList rs) - ([], _) -> typeNotFound src - (rs, _) -> typeConflicted src (Set.fromList rs) - where - p = resolveSplit' (HQ'.toName <$> src) - mdSrc r = BranchUtil.getTypeMetadataAt p r root0 - - DeleteI hq -> delete getHQ'Terms getHQ'Types hq - DeleteTypeI hq -> delete (const Set.empty) getHQ'Types hq - DeleteTermI hq -> delete getHQ'Terms (const Set.empty) hq - - DisplayI outputLoc hq -> do - parseNames0 <- (`Names3.Names` mempty) <$> basicPrettyPrintNames0 - -- use suffixed names for resolving the argument to display - let parseNames = Names3.suffixify parseNames0 - let results = Names3.lookupHQTerm hq parseNames - if Set.null results then - respond $ SearchTermsNotFound [hq] - else if Set.size results > 1 then - respond $ TermAmbiguous hq results - -- ... 
but use the unsuffixed names for display - else doDisplay outputLoc parseNames0 (Set.findMin results) - - ShowDefinitionI outputLoc query -> do - (misses, results) <- hqNameQuerySuffixify query - results' <- loadSearchResults results - let termTypes :: Map.Map Reference (Type v Ann) - termTypes = - Map.fromList - [ (r, t) | SR'.Tm _ (Just t) (Referent.Ref r) _ <- results' ] - (collatedTypes, collatedTerms) = collateReferences - (mapMaybe SR'.tpReference results') - (mapMaybe SR'.tmReferent results') - -- load the `collatedTerms` and types into a Map Reference.Id Term/Type - -- for later - loadedDerivedTerms <- - fmap (Map.fromList . catMaybes) . for (toList collatedTerms) $ \case - Reference.DerivedId i -> fmap (i,) <$> eval (LoadTerm i) - Reference.Builtin{} -> pure Nothing - loadedDerivedTypes <- - fmap (Map.fromList . catMaybes) . for (toList collatedTypes) $ \case - Reference.DerivedId i -> fmap (i,) <$> eval (LoadType i) - Reference.Builtin{} -> pure Nothing - -- Populate DisplayThings for the search results, in anticipation of - -- displaying the definitions. - loadedDisplayTerms :: Map Reference (DisplayThing (Term v Ann)) <- - fmap Map.fromList . for (toList collatedTerms) $ \case - r@(Reference.DerivedId i) -> do - let tm = Map.lookup i loadedDerivedTerms - -- We add a type annotation to the term using if it doesn't - -- already have one that the user provided - pure . (r, ) $ case liftA2 (,) tm (Map.lookup r termTypes) of - Nothing -> MissingThing i - Just (tm, typ) -> case tm of - Term.Ann' _ _ -> RegularThing tm - _ -> RegularThing (Term.ann (ABT.annotation tm) tm typ) - r@(Reference.Builtin _) -> pure (r, BuiltinThing) - let loadedDisplayTypes :: Map Reference (DisplayThing (DD.Decl v Ann)) - loadedDisplayTypes = - Map.fromList . (`fmap` toList collatedTypes) $ \case - r@(Reference.DerivedId i) -> - (r,) . 
maybe (MissingThing i) RegularThing - $ Map.lookup i loadedDerivedTypes - r@(Reference.Builtin _) -> (r, BuiltinThing) - -- the SR' deps include the result term/type names, and the - let deps = foldMap SR'.labeledDependencies results' - <> foldMap Term.labeledDependencies loadedDerivedTerms - printNames <- makePrintNamesFromLabeled' deps - - -- We might like to make sure that the user search terms get used as - -- the names in the pretty-printer, but the current implementation - -- doesn't. - ppe <- prettyPrintEnvDecl printNames - let loc = case outputLoc of - ConsoleLocation -> Nothing - FileLocation path -> Just path - LatestFileLocation -> fmap fst latestFile' <|> Just "scratch.u" - do - unless (null loadedDisplayTypes && null loadedDisplayTerms) $ - eval . Notify $ - DisplayDefinitions loc ppe loadedDisplayTypes loadedDisplayTerms - unless (null misses) $ - eval . Notify $ SearchTermsNotFound misses - -- We set latestFile to be programmatically generated, if we - -- are viewing these definitions to a file - this will skip the - -- next update for that file (which will happen immediately) - latestFile .= ((, True) <$> loc) - - FindPatchI -> do - let patches = - [ Path.toName $ Path.snoc p seg - | (p, b) <- Branch.toList0 currentBranch0 - , (seg, _) <- Map.toList (Branch._edits b) ] - respond $ ListOfPatches $ Set.fromList patches - numberedArgs .= fmap Name.toString patches - - FindShallowI pathArg -> do - prettyPrintNames0 <- basicPrettyPrintNames0 - ppe <- fmap PPE.suffixifiedPPE . prettyPrintEnvDecl $ Names prettyPrintNames0 mempty - let pathArgAbs = resolveToAbsolute pathArg - b0 <- Branch.head <$> getAt pathArgAbs - let - hqTerm b0 ns r = - let refs = Star3.lookupD1 ns . _terms $ b0 - in case length refs of - 1 -> HQ'.fromName ns - _ -> HQ'.take hqLength $ HQ'.fromNamedReferent ns r - hqType b0 ns r = - let refs = Star3.lookupD1 ns . 
_types $ b0 - in case length refs of - 1 -> HQ'.fromName ns - _ -> HQ'.take hqLength $ HQ'.fromNamedReference ns r - defnCount b = - (R.size . deepTerms $ Branch.head b) + - (R.size . deepTypes $ Branch.head b) - - termEntries <- for (R.toList . Star3.d1 $ _terms b0) $ - \(r, ns) -> do - ot <- loadReferentType r - pure $ ShallowTermEntry r (hqTerm b0 ns r) ot - let - typeEntries = - [ ShallowTypeEntry r (hqType b0 ns r) - | (r, ns) <- R.toList . Star3.d1 $ _types b0 ] - branchEntries = - [ ShallowBranchEntry ns (defnCount b) - | (ns, b) <- Map.toList $ _children b0 ] - patchEntries = - [ ShallowPatchEntry ns - | (ns, (_h, _mp)) <- Map.toList $ _edits b0 ] - let - entries :: [ShallowListEntry v Ann] - entries = sort $ termEntries ++ typeEntries ++ branchEntries ++ patchEntries - entryToHQString :: ShallowListEntry v Ann -> String - -- caching the result as an absolute path, for easier jumping around - entryToHQString e = fixup $ case e of - ShallowTypeEntry _ hq -> HQ'.toString hq - ShallowTermEntry _ hq _ -> HQ'.toString hq - ShallowBranchEntry ns _ -> NameSegment.toString ns - ShallowPatchEntry ns -> NameSegment.toString ns - where - fixup s = - if last pathArgStr == '.' - then pathArgStr ++ s - else pathArgStr ++ "." ++ s - pathArgStr = show pathArgAbs - numberedArgs .= fmap entryToHQString entries - respond $ ListShallow ppe entries - where - - SearchByNameI isVerbose _showAll ws -> do - prettyPrintNames0 <- basicPrettyPrintNames0 - unlessError do - results <- case ws of - -- no query, list everything - [] -> pure . listBranch $ Branch.head currentBranch' - - -- type query - ":" : ws -> ExceptT (parseSearchType input (unwords ws)) >>= \typ -> ExceptT $ do - let named = Branch.deepReferents root0 - matches <- fmap toList . eval $ GetTermsOfType typ - matches <- filter (`Set.member` named) <$> - if null matches then do - respond NoExactTypeMatches - fmap toList . 
eval $ GetTermsMentioningType typ - else pure matches - let results = - -- in verbose mode, aliases are shown, so we collapse all - -- aliases to a single search result; in non-verbose mode, - -- a separate result may be shown for each alias - (if isVerbose then uniqueBy SR.toReferent else id) $ - searchResultsFor prettyPrintNames0 matches [] - pure . pure $ results - - -- name query - (map HQ.unsafeFromString -> qs) -> do - ns <- lift basicPrettyPrintNames0 - let srs = searchBranchScored ns fuzzyNameDistance qs - pure $ uniqueBy SR.toReferent srs - lift do - numberedArgs .= fmap searchResultToHQString results - results' <- loadSearchResults results - ppe <- prettyPrintEnv . Names3.suffixify =<< - makePrintNamesFromLabeled' - (foldMap SR'.labeledDependencies results') - respond $ ListOfDefinitions ppe isVerbose results' - - ResolveTypeNameI hq -> - zeroOneOrMore (getHQ'Types hq) (typeNotFound hq) go (typeConflicted hq) - where - conflicted = getHQ'Types (fmap HQ'.toNameOnly hq) - makeDelete = - BranchUtil.makeDeleteTypeName (resolveSplit' (HQ'.toName <$> hq)) - go r = stepManyAt . fmap makeDelete . toList . Set.delete r $ conflicted - - ResolveTermNameI hq -> do - refs <- getHQ'TermsIncludingHistorical hq - zeroOneOrMore refs (termNotFound hq) go (termConflicted hq) - where - conflicted = getHQ'Terms (fmap HQ'.toNameOnly hq) - makeDelete = - BranchUtil.makeDeleteTermName (resolveSplit' (HQ'.toName <$> hq)) - go r = stepManyAt . fmap makeDelete . toList . Set.delete r $ conflicted - - ReplaceTermI from to patchPath -> do - let patchPath' = fromMaybe defaultPatchPath patchPath - patch <- getPatchAt patchPath' - (fromMisses', fromHits) <- hqNameQuery [from] - (toMisses', toHits) <- hqNameQuery [to] - let fromRefs = termReferences fromHits - toRefs = termReferences toHits - -- Type hits are term misses - fromMisses = fromMisses' - <> (HQ'.toHQ . SR.typeName <$> typeResults fromHits) - toMisses = toMisses' - <> (HQ'.toHQ . 
SR.typeName <$> typeResults fromHits) - go :: Reference - -> Reference - -> Action m (Either Event Input) v () - go fr tr = do - mft <- eval $ LoadTypeOfTerm fr - mtt <- eval $ LoadTypeOfTerm tr - let termNotFound = respond . TermNotFound' - . SH.take hqLength - . Reference.toShortHash - case (mft, mtt) of - (Nothing, _) -> termNotFound fr - (_, Nothing) -> termNotFound tr - (Just ft, Just tt) -> do - let - patch' = - -- The modified patch - over Patch.termEdits - (R.insert fr (Replace tr (TermEdit.typing tt ft)) - . R.deleteDom fr) - patch - (patchPath'', patchName) = resolveSplit' patchPath' - saveAndApplyPatch patchPath'' patchName patch' - misses = fromMisses <> toMisses - ambiguous t rs = - let rs' = Set.map Referent.Ref $ Set.fromList rs - in case t of - HQ.HashOnly h -> - hashConflicted h rs' - (Path.parseHQSplit' . HQ.toString -> Right n) -> - termConflicted n rs' - _ -> respond . BadName $ HQ.toString t - unless (null misses) $ - respond $ SearchTermsNotFound misses - case (fromRefs, toRefs) of - ([fr], [tr]) -> go fr tr - ([_], tos) -> ambiguous to tos - (frs, _) -> ambiguous from frs - ReplaceTypeI from to patchPath -> do - let patchPath' = fromMaybe defaultPatchPath patchPath - (fromMisses', fromHits) <- hqNameQuery [from] - (toMisses', toHits) <- hqNameQuery [to] - patch <- getPatchAt patchPath' - let fromRefs = typeReferences fromHits - toRefs = typeReferences toHits - -- Term hits are type misses - fromMisses = fromMisses' - <> (HQ'.toHQ . SR.termName <$> termResults fromHits) - toMisses = toMisses' - <> (HQ'.toHQ . SR.termName <$> termResults fromHits) - go :: Reference - -> Reference - -> Action m (Either Event Input) v () - go fr tr = do - let patch' = - -- The modified patch - over Patch.typeEdits - (R.insert fr (TypeEdit.Replace tr) . 
R.deleteDom fr) patch - (patchPath'', patchName) = resolveSplit' patchPath' - saveAndApplyPatch patchPath'' patchName patch' - misses = fromMisses <> toMisses - ambiguous t rs = - let rs' = Set.map Referent.Ref $ Set.fromList rs - in case t of - HQ.HashOnly h -> - hashConflicted h rs' - (Path.parseHQSplit' . HQ.toString -> Right n) -> - typeConflicted n $ Set.fromList rs - -- This is unlikely to happen, as t has to be a parsed - -- hash-qualified name already. - -- Still, the types say we need to handle this case. - _ -> respond . BadName $ HQ.toString t - unless (null misses) $ - respond $ SearchTermsNotFound misses - case (fromRefs, toRefs) of - ([fr], [tr]) -> go fr tr - ([_], tos) -> ambiguous to tos - (frs, _) -> ambiguous from frs - LoadI maybePath -> - case maybePath <|> (fst <$> latestFile') of - Nothing -> respond NoUnisonFile - Just path -> do - res <- eval . LoadSource . Text.pack $ path - case res of - InvalidSourceNameError -> respond $ InvalidSourceName path - LoadError -> respond $ SourceLoadFailed path - LoadSuccess contents -> loadUnisonFile (Text.pack path) contents - - AddI hqs -> case uf of - Nothing -> respond NoUnisonFile - Just uf -> do - sr <- Slurp.disallowUpdates - . applySelection hqs uf - . toSlurpResult currentPath' uf - <$> slurpResultNames0 - let adds = Slurp.adds sr - when (Slurp.isNonempty sr) $ do - stepAtNoSync ( Path.unabsolute currentPath' - , doSlurpAdds adds uf) - eval . AddDefsToCodebase . filterBySlurpResult sr $ uf - ppe <- prettyPrintEnvDecl =<< - makeShadowedPrintNamesFromLabeled - (UF.termSignatureExternalLabeledDependencies uf) - (UF.typecheckedToNames0 uf) - respond $ SlurpOutput input (PPE.suffixifiedPPE ppe) sr - addDefaultMetadata adds - syncRoot - - PreviewAddI hqs -> case (latestFile', uf) of - (Just (sourceName, _), Just uf) -> do - sr <- Slurp.disallowUpdates - . applySelection hqs uf - . 
toSlurpResult currentPath' uf - <$> slurpResultNames0 - previewResponse sourceName sr uf - _ -> respond NoUnisonFile - - UpdateI maybePatchPath hqs -> case uf of - Nothing -> respond NoUnisonFile - Just uf -> do - let patchPath = fromMaybe defaultPatchPath maybePatchPath - slurpCheckNames0 <- slurpResultNames0 - currentPathNames0 <- currentPathNames0 - let sr = applySelection hqs uf - . toSlurpResult currentPath' uf - $ slurpCheckNames0 - addsAndUpdates = Slurp.updates sr <> Slurp.adds sr - fileNames0 = UF.typecheckedToNames0 uf - -- todo: display some error if typeEdits or termEdits itself contains a loop - typeEdits :: Map Name (Reference, Reference) - typeEdits = Map.fromList $ map f (toList $ SC.types (updates sr)) where - f v = case (toList (Names.typesNamed slurpCheckNames0 n) - ,toList (Names.typesNamed fileNames0 n)) of - ([old],[new]) -> (n, (old, new)) - _ -> error $ "Expected unique matches for " - ++ Var.nameStr v ++ " but got: " - ++ show otherwise - where n = Name.fromVar v - hashTerms :: Map Reference (Type v Ann) - hashTerms = Map.fromList (toList hashTerms0) where - hashTerms0 = (\(r, _, typ) -> (r, typ)) <$> UF.hashTerms uf - termEdits :: Map Name (Reference, Reference) - termEdits = Map.fromList $ map g (toList $ SC.terms (updates sr)) where - g v = case ( toList (Names.refTermsNamed slurpCheckNames0 n) - , toList (Names.refTermsNamed fileNames0 n)) of - ([old], [new]) -> (n, (old, new)) - _ -> error $ "Expected unique matches for " - ++ Var.nameStr v ++ " but got: " - ++ show otherwise - where n = Name.fromVar v - termDeprecations :: [(Name, Referent)] - termDeprecations = - [ (n, r) | (oldTypeRef,_) <- Map.elems typeEdits - , (n, r) <- Names3.constructorsForType0 oldTypeRef currentPathNames0 ] - - ye'ol'Patch <- getPatchAt patchPath - -- If `uf` updates a -> a', we want to replace all (a0 -> a) in patch - -- with (a0 -> a') in patch'. - -- So for all (a0 -> a) in patch, for all (a -> a') in `uf`, - -- we must know the type of a0, a, a'. 
- let - -- we need: - -- all of the `old` references from the `new` edits, - -- plus all of the `old` references for edits from patch we're replacing - collectOldForTyping :: [(Reference, Reference)] -> Patch -> Set Reference - collectOldForTyping new old = foldl' f mempty (new ++ fromOld) where - f acc (r, _r') = Set.insert r acc - newLHS = Set.fromList . fmap fst $ new - fromOld :: [(Reference, Reference)] - fromOld = [ (r,r') | (r, TermEdit.Replace r' _) <- R.toList . Patch._termEdits $ old - , Set.member r' newLHS ] - neededTypes = collectOldForTyping (toList termEdits) ye'ol'Patch - - allTypes :: Map Reference (Type v Ann) <- - fmap Map.fromList . for (toList neededTypes) $ \r -> - (r,) . fromMaybe (Type.builtin External "unknown type") - <$> (eval . LoadTypeOfTerm) r - - let typing r1 r2 = case (Map.lookup r1 allTypes, Map.lookup r2 hashTerms) of - (Just t1, Just t2) - | Typechecker.isEqual t1 t2 -> TermEdit.Same - | Typechecker.isSubtype t1 t2 -> TermEdit.Subtype - | otherwise -> TermEdit.Different - e -> error $ "compiler bug: typing map not constructed properly\n" <> - "typing " <> show r1 <> " " <> show r2 <> " : " <> show e - - let updatePatch :: Patch -> Patch - updatePatch p = foldl' step2 p' termEdits - where - p' = foldl' step1 p typeEdits - step1 p (r,r') = Patch.updateType r (TypeEdit.Replace r') p - step2 p (r,r') = Patch.updateTerm typing r (TermEdit.Replace r' (typing r r')) p - (p, seg) = Path.toAbsoluteSplit currentPath' patchPath - updatePatches :: Branch0 m -> m (Branch0 m) - updatePatches = Branch.modifyPatches seg updatePatch - - when (Slurp.isNonempty sr) $ do - -- take a look at the `updates` from the SlurpResult - -- and make a patch diff to record a replacement from the old to new references - stepManyAtMNoSync - [( Path.unabsolute currentPath' - , pure . doSlurpUpdates typeEdits termEdits termDeprecations) - ,( Path.unabsolute currentPath' - , pure . doSlurpAdds addsAndUpdates uf) - ,( Path.unabsolute p, updatePatches )] - eval . 
AddDefsToCodebase . filterBySlurpResult sr $ uf - ppe <- prettyPrintEnvDecl =<< - makeShadowedPrintNamesFromLabeled - (UF.termSignatureExternalLabeledDependencies uf) - (UF.typecheckedToNames0 uf) - respond $ SlurpOutput input (PPE.suffixifiedPPE ppe) sr - -- propagatePatch prints TodoOutput - void $ propagatePatchNoSync (updatePatch ye'ol'Patch) currentPath' - addDefaultMetadata addsAndUpdates - syncRoot - - PreviewUpdateI hqs -> case (latestFile', uf) of - (Just (sourceName, _), Just uf) -> do - sr <- applySelection hqs uf - . toSlurpResult currentPath' uf - <$> slurpResultNames0 - previewResponse sourceName sr uf - _ -> respond NoUnisonFile - - TodoI patchPath branchPath' -> do - patch <- getPatchAt (fromMaybe defaultPatchPath patchPath) - doShowTodoOutput patch $ resolveToAbsolute branchPath' - - TestI showOk showFail -> do - let - testTerms = Map.keys . R4.d1 . uncurry R4.selectD34 isTest - . Branch.deepTermMetadata $ currentBranch0 - testRefs = Set.fromList [ r | Referent.Ref r <- toList testTerms ] - oks results = - [ (r, msg) - | (r, Term.Sequence' ts) <- Map.toList results - , Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts - , cid == DD.okConstructorId && ref == DD.testResultRef ] - fails results = - [ (r, msg) - | (r, Term.Sequence' ts) <- Map.toList results - , Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts - , cid == DD.failConstructorId && ref == DD.testResultRef ] - cachedTests <- fmap Map.fromList . 
eval $ LoadWatches UF.TestWatch testRefs - let stats = Output.CachedTests (Set.size testRefs) (Map.size cachedTests) - names <- makePrintNamesFromLabeled' $ - LD.referents testTerms <> - LD.referents [ DD.okConstructorReferent, DD.failConstructorReferent ] - ppe <- prettyPrintEnv names - respond $ TestResults stats ppe showOk showFail - (oks cachedTests) (fails cachedTests) - let toCompute = Set.difference testRefs (Map.keysSet cachedTests) - unless (Set.null toCompute) $ do - let total = Set.size toCompute - computedTests <- fmap join . for (toList toCompute `zip` [1..]) $ \(r,n) -> - case r of - Reference.DerivedId rid -> do - tm <- eval $ LoadTerm rid - case tm of - Nothing -> [] <$ respond (TermNotFound' . SH.take hqLength . Reference.toShortHash $ Reference.DerivedId rid) - Just tm -> do - respond $ TestIncrementalOutputStart ppe (n,total) r tm - tm' <- eval $ Evaluate1 ppe tm - case tm' of - Left e -> respond (EvaluationFailure e) $> [] - Right tm' -> do - eval $ PutWatch UF.TestWatch rid tm' - respond $ TestIncrementalOutputEnd ppe (n,total) r tm' - pure [(r, tm')] - r -> error $ "unpossible, tests can't be builtins: " <> show r - let m = Map.fromList computedTests - respond $ TestResults Output.NewlyComputed ppe showOk showFail (oks m) (fails m) - - -- ListBranchesI -> - -- eval ListBranches >>= respond . ListOfBranches currentBranchName' - -- DeleteBranchI branchNames -> withBranches branchNames $ \bnbs -> do - -- uniqueToDelete <- prettyUniqueDefinitions bnbs - -- let deleteBranches b = - -- traverse (eval . DeleteBranch) b >> respond (Success input) - -- if (currentBranchName' `elem` branchNames) - -- then respond DeletingCurrentBranch - -- else if null uniqueToDelete - -- then deleteBranches branchNames - -- else ifM (confirmedCommand input) - -- (deleteBranches branchNames) - -- (respond . 
DeleteBranchConfirmation $ uniqueToDelete) - - PropagatePatchI patchPath scopePath -> do - patch <- getPatchAt patchPath - updated <- propagatePatch inputDescription patch (resolveToAbsolute scopePath) - unless updated (respond $ NothingToPatch patchPath scopePath) - - ExecuteI main -> addRunMain main uf >>= \case - Nothing -> do - names0 <- basicPrettyPrintNames0 - ppe <- prettyPrintEnv (Names3.Names names0 mempty) - mainType <- eval RuntimeMain - respond $ NoMainFunction main ppe [mainType] - Just unisonFile -> do - ppe <- executePPE unisonFile - eval $ Execute ppe unisonFile - - -- UpdateBuiltinsI -> do - -- stepAt updateBuiltins - -- checkTodo - - MergeBuiltinsI -> do - -- these were added once, but maybe they've changed and need to be - -- added again. - let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) - (Map.fromList Builtin.builtinEffectDecls) - mempty mempty - eval $ AddDefsToCodebase uf - -- add the names; note, there are more names than definitions - -- due to builtin terms; so we don't just reuse `uf` above. - let srcb = BranchUtil.fromNames0 Builtin.names0 - _ <- updateAtM (currentPath' `snoc` "builtin") $ \destb -> - eval . Eval $ Branch.merge srcb destb - success - - MergeIOBuiltinsI -> do - -- these were added once, but maybe they've changed and need to be - -- added again. - let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) - (Map.fromList Builtin.builtinEffectDecls) - mempty mempty - eval $ AddDefsToCodebase uf - -- these have not neceesarily been added yet - eval $ AddDefsToCodebase IOSource.typecheckedFile' - - -- add the names; note, there are more names than definitions - -- due to builtin terms; so we don't just reuse `uf` above. - let names0 = Builtin.names0 - <> UF.typecheckedToNames0 @v IOSource.typecheckedFile' - let srcb = BranchUtil.fromNames0 names0 - _ <- updateAtM (currentPath' `snoc` "builtin") $ \destb -> - eval . 
Eval $ Branch.merge srcb destb - - success - - ListEditsI maybePath -> do - let (p, seg) = - maybe (Path.toAbsoluteSplit currentPath' defaultPatchPath) - (Path.toAbsoluteSplit currentPath') - maybePath - patch <- eval . Eval . Branch.getPatch seg . Branch.head =<< getAt p - ppe <- prettyPrintEnv =<< - makePrintNamesFromLabeled' (Patch.labeledDependencies patch) - respond $ ListEdits patch ppe - - PullRemoteBranchI mayRepo path syncMode -> unlessError do - ns <- resolveConfiguredGitUrl Pull path mayRepo - lift $ unlessGitError do - b <- importRemoteBranch ns syncMode - let msg = Just $ PullAlreadyUpToDate ns path - let destAbs = resolveToAbsolute path - lift $ mergeBranchAndPropagateDefaultPatch Branch.RegularMerge inputDescription msg b (Just path) destAbs - - PushRemoteBranchI mayRepo path syncMode -> do - let srcAbs = resolveToAbsolute path - srcb <- getAt srcAbs - let expandRepo (r, rp) = (r, Nothing, rp) - unlessError do - (repo, sbh, remotePath) <- - resolveConfiguredGitUrl Push path (fmap expandRepo mayRepo) - case sbh of - Nothing -> lift $ unlessGitError do - remoteRoot <- viewRemoteBranch (repo, Nothing, Path.empty) - newRemoteRoot <- lift . eval . Eval $ - Branch.modifyAtM remotePath (Branch.merge srcb) remoteRoot - syncRemoteRootBranch repo newRemoteRoot syncMode - lift $ respond Success - Just{} -> - error $ "impossible match, resolveConfiguredGitUrl shouldn't return" - <> " `Just` unless it was passed `Just`; and here it is passed" - <> " `Nothing` by `expandRepo`." - ListDependentsI hq -> -- todo: add flag to handle transitive efficiently - resolveHQToLabeledDependencies hq >>= \lds -> - if null lds - then respond $ LabeledReferenceNotFound hq - else for_ lds $ \ld -> do - dependents <- let - tp r = eval $ GetDependents r - tm (Referent.Ref r) = eval $ GetDependents r - tm (Referent.Con r _i _ct) = eval $ GetDependents r - in LD.fold tp tm ld - (missing, names0) <- eval . 
Eval $ Branch.findHistoricalRefs' dependents root' - let types = R.toList $ Names3.types0 names0 - let terms = fmap (second Referent.toReference) $ R.toList $ Names.terms names0 - let names = types <> terms - numberedArgs .= fmap (Text.unpack . Reference.toText) ((fmap snd names) <> toList missing) - respond $ ListDependents hqLength ld names missing - ListDependenciesI hq -> -- todo: add flag to handle transitive efficiently - resolveHQToLabeledDependencies hq >>= \lds -> - if null lds - then respond $ LabeledReferenceNotFound hq - else for_ lds $ \ld -> do - dependencies :: Set Reference <- let - tp r@(Reference.DerivedId i) = eval (LoadType i) <&> \case - Nothing -> error $ "What happened to " ++ show i ++ "?" - Just decl -> Set.delete r . DD.dependencies $ DD.asDataDecl decl - tp _ = pure mempty - tm (Referent.Ref r@(Reference.DerivedId i)) = eval (LoadTerm i) <&> \case - Nothing -> error $ "What happened to " ++ show i ++ "?" - Just tm -> Set.delete r $ Term.dependencies tm - tm con@(Referent.Con (Reference.DerivedId i) cid _ct) = eval (LoadType i) <&> \case - Nothing -> error $ "What happened to " ++ show i ++ "?" - Just decl -> case DD.typeOfConstructor (DD.asDataDecl decl) cid of - Nothing -> error $ "What happened to " ++ show con ++ "?" - Just tp -> Type.dependencies tp - tm _ = pure mempty - in LD.fold tp tm ld - (missing, names0) <- eval . Eval $ Branch.findHistoricalRefs' dependencies root' - let types = R.toList $ Names3.types0 names0 - let terms = fmap (second Referent.toReference) $ R.toList $ Names.terms names0 - let names = types <> terms - numberedArgs .= fmap (Text.unpack . Reference.toText) ((fmap snd names) <> toList missing) - respond $ ListDependencies hqLength ld names missing - DebugNumberedArgsI -> use numberedArgs >>= respond . DumpNumberedArgs - DebugBranchHistoryI -> - eval . Notify . DumpBitBooster (Branch.headHash currentBranch') =<< - (eval . 
Eval $ Causal.hashToRaw (Branch._history currentBranch')) - DebugTypecheckedUnisonFileI -> case uf of - Nothing -> respond NoUnisonFile - Just uf -> let - datas, effects, terms :: [(Name, Reference.Id)] - datas = [ (Name.fromVar v, r) | (v, (r, _d)) <- Map.toList $ UF.dataDeclarationsId' uf ] - effects = [ (Name.fromVar v, r) | (v, (r, _e)) <- Map.toList $ UF.effectDeclarationsId' uf ] - terms = [ (Name.fromVar v, r) | (v, (r, _tm, _tp)) <- Map.toList $ UF.hashTermsId uf ] - in eval . Notify $ DumpUnisonFileHashes hqLength datas effects terms - - DeprecateTermI {} -> notImplemented - DeprecateTypeI {} -> notImplemented - RemoveTermReplacementI from patchPath -> - doRemoveReplacement from patchPath True - RemoveTypeReplacementI from patchPath -> - doRemoveReplacement from patchPath False - ShowDefinitionByPrefixI {} -> notImplemented - UpdateBuiltinsI -> notImplemented - QuitI -> MaybeT $ pure Nothing - where - notImplemented = eval $ Notify NotImplemented - success = respond Success - - resolveDefaultMetadata :: Path.Absolute -> Action' m v [String] - resolveDefaultMetadata path = do - let superpaths = Path.ancestors path - xs <- for - superpaths - (\path -> do - mayNames <- - eval . ConfigLookup @[String] $ configKey "DefaultMetadata" path - pure . join $ toList mayNames - ) - pure . join $ toList xs - - configKey k p = - Text.intercalate "." . toList $ k :<| fmap - NameSegment.toText - (Path.toSeq $ Path.unabsolute p) - - -- Takes a maybe (namespace address triple); returns it as-is if `Just`; - -- otherwise, tries to load a value from .unisonConfig, and complains - -- if needed. - resolveConfiguredGitUrl - :: PushPull - -> Path' - -> Maybe RemoteNamespace - -> ExceptT (Output v) (Action' m v) RemoteNamespace - resolveConfiguredGitUrl pushPull destPath' = \case - Just ns -> pure ns - Nothing -> ExceptT do - let destPath = resolveToAbsolute destPath' - let configKey = gitUrlKey destPath - (eval . 
ConfigLookup) configKey >>= \case - Just url -> - case P.parse UriParser.repoPath (Text.unpack configKey) url of - Left e -> - pure . Left $ - ConfiguredGitUrlParseError pushPull destPath' url (show e) - Right (repo, Just sbh, remotePath) -> - pure . Left $ - ConfiguredGitUrlIncludesShortBranchHash pushPull repo sbh remotePath - Right ns -> - pure . Right $ ns - Nothing -> - pure . Left $ NoConfiguredGitUrl pushPull destPath' - - gitUrlKey = configKey "GitUrl" - - case e of - Right input -> lastInput .= Just input - _ -> pure () - --- todo: compare to `getHQTerms` / `getHQTypes`. Is one universally better? -resolveHQToLabeledDependencies :: Functor m => HQ.HashQualified -> Action' m v (Set LabeledDependency) -resolveHQToLabeledDependencies = \case - HQ.NameOnly n -> do - parseNames <- Names3.suffixify0 <$> basicParseNames0 - let terms, types :: Set LabeledDependency - terms = Set.map LD.referent . R.lookupDom n $ Names3.terms0 parseNames - types = Set.map LD.typeRef . R.lookupDom n $ Names3.types0 parseNames - pure $ terms <> types - -- rationale: the hash should be unique enough that the name never helps - HQ.HashQualified _n sh -> resolveHashOnly sh - HQ.HashOnly sh -> resolveHashOnly sh - where - resolveHashOnly sh = do - terms <- eval $ TermReferentsByShortHash sh - types <- eval $ TypeReferencesByShortHash sh - pure $ Set.map LD.referent terms <> Set.map LD.typeRef types - -doDisplay :: Var v => OutputLocation -> Names -> Referent -> Action' m v () -doDisplay outputLoc names r = do - let tm = Term.fromReferent External r - ppe <- prettyPrintEnvDecl names - latestFile' <- use latestFile - let - loc = case outputLoc of - ConsoleLocation -> Nothing - FileLocation path -> Just path - LatestFileLocation -> fmap fst latestFile' <|> Just "scratch.u" - evalTerm r = fmap ErrorUtil.hush . 
eval $ - Evaluate1 (PPE.suffixifiedPPE ppe) (Term.ref External r) - loadTerm (Reference.DerivedId r) = eval $ LoadTerm r - loadTerm _ = pure Nothing - loadDecl (Reference.DerivedId r) = eval $ LoadType r - loadDecl _ = pure Nothing - rendered <- DisplayValues.displayTerm ppe loadTerm loadTypeOfTerm evalTerm loadDecl tm - respond $ DisplayRendered loc rendered - -getLinks :: (Var v, Monad m) - => Input - -> Path.HQSplit' - -> Either (Set Reference) (Maybe String) - -> ExceptT (Output v) - (Action' m v) - (PPE.PrettyPrintEnv, - -- e.g. ("Foo.doc", #foodoc, Just (#builtin.Doc) - [(HQ.HashQualified, Reference, Maybe (Type v Ann))]) -getLinks input src mdTypeStr = ExceptT $ do - let go = fmap Right . getLinks' src - case mdTypeStr of - Left s -> go (Just s) - Right Nothing -> go Nothing - Right (Just mdTypeStr) -> parseType input mdTypeStr >>= \case - Left e -> pure $ Left e - Right typ -> go . Just . Set.singleton $ Type.toReference typ - -getLinks' :: (Var v, Monad m) - => Path.HQSplit' -- definition to print metadata of - -> Maybe (Set Reference) -- return all metadata if empty - -> Action' m v (PPE.PrettyPrintEnv, - -- e.g. ("Foo.doc", #foodoc, Just (#builtin.Doc) - [(HQ.HashQualified, Reference, Maybe (Type v Ann))]) -getLinks' src selection0 = do - root0 <- Branch.head <$> use root - currentPath' <- use currentPath - let resolveSplit' = Path.fromAbsoluteSplit . Path.toAbsoluteSplit currentPath' - p = resolveSplit' src -- ex: the (parent,hqsegment) of `List.map` - `List` - -- all metadata (type+value) associated with name `src` - allMd = R4.d34 (BranchUtil.getTermMetadataHQNamed p root0) - <> R4.d34 (BranchUtil.getTypeMetadataHQNamed p root0) - allMd' = maybe allMd (`R.restrictDom` allMd) selection0 - -- then list the values after filtering by type - allRefs :: Set Reference = R.ran allMd' - sigs <- for (toList allRefs) (loadTypeOfTerm . Referent.Ref) - let deps = Set.map LD.termRef allRefs <> - Set.unions [ Set.map LD.typeRef . 
Type.dependencies $ t | Just t <- sigs ] - ppe <- prettyPrintEnvDecl =<< makePrintNamesFromLabeled' deps - let ppeDecl = PPE.unsuffixifiedPPE ppe - let sortedSigs = sortOn snd (toList allRefs `zip` sigs) - let out = [(PPE.termName ppeDecl (Referent.Ref r), r, t) | (r, t) <- sortedSigs ] - pure (PPE.suffixifiedPPE ppe, out) - -resolveShortBranchHash :: - ShortBranchHash -> ExceptT (Output v) (Action' m v) (Branch m) -resolveShortBranchHash hash = ExceptT do - hashSet <- eval $ BranchHashesByPrefix hash - len <- eval BranchHashLength - case Set.toList hashSet of - [] -> pure . Left $ NoBranchWithHash hash - [h] -> fmap Right . eval $ LoadLocalBranch h - _ -> pure . Left $ BranchHashAmbiguous hash (Set.map (SBH.fromHash len) hashSet) - --- Returns True if the operation changed the namespace, False otherwise. -propagatePatchNoSync - :: (Monad m, Var v) - => Patch - -> Path.Absolute - -> Action' m v Bool -propagatePatchNoSync patch scopePath = stepAtMNoSync' - (Path.unabsolute scopePath, lift . lift . Propagate.propagateAndApply patch) - --- Returns True if the operation changed the namespace, False otherwise. -propagatePatch :: (Monad m, Var v) => - InputDescription -> Patch -> Path.Absolute -> Action' m v Bool -propagatePatch inputDescription patch scopePath = - stepAtM' (inputDescription <> " (applying patch)") - (Path.unabsolute scopePath, - lift . lift . Propagate.propagateAndApply patch) - --- | Create the args needed for showTodoOutput and call it -doShowTodoOutput :: Monad m => Patch -> Path.Absolute -> Action' m v () -doShowTodoOutput patch scopePath = do - scope <- getAt scopePath - let names0 = Branch.toNames0 (Branch.head scope) - -- only needs the local references to check for obsolete defs - let getPpe = do - names <- makePrintNamesFromLabeled' (Patch.labeledDependencies patch) - prettyPrintEnvDecl names - showTodoOutput getPpe patch names0 - --- | Show todo output if there are any conflicts or edits. 
-showTodoOutput - :: Action' m v PPE.PrettyPrintEnvDecl - -- ^ Action that fetches the pretty print env. It's expensive because it - -- involves looking up historical names, so only call it if necessary. - -> Patch - -> Names0 - -> Action' m v () -showTodoOutput getPpe patch names0 = do - todo <- checkTodo patch names0 - if TO.noConflicts todo && TO.noEdits todo - then respond NoConflictsOrEdits - else do - numberedArgs .= - (Text.unpack . Reference.toText . view _2 <$> - fst (TO.todoFrontierDependents todo)) - ppe <- getPpe - respond $ TodoOutput ppe todo - -checkTodo :: Patch -> Names0 -> Action m i v (TO.TodoOutput v Ann) -checkTodo patch names0 = do - f <- computeFrontier (eval . GetDependents) patch names0 - let dirty = R.dom f - frontier = R.ran f - (frontierTerms, frontierTypes) <- loadDisplayInfo frontier - (dirtyTerms, dirtyTypes) <- loadDisplayInfo dirty - -- todo: something more intelligent here? - let scoreFn = const 1 - remainingTransitive <- - frontierTransitiveDependents (eval . 
GetDependents) names0 frontier - let - scoredDirtyTerms = - List.sortOn (view _1) [ (scoreFn r, r, t) | (r,t) <- dirtyTerms ] - scoredDirtyTypes = - List.sortOn (view _1) [ (scoreFn r, r, t) | (r,t) <- dirtyTypes ] - pure $ - TO.TodoOutput - (Set.size remainingTransitive) - (frontierTerms, frontierTypes) - (scoredDirtyTerms, scoredDirtyTypes) - (Names.conflicts names0) - (Patch.conflicts patch) - where - frontierTransitiveDependents :: - Monad m => (Reference -> m (Set Reference)) -> Names0 -> Set Reference -> m (Set Reference) - frontierTransitiveDependents dependents names0 rs = do - let branchDependents r = Set.filter (Names.contains names0) <$> dependents r - tdeps <- transitiveClosure branchDependents rs - -- we don't want the frontier in the result - pure $ tdeps `Set.difference` rs - --- (d, f) when d is "dirty" (needs update), --- f is in the frontier (an edited dependency of d), --- and d depends on f --- a ⋖ b = a depends directly on b --- dirty(d) ∧ frontier(f) <=> not(edited(d)) ∧ edited(f) ∧ d ⋖ f --- --- The range of this relation is the frontier, and the domain is --- the set of dirty references. -computeFrontier :: forall m . Monad m - => (Reference -> m (Set Reference)) -- eg Codebase.dependents codebase - -> Patch - -> Names0 - -> m (R.Relation Reference Reference) -computeFrontier getDependents patch names = let - edited :: Set Reference - edited = R.dom (Patch._termEdits patch) <> R.dom (Patch._typeEdits patch) - addDependents :: R.Relation Reference Reference -> Reference -> m (R.Relation Reference Reference) - addDependents dependents ref = - (\ds -> R.insertManyDom ds ref dependents) . Set.filter (Names.contains names) - <$> getDependents ref - in do - -- (r,r2) ∈ dependsOn if r depends on r2 - dependsOn <- foldM addDependents R.empty edited - -- Dirty is everything that `dependsOn` Frontier, minus already edited defns - pure $ R.filterDom (not . flip Set.member edited) dependsOn - -eval :: Command m i v a -> Action m i v a -eval = lift . 
lift . Free.eval - -confirmedCommand :: Input -> Action m i v Bool -confirmedCommand i = do - i0 <- use lastInput - pure $ Just i == i0 - -listBranch :: Branch0 m -> [SearchResult] -listBranch (Branch.toNames0 -> b) = - List.sortOn (\s -> (SR.name s, s)) (SR.fromNames b) - --- | restores the full hash to these search results, for _numberedArgs purposes -searchResultToHQString :: SearchResult -> String -searchResultToHQString = \case - SR.Tm' n r _ -> HQ'.toString $ HQ'.requalify n r - SR.Tp' n r _ -> HQ'.toString $ HQ'.requalify n (Referent.Ref r) - _ -> error "unpossible match failure" - --- Return a list of definitions whose names fuzzy match the given queries. -fuzzyNameDistance :: Name -> Name -> Maybe Int -fuzzyNameDistance (Name.toString -> q) (Name.toString -> n) = - Find.simpleFuzzyScore q n - --- return `name` and `name....` -_searchBranchPrefix :: Branch m -> Name -> [SearchResult] -_searchBranchPrefix b n = case Path.unsnoc (Path.fromName n) of - Nothing -> [] - Just (init, last) -> case Branch.getAt init b of - Nothing -> [] - Just b -> SR.fromNames . Names.prefix0 n $ names0 - where - lastName = Path.toName (Path.singleton last) - subnames = Branch.toNames0 . Branch.head $ - Branch.getAt' (Path.singleton last) b - rootnames = - Names.filter (== lastName) . - Branch.toNames0 . set Branch.children mempty $ Branch.head b - names0 = rootnames <> Names.prefix0 lastName subnames - -searchResultsFor :: Names0 -> [Referent] -> [Reference] -> [SearchResult] -searchResultsFor ns terms types = - [ SR.termSearchResult ns name ref - | ref <- terms - , name <- toList (Names.namesForReferent ns ref) - ] <> - [ SR.typeSearchResult ns name ref - | ref <- types - , name <- toList (Names.namesForReference ns ref) - ] - -searchBranchScored :: forall score. (Ord score) - => Names0 - -> (Name -> Name -> Maybe score) - -> [HQ.HashQualified] - -> [SearchResult] -searchBranchScored names0 score queries = - nubOrd . fmap snd . 
toList $ searchTermNamespace <> searchTypeNamespace - where - searchTermNamespace = foldMap do1query queries - where - do1query :: HQ.HashQualified -> Set (Maybe score, SearchResult) - do1query q = foldMap (score1hq q) (R.toList . Names.terms $ names0) - score1hq :: HQ.HashQualified -> (Name, Referent) -> Set (Maybe score, SearchResult) - score1hq query (name, ref) = case query of - HQ.NameOnly qn -> - pair qn - HQ.HashQualified qn h | h `SH.isPrefixOf` Referent.toShortHash ref -> - pair qn - HQ.HashOnly h | h `SH.isPrefixOf` Referent.toShortHash ref -> - Set.singleton (Nothing, result) - _ -> mempty - where - result = SR.termSearchResult names0 name ref - pair qn = case score qn name of - Just score -> Set.singleton (Just score, result) - Nothing -> mempty - searchTypeNamespace = foldMap do1query queries - where - do1query :: HQ.HashQualified -> Set (Maybe score, SearchResult) - do1query q = foldMap (score1hq q) (R.toList . Names.types $ names0) - score1hq :: HQ.HashQualified -> (Name, Reference) -> Set (Maybe score, SearchResult) - score1hq query (name, ref) = case query of - HQ.NameOnly qn -> - pair qn - HQ.HashQualified qn h | h `SH.isPrefixOf` Reference.toShortHash ref -> - pair qn - HQ.HashOnly h | h `SH.isPrefixOf` Reference.toShortHash ref -> - Set.singleton (Nothing, result) - _ -> mempty - where - result = SR.typeSearchResult names0 name ref - pair qn = case score qn name of - Just score -> Set.singleton (Just score, result) - Nothing -> mempty - --- Separates type references from term references and returns types and terms, --- respectively. For terms that are constructors, turns them into their data --- types. 
-collateReferences - :: Foldable f - => Foldable g - => f Reference -- types requested - -> g Referent -- terms requested, including ctors - -> (Set Reference, Set Reference) -collateReferences (toList -> types) (toList -> terms) = - let terms' = [ r | Referent.Ref r <- terms ] - types' = [ r | Referent.Con r _ _ <- terms ] - in (Set.fromList types' <> Set.fromList types, Set.fromList terms') - --- | The output list (of lists) corresponds to the query list. -searchBranchExact :: Int -> Names -> [HQ.HashQualified] -> [[SearchResult]] -searchBranchExact len names queries = let - searchTypes :: HQ.HashQualified -> [SearchResult] - searchTypes query = - -- a bunch of references will match a HQ ref. - let refs = toList $ Names3.lookupHQType query names in - refs <&> \r -> - let hqNames = Names3.typeName len r names in - let primaryName = - last . sortOn (\n -> HQ.matchesNamedReference (HQ'.toName n) r query) - $ toList hqNames in - let aliases = Set.delete primaryName hqNames in - SR.typeResult primaryName r aliases - searchTerms :: HQ.HashQualified -> [SearchResult] - searchTerms query = - -- a bunch of references will match a HQ ref. - let refs = toList $ Names3.lookupHQTerm query names in - refs <&> \r -> - let hqNames = Names3.termName len r names in - let primaryName = - last . sortOn (\n -> HQ.matchesNamedReferent (HQ'.toName n) r query) - $ toList hqNames in - let aliases = Set.delete primaryName hqNames in - SR.termResult primaryName r aliases - in [ searchTypes q <> searchTerms q | q <- queries ] - -respond :: Output v -> Action m i v () -respond output = eval $ Notify output - -respondNumbered :: NumberedOutput v -> Action m i v () -respondNumbered output = do - args <- eval $ NotifyNumbered output - unless (null args) $ - numberedArgs .= toList args - -unlessError :: ExceptT (Output v) (Action' m v) () -> Action' m v () -unlessError ma = runExceptT ma >>= either (eval . 
Notify) pure - -unlessError' :: (e -> Output v) -> ExceptT e (Action' m v) () -> Action' m v () -unlessError' f ma = unlessError $ withExceptT f ma - --- | supply `dest0` if you want to print diff messages --- supply unchangedMessage if you want to display it if merge had no effect -mergeBranchAndPropagateDefaultPatch :: (Monad m, Var v) => Branch.MergeMode -> - InputDescription -> Maybe (Output v) -> Branch m -> Maybe Path.Path' -> Path.Absolute -> Action' m v () -mergeBranchAndPropagateDefaultPatch mode inputDescription unchangedMessage srcb dest0 dest = - ifM (mergeBranch mode inputDescription srcb dest0 dest) - (loadPropagateDiffDefaultPatch inputDescription dest0 dest) - (for_ unchangedMessage respond) - where - mergeBranch :: (Monad m, Var v) => - Branch.MergeMode -> InputDescription -> Branch m -> Maybe Path.Path' -> Path.Absolute -> Action' m v Bool - mergeBranch mode inputDescription srcb dest0 dest = unsafeTime "Merge Branch" $ do - destb <- getAt dest - merged <- eval . Eval $ Branch.merge' mode srcb destb - b <- updateAtM inputDescription dest (const $ pure merged) - for_ dest0 $ \dest0 -> - diffHelper (Branch.head destb) (Branch.head merged) >>= - respondNumbered . uncurry (ShowDiffAfterMerge dest0 dest) - pure b - -loadPropagateDiffDefaultPatch :: (Monad m, Var v) => - InputDescription -> Maybe Path.Path' -> Path.Absolute -> Action' m v () -loadPropagateDiffDefaultPatch inputDescription dest0 dest = unsafeTime "Propagate Default Patch" $ do - original <- getAt dest - patch <- eval . Eval $ Branch.getPatch defaultPatchNameSegment (Branch.head original) - patchDidChange <- propagatePatch inputDescription patch dest - when patchDidChange . for_ dest0 $ \dest0 -> do - patched <- getAt dest - let patchPath = snoc dest0 defaultPatchNameSegment - diffHelper (Branch.head original) (Branch.head patched) >>= - respondNumbered . 
uncurry (ShowDiffAfterMergePropagate dest0 dest patchPath) - -getAt :: Functor m => Path.Absolute -> Action m i v (Branch m) -getAt (Path.Absolute p) = - use root <&> fromMaybe Branch.empty . Branch.getAt p - --- Update a branch at the given path, returning `True` if --- an update occurred and false otherwise -updateAtM :: Applicative m - => InputDescription - -> Path.Absolute - -> (Branch m -> Action m i v (Branch m)) - -> Action m i v Bool -updateAtM reason (Path.Absolute p) f = do - b <- use lastSavedRoot - b' <- Branch.modifyAtM p f b - updateRoot b' reason - pure $ b /= b' - -stepAt - :: forall m i v - . Monad m - => InputDescription - -> (Path, Branch0 m -> Branch0 m) - -> Action m i v () -stepAt cause = stepManyAt @m @[] cause . pure - -stepAtNoSync :: forall m i v. Monad m - => (Path, Branch0 m -> Branch0 m) - -> Action m i v () -stepAtNoSync = stepManyAtNoSync @m @[] . pure - -stepAtM :: forall m i v. Monad m - => InputDescription - -> (Path, Branch0 m -> m (Branch0 m)) - -> Action m i v () -stepAtM cause = stepManyAtM @m @[] cause . pure - -stepAtM' - :: forall m i v - . Monad m - => InputDescription - -> (Path, Branch0 m -> Action m i v (Branch0 m)) - -> Action m i v Bool -stepAtM' cause = stepManyAtM' @m @[] cause . pure - -stepAtMNoSync' - :: forall m i v - . Monad m - => (Path, Branch0 m -> Action m i v (Branch0 m)) - -> Action m i v Bool -stepAtMNoSync' = stepManyAtMNoSync' @m @[] . 
pure - -stepManyAt - :: (Monad m, Foldable f) - => InputDescription - -> f (Path, Branch0 m -> Branch0 m) - -> Action m i v () -stepManyAt reason actions = do - stepManyAtNoSync actions - b <- use root - updateRoot b reason - --- Like stepManyAt, but doesn't update the root -stepManyAtNoSync - :: (Monad m, Foldable f) - => f (Path, Branch0 m -> Branch0 m) - -> Action m i v () -stepManyAtNoSync actions = do - b <- use root - let new = Branch.stepManyAt actions b - root .= new - -stepManyAtM :: (Monad m, Foldable f) - => InputDescription - -> f (Path, Branch0 m -> m (Branch0 m)) - -> Action m i v () -stepManyAtM reason actions = do - stepManyAtMNoSync actions - b <- use root - updateRoot b reason - -stepManyAtMNoSync :: (Monad m, Foldable f) - => f (Path, Branch0 m -> m (Branch0 m)) - -> Action m i v () -stepManyAtMNoSync actions = do - b <- use root - b' <- eval . Eval $ Branch.stepManyAtM actions b - root .= b' - -stepManyAtM' :: (Monad m, Foldable f) - => InputDescription - -> f (Path, Branch0 m -> Action m i v (Branch0 m)) - -> Action m i v Bool -stepManyAtM' reason actions = do - b <- use root - b' <- Branch.stepManyAtM actions b - updateRoot b' reason - pure (b /= b') - -stepManyAtMNoSync' :: (Monad m, Foldable f) - => f (Path, Branch0 m -> Action m i v (Branch0 m)) - -> Action m i v Bool -stepManyAtMNoSync' actions = do - b <- use root - b' <- Branch.stepManyAtM actions b - root .= b' - pure (b /= b') - -updateRoot :: Branch m -> InputDescription -> Action m i v () -updateRoot new reason = do - old <- use lastSavedRoot - when (old /= new) $ do - root .= new - eval $ SyncLocalRootBranch new - eval $ AppendToReflog reason old new - lastSavedRoot .= new - --- cata for 0, 1, or more elements of a Foldable --- tries to match as lazily as possible -zeroOneOrMore :: Foldable f => f a -> b -> (a -> b) -> (f a -> b) -> b -zeroOneOrMore f zero one more = case toList f of - _ : _ : _ -> more f - a : _ -> one a - _ -> zero - --- Goal: If `remaining = root - toBeDeleted` 
contains definitions X which --- depend on definitions Y not in `remaining` (which should also be in --- `toBeDeleted`), then complain by returning (Y, X). -getEndangeredDependents :: forall m. Monad m - => (Reference -> m (Set Reference)) - -> Names0 - -> Names0 - -> m (Names0, Names0) -getEndangeredDependents getDependents toDelete root = do - let remaining = root `Names.difference` toDelete - toDelete', remaining', extinct :: Set Reference - toDelete' = Names.allReferences toDelete - remaining' = Names.allReferences remaining -- left over after delete - extinct = toDelete' `Set.difference` remaining' -- deleting and not left over - accumulateDependents m r = getDependents r <&> \ds -> Map.insert r ds m - dependentsOfExtinct :: Map Reference (Set Reference) <- - foldM accumulateDependents mempty extinct - let orphaned, endangered, failed :: Set Reference - orphaned = fold dependentsOfExtinct - endangered = orphaned `Set.intersection` remaining' - failed = Set.filter hasEndangeredDependent extinct - hasEndangeredDependent r = any (`Set.member` endangered) - (dependentsOfExtinct Map.! r) - pure ( Names.restrictReferences failed toDelete - , Names.restrictReferences endangered root `Names.difference` toDelete) - --- Applies the selection filter to the adds/updates of a slurp result, --- meaning that adds/updates should only contain the selection or its transitive --- dependencies, any unselected transitive dependencies of the selection will --- be added to `extraDefinitions`. -applySelection :: forall v a. 
Var v => - [HQ'.HashQualified] -> UF.TypecheckedUnisonFile v a -> SlurpResult v -> SlurpResult v -applySelection [] _ = id -applySelection hqs file = \sr@SlurpResult{..} -> - sr { adds = adds `SC.intersection` closed - , updates = updates `SC.intersection` closed - , extraDefinitions = closed `SC.difference` selection - } - where - selectedNames0 = - Names.filterByHQs (Set.fromList hqs) (UF.typecheckedToNames0 file) - selection, closed :: SlurpComponent v - selection = SlurpComponent selectedTypes selectedTerms - closed = SC.closeWithDependencies file selection - selectedTypes, selectedTerms :: Set v - selectedTypes = Set.map var $ R.dom (Names.types selectedNames0) - selectedTerms = Set.map var $ R.dom (Names.terms selectedNames0) - -var :: Var v => Name -> v -var name = Var.named (Name.toText name) - -toSlurpResult - :: forall v - . Var v - => Path.Absolute - -> UF.TypecheckedUnisonFile v Ann - -> Names0 - -> SlurpResult v -toSlurpResult currentPath uf existingNames = - Slurp.subtractComponent (conflicts <> ctorCollisions) $ SlurpResult - uf - mempty - adds - dups - mempty - conflicts - updates - termCtorCollisions - ctorTermCollisions - termAliases - typeAliases - mempty - where - fileNames0 = UF.typecheckedToNames0 uf - - sc :: R.Relation Name Referent -> R.Relation Name Reference -> SlurpComponent v - sc terms types = SlurpComponent { terms = Set.map var (R.dom terms) - , types = Set.map var (R.dom types) } - - -- conflict (n,r) if n is conflicted in names0 - conflicts :: SlurpComponent v - conflicts = sc terms types where - terms = R.filterDom (conflicted . Names.termsNamed existingNames) - (Names.terms fileNames0) - types = R.filterDom (conflicted . 
Names.typesNamed existingNames) - (Names.types fileNames0) - conflicted s = Set.size s > 1 - - ctorCollisions :: SlurpComponent v - ctorCollisions = - mempty { SC.terms = termCtorCollisions <> ctorTermCollisions } - - -- termCtorCollision (n,r) if (n, r' /= r) exists in existingNames and - -- r is Ref and r' is Con - termCtorCollisions :: Set v - termCtorCollisions = Set.fromList - [ var n - | (n, Referent.Ref{}) <- R.toList (Names.terms fileNames0) - , [r@Referent.Con{}] <- [toList $ Names.termsNamed existingNames n] - -- ignore collisions w/ ctors of types being updated - , Set.notMember (Referent.toReference r) typesToUpdate - ] - - -- the set of typerefs that are being updated by this file - typesToUpdate :: Set Reference - typesToUpdate = Set.fromList - [ r - | (n, r') <- R.toList (Names.types fileNames0) - , r <- toList (Names.typesNamed existingNames n) - , r /= r' - ] - - -- ctorTermCollisions (n,r) if (n, r' /= r) exists in names0 and r is Con - -- and r' is Ref except we relaxed it to where r' can be Con or Ref - -- what if (n,r) and (n,r' /= r) exists in names and r, r' are Con - ctorTermCollisions :: Set v - ctorTermCollisions = Set.fromList - [ var n - | (n, Referent.Con{}) <- R.toList (Names.terms fileNames0) - , r <- toList $ Names.termsNamed existingNames n - -- ignore collisions w/ ctors of types being updated - , Set.notMember (Referent.toReference r) typesToUpdate - , Set.notMember (var n) (terms dups) - ] - - -- duplicate (n,r) if (n,r) exists in names0 - dups :: SlurpComponent v - dups = sc terms types where - terms = R.intersection (Names.terms existingNames) (Names.terms fileNames0) - types = R.intersection (Names.types existingNames) (Names.types fileNames0) - - -- update (n,r) if (n,r' /= r) exists in existingNames and r, r' are Ref - updates :: SlurpComponent v - updates = SlurpComponent (Set.fromList types) (Set.fromList terms) where - terms = - [ var n - | (n, r'@Referent.Ref{}) <- R.toList (Names.terms fileNames0) - , [r@Referent.Ref{}] 
<- [toList $ Names.termsNamed existingNames n] - , r' /= r - ] - types = - [ var n - | (n, r') <- R.toList (Names.types fileNames0) - , [r] <- [toList $ Names.typesNamed existingNames n] - , r' /= r - ] - - buildAliases - :: R.Relation Name Referent - -> R.Relation Name Referent - -> Set v - -> Map v Slurp.Aliases - buildAliases existingNames namesFromFile duplicates = Map.fromList - [ ( var n - , if null aliasesOfOld - then Slurp.AddAliases aliasesOfNew - else Slurp.UpdateAliases aliasesOfOld aliasesOfNew - ) - | (n, r@Referent.Ref{}) <- R.toList namesFromFile - -- All the refs whose names include `n`, and are not `r` - , let - refs = Set.delete r $ R.lookupDom n existingNames - aliasesOfNew = - Set.map (Path.unprefixName currentPath) . Set.delete n $ - R.lookupRan r existingNames - aliasesOfOld = - Set.map (Path.unprefixName currentPath) . Set.delete n . R.dom $ - R.restrictRan existingNames refs - , not (null aliasesOfNew && null aliasesOfOld) - , Set.notMember (var n) duplicates - ] - - termAliases :: Map v Slurp.Aliases - termAliases = buildAliases (Names.terms existingNames) - (Names.terms fileNames0) - (SC.terms dups) - - typeAliases :: Map v Slurp.Aliases - typeAliases = buildAliases (R.mapRan Referent.Ref $ Names.types existingNames) - (R.mapRan Referent.Ref $ Names.types fileNames0) - (SC.types dups) - - -- (n,r) is in `adds` if n isn't in existingNames - adds = sc terms types where - terms = addTerms (Names.terms existingNames) (Names.terms fileNames0) - types = addTypes (Names.types existingNames) (Names.types fileNames0) - addTerms existingNames = R.filter go where - go (n, Referent.Ref{}) = (not . R.memberDom n) existingNames - go _ = False - addTypes existingNames = R.filter go where - go (n, _) = (not . 
R.memberDom n) existingNames - -filterBySlurpResult :: Ord v - => SlurpResult v - -> UF.TypecheckedUnisonFile v Ann - -> UF.TypecheckedUnisonFile v Ann -filterBySlurpResult SlurpResult{..} - (UF.TypecheckedUnisonFileId - dataDeclarations' - effectDeclarations' - topLevelComponents' - watchComponents - hashTerms) = - UF.TypecheckedUnisonFileId datas effects tlcs watches hashTerms' - where - keep = updates <> adds - keepTerms = SC.terms keep - keepTypes = SC.types keep - hashTerms' = Map.restrictKeys hashTerms keepTerms - datas = Map.restrictKeys dataDeclarations' keepTypes - effects = Map.restrictKeys effectDeclarations' keepTypes - tlcs = filter (not.null) $ fmap (List.filter filterTLC) topLevelComponents' - watches = filter (not.null.snd) $ fmap (second (List.filter filterTLC)) watchComponents - filterTLC (v,_,_) = Set.member v keepTerms - --- updates the namespace for adding `slurp` -doSlurpAdds :: forall m v. (Monad m, Var v) - => SlurpComponent v - -> UF.TypecheckedUnisonFile v Ann - -> (Branch0 m -> Branch0 m) -doSlurpAdds slurp uf = Branch.stepManyAt0 (typeActions <> termActions) - where - typeActions = map doType . toList $ SC.types slurp - termActions = map doTerm . 
toList $ - SC.terms slurp <> Slurp.constructorsFor (SC.types slurp) uf - names = UF.typecheckedToNames0 uf - tests = Set.fromList $ fst <$> UF.watchesOfKind UF.TestWatch (UF.discardTypes uf) - (isTestType, isTestValue) = isTest - md v = - if Set.member v tests then Metadata.singleton isTestType isTestValue - else Metadata.empty - doTerm :: v -> (Path, Branch0 m -> Branch0 m) - doTerm v = case toList (Names.termsNamed names (Name.fromVar v)) of - [] -> errorMissingVar v - [r] -> case Path.splitFromName (Name.fromVar v) of - Nothing -> errorEmptyVar - Just split -> BranchUtil.makeAddTermName split r (md v) - wha -> error $ "Unison bug, typechecked file w/ multiple terms named " - <> Var.nameStr v <> ": " <> show wha - doType :: v -> (Path, Branch0 m -> Branch0 m) - doType v = case toList (Names.typesNamed names (Name.fromVar v)) of - [] -> errorMissingVar v - [r] -> case Path.splitFromName (Name.fromVar v) of - Nothing -> errorEmptyVar - Just split -> BranchUtil.makeAddTypeName split r Metadata.empty - wha -> error $ "Unison bug, typechecked file w/ multiple types named " - <> Var.nameStr v <> ": " <> show wha - errorEmptyVar = error "encountered an empty var name" - errorMissingVar v = error $ "expected to find " ++ show v ++ " in " ++ show uf - -doSlurpUpdates :: Monad m - => Map Name (Reference, Reference) - -> Map Name (Reference, Reference) - -> [(Name, Referent)] - -> (Branch0 m -> Branch0 m) -doSlurpUpdates typeEdits termEdits deprecated b0 = - Branch.stepManyAt0 (typeActions <> termActions <> deprecateActions) b0 - where - typeActions = join . map doType . Map.toList $ typeEdits - termActions = join . map doTerm . Map.toList $ termEdits - deprecateActions = join . 
map doDeprecate $ deprecated where - doDeprecate (n, r) = case Path.splitFromName n of - Nothing -> errorEmptyVar - Just split -> [BranchUtil.makeDeleteTermName split r] - - -- we copy over the metadata on the old thing - -- todo: if the thing being updated, m, is metadata for something x in b0 - -- update x's md to reference `m` - doType, doTerm :: - (Name, (Reference, Reference)) -> [(Path, Branch0 m -> Branch0 m)] - doType (n, (old, new)) = case Path.splitFromName n of - Nothing -> errorEmptyVar - Just split -> [ BranchUtil.makeDeleteTypeName split old - , BranchUtil.makeAddTypeName split new oldMd ] - where - oldMd = BranchUtil.getTypeMetadataAt split old b0 - doTerm (n, (old, new)) = case Path.splitFromName n of - Nothing -> errorEmptyVar - Just split -> [ BranchUtil.makeDeleteTermName split (Referent.Ref old) - , BranchUtil.makeAddTermName split (Referent.Ref new) oldMd ] - where - -- oldMd is the metadata linked to the old definition - -- we relink it to the new definition - oldMd = BranchUtil.getTermMetadataAt split (Referent.Ref old) b0 - errorEmptyVar = error "encountered an empty var name" - -loadSearchResults :: Ord v => [SR.SearchResult] -> Action m i v [SearchResult' v Ann] -loadSearchResults = traverse loadSearchResult - where - loadSearchResult = \case - SR.Tm (SR.TermResult name r aliases) -> do - typ <- loadReferentType r - pure $ SR'.Tm name typ r aliases - SR.Tp (SR.TypeResult name r aliases) -> do - dt <- loadTypeDisplayThing r - pure $ SR'.Tp name dt r aliases - -loadDisplayInfo :: - Set Reference -> Action m i v ([(Reference, Maybe (Type v Ann))] - ,[(Reference, DisplayThing (DD.Decl v Ann))]) -loadDisplayInfo refs = do - termRefs <- filterM (eval . IsTerm) (toList refs) - typeRefs <- filterM (eval . 
IsType) (toList refs) - terms <- forM termRefs $ \r -> (r,) <$> eval (LoadTypeOfTerm r) - types <- forM typeRefs $ \r -> (r,) <$> loadTypeDisplayThing r - pure (terms, types) - -loadReferentType :: Referent -> Action m i v (Maybe (Type v Ann)) -loadReferentType = \case - Referent.Ref r -> eval $ LoadTypeOfTerm r - Referent.Con r cid _ -> getTypeOfConstructor r cid - where - getTypeOfConstructor :: Reference -> Int -> Action m i v (Maybe (Type v Ann)) - getTypeOfConstructor (Reference.DerivedId r) cid = do - maybeDecl <- eval $ LoadType r - pure $ case maybeDecl of - Nothing -> Nothing - Just decl -> DD.typeOfConstructor (either DD.toDataDecl id decl) cid - getTypeOfConstructor r cid = - error $ "Don't know how to getTypeOfConstructor " ++ show r ++ " " ++ show cid - -loadTypeDisplayThing :: Reference -> Action m i v (DisplayThing (DD.Decl v Ann)) -loadTypeDisplayThing = \case - Reference.Builtin _ -> pure BuiltinThing - Reference.DerivedId id -> - maybe (MissingThing id) RegularThing <$> eval (LoadType id) - -lexedSource :: Monad m => SourceName -> Source -> Action' m v (Names, LexedSource) -lexedSource name src = do - let tokens = L.lexer (Text.unpack name) (Text.unpack src) - getHQ = \case - L.Backticks s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh) - L.WordyId s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh) - L.SymbolyId s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh) - L.Hash sh -> Just (HQ.HashOnly sh) - _ -> Nothing - hqs = Set.fromList . mapMaybe (getHQ . 
L.payload) $ tokens - parseNames <- makeHistoricalParsingNames hqs - pure (parseNames, (src, tokens)) - -prettyPrintEnv :: Names -> Action' m v PPE.PrettyPrintEnv -prettyPrintEnv ns = eval CodebaseHashLength <&> (`PPE.fromNames` ns) - -prettyPrintEnvDecl :: Names -> Action' m v PPE.PrettyPrintEnvDecl -prettyPrintEnvDecl ns = eval CodebaseHashLength <&> (`PPE.fromNamesDecl` ns) - -parseSearchType :: (Monad m, Var v) - => Input -> String -> Action' m v (Either (Output v) (Type v Ann)) -parseSearchType input typ = fmap Type.removeAllEffectVars <$> parseType input typ - -parseType :: (Monad m, Var v) - => Input -> String -> Action' m v (Either (Output v) (Type v Ann)) -parseType input src = do - -- `show Input` is the name of the "file" being lexed - (names0, lexed) <- lexedSource (Text.pack $ show input) (Text.pack src) - parseNames <- Names3.suffixify0 <$> basicParseNames0 - let names = Names3.push (Names3.currentNames names0) - (Names3.Names parseNames (Names3.oldNames names0)) - e <- eval $ ParseType names lexed - pure $ case e of - Left err -> Left $ TypeParseError src err - Right typ -> case Type.bindNames mempty (Names3.currentNames names) - $ Type.generalizeLowercase mempty typ of - Left es -> Left $ ParseResolutionFailures src (toList es) - Right typ -> Right typ - -makeShadowedPrintNamesFromLabeled - :: Monad m => Set LabeledDependency -> Names0 -> Action' m v Names -makeShadowedPrintNamesFromLabeled deps shadowing = - Names3.shadowing shadowing <$> makePrintNamesFromLabeled' deps - -getTermsIncludingHistorical - :: Monad m => Path.HQSplit -> Branch0 m -> Action' m v (Set Referent) -getTermsIncludingHistorical (p, hq) b = case Set.toList refs of - [] -> case hq of - HQ'.HashQualified n hs -> do - names <- findHistoricalHQs - $ Set.fromList [HQ.HashQualified (Name.unsafeFromText (NameSegment.toText n)) hs] - pure . 
R.ran $ Names.terms names - _ -> pure Set.empty - _ -> pure refs - where refs = BranchUtil.getTerm (p, hq) b - --- discards inputs that aren't hashqualified; --- I'd enforce it with finer-grained types if we had them. -findHistoricalHQs :: Monad m => Set HQ.HashQualified -> Action' m v Names0 -findHistoricalHQs lexedHQs0 = do - root <- use root - currentPath <- use currentPath - let - -- omg this nightmare name-to-path parsing code is littered everywhere. - -- We need to refactor so that the absolute-ness of a name isn't represented - -- by magical text combinations. - -- Anyway, this function takes a name, tries to determine whether it is - -- relative or absolute, and tries to return the corresponding name that is - -- /relative/ to the root. - preprocess n = case Name.toString n of - -- some absolute name that isn't just "." - '.' : t@(_:_) -> Name.unsafeFromString t - -- something in current path - _ -> if Path.isRoot currentPath then n - else Name.joinDot (Path.toName . Path.unabsolute $ currentPath) n - - lexedHQs = Set.map (fmap preprocess) . Set.filter HQ.hasHash $ lexedHQs0 - (_missing, rawHistoricalNames) <- eval . Eval $ Branch.findHistoricalHQs lexedHQs root - pure rawHistoricalNames - -makeShadowedPrintNamesFromHQ :: Monad m => Set HQ.HashQualified -> Names0 -> Action' m v Names -makeShadowedPrintNamesFromHQ lexedHQs shadowing = do - rawHistoricalNames <- findHistoricalHQs lexedHQs - basicNames0 <- basicPrettyPrintNames0 - currentPath <- use currentPath - -- The basic names go into "current", but are shadowed by "shadowing". - -- They go again into "historical" as a hack that makes them available HQ-ed. - pure $ - Names3.shadowing - shadowing - (Names basicNames0 (fixupNamesRelative currentPath rawHistoricalNames)) - -makePrintNamesFromLabeled' - :: Monad m => Set LabeledDependency -> Action' m v Names -makePrintNamesFromLabeled' deps = do - root <- use root - currentPath <- use currentPath - (_missing, rawHistoricalNames) <- eval . 
Eval $ Branch.findHistoricalRefs - deps - root - basicNames0 <- basicPrettyPrintNames0 - pure $ Names basicNames0 (fixupNamesRelative currentPath rawHistoricalNames) - --- Any absolute names in the input which have `currentPath` as a prefix --- are converted to names relative to current path. All other names are --- converted to absolute names. For example: --- --- e.g. if currentPath = .foo.bar --- then name foo.bar.baz becomes baz --- name cat.dog becomes .cat.dog -fixupNamesRelative :: Path.Absolute -> Names0 -> Names0 -fixupNamesRelative currentPath' = Names3.map0 fixName where - prefix = Path.toName (Path.unabsolute currentPath') - fixName n = if currentPath' == Path.absoluteEmpty then n else - fromMaybe (Name.makeAbsolute n) (Name.stripNamePrefix prefix n) - -makeHistoricalParsingNames :: - Monad m => Set HQ.HashQualified -> Action' m v Names -makeHistoricalParsingNames lexedHQs = do - rawHistoricalNames <- findHistoricalHQs lexedHQs - basicNames0 <- basicParseNames0 - currentPath <- use currentPath - pure $ Names basicNames0 - (Names3.makeAbsolute0 rawHistoricalNames <> - fixupNamesRelative currentPath rawHistoricalNames) - -basicParseNames0, basicPrettyPrintNames0, slurpResultNames0 :: Functor m => Action' m v Names0 -basicParseNames0 = fst <$> basicNames0' -basicPrettyPrintNames0 = snd <$> basicNames0' --- we check the file against everything in the current path -slurpResultNames0 = currentPathNames0 - -currentPathNames0 :: Functor m => Action' m v Names0 -currentPathNames0 = do - currentPath' <- use currentPath - currentBranch' <- getAt currentPath' - pure $ Branch.toNames0 (Branch.head currentBranch') - --- implementation detail of baseicParseNames0 and basicPrettyPrintNames0 -basicNames0' :: Functor m => Action' m v (Names0, Names0) -basicNames0' = do - root' <- use root - currentPath' <- use currentPath - currentBranch' <- getAt currentPath' - let root0 = Branch.head root' - absoluteRootNames0 = Names3.makeAbsolute0 (Branch.toNames0 root0) - 
currentBranch0 = Branch.head currentBranch' - currentPathNames0 = Branch.toNames0 currentBranch0 - -- all names, but with local names in their relative form only, rather - -- than absolute; external names appear as absolute - currentAndExternalNames0 = currentPathNames0 `Names3.unionLeft0` absDot externalNames where - absDot = Names.prefix0 (Name.unsafeFromText "") - externalNames = rootNames `Names.difference` pathPrefixed currentPathNames0 - rootNames = Branch.toNames0 root0 - pathPrefixed = case Path.unabsolute currentPath' of - Path.Path (toList -> []) -> id - p -> Names.prefix0 (Path.toName p) - -- parsing should respond to local and absolute names - parseNames00 = currentPathNames0 <> absoluteRootNames0 - -- pretty-printing should use local names where available - prettyPrintNames00 = currentAndExternalNames0 - pure (parseNames00, prettyPrintNames00) - --- Given a typechecked file with a main function called `mainName` --- of the type `'{IO} ()`, adds an extra binding which --- forces the `main` function. --- --- If that function doesn't exist in the typechecked file, the --- codebase is consulted. -addRunMain - :: (Monad m, Var v) - => String - -> Maybe (TypecheckedUnisonFile v Ann) - -> Action' m v (Maybe (TypecheckedUnisonFile v Ann)) -addRunMain mainName Nothing = do - parseNames0 <- basicParseNames0 - let loadTypeOfTerm ref = eval $ LoadTypeOfTerm ref - mainType <- eval RuntimeMain - mainToFile <$> - getMainTerm loadTypeOfTerm parseNames0 mainName mainType - where - mainToFile (MainTerm.NotAFunctionName _) = Nothing - mainToFile (MainTerm.NotFound _) = Nothing - mainToFile (MainTerm.BadType _) = Nothing - mainToFile (MainTerm.Success hq tm typ) = Just $ - let v = Var.named (HQ.toText hq) in - UF.typecheckedUnisonFile mempty mempty mempty [("main",[(v, tm, typ)])] -- mempty -addRunMain mainName (Just uf) = do - let components = join $ UF.topLevelComponents uf - let mainComponent = filter ((\v -> Var.nameStr v == mainName) . 
view _1) components - mainType <- eval RuntimeMain - case mainComponent of - [(v, tm, ty)] -> pure $ let - v2 = Var.freshIn (Set.fromList [v]) v - a = ABT.annotation tm - in - if Typechecker.isSubtype ty mainType then Just $ let - runMain = DD.forceTerm a a (Term.var a v) - in UF.typecheckedUnisonFile - (UF.dataDeclarationsId' uf) - (UF.effectDeclarationsId' uf) - (UF.topLevelComponents' uf) - (UF.watchComponents uf <> [("main", [(v2, runMain, mainType)])]) - else Nothing - _ -> addRunMain mainName Nothing - -executePPE - :: (Var v, Monad m) - => TypecheckedUnisonFile v a - -> Action' m v PPE.PrettyPrintEnv -executePPE unisonFile = - -- voodoo - prettyPrintEnv =<< - makeShadowedPrintNamesFromLabeled - (UF.termSignatureExternalLabeledDependencies unisonFile) - (UF.typecheckedToNames0 unisonFile) - -diffHelper :: Monad m - => Branch0 m - -> Branch0 m - -> Action' m v (PPE.PrettyPrintEnv, OBranchDiff.BranchDiffOutput v Ann) -diffHelper before after = do - hqLength <- eval CodebaseHashLength - diff <- eval . Eval $ BranchDiff.diff0 before after - names0 <- basicPrettyPrintNames0 - ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl (Names names0 mempty) - (ppe,) <$> - OBranchDiff.toOutput - loadTypeOfTerm - declOrBuiltin - hqLength - (Branch.toNames0 before) - (Branch.toNames0 after) - ppe - diff - -loadTypeOfTerm :: Referent -> Action m i v (Maybe (Type v Ann)) -loadTypeOfTerm (Referent.Ref r) = eval $ LoadTypeOfTerm r -loadTypeOfTerm (Referent.Con (Reference.DerivedId r) cid _) = do - decl <- eval $ LoadType r - case decl of - Just (either DD.toDataDecl id -> dd) -> pure $ DD.typeOfConstructor dd cid - Nothing -> pure Nothing -loadTypeOfTerm Referent.Con{} = error $ - reportBug "924628772" "Attempt to load a type declaration which is a builtin!" - -declOrBuiltin :: Reference -> Action m i v (Maybe (DD.DeclOrBuiltin v Ann)) -declOrBuiltin r = case r of - Reference.Builtin{} -> - pure . 
fmap DD.Builtin $ Map.lookup r Builtin.builtinConstructorType - Reference.DerivedId id -> - fmap DD.Decl <$> eval (LoadType id) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Input.hs b/parser-typechecker/src/Unison/Codebase/Editor/Input.hs deleted file mode 100644 index f25665d400..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/Input.hs +++ /dev/null @@ -1,144 +0,0 @@ -module Unison.Codebase.Editor.Input - ( Input(..) - , Event(..) - , OutputLocation(..) - , PatchPath - , BranchId, parseBranchId - , HashOrHQSplit' - ) where - -import Unison.Prelude - -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.HashQualified as HQ -import qualified Unison.HashQualified' as HQ' -import Unison.Codebase.Path ( Path' ) -import qualified Unison.Codebase.Path as Path -import Unison.Codebase.Editor.RemoteRepo -import Unison.ShortHash (ShortHash) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import qualified Unison.Codebase.ShortBranchHash as SBH -import Unison.Codebase.SyncMode ( SyncMode ) -import qualified Data.Text as Text -import Unison.NameSegment ( NameSegment ) - -data Event - = UnisonFileChanged SourceName Source - | IncomingRootBranch (Set Branch.Hash) - -type Source = Text -- "id x = x\nconst a b = a" -type SourceName = Text -- "foo.u" or "buffer 7" -type PatchPath = Path.Split' -type BranchId = Either ShortBranchHash Path' -type HashOrHQSplit' = Either ShortHash Path.HQSplit' - -parseBranchId :: String -> Either String BranchId -parseBranchId ('#':s) = case SBH.fromText (Text.pack s) of - Nothing -> Left "Invalid hash, expected a base32hex string." - Just h -> pure $ Left h -parseBranchId s = Right <$> Path.parsePath' s - -data Input - -- names stuff: - -- directory ops - -- `Link` must describe a repo and a source path within that repo. 
- -- clone w/o merge, error if would clobber - = ForkLocalBranchI (Either ShortBranchHash Path') Path' - -- merge first causal into destination - | MergeLocalBranchI Path' Path' Branch.MergeMode - | PreviewMergeLocalBranchI Path' Path' - | DiffNamespaceI Path' Path' -- old new - | PullRemoteBranchI (Maybe RemoteNamespace) Path' SyncMode - | PushRemoteBranchI (Maybe RemoteHead) Path' SyncMode - | CreatePullRequestI RemoteNamespace RemoteNamespace - | LoadPullRequestI RemoteNamespace RemoteNamespace Path' - | ResetRootI (Either ShortBranchHash Path') - -- todo: Q: Does it make sense to publish to not-the-root of a Github repo? - -- Does it make sense to fork from not-the-root of a Github repo? - -- change directory - | SwitchBranchI Path' - | PopBranchI - -- > names foo - -- > names foo.bar - -- > names .foo.bar - -- > names .foo.bar#asdflkjsdf - -- > names #sdflkjsdfhsdf - | NamesI HQ.HashQualified - | AliasTermI HashOrHQSplit' Path.Split' - | AliasTypeI HashOrHQSplit' Path.Split' - | AliasManyI [Path.HQSplit] Path' - -- Move = Rename; It's an HQSplit' not an HQSplit', meaning the arg has to have a name. - | MoveTermI Path.HQSplit' Path.Split' - | MoveTypeI Path.HQSplit' Path.Split' - | MoveBranchI (Maybe Path.Split') Path.Split' - | MovePatchI Path.Split' Path.Split' - | CopyPatchI Path.Split' Path.Split' - -- delete = unname - | DeleteI Path.HQSplit' - | DeleteTermI Path.HQSplit' - | DeleteTypeI Path.HQSplit' - | DeleteBranchI (Maybe Path.Split') - | DeletePatchI Path.Split' - -- resolving naming conflicts within `branchpath` - -- Add the specified name after deleting all others for a given reference - -- within a given branch. 
- | ResolveTermNameI Path.HQSplit' - | ResolveTypeNameI Path.HQSplit' - -- edits stuff: - | LoadI (Maybe FilePath) - | AddI [HQ'.HashQualified] - | PreviewAddI [HQ'.HashQualified] - | UpdateI (Maybe PatchPath) [HQ'.HashQualified] - | PreviewUpdateI [HQ'.HashQualified] - | TodoI (Maybe PatchPath) Path' - | PropagatePatchI PatchPath Path' - | ListEditsI (Maybe PatchPath) - -- -- create and remove update directives - | DeprecateTermI PatchPath Path.HQSplit' - | DeprecateTypeI PatchPath Path.HQSplit' - | ReplaceTermI HQ.HashQualified HQ.HashQualified (Maybe PatchPath) - | ReplaceTypeI HQ.HashQualified HQ.HashQualified (Maybe PatchPath) - | RemoveTermReplacementI HQ.HashQualified (Maybe PatchPath) - | RemoveTypeReplacementI HQ.HashQualified (Maybe PatchPath) - | UndoI - -- First `Maybe Int` is cap on number of results, if any - -- Second `Maybe Int` is cap on diff elements shown, if any - | HistoryI (Maybe Int) (Maybe Int) BranchId - -- execute an IO thunk - | ExecuteI String - | TestI Bool Bool -- TestI showSuccesses showFailures - -- metadata - -- `link metadata definitions` (adds metadata to all of `definitions`) - | LinkI HQ.HashQualified [Path.HQSplit'] - -- `unlink metadata definitions` (removes metadata from all of `definitions`) - | UnlinkI HQ.HashQualified [Path.HQSplit'] - -- links from - | LinksI Path.HQSplit' (Maybe String) - | CreateAuthorI NameSegment {- identifier -} Text {- name -} - | DisplayI OutputLocation HQ.HashQualified - | DocsI Path.HQSplit' - -- other - | SearchByNameI Bool Bool [String] -- SearchByName isVerbose showAll query - | FindShallowI Path' - | FindPatchI - | ShowDefinitionI OutputLocation [HQ.HashQualified] - | ShowDefinitionByPrefixI OutputLocation [HQ.HashQualified] - | ShowReflogI - | UpdateBuiltinsI - | MergeBuiltinsI - | MergeIOBuiltinsI - | ListDependenciesI HQ.HashQualified - | ListDependentsI HQ.HashQualified - | DebugNumberedArgsI - | DebugBranchHistoryI - | DebugTypecheckedUnisonFileI - | QuitI - deriving (Eq, Show) - --- 
Some commands, like `view`, can dump output to either console or a file. -data OutputLocation - = ConsoleLocation - | LatestFileLocation - | FileLocation FilePath - -- ClipboardLocation - deriving (Eq, Show) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Output.hs b/parser-typechecker/src/Unison/Codebase/Editor/Output.hs deleted file mode 100644 index abc51a34cb..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/Output.hs +++ /dev/null @@ -1,359 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.Editor.Output - ( Output(..) - , NumberedOutput(..) - , NumberedArgs - , ListDetailed - , ShallowListEntry(..) - , HistoryTail(..) - , TestReportStats(..) - , UndoFailureReason(..) - , PushPull(..) - , ReflogEntry(..) - , pushPull - , isFailure - , isNumberedFailure - ) where - -import Unison.Prelude - -import Unison.Codebase.Editor.Input -import Unison.Codebase.Editor.SlurpResult (SlurpResult(..)) -import Unison.Codebase.GitError -import Unison.Codebase.Path (Path', Path) -import Unison.Codebase.Patch (Patch) -import Unison.Name ( Name ) -import Unison.Names2 ( Names ) -import Unison.Parser ( Ann ) -import qualified Unison.Reference as Reference -import Unison.Reference ( Reference ) -import Unison.Referent ( Referent ) -import Unison.DataDeclaration ( Decl ) -import Unison.Util.Relation (Relation) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Editor.SlurpResult as SR -import qualified Unison.Codebase.Metadata as Metadata -import qualified Unison.Codebase.Path as Path -import qualified Unison.Codebase.Runtime as Runtime -import qualified Unison.HashQualified as HQ -import qualified Unison.HashQualified' as HQ' -import qualified Unison.Parser as Parser -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Typechecker.Context as Context -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Pretty as P -import Unison.Codebase.Editor.DisplayThing (DisplayThing) 
-import qualified Unison.Codebase.Editor.TodoOutput as TO -import Unison.Codebase.Editor.SearchResult' (SearchResult') -import Unison.Term (Term) -import Unison.Type (Type) -import qualified Unison.Names3 as Names -import qualified Data.Set as Set -import Unison.NameSegment (NameSegment) -import Unison.ShortHash (ShortHash) -import Unison.Var (Var) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import Unison.Codebase.Editor.RemoteRepo as RemoteRepo -import Unison.Codebase.Editor.Output.BranchDiff (BranchDiffOutput) -import Unison.LabeledDependency (LabeledDependency) - -type ListDetailed = Bool -type SourceName = Text -type NumberedArgs = [String] - -data PushPull = Push | Pull deriving (Eq, Ord, Show) - -pushPull :: a -> a -> PushPull -> a -pushPull push pull p = case p of - Push -> push - Pull -> pull - -data NumberedOutput v - = ShowDiffNamespace Path.Absolute Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterUndo PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterDeleteDefinitions PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterDeleteBranch Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterModifyBranch Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterMerge Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterMergePropagate Path.Path' Path.Absolute Path.Path' PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterMergePreview Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterPull Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - | ShowDiffAfterCreatePR RemoteNamespace RemoteNamespace PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - -- - | ShowDiffAfterCreateAuthor NameSegment Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) - --- | ShowDiff - -data Output v - -- Generic Success response; we might consider deleting this. 
- = Success - -- User did `add` or `update` before typechecking a file? - | NoUnisonFile - | InvalidSourceName String - | SourceLoadFailed String - -- No main function, the [Type v Ann] are the allowed types - | NoMainFunction String PPE.PrettyPrintEnv [Type v Ann] - | BranchEmpty (Either ShortBranchHash Path') - | BranchNotEmpty Path' - | LoadPullRequest RemoteNamespace RemoteNamespace Path' Path' Path' Path' - | CreatedNewBranch Path.Absolute - | BranchAlreadyExists Path' - | PatchAlreadyExists Path.Split' - | NoExactTypeMatches - | TypeAlreadyExists Path.Split' (Set Reference) - | TypeParseError String (Parser.Err v) - | ParseResolutionFailures String [Names.ResolutionFailure v Ann] - | TypeHasFreeVars (Type v Ann) - | TermAlreadyExists Path.Split' (Set Referent) - | LabeledReferenceAmbiguous Int HQ.HashQualified (Set LabeledDependency) - | LabeledReferenceNotFound HQ.HashQualified - | DeleteNameAmbiguous Int Path.HQSplit' (Set Referent) (Set Reference) - | TermAmbiguous HQ.HashQualified (Set Referent) - | HashAmbiguous ShortHash (Set Referent) - | BranchHashAmbiguous ShortBranchHash (Set ShortBranchHash) - | BranchNotFound Path' - | NameNotFound Path.HQSplit' - | PatchNotFound Path.Split' - | TypeNotFound Path.HQSplit' - | TermNotFound Path.HQSplit' - | TypeNotFound' ShortHash - | TermNotFound' ShortHash - | SearchTermsNotFound [HQ.HashQualified] - -- ask confirmation before deleting the last branch that contains some defns - -- `Path` is one of the paths the user has requested to delete, and is paired - -- with whatever named definitions would not have any remaining names if - -- the path is deleted. 
- | DeleteBranchConfirmation - [(Path', (Names, [SearchResult' v Ann]))] - -- CantDelete input couldntDelete becauseTheseStillReferenceThem - | CantDelete PPE.PrettyPrintEnv [SearchResult' v Ann] [SearchResult' v Ann] - | DeleteEverythingConfirmation - | DeletedEverything - | ListNames Int -- hq length to print References - [(Reference, Set HQ'.HashQualified)] -- type match, type names - [(Referent, Set HQ'.HashQualified)] -- term match, term names - -- list of all the definitions within this branch - | ListOfDefinitions PPE.PrettyPrintEnv ListDetailed [SearchResult' v Ann] - | ListOfLinks PPE.PrettyPrintEnv [(HQ.HashQualified, Reference, Maybe (Type v Ann))] - | ListShallow PPE.PrettyPrintEnv [ShallowListEntry v Ann] - | ListOfPatches (Set Name) - -- show the result of add/update - | SlurpOutput Input PPE.PrettyPrintEnv (SlurpResult v) - -- Original source, followed by the errors: - | ParseErrors Text [Parser.Err v] - | TypeErrors Text PPE.PrettyPrintEnv [Context.ErrorNote v Ann] - | DisplayConflicts (Relation Name Referent) (Relation Name Reference) - | EvaluationFailure Runtime.Error - | Evaluated SourceFileContents - PPE.PrettyPrintEnv - [(v, Term v ())] - (Map v (Ann, UF.WatchKind, Term v (), Runtime.IsCacheHit)) - | Typechecked SourceName PPE.PrettyPrintEnv (SlurpResult v) (UF.TypecheckedUnisonFile v Ann) - | DisplayRendered (Maybe FilePath) (P.Pretty P.ColorText) - -- "display" definitions, possibly to a FilePath on disk (e.g. editing) - | DisplayDefinitions (Maybe FilePath) - PPE.PrettyPrintEnvDecl - (Map Reference (DisplayThing (Decl v Ann))) - (Map Reference (DisplayThing (Term v Ann))) - -- | Invariant: there's at least one conflict or edit in the TodoOutput. 
- | TodoOutput PPE.PrettyPrintEnvDecl (TO.TodoOutput v Ann) - | TestIncrementalOutputStart PPE.PrettyPrintEnv (Int,Int) Reference (Term v Ann) - | TestIncrementalOutputEnd PPE.PrettyPrintEnv (Int,Int) Reference (Term v Ann) - | TestResults TestReportStats - PPE.PrettyPrintEnv ShowSuccesses ShowFailures - [(Reference, Text)] -- oks - [(Reference, Text)] -- fails - | CantUndo UndoFailureReason - | ListEdits Patch PPE.PrettyPrintEnv - - -- new/unrepresented references followed by old/removed - -- todo: eventually replace these sets with [SearchResult' v Ann] - -- and a nicer render. - | BustedBuiltins (Set Reference) (Set Reference) - | GitError Input GitError - | ConfiguredMetadataParseError Path' String (P.Pretty P.ColorText) - | NoConfiguredGitUrl PushPull Path' - | ConfiguredGitUrlParseError PushPull Path' Text String - | ConfiguredGitUrlIncludesShortBranchHash PushPull RemoteRepo ShortBranchHash Path - | DisplayLinks PPE.PrettyPrintEnvDecl Metadata.Metadata - (Map Reference (DisplayThing (Decl v Ann))) - (Map Reference (DisplayThing (Term v Ann))) - | MetadataMissingType PPE.PrettyPrintEnv Referent - | MetadataAmbiguous HQ.HashQualified PPE.PrettyPrintEnv [Referent] - -- todo: tell the user to run `todo` on the same patch they just used - | NothingToPatch PatchPath Path' - | PatchNeedsToBeConflictFree - | PatchInvolvesExternalDependents PPE.PrettyPrintEnv (Set Reference) - | WarnIncomingRootBranch ShortBranchHash (Set ShortBranchHash) - | StartOfCurrentPathHistory - | History (Maybe Int) [(ShortBranchHash, Names.Diff)] HistoryTail - | ShowReflog [ReflogEntry] - | PullAlreadyUpToDate RemoteNamespace Path' - | MergeAlreadyUpToDate Path' Path' - | PreviewMergeAlreadyUpToDate Path' Path' - -- | No conflicts or edits remain for the current patch. 
- | NoConflictsOrEdits - | NotImplemented - | NoBranchWithHash ShortBranchHash - | ListDependencies Int LabeledDependency [(Name, Reference)] (Set Reference) - | ListDependents Int LabeledDependency [(Name, Reference)] (Set Reference) - | DumpNumberedArgs NumberedArgs - | DumpBitBooster Branch.Hash (Map Branch.Hash [Branch.Hash]) - | DumpUnisonFileHashes Int [(Name, Reference.Id)] [(Name, Reference.Id)] [(Name, Reference.Id)] - | BadName String - | DefaultMetadataNotification - | NoOp - deriving (Show) - -data ReflogEntry = - ReflogEntry { hash :: ShortBranchHash, reason :: Text } - deriving (Show) - -data ShallowListEntry v a - = ShallowTermEntry Referent HQ'.HQSegment (Maybe (Type v a)) - | ShallowTypeEntry Reference HQ'.HQSegment - | ShallowBranchEntry NameSegment Int -- number of child definitions - | ShallowPatchEntry NameSegment - deriving (Eq, Show) - --- requires Var v to derive Eq, which is required by Ord though not by `compare` -instance Var v => Ord (ShallowListEntry v a) where - compare x y = case compare (toNS x) (toNS y) of - EQ -> compare (toHash x) (toHash y) - c -> c - where - toNS = \case - ShallowTermEntry _ hq _ -> HQ'.toName hq - ShallowTypeEntry _ hq -> HQ'.toName hq - ShallowBranchEntry ns _ -> ns - ShallowPatchEntry ns -> ns - toHash :: ShallowListEntry v a -> Maybe ShortHash - toHash = \case - ShallowTermEntry _ hq _ -> HQ'.toHash hq - ShallowTypeEntry _ hq -> HQ'.toHash hq - ShallowBranchEntry _ _ -> Nothing - ShallowPatchEntry _ -> Nothing - -data HistoryTail = - EndOfLog ShortBranchHash | - MergeTail ShortBranchHash [ShortBranchHash] | - PageEnd ShortBranchHash Int -- PageEnd nextHash nextIndex - deriving (Show) - -data TestReportStats - = CachedTests TotalCount CachedCount - | NewlyComputed deriving Show - -type TotalCount = Int -- total number of tests -type CachedCount = Int -- number of tests found in the cache -type ShowSuccesses = Bool -- whether to list results or just summarize -type ShowFailures = Bool -- whether to list 
results or just summarize - -data UndoFailureReason = CantUndoPastStart | CantUndoPastMerge deriving Show - -type SourceFileContents = Text - -isFailure :: Ord v => Output v -> Bool -isFailure o = case o of - Success{} -> False - NoUnisonFile{} -> True - InvalidSourceName{} -> True - SourceLoadFailed{} -> True - NoMainFunction{} -> True - CreatedNewBranch{} -> False - BranchAlreadyExists{} -> True - PatchAlreadyExists{} -> True - NoExactTypeMatches -> True - BranchEmpty{} -> True - BranchNotEmpty{} -> True - TypeAlreadyExists{} -> True - TypeParseError{} -> True - ParseResolutionFailures{} -> True - TypeHasFreeVars{} -> True - TermAlreadyExists{} -> True - LabeledReferenceAmbiguous{} -> True - LabeledReferenceNotFound{} -> True - DeleteNameAmbiguous{} -> True - TermAmbiguous{} -> True - BranchHashAmbiguous{} -> True - BadName{} -> True - BranchNotFound{} -> True - NameNotFound{} -> True - PatchNotFound{} -> True - TypeNotFound{} -> True - TypeNotFound'{} -> True - TermNotFound{} -> True - TermNotFound'{} -> True - SearchTermsNotFound ts -> not (null ts) - DeleteBranchConfirmation{} -> False - CantDelete{} -> True - DeleteEverythingConfirmation -> False - DeletedEverything -> False - ListNames _ tys tms -> null tms && null tys - ListOfLinks _ ds -> null ds - ListOfDefinitions _ _ ds -> null ds - ListOfPatches s -> Set.null s - SlurpOutput _ _ sr -> not $ SR.isOk sr - ParseErrors{} -> True - TypeErrors{} -> True - DisplayConflicts{} -> False - EvaluationFailure{} -> True - Evaluated{} -> False - Typechecked{} -> False - DisplayDefinitions _ _ m1 m2 -> null m1 && null m2 - DisplayRendered{} -> False - TodoOutput _ todo -> TO.todoScore todo /= 0 - TestIncrementalOutputStart{} -> False - TestIncrementalOutputEnd{} -> False - TestResults _ _ _ _ _ fails -> not (null fails) - CantUndo{} -> True - ListEdits{} -> False - GitError{} -> True - BustedBuiltins{} -> True - ConfiguredMetadataParseError{} -> True - NoConfiguredGitUrl{} -> True - ConfiguredGitUrlParseError{} -> 
True - ConfiguredGitUrlIncludesShortBranchHash{} -> True - DisplayLinks{} -> False - MetadataMissingType{} -> True - MetadataAmbiguous{} -> True - PatchNeedsToBeConflictFree{} -> True - PatchInvolvesExternalDependents{} -> True - NothingToPatch{} -> False - WarnIncomingRootBranch{} -> False - History{} -> False - StartOfCurrentPathHistory -> True - NotImplemented -> True - DumpNumberedArgs{} -> False - DumpBitBooster{} -> False - NoBranchWithHash{} -> True - PullAlreadyUpToDate{} -> False - MergeAlreadyUpToDate{} -> False - PreviewMergeAlreadyUpToDate{} -> False - NoConflictsOrEdits{} -> False - ListShallow _ es -> null es - HashAmbiguous{} -> True - ShowReflog{} -> False - LoadPullRequest{} -> False - DefaultMetadataNotification -> False - NoOp -> False - ListDependencies{} -> False - ListDependents{} -> False - DumpUnisonFileHashes _ x y z -> x == mempty && y == mempty && z == mempty - -isNumberedFailure :: NumberedOutput v -> Bool -isNumberedFailure = \case - ShowDiffNamespace{} -> False - ShowDiffAfterDeleteDefinitions{} -> False - ShowDiffAfterDeleteBranch{} -> False - ShowDiffAfterModifyBranch{} -> False - ShowDiffAfterMerge{} -> False - ShowDiffAfterMergePropagate{} -> False - ShowDiffAfterMergePreview{} -> False - ShowDiffAfterUndo{} -> False - ShowDiffAfterPull{} -> False - ShowDiffAfterCreatePR{} -> False - ShowDiffAfterCreateAuthor{} -> False - - diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs b/parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs deleted file mode 100644 index d9d213f63b..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs +++ /dev/null @@ -1,338 +0,0 @@ -{-# Language DeriveFoldable, DeriveTraversable #-} -{-# LANGUAGE PartialTypeSignatures #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.Codebase.Editor.Output.BranchDiff where - -import Control.Lens (_1,view) -import Unison.Prelude -import Unison.Name (Name) -import 
qualified Unison.Codebase.Patch as P -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Codebase.BranchDiff as BranchDiff -import Unison.Codebase.BranchDiff (BranchDiff(BranchDiff), DiffSlice) -import qualified Unison.Util.Relation as R -import qualified Unison.Util.Relation3 as R3 -import qualified Unison.Codebase.Metadata as Metadata -import qualified Data.Set as Set -import qualified Data.Map as Map -import Unison.Util.Set (symmetricDifference) - -import Unison.Reference (Reference) -import Unison.Type (Type) -import Unison.HashQualified' (HashQualified) -import qualified Unison.HashQualified as HQ -import qualified Unison.Referent as Referent -import Unison.Referent (Referent) -import qualified Unison.Names2 as Names2 -import Unison.Names3 (Names0) -import Unison.DataDeclaration (DeclOrBuiltin) -import Unison.Runtime.IOSource (isPropagatedValue) - -data MetadataDiff tm = - MetadataDiff { addedMetadata :: [tm] - , removedMetadata :: [tm] } - deriving (Ord,Eq,Functor,Foldable,Traversable,Show) - -instance Semigroup (MetadataDiff tm) where - a <> b = MetadataDiff (addedMetadata a <> addedMetadata b) - (removedMetadata a <> removedMetadata b) - -instance Monoid (MetadataDiff tm) where - mempty = MetadataDiff mempty mempty - mappend = (<>) - -data BranchDiffOutput v a = BranchDiffOutput { - updatedTypes :: [UpdateTypeDisplay v a], - updatedTerms :: [UpdateTermDisplay v a], - newTypeConflicts :: [UpdateTypeDisplay v a], - newTermConflicts :: [UpdateTermDisplay v a], - resolvedTypeConflicts :: [UpdateTypeDisplay v a], - resolvedTermConflicts :: [UpdateTermDisplay v a], - propagatedUpdates :: Int, - updatedPatches :: [PatchDisplay], - addedTypes :: [AddedTypeDisplay v a], - addedTerms :: [AddedTermDisplay v a], - addedPatches :: [PatchDisplay], - removedTypes :: [RemovedTypeDisplay v a], - removedTerms :: [RemovedTermDisplay v a], - removedPatches :: [PatchDisplay], - renamedTypes :: [RenameTypeDisplay v a], - renamedTerms :: [RenameTermDisplay v 
a] - } deriving Show - -isEmpty :: BranchDiffOutput v a -> Bool -isEmpty BranchDiffOutput{..} = - null updatedTypes && null updatedTerms && - null newTypeConflicts && null newTermConflicts && - null resolvedTypeConflicts && null resolvedTermConflicts && - null addedTypes && null addedTerms && null addedPatches && - null removedTypes && null removedTerms && null removedPatches && - null renamedTypes && null renamedTerms && null updatedPatches && - propagatedUpdates == 0 - --- Need to be able to turn a (Name,Reference) into a HashQualified relative to... what. --- the new namespace? - -type TermDisplay v a = (HashQualified, Referent, Maybe (Type v a), MetadataDiff (MetadataDisplay v a)) -type TypeDisplay v a = (HashQualified, Reference, Maybe (DeclOrBuiltin v a), MetadataDiff (MetadataDisplay v a)) - -type AddedTermDisplay v a = ([(HashQualified, [MetadataDisplay v a])], Referent, Maybe (Type v a)) -type AddedTypeDisplay v a = ([(HashQualified, [MetadataDisplay v a])], Reference, Maybe (DeclOrBuiltin v a)) - -type RemovedTermDisplay v a = ([HashQualified], Referent, Maybe (Type v a)) -type RemovedTypeDisplay v a = ([HashQualified], Reference, Maybe (DeclOrBuiltin v a)) - -type SimpleTermDisplay v a = (HashQualified, Referent, Maybe (Type v a)) -type SimpleTypeDisplay v a = (HashQualified, Reference, Maybe (DeclOrBuiltin v a)) - -type UpdateTermDisplay v a = (Maybe [SimpleTermDisplay v a], [TermDisplay v a]) -type UpdateTypeDisplay v a = (Maybe [SimpleTypeDisplay v a], [TypeDisplay v a]) - -type MetadataDisplay v a = (HQ.HashQualified, Referent, Maybe (Type v a)) -type RenameTermDisplay v a = (Referent, Maybe (Type v a), Set HashQualified, Set HashQualified) -type RenameTypeDisplay v a = (Reference, Maybe (DeclOrBuiltin v a), Set HashQualified, Set HashQualified) -type PatchDisplay = (Name, P.PatchDiff) - -toOutput :: forall m v a - . 
Monad m - => (Referent -> m (Maybe (Type v a))) - -> (Reference -> m (Maybe (DeclOrBuiltin v a))) - -> Int - -> Names0 - -> Names0 - -> PPE.PrettyPrintEnv - -> BranchDiff.BranchDiff - -> m (BranchDiffOutput v a) -toOutput typeOf declOrBuiltin hqLen names1 names2 ppe - (BranchDiff termsDiff typesDiff patchesDiff) = do - let - -- | This calculates the new reference's metadata as: - -- adds: now-attached metadata that was missing from - -- any of the old references associated with the name - -- removes: not-attached metadata that had been attached to any of - -- the old references associated with the name - getNewMetadataDiff :: Ord r => Bool -> DiffSlice r -> Name -> Set r -> r -> MetadataDiff Metadata.Value - getNewMetadataDiff hidePropagatedMd s n rs_old r_new = let - old_metadatas :: [Set Metadata.Value] = - toList . R.toMultimap . R.restrictDom rs_old . R3.lookupD2 n $ - BranchDiff.tremovedMetadata s - old_intersection :: Set Metadata.Value = - foldl' Set.intersection mempty old_metadatas - old_union :: Set Metadata.Value = - foldl' Set.union mempty old_metadatas - new_metadata :: Set Metadata.Value = - R.lookupDom n . 
R3.lookupD1 r_new $ BranchDiff.taddedMetadata s - toDelete = if hidePropagatedMd then Set.singleton isPropagatedValue else mempty - in MetadataDiff - { addedMetadata = toList $ new_metadata `Set.difference` old_intersection `Set.difference` toDelete - , removedMetadata = toList $ old_union `Set.difference` new_metadata `Set.difference` toDelete - } - -- For the metadata on a definition to have changed, the name - -- and the reference must have existed before and the reference - -- must not have been removed and the name must not have been removed or added - -- or updated 😅 - -- "getMetadataUpdates" = a defn has been updated via change of metadata - getMetadataUpdates :: Ord r => DiffSlice r -> Map Name (Set r, Set r) - getMetadataUpdates s = Map.fromList - [ (n, (Set.singleton r, Set.singleton r)) -- the reference is unchanged - | (r,n,v) <- R3.toList $ BranchDiff.taddedMetadata s <> - BranchDiff.tremovedMetadata s - , R.notMember r n (BranchDiff.talladds s) - , R.notMember r n (BranchDiff.tallremoves s) - -- don't count it as a metadata update if it already's already a regular update - , let (oldRefs, newRefs) = - Map.findWithDefault mempty n (BranchDiff.tallnamespaceUpdates s) - in Set.notMember r oldRefs && Set.notMember r newRefs --- trenames :: Map r (Set Name, Set Name), -- ref (old, new) - , case Map.lookup r (BranchDiff.trenames s) of - Nothing -> True - Just (olds, news) -> - Set.notMember n (symmetricDifference olds news) - , v /= isPropagatedValue ] - - let isSimpleUpdate, isNewConflict, isResolvedConflict :: Eq r => (Set r, Set r) -> Bool - isSimpleUpdate (old, new) = Set.size old == 1 && Set.size new == 1 - isNewConflict (_old, new) = Set.size new > 1 -- should already be the case that old /= new - isResolvedConflict (old, new) = Set.size old > 1 && Set.size new == 1 - - (updatedTypes :: [UpdateTypeDisplay v a], - newTypeConflicts :: [UpdateTypeDisplay v a], - resolvedTypeConflicts :: [UpdateTypeDisplay v a]) <- let - -- things where what the name 
pointed to changed - nsUpdates :: Map Name (Set Reference, Set Reference) = - BranchDiff.namespaceUpdates typesDiff - -- things where the metadata changed (`uniqueBy` below removes these - -- if they were already included in `nsUpdates) - metadataUpdates = getMetadataUpdates typesDiff - loadOld :: Bool -> Name -> Reference -> m (SimpleTypeDisplay v a) - loadOld forceHQ n r_old = - (,,) <$> pure (if forceHQ - then Names2.hqTypeName' hqLen n r_old - else Names2.hqTypeName hqLen names1 n r_old) - <*> pure r_old - <*> declOrBuiltin r_old - loadNew :: Bool -> Bool -> Name -> Set Reference -> Reference -> m (TypeDisplay v a) - loadNew hidePropagatedMd forceHQ n rs_old r_new = - (,,,) <$> pure (if forceHQ - then Names2.hqTypeName' hqLen n r_new - else Names2.hqTypeName hqLen names2 n r_new) - <*> pure r_new - <*> declOrBuiltin r_new - <*> fillMetadata ppe (getNewMetadataDiff hidePropagatedMd typesDiff n rs_old r_new) - loadEntry :: Bool -> (Name, (Set Reference, Set Reference)) -> m (UpdateTypeDisplay v a) - loadEntry hidePropagatedMd (n, (Set.toList -> [rold], Set.toList -> [rnew])) | rold == rnew = - (Nothing,) <$> for [rnew] (loadNew hidePropagatedMd False n (Set.singleton rold)) - loadEntry hidePropagatedMd (n, (rs_old, rs_new)) = - let forceHQ = Set.size rs_old > 1 || Set.size rs_new > 1 in - (,) <$> (Just <$> for (toList rs_old) (loadOld forceHQ n)) - <*> for (toList rs_new) (loadNew hidePropagatedMd forceHQ n rs_old) - in liftA3 (,,) - (sortOn (view _1 . head . 
snd) <$> liftA2 (<>) - (for (Map.toList $ Map.filter isSimpleUpdate nsUpdates) (loadEntry True)) - (for (Map.toList metadataUpdates) (loadEntry False))) - (for (Map.toList $ Map.filter isNewConflict nsUpdates) (loadEntry True)) - (for (Map.toList $ Map.filter isResolvedConflict nsUpdates) (loadEntry True)) - - (updatedTerms :: [UpdateTermDisplay v a], - newTermConflicts :: [UpdateTermDisplay v a], - resolvedTermConflicts :: [UpdateTermDisplay v a]) <- let - -- things where what the name pointed to changed - nsUpdates = BranchDiff.namespaceUpdates termsDiff - -- things where the metadata changed (`uniqueBy` below removes these - -- if they were already included in `nsUpdates) - metadataUpdates = getMetadataUpdates termsDiff - loadOld forceHQ n r_old = - (,,) <$> pure (if forceHQ then Names2.hqTermName' hqLen n r_old - else Names2.hqTermName hqLen names1 n r_old) - <*> pure r_old - <*> typeOf r_old - loadNew hidePropagatedMd forceHQ n rs_old r_new = - (,,,) <$> pure (if forceHQ then Names2.hqTermName' hqLen n r_new - else Names2.hqTermName hqLen names2 n r_new) - <*> pure r_new - <*> typeOf r_new - <*> fillMetadata ppe (getNewMetadataDiff hidePropagatedMd termsDiff n rs_old r_new) - loadEntry hidePropagatedMd (n, (rs_old, rs_new)) - -- if the references haven't changed, it's code for: only the metadata has changed - -- and we can ignore the old references in the output. - | rs_old == rs_new = (Nothing,) <$> for (toList rs_new) (loadNew hidePropagatedMd False n rs_old) - | otherwise = let forceHQ = Set.size rs_old > 1 || Set.size rs_new > 1 in - (,) <$> (Just <$> for (toList rs_old) (loadOld forceHQ n)) - <*> for (toList rs_new) (loadNew hidePropagatedMd forceHQ n rs_old) - in liftA3 (,,) - -- this is sorting the Update section back into alphabetical Name order - -- after calling loadEntry on the two halves. - (sortOn (view _1 . head . 
snd) <$> liftA2 (<>) - (for (Map.toList $ Map.filter isSimpleUpdate nsUpdates) (loadEntry True)) - (for (Map.toList metadataUpdates) (loadEntry False))) - (for (Map.toList $ Map.filter isNewConflict nsUpdates) (loadEntry True)) - (for (Map.toList $ Map.filter isResolvedConflict nsUpdates) (loadEntry True)) - - let propagatedUpdates :: Int = - -- counting the number of named auto-propagated definitions - (Set.size . Set.unions . toList . BranchDiff.propagatedUpdates) typesDiff + - (Set.size . Set.unions . toList . BranchDiff.propagatedUpdates) termsDiff - - let updatedPatches :: [PatchDisplay] = - [(name, diff) | (name, BranchDiff.Modify diff) <- Map.toList patchesDiff] - - addedTypes :: [AddedTypeDisplay v a] <- do - let typeAdds :: [(Reference, [(Name, [Metadata.Value])])] = sortOn snd - [ (r, nsmd) - | (r, ns) <- Map.toList . R.toMultimap . BranchDiff.talladds $ typesDiff - , let nsmd = [ (n, toList $ getAddedMetadata r n typesDiff) - | n <- toList ns ] - ] - for typeAdds $ \(r, nsmd) -> do - hqmds :: [(HashQualified, [MetadataDisplay v a])] <- - for nsmd $ \(n, mdRefs) -> - (,) <$> pure (Names2.hqTypeName hqLen names2 n r) - <*> fillMetadata ppe mdRefs - (hqmds, r, ) <$> declOrBuiltin r - - addedTerms :: [AddedTermDisplay v a] <- do - let termAdds :: [(Referent, [(Name, [Metadata.Value])])] = sortOn snd - [ (r, nsmd) - | (r, ns) <- Map.toList . R.toMultimap . BranchDiff.talladds $ termsDiff - , let nsmd = [ (n, toList $ getAddedMetadata r n termsDiff) - | n <- toList ns ] - ] - for termAdds $ \(r, nsmd) -> do - hqmds <- for nsmd $ \(n, mdRefs) -> - (,) <$> pure (Names2.hqTermName hqLen names2 n r) - <*> fillMetadata ppe mdRefs - (hqmds, r, ) <$> typeOf r - - let addedPatches :: [PatchDisplay] = - [ (name, diff) - | (name, BranchDiff.Create diff) <- Map.toList patchesDiff ] - - removedTypes :: [RemovedTypeDisplay v a] <- let - typeRemoves :: [(Reference, [Name])] = sortOn snd $ - Map.toList . fmap toList . R.toMultimap . 
BranchDiff.tallremoves $ typesDiff - in for typeRemoves $ \(r, ns) -> - (,,) <$> pure ((\n -> Names2.hqTypeName hqLen names1 n r) <$> ns) - <*> pure r - <*> declOrBuiltin r - - removedTerms :: [RemovedTermDisplay v a] <- let - termRemoves :: [(Referent, [Name])] = sortOn snd $ - Map.toList . fmap toList . R.toMultimap . BranchDiff.tallremoves $ termsDiff - in for termRemoves $ \(r, ns) -> - (,,) <$> pure ((\n -> Names2.hqTermName hqLen names1 n r) <$> ns) - <*> pure r - <*> typeOf r - - let removedPatches :: [PatchDisplay] = - [ (name, diff) - | (name, BranchDiff.Delete diff) <- Map.toList patchesDiff ] - - let renamedTerm :: Map Referent (Set Name, Set Name) -> m [RenameTermDisplay v a] - renamedTerm renames = - for (sortOn snd $ Map.toList renames) $ \(r, (ol'names, new'names)) -> - (,,,) <$> pure r - <*> typeOf r - <*> pure (Set.map (\n -> Names2.hqTermName hqLen names1 n r) ol'names) - <*> pure (Set.map (\n -> Names2.hqTermName hqLen names2 n r) new'names) - - let renamedType :: Map Reference (Set Name, Set Name) -> m [RenameTypeDisplay v a] - renamedType renames = - for (sortOn snd $ Map.toList renames) $ \(r, (ol'names, new'names)) -> - (,,,) <$> pure r - <*> declOrBuiltin r - <*> pure (Set.map (\n -> Names2.hqTypeName hqLen names1 n r) ol'names) - <*> pure (Set.map (\n -> Names2.hqTypeName hqLen names2 n r) new'names) - - renamedTypes :: [RenameTypeDisplay v a] <- renamedType (BranchDiff.trenames typesDiff) - renamedTerms :: [RenameTermDisplay v a] <- renamedTerm (BranchDiff.trenames termsDiff) - - pure $ BranchDiffOutput - updatedTypes - updatedTerms - newTypeConflicts - newTermConflicts - resolvedTypeConflicts - resolvedTermConflicts - propagatedUpdates - updatedPatches - addedTypes - addedTerms - addedPatches - removedTypes - removedTerms - removedPatches - renamedTypes - renamedTerms - where - fillMetadata :: Traversable t => PPE.PrettyPrintEnv -> t Metadata.Value -> m (t (MetadataDisplay v a)) - fillMetadata ppe = traverse $ -- metadata values are all 
terms - \(Referent.Ref -> mdRef) -> - let name = PPE.termName ppe mdRef - in (name, mdRef, ) <$> typeOf mdRef - getMetadata :: Ord r => r -> Name -> R3.Relation3 r Name Metadata.Value -> Set Metadata.Value - getMetadata r n = R.lookupDom n . R3.lookupD1 r - - getAddedMetadata :: Ord r => r -> Name -> BranchDiff.DiffSlice r -> Set Metadata.Value - getAddedMetadata r n slice = getMetadata r n $ BranchDiff.taddedMetadata slice diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs b/parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs deleted file mode 100644 index 9ec10150f8..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs +++ /dev/null @@ -1,522 +0,0 @@ -{-# OPTIONS_GHC -Wno-partial-type-signatures #-} -{-# LANGUAGE PartialTypeSignatures #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.Codebase.Editor.Propagate where - -import Control.Error.Util ( hush ) -import Control.Lens -import Data.Configurator ( ) -import qualified Data.Graph as Graph -import qualified Data.Map as Map -import qualified Data.Set as Set -import Unison.Codebase.Branch ( Branch0(..) ) -import Unison.Prelude -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.Editor.Command -import Unison.Codebase.Editor.Output -import Unison.Codebase.Patch ( Patch(..) ) -import qualified Unison.Codebase.Patch as Patch -import Unison.DataDeclaration ( Decl ) -import qualified Unison.DataDeclaration as Decl -import Unison.Names3 ( Names0 ) -import qualified Unison.Names2 as Names -import Unison.Parser ( Ann(..) ) -import Unison.Reference ( Reference(..) 
) -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.Result as Result -import qualified Unison.Term as Term -import Unison.Term ( Term ) -import Unison.Util.Free ( Free - , eval - ) -import qualified Unison.Util.Relation as R -import Unison.Util.TransitiveClosure ( transitiveClosure ) -import Unison.Var ( Var ) -import qualified Unison.Codebase.Metadata as Metadata -import qualified Unison.Codebase.TypeEdit as TypeEdit -import Unison.Codebase.TermEdit ( TermEdit(..) ) -import qualified Unison.Codebase.TermEdit as TermEdit -import Unison.Codebase.TypeEdit ( TypeEdit(..) ) -import Unison.UnisonFile ( UnisonFile(..) ) -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Star3 as Star3 -import Unison.Type ( Type ) -import qualified Unison.Type as Type -import qualified Unison.Typechecker as Typechecker -import Unison.ConstructorType ( ConstructorType ) -import qualified Unison.Runtime.IOSource as IOSource - -type F m i v = Free (Command m i v) - -data Edits v = Edits - { termEdits :: Map Reference TermEdit - -- same info as `termEdits` but in more efficient form for calling `Term.updateDependencies` - , termReplacements :: Map Reference Reference - , newTerms :: Map Reference (Term v Ann, Type v Ann) - , typeEdits :: Map Reference TypeEdit - , typeReplacements :: Map Reference Reference - , newTypes :: Map Reference (Decl v Ann) - , constructorReplacements :: Map (Reference, Int, ConstructorType) - (Reference, Int, ConstructorType) - } deriving (Eq, Show) - -noEdits :: Edits v -noEdits = Edits mempty mempty mempty mempty mempty mempty mempty - -propagateAndApply - :: forall m i v - . (Applicative m, Var v) - => Patch - -> Branch0 m - -> F m i v (Branch0 m) -propagateAndApply patch branch = do - edits <- propagate patch branch - f <- applyPropagate patch edits - (pure . f . 
applyDeprecations patch) branch - - --- Creates a mapping from old data constructors to new data constructors --- by looking at the original names for the data constructors which are --- embedded in the Decl object because we carefully planned that. -generateConstructorMapping - :: Eq v - => Map v (Reference, Decl v _) - -> Map v (Reference, Decl.DataDeclaration v _) - -> Map - (Reference, Int, ConstructorType) - (Reference, Int, ConstructorType) -generateConstructorMapping oldComponent newComponent = Map.fromList - [ let t = Decl.constructorType oldDecl in ((oldR, oldC, t), (newR, newC, t)) - | (v1, (oldR, oldDecl)) <- Map.toList oldComponent - , (v2, (newR, newDecl)) <- Map.toList newComponent - , v1 == v2 - , (oldC, (_, oldName, _)) <- zip [0 ..] - $ Decl.constructors' (Decl.asDataDecl oldDecl) - , (newC, (_, newName, _)) <- zip [0 ..] $ Decl.constructors' newDecl - , oldName == newName - ] - --- Note: this function adds definitions to the codebase as it propagates. --- Description: ------------------- --- For any `Reference` in the frontier which has an unconflicted --- term edit, `old -> new`, replace `old` with `new` in dependents of the --- frontier, and call `propagate'` recursively on the new frontier if --- the dependents still typecheck. --- --- If the term is `Typing.Same`, the dependents don't need to be typechecked. --- If the term is `Typing.Subtype`, and the dependent only has inferred type, --- it should be re-typechecked, and the new inferred type should be used. --- --- This will create a whole bunch of new terms and types in the codebase and --- move the names onto those new terms. Uses `updateDependencies` to perform --- the substitutions. 
--- --- Algorithm: ----------------- --- compute the frontier relation (dependencies of updated terms and types) --- for each dirty definition d: --- for each member c of cycle(d): --- construct c', an updated c incorporating all edits --- Add an edit c -> c' --- and save c' to a `Map Reference Term` or `Map Reference Type` --- as appropriate --- Collect all c' into a new cycle and typecheck (TODO: kindcheck) that cycle. --- If the cycle doesn't check, discard edits to that cycle. --- --- "dirty" means in need of update --- "frontier" means updated definitions responsible for the "dirty" -propagate - :: forall m i v - . (Applicative m, Var v) - => Patch - -> Branch0 m - -> F m i v (Edits v) -propagate patch b = case validatePatch patch of - Nothing -> do - eval $ Notify PatchNeedsToBeConflictFree - pure noEdits - Just (initialTermEdits, initialTypeEdits) -> do - let - entireBranch = Set.union - (Branch.deepTypeReferences b) - (Set.fromList - [ r | Referent.Ref r <- Set.toList $ Branch.deepReferents b ] - ) - initialDirty <- - R.dom <$> computeFrontier (eval . 
GetDependents) patch names0 - order <- sortDependentsGraph initialDirty entireBranch - let - - getOrdered :: Set Reference -> Map Int Reference - getOrdered rs = - Map.fromList [ (i, r) | r <- toList rs, Just i <- [Map.lookup r order] ] - collectEdits - :: (Applicative m, Var v) - => Edits v - -> Set Reference - -> Map Int Reference - -> F m i v (Edits v) - collectEdits es@Edits {..} seen todo = case Map.minView todo of - Nothing -> pure es - Just (r, todo) -> case r of - Reference.Builtin _ -> collectEdits es seen todo - Reference.DerivedId _ -> go r todo - where - go r todo = - if Map.member r termEdits - || Map.member r typeEdits - || Set.member r seen - then - collectEdits es seen todo - else - do - haveType <- eval $ IsType r - haveTerm <- eval $ IsTerm r - let message = - "This reference is not a term nor a type " <> show r - mmayEdits | haveTerm = doTerm r - | haveType = doType r - | otherwise = error message - mayEdits <- mmayEdits - case mayEdits of - (Nothing , seen') -> collectEdits es seen' todo - (Just edits', seen') -> do - -- plan to update the dependents of this component too - dependents <- - fmap Set.unions - . traverse (eval . GetDependents) - . toList - . Reference.members - $ Reference.componentFor r - let todo' = todo <> getOrdered dependents - collectEdits edits' seen' todo' - doType :: Reference -> F m i v (Maybe (Edits v), Set Reference) - doType r = do - componentMap <- unhashTypeComponent r - let componentMap' = - over _2 (Decl.updateDependencies typeReplacements) - <$> componentMap - declMap = over _2 (either Decl.toDataDecl id) <$> componentMap' - -- TODO: kind-check the new components - hashedDecls = (fmap . fmap) (over _2 DerivedId) - . Decl.hashDecls - $ view _2 <$> declMap - hashedComponents' <- case hashedDecls of - Left _ -> - error - $ "Edit propagation failed because some of the dependencies of " - <> show r - <> " could not be resolved." - Right c -> pure . 
Map.fromList $ (\(v, r, d) -> (v, (r, d))) <$> c - let - -- Relation: (nameOfType, oldRef, newRef, newType) - joinedStuff - :: [(v, (Reference, Reference, Decl.DataDeclaration v _))] - joinedStuff = - Map.toList (Map.intersectionWith f declMap hashedComponents') - f (oldRef, _) (newRef, newType) = (oldRef, newRef, newType) - typeEdits' = typeEdits <> (Map.fromList . fmap toEdit) joinedStuff - toEdit (_, (r, r', _)) = (r, TypeEdit.Replace r') - typeReplacements' = typeReplacements - <> (Map.fromList . fmap toReplacement) joinedStuff - toReplacement (_, (r, r', _)) = (r, r') - -- New types this iteration - newNewTypes = (Map.fromList . fmap toNewType) joinedStuff - -- Accumulated new types - newTypes' = newTypes <> newNewTypes - toNewType (v, (_, r', tp)) = - ( r' - , case Map.lookup v componentMap of - Just (_, Left _ ) -> Left (Decl.EffectDeclaration tp) - Just (_, Right _) -> Right tp - _ -> error "It's not gone well!" - ) - seen' = seen <> Set.fromList (view _1 . view _2 <$> joinedStuff) - writeTypes = - traverse_ (\(Reference.DerivedId id, tp) -> eval $ PutDecl id tp) - constructorMapping = - constructorReplacements - <> generateConstructorMapping componentMap hashedComponents' - writeTypes $ Map.toList newNewTypes - pure - ( Just $ Edits termEdits - termReplacements - newTerms - typeEdits' - typeReplacements' - newTypes' - constructorMapping - , seen' - ) - doTerm :: Reference -> F m i v (Maybe (Edits v), Set Reference) - doTerm r = do - componentMap <- unhashTermComponent r - let componentMap' = - over - _2 - (Term.updateDependencies termReplacements typeReplacements) - <$> componentMap - seen' = seen <> Set.fromList (view _1 <$> Map.elems componentMap) - mayComponent <- verifyTermComponent componentMap' es - case mayComponent of - Nothing -> pure (Nothing, seen') - Just componentMap'' -> do - let - joinedStuff = - toList (Map.intersectionWith f componentMap componentMap'') - f (oldRef, _oldTerm, oldType) (newRef, newTerm, newType) = - (oldRef, newRef, 
newTerm, oldType, newType') - -- Don't replace the type if it hasn't changed. - - where - newType' | Typechecker.isEqual oldType newType = oldType - | otherwise = newType - -- collect the hashedComponents into edits/replacements/newterms/seen - termEdits' = - termEdits <> (Map.fromList . fmap toEdit) joinedStuff - toEdit (r, r', _newTerm, oldType, newType) = - (r, TermEdit.Replace r' $ TermEdit.typing newType oldType) - termReplacements' = termReplacements - <> (Map.fromList . fmap toReplacement) joinedStuff - toReplacement (r, r', _, _, _) = (r, r') - newTerms' = - newTerms <> (Map.fromList . fmap toNewTerm) joinedStuff - toNewTerm (_, r', tm, _, tp) = (r', (tm, tp)) - writeTerms = - traverse_ - (\(Reference.DerivedId id, (tm, tp)) -> - eval $ PutTerm id tm tp - ) - writeTerms - [ (r, (tm, ty)) | (_old, r, tm, _oldTy, ty) <- joinedStuff ] - pure - ( Just $ Edits termEdits' - termReplacements' - newTerms' - typeEdits - typeReplacements - newTypes - constructorReplacements - , seen' - ) - collectEdits - (Edits initialTermEdits - (Map.mapMaybe TermEdit.toReference initialTermEdits) - mempty - initialTypeEdits - (Map.mapMaybe TypeEdit.toReference initialTypeEdits) - mempty - mempty - ) - mempty -- things to skip - (getOrdered initialDirty) - where - sortDependentsGraph :: Set Reference -> Set Reference -> _ (Map Reference Int) - sortDependentsGraph dependencies restrictTo = do - closure <- transitiveClosure - (fmap (Set.intersection restrictTo) . eval . GetDependents) - dependencies - dependents <- traverse (\r -> (r, ) <$> (eval . GetDependents) r) - (toList closure) - let graphEdges = [ (r, r, toList deps) | (r, deps) <- toList dependents ] - (graph, getReference, _) = Graph.graphFromEdges graphEdges - pure $ Map.fromList - (zip (view _1 . getReference <$> Graph.topSort graph) [0 ..]) - -- vertex i precedes j whenever i has an edge to j and not vice versa. - -- vertex i precedes j when j is a dependent of i. 
- names0 = Branch.toNames0 b - validatePatch - :: Patch -> Maybe (Map Reference TermEdit, Map Reference TypeEdit) - validatePatch p = - (,) <$> R.toMap (Patch._termEdits p) <*> R.toMap (Patch._typeEdits p) - -- Turns a cycle of references into a term with free vars that we can edit - -- and hash again. - -- todo: Maybe this an others can be moved to HandleCommand, in the - -- Free (Command m i v) monad, passing in the actions that are needed. - -- However, if we want this to be parametric in the annotation type, then - -- Command would have to be made parametric in the annotation type too. - unhashTermComponent - :: forall m v - . (Applicative m, Var v) - => Reference - -> F m i v (Map v (Reference, Term v _, Type v _)) - unhashTermComponent ref = do - let component = Reference.members $ Reference.componentFor ref - termInfo - :: Reference -> F m i v (Maybe (Reference, (Term v Ann, Type v Ann))) - termInfo termRef = do - tpm <- eval $ LoadTypeOfTerm termRef - tp <- maybe (error $ "Missing type for term " <> show termRef) - pure - tpm - case termRef of - Reference.DerivedId id -> do - mtm <- eval $ LoadTerm id - tm <- maybe (error $ "Missing term with id " <> show id) pure mtm - pure $ Just (termRef, (tm, tp)) - Reference.Builtin{} -> pure Nothing - unhash m = - let f (_oldTm, oldTyp) (v, newTm) = (v, newTm, oldTyp) - m' = Map.intersectionWith f m (Term.unhashComponent (fst <$> m)) - in Map.fromList - [ (v, (r, tm, tp)) | (r, (v, tm, tp)) <- Map.toList m' ] - unhash . Map.fromList . catMaybes <$> traverse termInfo (toList component) - unhashTypeComponent - :: forall m v - . 
(Applicative m, Var v) - => Reference - -> F m i v (Map v (Reference, Decl v _)) - unhashTypeComponent ref = do - let - component = Reference.members $ Reference.componentFor ref - typeInfo :: Reference -> F m i v (Maybe (Reference, Decl v Ann)) - typeInfo typeRef = case typeRef of - Reference.DerivedId id -> do - declm <- eval $ LoadType id - decl <- maybe (error $ "Missing type declaration " <> show typeRef) - pure - declm - pure $ Just (typeRef, decl) - Reference.Builtin{} -> pure Nothing - unhash = - Map.fromList . map reshuffle . Map.toList . Decl.unhashComponent - where reshuffle (r, (v, decl)) = (v, (r, decl)) - unhash . Map.fromList . catMaybes <$> traverse typeInfo (toList component) - verifyTermComponent - :: Map v (Reference, Term v _, a) - -> Edits v - -> F m i v (Maybe (Map v (Reference, Term v _, Type v _))) - verifyTermComponent componentMap Edits {..} = do - -- If the term contains references to old patterns, we can't update it. - -- If the term had a redunant type signature, it's discarded and a new type - -- is inferred. If it wasn't redunant, we have already substituted any updates - -- into it and we're going to check against that signature. - -- - -- Note: This only works if the type update is kind-preserving. - let - -- See if the constructor dependencies of any element of the cycle - -- contains one of the old types. - terms = Map.elems $ view _2 <$> componentMap - oldTypes = Map.keysSet typeEdits - if not . Set.null $ Set.intersection - (foldMap Term.constructorDependencies terms) - oldTypes - then pure Nothing - else do - let file = UnisonFileId - mempty - mempty - (Map.toList $ (\(_, tm, _) -> tm) <$> componentMap) - mempty - typecheckResult <- eval $ TypecheckFile file [] - pure - . fmap UF.hashTerms - $ runIdentity (Result.toMaybe typecheckResult) - >>= hush - -applyDeprecations :: Applicative m => Patch -> Branch0 m -> Branch0 m -applyDeprecations patch = deleteDeprecatedTerms deprecatedTerms - . 
deleteDeprecatedTypes deprecatedTypes - where - deprecatedTerms, deprecatedTypes :: Set Reference - deprecatedTerms = Set.fromList - [ r | (r, TermEdit.Deprecate) <- R.toList (Patch._termEdits patch) ] - deprecatedTypes = Set.fromList - [ r | (r, TypeEdit.Deprecate) <- R.toList (Patch._typeEdits patch) ] - deleteDeprecatedTerms, deleteDeprecatedTypes - :: Set Reference -> Branch0 m -> Branch0 m - deleteDeprecatedTerms rs = - over Branch.terms (Star3.deleteFact (Set.map Referent.Ref rs)) - deleteDeprecatedTypes rs = over Branch.types (Star3.deleteFact rs) - --- | Things in the patch are not marked as propagated changes, but every other --- definition that is created by the `Edits` which is passed in is marked as --- a propagated change. -applyPropagate - :: Var v => Applicative m => Patch -> Edits v -> F m i v (Branch0 m -> Branch0 m) -applyPropagate patch Edits {..} = do - let termRefs = Map.mapMaybe TermEdit.toReference termEdits - typeRefs = Map.mapMaybe TypeEdit.toReference typeEdits - termTypes = Map.map (Type.toReference . 
snd) newTerms - -- recursively update names and delete deprecated definitions - pure $ Branch.stepEverywhere (updateLevel termRefs typeRefs termTypes) - where - updateLevel - :: Map Reference Reference - -> Map Reference Reference - -> Map Reference Reference - -> Branch0 m - -> Branch0 m - updateLevel termEdits typeEdits termTypes Branch0 {..} = - Branch.branch0 termsWithCons types _children _edits - where - isPropagated = (`Set.notMember` allPatchTargets) where - allPatchTargets = Patch.allReferenceTargets patch - - terms = foldl' replaceTerm _terms (Map.toList termEdits) - types = foldl' replaceType _types (Map.toList typeEdits) - - updateMetadata r r' (tp, v) = if v == r then (typeOf r' tp, r') else (tp, v) - where typeOf r t = fromMaybe t $ Map.lookup r termTypes - - propagatedMd :: r -> (r, Metadata.Type, Metadata.Value) - propagatedMd r = (r, IOSource.isPropagatedReference, IOSource.isPropagatedValue) - termsWithCons = - foldl' replaceConstructor terms (Map.toList constructorReplacements) - replaceTerm s (r, r') = - (if isPropagated r' - then Metadata.insert (propagatedMd (Referent.Ref r')) - else Metadata.delete (propagatedMd (Referent.Ref r'))) . - Star3.replaceFact (Referent.Ref r) (Referent.Ref r') $ - Star3.mapD3 (updateMetadata r r') s - - replaceConstructor s ((oldr, oldc, oldt), (newr, newc, newt)) = - -- always insert the metadata since patches can't contain ctor mappings (yet) - Metadata.insert (propagatedMd con') . - Star3.replaceFact (Referent.Con oldr oldc oldt) con' $ s - where - con' = Referent.Con newr newc newt - replaceType s (r, r') = - (if isPropagated r' then Metadata.insert (propagatedMd r') - else Metadata.delete (propagatedMd r')) . 
- Star3.replaceFact r r' $ s - - -- typePreservingTermEdits :: Patch -> Patch - -- typePreservingTermEdits Patch {..} = Patch termEdits mempty - -- where termEdits = R.filterRan TermEdit.isTypePreserving _termEdits - --- (d, f) when d is "dirty" (needs update), --- f is in the frontier (an edited dependency of d), --- and d depends on f --- a ⋖ b = a depends directly on b --- dirty(d) ∧ frontier(f) <=> not(edited(d)) ∧ edited(f) ∧ d ⋖ f --- --- The range of this relation is the frontier, and the domain is --- the set of dirty references. -computeFrontier - :: forall m - . Monad m - => (Reference -> m (Set Reference)) -- eg Codebase.dependents codebase - -> Patch - -> Names0 - -> m (R.Relation Reference Reference) -computeFrontier getDependents patch names = do - -- (r,r2) ∈ dependsOn if r depends on r2 - dependsOn <- foldM addDependents R.empty edited - -- Dirty is everything that `dependsOn` Frontier, minus already edited defns - pure $ R.filterDom (not . flip Set.member edited) dependsOn - where - edited :: Set Reference - edited = R.dom (Patch._termEdits patch) <> R.dom (Patch._typeEdits patch) - addDependents - :: R.Relation Reference Reference - -> Reference - -> m (R.Relation Reference Reference) - addDependents dependents ref = - (\ds -> R.insertManyDom ds ref dependents) - . 
Set.filter (Names.contains names) - <$> getDependents ref diff --git a/parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs b/parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs deleted file mode 100644 index 9648b398f0..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs +++ /dev/null @@ -1,32 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE OverloadedStrings #-} -module Unison.Codebase.Editor.RemoteRepo where - -import Unison.Prelude -import Unison.Util.Monoid as Monoid -import Data.Text as Text -import qualified Unison.Codebase.Path as Path -import Unison.Codebase.Path (Path) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import qualified Unison.Codebase.ShortBranchHash as SBH - -data RemoteRepo = GitRepo { url :: Text, commit :: Maybe Text } - deriving (Eq, Ord, Show) - -printRepo :: RemoteRepo -> Text -printRepo GitRepo{..} = url <> Monoid.fromMaybe (Text.cons ':' <$> commit) - -printNamespace :: RemoteRepo -> Maybe ShortBranchHash -> Path -> Text -printNamespace repo sbh path = - printRepo repo <> case sbh of - Nothing -> if path == Path.empty then mempty - else ":." <> Path.toText path - Just sbh -> ":#" <> SBH.toText sbh <> - if path == Path.empty then mempty - else "." 
<> Path.toText path - -printHead :: RemoteRepo -> Path -> Text -printHead repo path = printNamespace repo Nothing path - -type RemoteNamespace = (RemoteRepo, Maybe ShortBranchHash, Path) -type RemoteHead = (RemoteRepo, Path) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs b/parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs deleted file mode 100644 index ea08604948..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs +++ /dev/null @@ -1,52 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.Editor.SearchResult' where - -import Unison.Prelude - -import Unison.Referent (Referent) -import Unison.Reference (Reference) -import qualified Unison.HashQualified' as HQ' -import qualified Data.Set as Set -import qualified Unison.DataDeclaration as DD -import qualified Unison.Codebase.Editor.DisplayThing as DT -import qualified Unison.Type as Type -import Unison.DataDeclaration (Decl) -import Unison.Codebase.Editor.DisplayThing (DisplayThing) -import Unison.Type (Type) -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency (LabeledDependency) - -data SearchResult' v a - = Tm' (TermResult' v a) - | Tp' (TypeResult' v a) - deriving (Eq, Show) -data TermResult' v a = - TermResult' HQ'.HashQualified (Maybe (Type v a)) Referent (Set HQ'.HashQualified) - deriving (Eq, Show) -data TypeResult' v a = - TypeResult' HQ'.HashQualified (DisplayThing (Decl v a)) Reference (Set HQ'.HashQualified) - deriving (Eq, Show) - -pattern Tm n t r as = Tm' (TermResult' n t r as) -pattern Tp n t r as = Tp' (TypeResult' n t r as) - -tmReferent :: SearchResult' v a -> Maybe Referent -tmReferent = \case; Tm _ _ r _ -> Just r; _ -> Nothing -tpReference :: SearchResult' v a -> Maybe Reference -tpReference = \case; Tp _ _ r _ -> Just r; _ -> Nothing - -foldResult' :: (TermResult' v a -> b) -> (TypeResult' v a -> b) -> SearchResult' v a -> b -foldResult' f g = \case - Tm' tm -> f tm - Tp' tp -> g tp 
- --- todo: comment me out, is this actually useful, given what we saw in ShowDefinitionI? --- namely, that it doesn't include the Term's deps, just the Decl's and the --- result Term/Type names. -labeledDependencies :: Ord v => SearchResult' v a -> Set LabeledDependency -labeledDependencies = \case - Tm' (TermResult' _ t r _) -> - Set.insert (LD.referent r) $ maybe mempty (Set.map LD.typeRef . Type.dependencies) t - Tp' (TypeResult' _ d r _) -> - Set.map LD.typeRef . Set.insert r $ maybe mempty DD.declDependencies (DT.toMaybe d) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs b/parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs deleted file mode 100644 index ff772168a2..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs +++ /dev/null @@ -1,87 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.Editor.SlurpComponent where - -import Unison.Prelude - -import Data.Tuple (swap) -import Unison.Reference ( Reference ) -import Unison.UnisonFile (TypecheckedUnisonFile) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.DataDeclaration as DD -import qualified Unison.Term as Term -import qualified Unison.UnisonFile as UF - -data SlurpComponent v = - SlurpComponent { types :: Set v, terms :: Set v } - deriving (Eq,Ord,Show) - -isEmpty :: SlurpComponent v -> Bool -isEmpty sc = Set.null (types sc) && Set.null (terms sc) - -empty :: Ord v => SlurpComponent v -empty = SlurpComponent mempty mempty - -difference :: Ord v => SlurpComponent v -> SlurpComponent v -> SlurpComponent v -difference c1 c2 = SlurpComponent types' terms' where - types' = types c1 `Set.difference` types c2 - terms' = terms c1 `Set.difference` terms c2 - -intersection :: Ord v => SlurpComponent v -> SlurpComponent v -> SlurpComponent v -intersection c1 c2 = SlurpComponent types' terms' where - types' = types c1 `Set.intersection` types c2 - terms' = terms c1 `Set.intersection` terms 
c2 - -instance Ord v => Semigroup (SlurpComponent v) where (<>) = mappend -instance Ord v => Monoid (SlurpComponent v) where - mempty = SlurpComponent mempty mempty - c1 `mappend` c2 = SlurpComponent (types c1 <> types c2) - (terms c1 <> terms c2) - - --- I'm calling this `closeWithDependencies` because it doesn't just compute --- the dependencies of the inputs, it mixes them together. Make sure this --- is what you want. -closeWithDependencies :: forall v a. Ord v - => TypecheckedUnisonFile v a -> SlurpComponent v -> SlurpComponent v -closeWithDependencies uf inputs = seenDefns where - seenDefns = foldl' termDeps (SlurpComponent mempty seenTypes) (terms inputs) - seenTypes = foldl' typeDeps mempty (types inputs) - - termDeps :: SlurpComponent v -> v -> SlurpComponent v - termDeps seen v | Set.member v (terms seen) = seen - termDeps seen v = fromMaybe seen $ do - term <- findTerm v - let -- get the `v`s for the transitive dependency types - -- (the ones for terms are just the `freeVars below`) - -- although this isn't how you'd do it for a term that's already in codebase - tdeps :: [v] - tdeps = resolveTypes $ Term.dependencies term - seenTypes :: Set v - seenTypes = foldl' typeDeps (types seen) tdeps - seenTerms = Set.insert v (terms seen) - pure $ foldl' termDeps (seen { types = seenTypes - , terms = seenTerms}) - (Term.freeVars term) - - typeDeps :: Set v -> v -> Set v - typeDeps seen v | Set.member v seen = seen - typeDeps seen v = fromMaybe seen $ do - dd <- fmap snd (Map.lookup v (UF.dataDeclarations' uf)) <|> - fmap (DD.toDataDecl . 
snd) (Map.lookup v (UF.effectDeclarations' uf)) - pure $ foldl' typeDeps (Set.insert v seen) (resolveTypes $ DD.dependencies dd) - - resolveTypes :: Set Reference -> [v] - resolveTypes rs = [ v | r <- Set.toList rs, Just v <- [Map.lookup r typeNames]] - - findTerm :: v -> Maybe (Term.Term v a) - findTerm v = Map.lookup v allTerms - - allTerms = UF.allTerms uf - - typeNames :: Map Reference v - typeNames = invert (fst <$> UF.dataDeclarations' uf) <> invert (fst <$> UF.effectDeclarations' uf) - - invert :: forall k v . Ord k => Ord v => Map k v -> Map v k - invert m = Map.fromList (swap <$> Map.toList m) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs b/parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs deleted file mode 100644 index a65d80f183..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs +++ /dev/null @@ -1,391 +0,0 @@ -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.Editor.SlurpResult where - -import Unison.Prelude - -import Unison.Codebase.Editor.SlurpComponent (SlurpComponent(..)) -import Unison.Name ( Name ) -import Unison.Parser ( Ann ) -import Unison.Var (Var) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.Codebase.Editor.SlurpComponent as SC -import qualified Unison.DataDeclaration as DD -import qualified Unison.DeclPrinter as DeclPrinter -import qualified Unison.HashQualified as HQ -import qualified Unison.Name as Name -import qualified Unison.Names2 as Names -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Referent as Referent -import qualified Unison.TypePrinter as TP -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Monoid as Monoid -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.Relation as R -import qualified Unison.Var as Var - --- `oldRefNames` are the 
previously existing names for the old reference --- (these names will all be pointed to a new reference) --- `newRefNames` are the previously existing names for the new reference --- (the reference that all the old names will point to after the update) -data Aliases - = AddAliases (Set Name) - | UpdateAliases { oldRefNames :: Set Name - , newRefNames :: Set Name } - deriving (Show, Eq, Ord) - -data SlurpResult v = SlurpResult { - -- The file that we tried to add from - originalFile :: UF.TypecheckedUnisonFile v Ann - -- Extra definitions that were added to satisfy transitive closure, - -- beyond what the user specified. - , extraDefinitions :: SlurpComponent v - -- Previously existed only in the file; now added to the codebase. - , adds :: SlurpComponent v - -- Exists in the branch and the file, with the same name and contents. - , duplicates :: SlurpComponent v - -- Not added to codebase due to the name already existing - -- in the branch with a different definition. - , collisions :: SlurpComponent v - -- Not added to codebase due to the name existing - -- in the branch with a conflict (two or more definitions). - , conflicts :: SlurpComponent v - -- Names that already exist in the branch, but whose definitions - -- in `originalFile` are treated as updates. - , updates :: SlurpComponent v - -- Names of terms in `originalFile` that couldn't be updated because - -- they refer to existing constructors. (User should instead do a find/replace, - -- a constructor rename, or refactor the type that the name comes from). - , termExistingConstructorCollisions :: Set v - , constructorExistingTermCollisions :: Set v - -- -- Already defined in the branch, but with a different name. - , termAlias :: Map v Aliases - , typeAlias :: Map v Aliases - , defsWithBlockedDependencies :: SlurpComponent v - } deriving (Show) - --- Returns the set of constructor names for type names in the given `Set`. 
-constructorsFor :: Var v => Set v -> UF.TypecheckedUnisonFile v Ann -> Set v -constructorsFor types uf = let - names = UF.typecheckedToNames0 uf - typesRefs = Set.unions $ Names.typesNamed names . Name.fromVar <$> toList types - ctorNames = R.filterRan isOkCtor (Names.terms names) - isOkCtor (Referent.Con r _ _) | Set.member r typesRefs = True - isOkCtor _ = False - in Set.map Name.toVar $ R.dom ctorNames - --- Remove `removed` from the slurp result, and move any defns with transitive --- dependencies on the removed component into `defsWithBlockedDependencies`. --- Also removes `removed` from `extraDefinitions`. -subtractComponent :: forall v. Var v => SlurpComponent v -> SlurpResult v -> SlurpResult v -subtractComponent removed sr = - sr { adds = SC.difference (adds sr) (removed <> blocked) - , updates = SC.difference (updates sr) (removed <> blocked) - , defsWithBlockedDependencies = blocked - , extraDefinitions = SC.difference (extraDefinitions sr) blocked - } - where - -- for each v in adds, move to blocked if transitive dependency in removed - blocked = defsWithBlockedDependencies sr <> - SC.difference (blockedTerms <> blockedTypes) removed - - uf = originalFile sr - constructorsFor v = case UF.lookupDecl v uf of - Nothing -> mempty - Just (_, e) -> Set.fromList . DD.constructorVars $ either DD.toDataDecl id e - - blockedTypes = foldMap doType . SC.types $ adds sr <> updates sr where - -- include this type if it or any of its dependencies are removed - doType :: v -> SlurpComponent v - doType v = - if null (Set.intersection (SC.types removed) (SC.types (SC.closeWithDependencies uf vc))) - && null (Set.intersection (SC.terms removed) (constructorsFor v)) - then mempty else vc - where vc = mempty { types = Set.singleton v } - - blockedTerms = foldMap doTerm . 
SC.terms $ adds sr <> updates sr where - doTerm :: v -> SlurpComponent v - doTerm v = - if mempty == SC.intersection removed (SC.closeWithDependencies uf vc) - then mempty else vc - where vc = mempty { terms = Set.singleton v } - --- Move `updates` to `collisions`, and move any dependents of those updates to `*WithBlockedDependencies`. --- Subtract stuff from `extraDefinitions` that isn't in `adds` or `updates` -disallowUpdates :: forall v. Var v => SlurpResult v -> SlurpResult v -disallowUpdates sr = - let sr2 = subtractComponent (updates sr) sr - in sr2 { collisions = collisions sr2 <> updates sr } - -isNonempty :: Ord v => SlurpResult v -> Bool -isNonempty s = Monoid.nonEmpty (adds s) || Monoid.nonEmpty (updates s) - -data Status = - Add | Update | Duplicate | Collision | Conflicted | - TermExistingConstructorCollision | ConstructorExistingTermCollision | - ExtraDefinition | BlockedDependency - deriving (Ord,Eq,Show) - -isFailure :: Status -> Bool -isFailure s = case s of - TermExistingConstructorCollision -> True - ConstructorExistingTermCollision -> True - BlockedDependency -> True - Collision -> True - Conflicted -> True - _ -> False - -prettyStatus :: Status -> P.Pretty P.ColorText -prettyStatus s = case s of - Add -> "added" - Update -> "updated" - Collision -> "needs update" - Conflicted -> "conflicted" - Duplicate -> "duplicate" - TermExistingConstructorCollision -> "term/ctor collision" - ConstructorExistingTermCollision -> "ctor/term collision" - BlockedDependency -> "blocked" - ExtraDefinition -> "extra dependency" - -type IsPastTense = Bool - -prettyVar :: Var v => v -> P.Pretty P.ColorText -prettyVar = P.text . Var.name - -aliasesToShow :: Int -aliasesToShow = 5 - -pretty - :: forall v - . 
Var v - => IsPastTense - -> PPE.PrettyPrintEnv - -> SlurpResult v - -> P.Pretty P.ColorText -pretty isPast ppe sr = - let - tms = UF.hashTerms (originalFile sr) - goodIcon = P.green "⍟ " - badIcon = P.red "x " - plus = P.green " " - oxfordAliases shown sz end = - P.oxfordCommasWith end $ (P.shown <$> shown) ++ case sz of - 0 -> [] - n -> [P.shown n <> " more"] - okType v = (plus <>) $ case UF.lookupDecl v (originalFile sr) of - Just (_, dd) -> - P.syntaxToColor (DeclPrinter.prettyDeclHeader (HQ.unsafeFromVar v) dd) - <> if null aliases - then mempty - else P.newline <> P.indentN 2 (P.lines aliases) - where aliases = aliasesMessage . Map.lookup v $ typeAlias sr - Nothing -> P.bold (prettyVar v) <> P.red " (Unison bug, unknown type)" - - aliasesMessage aliases = case aliases of - Nothing -> [] - Just (AddAliases (splitAt aliasesToShow . toList -> (shown, rest))) -> - [ P.indentN 2 . P.wrap $ - P.hiBlack "(also named " <> oxfordAliases - shown - (length rest) - (P.hiBlack ")") - ] - Just (UpdateAliases oldNames newNames) -> - let oldMessage = - let (shown, rest) = splitAt aliasesToShow $ toList oldNames - sz = length oldNames - in P.indentN - 2 - ( P.wrap - $ P.hiBlack - ( "(The old definition " - <> (if isPast then "was" else "is") - <> " also named " - ) - <> oxfordAliases shown (length rest) (P.hiBlack ".") - <> P.hiBlack - (case (sz, isPast) of - (1, True ) -> "I updated this name too.)" - (1, False) -> "I'll update this name too.)" - (_, True ) -> "I updated these names too.)" - (_, False) -> "I'll update these names too.)" - ) - ) - newMessage = - let (shown, rest) = splitAt aliasesToShow $ toList newNames - sz = length rest - in P.indentN - 2 - ( P.wrap - $ P.hiBlack "(The new definition is already named " - <> oxfordAliases shown sz (P.hiBlack " as well.)") - ) - in (if null oldNames then mempty else [oldMessage]) - ++ (if null newNames then mempty else [newMessage]) - - -- The second field in the result is an optional second column. 
- okTerm :: v -> [(P.Pretty P.ColorText, Maybe (P.Pretty P.ColorText))] - okTerm v = case Map.lookup v tms of - Nothing -> - [(P.bold (prettyVar v), Just $ P.red "(Unison bug, unknown term)")] - Just (_, _, ty) -> - ( plus <> P.bold (prettyVar v) - , Just $ ": " <> P.indentNAfterNewline 2 (TP.pretty ppe ty) - ) - : ((, Nothing) <$> aliases) - where - aliases = fmap (P.indentN 2) . aliasesMessage . Map.lookup v $ termAlias sr - ok _ _ sc | SC.isEmpty sc = mempty - ok past present sc = - let header = goodIcon <> P.indentNAfterNewline - 2 - (P.wrap (if isPast then past else present)) - updatedTypes = P.lines $ okType <$> toList (SC.types sc) - updatedTerms = P.mayColumn2 . (=<<) okTerm . Set.toList $ SC.terms sc - in header <> "\n\n" <> P.linesNonEmpty [updatedTypes, updatedTerms] - okToUpdate = ok - (P.green "I've updated these names to your new definition:") - ( P.green - $ "These names already exist. You can `update` them " - <> "to your new definition:" - ) - okToAdd = ok (P.green "I've added these definitions:") - (P.green "These new definitions are ok to `add`:") - notOks _past _present sr | isOk sr = mempty - notOks past present sr = - let - header = badIcon <> P.indentNAfterNewline - 2 - (P.wrap (if isPast then past else present)) - typeLineFor status v = case UF.lookupDecl v (originalFile sr) of - Just (_, dd) -> - ( prettyStatus status - , P.syntaxToColor - $ DeclPrinter.prettyDeclHeader (HQ.unsafeFromVar v) dd - ) - Nothing -> - ( prettyStatus status - , prettyVar v <> P.red (P.wrap " (Unison bug, unknown type)") - ) - typeMsgs = - P.column2 - $ (typeLineFor Conflicted <$> toList (types (conflicts sr))) - ++ (typeLineFor Collision <$> toList (types (collisions sr))) - ++ ( typeLineFor BlockedDependency - <$> toList (types (defsWithBlockedDependencies sr)) - ) - termLineFor status v = case Map.lookup v tms of - Just (_ref, _tm, ty) -> - ( prettyStatus status - , P.bold (P.text $ Var.name v) - , ": " <> P.indentNAfterNewline 6 (TP.pretty ppe ty) - ) - Nothing 
-> (prettyStatus status, P.text (Var.name v), "") - termMsgs = - P.column3sep " " - $ (termLineFor Conflicted <$> toList (terms (conflicts sr))) - ++ (termLineFor Collision <$> toList (terms (collisions sr))) - ++ ( termLineFor TermExistingConstructorCollision - <$> toList (termExistingConstructorCollisions sr) - ) - ++ ( termLineFor ConstructorExistingTermCollision - <$> toList (constructorExistingTermCollisions sr) - ) - ++ ( termLineFor BlockedDependency - <$> toList (terms (defsWithBlockedDependencies sr)) - ) - in - header - <> "\n\n" - <> P.hiBlack " Reason" - <> "\n" - <> P.indentN 2 (P.linesNonEmpty [typeMsgs, termMsgs]) - <> "\n\n" - <> P.indentN - 2 - (P.column2 [("Tip:", "Use `help filestatus` to learn more.")]) - dups = Set.toList (SC.terms (duplicates sr) <> SC.types (duplicates sr)) - more i = - "... " - <> P.bold (P.shown i) - <> P.hiBlack " more." - <> "Try moving these below the `---` \"fold\" in your file." - in - P.sepNonEmpty - "\n\n" - [ if SC.isEmpty (duplicates sr) - then mempty - else - (if isPast - then "⊡ Ignored previously added definitions: " - else "⊡ Previously added definitions will be ignored: " - ) - <> P.indentNAfterNewline - 2 - (P.wrap $ P.excerptSep' (Just 7) - more - " " - (P.hiBlack . 
prettyVar <$> dups) - ) - , okToAdd (adds sr) - , okToUpdate (updates sr) - , notOks - (P.red "These definitions failed:") - (P.wrap $ P.red "These definitions would fail on `add` or `update`:") - sr - ] - -isOk :: Ord v => SlurpResult v -> Bool -isOk SlurpResult {..} = - SC.isEmpty collisions && - SC.isEmpty conflicts && - Set.null termExistingConstructorCollisions && - Set.null constructorExistingTermCollisions && - SC.isEmpty defsWithBlockedDependencies - -isAllDuplicates :: Ord v => SlurpResult v -> Bool -isAllDuplicates SlurpResult {..} = - SC.isEmpty adds && - SC.isEmpty updates && - SC.isEmpty extraDefinitions && - SC.isEmpty collisions && - SC.isEmpty conflicts && - Map.null typeAlias && - Map.null termAlias && - Set.null termExistingConstructorCollisions && - Set.null constructorExistingTermCollisions && - SC.isEmpty defsWithBlockedDependencies - --- stack repl --- --- λ> import Unison.Util.Pretty --- λ> import Unison.Codebase.Editor.SlurpResult --- λ> putStrLn $ toANSI 80 ex -ex :: P.Pretty P.ColorText -ex = P.indentN 2 $ P.lines ["", - P.green "▣ I've added these definitions: ", "", - P.indentN 2 . P.column2 $ [("a", "Nat"), ("map", "(a -> b) -> [a] -> [b]")], - "", - P.green "▣ I've updated these definitions: ", "", - P.indentN 2 . P.column2 $ [("c", "Nat"), ("flatMap", "(a -> [b]) -> [a] -> [b]")], - "", - P.wrap $ P.red "x" <> P.bold "These definitions couldn't be added:", "", - P.indentN 2 $ - P.lines [ - P.column2 [(P.hiBlack - "Reason for failure Symbol ", P.hiBlack "Type"), - ("ctor/term collision foo ", "Nat"), - ("failed dependency zoot ", "[a] -> [a] -> [a]"), - ("term/ctor collision unique type Foo ", "f x")], - "", "Tip: use `help filestatus` to learn more." 
- ], - "", - "⊡ Ignoring previously added definitions: " <> - P.indentNAfterNewline 2 ( - P.hiBlack (P.wrap $ P.sep " " ["zonk", "anotherOne", "List.wrangle", "oatbag", "blarg", "mcgee", P.group "ability Woot"])), - "" - ] diff --git a/parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs b/parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs deleted file mode 100644 index f117523a94..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs +++ /dev/null @@ -1,63 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} -module Unison.Codebase.Editor.TodoOutput where - -import Unison.Prelude - -import qualified Unison.Names3 as Names -import qualified Unison.Type as Type -import qualified Unison.Util.Relation as R -import qualified Unison.Codebase.Patch as Patch -import qualified Data.Set as Set -import qualified Unison.DataDeclaration as DD -import Unison.Reference (Reference) -import Unison.Names3 (Names0) -import Unison.Codebase.Patch (Patch) -import Unison.Codebase.Editor.DisplayThing (DisplayThing(RegularThing)) -import Unison.Type (Type) -import Unison.DataDeclaration (Decl) -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency (LabeledDependency) - -type Score = Int - -data TodoOutput v a = TodoOutput - { todoScore :: Score - , todoFrontier :: - ( [(Reference, Maybe (Type v a))] - , [(Reference, DisplayThing (Decl v a))]) - , todoFrontierDependents :: - ( [(Score, Reference, Maybe (Type v a))] - , [(Score, Reference, DisplayThing (Decl v a))]) - , nameConflicts :: Names0 - , editConflicts :: Patch - } deriving (Show) - -labeledDependencies :: Ord v => TodoOutput v a -> Set LabeledDependency -labeledDependencies TodoOutput{..} = Set.fromList ( - -- term refs - [LD.termRef r | (r, _) <- fst todoFrontier] <> - [LD.termRef r | (_, r, _) <- fst todoFrontierDependents] <> - [LD.typeRef r | (r, _) <- snd todoFrontier] <> - [LD.typeRef r | (_, r, _) <- snd todoFrontierDependents] <> - -- types of term refs - [LD.typeRef r 
| (_, Just t) <- fst todoFrontier - , r <- toList (Type.dependencies t)] <> - [LD.typeRef r | (_, _, Just t) <- fst todoFrontierDependents - , r <- toList (Type.dependencies t)] <> - -- and decls of type refs - [LD.typeRef r | (_, RegularThing d) <- snd todoFrontier - , r <- toList (DD.declDependencies d)] <> - [LD.typeRef r | (_, _, RegularThing d) <- snd todoFrontierDependents - , r <- toList (DD.declDependencies d)]) <> - -- name conflicts - Set.map LD.referent (R.ran (Names.terms0 nameConflicts)) <> - Set.map LD.typeRef (R.ran (Names.types0 nameConflicts)) <> - Patch.labeledDependencies editConflicts - -noConflicts :: TodoOutput v a -> Bool -noConflicts todo = - nameConflicts todo == mempty && editConflicts todo == Patch.empty - -noEdits :: TodoOutput v a -> Bool -noEdits todo = - todoScore todo == 0 diff --git a/parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs b/parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs deleted file mode 100644 index 99f4c12642..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs +++ /dev/null @@ -1,166 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.Editor.UriParser (repoPath) where - -import qualified Text.Megaparsec as P -import qualified Text.Megaparsec.Char.Lexer as L -import qualified Text.Megaparsec.Char as C -import Data.Text as Text - -import Unison.Codebase.Path (Path(..)) -import qualified Unison.Codebase.Path as Path -import Unison.Codebase.Editor.RemoteRepo (RemoteRepo(..), RemoteNamespace) -import Unison.Codebase.ShortBranchHash (ShortBranchHash(..)) -import Unison.Prelude -import qualified Unison.Hash as Hash -import qualified Unison.Lexer -import Unison.NameSegment (NameSegment(..)) -import Data.Sequence as Seq -import Data.Char (isAlphaNum, isSpace, isDigit) - -type P = P.Parsec () Text - --- Here are the git protocols that we know how to parse --- Local Protocol --- $ git clone /srv/git/project.git --- $ git clone 
/srv/git/project.git[:treeish][:[#hash][.path]] --- File Protocol --- $ git clone file:///srv/git/project.git[:treeish][:[#hash][.path]] --- Smart / Dumb HTTP protocol --- $ git clone https://example.com/gitproject.git[:treeish][:[#hash][.path]] --- SSH Protocol --- $ git clone ssh://[user@]server/project.git[:treeish][:[#hash][.path]] --- $ git clone [user@]server:project.git[:treeish][:[#hash][.path]] --- Git Protocol (obsolete) -repoPath :: P RemoteNamespace -repoPath = P.label "generic git repo" $ do - protocol <- parseProtocol - treeish <- P.optional treeishSuffix - let repo = GitRepo (printProtocol protocol) treeish - nshashPath <- P.optional (C.char ':' *> namespaceHashPath) - case nshashPath of - Nothing -> pure (repo, Nothing, Path.empty) - Just (sbh, p) -> pure (repo, sbh, p) - --- does this not exist somewhere in megaparsec? yes in 7.0 -symbol :: Text -> P Text -symbol = L.symbol (pure ()) - -data GitProtocol - = HttpsProtocol (Maybe User) HostInfo UrlPath - | SshProtocol (Maybe User) HostInfo UrlPath - | ScpProtocol (Maybe User) Host UrlPath - | FileProtocol UrlPath - | LocalProtocol UrlPath - deriving (Eq, Ord, Show) - -printProtocol :: GitProtocol -> Text ---printProtocol x | traceShow x False = undefined -printProtocol x = case x of - HttpsProtocol muser hostInfo path -> "https://" - <> printUser muser - <> printHostInfo hostInfo - <> path - SshProtocol muser hostInfo path -> "ssh://" - <> printUser muser - <> printHostInfo hostInfo - <> path - ScpProtocol muser host path -> printUser muser <> host <> ":" <> path - FileProtocol path -> "file://" <> path - LocalProtocol path -> path - where - printUser = maybe mempty (\(User u) -> u <> "@") - printHostInfo :: HostInfo -> Text - printHostInfo (HostInfo hostname mport) = - hostname <> maybe mempty (Text.cons ':') mport - -data Scheme = Ssh | Https - deriving (Eq, Ord, Show) - -data User = User Text - deriving (Eq, Ord, Show) - -type UrlPath = Text - -data HostInfo = HostInfo Text (Maybe Text) - deriving 
(Eq, Ord, Show) - -type Host = Text -- no port - --- doesn't yet handle basic authentication like https://user:pass@server.com --- (does anyone even want that?) --- or handle ipv6 addresses (https://en.wikipedia.org/wiki/IPv6#Addressing) -parseProtocol :: P GitProtocol -parseProtocol = P.label "parseProtocol" $ - fileRepo <|> httpsRepo <|> sshRepo <|> scpRepo <|> localRepo - where - localRepo, fileRepo, httpsRepo, sshRepo, scpRepo :: P GitProtocol - parsePath = - P.takeWhile1P (Just "repo path character") - (\c -> not (isSpace c || c == ':')) - localRepo = LocalProtocol <$> parsePath - fileRepo = P.label "fileRepo" $ do - void $ symbol "file://" - FileProtocol <$> parsePath - httpsRepo = P.label "httpsRepo" $ do - void $ symbol "https://" - HttpsProtocol <$> P.optional userInfo <*> parseHostInfo <*> parsePath - sshRepo = P.label "sshRepo" $ do - void $ symbol "ssh://" - SshProtocol <$> P.optional userInfo <*> parseHostInfo <*> parsePath - scpRepo = P.label "scpRepo" . P.try $ - ScpProtocol <$> P.optional userInfo <*> parseHost <* symbol ":" <*> parsePath - userInfo :: P User - userInfo = P.label "userInfo" . P.try $ do - username <- P.takeWhile1P (Just "username character") (/= '@') - void $ C.char '@' - pure $ User username - parseHostInfo :: P HostInfo - parseHostInfo = P.label "parseHostInfo" $ - HostInfo <$> parseHost <*> (P.optional $ do - void $ symbol ":" - P.takeWhile1P (Just "digits") isDigit) - - parseHost = P.label "parseHost" $ hostname <|> ipv4 -- <|> ipv6 - where - hostname = - P.takeWhile1P (Just "hostname character") - (\c -> isAlphaNum c || c == '.' || c == '-') - ipv4 = P.label "ipv4 address" $ do - o1 <- decOctet - void $ C.char '.' - o2 <- decOctet - void $ C.char '.' - o3 <- decOctet - void $ C.char '.' - o4 <- decOctet - pure $ Text.pack $ o1 <> "." <> o2 <> "." <> o3 <> "." 
<> o4 - decOctet = P.count' 1 3 C.digitChar - --- #nshashabc.path.foo.bar or .path.foo.bar -namespaceHashPath :: P (Maybe ShortBranchHash, Path) -namespaceHashPath = do - sbh <- P.optional shortBranchHash - p <- P.optional $ do - void $ C.char '.' - P.sepBy1 - ((:) <$> C.satisfy Unison.Lexer.wordyIdStartChar - <*> P.many (C.satisfy Unison.Lexer.wordyIdChar)) - (C.char '.') - case p of - Nothing -> pure (sbh, Path.empty) - Just p -> pure (sbh, makePath p) - where makePath = Path . Seq.fromList . fmap (NameSegment . Text.pack) - -treeishSuffix :: P Text -treeishSuffix = P.label "git treeish" . P.try $ do - void $ C.char ':' - notdothash <- C.noneOf @[] ".#:" - rest <- P.takeWhileP (Just "not colon") (/= ':') - pure $ Text.cons notdothash rest - -shortBranchHash :: P ShortBranchHash -shortBranchHash = P.label "short branch hash" $ do - void $ C.char '#' - ShortBranchHash <$> - P.takeWhile1P (Just "base32hex chars") (`elem` Hash.validBase32HexChars) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs b/parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs deleted file mode 100644 index 3e638cda85..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs +++ /dev/null @@ -1,27 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.Editor.VersionParser where - -import Text.Megaparsec -import Unison.Codebase.Editor.RemoteRepo -import Text.Megaparsec.Char -import Data.Functor (($>)) -import Data.Text (Text) -import qualified Data.Text as Text -import qualified Unison.Codebase.Path as Path -import Data.Void (Void) - --- |"release/M1j.2" -> "releases._M1j" --- "devel/*" -> "trunk" -defaultBaseLib :: Parsec Void Text RemoteNamespace -defaultBaseLib = fmap makeNS $ devel <|> release - where - devel, release, version :: Parsec Void Text Text - devel = "devel/" *> many anyChar *> eof $> "trunk" - release = fmap ("releases._" <>) $ "release/" *> version <* eof - version = fmap Text.pack $ - try (someTill 
anyChar "." <* many anyChar) <|> many anyChar - makeNS :: Text -> RemoteNamespace - makeNS t = ( GitRepo "https://github.com/unisonweb/base" Nothing - , Nothing - , Path.fromText t) diff --git a/parser-typechecker/src/Unison/Codebase/Execute.hs b/parser-typechecker/src/Unison/Codebase/Execute.hs deleted file mode 100644 index 97507be75b..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Execute.hs +++ /dev/null @@ -1,56 +0,0 @@ -{-# LANGUAGE GADTs #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE PartialTypeSignatures #-} -{-# LANGUAGE OverloadedStrings #-} - --- | Execute a computation of type '{IO} () that has been previously added to --- the codebase, without setting up an interactive environment. --- --- This allows one to run standalone applications implemented in the Unison --- language. - -module Unison.Codebase.Execute where - -import Unison.Prelude - -import Unison.Codebase.MainTerm ( getMainTerm ) -import qualified Unison.Codebase.MainTerm as MainTerm -import qualified Unison.Codebase as Codebase -import Unison.Parser ( Ann ) -import qualified Unison.Codebase.Runtime as Runtime -import Unison.Codebase.Runtime ( Runtime ) -import Unison.Var ( Var ) -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Names3 as Names3 -import qualified Unison.Codebase.Branch as Branch -import System.Exit (die) -import Control.Exception (finally) - -execute - :: Var v - => Codebase.Codebase IO v Ann - -> Runtime v - -> String - -> IO () -execute codebase runtime mainName = - (`finally` Runtime.terminate runtime) $ do - root <- Codebase.getRootBranch codebase >>= \case - Right r -> pure r - Left Codebase.NoRootBranch -> - die ("Couldn't identify a root namespace.") - Left (Codebase.CouldntLoadRootBranch h) -> - die ("Couldn't load root branch " ++ show h) - Left (Codebase.CouldntParseRootBranch h) -> - die ("Couldn't parse root branch head " ++ show h) - let parseNames0 = Names3.makeAbsolute0 (Branch.toNames0 (Branch.head root)) - 
loadTypeOfTerm = Codebase.getTypeOfTerm codebase - let mainType = Runtime.mainType runtime - mt <- getMainTerm loadTypeOfTerm parseNames0 mainName mainType - case mt of - MainTerm.NotAFunctionName s -> die ("Not a function name: " ++ s) - MainTerm.NotFound s -> die ("Not found: " ++ s) - MainTerm.BadType s -> die (s ++ " is not of type '{IO} ()") - MainTerm.Success _ tm _ -> do - let codeLookup = Codebase.toCodeLookup codebase - ppe = PPE.PrettyPrintEnv (const Nothing) (const Nothing) - void $ Runtime.evaluateTerm codeLookup ppe runtime tm diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs deleted file mode 100644 index 55b4558deb..0000000000 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ /dev/null @@ -1,282 +0,0 @@ -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.FileCodebase -( getRootBranch -- used by Git module -, branchHashesByPrefix -- used by Git module -, branchFromFiles -- used by Git module -, codebase1 -- used by Main -, codebase1' -- used by Test/Git -, codebaseExists -- used by Main -, initCodebaseAndExit -, initCodebase -, getCodebaseOrExit -, getCodebaseDir -) where - -import Unison.Prelude - -import UnliftIO ( MonadUnliftIO ) -import UnliftIO.Exception ( catchIO ) -import UnliftIO.Concurrent ( forkIO - , killThread - ) -import UnliftIO.STM ( atomically ) -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Data.Text.IO as TextIO -import UnliftIO.Directory ( createDirectoryIfMissing - , doesDirectoryExist - ) -import System.FilePath ( takeFileName - ) -import System.Directory ( getHomeDirectory - , canonicalizePath - ) -import System.Environment ( getProgName ) -import System.Exit ( exitFailure, exitSuccess ) -import qualified Unison.Codebase as Codebase -import Unison.Codebase ( Codebase(Codebase) - , BuiltinAnnotation 
- , CodebasePath - ) -import Unison.Codebase.Branch ( Branch ) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Reflog as Reflog -import qualified Unison.Codebase.Serialization as S -import qualified Unison.Codebase.Serialization.V1 - as V1 -import qualified Unison.Codebase.Watch as Watch -import Unison.Parser (Ann() ) -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.Util.TQueue as TQueue -import Unison.Var ( Var ) -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Cache as Cache -import qualified Unison.Util.Pretty as P -import qualified Unison.PrettyTerminal as PT -import Unison.Symbol ( Symbol ) -import qualified Unison.Codebase.FileCodebase.Common as Common -import Unison.Codebase.FileCodebase.Common - ( Err(CantParseBranchHead) - , codebaseExists - --- - , branchHeadDir - , dependentsDir - , reflogPath - , typeIndexDir - , typeMentionsIndexDir - , watchesDir - --- - , componentIdFromString - , hashFromFilePath - , referentIdFromString - , decodeFileName - , formatAnn - , getRootBranch - , getDecl - , getTerm - , getTypeOfTerm - , getWatch - , putDecl - , putTerm - , putRootBranch - , putWatch - --- - , branchFromFiles - , branchHashesByPrefix - , termReferencesByPrefix - , termReferentsByPrefix - , typeReferencesByPrefix - --- - , failWith - , listDirectory - ) - -import qualified Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex as Sync - -initCodebaseAndExit :: Maybe FilePath -> IO () -initCodebaseAndExit mdir = do - dir <- getCodebaseDir mdir - cache <- Cache.cache - _ <- initCodebase cache dir - exitSuccess - --- initializes a new codebase here (i.e. 
`ucm -codebase dir init`) -initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) -initCodebase cache path = do - theCodebase <- codebase1 cache V1.formatSymbol Common.formatAnn path - prettyDir <- P.string <$> canonicalizePath path - - whenM (codebaseExists path) $ - do PT.putPrettyLn' - . P.wrap - $ "It looks like there's already a codebase in: " - <> prettyDir - exitFailure - - PT.putPrettyLn' - . P.wrap - $ "Initializing a new codebase in: " - <> prettyDir - Codebase.initializeCodebase theCodebase - pure theCodebase - --- get the codebase in dir, or in the home directory if not provided. -getCodebaseOrExit :: Branch.Cache IO -> Maybe FilePath -> IO (Codebase IO Symbol Ann) -getCodebaseOrExit cache mdir = do - dir <- getCodebaseDir mdir - progName <- getProgName - prettyDir <- P.string <$> canonicalizePath dir - let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir - let theCodebase = codebase1 cache V1.formatSymbol formatAnn dir - unlessM (codebaseExists dir) $ do - PT.putPrettyLn' errMsg - exitFailure - theCodebase - -getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s -getNoCodebaseErrorMsg executable prettyDir mdir = - let secondLine = - case mdir of - Just dir -> "Run `" <> executable <> " -codebase " <> fromString dir - <> " init` to create one, then try again!" - Nothing -> "Run `" <> executable <> " init` to create one there," - <> " then try again;" - <> " or `" <> executable <> " -codebase ` to load a codebase from someplace else!" - in - P.lines - [ "No codebase exists in " <> prettyDir <> "." - , secondLine ] - -getCodebaseDir :: Maybe FilePath -> IO FilePath -getCodebaseDir = maybe getHomeDirectory pure - --- builds a `Codebase IO v a`, given serializers for `v` and `a` -codebase1 - :: forall m v a - . 
MonadUnliftIO m - => Var v - => BuiltinAnnotation a - => Branch.Cache m -> S.Format v -> S.Format a -> CodebasePath -> m (Codebase m v a) -codebase1 = codebase1' Sync.syncToDirectory - -codebase1' - :: forall m v a - . MonadUnliftIO m - => Var v - => BuiltinAnnotation a - => Common.SyncToDir m v a -> Branch.Cache m -> S.Format v -> S.Format a -> CodebasePath -> m (Codebase m v a) -codebase1' syncToDirectory branchCache fmtV@(S.Format getV putV) fmtA@(S.Format getA putA) path = do - termCache <- Cache.semispaceCache 8192 - typeOfTermCache <- Cache.semispaceCache 8192 - declCache <- Cache.semispaceCache 1024 - let c = - Codebase - (Cache.applyDefined termCache $ getTerm getV getA path) - (Cache.applyDefined typeOfTermCache $ getTypeOfTerm getV getA path) - (Cache.applyDefined declCache $ getDecl getV getA path) - (putTerm putV putA path) - (putDecl putV putA path) - (getRootBranch branchCache path) - (putRootBranch path) - (branchHeadUpdates path) - (branchFromFiles branchCache path) - dependents - (flip (syncToDirectory fmtV fmtA) path) - (syncToDirectory fmtV fmtA path) - watches - (getWatch getV getA path) - (putWatch putV putA path) - getReflog - appendReflog - getTermsOfType - getTermsMentioningType - -- todo: maintain a trie of references to come up with this number - (pure 10) - -- The same trie can be used to make this lookup fast: - (termReferencesByPrefix path) - (typeReferencesByPrefix path) - (termReferentsByPrefix (getDecl getV getA) path) - (pure 10) - (branchHashesByPrefix path) - in pure c - where - dependents :: Reference -> m (Set Reference.Id) - dependents r = listDirAsIds (dependentsDir path r) - getTermsOfType :: Reference -> m (Set Referent.Id) - getTermsOfType r = listDirAsReferents (typeIndexDir path r) - getTermsMentioningType :: Reference -> m (Set Referent.Id) - getTermsMentioningType r = listDirAsReferents (typeMentionsIndexDir path r) - -- todo: revisit these - listDirAsIds :: FilePath -> m (Set Reference.Id) - listDirAsIds d = do - e <- 
doesDirectoryExist d - if e - then do - ls <- fmap decodeFileName <$> listDirectory d - pure . Set.fromList $ ls >>= (toList . componentIdFromString) - else pure Set.empty - listDirAsReferents :: FilePath -> m (Set Referent.Id) - listDirAsReferents d = do - e <- doesDirectoryExist d - if e - then do - ls <- fmap decodeFileName <$> listDirectory d - pure . Set.fromList $ ls >>= (toList . referentIdFromString) - else pure Set.empty - watches :: UF.WatchKind -> m [Reference.Id] - watches k = - liftIO $ do - let wp = watchesDir path (Text.pack k) - createDirectoryIfMissing True wp - ls <- listDirectory wp - pure $ ls >>= (toList . componentIdFromString . takeFileName) - getReflog :: m [Reflog.Entry] - getReflog = - liftIO - (do contents <- TextIO.readFile (reflogPath path) - let lines = Text.lines contents - let entries = parseEntry <$> lines - pure entries) `catchIO` - const (pure []) - where - parseEntry t = fromMaybe (err t) (Reflog.fromText t) - err t = error $ - "I couldn't understand this line in " ++ reflogPath path ++ "\n\n" ++ - Text.unpack t - appendReflog :: Text -> Branch m -> Branch m -> m () - appendReflog reason old new = - let - t = Reflog.toText $ - Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason - in liftIO $ TextIO.appendFile (reflogPath path) (t <> "\n") - --- watches in `branchHeadDir root` for externally deposited heads; --- parse them, and return them -branchHeadUpdates - :: MonadUnliftIO m => CodebasePath -> m (m (), m (Set Branch.Hash)) -branchHeadUpdates root = do - branchHeadChanges <- TQueue.newIO - (cancelWatch, watcher) <- Watch.watchDirectory' (branchHeadDir root) --- -- add .ubf file changes to intermediate queue - watcher1 <- - forkIO - $ forever - $ do - -- Q: what does watcher return on a file deletion? - -- A: nothing - (filePath, _) <- watcher - case hashFromFilePath filePath of - Nothing -> failWith $ CantParseBranchHead filePath - Just h -> - atomically . 
TQueue.enqueue branchHeadChanges $ Branch.Hash h - -- smooth out intermediate queue - pure - ( cancelWatch >> killThread watcher1 - , Set.fromList <$> Watch.collectUntilPause branchHeadChanges 400000 - ) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs deleted file mode 100644 index 426047a2e5..0000000000 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs +++ /dev/null @@ -1,590 +0,0 @@ -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.FileCodebase.Common - ( Err(..) - , SyncToDir - , SimpleLens - , codebaseExists - , codebasePath - , hashExists - -- dirs (parent of all the files) - , branchHeadDir - , dependentsDir - , dependentsDir' - , typeIndexDir - , typeIndexDir' - , typeMentionsIndexDir - , typeMentionsIndexDir' - , watchesDir - -- paths (looking up one file) - , branchPath - , declPath - , editsPath - , reflogPath - , termPath - , typePath - , watchPath - -- core stuff - , formatAnn - , getDecl - , putDecl - , putRootBranch - , getTerm - , getTypeOfTerm - , putTerm - , getWatch - , putWatch - , updateCausalHead - , serializeEdits - , deserializeEdits - , serializeRawBranch - , branchFromFiles - , branchHashesByPrefix - , termReferencesByPrefix - , termReferentsByPrefix - , typeReferencesByPrefix - -- stringing - , hashFromFilePath - , componentIdFromString - , componentIdToString - , referentIdFromString - -- touching files - , touchIdFile - , touchReferentFile - , touchReferentIdFile - -- util - , copyFileWithParents - , doFileOnce - , failWith - , listDirectory - -- expose for tests :| - , encodeFileName - , decodeFileName - , getRootBranch - - ) where - -import Unison.Prelude - -import Control.Error (runExceptT, ExceptT(..)) -import Control.Lens (Lens, use, to, (%=)) -import Control.Monad.Catch (catch) -import 
Control.Monad.State (MonadState) -import qualified Data.ByteString.Base16 as ByteString (decodeBase16, encodeBase16) -import qualified Data.Char as Char -import Data.List ( isPrefixOf ) -import qualified Data.Set as Set -import qualified Data.Text as Text -import UnliftIO.Directory ( createDirectoryIfMissing - , doesFileExist - , removeFile - , doesDirectoryExist, copyFile - ) -import UnliftIO.IO.File (writeBinaryFile) -import qualified System.Directory -import System.FilePath ( takeBaseName - , takeDirectory - , () - ) -import qualified Unison.Codebase as Codebase -import Unison.Codebase (CodebasePath) -import Unison.Codebase.Causal ( Causal - , RawHash(..) - ) -import qualified Unison.Codebase.Causal as Causal -import Unison.Codebase.Branch ( Branch ) -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.ShortBranchHash (ShortBranchHash(..)) -import qualified Unison.Codebase.ShortBranchHash as SBH -import qualified Unison.Codebase.Serialization as S -import qualified Unison.Codebase.Serialization.V1 as V1 -import Unison.Codebase.SyncMode ( SyncMode ) -import Unison.Codebase.Patch ( Patch(..) 
) -import qualified Unison.ConstructorType as CT -import qualified Unison.DataDeclaration as DD -import qualified Unison.Hash as Hash -import Unison.Parser ( Ann(External) ) -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import Unison.Referent ( Referent ) -import qualified Unison.Referent as Referent -import Unison.ShortHash (ShortHash) -import qualified Unison.ShortHash as SH -import Unison.Term ( Term ) -import qualified Unison.Term as Term -import Unison.Type ( Type ) -import qualified Unison.Type as Type -import Unison.Var ( Var ) -import qualified Unison.UnisonFile as UF -import Unison.Util.Monoid (foldMapM) -import Unison.Util.Timing (time) -import Data.Either.Extra (maybeToEither) - -data Err - = InvalidBranchFile FilePath String - | InvalidEditsFile FilePath String - | NoBranchHead FilePath - | CantParseBranchHead FilePath - | AmbiguouslyTypeAndTerm Reference.Id - | UnknownTypeOrTerm Reference - deriving Show - -type SimpleLens s a = Lens s s a a - -codebasePath :: FilePath -codebasePath = ".unison" "v1" - -formatAnn :: S.Format Ann -formatAnn = S.Format (pure External) (\_ -> pure ()) - --- Write Branch and its dependents to the dest codebase, and set it as the root. 
-type SyncToDir m v a - = S.Format v - -> S.Format a - -> CodebasePath -- src codebase - -> CodebasePath -- dest codebase - -> SyncMode - -> Branch m -- new dest root branch - -> m () - -termsDir, typesDir, branchesDir, branchHeadDir, editsDir - :: CodebasePath -> FilePath -termsDir root = root codebasePath "terms" -typesDir root = root codebasePath "types" -branchesDir root = root codebasePath "paths" -branchHeadDir root = branchesDir root "_head" -editsDir root = root codebasePath "patches" - -termDir, declDir :: CodebasePath -> Reference.Id -> FilePath -termDir root r = termsDir root componentIdToString r -declDir root r = typesDir root componentIdToString r - -referenceToDir :: Reference -> FilePath -referenceToDir r = case r of - Reference.Builtin name -> "_builtin" encodeFileName (Text.unpack name) - Reference.DerivedId hash -> componentIdToString hash - -dependentsDir', typeIndexDir', typeMentionsIndexDir' :: FilePath -> FilePath - -dependentsDir :: CodebasePath -> Reference -> FilePath -dependentsDir root r = dependentsDir' root referenceToDir r -dependentsDir' root = root codebasePath "dependents" - -watchesDir :: CodebasePath -> Text -> FilePath -watchesDir root UF.RegularWatch = - root codebasePath "watches" "_cache" -watchesDir root kind = - root codebasePath "watches" encodeFileName (Text.unpack kind) -watchPath :: CodebasePath -> UF.WatchKind -> Reference.Id -> FilePath -watchPath root kind id = - watchesDir root (Text.pack kind) componentIdToString id <> ".ub" - -typeIndexDir :: CodebasePath -> Reference -> FilePath -typeIndexDir root r = typeIndexDir' root referenceToDir r -typeIndexDir' root = root codebasePath "type-index" - -typeMentionsIndexDir :: CodebasePath -> Reference -> FilePath -typeMentionsIndexDir root r = typeMentionsIndexDir' root referenceToDir r -typeMentionsIndexDir' root = root codebasePath "type-mentions-index" - -decodeFileName :: FilePath -> String -decodeFileName = let - go ('$':tl) = case span (/= '$') tl of - 
("forward-slash", _:tl) -> '/' : go tl - ("back-slash", _:tl) -> '\\' : go tl - ("colon", _:tl) -> ':' : go tl - ("star", _:tl) -> '*' : go tl - ("question-mark", _:tl) -> '?' : go tl - ("double-quote", _:tl) -> '\"' : go tl - ("less-than", _:tl) -> '<' : go tl - ("greater-than", _:tl) -> '>' : go tl - ("pipe", _:tl) -> '|' : go tl - ('x':hex, _:tl) -> decodeHex hex ++ go tl - ("",_:tl) -> '$' : go tl - (s,_:tl) -> '$' : s ++ '$' : go tl -- unknown escapes left unchanged - (s,[]) -> s - go (hd:tl) = hd : go tl - go [] = [] - decodeHex :: String -> String - decodeHex s = either (const s) (Text.unpack . decodeUtf8) - . ByteString.decodeBase16 . encodeUtf8 . Text.pack $ s - in \case - "$dot$" -> "." - "$dotdot$" -> ".." - t -> go t - --- https://superuser.com/questions/358855/what-characters-are-safe-in-cross-platform-file-names-for-linux-windows-and-os -encodeFileName :: String -> FilePath -encodeFileName = let - go ('/' : rem) = "$forward-slash$" <> go rem - go ('\\' : rem) = "$back-slash$" <> go rem - go (':' : rem) = "$colon$" <> go rem - go ('*' : rem) = "$star$" <> go rem - go ('?' : rem) = "$question-mark$" <> go rem - go ('"' : rem) = "$double-quote$" <> go rem - go ('<' : rem) = "$less-than$" <> go rem - go ('>' : rem) = "$greater-than$" <> go rem - go ('|' : rem) = "$pipe$" <> go rem - go ('$' : rem) = "$$" <> go rem - go (c : rem) | not (Char.isPrint c && Char.isAscii c) - = "$x" <> encodeHex [c] <> "$" <> go rem - | otherwise = c : go rem - go [] = [] - encodeHex :: String -> String - encodeHex = Text.unpack . Text.toUpper . ByteString.encodeBase16 . - encodeUtf8 . Text.pack - in \case - "." -> "$dot$" - ".." 
-> "$dotdot$" - t -> go t - -termPath, typePath, declPath :: CodebasePath -> Reference.Id -> FilePath -termPath path r = termDir path r "compiled.ub" -typePath path r = termDir path r "type.ub" -declPath path r = declDir path r "compiled.ub" - -branchPath :: CodebasePath -> Branch.Hash -> FilePath -branchPath root (RawHash h) = branchesDir root hashToString h ++ ".ub" - -editsPath :: CodebasePath -> Branch.EditHash -> FilePath -editsPath root h = editsDir root hashToString h ++ ".up" - -reflogPath :: CodebasePath -> FilePath -reflogPath root = root codebasePath "reflog" - -touchIdFile :: MonadIO m => Reference.Id -> FilePath -> m () -touchIdFile id fp = - touchFile (fp encodeFileName (componentIdToString id)) - -touchReferentFile :: MonadIO m => Referent -> FilePath -> m () -touchReferentFile id fp = - touchFile (fp encodeFileName (referentToString id)) - -touchReferentIdFile :: MonadIO m => Referent.Id -> FilePath -> m () -touchReferentIdFile = touchReferentFile . Referent.fromId - -touchFile :: MonadIO m => FilePath -> m () -touchFile fp = do - createDirectoryIfMissing True (takeDirectory fp) - writeBinaryFile fp mempty - --- checks if `path` looks like a unison codebase -minimalCodebaseStructure :: CodebasePath -> [FilePath] -minimalCodebaseStructure root = [ branchHeadDir root ] - --- checks if a minimal codebase structure exists at `path` -codebaseExists :: MonadIO m => CodebasePath -> m Bool -codebaseExists root = - and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) - --- | load a branch w/ children from a FileCodebase -branchFromFiles :: MonadIO m => Branch.Cache m -> CodebasePath -> Branch.Hash -> m (Maybe (Branch m)) -branchFromFiles cache rootDir h = time "FileCodebase.Common.branchFromFiles" $ do - fileExists <- doesFileExist (branchPath rootDir h) - if fileExists then Just <$> - Branch.cachedRead - cache - (deserializeRawBranch rootDir) - (deserializeEdits rootDir) - h - else - pure Nothing - where - deserializeRawBranch - :: MonadIO m 
=> CodebasePath -> Causal.Deserialize m Branch.Raw Branch.Raw - deserializeRawBranch root h = do - let ubf = branchPath root h - S.getFromFile' (V1.getCausal0 V1.getRawBranch) ubf >>= \case - Left err -> failWith $ InvalidBranchFile ubf err - Right c0 -> pure c0 - -deserializeEdits :: MonadIO m => CodebasePath -> Branch.EditHash -> m Patch -deserializeEdits root h = - let file = editsPath root h - in S.getFromFile' V1.getEdits file >>= \case - Left err -> failWith $ InvalidEditsFile file err - Right edits -> pure edits - -getRootBranch :: forall m. - MonadIO m => Branch.Cache m -> CodebasePath -> m (Either Codebase.GetRootBranchError (Branch m)) -getRootBranch cache root = time "FileCodebase.Common.getRootBranch" $ - ifM (codebaseExists root) - (listDirectory (branchHeadDir root) >>= filesToBranch) - (pure $ Left Codebase.NoRootBranch) - where - filesToBranch :: [FilePath] -> m (Either Codebase.GetRootBranchError (Branch m)) - filesToBranch = \case - [] -> pure $ Left Codebase.NoRootBranch - [single] -> runExceptT $ fileToBranch single - conflict -> runExceptT (traverse fileToBranch conflict) >>= \case - Right (x : xs) -> Right <$> foldM Branch.merge x xs - Right _ -> error "FileCodebase.getRootBranch.conflict can't be empty." - Left e -> Left <$> pure e - - fileToBranch :: String -> ExceptT Codebase.GetRootBranchError m (Branch m) - fileToBranch single = ExceptT $ case hashFromString single of - Nothing -> pure . 
Left $ Codebase.CouldntParseRootBranch single - Just (Branch.Hash -> h) -> branchFromFiles cache root h <&> - maybeToEither (Codebase.CouldntLoadRootBranch h) - --- |only syncs branches and edits -- no dependencies -putRootBranch :: MonadIO m => CodebasePath -> Branch m -> m () -putRootBranch root b = do - Branch.sync (hashExists root) - (serializeRawBranch root) - (serializeEdits root) - b - updateCausalHead (branchHeadDir root) (Branch._history b) - -hashExists :: MonadIO m => CodebasePath -> Branch.Hash -> m Bool -hashExists root h = doesFileExist (branchPath root h) - -serializeRawBranch - :: (MonadIO m) => CodebasePath -> Causal.Serialize m Branch.Raw Branch.Raw -serializeRawBranch root h = - S.putWithParentDirs (V1.putRawCausal V1.putRawBranch) (branchPath root h) - -serializeEdits - :: MonadIO m => CodebasePath -> Branch.EditHash -> m Patch -> m () -serializeEdits root h medits = - unlessM (doesFileExist (editsPath root h)) $ do - edits <- medits - S.putWithParentDirs V1.putEdits (editsPath root h) edits - --- `headDir` is like ".unison/branches/head", or ".unison/edits/head"; --- not ".unison"; a little weird. I guess the reason this doesn't take --- the codebase root path is because it's applicable to any causal. --- We just have one though, and I suppose that won't change any time soon. -updateCausalHead :: MonadIO m => FilePath -> Causal n h e -> m () -updateCausalHead headDir c = do - let (RawHash h) = Causal.currentHash c - hs = hashToString h - -- write new head - touchFile (headDir hs) - -- delete existing heads - fmap (filter (/= hs)) (listDirectory headDir) - >>= traverse_ (removeFile . (headDir )) - --- here -hashFromString :: String -> Maybe Hash.Hash -hashFromString = Hash.fromBase32Hex . Text.pack - --- here -hashToString :: Hash.Hash -> String -hashToString = Hash.base32Hexs - -hashFromFilePath :: FilePath -> Maybe Hash.Hash -hashFromFilePath = hashFromString . 
takeBaseName - --- here -componentIdToString :: Reference.Id -> String -componentIdToString = Text.unpack . Reference.toText . Reference.DerivedId - --- here -componentIdFromString :: String -> Maybe Reference.Id -componentIdFromString = Reference.idFromText . Text.pack - --- here -referentFromString :: String -> Maybe Referent -referentFromString = Referent.fromText . Text.pack - -referentIdFromString :: String -> Maybe Referent.Id -referentIdFromString s = referentFromString s >>= \case - Referent.Ref (Reference.DerivedId r) -> Just $ Referent.Ref' r - Referent.Con (Reference.DerivedId r) i t -> Just $ Referent.Con' r i t - _ -> Nothing - --- here -referentToString :: Referent -> String -referentToString = Text.unpack . Referent.toText - -copyFileWithParents :: MonadIO m => FilePath -> FilePath -> m () -copyFileWithParents src dest = - unlessM (doesFileExist dest) $ do - createDirectoryIfMissing True (takeDirectory dest) - copyFile src dest - --- Use State and Lens to do some specified thing at most once, to create a file. -doFileOnce :: forall m s h. (MonadIO m, MonadState s m, Ord h) - => CodebasePath - -> SimpleLens s (Set h) -- lens to track if `h` is already done - -> (CodebasePath -> h -> FilePath) -- done if this filepath exists - -> (h -> m ()) -- do! - -> h -> m () -doFileOnce destPath l getFilename f h = - unlessM (use (l . 
to (Set.member h))) $ do - l %= Set.insert h - unlessM (doesFileExist (getFilename destPath h)) (f h) - -getTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Term v a)) -getTerm getV getA path h = S.getFromFile (V1.getTerm getV getA) (termPath path h) - -getTypeOfTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Type v a)) -getTypeOfTerm getV getA path h = S.getFromFile (V1.getType getV getA) (typePath path h) - -putTerm - :: MonadIO m - => Var v - => S.Put v - -> S.Put a - -> CodebasePath - -> Reference.Id - -> Term v a - -> Type v a - -> m () -putTerm putV putA path h e typ = do - let typeForIndexing = Type.removeAllEffectVars typ - rootTypeHash = Type.toReference typeForIndexing - typeMentions = Type.toReferenceMentions typeForIndexing - S.putWithParentDirs (V1.putTerm putV putA) (termPath path h) e - S.putWithParentDirs (V1.putType putV putA) (typePath path h) typ - -- Add the term as a dependent of its dependencies - let r = Referent.Ref (Reference.DerivedId h) - let deps = deleteComponent h $ Term.dependencies e <> Type.dependencies typ - traverse_ (touchIdFile h . dependentsDir path) deps - traverse_ (touchReferentFile r . typeMentionsIndexDir path) typeMentions - touchReferentFile r (typeIndexDir path rootTypeHash) - -getDecl :: (MonadIO m, Ord v) - => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (DD.Decl v a)) -getDecl getV getA root h = - S.getFromFile - (V1.getEither - (V1.getEffectDeclaration getV getA) - (V1.getDataDeclaration getV getA)) - (declPath root h) - -putDecl - :: MonadIO m - => Var v - => S.Put v - -> S.Put a - -> CodebasePath - -> Reference.Id - -> DD.Decl v a - -> m () -putDecl putV putA path h decl = do - S.putWithParentDirs - (V1.putEither - (V1.putEffectDeclaration putV putA) - (V1.putDataDeclaration putV putA)) - (declPath path h) - decl - traverse_ (touchIdFile h . 
dependentsDir path) deps - traverse_ addCtorToTypeIndex ctors - where - deps = deleteComponent h . DD.dependencies $ either DD.toDataDecl id decl - r = Reference.DerivedId h - decl' = either DD.toDataDecl id decl - addCtorToTypeIndex (r, typ) = do - let rootHash = Type.toReference typ - typeMentions = Type.toReferenceMentions typ - touchReferentFile r (typeIndexDir path rootHash) - traverse_ (touchReferentFile r . typeMentionsIndexDir path) typeMentions - ct = DD.constructorType decl - ctors = - [ (Referent.Con r i ct, Type.removeAllEffectVars t) - | (t,i) <- DD.constructorTypes decl' `zip` [0..] ] - -getWatch :: (MonadIO m, Ord v) - => S.Get v - -> S.Get a - -> CodebasePath - -> UF.WatchKind - -> Reference.Id - -> m (Maybe (Term v a)) -getWatch getV getA path k id = do - let wp = watchesDir path (Text.pack k) - createDirectoryIfMissing True wp - S.getFromFile (V1.getTerm getV getA) (wp componentIdToString id <> ".ub") - -putWatch - :: MonadIO m - => Var v - => S.Put v - -> S.Put a - -> CodebasePath - -> UF.WatchKind - -> Reference.Id - -> Term v a - -> m () -putWatch putV putA root k id e = - S.putWithParentDirs - (V1.putTerm putV putA) - (watchPath root k id) - e - -loadReferencesByPrefix - :: MonadIO m => FilePath -> ShortHash -> m (Set Reference.Id) -loadReferencesByPrefix dir sh = do - refs <- mapMaybe Reference.fromShortHash - . filter (SH.isPrefixOf sh) - . 
mapMaybe SH.fromString - <$> listDirectory dir - pure $ Set.fromList [ i | Reference.DerivedId i <- refs] - -termReferencesByPrefix, typeReferencesByPrefix - :: MonadIO m => CodebasePath -> ShortHash -> m (Set Reference.Id) -termReferencesByPrefix root = loadReferencesByPrefix (termsDir root) -typeReferencesByPrefix root = loadReferencesByPrefix (typesDir root) - --- returns all the derived terms and derived constructors --- that have `sh` as a prefix -termReferentsByPrefix :: MonadIO m - => (CodebasePath -> Reference.Id -> m (Maybe (DD.Decl v a))) - -> CodebasePath - -> ShortHash - -> m (Set Referent.Id) -termReferentsByPrefix _ root sh@SH.Builtin{} = - Set.map Referent.Ref' <$> termReferencesByPrefix root sh - -- builtin types don't provide any referents we could match against, - -- only decl types do. Those get handled in the next case. -termReferentsByPrefix getDecl root sh@SH.ShortHash{} = do - terms <- termReferencesByPrefix root sh - ctors <- do - -- clear out any CID from the SH, so we can use it to find a type decl - types <- typeReferencesByPrefix root sh { SH.cid = Nothing } - foldMapM collectCtors types - pure (Set.map Referent.Ref' terms <> ctors) - where - -- load up the Decl for `ref` to see how many constructors it has, - -- and what constructor type - collectCtors ref = getDecl root ref <&> \case - Nothing -> mempty - Just decl -> - Set.fromList [ con - | i <- [0 .. ctorCount-1] - , let con = Referent.Con' ref i ct - , SH.isPrefixOf sh $ Referent.toShortHashId con] - where ct = either (const CT.Effect) (const CT.Data) decl - ctorCount = length . DD.constructors' $ DD.asDataDecl decl - -branchHashesByPrefix :: MonadIO m => CodebasePath -> ShortBranchHash -> m (Set Branch.Hash) -branchHashesByPrefix codebasePath p = - fmap (Set.fromList . join) . for [branchesDir] $ \f -> do - let dir = f codebasePath - paths <- filter (isPrefixOf . Text.unpack . SBH.toText $ p) <$> listDirectory dir - let refs = paths >>= (toList . 
filenameToHash) - pure refs - where - filenameToHash :: String -> Maybe Branch.Hash - filenameToHash f = case Text.splitOn "." $ Text.pack f of - [h, "ub"] -> Causal.RawHash <$> Hash.fromBase32Hex h - _ -> Nothing - -failWith :: MonadIO m => Err -> m a -failWith = liftIO . fail . show - --- | A version of listDirectory that returns mempty if the directory doesn't exist -listDirectory :: MonadIO m => FilePath -> m [FilePath] -listDirectory dir = liftIO $ - System.Directory.listDirectory dir `catch` (\(_ :: IOException) -> pure mempty) - --- | delete all the elements of a given reference component from a set -deleteComponent :: Reference.Id -> Set Reference -> Set Reference -deleteComponent r rs = Set.difference rs - (Reference.members . Reference.componentFor . Reference.DerivedId $ r) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs deleted file mode 100644 index 1fec405f9f..0000000000 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs +++ /dev/null @@ -1,321 +0,0 @@ -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE ViewPatterns #-} - - -module Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex (syncToDirectory) where - -import Unison.Prelude - -import qualified Data.Set as Set -import Control.Lens -import Control.Monad.State.Strict ( MonadState, evalStateT ) -import Control.Monad.Writer.Strict ( MonadWriter, execWriterT ) -import qualified Control.Monad.Writer.Strict as Writer -import UnliftIO.Directory ( doesFileExist ) -import Unison.Codebase ( CodebasePath ) -import qualified Unison.Codebase.Causal as Causal -import Unison.Codebase.Branch ( Branch(..) 
) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Branch.Dependencies as BD -import qualified Unison.Codebase.Patch as Patch -import qualified Unison.Codebase.Serialization as S -import qualified Unison.Codebase.Serialization.V1 as V1 -import Unison.Codebase.SyncMode ( SyncMode ) -import qualified Unison.Codebase.SyncMode as SyncMode -import qualified Unison.Codebase.TermEdit as TermEdit -import qualified Unison.Codebase.TypeEdit as TypeEdit -import qualified Unison.DataDeclaration as DD -import qualified Unison.LabeledDependency as LD -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.Term as Term -import Unison.Type ( Type ) -import qualified Unison.Type as Type -import Unison.Var ( Var ) -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Relation as Relation -import Unison.Util.Relation ( Relation ) -import Unison.Util.Monoid (foldMapM) -import Unison.Util.Timing (time) - -import Data.Monoid.Generic -import Unison.Codebase.FileCodebase.Common - -data SyncedEntities = SyncedEntities - { _syncedTerms :: Set Reference.Id - , _syncedDecls :: Set Reference.Id - , _syncedEdits :: Set Branch.EditHash - , _syncedBranches :: Set Branch.Hash - , _dependentsIndex :: Relation Reference Reference.Id - , _typeIndex :: Relation Reference Referent.Id - , _typeMentionsIndex :: Relation Reference Referent.Id - } deriving Generic - deriving Show - deriving Semigroup via GenericSemigroup SyncedEntities - deriving Monoid via GenericMonoid SyncedEntities - -makeLenses ''SyncedEntities - -syncToDirectory :: forall m v a - . 
MonadIO m - => Var v - => S.Format v - -> S.Format a - -> CodebasePath - -> CodebasePath - -> SyncMode - -> Branch m - -> m () -syncToDirectory fmtV fmtA = syncToDirectory' (S.get fmtV) (S.get fmtA) - -data Error - = MissingBranch Branch.Hash - | MissingPatch Branch.EditHash - | MissingTerm Reference.Id - | MissingTypeOfTerm Reference.Id - | MissingDecl Reference.Id - | InvalidBranch Branch.Hash - | InvalidTerm Reference.Id - | InvalidTypeOfTerm Reference.Id - | InvalidDecl Reference.Id - deriving (Eq, Ord, Show) - -syncToDirectory' :: forall m v a - . MonadIO m - => Var v - => S.Get v - -> S.Get a - -> CodebasePath - -> CodebasePath - -> SyncMode - -> Branch m - -> m () -syncToDirectory' getV getA srcPath destPath mode newRoot = - let warnMissingEntities = False in - flip evalStateT mempty $ do -- MonadState s m - (deps, errors) <- time "Sync Branches" $ execWriterT $ - processBranches [(Branch.headHash newRoot - ,Just . pure . Branch.transform (lift . lift) $ newRoot)] - errors' <- time "Sync Definitions" $ - execWriterT $ processDependencies (BD.to' deps) - time "Write indices" $ do - lift . writeDependentsIndex =<< use dependentsIndex - lift . writeTypeIndex =<< use typeIndex - lift . writeTypeMentionsIndex =<< use typeMentionsIndex - when (warnMissingEntities) $ for_ (errors <> errors') traceShowM - where - writeDependentsIndex :: MonadIO m => Relation Reference Reference.Id -> m () - writeDependentsIndex = writeIndexHelper (\k v -> touchIdFile v (dependentsDir destPath k)) - writeTypeIndex, writeTypeMentionsIndex :: MonadIO m => Relation Reference Referent.Id -> m () - writeTypeIndex = - writeIndexHelper (\k v -> touchReferentIdFile v (typeIndexDir destPath k)) - writeTypeMentionsIndex = - writeIndexHelper (\k v -> touchReferentIdFile v (typeMentionsIndexDir destPath k)) - writeIndexHelper - :: forall m a b. 
MonadIO m => (a -> b -> m ()) -> Relation a b -> m () - writeIndexHelper touchIndexFile index = - traverse_ (uncurry touchIndexFile) (Relation.toList index) - processBranches :: forall m - . MonadIO m - => MonadState SyncedEntities m - => MonadWriter (BD.Dependencies, Set Error) m - => [(Branch.Hash, Maybe (m (Branch m)))] - -> m () - processBranches [] = pure () - -- for each branch, - processBranches ((h, mmb) : rest) = - let tellError = Writer.tell . (mempty,) . Set.singleton - tellDependencies = Writer.tell . (,mempty) in - -- if hash exists at the destination, skip it, mark it done - ifNeedsSyncing h destPath branchPath syncedBranches - (\h -> - -- else if hash exists at the source, enqueue its dependencies, copy it, mark it done - ifM (doesFileExist (branchPath srcPath h)) - (do - (branches, deps) <- BD.fromRawCausal <$> - (deserializeRawBranchDependencies tellError srcPath h) - copyFileWithParents (branchPath srcPath h) (branchPath destPath h) - tellDependencies deps - processBranches (branches ++ rest)) - -- else if it's in memory, enqueue its dependencies, write it, mark it done - case mmb of - Just mb -> do - b <- mb - let (branches, deps) = BD.fromBranch b - let causalRaw = Branch.toCausalRaw b - serializeRawBranch destPath h causalRaw - tellDependencies deps - processBranches (branches ++ rest) - -- else -- error? - Nothing -> do - tellError (MissingBranch h) - processBranches rest - ) - (processBranches rest) - processDependencies :: forall n - . MonadIO n - => MonadState SyncedEntities n - => MonadWriter (Set Error) n - => BD.Dependencies' - -> n () - processDependencies = \case - -- for each patch - -- enqueue its target term and type references - BD.Dependencies' (editHash : editHashes) terms decls -> - -- This code assumes that patches are always available on disk, - -- not ever just held in memory with `pure`. If that's not the case, - -- then we can do something similar to what we did with branches. 
- ifNeedsSyncing editHash destPath editsPath syncedEdits - (\h -> do - patch <- deserializeEdits srcPath h - -- I'm calling all the replacement terms dependents of the patches. - -- If we're supposed to replace X with Y, we don't necessarily need X, - -- but we do need Y. - let newTerms, newDecls :: [Reference.Id] - newTerms = [ i | TermEdit.Replace (Reference.DerivedId i) _ <- - toList . Relation.ran $ Patch._termEdits patch] - newDecls = [ i | TypeEdit.Replace (Reference.DerivedId i) <- - toList . Relation.ran $ Patch._typeEdits patch] - ifM (doesFileExist (editsPath srcPath h)) - (do - copyFileWithParents (editsPath srcPath h) (editsPath destPath h) - processDependencies $ - BD.Dependencies' editHashes (newTerms ++ terms) (newDecls ++ decls)) - (do - tellError (MissingPatch h) - (processDependencies $ BD.Dependencies' editHashes terms decls))) - (processDependencies $ BD.Dependencies' editHashes terms decls) - - -- for each term id - BD.Dependencies' [] (termHash : termHashes) decls -> - -- if it exists at the destination, skip it, mark it done - ifNeedsSyncing termHash destPath termPath syncedTerms - (\h -> do - -- else if it exists at the source, - ifM (doesFileExist (termPath srcPath h)) - (do - -- copy it, - -- load it, - -- enqueue its dependencies for syncing - -- enqueue its type's type dependencies for syncing - -- enqueue its type's dependencies, type & type mentions into respective indices - -- and continue - (newTerms, newDecls) <- enqueueTermDependencies h - processDependencies $ - BD.Dependencies' [] (newTerms ++ termHashes) (newDecls ++ decls) - ) - -- else -- an error? 
- (do - tellError (MissingTerm h) - (processDependencies $ BD.Dependencies' [] termHashes decls))) - (processDependencies $ BD.Dependencies' [] termHashes decls) - -- for each decl id - BD.Dependencies' [] [] (declHash : declHashes) -> - -- if it exists at the destination, skip it, mark it done - ifNeedsSyncing declHash destPath declPath syncedDecls - (\h -> do - -- else if it exists at the source, - ifM (doesFileExist (declPath srcPath h)) - -- copy it, - -- load it, - -- enqueue its type dependencies for syncing - -- for each constructor, - -- enqueue its dependencies, type, type mentions into respective indices - (do - newDecls <- copyAndIndexDecls h - processDependencies $ BD.Dependencies' [] [] (newDecls ++ declHashes)) - (do - tellError (MissingDecl h) - (processDependencies $ BD.Dependencies' [] [] declHashes))) - (processDependencies $ BD.Dependencies' [] [] declHashes) - BD.Dependencies' [] [] [] -> pure () - copyAndIndexDecls :: forall m - . MonadIO m - => MonadState SyncedEntities m - => MonadWriter (Set Error) m - => Reference.Id - -> m [Reference.Id] - copyAndIndexDecls h = (getDecl getV getA srcPath h :: m (Maybe (DD.Decl v a))) >>= \case - Just decl -> do - copyFileWithParents (declPath srcPath h) (declPath destPath h) - let referentTypes :: [(Referent.Id, Type v a)] - referentTypes = DD.declConstructorReferents h decl - `zip` (DD.constructorTypes . DD.asDataDecl) decl - flip foldMapM referentTypes \(r, typ) -> do - let dependencies = toList (Type.dependencies typ) - dependentsIndex <>= Relation.fromManyDom dependencies h - let typeForIndexing = Type.removeAllEffectVars typ - let typeReference = Type.toReference typeForIndexing - let typeMentions = Type.toReferenceMentions typeForIndexing - typeIndex <>= Relation.singleton typeReference r - typeMentionsIndex <>= Relation.fromManyDom typeMentions r - pure [ i | Reference.DerivedId i <- dependencies ] - Nothing -> tellError (InvalidDecl h) $> mempty - - enqueueTermDependencies :: forall m - . 
MonadIO m - => MonadState SyncedEntities m - => MonadWriter (Set Error) m - => Reference.Id - -> m ([Reference.Id], [Reference.Id]) - enqueueTermDependencies h = getTerm getV getA srcPath h >>= \case - Just term -> do - let (typeDeps, termDeps) = partitionEithers . fmap LD.toReference . toList - $ Term.labeledDependencies term - ifM (doesFileExist (typePath srcPath h)) - (getTypeOfTerm getV getA srcPath h >>= \case - Just typ -> do - copyFileWithParents (termPath srcPath h) (termPath destPath h) - copyFileWithParents (typePath srcPath h) (typePath destPath h) - whenM (doesFileExist $ watchPath srcPath UF.TestWatch h) $ - copyFileWithParents (watchPath srcPath UF.TestWatch h) - (watchPath destPath UF.TestWatch h) - let typeDeps' = toList (Type.dependencies typ) - let typeForIndexing = Type.removeAllEffectVars typ - let typeReference = Type.toReference typeForIndexing - let typeMentions = Type.toReferenceMentions typeForIndexing - dependentsIndex <>= - Relation.fromManyDom (typeDeps ++ typeDeps' ++ termDeps) h - typeIndex <>= - Relation.singleton typeReference (Referent.Ref' h) - typeMentionsIndex <>= - Relation.fromManyDom typeMentions (Referent.Ref' h) - let newDecls = [ i | Reference.DerivedId i <- typeDeps ++ typeDeps'] - let newTerms = [ i | Reference.DerivedId i <- termDeps ] - pure (newTerms, newDecls) - Nothing -> tellError (InvalidTypeOfTerm h) $> mempty) - (tellError (MissingTypeOfTerm h) $> mempty) - Nothing -> tellError (InvalidTerm h) $> mempty - - deserializeRawBranchDependencies :: forall m - . MonadIO m - => (Error -> m ()) - -> CodebasePath - -> Causal.Deserialize m Branch.Raw (BD.Branches m, BD.Dependencies) - deserializeRawBranchDependencies tellError root h = - S.getFromFile (V1.getCausal0 V1.getBranchDependencies) (branchPath root h) >>= \case - Nothing -> tellError (InvalidBranch h) $> Causal.RawOne mempty - Just results -> pure results - tellError :: forall m a. MonadWriter (Set a) m => a -> m () - tellError = Writer.tell . 
Set.singleton - - -- Use State and Lens to do some specified thing at most once, to create a file. - ifNeedsSyncing :: forall m s h. (MonadIO m, MonadState s m, Ord h) - => h - -> CodebasePath - -> (CodebasePath -> h -> FilePath) -- done if this filepath exists - -> SimpleLens s (Set h) -- lens to track if `h` is already done - -> (h -> m ()) -- do! - -> m () -- don't - -> m () - ifNeedsSyncing h destPath getFilename l doSync dontSync = - ifM (use (l . to (Set.member h))) dontSync $ do - l %= Set.insert h - if mode == SyncMode.Complete then doSync h - else ifM (doesFileExist (getFilename destPath h)) dontSync (doSync h) diff --git a/parser-typechecker/src/Unison/Codebase/GitError.hs b/parser-typechecker/src/Unison/Codebase/GitError.hs deleted file mode 100644 index 082a13b188..0000000000 --- a/parser-typechecker/src/Unison/Codebase/GitError.hs +++ /dev/null @@ -1,23 +0,0 @@ -module Unison.Codebase.GitError where - -import Unison.Prelude - -import Unison.Codebase (CodebasePath) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.Editor.RemoteRepo (RemoteRepo) - -data GitError = NoGit - | UnrecognizableCacheDir Text CodebasePath - | UnrecognizableCheckoutDir Text CodebasePath - | CloneException RemoteRepo String - | PushException RemoteRepo String - | PushNoOp RemoteRepo - -- url commit Diff of what would change on merge with remote - | PushDestinationHasNewStuff RemoteRepo - | NoRemoteNamespaceWithHash RemoteRepo ShortBranchHash - | RemoteNamespaceHashAmbiguous RemoteRepo ShortBranchHash (Set Branch.Hash) - | CouldntLoadRootBranch RemoteRepo Branch.Hash - | CouldntParseRootBranch RemoteRepo String - | SomeOtherError String - deriving Show diff --git a/parser-typechecker/src/Unison/Codebase/MainTerm.hs b/parser-typechecker/src/Unison/Codebase/MainTerm.hs deleted file mode 100644 index ca9dc83ce5..0000000000 --- a/parser-typechecker/src/Unison/Codebase/MainTerm.hs +++ /dev/null @@ -1,72 
+0,0 @@ -{-# LANGUAGE GADTs #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE PartialTypeSignatures #-} - --- | Find a computation of type '{IO} () in the codebase. -module Unison.Codebase.MainTerm where - -import Unison.Prelude - -import Unison.Parser ( Ann ) -import qualified Unison.Parser as Parser -import qualified Unison.Term as Term -import Unison.Term ( Term ) -import Unison.Var ( Var ) -import qualified Unison.Builtin.Decls as DD -import qualified Unison.HashQualified as HQ -import qualified Unison.Referent as Referent -import qualified Unison.Names3 as Names3 -import Unison.Reference ( Reference ) -import qualified Unison.Type as Type -import Unison.Type ( Type ) -import qualified Unison.Typechecker as Typechecker -import Unison.Runtime.IOSource ( ioReference ) - -data MainTerm v - = NotAFunctionName String - | NotFound String - | BadType String - | Success HQ.HashQualified (Term v Ann) (Type v Ann) - -getMainTerm - :: (Monad m, Var v) - => (Reference -> m (Maybe (Type v Ann))) - -> Names3.Names0 - -> String - -> Type.Type v Ann - -> m (MainTerm v) -getMainTerm loadTypeOfTerm parseNames0 mainName mainType = - case HQ.fromString mainName of - Nothing -> pure (NotAFunctionName mainName) - Just hq -> do - let refs = Names3.lookupHQTerm hq (Names3.Names parseNames0 mempty) - let a = Parser.External - case toList refs of - [Referent.Ref ref] -> do - typ <- loadTypeOfTerm ref - case typ of - Just typ | Typechecker.isSubtype typ mainType -> do - let tm = DD.forceTerm a a (Term.ref a ref) - return (Success hq tm typ) - _ -> pure (BadType mainName) - _ -> pure (NotFound mainName) - --- {IO} () -ioUnit :: Ord v => a -> Type.Type v a -ioUnit a = Type.effect a [Type.ref a ioReference] (Type.ref a DD.unitRef) - -builtinIOUnit :: Ord v => a -> Type.Type v a -builtinIOUnit a - = Type.effect1 a (Type.builtinIO a) (Type.ref a DD.unitRef) - --- '{IO} () -nullaryMain :: Ord v => a -> Type.Type v a -nullaryMain a - = Type.arrow a (Type.ref a DD.unitRef) (ioUnit a) - 
-builtinMain :: Ord v => a -> Type.Type v a -builtinMain a - = Type.arrow a (Type.ref a DD.unitRef) (builtinIOUnit a) - -mainTypes :: Ord v => a -> [Type v a] -mainTypes a = [nullaryMain a] diff --git a/parser-typechecker/src/Unison/Codebase/Metadata.hs b/parser-typechecker/src/Unison/Codebase/Metadata.hs deleted file mode 100644 index 1df8b070d2..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Metadata.hs +++ /dev/null @@ -1,72 +0,0 @@ -module Unison.Codebase.Metadata where - -import Unison.Prelude - -import Unison.Reference (Reference) -import Unison.Util.Star3 (Star3) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.Util.Star3 as Star3 -import Unison.Util.Relation (Relation) -import qualified Unison.Util.Relation as R -import Unison.Util.Relation4 (Relation4) -import qualified Unison.Util.Relation4 as R4 -import qualified Unison.Util.List as List - -type Type = Reference -type Value = Reference - --- keys can be terms or types -type Metadata = Map Type (Set Value) - --- `a` is generally the type of references or hashes --- `n` is generally the the type of name associated with the references --- `Type` is the type of metadata. Duplicate info to speed up certain queries. --- `(Type, Value)` is the metadata value itself along with its type. -type Star a n = Star3 a n Type (Type, Value) -type R4 a n = R4.Relation4 a n Type Value - -starToR4 :: (Ord r, Ord n) => Star r n -> Relation4 r n Type Value -starToR4 = R4.fromList . fmap (\(r,n,_,(t,v)) -> (r,n,t,v)) . Star3.toList - -hasMetadata :: Ord a => a -> Type -> Value -> Star a n -> Bool -hasMetadata a t v = Set.member (t, v) . R.lookupDom a . 
Star3.d3 - -inserts :: (Ord a, Ord n) => [(a, Type, Value)] -> Star a n -> Star a n -inserts tups s = foldl' (flip insert) s tups - -insertWithMetadata - :: (Ord a, Ord n) => (a, Metadata) -> Star a n -> Star a n -insertWithMetadata (a, md) = - inserts [ (a, ty, v) | (ty, vs) <- Map.toList md, v <- toList vs ] - -insert :: (Ord a, Ord n) => (a, Type, Value) -> Star a n -> Star a n -insert (a, ty, v) = Star3.insertD23 (a, ty, (ty,v)) - -delete :: (Ord a, Ord n) => (a, Type, Value) -> Star a n -> Star a n -delete (a, ty, v) s = let - s' = Star3.deleteD3 (a, (ty,v)) s - -- if (ty,v) is the last metadata of type ty - -- we also delete (a, ty) from the d2 index - metadataByType = List.multimap (toList (R.lookupDom a (Star3.d3 s))) - in - case Map.lookup ty metadataByType of - Just vs | all (== v) vs -> Star3.deleteD2 (a, ty) s' - _ -> s' - --- parallel composition - commutative and associative -merge :: Metadata -> Metadata -> Metadata -merge = Map.unionWith (<>) - --- sequential composition, right-biased -append :: Metadata -> Metadata -> Metadata -append = Map.unionWith (flip const) - -empty :: Metadata -empty = mempty - -singleton :: Type -> Value -> Metadata -singleton ty v = Map.singleton ty (Set.singleton v) - -toRelation :: Star3 a n x y -> Relation a n -toRelation = Star3.d1 diff --git a/parser-typechecker/src/Unison/Codebase/NameEdit.hs b/parser-typechecker/src/Unison/Codebase/NameEdit.hs deleted file mode 100644 index 3a872e1b0a..0000000000 --- a/parser-typechecker/src/Unison/Codebase/NameEdit.hs +++ /dev/null @@ -1,15 +0,0 @@ -module Unison.Codebase.NameEdit where - -import Unison.Prelude - -import Unison.Reference (Reference) -import Unison.Hashable (Hashable, tokens) - -data NameEdit = - NameEdit { added :: Set Reference, removed :: Set Reference } - -instance Semigroup NameEdit where - NameEdit add1 del1 <> NameEdit add2 del2 = NameEdit (add1 <> add2) (del1 <> del2) - -instance Hashable NameEdit where - tokens (NameEdit added removed) = tokens (toList 
added, toList removed) diff --git a/parser-typechecker/src/Unison/Codebase/Patch.hs b/parser-typechecker/src/Unison/Codebase/Patch.hs deleted file mode 100644 index a5cbdd5902..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Patch.hs +++ /dev/null @@ -1,136 +0,0 @@ -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.Codebase.Patch where - -import Unison.Prelude hiding (empty) - -import Prelude hiding (head,read,subtract) - -import Control.Lens hiding ( children, cons, transform ) -import qualified Data.Set as Set -import Unison.Codebase.TermEdit ( TermEdit, Typing(Same) ) -import qualified Unison.Codebase.TermEdit as TermEdit -import Unison.Codebase.TypeEdit ( TypeEdit ) -import qualified Unison.Codebase.TypeEdit as TypeEdit -import Unison.Hashable ( Hashable ) -import qualified Unison.Hashable as H -import Unison.Reference ( Reference ) -import qualified Unison.Util.Relation as R -import Unison.Util.Relation ( Relation ) -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency ( LabeledDependency ) - -data Patch = Patch - { _termEdits :: Relation Reference TermEdit - , _typeEdits :: Relation Reference TypeEdit - } deriving (Eq, Ord, Show) - -data PatchDiff = PatchDiff - { _addedTermEdits :: Relation Reference TermEdit - , _addedTypeEdits :: Relation Reference TypeEdit - , _removedTermEdits :: Relation Reference TermEdit - , _removedTypeEdits :: Relation Reference TypeEdit - } deriving (Eq, Ord, Show) - -makeLenses ''Patch -makeLenses ''PatchDiff - -diff :: Patch -> Patch -> PatchDiff -diff new old = PatchDiff - { _addedTermEdits = R.difference (view termEdits new) (view termEdits old) - , _addedTypeEdits = R.difference (view typeEdits new) (view typeEdits old) - , _removedTypeEdits = R.difference (view typeEdits old) (view typeEdits new) - , _removedTermEdits = R.difference (view termEdits old) (view termEdits new) - } - -labeledDependencies :: Patch -> Set 
LabeledDependency -labeledDependencies Patch {..} = - Set.map LD.termRef (R.dom _termEdits) - <> Set.fromList - (fmap LD.termRef $ TermEdit.references =<< toList (R.ran _termEdits)) - <> Set.map LD.typeRef (R.dom _typeEdits) - <> Set.fromList - (fmap LD.typeRef $ TypeEdit.references =<< toList (R.ran _typeEdits)) - -empty :: Patch -empty = Patch mempty mempty - -isEmpty :: Patch -> Bool -isEmpty p = p == empty - -allReferences :: Patch -> Set Reference -allReferences p = typeReferences p <> termReferences p where - typeReferences p = Set.fromList - [ r | (old, TypeEdit.Replace new) <- R.toList (_typeEdits p) - , r <- [old, new] ] - termReferences p = Set.fromList - [ r | (old, TermEdit.Replace new _) <- R.toList (_termEdits p) - , r <- [old, new] ] - --- | Returns the set of references which are the target of an arrow in the patch -allReferenceTargets :: Patch -> Set Reference -allReferenceTargets p = typeReferences p <> termReferences p where - typeReferences p = Set.fromList - [ new | (_, TypeEdit.Replace new) <- R.toList (_typeEdits p) ] - termReferences p = Set.fromList - [ new | (_, TermEdit.Replace new _) <- R.toList (_termEdits p) ] - -updateTerm :: (Reference -> Reference -> Typing) - -> Reference -> TermEdit -> Patch -> Patch -updateTerm typing r edit p = - -- get D ~= lookupRan r - -- for each d ∈ D, remove (d, r) and add (d, r') - -- add (r, r') and remove (r', r') - let deleteCycle = case edit of - TermEdit.Deprecate -> id - TermEdit.Replace r' _ -> R.delete r' (TermEdit.Replace r' Same) - edits' :: Relation Reference TermEdit - edits' = deleteCycle . R.insert r edit . 
R.map f $ _termEdits p - f (x, TermEdit.Replace y _) | y == r = case edit of - TermEdit.Replace r' _ -> (x, TermEdit.Replace r' (typing x r')) - TermEdit.Deprecate -> (x, TermEdit.Deprecate) - f p = p - in p { _termEdits = edits' } - -updateType :: Reference -> TypeEdit -> Patch -> Patch -updateType r edit p = - let deleteCycle = case edit of - TypeEdit.Deprecate -> id - TypeEdit.Replace r' -> R.delete r' (TypeEdit.Replace r') - edits' :: Relation Reference TypeEdit - edits' = deleteCycle . R.insert r edit . R.map f $ _typeEdits p - f (x, TypeEdit.Replace y) | y == r = case edit of - TypeEdit.Replace r' -> (x, TypeEdit.Replace r') - TypeEdit.Deprecate -> (x, TypeEdit.Deprecate) - f p = p - in p { _typeEdits = edits' } - -conflicts :: Patch -> Patch -conflicts Patch{..} = - Patch (R.filterManyDom _termEdits) (R.filterManyDom _typeEdits) - -instance Semigroup Patch where - a <> b = Patch (_termEdits a <> _termEdits b) - (_typeEdits a <> _typeEdits b) - -instance Monoid Patch where - mappend = (<>) - mempty = Patch mempty mempty - -instance Hashable Patch where - tokens e = [ H.Hashed (H.accumulate (H.tokens (_termEdits e))), - H.Hashed (H.accumulate (H.tokens (_typeEdits e))) ] - -instance Semigroup PatchDiff where - a <> b = PatchDiff - { _addedTermEdits = _addedTermEdits a <> _addedTermEdits b - , _addedTypeEdits = _addedTypeEdits a <> _addedTypeEdits b - , _removedTermEdits = _removedTermEdits a <> _removedTermEdits b - , _removedTypeEdits = _removedTypeEdits a <> _removedTypeEdits b - } - -instance Monoid PatchDiff where - mappend = (<>) - mempty = PatchDiff mempty mempty mempty mempty diff --git a/parser-typechecker/src/Unison/Codebase/Path.hs b/parser-typechecker/src/Unison/Codebase/Path.hs deleted file mode 100644 index ca0df22ea6..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Path.hs +++ /dev/null @@ -1,440 +0,0 @@ -{-# LANGUAGE OverloadedLists #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} - 
-module Unison.Codebase.Path where - -import Unison.Prelude hiding (empty, toList) - -import Data.Bifunctor ( first ) -import Data.List.Extra ( stripPrefix, dropPrefix ) -import Control.Lens hiding (unsnoc, cons, snoc) -import qualified Control.Lens as Lens -import qualified Data.Foldable as Foldable -import qualified Data.Text as Text -import Data.Sequence (Seq((:<|),(:|>) )) -import qualified Data.Sequence as Seq -import Unison.Name ( Name ) -import qualified Unison.Name as Name -import Unison.Util.Monoid (intercalateMap) -import qualified Unison.Lexer as Lexer -import qualified Unison.HashQualified' as HQ' -import qualified Unison.ShortHash as SH - -import Unison.NameSegment ( NameSegment(NameSegment)) -import qualified Unison.NameSegment as NameSegment - --- `Foo.Bar.baz` becomes ["Foo", "Bar", "baz"] -newtype Path = Path { toSeq :: Seq NameSegment } deriving (Eq, Ord) - -newtype Absolute = Absolute { unabsolute :: Path } deriving (Eq,Ord) -newtype Relative = Relative { unrelative :: Path } deriving (Eq,Ord) -newtype Path' = Path' { unPath' :: Either Absolute Relative } - deriving (Eq,Ord) - -isCurrentPath :: Path' -> Bool -isCurrentPath p = p == currentPath - -currentPath :: Path' -currentPath = Path' (Right (Relative (Path mempty))) - -isRoot' :: Path' -> Bool -isRoot' = either isRoot (const False) . unPath' - -isRoot :: Absolute -> Bool -isRoot = Seq.null . toSeq . unabsolute - -absoluteToPath' :: Absolute -> Path' -absoluteToPath' abs = Path' (Left abs) - -instance Show Path' where - show (Path' (Left abs)) = show abs - show (Path' (Right rel)) = show rel - -instance Show Absolute where - show s = "." ++ show (unabsolute s) - -instance Show Relative where - show = show . 
unrelative - -unsplit' :: Split' -> Path' -unsplit' (Path' (Left (Absolute p)), seg) = Path' (Left (Absolute (unsplit (p, seg)))) -unsplit' (Path' (Right (Relative p)), seg) = Path' (Right (Relative (unsplit (p, seg)))) - -unsplit :: Split -> Path -unsplit (Path p, a) = Path (p :|> a) - -unsplitHQ :: HQSplit -> HQ'.HashQualified' Path -unsplitHQ (p, a) = fmap (snoc p) a - -unsplitHQ' :: HQSplit' -> HQ'.HashQualified' Path' -unsplitHQ' (p, a) = fmap (snoc' p) a - -type Split = (Path, NameSegment) -type HQSplit = (Path, HQ'.HQSegment) - -type Split' = (Path', NameSegment) -type HQSplit' = (Path', HQ'.HQSegment) - -type SplitAbsolute = (Absolute, NameSegment) -type HQSplitAbsolute = (Absolute, HQ'.HQSegment) - --- examples: --- unprefix .foo.bar .blah == .blah (absolute paths left alone) --- unprefix .foo.bar id == id (relative paths starting w/ nonmatching prefix left alone) --- unprefix .foo.bar foo.bar.baz == baz (relative paths w/ common prefix get stripped) -unprefix :: Absolute -> Path' -> Path -unprefix (Absolute prefix) (Path' p) = case p of - Left abs -> unabsolute abs - Right (unrelative -> rel) -> fromList $ dropPrefix (toList prefix) (toList rel) - --- too many types -prefix :: Absolute -> Path' -> Path -prefix (Absolute (Path prefix)) (Path' p) = case p of - Left (unabsolute -> abs) -> abs - Right (unrelative -> rel) -> Path $ prefix <> toSeq rel - --- .libs.blah.poo is Absolute --- libs.blah.poo is Relative --- Left is some parse error tbd -parsePath' :: String -> Either String Path' -parsePath' p = case parsePathImpl' p of - Left e -> Left e - Right (p, "") -> Right p - Right (p, rem) -> - case (first show . (Lexer.wordyId0 <> Lexer.symbolyId0) <> unit') rem of - Right (seg, "") -> Right (unsplit' (p, NameSegment . 
Text.pack $ seg)) - Right (_, rem) -> - Left ("extra characters after " <> show p <> ": " <> show rem) - Left e -> Left e - --- implementation detail of parsePath' and parseSplit' --- foo.bar.baz.34 becomes `Right (foo.bar.baz, "34") --- foo.bar.baz becomes `Right (foo.bar, "baz") --- baz becomes `Right (, "baz") --- foo.bar.baz#a8fj becomes `Left`; we don't hash-qualify paths. --- TODO: Get rid of this thing. -parsePathImpl' :: String -> Either String (Path', String) -parsePathImpl' p = case p of - "." -> Right (Path' . Left $ absoluteEmpty, "") - '.' : p -> over _1 (Path' . Left . Absolute . fromList) <$> segs p - p -> over _1 (Path' . Right . Relative . fromList) <$> segs p - where - go f p = case f p of - Right (a, "") -> case Lens.unsnoc (Name.segments' $ Text.pack a) of - Nothing -> Left "empty path" - Just (segs, last) -> Right (NameSegment <$> segs, Text.unpack last) - Right (segs, '.' : rem) -> - let segs' = Name.segments' (Text.pack segs) - in Right (NameSegment <$> segs', rem) - Right (segs, rem) -> - Left $ "extra characters after " <> segs <> ": " <> show rem - Left e -> Left e - segs p = go (first show . 
(Lexer.symbolyId <> Lexer.wordyId) <> unit') p - -wordyNameSegment, definitionNameSegment :: String -> Either String NameSegment -wordyNameSegment s = case Lexer.wordyId0 s of - Left e -> Left (show e) - Right (a, "") -> Right (NameSegment (Text.pack a)) - Right (a, rem) -> - Left $ "trailing characters after " <> show a <> ": " <> show rem - -optionalWordyNameSegment :: String -> Either String NameSegment -optionalWordyNameSegment "" = Right $ NameSegment "" -optionalWordyNameSegment s = wordyNameSegment s - --- Parse a name segment like "()" -unit' :: String -> Either String (String, String) -unit' s = case stripPrefix "()" s of - Nothing -> Left $ "Expected () but found: " <> s - Just rem -> Right ("()", rem) - -unit :: String -> Either String NameSegment -unit s = case unit' s of - Right (_, "" ) -> Right $ NameSegment "()" - Right (_, rem) -> Left $ "trailing characters after (): " <> show rem - Left _ -> Left $ "I don't know how to parse " <> s - - -definitionNameSegment s = wordyNameSegment s <> symbolyNameSegment s <> unit s - where - symbolyNameSegment s = case Lexer.symbolyId0 s of - Left e -> Left (show e) - Right (a, "") -> Right (NameSegment (Text.pack a)) - Right (a, rem) -> - Left $ "trailing characters after " <> show a <> ": " <> show rem - --- parseSplit' wordyNameSegment "foo.bar.baz" returns Right (foo.bar, baz) --- parseSplit' wordyNameSegment "foo.bar.+" returns Left err --- parseSplit' definitionNameSegment "foo.bar.+" returns Right (foo.bar, +) -parseSplit' :: (String -> Either String NameSegment) - -> String - -> Either String Split' -parseSplit' lastSegment p = do - (p', rem) <- parsePathImpl' p - seg <- lastSegment rem - pure (p', seg) - -parseShortHashOrHQSplit' :: String -> Either String (Either SH.ShortHash HQSplit') -parseShortHashOrHQSplit' s = - case Text.breakOn "#" $ Text.pack s of - ("","") -> error $ "encountered empty string parsing '" <> s <> "'" - (n,"") -> do - (p, rem) <- parsePathImpl' (Text.unpack n) - seg <- 
definitionNameSegment rem - pure $ Right (p, HQ'.NameOnly seg) - ("", sh) -> do - sh <- maybeToRight (shError s) . SH.fromText $ sh - pure $ Left sh - (n, sh) -> do - (p, rem) <- parsePathImpl' (Text.unpack n) - seg <- definitionNameSegment rem - hq <- maybeToRight (shError s) . - fmap (\sh -> (p, HQ'.HashQualified seg sh)) . - SH.fromText $ sh - pure $ Right hq - where - shError s = "couldn't parse shorthash from " <> s - -parseHQSplit :: String -> Either String HQSplit -parseHQSplit s = case parseHQSplit' s of - Right (Path' (Right (Relative p)), hqseg) -> Right (p, hqseg) - Right (Path' Left{}, _) -> - Left $ "Sorry, you can't use an absolute name like " <> s <> " here." - Left e -> Left e - -parseHQSplit' :: String -> Either String HQSplit' -parseHQSplit' s = case Text.breakOn "#" $ Text.pack s of - ("", "") -> error $ "encountered empty string parsing '" <> s <> "'" - ("", _ ) -> Left "Sorry, you can't use a hash-only reference here." - (n , "") -> do - (p, rem) <- parsePath n - seg <- definitionNameSegment rem - pure (p, HQ'.NameOnly seg) - (n, sh) -> do - (p, rem) <- parsePath n - seg <- definitionNameSegment rem - maybeToRight (shError s) - . fmap (\sh -> (p, HQ'.HashQualified seg sh)) - . 
SH.fromText - $ sh - where - shError s = "couldn't parse shorthash from " <> s - parsePath n = do - x <- parsePathImpl' $ Text.unpack n - pure $ case x of - (Path' (Left e), "") | e == absoluteEmpty -> (relativeEmpty', ".") - x -> x - -toAbsoluteSplit :: Absolute -> (Path', a) -> (Absolute, a) -toAbsoluteSplit a (p, s) = (resolve a p, s) - -fromSplit' :: (Path', a) -> (Path, a) -fromSplit' (Path' (Left (Absolute p)), a) = (p, a) -fromSplit' (Path' (Right (Relative p)), a) = (p, a) - -fromAbsoluteSplit :: (Absolute, a) -> (Path, a) -fromAbsoluteSplit (Absolute p, a) = (p, a) - -absoluteEmpty :: Absolute -absoluteEmpty = Absolute empty - -relativeEmpty' :: Path' -relativeEmpty' = Path' (Right (Relative empty)) - -relativeSingleton :: NameSegment -> Relative -relativeSingleton = Relative . Path . Seq.singleton - -toPath' :: Path -> Path' -toPath' = \case - Path (NameSegment "" :<| tail) -> Path' . Left . Absolute . Path $ tail - p -> Path' . Right . Relative $ p - -toList :: Path -> [NameSegment] -toList = Foldable.toList . toSeq - -fromList :: [NameSegment] -> Path -fromList = Path . Seq.fromList - -ancestors :: Absolute -> Seq Absolute -ancestors (Absolute (Path segments)) = Absolute . Path <$> Seq.inits segments - -hqSplitFromName' :: Name -> Maybe HQSplit' -hqSplitFromName' = fmap (fmap HQ'.fromName) . Lens.unsnoc . fromName' - -splitFromName :: Name -> Maybe Split -splitFromName = unsnoc . fromName - -unprefixName :: Absolute -> Name -> Name -unprefixName prefix = toName . unprefix prefix . fromName' - -prefixName :: Absolute -> Name -> Name -prefixName p = toName . prefix p . 
fromName' - -singleton :: NameSegment -> Path -singleton n = fromList [n] - -cons :: NameSegment -> Path -> Path -cons = Lens.cons - -snoc :: Path -> NameSegment -> Path -snoc = Lens.snoc - -snoc' :: Path' -> NameSegment -> Path' -snoc' = Lens.snoc - -unsnoc :: Path -> Maybe (Path, NameSegment) -unsnoc = Lens.unsnoc - -uncons :: Path -> Maybe (NameSegment, Path) -uncons = Lens.uncons - ---asDirectory :: Path -> Text ---asDirectory p = case toList p of --- NameSegment "_root_" : (Seq.fromList -> tail) -> --- "/" <> asDirectory (Path tail) --- other -> Text.intercalate "/" . fmap NameSegment.toText $ other - --- > Path.fromName . Name.unsafeFromText $ ".Foo.bar" --- /Foo/bar --- Int./ -> "Int"/"/" --- pkg/Int.. -> "pkg"/"Int"/"." --- Int./foo -> error because "/foo" is not a valid NameSegment --- and "Int." is not a valid NameSegment --- and "Int" / "" / "foo" is not a valid path (internal "") --- todo: fromName needs to be a little more complicated if we want to allow --- identifiers called Function.(.) -fromName :: Name -> Path -fromName = fromList . Name.segments - -fromName' :: Name -> Path' -fromName' n = case take 1 (Name.toString n) of - "." -> Path' . Left . Absolute $ Path seq - _ -> Path' . Right $ Relative path - where - path = fromName n - seq = toSeq path - -toName :: Path -> Name -toName = Name.unsafeFromText . toText - --- | Convert a Path' to a Name -toName' :: Path' -> Name -toName' = Name.unsafeFromText . toText' - --- Returns the nearest common ancestor, along with the --- two inputs relativized to that ancestor. 
-relativeToAncestor :: Path -> Path -> (Path, Path, Path) -relativeToAncestor (Path a) (Path b) = case (a, b) of - (ha :<| ta, hb :<| tb) | ha == hb -> - let (ancestor, relA, relB) = relativeToAncestor (Path ta) (Path tb) - in (ha `cons` ancestor, relA, relB) - -- nothing in common - _ -> (empty, Path a, Path b) - -pattern Parent h t = Path (NameSegment h :<| t) -pattern Empty = Path Seq.Empty - -empty :: Path -empty = Path mempty - -instance Show Path where - show = Text.unpack . toText - -toText :: Path -> Text -toText (Path nss) = intercalateMap "." NameSegment.toText nss - -fromText :: Text -> Path -fromText = \case - "" -> empty - t -> fromList $ NameSegment <$> Name.segments' t - -toText' :: Path' -> Text -toText' = \case - Path' (Left (Absolute path)) -> Text.cons '.' (toText path) - Path' (Right (Relative path)) -> toText path - -instance Cons Path Path NameSegment NameSegment where - _Cons = prism (uncurry cons) uncons where - cons :: NameSegment -> Path -> Path - cons ns (Path p) = Path (ns :<| p) - uncons :: Path -> Either Path (NameSegment, Path) - uncons p = case p of - Path (hd :<| tl) -> Right (hd, Path tl) - _ -> Left p - -instance Snoc Relative Relative NameSegment NameSegment where - _Snoc = prism (uncurry snocRelative) $ \case - Relative (Lens.unsnoc -> Just (s,a)) -> Right (Relative s,a) - e -> Left e - where - snocRelative :: Relative -> NameSegment -> Relative - snocRelative r n = Relative . (`Lens.snoc` n) $ unrelative r - -instance Snoc Absolute Absolute NameSegment NameSegment where - _Snoc = prism (uncurry snocAbsolute) $ \case - Absolute (Lens.unsnoc -> Just (s,a)) -> Right (Absolute s, a) - e -> Left e - where - snocAbsolute :: Absolute -> NameSegment -> Absolute - snocAbsolute a n = Absolute . 
(`Lens.snoc` n) $ unabsolute a - -instance Snoc Path Path NameSegment NameSegment where - _Snoc = prism (uncurry snoc) unsnoc - where - unsnoc :: Path -> Either Path (Path, NameSegment) - unsnoc = \case - Path (s Seq.:|> a) -> Right (Path s, a) - e -> Left e - snoc :: Path -> NameSegment -> Path - snoc (Path p) ns = Path (p <> pure ns) - -instance Snoc Path' Path' NameSegment NameSegment where - _Snoc = prism (uncurry snoc') $ \case - Path' (Left (Lens.unsnoc -> Just (s,a))) -> Right (Path' (Left s), a) - Path' (Right (Lens.unsnoc -> Just (s,a))) -> Right (Path' (Right s), a) - e -> Left e - where - snoc' :: Path' -> NameSegment -> Path' - snoc' (Path' e) n = case e of - Left abs -> Path' (Left . Absolute $ Lens.snoc (unabsolute abs) n) - Right rel -> Path' (Right . Relative $ Lens.snoc (unrelative rel) n) - -instance Snoc Split' Split' NameSegment NameSegment where - _Snoc = prism (uncurry snoc') $ \case -- unsnoc - (Lens.unsnoc -> Just (s, a), ns) -> Right ((s, a), ns) - e -> Left e - where - snoc' :: Split' -> NameSegment -> Split' - snoc' (p, a) n = (Lens.snoc p a, n) - -class Resolve l r o where - resolve :: l -> r -> o - -instance Resolve Path Path Path where - resolve (Path l) (Path r) = Path (l <> r) - -instance Resolve Relative Relative Relative where - resolve (Relative (Path l)) (Relative (Path r)) = Relative (Path (l <> r)) - -instance Resolve Absolute Relative Absolute where - resolve (Absolute l) (Relative r) = Absolute (resolve l r) - -instance Resolve Path' Path' Path' where - resolve _ a@(Path' Left{}) = a - resolve (Path' (Left a)) (Path' (Right r)) = Path' (Left (resolve a r)) - resolve (Path' (Right r1)) (Path' (Right r2)) = Path' (Right (resolve r1 r2)) - -instance Resolve Path' Split' Path' where - resolve l r = resolve l (unsplit' r) - -instance Resolve Path' Split' Split' where - resolve l (r, ns) = (resolve l r, ns) - -instance Resolve Absolute HQSplit HQSplitAbsolute where - resolve l (r, hq) = (resolve l (Relative r), hq) - -instance 
Resolve Absolute Path' Absolute where - resolve _ (Path' (Left a)) = a - resolve a (Path' (Right r)) = resolve a r diff --git a/parser-typechecker/src/Unison/Codebase/Reflog.hs b/parser-typechecker/src/Unison/Codebase/Reflog.hs deleted file mode 100644 index 07df0bd380..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Reflog.hs +++ /dev/null @@ -1,30 +0,0 @@ -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.Reflog where - -import Data.Text (Text) -import qualified Data.Text as Text -import Unison.Codebase.Branch (Hash) -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Hash as Hash - -data Entry = - Entry - { from :: Hash - , to :: Hash - , reason :: Text - } - -fromText :: Text -> Maybe Entry -fromText t = - case Text.words t of - (Hash.fromBase32Hex -> Just old) : (Hash.fromBase32Hex -> Just new) : (Text.unwords -> reason) -> - Just $ Entry (Causal.RawHash old) (Causal.RawHash new) reason - _ -> Nothing - - -toText :: Entry -> Text -toText (Entry old new reason) = - Text.unwords [ Hash.base32Hex . Causal.unRawHash $ old - , Hash.base32Hex . 
Causal.unRawHash $ new - , reason ] diff --git a/parser-typechecker/src/Unison/Codebase/Runtime.hs b/parser-typechecker/src/Unison/Codebase/Runtime.hs deleted file mode 100644 index 08bea36724..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Runtime.hs +++ /dev/null @@ -1,132 +0,0 @@ -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.Runtime where - -import Unison.Prelude - -import qualified Unison.ABT as ABT -import Data.Bifunctor (first) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.Codebase.CodeLookup as CL -import qualified Unison.Codebase as Codebase -import Unison.UnisonFile ( UnisonFile ) -import Unison.Parser ( Ann ) -import qualified Unison.Term as Term -import Unison.Type ( Type ) -import Unison.Var ( Var ) -import qualified Unison.Var as Var -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import qualified Unison.UnisonFile as UF -import Unison.Builtin.Decls (pattern TupleTerm', tupleTerm) -import qualified Unison.Util.Pretty as P -import qualified Unison.PrettyPrintEnv as PPE - -type Error = P.Pretty P.ColorText -type Term v = Term.Term v () - -data Runtime v = Runtime - { terminate :: IO () - , evaluate - :: CL.CodeLookup v IO () - -> PPE.PrettyPrintEnv - -> Term v - -> IO (Either Error (Term v)) - , mainType :: Type v Ann - } - -type IsCacheHit = Bool - -noCache :: Reference -> IO (Maybe (Term v)) -noCache _ = pure Nothing - --- Evaluates the watch expressions in the file, returning a `Map` of their --- results. This has to be a bit fancy to handle that the definitions in the --- file depend on each other and evaluation must proceed in a way that respects --- these dependencies. --- --- Note: The definitions in the file are hashed and looked up in --- `evaluationCache`. If that returns a result, evaluation of that definition --- can be skipped. -evaluateWatches - :: forall v a - . 
Var v - => CL.CodeLookup v IO a - -> PPE.PrettyPrintEnv - -> (Reference -> IO (Maybe (Term v))) - -> Runtime v - -> UnisonFile v a - -> IO (Either Error - ( [(v, Term v)] - -- Map watchName (loc, hash, expression, value, isHit) - , Map v (a, UF.WatchKind, Reference, Term v, Term v, IsCacheHit) - )) - -- IO (bindings :: [v,Term v], map :: ^^^) -evaluateWatches code ppe evaluationCache rt uf = do - -- 1. compute hashes for everything in the file - let m :: Map v (Reference, Term.Term v a) - m = first Reference.DerivedId <$> - Term.hashComponents (Map.fromList (UF.terms uf <> UF.allWatches uf)) - watches = Set.fromList (fst <$> UF.allWatches uf) - watchKinds :: Map v UF.WatchKind - watchKinds = Map.fromList [ (v, k) | (k, ws) <- Map.toList (UF.watches uf) - , (v,_) <- ws ] - unann = Term.amap (const ()) - -- 2. use the cache to lookup things already computed - m' <- fmap Map.fromList . for (Map.toList m) $ \(v, (r, t)) -> do - o <- evaluationCache r - case o of - Nothing -> pure (v, (r, ABT.annotation t, unann t, False)) - Just t' -> pure (v, (r, ABT.annotation t, t', True)) - -- 3. create a big ol' let rec whose body is a big tuple of all watches - let rv :: Map Reference v - rv = Map.fromList [ (r, v) | (v, (r, _)) <- Map.toList m ] - bindings :: [(v, Term v)] - bindings = [ (v, unref rv b) | (v, (_, _, b, _)) <- Map.toList m' ] - watchVars = [ Term.var () v | v <- toList watches ] - bigOl'LetRec = Term.letRec' True bindings (tupleTerm watchVars) - cl = void $ CL.fromUnisonFile uf <> code - -- 4. 
evaluate it and get all the results out of the tuple, then - -- create the result Map - out <- evaluate rt cl ppe bigOl'LetRec - case out of - Right out -> do - let - (bindings, results) = case out of - TupleTerm' results -> (mempty, results) - Term.LetRecNamed' bs (TupleTerm' results) -> (bs, results) - _ -> error $ "Evaluation should produce a tuple, but gave: " ++ show out - let go v eval (ref, a, uneval, isHit) = - (a, Map.findWithDefault (die v) v watchKinds, - ref, uneval, Term.etaNormalForm eval, isHit) - watchMap = Map.intersectionWithKey go - (Map.fromList (toList watches `zip` results)) m' - die v = error $ "not sure what kind of watch this is: " <> show v - pure $ Right (bindings, watchMap) - Left e -> pure (Left e) - where - -- unref :: Map Reference v -> Term.Term v a -> Term.Term v a - unref rv t = ABT.visitPure go t - where - go t@(Term.Ref' r@(Reference.DerivedId _)) = case Map.lookup r rv of - Nothing -> Nothing - Just v -> Just (Term.var (ABT.annotation t) v) - go _ = Nothing - -evaluateTerm - :: (Var v, Monoid a) - => CL.CodeLookup v IO a - -> PPE.PrettyPrintEnv - -> Runtime v - -> Term.Term v a - -> IO (Either Error (Term v)) -evaluateTerm codeLookup ppe rt tm = do - let uf = UF.UnisonFileId mempty mempty mempty - (Map.singleton UF.RegularWatch [(Var.nameds "result", tm)]) - selfContained <- Codebase.makeSelfContained' codeLookup uf - r <- evaluateWatches codeLookup ppe noCache rt selfContained - pure $ r <&> \(_,map) -> - let [(_loc, _kind, _hash, _src, value, _isHit)] = Map.elems map - in value diff --git a/parser-typechecker/src/Unison/Codebase/SearchResult.hs b/parser-typechecker/src/Unison/Codebase/SearchResult.hs deleted file mode 100644 index 1c3272f109..0000000000 --- a/parser-typechecker/src/Unison/Codebase/SearchResult.hs +++ /dev/null @@ -1,83 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.SearchResult where - -import Unison.Prelude - -import qualified Data.Set as Set -import Unison.HashQualified' (HashQualified) 
-import qualified Unison.HashQualified' as HQ -import Unison.Name (Name) -import Unison.Names2 (Names'(Names), Names0) -import qualified Unison.Names2 as Names -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import qualified Unison.Referent as Referent -import qualified Unison.Util.Relation as R - --- this Ord instance causes types < terms -data SearchResult = Tp TypeResult | Tm TermResult deriving (Eq, Ord, Show) - -data TermResult = TermResult - { termName :: HashQualified - , referent :: Referent - , termAliases :: Set HashQualified - } deriving (Eq, Ord, Show) - -data TypeResult = TypeResult - { typeName :: HashQualified - , reference :: Reference - , typeAliases :: Set HashQualified - } deriving (Eq, Ord, Show) - -pattern Tm' hq r as = Tm (TermResult hq r as) -pattern Tp' hq r as = Tp (TypeResult hq r as) - -termResult :: HashQualified -> Referent -> Set HashQualified -> SearchResult -termResult hq r as = Tm (TermResult hq r as) - -termSearchResult :: Names0 -> Name -> Referent -> SearchResult -termSearchResult b n r = - termResult (Names._hqTermName b n r) r (Names._hqTermAliases b n r) - -typeResult :: HashQualified -> Reference -> Set HashQualified -> SearchResult -typeResult hq r as = Tp (TypeResult hq r as) - -typeSearchResult :: Names0 -> Name -> Reference -> SearchResult -typeSearchResult b n r = - typeResult (Names._hqTypeName b n r) r (Names._hqTypeAliases b n r) - -name :: SearchResult -> HashQualified -name = \case - Tm t -> termName t - Tp t -> typeName t - -aliases :: SearchResult -> Set HashQualified -aliases = \case - Tm t -> termAliases t - Tp t -> typeAliases t - --- | TypeResults yield a `Referent.Ref` -toReferent :: SearchResult -> Referent -toReferent (Tm (TermResult _ r _)) = r -toReferent (Tp (TypeResult _ r _)) = Referent.Ref r - -truncateAliases :: Int -> SearchResult -> SearchResult -truncateAliases n = \case - Tm (TermResult hq r as) -> termResult hq r (Set.map (HQ.take n) as) - Tp (TypeResult hq r as) -> 
typeResult hq r (Set.map (HQ.take n) as) - --- | You may want to sort this list differently afterward. -fromNames :: Names0 -> [SearchResult] -fromNames b = - map (uncurry (typeSearchResult b)) (R.toList . Names.types $ b) <> - map (uncurry (termSearchResult b)) (R.toList . Names.terms $ b) - -_fromNames :: Names0 -> [SearchResult] -_fromNames n0@(Names terms types) = typeResults <> termResults where - typeResults = - [ typeResult (Names._hqTypeName n0 name r) r (Names._hqTypeAliases n0 name r) - | (name, r) <- R.toList types ] - termResults = - [ termResult (Names._hqTermName n0 name r) r (Names._hqTermAliases n0 name r) - | (name, r) <- R.toList terms] diff --git a/parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs b/parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs deleted file mode 100644 index 57d2f645c0..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs +++ /dev/null @@ -1,57 +0,0 @@ -module Unison.Codebase.Serialization.PutT where - -import Data.Bytes.Put -import qualified Data.Serialize.Put as Ser -import Data.Serialize.Put ( PutM - , runPutM - ) - -newtype PutT m a = PutT { unPutT :: m (PutM a) } - -instance Monad m => MonadPut (PutT m) where - putWord8 = PutT . pure . putWord8 - {-# INLINE putWord8 #-} - putByteString = PutT . pure . putByteString - {-# INLINE putByteString #-} - putLazyByteString = PutT . pure . putLazyByteString - {-# INLINE putLazyByteString #-} - flush = PutT $ pure flush - {-# INLINE flush #-} - putWord16le = PutT . pure . putWord16le - {-# INLINE putWord16le #-} - putWord16be = PutT . pure . putWord16be - {-# INLINE putWord16be #-} - putWord16host = PutT . pure . putWord16host - {-# INLINE putWord16host #-} - putWord32le = PutT . pure . putWord32le - {-# INLINE putWord32le #-} - putWord32be = PutT . pure . putWord32be - {-# INLINE putWord32be #-} - putWord32host = PutT . pure . putWord32host - {-# INLINE putWord32host #-} - putWord64le = PutT . pure . 
putWord64le - {-# INLINE putWord64le #-} - putWord64be = PutT . pure . putWord64be - {-# INLINE putWord64be #-} - putWord64host = PutT . pure . putWord64host - {-# INLINE putWord64host #-} - putWordhost = PutT . pure . putWordhost - {-# INLINE putWordhost #-} - -instance Functor m => Functor (PutT m) where - fmap f (PutT m) = PutT $ fmap (fmap f) m - -instance Applicative m => Applicative (PutT m) where - pure = PutT . pure . pure - (PutT f) <*> (PutT a) = PutT $ (<*>) <$> f <*> a - -instance Monad m => Monad (PutT m) where - (PutT m) >>= f = PutT $ do - putm <- m - let (a, bs) = runPutM putm - putm' <- unPutT $ f a - let (b, bs') = runPutM putm' - pure $ do - Ser.putByteString bs - Ser.putByteString bs' - pure b diff --git a/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs b/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs deleted file mode 100644 index e22e760983..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs +++ /dev/null @@ -1,811 +0,0 @@ -{-# LANGUAGE Strict #-} -{-# LANGUAGE RankNTypes #-} - -module Unison.Codebase.Serialization.V1 where - -import Unison.Prelude - -import Prelude hiding (getChar, putChar) - --- import qualified Data.Text as Text -import qualified Unison.Pattern as Pattern -import Unison.Pattern ( Pattern - , SeqOp - ) -import Data.Bits ( Bits ) -import Data.Bytes.Get -import Data.Bytes.Put -import Data.Bytes.Serial ( serialize - , deserialize - , serializeBE - , deserializeBE - ) -import Data.Bytes.Signed ( Unsigned ) -import Data.Bytes.VarInt ( VarInt(..) ) -import qualified Data.Map as Map -import Data.List ( elemIndex - ) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Branch.Dependencies as BD -import Unison.Codebase.Causal ( Raw(..) - , RawHash(..) - , unRawHash - ) -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Codebase.Metadata as Metadata -import Unison.NameSegment as NameSegment -import Unison.Codebase.Patch ( Patch(..) 
) -import qualified Unison.Codebase.Patch as Patch -import Unison.Codebase.TermEdit ( TermEdit ) -import Unison.Codebase.TypeEdit ( TypeEdit ) -import Unison.Hash ( Hash ) -import Unison.Kind ( Kind ) -import Unison.Reference ( Reference ) -import Unison.Symbol ( Symbol(..) ) -import Unison.Term ( Term ) -import qualified Data.ByteString as B -import qualified Data.Sequence as Sequence -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import qualified Unison.Codebase.TermEdit as TermEdit -import qualified Unison.Codebase.TypeEdit as TypeEdit -import qualified Unison.Codebase.Serialization as S -import qualified Unison.Hash as Hash -import qualified Unison.Kind as Kind -import qualified Unison.Reference as Reference -import Unison.Referent (Referent) -import qualified Unison.Referent as Referent -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import Unison.Util.Star3 ( Star3 ) -import qualified Unison.Util.Star3 as Star3 -import Unison.Util.Relation ( Relation ) -import qualified Unison.Util.Relation as Relation -import qualified Unison.DataDeclaration as DataDeclaration -import Unison.DataDeclaration ( DataDeclaration - , EffectDeclaration - ) -import qualified Unison.Var as Var -import qualified Unison.ConstructorType as CT -import Unison.Type (Type) - --- ABOUT THIS FORMAT: --- --- A serialization format for uncompiled Unison syntax trees. --- --- Finalized: No --- --- If Finalized: Yes, don't modify this file in a way that affects serialized form. --- Instead, create a new file, V(n + 1). --- This ensures that we have a well-defined serialized form and can read --- and write old versions. - -unknownTag :: (MonadGet m, Show a) => String -> a -> m x -unknownTag msg tag = - fail $ "unknown tag " ++ show tag ++ - " while deserializing: " ++ msg - -putRawCausal :: MonadPut m => (a -> m ()) -> Causal.Raw h a -> m () -putRawCausal putA = \case - RawOne a -> putWord8 0 >> putA a - RawCons a t -> putWord8 1 >> (putHash . 
unRawHash) t >> putA a - RawMerge a ts -> - putWord8 2 >> putFoldable (putHash . unRawHash) ts >> putA a - -getCausal0 :: MonadGet m => m a -> m (Causal.Raw h a) -getCausal0 getA = getWord8 >>= \case - 0 -> RawOne <$> getA - 1 -> flip RawCons <$> (RawHash <$> getHash) <*> getA - 2 -> flip RawMerge . Set.fromList <$> getList (RawHash <$> getHash) <*> getA - x -> unknownTag "Causal0" x - --- Like getCausal, but doesn't bother to read the actual value in the causal, --- it just reads the hashes. Useful for more efficient implementation of --- `Causal.before`. --- getCausal00 :: MonadGet m => m Causal00 --- getCausal00 = getWord8 >>= \case --- 0 -> pure One00 --- 1 -> Cons00 <$> getHash --- 2 -> Merge00 . Set.fromList <$> getList getHash - --- 1. Can no longer read a causal using just MonadGet; --- need a way to construct the loader that forms its tail. --- Same problem with loading Branch0 with monadic tails. --- 2. Without the monadic tail, need external info to know how to --- load the tail. When modifying a nested structure, we --- need a way to save the intermediate nodes. (e.g. zipper?) --- 3. We ran into trouble trying to intermingle the marshalling monad --- (put/get) with the loading/saving monad (io). --- 4. PutT was weird because we don't think we want the Codebase monad to --- randomly be able to accumulate bytestrings (put) that don't even reset. --- 5. We could specialize `Causal m e` to a particular monad that tries to do --- the right things wrt caching? --- putCausal0 :: MonadPut m => Causal a -> (a -> m ()) -> m () --- putCausal0 = undefined - --- This loads the tail in order to write it? --- May be crucial to do so, if "loading" tail from `pure`, but --- otherwise weird. We'd like to skip writing the tail if it already --- exists, but how can we tell? --- Also, we're not even supposed to be writing the tail into the same buffer --- as head. 
We should be writing the hash of the tail though, so we can --- know which file we need to load it from; loading another file is also --- something we can't do in this model. ----- --- putCausal :: (MonadPut m, Monad n) => Causal n a -> (a -> m ()) -> n (m ()) --- putCausal (Causal.One hash a) putA = --- pure $ putWord8 1 *> putHash hash *> putA a --- putCausal (Causal.ConsN m) putA = do --- (conss, tail) <- m --- pure (putWord8 2 *> putFoldable conss (putPair' putHash putA)) --- *> putCausal tail putA --- putCausal (Causal.Merge hash a tails) putA = do --- pure (putWord8 3 *> putHash hash *> putA a) --- putFoldableN (Map.toList tails) $ putPair'' putHash (>>= (`putCausal` putA)) --- putCausal (Causal.Cons _ _ _) _ = --- error "deserializing 'Causal': the ConsN pattern should have matched here!" - - --- getCausal :: MonadGet m => m a -> m (Causal a) --- getCausal getA = getWord8 >>= \case --- 1 -> Causal.One <$> getHash <*> getA --- 2 -> Causal.consN <$> getList (getPair getHash getA) <*> getCausal getA --- 3 -> Causal.Merge <$> getHash <*> getA <*> --- (Map.fromList <$> getList (getPair getHash $ getCausal getA)) --- x -> unknownTag "causal" x - --- getCausal :: - -putLength :: - (MonadPut m, Integral n, Integral (Unsigned n), - Bits n, Bits (Unsigned n)) - => n -> m () -putLength = serialize . 
VarInt - -getLength :: - (MonadGet m, Integral n, Integral (Unsigned n), - Bits n, Bits (Unsigned n)) - => m n -getLength = unVarInt <$> deserialize - -putText :: MonadPut m => Text -> m () -putText text = do - let bs = encodeUtf8 text - putLength $ B.length bs - putByteString bs - -getText :: MonadGet m => m Text -getText = do - len <- getLength - bs <- B.copy <$> getBytes len - pure $ decodeUtf8 bs - -skipText :: MonadGet m => m () -skipText = do - len <- getLength - void $ getBytes len - -putFloat :: MonadPut m => Double -> m () -putFloat = serializeBE - -getFloat :: MonadGet m => m Double -getFloat = deserializeBE - -putNat :: MonadPut m => Word64 -> m () -putNat = putWord64be - -getNat :: MonadGet m => m Word64 -getNat = getWord64be - -putInt :: MonadPut m => Int64 -> m () -putInt = serializeBE - -getInt :: MonadGet m => m Int64 -getInt = deserializeBE - -putBoolean :: MonadPut m => Bool -> m () -putBoolean False = putWord8 0 -putBoolean True = putWord8 1 - -getBoolean :: MonadGet m => m Bool -getBoolean = go =<< getWord8 where - go 0 = pure False - go 1 = pure True - go t = unknownTag "Boolean" t - -putHash :: MonadPut m => Hash -> m () -putHash h = do - let bs = Hash.toBytes h - putLength (B.length bs) - putByteString bs - -getHash :: MonadGet m => m Hash -getHash = do - len <- getLength - bs <- B.copy <$> getBytes len - pure $ Hash.fromBytes bs - -putReference :: MonadPut m => Reference -> m () -putReference r = case r of - Reference.Builtin name -> do - putWord8 0 - putText name - Reference.Derived hash i n -> do - putWord8 1 - putHash hash - putLength i - putLength n - _ -> error "unpossible" - -getReference :: MonadGet m => m Reference -getReference = do - tag <- getWord8 - case tag of - 0 -> Reference.Builtin <$> getText - 1 -> Reference.DerivedId <$> (Reference.Id <$> getHash <*> getLength <*> getLength) - _ -> unknownTag "Reference" tag - -putReferent :: MonadPut m => Referent -> m () -putReferent = \case - Referent.Ref r -> do - putWord8 0 - 
putReference r - Referent.Con r i ct -> do - putWord8 1 - putReference r - putLength i - putConstructorType ct - -putConstructorType :: MonadPut m => CT.ConstructorType -> m () -putConstructorType = \case - CT.Data -> putWord8 0 - CT.Effect -> putWord8 1 - -getReferent :: MonadGet m => m Referent -getReferent = do - tag <- getWord8 - case tag of - 0 -> Referent.Ref <$> getReference - 1 -> Referent.Con <$> getReference <*> getLength <*> getConstructorType - _ -> unknownTag "getReferent" tag - -getConstructorType :: MonadGet m => m CT.ConstructorType -getConstructorType = getWord8 >>= \case - 0 -> pure CT.Data - 1 -> pure CT.Effect - t -> unknownTag "getConstructorType" t - -putMaybe :: MonadPut m => Maybe a -> (a -> m ()) -> m () -putMaybe Nothing _ = putWord8 0 -putMaybe (Just a) putA = putWord8 1 *> putA a - -getMaybe :: MonadGet m => m a -> m (Maybe a) -getMaybe getA = getWord8 >>= \tag -> case tag of - 0 -> pure Nothing - 1 -> Just <$> getA - _ -> unknownTag "Maybe" tag - -putFoldable - :: (Foldable f, MonadPut m) => (a -> m ()) -> f a -> m () -putFoldable putA as = do - putLength (length as) - traverse_ putA as - - --- putFoldableN --- :: forall f m n a --- . (Traversable f, MonadPut m, Applicative n) --- => f a --- -> (a -> n (m ())) --- -> n (m ()) --- putFoldableN as putAn = --- pure (putLength @m (length as)) *> (fmap sequence_ $ traverse putAn as) - -getFolded :: MonadGet m => (b -> a -> b) -> b -> m a -> m b -getFolded f z a = - foldl' f z <$> getList a - -getList :: MonadGet m => m a -> m [a] -getList a = getLength >>= (`replicateM` a) - -putABT - :: (MonadPut m, Foldable f, Functor f, Ord v) - => (v -> m ()) - -> (a -> m ()) - -> (forall x . 
(x -> m ()) -> f x -> m ()) - -> ABT.Term f v a - -> m () -putABT putVar putA putF abt = - putFoldable putVar fvs *> go (ABT.annotateBound'' abt) - where - fvs = Set.toList $ ABT.freeVars abt - go (ABT.Term _ (a, env) abt) = putA a *> case abt of - ABT.Var v -> putWord8 0 *> putVarRef env v - ABT.Tm f -> putWord8 1 *> putF go f - ABT.Abs v body -> putWord8 2 *> putVar v *> go body - ABT.Cycle body -> putWord8 3 *> go body - - putVarRef env v = case v `elemIndex` env of - Just i -> putWord8 0 *> putLength i - Nothing -> case v `elemIndex` fvs of - Just i -> putWord8 1 *> putLength i - Nothing -> error "impossible: var not free or bound" - -getABT - :: (MonadGet m, Foldable f, Functor f, Ord v) - => m v - -> m a - -> (forall x . m x -> m (f x)) - -> m (ABT.Term f v a) -getABT getVar getA getF = getList getVar >>= go [] where - go env fvs = do - a <- getA - tag <- getWord8 - case tag of - 0 -> do - tag <- getWord8 - case tag of - 0 -> ABT.annotatedVar a . (env !!) <$> getLength - 1 -> ABT.annotatedVar a . (fvs !!) 
<$> getLength - _ -> unknownTag "getABT.Var" tag - 1 -> ABT.tm' a <$> getF (go env fvs) - 2 -> do - v <- getVar - body <- go (v:env) fvs - pure $ ABT.abs' a v body - 3 -> ABT.cycle' a <$> go env fvs - _ -> unknownTag "getABT" tag - -putKind :: MonadPut m => Kind -> m () -putKind k = case k of - Kind.Star -> putWord8 0 - Kind.Arrow i o -> putWord8 1 *> putKind i *> putKind o - -getKind :: MonadGet m => m Kind -getKind = getWord8 >>= \tag -> case tag of - 0 -> pure Kind.Star - 1 -> Kind.Arrow <$> getKind <*> getKind - _ -> unknownTag "getKind" tag - -putType :: (MonadPut m, Ord v) - => (v -> m ()) -> (a -> m ()) - -> Type v a - -> m () -putType putVar putA = putABT putVar putA go where - go putChild t = case t of - Type.Ref r -> putWord8 0 *> putReference r - Type.Arrow i o -> putWord8 1 *> putChild i *> putChild o - Type.Ann t k -> putWord8 2 *> putChild t *> putKind k - Type.App f x -> putWord8 3 *> putChild f *> putChild x - Type.Effect e t -> putWord8 4 *> putChild e *> putChild t - Type.Effects es -> putWord8 5 *> putFoldable putChild es - Type.Forall body -> putWord8 6 *> putChild body - Type.IntroOuter body -> putWord8 7 *> putChild body - -getType :: (MonadGet m, Ord v) - => m v -> m a -> m (Type v a) -getType getVar getA = getABT getVar getA go where - go getChild = getWord8 >>= \tag -> case tag of - 0 -> Type.Ref <$> getReference - 1 -> Type.Arrow <$> getChild <*> getChild - 2 -> Type.Ann <$> getChild <*> getKind - 3 -> Type.App <$> getChild <*> getChild - 4 -> Type.Effect <$> getChild <*> getChild - 5 -> Type.Effects <$> getList getChild - 6 -> Type.Forall <$> getChild - 7 -> Type.IntroOuter <$> getChild - _ -> unknownTag "getType" tag - -putSymbol :: MonadPut m => Symbol -> m () -putSymbol (Symbol id typ) = putLength id *> putText (Var.rawName typ) - -getSymbol :: MonadGet m => m Symbol -getSymbol = Symbol <$> getLength <*> (Var.User <$> getText) - -putPattern :: MonadPut m => (a -> m ()) -> Pattern a -> m () -putPattern putA p = case p of - 
Pattern.Unbound a -> putWord8 0 *> putA a - Pattern.Var a -> putWord8 1 *> putA a - Pattern.Boolean a b -> putWord8 2 *> putA a *> putBoolean b - Pattern.Int a n -> putWord8 3 *> putA a *> putInt n - Pattern.Nat a n -> putWord8 4 *> putA a *> putNat n - Pattern.Float a n -> putWord8 5 *> putA a *> putFloat n - Pattern.Constructor a r cid ps -> - putWord8 6 - *> putA a - *> putReference r - *> putLength cid - *> putFoldable (putPattern putA) ps - Pattern.As a p -> putWord8 7 *> putA a *> putPattern putA p - Pattern.EffectPure a p -> putWord8 8 *> putA a *> putPattern putA p - Pattern.EffectBind a r cid args k -> - putWord8 9 - *> putA a - *> putReference r - *> putLength cid - *> putFoldable (putPattern putA) args - *> putPattern putA k - Pattern.SequenceLiteral a ps -> - putWord8 10 *> putA a *> putFoldable (putPattern putA) ps - Pattern.SequenceOp a l op r -> - putWord8 11 - *> putA a - *> putPattern putA l - *> putSeqOp op - *> putPattern putA r - Pattern.Text a t -> putWord8 12 *> putA a *> putText t - Pattern.Char a c -> putWord8 13 *> putA a *> putChar c - -putSeqOp :: MonadPut m => SeqOp -> m () -putSeqOp Pattern.Cons = putWord8 0 -putSeqOp Pattern.Snoc = putWord8 1 -putSeqOp Pattern.Concat = putWord8 2 - -getSeqOp :: MonadGet m => m SeqOp -getSeqOp = getWord8 >>= \case - 0 -> pure Pattern.Cons - 1 -> pure Pattern.Snoc - 2 -> pure Pattern.Concat - tag -> unknownTag "SeqOp" tag - -getPattern :: MonadGet m => m a -> m (Pattern a) -getPattern getA = getWord8 >>= \tag -> case tag of - 0 -> Pattern.Unbound <$> getA - 1 -> Pattern.Var <$> getA - 2 -> Pattern.Boolean <$> getA <*> getBoolean - 3 -> Pattern.Int <$> getA <*> getInt - 4 -> Pattern.Nat <$> getA <*> getNat - 5 -> Pattern.Float <$> getA <*> getFloat - 6 -> Pattern.Constructor <$> getA <*> getReference <*> getLength <*> getList - (getPattern getA) - 7 -> Pattern.As <$> getA <*> getPattern getA - 8 -> Pattern.EffectPure <$> getA <*> getPattern getA - 9 -> - Pattern.EffectBind - <$> getA - <*> getReference - 
<*> getLength - <*> getList (getPattern getA) - <*> getPattern getA - 10 -> Pattern.SequenceLiteral <$> getA <*> getList (getPattern getA) - 11 -> - Pattern.SequenceOp - <$> getA - <*> getPattern getA - <*> getSeqOp - <*> getPattern getA - 12 -> Pattern.Text <$> getA <*> getText - 13 -> Pattern.Char <$> getA <*> getChar - _ -> unknownTag "Pattern" tag - -putTerm :: (MonadPut m, Ord v) - => (v -> m ()) -> (a -> m ()) - -> Term v a - -> m () -putTerm putVar putA = putABT putVar putA go where - go putChild t = case t of - Term.Int n - -> putWord8 0 *> putInt n - Term.Nat n - -> putWord8 1 *> putNat n - Term.Float n - -> putWord8 2 *> putFloat n - Term.Boolean b - -> putWord8 3 *> putBoolean b - Term.Text t - -> putWord8 4 *> putText t - Term.Blank _ - -> error "can't serialize term with blanks" - Term.Ref r - -> putWord8 5 *> putReference r - Term.Constructor r cid - -> putWord8 6 *> putReference r *> putLength cid - Term.Request r cid - -> putWord8 7 *> putReference r *> putLength cid - Term.Handle h a - -> putWord8 8 *> putChild h *> putChild a - Term.App f arg - -> putWord8 9 *> putChild f *> putChild arg - Term.Ann e t - -> putWord8 10 *> putChild e *> putType putVar putA t - Term.Sequence vs - -> putWord8 11 *> putFoldable putChild vs - Term.If cond t f - -> putWord8 12 *> putChild cond *> putChild t *> putChild f - Term.And x y - -> putWord8 13 *> putChild x *> putChild y - Term.Or x y - -> putWord8 14 *> putChild x *> putChild y - Term.Lam body - -> putWord8 15 *> putChild body - Term.LetRec _ bs body - -> putWord8 16 *> putFoldable putChild bs *> putChild body - Term.Let _ b body - -> putWord8 17 *> putChild b *> putChild body - Term.Match s cases - -> putWord8 18 *> putChild s *> putFoldable (putMatchCase putA putChild) cases - Term.Char c - -> putWord8 19 *> putChar c - Term.TermLink r - -> putWord8 20 *> putReferent r - Term.TypeLink r - -> putWord8 21 *> putReference r - - putMatchCase :: MonadPut m => (a -> m ()) -> (x -> m ()) -> Term.MatchCase a x -> m 
() - putMatchCase putA putChild (Term.MatchCase pat guard body) = - putPattern putA pat *> putMaybe guard putChild *> putChild body - -getTerm :: (MonadGet m, Ord v) - => m v -> m a -> m (Term v a) -getTerm getVar getA = getABT getVar getA go where - go getChild = getWord8 >>= \tag -> case tag of - 0 -> Term.Int <$> getInt - 1 -> Term.Nat <$> getNat - 2 -> Term.Float <$> getFloat - 3 -> Term.Boolean <$> getBoolean - 4 -> Term.Text <$> getText - 5 -> Term.Ref <$> getReference - 6 -> Term.Constructor <$> getReference <*> getLength - 7 -> Term.Request <$> getReference <*> getLength - 8 -> Term.Handle <$> getChild <*> getChild - 9 -> Term.App <$> getChild <*> getChild - 10 -> Term.Ann <$> getChild <*> getType getVar getA - 11 -> Term.Sequence . Sequence.fromList <$> getList getChild - 12 -> Term.If <$> getChild <*> getChild <*> getChild - 13 -> Term.And <$> getChild <*> getChild - 14 -> Term.Or <$> getChild <*> getChild - 15 -> Term.Lam <$> getChild - 16 -> Term.LetRec False <$> getList getChild <*> getChild - 17 -> Term.Let False <$> getChild <*> getChild - 18 -> Term.Match <$> getChild - <*> getList (Term.MatchCase <$> getPattern getA <*> getMaybe getChild <*> getChild) - 19 -> Term.Char <$> getChar - 20 -> Term.TermLink <$> getReferent - 21 -> Term.TypeLink <$> getReference - _ -> unknownTag "getTerm" tag - -putPair :: MonadPut m => (a -> m ()) -> (b -> m ()) -> (a,b) -> m () -putPair putA putB (a,b) = putA a *> putB b - -putPair'' - :: (MonadPut m, Monad n) - => (a -> m ()) - -> (b -> n (m ())) - -> (a, b) - -> n (m ()) -putPair'' putA putBn (a, b) = pure (putA a) *> putBn b - -getPair :: MonadGet m => m a -> m b -> m (a,b) -getPair = liftA2 (,) - -putTuple3' - :: MonadPut m - => (a -> m ()) - -> (b -> m ()) - -> (c -> m ()) - -> (a, b, c) - -> m () -putTuple3' putA putB putC (a, b, c) = putA a *> putB b *> putC c - -getTuple3 :: MonadGet m => m a -> m b -> m c -> m (a,b,c) -getTuple3 = liftA3 (,,) - -putRelation :: MonadPut m => (a -> m ()) -> (b -> m ()) -> 
Relation a b -> m () -putRelation putA putB r = putFoldable (putPair putA putB) (Relation.toList r) - -getRelation :: (MonadGet m, Ord a, Ord b) => m a -> m b -> m (Relation a b) -getRelation getA getB = Relation.fromList <$> getList (getPair getA getB) - -putMap :: MonadPut m => (a -> m ()) -> (b -> m ()) -> Map a b -> m () -putMap putA putB m = putFoldable (putPair putA putB) (Map.toList m) - -getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) -getMap getA getB = Map.fromList <$> getList (getPair getA getB) - -putTermEdit :: MonadPut m => TermEdit -> m () -putTermEdit (TermEdit.Replace r typing) = - putWord8 1 *> putReference r *> case typing of - TermEdit.Same -> putWord8 1 - TermEdit.Subtype -> putWord8 2 - TermEdit.Different -> putWord8 3 -putTermEdit TermEdit.Deprecate = putWord8 2 - -getTermEdit :: MonadGet m => m TermEdit -getTermEdit = getWord8 >>= \case - 1 -> TermEdit.Replace <$> getReference <*> (getWord8 >>= \case - 1 -> pure TermEdit.Same - 2 -> pure TermEdit.Subtype - 3 -> pure TermEdit.Different - t -> unknownTag "TermEdit.Replace" t - ) - 2 -> pure TermEdit.Deprecate - t -> unknownTag "TermEdit" t - -putTypeEdit :: MonadPut m => TypeEdit -> m () -putTypeEdit (TypeEdit.Replace r) = putWord8 1 *> putReference r -putTypeEdit TypeEdit.Deprecate = putWord8 2 - -getTypeEdit :: MonadGet m => m TypeEdit -getTypeEdit = getWord8 >>= \case - 1 -> TypeEdit.Replace <$> getReference - 2 -> pure TypeEdit.Deprecate - t -> unknownTag "TypeEdit" t - -putStar3 - :: MonadPut m - => (f -> m ()) - -> (d1 -> m ()) - -> (d2 -> m ()) - -> (d3 -> m ()) - -> Star3 f d1 d2 d3 - -> m () -putStar3 putF putD1 putD2 putD3 s = do - putFoldable putF (Star3.fact s) - putRelation putF putD1 (Star3.d1 s) - putRelation putF putD2 (Star3.d2 s) - putRelation putF putD3 (Star3.d3 s) - -getStar3 - :: (MonadGet m, Ord fact, Ord d1, Ord d2, Ord d3) - => m fact - -> m d1 - -> m d2 - -> m d3 - -> m (Star3 fact d1 d2 d3) -getStar3 getF getD1 getD2 getD3 = - Star3.Star3 - <$> 
(Set.fromList <$> getList getF) - <*> getRelation getF getD1 - <*> getRelation getF getD2 - <*> getRelation getF getD3 - -putBranchStar :: MonadPut m => (a -> m ()) -> (n -> m ()) -> Branch.Star a n -> m () -putBranchStar putA putN = - putStar3 putA putN putMetadataType (putPair putMetadataType putMetadataValue) - -getBranchStar :: (Ord a, Ord n, MonadGet m) => m a -> m n -> m (Branch.Star a n) -getBranchStar getA getN = getStar3 getA getN getMetadataType (getPair getMetadataType getMetadataValue) - -putLink :: MonadPut m => (Hash, mb) -> m () -putLink (h, _) = do - -- 0 means local; later we may have remote links with other ids - putWord8 0 - putHash h - -putChar :: MonadPut m => Char -> m () -putChar = serialize . VarInt . fromEnum - -getChar :: MonadGet m => m Char -getChar = toEnum . unVarInt <$> deserialize - -putNameSegment :: MonadPut m => NameSegment -> m () -putNameSegment = putText . NameSegment.toText - -getNameSegment :: MonadGet m => m NameSegment -getNameSegment = NameSegment <$> getText - -putRawBranch :: MonadPut m => Branch.Raw -> m () -putRawBranch (Branch.Raw terms types children edits) = do - putBranchStar putReferent putNameSegment terms - putBranchStar putReference putNameSegment types - putMap putNameSegment (putHash . 
unRawHash) children - putMap putNameSegment putHash edits - -getMetadataType :: MonadGet m => m Metadata.Type -getMetadataType = getReference - -putMetadataType :: MonadPut m => Metadata.Type -> m () -putMetadataType = putReference - -getMetadataValue :: MonadGet m => m Metadata.Value -getMetadataValue = getReference - -putMetadataValue :: MonadPut m => Metadata.Value -> m () -putMetadataValue = putReference - -getRawBranch :: MonadGet m => m Branch.Raw -getRawBranch = - Branch.Raw - <$> getBranchStar getReferent getNameSegment - <*> getBranchStar getReference getNameSegment - <*> getMap getNameSegment (RawHash <$> getHash) - <*> getMap getNameSegment getHash - --- `getBranchDependencies` consumes the same data as `getRawBranch` -getBranchDependencies :: MonadGet m => m (BD.Branches n, BD.Dependencies) -getBranchDependencies = do - (terms1, types1) <- getTermStarDependencies - (terms2, types2) <- getTypeStarDependencies - childHashes <- fmap (RawHash . snd) <$> getList (getPair skipText getHash) - editHashes <- Set.fromList . fmap snd <$> getList (getPair skipText getHash) - pure ( childHashes `zip` repeat Nothing - , BD.Dependencies editHashes (terms1 <> terms2) (types1 <> types2) ) - where - -- returns things, metadata types, metadata values - getStarReferences :: - (MonadGet m, Ord r) => m r -> m ([r], [Metadata.Value]) - getStarReferences getR = do - void $ getList getR -- throw away the `facts` - -- d1: references and namesegments - rs :: [r] <- fmap fst <$> getList (getPair getR skipText) - -- d2: metadata type index - void $ getList (getPair getR getMetadataType) - -- d3: metadata (type, value) index - (_metadataTypes, metadataValues) <- unzip . 
fmap snd <$> - getList (getPair getR (getPair getMetadataType getMetadataValue)) - pure (rs, metadataValues) - - getTermStarDependencies :: MonadGet m => m (Set Reference.Id, Set Reference.Id) - getTermStarDependencies = do - (referents, mdValues) <- getStarReferences getReferent - let termIds = Set.fromList $ - [ i | Referent.Ref (Reference.DerivedId i) <- referents ] ++ - [ i | Reference.DerivedId i <- mdValues ] - declIds = Set.fromList $ - [ i | Referent.Con (Reference.DerivedId i) _cid _ct <- referents ] - pure (termIds, declIds) - - getTypeStarDependencies :: MonadGet m => m (Set Reference.Id, Set Reference.Id) - getTypeStarDependencies = do - (references, mdValues) <- getStarReferences getReference - let termIds = Set.fromList $ [ i | Reference.DerivedId i <- mdValues ] - declIds = Set.fromList $ [ i | Reference.DerivedId i <- references ] - pure (termIds, declIds) - -putDataDeclaration :: (MonadPut m, Ord v) - => (v -> m ()) -> (a -> m ()) - -> DataDeclaration v a - -> m () -putDataDeclaration putV putA decl = do - putModifier $ DataDeclaration.modifier decl - putA $ DataDeclaration.annotation decl - putFoldable putV (DataDeclaration.bound decl) - putFoldable (putTuple3' putA putV (putType putV putA)) (DataDeclaration.constructors' decl) - -getDataDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (DataDeclaration v a) -getDataDeclaration getV getA = DataDeclaration.DataDeclaration <$> - getModifier <*> - getA <*> - getList getV <*> - getList (getTuple3 getA getV (getType getV getA)) - -putModifier :: MonadPut m => DataDeclaration.Modifier -> m () -putModifier DataDeclaration.Structural = putWord8 0 -putModifier (DataDeclaration.Unique txt) = putWord8 1 *> putText txt - -getModifier :: MonadGet m => m DataDeclaration.Modifier -getModifier = getWord8 >>= \case - 0 -> pure DataDeclaration.Structural - 1 -> DataDeclaration.Unique <$> getText - tag -> unknownTag "DataDeclaration.Modifier" tag - -putEffectDeclaration :: - (MonadPut m, Ord v) => (v -> m ()) -> 
(a -> m ()) -> EffectDeclaration v a -> m () -putEffectDeclaration putV putA (DataDeclaration.EffectDeclaration d) = - putDataDeclaration putV putA d - -getEffectDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (EffectDeclaration v a) -getEffectDeclaration getV getA = - DataDeclaration.EffectDeclaration <$> getDataDeclaration getV getA - -putEither :: (MonadPut m) => (a -> m ()) -> (b -> m ()) -> Either a b -> m () -putEither putL _ (Left a) = putWord8 0 *> putL a -putEither _ putR (Right b) = putWord8 1 *> putR b - -getEither :: MonadGet m => m a -> m b -> m (Either a b) -getEither getL getR = getWord8 >>= \case - 0 -> Left <$> getL - 1 -> Right <$> getR - tag -> unknownTag "Either" tag - -formatSymbol :: S.Format Symbol -formatSymbol = S.Format getSymbol putSymbol - -putEdits :: MonadPut m => Patch -> m () -putEdits edits = - putRelation putReference putTermEdit (Patch._termEdits edits) >> - putRelation putReference putTypeEdit (Patch._typeEdits edits) - -getEdits :: MonadGet m => m Patch -getEdits = Patch <$> getRelation getReference getTermEdit - <*> getRelation getReference getTypeEdit diff --git a/parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs b/parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs deleted file mode 100644 index 5496f5a1fc..0000000000 --- a/parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs +++ /dev/null @@ -1,35 +0,0 @@ -module Unison.Codebase.ShortBranchHash where - -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Hash as Hash -import qualified Data.Text as Text -import qualified Data.Set as Set -import Data.Text (Text) - -newtype ShortBranchHash = - ShortBranchHash { toText :: Text } -- base32hex characters - deriving (Eq, Ord) - -toString :: ShortBranchHash -> String -toString = Text.unpack . toText - -toHash :: ShortBranchHash -> Maybe Branch.Hash -toHash = fmap Causal.RawHash . Hash.fromBase32Hex . 
toText - -fromHash :: Int -> Branch.Hash -> ShortBranchHash -fromHash len = - ShortBranchHash . Text.take len . Hash.base32Hex . Causal.unRawHash - -fullFromHash :: Branch.Hash -> ShortBranchHash -fullFromHash = ShortBranchHash . Hash.base32Hex . Causal.unRawHash - --- abc -> SBH abc --- #abc -> SBH abc -fromText :: Text -> Maybe ShortBranchHash -fromText t | Text.all (`Set.member` Hash.validBase32HexChars) t = - Just . ShortBranchHash . Text.dropWhile (=='#') $ t -fromText _ = Nothing - -instance Show ShortBranchHash where - show (ShortBranchHash h) = '#' : Text.unpack h diff --git a/parser-typechecker/src/Unison/Codebase/SyncMode.hs b/parser-typechecker/src/Unison/Codebase/SyncMode.hs deleted file mode 100644 index 67f79a6518..0000000000 --- a/parser-typechecker/src/Unison/Codebase/SyncMode.hs +++ /dev/null @@ -1,3 +0,0 @@ -module Unison.Codebase.SyncMode where - -data SyncMode = ShortCircuit | Complete deriving (Eq, Show) diff --git a/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs b/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs deleted file mode 100644 index 8eb2b9e2bb..0000000000 --- a/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs +++ /dev/null @@ -1,427 +0,0 @@ -{-# Language OverloadedStrings #-} -{-# Language BangPatterns #-} -{-# Language ViewPatterns #-} - -module Unison.Codebase.TranscriptParser ( - Stanza(..), FenceType, ExpectingError, Hidden, Err, UcmCommand(..), - run, parse, parseFile) - where - --- import qualified Text.Megaparsec.Char as P -import Control.Concurrent.STM (atomically) -import Control.Exception (finally) -import Control.Monad.State (runStateT) -import Data.List (isSubsequenceOf) -import Data.IORef -import Prelude hiding (readFile, writeFile) -import System.Directory ( doesFileExist ) -import System.Exit (die) -import System.IO.Error (catchIOError) -import System.Environment (getProgName) -import Unison.Codebase (Codebase) -import Unison.Codebase.Editor.Command (LoadSourceResult (..)) -import 
Unison.Codebase.Editor.Input (Input (..), Event(UnisonFileChanged)) -import Unison.CommandLine -import Unison.CommandLine.InputPattern (InputPattern (aliases, patternName)) -import Unison.CommandLine.InputPatterns (validInputs) -import Unison.CommandLine.OutputMessages (notifyUser, notifyNumbered) -import Unison.Parser (Ann) -import Unison.Prelude -import Unison.PrettyTerminal -import Unison.Symbol (Symbol) -import Unison.CommandLine.Main (asciiartUnison, expandNumber) -import qualified Data.Char as Char -import qualified Data.Map as Map -import qualified Data.Text as Text -import qualified System.IO as IO -import qualified Data.Configurator as Config -import qualified Crypto.Random as Random -import qualified Text.Megaparsec as P -import qualified Unison.Codebase as Codebase -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Editor.HandleCommand as HandleCommand -import qualified Unison.Codebase.Editor.HandleInput as HandleInput -import qualified Unison.Codebase.Path as Path -import qualified Unison.Codebase.Runtime as Runtime -import qualified Unison.CommandLine.InputPattern as IP -import qualified Unison.Runtime.Rt1IO as Rt1 -import qualified Unison.Runtime.Interface as RTI -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.TQueue as Q -import qualified Unison.Codebase.Editor.Output as Output -import Control.Lens (view) -import Control.Error (rightMay) - -type ExpectingError = Bool -type Err = String -type ScratchFileName = Text -type FenceType = Text - -data Hidden = Shown | HideOutput | HideAll - deriving (Eq, Show) -data UcmCommand = UcmCommand Path.Absolute Text - -data Stanza - = Ucm Hidden ExpectingError [UcmCommand] - | Unison Hidden ExpectingError (Maybe ScratchFileName) Text - | UnprocessedFence FenceType Text - | Unfenced Text - -instance Show UcmCommand where - show (UcmCommand path txt) = show path <> ">" <> Text.unpack txt - -instance Show Stanza where - show s = case s of - Ucm _ _ cmds -> 
unlines [ - "```ucm", - foldl (\x y -> x ++ show y) "" cmds, - "```" - ] - Unison _hide _ fname txt -> unlines [ - "```unison", - case fname of - Nothing -> Text.unpack txt <> "```\n" - Just fname -> unlines [ - "---", - "title: " <> Text.unpack fname, - "---", - Text.unpack txt, - "```", - "" ] - ] - UnprocessedFence typ txt -> unlines [ - "```" <> Text.unpack typ, - Text.unpack txt, - "```", "" ] - Unfenced txt -> Text.unpack txt - -parseFile :: FilePath -> IO (Either Err [Stanza]) -parseFile filePath = do - exists <- doesFileExist filePath - if exists then do - txt <- readUtf8 filePath - pure $ parse filePath txt - else - pure $ Left $ show filePath ++ " does not exist" - -parse :: String -> Text -> Either Err [Stanza] -parse srcName txt = case P.parse (stanzas <* P.eof) srcName txt of - Right a -> Right a - Left e -> Left (show e) - -run :: Maybe Bool -> FilePath -> FilePath -> [Stanza] -> Codebase IO Symbol Ann -> Branch.Cache IO -> IO Text -run newRt dir configFile stanzas codebase branchCache = do - let initialPath = Path.absoluteEmpty - putPrettyLn $ P.lines [ - asciiartUnison, "", - "Running the provided transcript file...", - "" - ] - root <- fromMaybe Branch.empty . rightMay <$> Codebase.getRootBranch codebase - do - pathRef <- newIORef initialPath - numberedArgsRef <- newIORef [] - inputQueue <- Q.newIO - cmdQueue <- Q.newIO - unisonFiles <- newIORef Map.empty - out <- newIORef mempty - hidden <- newIORef Shown - allowErrors <- newIORef False - hasErrors <- newIORef False - mStanza <- newIORef Nothing - (config, cancelConfig) <- - catchIOError (watchConfig configFile) $ \_ -> - die "Your .unisonConfig could not be loaded. Check that it's correct!" - runtime <- do - b <- maybe (Config.lookupDefault False config "new-runtime") pure newRt - if b then RTI.startRuntime else pure Rt1.runtime - traverse_ (atomically . 
Q.enqueue inputQueue) (stanzas `zip` [1..]) - let patternMap = - Map.fromList - $ validInputs - >>= (\p -> (patternName p, p) : ((, p) <$> aliases p)) - let - output' :: Bool -> String -> IO () - output' inputEcho msg = do - hide <- readIORef hidden - unless (hideOutput inputEcho hide) $ modifyIORef' out (\acc -> acc <> pure msg) - - hideOutput :: Bool -> Hidden -> Bool - hideOutput inputEcho = \case - Shown -> False - HideOutput -> True && (not inputEcho) - HideAll -> True - - output = output' False - outputEcho = output' True - - awaitInput = do - cmd <- atomically (Q.tryDequeue cmdQueue) - case cmd of - -- end of ucm block - Just Nothing -> do - output "\n```\n" - dieUnexpectedSuccess - awaitInput - -- ucm command to run - Just (Just p@(UcmCommand path lineTxt)) -> do - curPath <- readIORef pathRef - numberedArgs <- readIORef numberedArgsRef - if curPath /= path then do - atomically $ Q.undequeue cmdQueue (Just p) - pure $ Right (SwitchBranchI (Path.absoluteToPath' path)) - else case (>>= expandNumber numberedArgs) - . words . Text.unpack $ lineTxt of - [] -> awaitInput - cmd:args -> do - output ("\n" <> show p <> "\n") - case Map.lookup cmd patternMap of - -- invalid command is treated as a failure - Nothing -> - dieWithMsg - Just pat -> case IP.parse pat args of - Left msg -> do - output $ P.toPlain 65 (P.indentN 2 msg <> P.newline <> P.newline) - dieWithMsg - Right input -> pure $ Right input - - Nothing -> do - dieUnexpectedSuccess - writeIORef hidden Shown - writeIORef allowErrors False - maybeStanza <- atomically (Q.tryDequeue inputQueue) - _ <- writeIORef mStanza maybeStanza - case maybeStanza of - Nothing -> do - putStrLn "" - pure $ Right QuitI - Just (s,idx) -> do - putStr $ "\r⚙️ Processing stanza " ++ show idx ++ " of " - ++ show (length stanzas) ++ "." 
- IO.hFlush IO.stdout - case s of - Unfenced _ -> do - output $ show s - awaitInput - UnprocessedFence _ _ -> do - output $ show s - awaitInput - Unison hide errOk filename txt -> do - writeIORef hidden hide - outputEcho $ show s - writeIORef allowErrors errOk - output "```ucm\n" - atomically . Q.enqueue cmdQueue $ Nothing - modifyIORef' unisonFiles (Map.insert (fromMaybe "scratch.u" filename) txt) - pure $ Left (UnisonFileChanged (fromMaybe "scratch.u" filename) txt) - Ucm hide errOk cmds -> do - writeIORef hidden hide - writeIORef allowErrors errOk - writeIORef hasErrors False - output "```ucm" - traverse_ (atomically . Q.enqueue cmdQueue . Just) cmds - atomically . Q.enqueue cmdQueue $ Nothing - awaitInput - - loadPreviousUnisonBlock name = do - ufs <- readIORef unisonFiles - case Map.lookup name ufs of - Just uf -> - return (LoadSuccess uf) - Nothing -> - return InvalidSourceNameError - - cleanup = do Runtime.terminate runtime; cancelConfig - print o = do - msg <- notifyUser dir o - errOk <- readIORef allowErrors - let rendered = P.toPlain 65 (P.border 2 msg) - output rendered - when (Output.isFailure o) $ - if errOk then writeIORef hasErrors True - else dieWithMsg - - printNumbered o = do - let (msg, numberedArgs) = notifyNumbered o - errOk <- readIORef allowErrors - let rendered = P.toPlain 65 (P.border 2 msg) - output rendered - when (Output.isNumberedFailure o) $ - if errOk then writeIORef hasErrors True - else dieWithMsg - pure numberedArgs - - -- Looks at the current stanza and decides if it is contained in the - -- output so far. Appends it if not. - appendFailingStanza :: IO () - appendFailingStanza = do - stanzaOpt <- readIORef mStanza - currentOut <- readIORef out - let stnz = maybe "" show (fmap fst stanzaOpt :: Maybe Stanza) - unless (stnz `isSubsequenceOf` concat currentOut) $ - modifyIORef' out (\acc -> acc <> pure stnz) - - -- output ``` and new lines then call transcriptFailure - dieWithMsg :: forall a. 
IO a - dieWithMsg = do - executable <- getProgName - output "\n```\n\n" - appendFailingStanza - transcriptFailure out $ Text.unlines [ - "\128721", "", - "The transcript failed due to an error encountered in the stanza above.", "", - "Run `" <> Text.pack executable <> " -codebase " <> Text.pack dir <> "` " <> "to do more work with it."] - - dieUnexpectedSuccess :: IO () - dieUnexpectedSuccess = do - executable <- getProgName - errOk <- readIORef allowErrors - hasErr <- readIORef hasErrors - when (errOk && not hasErr) $ do - output "\n```\n\n" - appendFailingStanza - transcriptFailure out $ Text.unlines [ - "\128721", "", - "The transcript was expecting an error in the stanza above, but did not encounter one.", "", - "Run `" <> Text.pack executable <> " -codebase " <> Text.pack dir <> "` " <> "to do more work with it."] - - loop state = do - writeIORef pathRef (view HandleInput.currentPath state) - let free = runStateT (runMaybeT HandleInput.loop) state - rng i = pure $ Random.drgNewSeed (Random.seedFromInteger (fromIntegral i)) - (o, state') <- HandleCommand.commandLine config awaitInput - (const $ pure ()) - runtime - print - printNumbered - loadPreviousUnisonBlock - codebase - rng - branchCache - free - case o of - Nothing -> do - texts <- readIORef out - pure $ Text.concat (Text.pack <$> toList (texts :: Seq String)) - Just () -> do - writeIORef numberedArgsRef (HandleInput._numberedArgs state') - loop state' - (`finally` cleanup) - $ loop (HandleInput.loopState0 root initialPath) - -transcriptFailure :: IORef (Seq String) -> Text -> IO b -transcriptFailure out msg = do - texts <- readIORef out - die - . 
Text.unpack - $ Text.concat (Text.pack <$> toList (texts :: Seq String)) - <> "\n\n" - <> msg - -type P = P.Parsec () Text - -stanzas :: P [Stanza] -stanzas = P.many (fenced <|> unfenced) - -ucmCommand :: P UcmCommand -ucmCommand = do - P.lookAhead (word ".") - path <- P.takeWhile1P Nothing (/= '>') - void $ word ">" - line <- P.takeWhileP Nothing (/= '\n') <* spaces - path <- case Path.parsePath' (Text.unpack path) of - Right (Path.unPath' -> Left abs) -> pure abs - Right _ -> fail "expected absolute path" - Left e -> fail e - pure $ UcmCommand path line - -fenced :: P Stanza -fenced = do - fence - fenceType <- lineToken(word "ucm" <|> word "unison" <|> language) - stanza <- - if fenceType == "ucm" then do - hide <- hidden - err <- expectingError - _ <- spaces - cmds <- many ucmCommand - pure $ Ucm hide err cmds - else if fenceType == "unison" then do - -- todo: this has to be more interesting - -- ```unison:hide - -- ```unison - -- ```unison:hide:all scratch.u - hide <- lineToken hidden - err <- lineToken expectingError - fileName <- optional untilSpace1 - blob <- spaces *> untilFence - pure $ Unison hide err fileName blob - else UnprocessedFence fenceType <$> untilFence - fence - pure stanza - --- Three backticks, consumes trailing spaces too --- ``` -fence :: P () -fence = P.try $ do void (word "```"); spaces - --- Parses up until next fence -unfenced :: P Stanza -unfenced = Unfenced <$> untilFence - -untilFence :: P Text -untilFence = do - _ <- P.lookAhead (P.takeP Nothing 1) - go mempty - where - go :: Seq Text -> P Text - go !acc = do - f <- P.lookAhead (P.optional fence) - case f of - Nothing -> do - oneOrTwoBackticks <- optional (word' "``" <|> word' "`") - let start = fromMaybe "" oneOrTwoBackticks - txt <- P.takeWhileP (Just "unfenced") (/= '`') - eof <- P.lookAhead (P.optional P.eof) - case eof of - Just _ -> pure $ fold (acc <> pure txt) - Nothing -> go (acc <> pure start <> pure txt) - Just _ -> pure $ fold acc - -word' :: Text -> P Text -word' txt = 
P.try $ do - chs <- P.takeP (Just $ show txt) (Text.length txt) - guard (chs == txt) - pure txt - -word :: Text -> P Text -word = word' - --- token :: P a -> P a --- token p = p <* spaces - -lineToken :: P a -> P a -lineToken p = p <* nonNewlineSpaces - -nonNewlineSpaces :: P () -nonNewlineSpaces = void $ P.takeWhileP Nothing (\ch -> ch `elem` (" \t" :: String)) - -hidden :: P Hidden -hidden = (\case Just x -> x; Nothing -> Shown) <$> optional go where - go = ((\_ -> HideAll) <$> (word ":hide:all")) <|> - ((\_ -> HideOutput) <$> (word ":hide")) - -expectingError :: P ExpectingError -expectingError = isJust <$> optional (word ":error") - -untilSpace1 :: P Text -untilSpace1 = P.takeWhile1P Nothing (not . Char.isSpace) - -language :: P Text -language = P.takeWhileP Nothing (\ch -> Char.isDigit ch || Char.isLower ch || ch == '_' ) - -spaces :: P () -spaces = void $ P.takeWhileP (Just "spaces") Char.isSpace - --- single :: Char -> P Char --- single t = P.satisfy (== t) diff --git a/parser-typechecker/src/Unison/Codebase/Watch.hs b/parser-typechecker/src/Unison/Codebase/Watch.hs deleted file mode 100644 index d1ab5992b2..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Watch.hs +++ /dev/null @@ -1,151 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.Watch where - -import Unison.Prelude - -import qualified UnliftIO as UnliftIO -import UnliftIO.Concurrent ( forkIO - , threadDelay - , killThread - ) -import UnliftIO ( MonadUnliftIO - , withRunInIO - , unliftIO ) -import UnliftIO.Directory ( getModificationTime - , listDirectory - , doesPathExist - ) -import UnliftIO.MVar ( newEmptyMVar, takeMVar - , tryTakeMVar, tryPutMVar, putMVar ) -import UnliftIO.STM ( atomically ) -import UnliftIO.Exception ( catch ) -import UnliftIO.IORef ( newIORef - , readIORef - , writeIORef - ) -import qualified Data.Map as Map -import qualified Data.Text.IO -import Data.Time.Clock ( UTCTime - , diffUTCTime - ) -import System.FSNotify ( 
Event(Added, Modified)) -import qualified System.FSNotify as FSNotify -import Unison.Util.TQueue ( TQueue ) -import qualified Unison.Util.TQueue as TQueue -import qualified Control.Concurrent.STM as STM - -untilJust :: Monad m => m (Maybe a) -> m a -untilJust act = act >>= maybe (untilJust act) return - -watchDirectory' - :: forall m. MonadUnliftIO m => FilePath -> m (m (), m (FilePath, UTCTime)) -watchDirectory' d = do - mvar <- newEmptyMVar - let handler :: Event -> m () - handler e = case e of - Added fp t False -> doIt fp t - Modified fp t False -> doIt fp t - _ -> pure () - where doIt fp t = do - _ <- tryTakeMVar mvar - putMVar mvar (fp, t) - -- janky: used to store the cancellation action returned - -- by `watchDir`, which is created asynchronously - cleanupRef <- newEmptyMVar - -- we don't like FSNotify's debouncing (it seems to drop later events) - -- so we will be doing our own instead - let config = FSNotify.defaultConfig { FSNotify.confDebounce = FSNotify.NoDebounce } - cancel <- forkIO $ withRunInIO $ \inIO -> - FSNotify.withManagerConf config $ \mgr -> do - cancelInner <- FSNotify.watchDir mgr d (const True) (inIO . handler) <|> (pure (pure ())) - putMVar cleanupRef $ liftIO cancelInner - forever $ threadDelay 1000000 - let cleanup :: m () - cleanup = join (takeMVar cleanupRef) >> killThread cancel - pure (cleanup, takeMVar mvar) - -collectUntilPause :: forall m a. MonadIO m => TQueue a -> Int -> m [a] -collectUntilPause queue minPauseµsec = do --- 1. wait for at least one element in the queue - void . atomically $ TQueue.peek queue - - let go :: MonadIO m => m [a] - go = do - before <- atomically $ TQueue.enqueueCount queue - threadDelay minPauseµsec - after <- atomically $ TQueue.enqueueCount queue - -- if nothing new is on the queue, then return the contents - if before == after - then atomically $ TQueue.flush queue - else go - go - -watchDirectory :: forall m. 
MonadUnliftIO m - => FilePath -> (FilePath -> Bool) -> m (m (), m (FilePath, Text)) -watchDirectory dir allow = do - previousFiles <- newIORef Map.empty - (cancelWatch, watcher) <- watchDirectory' dir - let - existingFiles :: MonadIO m => m [(FilePath, UTCTime)] - existingFiles = do - files <- listDirectory dir - filtered <- filterM doesPathExist files - let withTime file = (file,) <$> getModificationTime file - sortOn snd <$> mapM withTime filtered - process :: MonadIO m => FilePath -> UTCTime -> m (Maybe (FilePath, Text)) - process file t = - if allow file then let - handle :: IOException -> m () - handle e = do - liftIO $ putStrLn $ "‼ Got an exception while reading: " <> file - liftIO $ print (e :: IOException) - go :: MonadUnliftIO m => m (Maybe (FilePath, Text)) - go = liftIO $ do - contents <- Data.Text.IO.readFile file - prevs <- readIORef previousFiles - case Map.lookup file prevs of - -- if the file's content's haven't changed and less than .5s - -- have elapsed, wait for the next update - Just (contents0, t0) - | contents == contents0 && (t `diffUTCTime` t0) < 0.5 -> - return Nothing - _ -> - Just (file, contents) <$ - writeIORef previousFiles (Map.insert file (contents, t) prevs) - in catch go (\e -> Nothing <$ handle e) - else return Nothing - queue <- TQueue.newIO - gate <- liftIO newEmptyMVar - ctx <- UnliftIO.askUnliftIO - -- We spawn a separate thread to siphon the file change events - -- into a queue, which can be debounced using `collectUntilPause` - enqueuer <- liftIO . 
forkIO $ do - takeMVar gate -- wait until gate open before starting - forever $ do - event@(file, _) <- UnliftIO.unliftIO ctx watcher - when (allow file) $ - STM.atomically $ TQueue.enqueue queue event - pending <- newIORef =<< existingFiles - let - await :: MonadIO m => m (FilePath, Text) - await = untilJust $ readIORef pending >>= \case - [] -> do - -- open the gate - tryPutMVar gate () - -- this debounces the events, waits for 50ms pause - -- in file change events - events <- collectUntilPause queue 50000 - -- traceM $ "Collected file change events" <> show events - case events of - [] -> pure Nothing - -- we pick the last of the events within the 50ms window - -- TODO: consider enqueing other events if there are - -- multiple events for different files - _ -> uncurry process $ last events - ((file, t):rest) -> do - writeIORef pending rest - process file t - cancel = cancelWatch >> killThread enqueuer - pure (cancel, await) diff --git a/parser-typechecker/src/Unison/Codecs.hs b/parser-typechecker/src/Unison/Codecs.hs deleted file mode 100644 index 1ed01453aa..0000000000 --- a/parser-typechecker/src/Unison/Codecs.hs +++ /dev/null @@ -1,340 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codecs where - --- A format for encoding runtime values, with sharing for compiled nodes. 
- -import Unison.Prelude - -import Control.Arrow (second) -import Control.Monad.State -import Data.Bits (Bits) -import qualified Data.Bytes.Serial as BS -import Data.Bytes.Signed (Unsigned) -import Data.Bytes.VarInt (VarInt(..)) -import qualified Data.ByteString as B -import Data.ByteString.Builder (doubleBE, int64BE, toLazyByteString) -import qualified Data.ByteString.Lazy as BL -import Data.Bytes.Put -import qualified Unison.ABT as ABT -import qualified Unison.Blank as Blank -import qualified Unison.DataDeclaration as DD -import qualified Unison.Hash as Hash -import Unison.Reference (Reference, pattern Builtin, pattern Derived) -import qualified Unison.Referent as Referent -import qualified Unison.ConstructorType as ConstructorType -import Unison.Term -import Unison.UnisonFile (UnisonFile, pattern UnisonFile) -import qualified Unison.UnisonFile as UF -import Unison.Var (Var) -import qualified Unison.Var as Var -import Unison.Pattern (Pattern) -import qualified Unison.Pattern as Pattern - -type Pos = Word64 - -serializeTerm :: (MonadPut m, MonadState Pos m, Var v) - => Term v a - -> m Pos -serializeTerm x = do - let putTag = do putWord8 111; putWord8 0 - let incPosition = do pos <- get; modify' (+1); pure pos - case ABT.out x of - ABT.Var v -> do - putTag - putWord8 0 - lengthEncode $ Var.name v - incPosition - ABT.Abs v body -> do - pbody <- serializeTerm body - putTag - putWord8 1 - lengthEncode $ Var.name v - putBackref pbody - incPosition - ABT.Cycle body -> do - pbody <- serializeTerm body - putTag - putWord8 10 - putBackref pbody - incPosition - ABT.Tm f -> case f of - Ann e _ -> do - serializeTerm e -- ignore types (todo: revisit) - Ref ref -> do - putTag - putWord8 2 - serializeReference ref - incPosition - Constructor ref id -> do - putTag - putWord8 3 - serializeReference ref - putWord32be $ fromIntegral id - incPosition - Request ref id -> do - putTag - putWord8 4 - serializeReference ref - putWord32be $ fromIntegral id - incPosition - Text text -> do - 
putTag - putWord8 5 - lengthEncode text - incPosition - Int n -> do - putTag - putWord8 6 - serializeInt n - incPosition - Nat n -> do - putTag - putWord8 6 - serializeNat n - incPosition - Float n -> do - putTag - putWord8 6 - serializeFloat n - incPosition - Boolean b -> do - putTag - putWord8 6 - serializeBoolean b - incPosition - Sequence v -> do - elementPositions <- traverse serializeTerm v - putTag - putWord8 7 - putLength $ length elementPositions - traverse_ putBackref elementPositions - incPosition - Lam body -> do - pos <- serializeTerm body - putTag - putWord8 8 - putBackref pos - incPosition - App fn arg -> do - posf <- serializeTerm fn - posarg <- serializeTerm arg - putTag - putWord8 9 - putBackref posf - putLength (1 :: Int) - putBackref posarg - incPosition - Let _ binding body -> do - posbind <- serializeTerm binding - posbod <- serializeTerm body - putTag - putWord8 11 - putBackref posbind - putBackref posbod - incPosition - If c t f -> do - posc <- serializeTerm c - post <- serializeTerm t - posf <- serializeTerm f - putTag - putWord8 12 - putBackref posc - putBackref post - putBackref posf - incPosition - And x y -> do - posx <- serializeTerm x - posy <- serializeTerm y - putTag - putWord8 13 - putBackref posx - putBackref posy - incPosition - Or x y -> do - posx <- serializeTerm x - posy <- serializeTerm y - putTag - putWord8 14 - putBackref posx - putBackref posy - incPosition - Match scrutinee cases -> do - poss <- serializeTerm scrutinee - casePositions <- traverse serializeCase1 cases - putTag - putWord8 15 - putBackref poss - putLength $ length casePositions - traverse_ serializeCase2 casePositions - incPosition - Blank b -> error $ "cannot serialize program with blank " ++ - fromMaybe "" (Blank.nameb b) - Handle h body -> do - hpos <- serializeTerm h - bpos <- serializeTerm body - putTag - putWord8 16 - putBackref hpos - putBackref bpos - incPosition - LetRec _ bs body -> do - positions <- traverse serializeTerm bs - pbody <- 
serializeTerm body - putTag - putWord8 19 - putLength $ length positions - traverse_ putBackref positions - putBackref pbody - incPosition - Char c -> do - putTag - putWord8 20 - putWord64be $ fromIntegral $ fromEnum c - incPosition - TermLink ref -> do - putTag - putWord8 21 - serializeReferent ref - incPosition - TypeLink ref -> do - putTag - putWord8 22 - serializeReference ref - incPosition - -serializePattern :: MonadPut m => Pattern a -> m () -serializePattern p = case p of - -- note: the putWord8 0 is the tag before any unboxed pattern - Pattern.Boolean _ b -> putWord8 0 *> serializeBoolean b - Pattern.Int _ n -> putWord8 0 *> serializeInt n - Pattern.Nat _ n -> putWord8 0 *> serializeNat n - Pattern.Float _ n -> putWord8 0 *> serializeFloat n - Pattern.Var _ -> putWord8 1 - Pattern.Unbound _ -> putWord8 2 - Pattern.Constructor _ r cid ps -> do - putWord8 3 - serializeReference r - putWord32be $ fromIntegral cid - putLength (length ps) - traverse_ serializePattern ps - Pattern.As _ p -> do - putWord8 4 - serializePattern p - Pattern.EffectPure _ p -> do - putWord8 5 - serializePattern p - Pattern.EffectBind _ r cid ps k -> do - putWord8 6 - serializeReference r - putWord32be $ fromIntegral cid - putLength (length ps) - traverse_ serializePattern ps - serializePattern k - _ -> error "todo: delete me after deleting PatternP - serializePattern match failure" - -serializeFloat :: MonadPut m => Double -> m () -serializeFloat n = do - putByteString . BL.toStrict . toLazyByteString $ doubleBE n - putWord8 3 - -serializeNat :: MonadPut m => Word64 -> m () -serializeNat n = do - putWord64be n - putWord8 2 - -serializeInt :: MonadPut m => Int64 -> m () -serializeInt n = do - putByteString . BL.toStrict . 
toLazyByteString $ int64BE n - putWord8 1 - -serializeBoolean :: MonadPut m => Bool -> m () -serializeBoolean False = putWord64be 0 *> putWord8 0 -serializeBoolean True = putWord64be 1 *> putWord8 0 - -serializeCase2 :: MonadPut m => MatchCase loc Pos -> m () -serializeCase2 (MatchCase p guard body) = do - serializePattern p - serializeMaybe putBackref guard - putBackref body - -serializeCase1 :: (Var v, MonadPut m, MonadState Pos m) - => MatchCase p (Term v a) -> m (MatchCase p Pos) -serializeCase1 (MatchCase p guard body) = do - posg <- traverse serializeTerm guard - posb <- serializeTerm body - pure $ MatchCase p posg posb - -putBackref :: MonadPut m => Pos -> m () -putBackref = BS.serialize . VarInt - -putLength :: (MonadPut m, Integral n, Integral (Unsigned n), - Bits n, Bits (Unsigned n)) - => n -> m () -putLength = BS.serialize . VarInt - -serializeMaybe :: (MonadPut m) => (a -> m ()) -> Maybe a -> m () -serializeMaybe f b = case b of - Nothing -> putWord8 0 - Just x -> putWord8 1 *> f x - -lengthEncode :: MonadPut m => Text -> m () -lengthEncode text = do - let bs = encodeUtf8 text - putLength $ B.length bs - putByteString bs - -serializeFoldable :: (MonadPut m, Foldable f) => (a -> m ()) -> f a -> m () -serializeFoldable f fa = do - putLength $ length fa - traverse_ f fa - -serializeReferent :: MonadPut m => Referent.Referent -> m () -serializeReferent r = case r of - Referent.Ref r -> putWord8 0 *> serializeReference r - Referent.Con r cid ct -> do - putWord8 1 - serializeReference r - putLength cid - serializeConstructorType ct - -serializeConstructorType :: MonadPut m => ConstructorType.ConstructorType -> m () -serializeConstructorType ct = case ct of - ConstructorType.Data -> putWord8 0 - ConstructorType.Effect -> putWord8 1 - -serializeReference :: MonadPut m => Reference -> m () -serializeReference ref = case ref of - Builtin text -> do - putWord8 0 - lengthEncode text - Derived hash i n -> do - putWord8 1 - let bs = Hash.toBytes hash - putLength $ 
B.length bs - putByteString bs - putLength i - putLength n - _ -> error "impossible" - -serializeConstructorArities :: MonadPut m => Reference -> [Int] -> m () -serializeConstructorArities r constructorArities = do - serializeReference r - serializeFoldable (putWord32be . fromIntegral) constructorArities - -serializeFile - :: (MonadPut m, MonadState Pos m, Monoid a, Var v) - => UnisonFile v a -> Term v a -> m () -serializeFile uf@(UnisonFile dataDecls effectDecls _ _) tm = do - let body = UF.uberTerm' uf tm - let dataDecls' = second DD.constructorArities <$> toList dataDecls - let effectDecls' = - second (DD.constructorArities . DD.toDataDecl) <$> toList effectDecls - -- traceM $ show effectDecls' - serializeFoldable (uncurry serializeConstructorArities) dataDecls' - serializeFoldable (uncurry serializeConstructorArities) effectDecls' - -- NB: we rewrite the term to minimize away let rec cycles, as let rec - -- blocks aren't allowed to have effects - pos <- serializeTerm body - putWord8 0 - putBackref pos diff --git a/parser-typechecker/src/Unison/CommandLine.hs b/parser-typechecker/src/Unison/CommandLine.hs deleted file mode 100644 index 0aca06d15f..0000000000 --- a/parser-typechecker/src/Unison/CommandLine.hs +++ /dev/null @@ -1,221 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE ViewPatterns #-} - - -module Unison.CommandLine where - -import Unison.Prelude - -import Control.Concurrent (forkIO, killThread) -import Control.Concurrent.STM (atomically) -import qualified Control.Monad.Extra as Monad -import qualified Control.Monad.Reader as Reader -import qualified Control.Monad.State as State -import Data.Configurator (autoReload, autoConfig) -import Data.Configurator.Types (Config, Worth (..)) -import Data.List (isSuffixOf, isPrefixOf) -import Data.ListLike (ListLike) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import Prelude hiding (readFile, writeFile) -import 
qualified System.Console.Haskeline as Line -import System.FilePath ( takeFileName ) -import Unison.Codebase (Codebase) -import qualified Unison.Codebase as Codebase -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.Causal ( Causal ) -import qualified Unison.Codebase.Causal as Causal -import Unison.Codebase.Editor.Input (Event(..), Input(..)) -import qualified Unison.Codebase.SearchResult as SR -import qualified Unison.Codebase.Watch as Watch -import Unison.CommandLine.InputPattern (InputPattern (parse)) -import qualified Unison.HashQualified' as HQ -import Unison.Names2 (Names0) -import qualified Unison.Util.ColorText as CT -import qualified Unison.Util.Find as Find -import qualified Unison.Util.Pretty as P -import Unison.Util.TQueue (TQueue) -import qualified Unison.Util.TQueue as Q - -allow :: FilePath -> Bool -allow p = - -- ignore Emacs .# prefixed files, see https://github.com/unisonweb/unison/issues/457 - not (".#" `isPrefixOf` takeFileName p) && - (isSuffixOf ".u" p || isSuffixOf ".uu" p) - -watchConfig :: FilePath -> IO (Config, IO ()) -watchConfig path = do - (config, t) <- autoReload autoConfig [Optional path] - pure (config, killThread t) - -watchFileSystem :: TQueue Event -> FilePath -> IO (IO ()) -watchFileSystem q dir = do - (cancel, watcher) <- Watch.watchDirectory dir allow - t <- forkIO . forever $ do - (filePath, text) <- watcher - atomically . Q.enqueue q $ UnisonFileChanged (Text.pack filePath) text - pure (cancel >> killThread t) - -watchBranchUpdates :: IO (Branch.Branch IO) -> TQueue Event -> Codebase IO v a -> IO (IO ()) -watchBranchUpdates currentRoot q codebase = do - (cancelExternalBranchUpdates, externalBranchUpdates) <- - Codebase.rootBranchUpdates codebase - thread <- forkIO . 
forever $ do - updatedBranches <- externalBranchUpdates - currentRoot <- currentRoot - -- Since there's some lag between when branch files are written and when - -- the OS generates a file watch event, we skip branch update events - -- that are causally before the current root. - -- - -- NB: Sadly, since the file watching API doesn't have a way to silence - -- the events from a specific individual write, this is ultimately a - -- heuristic. If a fairly recent head gets deposited at just the right - -- time, it would get ignored by this logic. This seems unavoidable. - let maxDepth = 20 -- if it's further back than this, consider it new - let isNew b = not <$> beforeHash maxDepth b (Branch._history currentRoot) - notBefore <- filterM isNew (toList updatedBranches) - when (length notBefore > 0) $ - atomically . Q.enqueue q . IncomingRootBranch $ Set.fromList notBefore - pure (cancelExternalBranchUpdates >> killThread thread) - - --- `True` if `h` is found in the history of `c` within `maxDepth` path length --- from the tip of `c` -beforeHash :: forall m h e . Monad m => Word -> Causal.RawHash h -> Causal m h e -> m Bool -beforeHash maxDepth h c = - Reader.runReaderT (State.evalStateT (go c) Set.empty) (0 :: Word) - where - go c | h == Causal.currentHash c = pure True - go c = do - currentDepth :: Word <- Reader.ask - if currentDepth >= maxDepth - then pure False - else do - seen <- State.get - cs <- lift . lift $ toList <$> sequence (Causal.children c) - let unseens = filter (\c -> c `Set.notMember` seen) cs - State.modify' (<> Set.fromList cs) - Monad.anyM (Reader.local (1+) . 
go) unseens - -warnNote :: String -> String -warnNote s = "⚠️ " <> s - -backtick :: IsString s => P.Pretty s -> P.Pretty s -backtick s = P.group ("`" <> s <> "`") - -backtickEOS :: IsString s => P.Pretty s -> P.Pretty s -backtickEOS s = P.group ("`" <> s <> "`.") - -tip :: (ListLike s Char, IsString s) => P.Pretty s -> P.Pretty s -tip s = P.column2 [("Tip:", P.wrap s)] - -note :: (ListLike s Char, IsString s) => P.Pretty s -> P.Pretty s -note s = P.column2 [("Note:", P.wrap s)] - -aside :: (ListLike s Char, IsString s) => P.Pretty s -> P.Pretty s -> P.Pretty s -aside a b = P.column2 [(a <> ":", b)] - -warn :: (ListLike s Char, IsString s) => P.Pretty s -> P.Pretty s -warn = emojiNote "⚠️" - -problem :: (ListLike s Char, IsString s) => P.Pretty s -> P.Pretty s -problem = emojiNote "❗️" - -bigproblem :: (ListLike s Char, IsString s) => P.Pretty s -> P.Pretty s -bigproblem = emojiNote "‼️" - -emojiNote :: (ListLike s Char, IsString s) => String -> P.Pretty s -> P.Pretty s -emojiNote lead s = P.group (fromString lead) <> "\n" <> P.wrap s - -nothingTodo :: (ListLike s Char, IsString s) => P.Pretty s -> P.Pretty s -nothingTodo = emojiNote "😶" - -completion :: String -> Line.Completion -completion s = Line.Completion s s True - -completion' :: String -> Line.Completion -completion' s = Line.Completion s s False - -prettyCompletion :: (String, P.Pretty P.ColorText) -> Line.Completion --- -- discards formatting in favor of better alignment --- prettyCompletion (s, p) = Line.Completion s (P.toPlainUnbroken p) True --- preserves formatting, but Haskeline doesn't know how to align -prettyCompletion (s, p) = Line.Completion s (P.toAnsiUnbroken p) True - --- avoids adding a space after successful completion -prettyCompletion' :: (String, P.Pretty P.ColorText) -> Line.Completion -prettyCompletion' (s, p) = Line.Completion s (P.toAnsiUnbroken p) False - -prettyCompletion'' :: Bool -> (String, P.Pretty P.ColorText) -> Line.Completion -prettyCompletion'' spaceAtEnd (s, p) = 
Line.Completion s (P.toAnsiUnbroken p) spaceAtEnd - -fuzzyCompleteHashQualified :: Names0 -> String -> [Line.Completion] -fuzzyCompleteHashQualified b q0@(HQ.fromString -> query) = case query of - Nothing -> [] - Just query -> - fixupCompletion q0 $ - makeCompletion <$> Find.fuzzyFindInBranch b query - where - makeCompletion (sr, p) = - prettyCompletion' (HQ.toString . SR.name $ sr, p) - -fuzzyComplete :: String -> [String] -> [Line.Completion] -fuzzyComplete q ss = - fixupCompletion q (prettyCompletion' <$> Find.simpleFuzzyFinder q ss id) - -exactComplete :: String -> [String] -> [Line.Completion] -exactComplete q ss = go <$> filter (isPrefixOf q) ss where - go s = prettyCompletion'' (s == q) - (s, P.hiBlack (P.string q) <> P.string (drop (length q) s)) - -prefixIncomplete :: String -> [String] -> [Line.Completion] -prefixIncomplete q ss = go <$> filter (isPrefixOf q) ss where - go s = prettyCompletion'' False - (s, P.hiBlack (P.string q) <> P.string (drop (length q) s)) - --- workaround for https://github.com/judah/haskeline/issues/100 --- if the common prefix of all the completions is smaller than --- the query, we make all the replacements equal to the query, --- which will preserve what the user has typed -fixupCompletion :: String -> [Line.Completion] -> [Line.Completion] -fixupCompletion _q [] = [] -fixupCompletion _q [c] = [c] -fixupCompletion q cs@(h:t) = let - commonPrefix (h1:t1) (h2:t2) | h1 == h2 = h1 : commonPrefix t1 t2 - commonPrefix _ _ = "" - overallCommonPrefix = - foldl commonPrefix (Line.replacement h) (Line.replacement <$> t) - in if not (q `isPrefixOf` overallCommonPrefix) - then [ c { Line.replacement = q } | c <- cs ] - else cs - -parseInput - :: Map String InputPattern -> [String] -> Either (P.Pretty CT.ColorText) Input -parseInput patterns ss = case ss of - [] -> Left "" - command : args -> case Map.lookup command patterns of - Just pat -> parse pat args - Nothing -> - Left - . warn - . 
P.wrap - $ "I don't know how to " - <> P.group (fromString command <> ".") - <> "Type `help` or `?` to get help." - -prompt :: String -prompt = "> " - --- `plural [] "cat" "cats" = "cats"` --- `plural ["meow"] "cat" "cats" = "cat"` --- `plural ["meow", "meow"] "cat" "cats" = "cats"` -plural :: Foldable f => f a -> b -> b -> b -plural items one other = case toList items of - [_] -> one - _ -> other - -plural' :: Integral a => a -> b -> b -> b -plural' 1 one _other = one -plural' _ _one other = other diff --git a/parser-typechecker/src/Unison/CommandLine/DisplayValues.hs b/parser-typechecker/src/Unison/CommandLine/DisplayValues.hs deleted file mode 100644 index b3cb70364d..0000000000 --- a/parser-typechecker/src/Unison/CommandLine/DisplayValues.hs +++ /dev/null @@ -1,96 +0,0 @@ -{-# Language PatternSynonyms #-} -{-# Language OverloadedStrings #-} - -module Unison.CommandLine.DisplayValues where - -import Data.Foldable ( fold ) - -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import Unison.Term (Term) -import Unison.Type (Type) -import Unison.Var (Var) -import qualified Unison.Builtin.Decls as DD -import qualified Unison.DataDeclaration as DD -import qualified Unison.DeclPrinter as DP -import qualified Unison.NamePrinter as NP -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Referent as Referent -import qualified Unison.Reference as Reference -import qualified Unison.Term as Term -import qualified Unison.TermPrinter as TP -import qualified Unison.TypePrinter as TypePrinter -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.SyntaxText as S - -type Pretty = P.Pretty P.ColorText - -displayTerm :: (Var v, Monad m) - => PPE.PrettyPrintEnvDecl - -> (Reference -> m (Maybe (Term v a))) - -> (Referent -> m (Maybe (Type v a))) - -> (Reference -> m (Maybe (Term v a))) - -> (Reference -> m (Maybe (DD.Decl v a))) - -> Term v a - -> m Pretty -displayTerm pped terms typeOf eval types tm = case tm of - -- todo: can 
dispatch on other things with special rendering - Term.Ref' r -> eval r >>= \case - Nothing -> pure $ termName (PPE.suffixifiedPPE pped) (Referent.Ref r) - Just tm -> displayDoc pped terms typeOf eval types tm - _ -> displayDoc pped terms typeOf eval types tm - -displayDoc :: forall v m a. (Var v, Monad m) - => PPE.PrettyPrintEnvDecl - -> (Reference -> m (Maybe (Term v a))) - -> (Referent -> m (Maybe (Type v a))) - -> (Reference -> m (Maybe (Term v a))) - -> (Reference -> m (Maybe (DD.Decl v a))) - -> Term v a - -> m Pretty -displayDoc pped terms typeOf evaluated types = go - where - go (DD.DocJoin docs) = fold <$> traverse go docs - go (DD.DocBlob txt) = pure $ P.paragraphyText txt - go (DD.DocLink (DD.LinkTerm (Term.TermLink' r))) = - pure $ P.underline (termName (PPE.suffixifiedPPE pped) r) - go (DD.DocLink (DD.LinkType (Term.TypeLink' r))) = - pure $ P.underline (typeName (PPE.suffixifiedPPE pped) r) - go (DD.DocSource (DD.LinkTerm (Term.TermLink' r))) = prettyTerm terms r - go (DD.DocSource (DD.LinkType (Term.TypeLink' r))) = prettyType r - go (DD.DocSignature (Term.TermLink' r)) = prettySignature r - go (DD.DocEvaluate (Term.TermLink' r)) = prettyEval evaluated r - go tm = pure $ TP.pretty (PPE.suffixifiedPPE pped) tm - prettySignature r = typeOf r >>= \case - Nothing -> pure $ termName (PPE.unsuffixifiedPPE pped) r - Just typ -> pure . P.group $ - TypePrinter.prettySignatures - (PPE.suffixifiedPPE pped) - [(PPE.termName (PPE.unsuffixifiedPPE pped) r, typ)] - prettyEval terms r = case r of - Referent.Ref (Reference.Builtin n) -> pure . 
P.syntaxToColor $ P.text n - Referent.Ref ref -> - let ppe = PPE.declarationPPE pped ref - in terms ref >>= \case - Nothing -> pure $ "😶 Missing term source for: " <> termName ppe r - Just tm -> pure $ TP.pretty ppe tm - Referent.Con r _ _ -> pure $ typeName (PPE.declarationPPE pped r) r - prettyTerm terms r = case r of - Referent.Ref (Reference.Builtin _) -> prettySignature r - Referent.Ref ref -> let ppe = PPE.declarationPPE pped ref in terms ref >>= \case - Nothing -> pure $ "😶 Missing term source for: " <> termName ppe r - Just tm -> pure . P.syntaxToColor $ P.group $ TP.prettyBinding ppe (PPE.termName ppe r) tm - Referent.Con r _ _ -> prettyType r - prettyType r = let ppe = PPE.declarationPPE pped r in types r >>= \case - Nothing -> pure $ "😶 Missing type source for: " <> typeName ppe r - Just ty -> pure . P.syntaxToColor $ P.group $ DP.prettyDecl ppe r (PPE.typeName ppe r) ty - -termName :: PPE.PrettyPrintEnv -> Referent -> Pretty -termName ppe r = P.syntaxToColor $ - NP.styleHashQualified'' (NP.fmt $ S.Referent r) name - where name = PPE.termName ppe r - -typeName :: PPE.PrettyPrintEnv -> Reference -> Pretty -typeName ppe r = P.syntaxToColor $ - NP.styleHashQualified'' (NP.fmt $ S.Reference r) name - where name = PPE.typeName ppe r diff --git a/parser-typechecker/src/Unison/CommandLine/InputPattern.hs b/parser-typechecker/src/Unison/CommandLine/InputPattern.hs deleted file mode 100644 index c1a55d6499..0000000000 --- a/parser-typechecker/src/Unison/CommandLine/InputPattern.hs +++ /dev/null @@ -1,92 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE ViewPatterns #-} - - -module Unison.CommandLine.InputPattern where - -import qualified System.Console.Haskeline as Line -import Unison.Codebase (Codebase) -import Unison.Codebase.Branch (Branch) -import Unison.Codebase.Editor.Input (Input (..)) -import qualified Unison.Util.ColorText as CT -import qualified Unison.Util.Pretty as P -import Unison.Codebase.Path as Path - --- 
InputPatterns accept some fixed number of Required arguments of various --- types, followed by a variable number of a single type of argument. -data IsOptional - = Required -- 1, at the start - | Optional -- 0 or 1, at the end - | ZeroPlus -- 0 or more, at the end - | OnePlus -- 1 or more, at the end - deriving Show - -data InputPattern = InputPattern - { patternName :: String - , aliases :: [String] - , args :: [(IsOptional, ArgumentType)] - , help :: P.Pretty CT.ColorText - , parse :: [String] -> Either (P.Pretty CT.ColorText) Input - } - -data ArgumentType = ArgumentType - { typeName :: String - , suggestions :: forall m v a . Monad m - => String - -> Codebase m v a - -> Branch m - -> Path.Absolute - -> m [Line.Completion] - } -instance Show ArgumentType where - show at = "ArgumentType " <> typeName at - --- `argType` gets called when the user tries to autocomplete an `i`th argument (zero-indexed). --- todo: would be nice if we could alert the user if they try to autocomplete --- past the end. It would also be nice if -argType :: InputPattern -> Int -> Maybe ArgumentType -argType ip i = go (i, args ip) where - -- Strategy: all of these input patterns take some number of arguments. - -- If it takes no arguments, then don't autocomplete. - go (_, []) = Nothing - -- If requesting the 0th of >=1 arguments, return it. - go (0, (_, t) : _) = Just t - -- Vararg parameters should appear at the end of the arg list, and work for - -- any later argument number. - go (_, [(ZeroPlus, t)]) = Just t - go (_, [(OnePlus, t)]) = Just t - -- Optional parameters only work at position 0, under this countdown scheme. - go (_, [(Optional, _)]) = Nothing - -- If requesting a later parameter, decrement and drop one. - go (n, (Required, _) : args) = go (n - 1, args) - -- The argument list spec is invalid if something follows optional or vararg - go _ = error $ "Input pattern " <> show (patternName ip) - <> " has an invalid argument list: " <> (show . 
fmap fst) (args ip) - -minArgs :: InputPattern -> Int -minArgs ip@(fmap fst . args -> args) = go args where - go [] = 0 - go (Required : args) = 1 + go args - go [_] = 0 - go _ = error $ "Invalid args for InputPattern (" - <> show (patternName ip) <> "): " <> show args - -maxArgs :: InputPattern -> Maybe Int -maxArgs ip@(fmap fst . args -> args) = go args where - go [] = Just 0 - go (Required : args) = (1 +) <$> go args - go [Optional] = Just 0 - go [_] = Nothing - go _ = error $ "Invalid args for InputPattern (" - <> show (patternName ip) <> "): " <> show args - -noSuggestions - :: Monad m - => String - -> Codebase m v a - -> Branch m - -> Path.Absolute - -> m [Line.Completion] -noSuggestions _ _ _ _ = pure [] - diff --git a/parser-typechecker/src/Unison/CommandLine/InputPatterns.hs b/parser-typechecker/src/Unison/CommandLine/InputPatterns.hs deleted file mode 100644 index f6e5a533b0..0000000000 --- a/parser-typechecker/src/Unison/CommandLine/InputPatterns.hs +++ /dev/null @@ -1,1549 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.CommandLine.InputPatterns where - -import Unison.Prelude - -import qualified Control.Lens.Cons as Cons -import qualified Control.Lens as Lens -import Data.Bifunctor (first) -import Data.List (intercalate, isPrefixOf) -import Data.List.Extra (nubOrdOn) -import qualified System.Console.Haskeline.Completion as Completion -import System.Console.Haskeline.Completion (Completion(Completion)) -import Unison.Codebase (Codebase) -import Unison.Codebase.Editor.Input (Input) -import qualified Unison.Codebase.SyncMode as SyncMode -import Unison.CommandLine.InputPattern - ( ArgumentType(..) - , InputPattern(InputPattern) - , IsOptional(..) 
- ) -import Unison.CommandLine -import Unison.Util.Monoid (intercalateMap) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Text.Megaparsec as P -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Editor.Input as Input -import qualified Unison.Codebase.Path as Path -import qualified Unison.CommandLine.InputPattern as I -import qualified Unison.HashQualified as HQ -import qualified Unison.HashQualified' as HQ' -import qualified Unison.Name as Name -import qualified Unison.Names2 as Names -import qualified Unison.Util.ColorText as CT -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.Relation as R -import qualified Unison.Codebase.Editor.SlurpResult as SR -import qualified Unison.Codebase.Editor.UriParser as UriParser -import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) -import qualified Unison.Codebase.Editor.RemoteRepo as RemoteRepo -import Data.Tuple.Extra (uncurry3) - -showPatternHelp :: InputPattern -> P.Pretty CT.ColorText -showPatternHelp i = P.lines [ - P.bold (fromString $ I.patternName i) <> fromString - (if not . null $ I.aliases i - then " (or " <> intercalate ", " (I.aliases i) <> ")" - else ""), - P.wrap $ I.help i ] - -patternName :: InputPattern -> P.Pretty P.ColorText -patternName = fromString . I.patternName - --- `example list ["foo", "bar"]` (haskell) becomes `list foo bar` (pretty) -makeExample, makeExampleNoBackticks :: InputPattern -> [P.Pretty CT.ColorText] -> P.Pretty CT.ColorText -makeExample p args = P.group . 
backtick $ makeExampleNoBackticks p args - -makeExampleNoBackticks p args = - P.group $ intercalateMap " " id (P.nonEmpty $ fromString (I.patternName p) : args) - -makeExample' :: InputPattern -> P.Pretty CT.ColorText -makeExample' p = makeExample p [] - -makeExampleEOS :: - InputPattern -> [P.Pretty CT.ColorText] -> P.Pretty CT.ColorText -makeExampleEOS p args = P.group $ - backtick (intercalateMap " " id (P.nonEmpty $ fromString (I.patternName p) : args)) <> "." - -helpFor :: InputPattern -> Either (P.Pretty CT.ColorText) Input -helpFor p = I.parse help [I.patternName p] - -mergeBuiltins :: InputPattern -mergeBuiltins = InputPattern "builtins.merge" [] [] - "Adds the builtins to `builtins.` in the current namespace (excluding `io` and misc)." - (const . pure $ Input.MergeBuiltinsI) - -mergeIOBuiltins :: InputPattern -mergeIOBuiltins = InputPattern "builtins.mergeio" [] [] - "Adds all the builtins to `builtins.` in the current namespace, including `io` and misc." - (const . pure $ Input.MergeIOBuiltinsI) - -updateBuiltins :: InputPattern -updateBuiltins = InputPattern - "builtins.update" - [] - [] - ( "Adds all the builtins that are missing from this namespace, " - <> "and deprecate the ones that don't exist in this version of Unison." - ) - (const . pure $ Input.UpdateBuiltinsI) - -todo :: InputPattern -todo = InputPattern - "todo" - [] - [(Optional, patchArg), (Optional, pathArg)] - (P.wrapColumn2 - [ ( makeExample' todo - , "lists the refactor work remaining in the default patch for the current" - <> " namespace." - ) - , ( makeExample todo [""] - , "lists the refactor work remaining in the given patch in the current " - <> "namespace." - ) - , ( makeExample todo ["", "[path]"] - , "lists the refactor work remaining in the given patch in given namespace." - ) - ] - ) - (\case - patchStr : ws -> mapLeft (warn . 
fromString) $ do - patch <- Path.parseSplit' Path.definitionNameSegment patchStr - branch <- case ws of - [] -> pure Path.relativeEmpty' - [pathStr] -> Path.parsePath' pathStr - _ -> Left "`todo` just takes a patch and one optional namespace" - Right $ Input.TodoI (Just patch) branch - [] -> Right $ Input.TodoI Nothing Path.relativeEmpty' - ) - -load :: InputPattern -load = InputPattern - "load" - [] - [(Optional, noCompletions)] - (P.wrapColumn2 - [ ( makeExample' load - , "parses, typechecks, and evaluates the most recent scratch file." - ) - , (makeExample load [""] - , "parses, typechecks, and evaluates the given scratch file." - ) - ] - ) - (\case - [] -> pure $ Input.LoadI Nothing - [file] -> pure $ Input.LoadI . Just $ file - _ -> Left (I.help load)) - - -add :: InputPattern -add = - InputPattern - "add" - [] - [(ZeroPlus, noCompletions)] - ("`add` adds to the codebase all the definitions from the most recently " - <> "typechecked file." - ) - $ \ws -> case traverse HQ'.fromString ws of - Just ws -> pure $ Input.AddI ws - Nothing -> - Left - . warn - . P.lines - . fmap fromString - . ("I don't know what these refer to:\n" :) - $ collectNothings HQ'.fromString ws - -previewAdd :: InputPattern -previewAdd = - InputPattern - "add.preview" - [] - [(ZeroPlus, noCompletions)] - ("`add.preview` previews additions to the codebase from the most recently " - <> "typechecked file. This command only displays cached typechecking " - <> "results. Use `load` to reparse & typecheck the file if the context " - <> "has changed." - ) - $ \ws -> case traverse HQ'.fromString ws of - Just ws -> pure $ Input.PreviewAddI ws - Nothing -> - Left - . warn - . P.lines - . fmap fromString - . 
("I don't know what these refer to:\n" :) - $ collectNothings HQ'.fromString ws - -update :: InputPattern -update = InputPattern "update" - [] - [(Optional, patchArg) - ,(ZeroPlus, noCompletions)] - (P.wrap (makeExample' update <> "works like" - <> P.group (makeExample' add <> ",") - <> "except that if a definition in the file has the same name as an" - <> "existing definition, the name gets updated to point to the new" - <> "definition. If the old definition has any dependents, `update` will" - <> "add those dependents to a refactoring session, specified by an" - <> "optional patch.") - <> P.wrapColumn2 - [ (makeExample' update - , "adds all definitions in the .u file, noting replacements in the" - <> "default patch for the current namespace.") - , (makeExample update [""] - , "adds all definitions in the .u file, noting replacements in the" - <> "specified patch.") - , (makeExample update ["", "foo", "bar"] - , "adds `foo`, `bar`, and their dependents from the .u file, noting" - <> "any replacements into the specified patch.") - ] - ) - (\case - patchStr : ws -> do - patch <- first fromString $ Path.parseSplit' Path.definitionNameSegment patchStr - case traverse HQ'.fromString ws of - Just ws -> Right $ Input.UpdateI (Just patch) ws - Nothing -> - Left . warn . P.lines . fmap fromString . - ("I don't know what these refer to:\n" :) $ - collectNothings HQ'.fromString ws - [] -> Right $ Input.UpdateI Nothing [] ) - -previewUpdate :: InputPattern -previewUpdate = - InputPattern - "update.preview" - [] - [(ZeroPlus, noCompletions)] - ("`update.preview` previews updates to the codebase from the most " - <> "recently typechecked file. This command only displays cached " - <> "typechecking results. Use `load` to reparse & typecheck the file if " - <> "the context has changed." - ) - $ \ws -> case traverse HQ'.fromString ws of - Just ws -> pure $ Input.PreviewUpdateI ws - Nothing -> - Left - . warn - . P.lines - . fmap fromString - . 
("I don't know what these refer to:\n" :) - $ collectNothings HQ'.fromString ws - -patch :: InputPattern -patch = InputPattern - "patch" - [] - [(Required, patchArg), (Optional, pathArg)] - ( P.wrap - $ makeExample' patch - <> "rewrites any definitions that depend on " - <> "definitions with type-preserving edits to use the updated versions of" - <> "these dependencies." - ) - (\case - patchStr : ws -> first fromString $ do - patch <- Path.parseSplit' Path.definitionNameSegment patchStr - branch <- case ws of - [pathStr] -> Path.parsePath' pathStr - _ -> pure Path.relativeEmpty' - pure $ Input.PropagatePatchI patch branch - [] -> - Left - $ warn - $ makeExample' patch - <> "takes a patch and an optional namespace." - ) - -view :: InputPattern -view = InputPattern - "view" - [] - [(OnePlus, definitionQueryArg)] - "`view foo` prints the definition of `foo`." - ( fmap (Input.ShowDefinitionI Input.ConsoleLocation) - . traverse parseHashQualifiedName - ) - -display :: InputPattern -display = InputPattern - "display" - [] - [(Required, definitionQueryArg)] - "`display foo` prints a rendered version of the term `foo`." - (\case - [s] -> Input.DisplayI Input.ConsoleLocation <$> parseHashQualifiedName s - _ -> Left (I.help display) - ) - - -displayTo :: InputPattern -displayTo = InputPattern - "display.to" - [] - [(Required, noCompletions), (Required, definitionQueryArg)] - ( P.wrap - $ makeExample displayTo ["", "foo"] - <> "prints a rendered version of the term `foo` to the given file." - ) - (\case - [file, s] -> - Input.DisplayI (Input.FileLocation file) <$> parseHashQualifiedName s - _ -> Left (I.help displayTo) - ) - -docs :: InputPattern -docs = InputPattern "docs" [] [(Required, definitionQueryArg)] - "`docs foo` shows documentation for the definition `foo`." 
- (\case - [s] -> first fromString $ Input.DocsI <$> Path.parseHQSplit' s - _ -> Left (I.help docs)) - -undo :: InputPattern -undo = InputPattern "undo" [] [] - "`undo` reverts the most recent change to the codebase." - (const $ pure Input.UndoI) - -viewByPrefix :: InputPattern -viewByPrefix = InputPattern - "view.recursive" - [] - [(OnePlus, definitionQueryArg)] - "`view.recursive Foo` prints the definitions of `Foo` and `Foo.blah`." - ( fmap (Input.ShowDefinitionByPrefixI Input.ConsoleLocation) - . traverse parseHashQualifiedName - ) - -find :: InputPattern -find = InputPattern - "find" - [] - [(ZeroPlus, fuzzyDefinitionQueryArg)] - (P.wrapColumn2 - [ ("`find`", "lists all definitions in the current namespace.") - , ( "`find foo`" - , "lists all definitions with a name similar to 'foo' in the current " - <> "namespace." - ) - , ( "`find foo bar`" - , "lists all definitions with a name similar to 'foo' or 'bar' in the " - <> "current namespace." - ) - ] - ) - (pure . Input.SearchByNameI False False) - -findShallow :: InputPattern -findShallow = InputPattern - "list" - ["ls"] - [(Optional, pathArg)] - (P.wrapColumn2 - [ ("`list`", "lists definitions and namespaces at the current level of the current namespace.") - , ( "`list foo`", "lists the 'foo' namespace." ) - , ( "`list .foo`", "lists the '.foo' namespace." ) - ] - ) - (\case - [] -> pure $ Input.FindShallowI Path.relativeEmpty' - [path] -> first fromString $ do - p <- Path.parsePath' path - pure $ Input.FindShallowI p - _ -> Left (I.help findShallow) - ) - -findVerbose :: InputPattern -findVerbose = InputPattern - "find.verbose" - ["list.verbose", "ls.verbose"] - [(ZeroPlus, fuzzyDefinitionQueryArg)] - ( "`find.verbose` searches for definitions like `find`, but includes hashes " - <> "and aliases in the results." - ) - (pure . 
Input.SearchByNameI True False) - -findPatch :: InputPattern -findPatch = InputPattern - "find.patch" - ["list.patch", "ls.patch"] - [] - (P.wrapColumn2 - [("`find.patch`", "lists all patches in the current namespace.")] - ) - (pure . const Input.FindPatchI) - -renameTerm :: InputPattern -renameTerm = InputPattern "move.term" ["rename.term"] - [(Required, exactDefinitionTermQueryArg) - ,(Required, newNameArg)] - "`move.term foo bar` renames `foo` to `bar`." - (\case - [oldName, newName] -> first fromString $ do - src <- Path.parseHQSplit' oldName - target <- Path.parseSplit' Path.definitionNameSegment newName - pure $ Input.MoveTermI src target - _ -> Left . P.warnCallout $ P.wrap - "`rename.term` takes two arguments, like `rename.term oldname newname`.") - -renameType :: InputPattern -renameType = InputPattern "move.type" ["rename.type"] - [(Required, exactDefinitionTypeQueryArg) - ,(Required, newNameArg)] - "`move.type foo bar` renames `foo` to `bar`." - (\case - [oldName, newName] -> first fromString $ do - src <- Path.parseHQSplit' oldName - target <- Path.parseSplit' Path.definitionNameSegment newName - pure $ Input.MoveTypeI src target - _ -> Left . P.warnCallout $ P.wrap - "`rename.type` takes two arguments, like `rename.type oldname newname`.") - -delete :: InputPattern -delete = InputPattern "delete" [] - [(OnePlus, definitionQueryArg)] - "`delete foo` removes the term or type name `foo` from the namespace." - (\case - [query] -> first fromString $ do - p <- Path.parseHQSplit' query - pure $ Input.DeleteI p - _ -> Left . P.warnCallout $ P.wrap - "`delete` takes an argument, like `delete name`." - ) - -deleteTerm :: InputPattern -deleteTerm = InputPattern "delete.term" [] - [(OnePlus, exactDefinitionTermQueryArg)] - "`delete.term foo` removes the term name `foo` from the namespace." - (\case - [query] -> first fromString $ do - p <- Path.parseHQSplit' query - pure $ Input.DeleteTermI p - _ -> Left . 
P.warnCallout $ P.wrap - "`delete.term` takes an argument, like `delete.term name`." - ) - -deleteType :: InputPattern -deleteType = InputPattern "delete.type" [] - [(OnePlus, exactDefinitionTypeQueryArg)] - "`delete.type foo` removes the type name `foo` from the namespace." - (\case - [query] -> first fromString $ do - p <- Path.parseHQSplit' query - pure $ Input.DeleteTypeI p - _ -> Left . P.warnCallout $ P.wrap - "`delete.type` takes an argument, like `delete.type name`." - ) - -deleteTermReplacementCommand :: String -deleteTermReplacementCommand = "delete.term-replacement" - -deleteTypeReplacementCommand :: String -deleteTypeReplacementCommand = "delete.type-replacement" - -deleteReplacement :: Bool -> InputPattern -deleteReplacement isTerm = InputPattern - commandName - [] - [(Required, if isTerm then exactDefinitionTermQueryArg else exactDefinitionTypeQueryArg), (Optional, patchArg)] - ( P.string - $ commandName - <> " ` removes any edit of the " - <> str - <> " `foo` from the patch `patch`, " - <> "or from the default patch if none is specified. Note that `foo` refers to the " - <> "original name for the " - <> str - <> " - not the one in place after the edit." - ) - (\case - query : patch -> do - patch <- - first fromString - . traverse (Path.parseSplit' Path.definitionNameSegment) - $ listToMaybe patch - q <- parseHashQualifiedName query - pure $ input q patch - _ -> - Left - . P.warnCallout - . P.wrapString - $ commandName - <> " needs arguments. See `help " - <> commandName - <> "`." 
- ) - where - input = if isTerm - then Input.RemoveTermReplacementI - else Input.RemoveTypeReplacementI - str = if isTerm then "term" else "type" - commandName = if isTerm - then deleteTermReplacementCommand - else deleteTypeReplacementCommand - -deleteTermReplacement :: InputPattern -deleteTermReplacement = deleteReplacement True - -deleteTypeReplacement :: InputPattern -deleteTypeReplacement = deleteReplacement False - -parseHashQualifiedName - :: String -> Either (P.Pretty CT.ColorText) HQ.HashQualified -parseHashQualifiedName s = - maybe - ( Left - . P.warnCallout - . P.wrap - $ P.string s - <> " is not a well-formed name, hash, or hash-qualified name. " - <> "I expected something like `foo`, `#abc123`, or `foo#abc123`." - ) - Right - $ HQ.fromString s - -aliasTerm :: InputPattern -aliasTerm = InputPattern "alias.term" [] - [(Required, exactDefinitionTermQueryArg), (Required, newNameArg)] - "`alias.term foo bar` introduces `bar` with the same definition as `foo`." - (\case - [oldName, newName] -> first fromString $ do - source <- Path.parseShortHashOrHQSplit' oldName - target <- Path.parseSplit' Path.definitionNameSegment newName - pure $ Input.AliasTermI source target - _ -> Left . warn $ P.wrap - "`alias.term` takes two arguments, like `alias.term oldname newname`." - ) - -aliasType :: InputPattern -aliasType = InputPattern "alias.type" [] - [(Required, exactDefinitionTypeQueryArg), (Required, newNameArg)] - "`alias.type Foo Bar` introduces `Bar` with the same definition as `Foo`." - (\case - [oldName, newName] -> first fromString $ do - source <- Path.parseShortHashOrHQSplit' oldName - target <- Path.parseSplit' Path.definitionNameSegment newName - pure $ Input.AliasTypeI source target - _ -> Left . warn $ P.wrap - "`alias.type` takes two arguments, like `alias.type oldname newname`." - ) - -aliasMany :: InputPattern -aliasMany = InputPattern "alias.many" ["copy"] - [(Required, definitionQueryArg), (OnePlus, exactDefinitionOrPathArg)] - (P.group . 
P.lines $ - [ P.wrap $ P.group (makeExample aliasMany ["", "[relative2...]", ""]) - <> "creates aliases `relative1`, `relative2`, ... in the namespace `namespace`." - , P.wrap $ P.group (makeExample aliasMany ["foo.foo", "bar.bar", ".quux"]) - <> "creates aliases `.quux.foo.foo` and `.quux.bar.bar`." - ]) - (\case - srcs@(_:_) Cons.:> dest -> first fromString $ do - sourceDefinitions <- traverse Path.parseHQSplit srcs - destNamespace <- Path.parsePath' dest - pure $ Input.AliasManyI sourceDefinitions destNamespace - _ -> Left (I.help aliasMany) - ) - - -cd :: InputPattern -cd = InputPattern "namespace" ["cd", "j"] [(Required, pathArg)] - (P.wrapColumn2 - [ (makeExample cd ["foo.bar"], - "descends into foo.bar from the current namespace.") - , (makeExample cd [".cat.dog"], - "sets the current namespace to the abolute namespace .cat.dog.") ]) - (\case - [p] -> first fromString $ do - p <- Path.parsePath' p - pure . Input.SwitchBranchI $ p - _ -> Left (I.help cd) - ) - -back :: InputPattern -back = InputPattern "back" ["popd"] [] - (P.wrapColumn2 - [ (makeExample back [], - "undoes the last" <> makeExample' cd <> "command.") - ]) - (\case - [] -> pure Input.PopBranchI - _ -> Left (I.help cd) - ) - -deleteBranch :: InputPattern -deleteBranch = InputPattern "delete.namespace" [] [(Required, pathArg)] - "`delete.namespace ` deletes the namespace `foo`" - (\case - ["."] -> first fromString . - pure $ Input.DeleteBranchI Nothing - [p] -> first fromString $ do - p <- Path.parseSplit' Path.definitionNameSegment p - pure . Input.DeleteBranchI $ Just p - _ -> Left (I.help deleteBranch) - ) - -deletePatch :: InputPattern -deletePatch = InputPattern "delete.patch" [] [(Required, patchArg)] - "`delete.patch ` deletes the patch `foo`" - (\case - [p] -> first fromString $ do - p <- Path.parseSplit' Path.definitionNameSegment p - pure . 
Input.DeletePatchI $ p - _ -> Left (I.help deletePatch) - ) - -movePatch :: String -> String -> Either (P.Pretty CT.ColorText) Input -movePatch src dest = first fromString $ do - src <- Path.parseSplit' Path.definitionNameSegment src - dest <- Path.parseSplit' Path.definitionNameSegment dest - pure $ Input.MovePatchI src dest - -copyPatch' :: String -> String -> Either (P.Pretty CT.ColorText) Input -copyPatch' src dest = first fromString $ do - src <- Path.parseSplit' Path.definitionNameSegment src - dest <- Path.parseSplit' Path.definitionNameSegment dest - pure $ Input.CopyPatchI src dest - -copyPatch :: InputPattern -copyPatch = InputPattern "copy.patch" - [] - [(Required, patchArg), (Required, newNameArg)] - "`copy.patch foo bar` copies the patch `foo` to `bar`." - (\case - [src, dest] -> copyPatch' src dest - _ -> Left (I.help copyPatch) - ) - -renamePatch :: InputPattern -renamePatch = InputPattern "move.patch" - ["rename.patch"] - [(Required, patchArg), (Required, newNameArg)] - "`move.patch foo bar` renames the patch `foo` to `bar`." - (\case - [src, dest] -> movePatch src dest - _ -> Left (I.help renamePatch) - ) - -renameBranch :: InputPattern -renameBranch = InputPattern "move.namespace" - ["rename.namespace"] - [(Required, pathArg), (Required, newNameArg)] - "`move.namespace foo bar` renames the path `bar` to `foo`." 
- (\case - [".", dest] -> first fromString $ do - dest <- Path.parseSplit' Path.definitionNameSegment dest - pure $ Input.MoveBranchI Nothing dest - [src, dest] -> first fromString $ do - src <- Path.parseSplit' Path.definitionNameSegment src - dest <- Path.parseSplit' Path.definitionNameSegment dest - pure $ Input.MoveBranchI (Just src) dest - _ -> Left (I.help renameBranch) - ) - -history :: InputPattern -history = InputPattern "history" [] - [(Optional, pathArg)] - (P.wrapColumn2 [ - (makeExample history [], "Shows the history of the current path."), - (makeExample history [".foo"], "Shows history of the path .foo."), - (makeExample history ["#9dndk3kbsk13nbpeu"], - "Shows the history of the namespace with the given hash." <> - "The full hash must be provided.") - ]) - (\case - [src] -> first fromString $ do - p <- Input.parseBranchId src - pure $ Input.HistoryI (Just 10) (Just 10) p - [] -> pure $ Input.HistoryI (Just 10) (Just 10) (Right Path.currentPath) - _ -> Left (I.help history) - ) - -forkLocal :: InputPattern -forkLocal = InputPattern "fork" ["copy.namespace"] [(Required, pathArg) - ,(Required, newNameArg)] - (makeExample forkLocal ["src", "dest"] <> "creates the namespace `dest` as a copy of `src`.") - (\case - [src, dest] -> first fromString $ do - src <- Input.parseBranchId src - dest <- Path.parsePath' dest - pure $ Input.ForkLocalBranchI src dest - _ -> Left (I.help forkLocal) - ) - -resetRoot :: InputPattern -resetRoot = InputPattern "reset-root" [] [(Required, pathArg)] - (P.wrapColumn2 [ - (makeExample resetRoot [".foo"], - "Reset the root namespace (along with its history) to that of the `.foo` namespace."), - (makeExample resetRoot ["#9dndk3kbsk13nbpeu"], - "Reset the root namespace (along with its history) to that of the namespace with hash `#9dndk3kbsk13nbpeu`.") - ]) - (\case - [src] -> first fromString $ do - src <- Input.parseBranchId src - pure $ Input.ResetRootI src - _ -> Left (I.help resetRoot)) - -pull :: InputPattern -pull = 
InputPattern - "pull" - [] - [(Optional, gitUrlArg), (Optional, pathArg)] - (P.lines - [ P.wrap - "The `pull` command merges a remote namespace into a local namespace." - , "" - , P.wrapColumn2 - [ ( "`pull remote local`" - , "merges the remote namespace `remote`" - <>"into the local namespace `local`." - ) - , ( "`pull remote`" - , "merges the remote namespace `remote`" - <>"into the current namespace") - , ( "`pull`" - , "merges the remote namespace configured in `.unisonConfig`" - <> "with the key `GitUrl.ns` where `ns` is the current namespace," - <> "into the current namespace") - ] - , "" - , P.wrap "where `remote` is a git repository, optionally followed by `:`" - <> "and an absolute remote path, such as:" - , P.indentN 2 . P.lines $ - [P.backticked "https://github.com/org/repo" - ,P.backticked "https://github.com/org/repo:.some.remote.path" - ] - ] - ) - (\case - [] -> - Right $ Input.PullRemoteBranchI Nothing Path.relativeEmpty' SyncMode.ShortCircuit - [url] -> do - ns <- parseUri "url" url - Right $ Input.PullRemoteBranchI (Just ns) Path.relativeEmpty' SyncMode.ShortCircuit - [url, path] -> do - ns <- parseUri "url" url - p <- first fromString $ Path.parsePath' path - Right $ Input.PullRemoteBranchI (Just ns) p SyncMode.ShortCircuit - _ -> Left (I.help pull) - ) - -pullExhaustive :: InputPattern -pullExhaustive = InputPattern - "debug.pull-exhaustive" - [] - [(Required, gitUrlArg), (Optional, pathArg)] - (P.lines - [ P.wrap $ - "The " <> makeExample' pullExhaustive <> "command can be used in place of" - <> makeExample' pull <> "to complete namespaces" - <> "which were pulled incompletely due to a bug in UCM" - <> "versions M1l and earlier. It may be extra slow!" 
- ] - ) - (\case - [] -> - Right $ Input.PullRemoteBranchI Nothing Path.relativeEmpty' SyncMode.Complete - [url] -> do - ns <- parseUri "url" url - Right $ Input.PullRemoteBranchI (Just ns) Path.relativeEmpty' SyncMode.Complete - [url, path] -> do - ns <- parseUri "url" url - p <- first fromString $ Path.parsePath' path - Right $ Input.PullRemoteBranchI (Just ns) p SyncMode.Complete - _ -> Left (I.help pull) - ) - -push :: InputPattern -push = InputPattern - "push" - [] - [(Required, gitUrlArg), (Optional, pathArg)] - (P.lines - [ P.wrap - "The `push` command merges a local namespace into a remote namespace." - , "" - , P.wrapColumn2 - [ ( "`push remote local`" - , "merges the contents of the local namespace `local`" - <> "into the remote namespace `remote`." - ) - , ( "`push remote`" - , "publishes the current namespace into the remote namespace `remote`") - , ( "`push`" - , "publishes the current namespace" - <> "into the remote namespace configured in `.unisonConfig`" - <> "with the key `GitUrl.ns` where `ns` is the current namespace") - ] - , "" - , P.wrap "where `remote` is a git repository, optionally followed by `:`" - <> "and an absolute remote path, such as:" - , P.indentN 2 . P.lines $ - [P.backticked "https://github.com/org/repo" - ,P.backticked "https://github.com/org/repo:.some.remote.path" - ] - ] - ) - (\case - [] -> - Right $ Input.PushRemoteBranchI Nothing Path.relativeEmpty' SyncMode.ShortCircuit - url : rest -> do - (repo, sbh, path) <- parseUri "url" url - when (isJust sbh) - $ Left "Can't push to a particular remote namespace hash." 
- p <- case rest of - [] -> Right Path.relativeEmpty' - [path] -> first fromString $ Path.parsePath' path - _ -> Left (I.help push) - Right $ Input.PushRemoteBranchI (Just (repo, path)) p SyncMode.ShortCircuit - ) - -pushExhaustive :: InputPattern -pushExhaustive = InputPattern - "debug.push-exhaustive" - [] - [(Required, gitUrlArg), (Optional, pathArg)] - (P.lines - [ P.wrap $ - "The " <> makeExample' pushExhaustive <> "command can be used in place of" - <> makeExample' push <> "to repair remote namespaces" - <> "which were pushed incompletely due to a bug in UCM" - <> "versions M1l and earlier. It may be extra slow!" - ] - ) - (\case - [] -> - Right $ Input.PushRemoteBranchI Nothing Path.relativeEmpty' SyncMode.Complete - url : rest -> do - (repo, sbh, path) <- parseUri "url" url - when (isJust sbh) - $ Left "Can't push to a particular remote namespace hash." - p <- case rest of - [] -> Right Path.relativeEmpty' - [path] -> first fromString $ Path.parsePath' path - _ -> Left (I.help push) - Right $ Input.PushRemoteBranchI (Just (repo, path)) p SyncMode.Complete - ) - -createPullRequest :: InputPattern -createPullRequest = InputPattern "pull-request.create" ["pr.create"] - [(Required, gitUrlArg), (Required, gitUrlArg), (Optional, pathArg)] - (P.group $ P.lines - [ P.wrap $ makeExample createPullRequest ["base", "head"] - <> "will generate a request to merge the remote repo `head`" - <> "into the remote repo `base`." 
- , "" - , "example: " <> - makeExampleNoBackticks createPullRequest ["https://github.com/unisonweb/base:.trunk", - "https://github.com/me/unison:.prs.base._myFeature" ] - ]) - (\case - [baseUrl, headUrl] -> do - baseRepo <- parseUri "baseRepo" baseUrl - headRepo <- parseUri "headRepo" headUrl - pure $ Input.CreatePullRequestI baseRepo headRepo - _ -> Left (I.help createPullRequest) - ) - -loadPullRequest :: InputPattern -loadPullRequest = InputPattern "pull-request.load" ["pr.load"] - [(Required, gitUrlArg), (Required, gitUrlArg), (Optional, pathArg)] - (P.lines - [P.wrap $ makeExample loadPullRequest ["base", "head"] - <> "will load a pull request for merging the remote repo `head` into the" - <> "remote repo `base`, staging each in the current namespace" - <> "(so make yourself a clean spot to work first)." - ,P.wrap $ makeExample loadPullRequest ["base", "head", "dest"] - <> "will load a pull request for merging the remote repo `head` into the" - <> "remote repo `base`, staging each in `dest`, which must be empty." - ]) - (\case - [baseUrl, headUrl] -> do - baseRepo <- parseUri "baseRepo" baseUrl - headRepo <- parseUri "topicRepo" headUrl - pure $ Input.LoadPullRequestI baseRepo headRepo Path.relativeEmpty' - [baseUrl, headUrl, dest] -> do - baseRepo <- parseUri "baseRepo" baseUrl - headRepo <- parseUri "topicRepo" headUrl - destPath <- first fromString $ Path.parsePath' dest - pure $ Input.LoadPullRequestI baseRepo headRepo destPath - _ -> Left (I.help loadPullRequest) - ) -parseUri :: String -> String -> Either (P.Pretty P.ColorText) RemoteNamespace -parseUri label input = do - ns <- first (fromString . show) -- turn any parsing errors into a Pretty. - (P.parse UriParser.repoPath label (Text.pack input)) - case (RemoteRepo.commit . Lens.view Lens._1) ns of - Nothing -> pure ns - Just commit -> Left . P.wrap $ - "I don't totally know how to address specific git commits (e.g. " - <> P.group (P.text commit <> ")") <> " yet." 
- <> "If you need this, add your 2¢ at" - <> P.backticked "https://github.com/unisonweb/unison/issues/1436" - -squashMerge :: InputPattern -squashMerge = - InputPattern "merge.squash" ["squash"] [(Required, pathArg), (Required, pathArg)] - (P.wrap $ makeExample squashMerge ["src","dest"] - <> "merges `src` namespace into `dest`," - <> "discarding the history of `src` in the process." - <> "The resulting `dest` will have (at most) 1" - <> "additional history entry.") - (\case - [src, dest] -> first fromString $ do - src <- Path.parsePath' src - dest <- Path.parsePath' dest - pure $ Input.MergeLocalBranchI src dest Branch.SquashMerge - _ -> Left (I.help squashMerge) - ) - -mergeLocal :: InputPattern -mergeLocal = InputPattern "merge" [] [(Required, pathArg) - ,(Optional, pathArg)] - (P.column2 [ - ("`merge src`", "merges `src` namespace into the current namespace"), - ("`merge src dest`", "merges `src` namespace into the `dest` namespace")]) - (\case - [src] -> first fromString $ do - src <- Path.parsePath' src - pure $ Input.MergeLocalBranchI src Path.relativeEmpty' Branch.RegularMerge - [src, dest] -> first fromString $ do - src <- Path.parsePath' src - dest <- Path.parsePath' dest - pure $ Input.MergeLocalBranchI src dest Branch.RegularMerge - _ -> Left (I.help mergeLocal) - ) - -diffNamespace :: InputPattern -diffNamespace = InputPattern - "diff.namespace" - [] - [(Required, pathArg), (Required, pathArg)] - (P.column2 - [ ( "`diff.namespace before after`" - , P.wrap - "shows how the namespace `after` differs from the namespace `before`" - ) - ] - ) - (\case - [before, after] -> first fromString $ do - before <- Path.parsePath' before - after <- Path.parsePath' after - pure $ Input.DiffNamespaceI before after - _ -> Left $ I.help diffNamespace - ) - -previewMergeLocal :: InputPattern -previewMergeLocal = InputPattern - "merge.preview" - [] - [(Required, pathArg), (Optional, pathArg)] - (P.column2 - [ ( "`merge.preview src`" - , "shows how the current 
namespace will change after a `merge src`." - ) - , ( "`merge.preview src dest`" - , "shows how `dest` namespace will change after a `merge src dest`." - ) - ] - ) - (\case - [src] -> first fromString $ do - src <- Path.parsePath' src - pure $ Input.PreviewMergeLocalBranchI src Path.relativeEmpty' - [src, dest] -> first fromString $ do - src <- Path.parsePath' src - dest <- Path.parsePath' dest - pure $ Input.PreviewMergeLocalBranchI src dest - _ -> Left (I.help previewMergeLocal) - ) - -replaceEdit - :: (HQ.HashQualified -> HQ.HashQualified -> Maybe Input.PatchPath -> Input) - -> String - -> InputPattern -replaceEdit f s = self - where - self = InputPattern - ("replace." <> s) - [] - [ (Required, definitionQueryArg) - , (Required, definitionQueryArg) - , (Optional, patchArg) - ] - (P.wrapColumn2 - [ ( makeExample self ["", "", ""] - , "Replace the " - <> P.string s - <> " in the given patch " - <> "with the " - <> P.string s - <> " ." - ) - , ( makeExample self ["", ""] - , "Replace the " - <> P.string s - <> " with in the default patch." - ) - ] - ) - (\case - source : target : patch -> do - patch <- - first fromString - <$> traverse (Path.parseSplit' Path.definitionNameSegment) - $ listToMaybe patch - sourcehq <- parseHashQualifiedName source - targethq <- parseHashQualifiedName target - pure $ f sourcehq targethq patch - _ -> Left $ I.help self - ) - -replaceType :: InputPattern -replaceType = replaceEdit Input.ReplaceTypeI "type" - -replaceTerm :: InputPattern -replaceTerm = replaceEdit Input.ReplaceTermI "term" - -viewReflog :: InputPattern -viewReflog = InputPattern - "reflog" - [] - [] - "`reflog` lists the changes that have affected the root namespace" - (\case - [] -> pure Input.ShowReflogI - _ -> Left . warn . 
P.string - $ I.patternName viewReflog ++ " doesn't take any arguments.") - -edit :: InputPattern -edit = InputPattern - "edit" - [] - [(OnePlus, definitionQueryArg)] - ( "`edit foo` prepends the definition of `foo` to the top of the most " - <> "recently saved file." - ) - ( fmap (Input.ShowDefinitionI Input.LatestFileLocation) - . traverse parseHashQualifiedName - ) - -topicNameArg :: ArgumentType -topicNameArg = - ArgumentType "topic" $ \q _ _ _ -> pure (exactComplete q $ Map.keys helpTopicsMap) - -helpTopics :: InputPattern -helpTopics = InputPattern - "help-topics" - ["help-topic"] - [(Optional, topicNameArg)] - ( "`help-topics` lists all topics and `help-topics ` shows an explanation of that topic." ) - (\case - [] -> Left topics - [topic] -> case Map.lookup topic helpTopicsMap of - Nothing -> Left . warn $ "I don't know of that topic. Try `help-topics`." - Just t -> Left t - _ -> Left $ warn "Use `help-topics ` or `help-topics`." - ) - where - topics = P.callout "🌻" $ P.lines [ - "Here's a list of topics I can tell you more about: ", - "", - P.indentN 2 $ P.sep "\n" (P.string <$> Map.keys helpTopicsMap), - "", - aside "Example" "use `help filestatus` to learn more about that topic." - ] - -helpTopicsMap :: Map String (P.Pretty P.ColorText) -helpTopicsMap = Map.fromList [ - ("testcache", testCacheMsg), - ("filestatus", fileStatusMsg), - ("messages.disallowedAbsolute", disallowedAbsoluteMsg), - ("namespaces", pathnamesMsg) - ] - where - blankline = ("","") - fileStatusMsg = P.callout "📓" . P.lines $ [ - P.wrap $ "Here's a list of possible status messages you might see" - <> "for definitions in a .u file.", "", - P.wrapColumn2 [ - (P.bold $ SR.prettyStatus SR.Collision, - "A definition with the same name as an existing definition. Doing" <> - "`update` instead of `add` will turn this failure into a successful" <> - "update."), - blankline, - (P.bold $ SR.prettyStatus SR.Conflicted, - "A definition with the same name as an existing definition." 
<> - "Resolving the conflict and then trying an `update` again will" <> - "turn this into a successful update."), - blankline, - (P.bold $ SR.prettyStatus SR.TermExistingConstructorCollision, - "A definition with the same name as an existing constructor for " <> - "some data type. Rename your definition or the data type before" <> - "trying again to `add` or `update`."), - blankline, - (P.bold $ SR.prettyStatus SR.ConstructorExistingTermCollision, - "A type defined in the file has a constructor that's named the" <> - "same as an existing term. Rename that term or your constructor" <> - "before trying again to `add` or `update`."), - blankline, - (P.bold $ SR.prettyStatus SR.BlockedDependency, - "This definition was blocked because it dependended on " <> - "a definition with a failed status."), - blankline, - (P.bold $ SR.prettyStatus SR.ExtraDefinition, - "This definition was added because it was a dependency of" <> - "a definition explicitly selected.") - ] - ] - testCacheMsg = P.callout "🎈" . P.lines $ [ - P.wrap $ "Unison caches the results of " <> P.blue "test>" - <> "watch expressions. Since these expressions are pure and" - <> "always yield the same result when evaluated, there's no need" - <> "to run them more than once!", - "", - P.wrap $ "A test is rerun only if it has changed, or if one" - <> "of the definitions it depends on has changed." - ] - pathnamesMsg = P.callout "\129488" . P.lines $ [ - P.wrap $ "There are two kinds of namespaces," <> P.group (P.blue "absolute" <> ",") - <> "such as" <> P.group ("(" <> P.blue ".foo.bar") - <> "or" <> P.group (P.blue ".base.math.+" <> ")") - <> "and" <> P.group (P.green "relative" <> ",") - <> "such as" <> P.group ("(" <> P.green "math.sqrt") - <> "or" <> P.group (P.green "util.List.++" <> ")."), - "", - P.wrap $ "Relative names are converted to absolute names by prepending the current namespace." 
- <> "For example, if your Unison prompt reads:", "", - P.indentN 2 $ P.blue ".foo.bar>", "", - "and your .u file looks like:", "", - P.indentN 2 $ P.green "x" <> " = 41", "", - P.wrap $ - "then doing an" <> P.blue "add" <> - "will create the definition with the absolute name" <> - P.group (P.blue ".foo.bar.x" <> " = 41"), - "", - P.wrap $ - "and you can refer to" <> P.green "x" <> "by its absolute name " <> - P.blue ".foo.bar.x" <> "elsewhere" <> "in your code. For instance:", "", - P.indentN 2 $ - "answerToLifeTheUniverseAndEverything = " <> P.blue ".foo.bar.x" <> " + 1" - ] - - disallowedAbsoluteMsg = P.callout "\129302" . P.lines $ [ - P.wrap $ - "Although I can understand absolute (ex: .foo.bar) or" <> - "relative (ex: util.math.sqrt) references to existing definitions" <> - P.group ("(" <> P.blue "help namespaces") <> "to learn more)," <> - "I can't yet handle giving new definitions with absolute names in a .u file.", - "", - P.wrap $ "As a workaround, you can give definitions with a relative name" - <> "temporarily (like `exports.blah.foo`) and then use `move.*` " - <> "or `merge` commands to move stuff around afterwards." - ] - -help :: InputPattern -help = InputPattern - "help" ["?"] [(Optional, commandNameArg)] - "`help` shows general help and `help ` shows help for one command." - (\case - [] -> Left $ intercalateMap "\n\n" showPatternHelp - (sortOn I.patternName validInputs) - [isHelp -> Just msg] -> Left msg - [cmd] -> case Map.lookup cmd commandsByName of - Nothing -> Left . warn $ "I don't know of that command. Try `help`." - Just pat -> Left $ showPatternHelp pat - _ -> Left $ warn "Use `help ` or `help`.") - where - commandsByName = Map.fromList [ - (n, i) | i <- validInputs, n <- I.patternName i : I.aliases i ] - isHelp s = Map.lookup s helpTopicsMap - -quit :: InputPattern -quit = InputPattern "quit" ["exit", ":q"] [] - "Exits the Unison command line interface." - (\case - [] -> pure Input.QuitI - _ -> Left "Use `quit`, `exit`, or to quit." 
- ) - -viewPatch :: InputPattern -viewPatch = InputPattern "view.patch" [] [(Required, patchArg)] - (P.wrapColumn2 - [ ( makeExample' viewPatch - , "Lists all the edits in the default patch." - ) - , ( makeExample viewPatch [""] - , "Lists all the edits in the given patch." - ) - ] - ) - (\case - [] -> Right $ Input.ListEditsI Nothing - [patchStr] -> mapLeft fromString $ do - patch <- Path.parseSplit' Path.definitionNameSegment patchStr - Right $ Input.ListEditsI (Just patch) - _ -> Left $ warn "`view.patch` takes a patch and that's it." - ) - -link :: InputPattern -link = InputPattern - "link" - [] - [(Required, definitionQueryArg), (OnePlus, definitionQueryArg)] - (fromString $ concat - [ "`link metadata defn` creates a link to `metadata` from `defn`. " - , "Use `links defn` or `links defn ` to view outgoing links, " - , "and `unlink metadata defn` to remove a link. The `defn` can be either the " - , "name of a term or type, multiple such names, or a range like `1-4` " - , "for a range of definitions listed by a prior `find` command." - ] - ) - (\case - md : defs -> first fromString $ do - md <- case HQ.fromString md of - Nothing -> Left "Invalid hash qualified identifier for metadata." 
- Just hq -> pure hq - defs <- traverse Path.parseHQSplit' defs - Right $ Input.LinkI md defs - _ -> Left (I.help link) - ) - -links :: InputPattern -links = InputPattern - "links" - [] - [(Required, definitionQueryArg), (Optional, definitionQueryArg)] - (P.column2 [ - (makeExample links ["defn"], "shows all outgoing links from `defn`."), - (makeExample links ["defn", ""], "shows all links of the given type.") ]) - (\case - src : rest -> first fromString $ do - src <- Path.parseHQSplit' src - let ty = case rest of - [] -> Nothing - _ -> Just $ unwords rest - in Right $ Input.LinksI src ty - _ -> Left (I.help links) - ) - -unlink :: InputPattern -unlink = InputPattern - "unlink" - ["delete.link"] - [(Required, definitionQueryArg), (OnePlus, definitionQueryArg)] - (fromString $ concat - [ "`unlink metadata defn` removes a link to `metadata` from `defn`." - , "The `defn` can be either the " - , "name of a term or type, multiple such names, or a range like `1-4` " - , "for a range of definitions listed by a prior `find` command." - ]) - (\case - md : defs -> first fromString $ do - md <- case HQ.fromString md of - Nothing -> Left "Invalid hash qualified identifier for metadata." - Just hq -> pure hq - defs <- traverse Path.parseHQSplit' defs - Right $ Input.UnlinkI md defs - _ -> Left (I.help unlink) - ) - -names :: InputPattern -names = InputPattern "names" [] - [(Required, definitionQueryArg)] - "`names foo` shows the hash and all known names for `foo`." - (\case - [thing] -> case HQ.fromString thing of - Just hq -> Right $ Input.NamesI hq - Nothing -> Left $ "I was looking for one of these forms: " - <> P.blue "foo .foo.bar foo#abc #abcde .foo.bar#asdf" - _ -> Left (I.help names) - ) - -dependents, dependencies :: InputPattern -dependents = InputPattern "dependents" [] [] - "List the dependents of the specified definition." 
- (\case - [thing] -> fmap Input.ListDependentsI $ parseHashQualifiedName thing - _ -> Left (I.help dependents)) -dependencies = InputPattern "dependencies" [] [] - "List the dependencies of the specified definition." - (\case - [thing] -> fmap Input.ListDependenciesI $ parseHashQualifiedName thing - _ -> Left (I.help dependencies)) - -debugNumberedArgs :: InputPattern -debugNumberedArgs = InputPattern "debug.numberedArgs" [] [] - "Dump the contents of the numbered args state." - (const $ Right Input.DebugNumberedArgsI) - -debugBranchHistory :: InputPattern -debugBranchHistory = InputPattern "debug.history" [] - [(Optional, noCompletions)] - "Dump codebase history, compatible with bit-booster.com/graph.html" - (const $ Right Input.DebugBranchHistoryI) - -debugFileHashes :: InputPattern -debugFileHashes = InputPattern "debug.file" [] [] - "View details about the most recent succesfully typechecked file." - (const $ Right Input.DebugTypecheckedUnisonFileI) - -test :: InputPattern -test = InputPattern "test" [] [] - "`test` runs unit tests for the current branch." - (const $ pure $ Input.TestI True True) - -execute :: InputPattern -execute = InputPattern - "run" - [] - [] - (P.wrapColumn2 - [ ( "`run mymain`" - , "Runs `!mymain`, where `mymain` is searched for in the most recent" - <> "typechecked file, or in the codebase." - ) - ] - ) - (\case - [w] -> pure . 
Input.ExecuteI $ w - _ -> Left $ showPatternHelp execute - ) - -createAuthor :: InputPattern -createAuthor = InputPattern "create.author" [] - [(Required, noCompletions), (Required, noCompletions)] - (makeExample createAuthor ["alicecoder", "\"Alice McGee\""] - <> "creates" <> backtick "alicecoder" <> "values in" - <> backtick "metadata.authors" <> "and" - <> backtickEOS "metadata.copyrightHolders") - (\case - symbolStr : authorStr@(_:_) -> first fromString $ do - symbol <- Path.definitionNameSegment symbolStr - -- let's have a real parser in not too long - let author :: Text - author = Text.pack $ case (unwords authorStr) of - quoted@('"':_) -> (init . tail) quoted - bare -> bare - pure $ Input.CreateAuthorI symbol author - _ -> Left $ showPatternHelp createAuthor - ) -validInputs :: [InputPattern] -validInputs = - [ help - , helpTopics - , load - , add - , previewAdd - , update - , previewUpdate - , delete - , forkLocal - , mergeLocal - , squashMerge - , previewMergeLocal - , diffNamespace - , names - , push - , pull - , pushExhaustive - , pullExhaustive - , createPullRequest - , loadPullRequest - , cd - , back - , deleteBranch - , renameBranch - , deletePatch - , renamePatch - , copyPatch - , find - , findShallow - , findVerbose - , view - , display - , displayTo - , docs - , findPatch - , viewPatch - , undo - , history - , edit - , renameTerm - , deleteTerm - , aliasTerm - , renameType - , deleteType - , aliasType - , aliasMany - , todo - , patch - , link - , unlink - , links - , createAuthor - , replaceTerm - , replaceType - , deleteTermReplacement - , deleteTypeReplacement - , test - , execute - , viewReflog - , resetRoot - , quit - , updateBuiltins - , mergeBuiltins - , mergeIOBuiltins - , dependents, dependencies - , debugNumberedArgs - , debugBranchHistory - , debugFileHashes - ] - -commandNames :: [String] -commandNames = validInputs >>= \i -> I.patternName i : I.aliases i - -commandNameArg :: ArgumentType -commandNameArg = - ArgumentType "command" $ \q _ 
_ _ -> pure (exactComplete q (commandNames <> Map.keys helpTopicsMap)) - -exactDefinitionOrPathArg :: ArgumentType -exactDefinitionOrPathArg = - ArgumentType "definition or path" $ - bothCompletors - (bothCompletors - (termCompletor exactComplete) - (typeCompletor exactComplete)) - (pathCompletor exactComplete (Set.map Path.toText . Branch.deepPaths)) - -fuzzyDefinitionQueryArg :: ArgumentType -fuzzyDefinitionQueryArg = - -- todo: improve this - ArgumentType "fuzzy definition query" $ - bothCompletors (termCompletor fuzzyComplete) - (typeCompletor fuzzyComplete) - -definitionQueryArg :: ArgumentType -definitionQueryArg = fuzzyDefinitionQueryArg { typeName = "definition query" } - -exactDefinitionTypeQueryArg :: ArgumentType -exactDefinitionTypeQueryArg = - ArgumentType "term definition query" $ typeCompletor exactComplete - -exactDefinitionTermQueryArg :: ArgumentType -exactDefinitionTermQueryArg = - ArgumentType "term definition query" $ termCompletor exactComplete - -typeCompletor :: Applicative m - => (String -> [String] -> [Completion]) - -> String - -> Codebase m v a - -> Branch.Branch m - -> Path.Absolute - -> m [Completion] -typeCompletor filterQuery = pathCompletor filterQuery go where - go = Set.map HQ'.toText . R.dom . Names.types . Names.names0ToNames . Branch.toNames0 - -termCompletor :: Applicative m - => (String -> [String] -> [Completion]) - -> String - -> Codebase m v a - -> Branch.Branch m - -> Path.Absolute - -> m [Completion] -termCompletor filterQuery = pathCompletor filterQuery go where - go = Set.map HQ'.toText . R.dom . Names.terms . Names.names0ToNames . Branch.toNames0 - -patchArg :: ArgumentType -patchArg = ArgumentType "patch" $ pathCompletor - exactComplete - (Set.map Name.toText . Map.keysSet . 
Branch.deepEdits) - -bothCompletors - :: (Monad m) - => (String -> t2 -> t3 -> t4 -> m [Completion]) - -> (String -> t2 -> t3 -> t4 -> m [Completion]) - -> String -> t2 -> t3 -> t4 -> m [Completion] -bothCompletors c1 c2 q code b currentPath = do - suggestions1 <- c1 q code b currentPath - suggestions2 <- c2 q code b currentPath - pure . fixupCompletion q - . nubOrdOn Completion.display - $ suggestions1 ++ suggestions2 - -pathCompletor - :: Applicative f - => (String -> [String] -> [Completion]) - -> (Branch.Branch0 m -> Set Text) - -> String - -> codebase - -> Branch.Branch m - -> Path.Absolute - -> f [Completion] -pathCompletor filterQuery getNames query _code b p = let - b0root = Branch.head b - b0local = Branch.getAt0 (Path.unabsolute p) b0root - -- todo: if these sets are huge, maybe trim results - in pure . filterQuery query . map Text.unpack $ - toList (getNames b0local) ++ - if "." `isPrefixOf` query then - map ("." <>) (toList (getNames b0root)) - else - [] - -pathArg :: ArgumentType -pathArg = ArgumentType "namespace" $ - pathCompletor exactComplete (Set.map Path.toText . Branch.deepPaths) - -newNameArg :: ArgumentType -newNameArg = ArgumentType "new-name" $ - pathCompletor prefixIncomplete - (Set.map ((<> ".") . Path.toText) . 
Branch.deepPaths) - -noCompletions :: ArgumentType -noCompletions = ArgumentType "word" I.noSuggestions - --- Arya: I could imagine completions coming from previous git pulls -gitUrlArg :: ArgumentType -gitUrlArg = ArgumentType "git-url" $ \input _ _ _ -> case input of - "gh" -> complete "https://github.com/" - "gl" -> complete "https://gitlab.com/" - "bb" -> complete "https://bitbucket.com/" - "ghs" -> complete "git@github.com:" - "gls" -> complete "git@gitlab.com:" - "bbs" -> complete "git@bitbucket.com:" - _ -> pure [] - where complete s = pure [Completion s s False] - -collectNothings :: (a -> Maybe b) -> [a] -> [a] -collectNothings f as = [ a | (Nothing, a) <- map f as `zip` as ] - -patternFromInput :: Input -> InputPattern -patternFromInput = \case - Input.PushRemoteBranchI _ _ SyncMode.ShortCircuit -> push - Input.PushRemoteBranchI _ _ SyncMode.Complete -> pushExhaustive - Input.PullRemoteBranchI _ _ SyncMode.ShortCircuit -> pull - Input.PullRemoteBranchI _ _ SyncMode.Complete -> pushExhaustive - _ -> error "todo: finish this function" - -inputStringFromInput :: IsString s => Input -> P.Pretty s -inputStringFromInput = \case - i@(Input.PushRemoteBranchI rh p' _) -> - (P.string . I.patternName $ patternFromInput i) - <> (" " <> maybe mempty (P.text . uncurry RemoteRepo.printHead) rh) - <> " " <> P.shown p' - i@(Input.PullRemoteBranchI ns p' _) -> - (P.string . I.patternName $ patternFromInput i) - <> (" " <> maybe mempty (P.text . 
uncurry3 RemoteRepo.printNamespace) ns) - <> " " <> P.shown p' - _ -> error "todo: finish this function" diff --git a/parser-typechecker/src/Unison/CommandLine/Main.hs b/parser-typechecker/src/Unison/CommandLine/Main.hs deleted file mode 100644 index ba4f1fcd3c..0000000000 --- a/parser-typechecker/src/Unison/CommandLine/Main.hs +++ /dev/null @@ -1,258 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.CommandLine.Main where - -import Unison.Prelude - -import Control.Concurrent.STM (atomically) -import Control.Exception (finally, catch, AsyncException(UserInterrupt), asyncExceptionFromException) -import Control.Monad.State (runStateT) -import Data.Configurator.Types (Config) -import Data.IORef -import Data.Tuple.Extra (uncurry3) -import Prelude hiding (readFile, writeFile) -import System.IO.Error (isDoesNotExistError) -import Unison.Codebase.Branch (Branch) -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.Editor.Input (Input (..), Event) -import qualified Unison.Codebase.Editor.HandleInput as HandleInput -import qualified Unison.Codebase.Editor.HandleCommand as HandleCommand -import Unison.Codebase.Editor.Command (LoadSourceResult(..)) -import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, printNamespace) -import Unison.Codebase.Runtime (Runtime) -import Unison.Codebase (Codebase) -import Unison.CommandLine -import Unison.PrettyTerminal -import Unison.CommandLine.InputPattern (ArgumentType (suggestions), InputPattern (aliases, patternName)) -import Unison.CommandLine.InputPatterns (validInputs) -import Unison.CommandLine.OutputMessages (notifyUser, notifyNumbered, shortenDirectory) -import Unison.Parser (Ann) -import Unison.Var (Var) -import qualified Control.Concurrent.Async as Async -import qualified Data.Map as Map -import qualified Data.Text as Text -import qualified Data.Text.IO -import qualified System.Console.Haskeline as Line -import qualified Crypto.Random 
as Random -import qualified Unison.Codebase.Path as Path -import qualified Unison.Codebase.Runtime as Runtime -import qualified Unison.Codebase as Codebase -import qualified Unison.CommandLine.InputPattern as IP -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.TQueue as Q -import Text.Regex.TDFA -import Control.Lens (view) -import Control.Error (rightMay) - --- Expand a numeric argument like `1` or a range like `3-9` -expandNumber :: [String] -> String -> [String] -expandNumber numberedArgs s = - maybe [s] - (map (\i -> fromMaybe (show i) . atMay numberedArgs $ i - 1)) - expandedNumber - where - rangeRegex = "([0-9]+)-([0-9]+)" :: String - (junk,_,moreJunk, ns) = - s =~ rangeRegex :: (String, String, String, [String]) - expandedNumber = - case readMay s of - Just i -> Just [i] - Nothing -> - -- check for a range - case (junk, moreJunk, ns) of - ("", "", [from, to]) -> - (\x y -> [x..y]) <$> readMay from <*> readMay to - _ -> Nothing - -getUserInput - :: (MonadIO m, Line.MonadException m) - => Map String InputPattern - -> Codebase m v a - -> Branch m - -> Path.Absolute - -> [String] - -> m Input -getUserInput patterns codebase branch currentPath numberedArgs = - Line.runInputT settings go - where - go = do - line <- Line.getInputLine $ - P.toANSI 80 ((P.green . P.shown) currentPath <> fromString prompt) - case line of - Nothing -> pure QuitI - Just l -> - case words l of - [] -> go - ws -> - case parseInput patterns . (>>= expandNumber numberedArgs) $ ws of - Left msg -> do - liftIO $ putPrettyLn msg - go - Right i -> pure i - settings = Line.Settings tabComplete (Just ".unisonHistory") True - tabComplete = Line.completeWordWithPrev Nothing " " $ \prev word -> - -- User hasn't finished a command name, complete from command names - if null prev - then pure . 
exactComplete word $ Map.keys patterns - -- User has finished a command name; use completions for that command - else case words $ reverse prev of - h : t -> fromMaybe (pure []) $ do - p <- Map.lookup h patterns - argType <- IP.argType p (length t) - pure $ suggestions argType word codebase branch currentPath - _ -> pure [] - -asciiartUnison :: P.Pretty P.ColorText -asciiartUnison = - P.red " _____" - <> P.hiYellow " _ " - <> P.newline - <> P.red "| | |" - <> P.hiRed "___" - <> P.hiYellow "|_|" - <> P.hiGreen "___ " - <> P.cyan "___ " - <> P.purple "___ " - <> P.newline - <> P.red "| | | " - <> P.hiYellow "| |" - <> P.hiGreen "_ -" - <> P.cyan "| . |" - <> P.purple " |" - <> P.newline - <> P.red "|_____|" - <> P.hiRed "_|_" - <> P.hiYellow "|_|" - <> P.hiGreen "___" - <> P.cyan "|___|" - <> P.purple "_|_|" - -welcomeMessage :: FilePath -> String -> P.Pretty P.ColorText -welcomeMessage dir version = - asciiartUnison - <> P.newline - <> P.newline - <> P.linesSpaced - [ P.wrap "Welcome to Unison!" - , P.wrap ("You are running version: " <> P.string version) - , P.wrap - ( "I'm currently watching for changes to .u files under " - <> (P.group . P.blue $ fromString dir) - ) - , P.wrap ("Type " <> P.hiBlue "help" <> " to get help. 😎") - ] - -hintFreshCodebase :: RemoteNamespace -> P.Pretty P.ColorText -hintFreshCodebase ns = - P.wrap $ "Enter " - <> (P.hiBlue . P.group) - ("pull " <> P.text (uncurry3 printNamespace ns) <> " .base") - <> "to set up the default base library. 🏗" - -main - :: forall v - . Var v - => FilePath - -> Maybe RemoteNamespace - -> Path.Absolute - -> (Config, IO ()) - -> [Either Event Input] - -> IO (Runtime v) - -> Codebase IO v Ann - -> Branch.Cache IO - -> String - -> IO () -main dir defaultBaseLib initialPath (config,cancelConfig) initialInputs startRuntime codebase branchCache version = do - dir' <- shortenDirectory dir - root <- fromMaybe Branch.empty . 
rightMay <$> Codebase.getRootBranch codebase - putPrettyLn $ case defaultBaseLib of - Just ns | Branch.isOne root -> - welcomeMessage dir' version <> P.newline <> P.newline <> hintFreshCodebase ns - _ -> welcomeMessage dir' version - eventQueue <- Q.newIO - do - runtime <- startRuntime - -- we watch for root branch tip changes, but want to ignore ones we expect. - rootRef <- newIORef root - pathRef <- newIORef initialPath - initialInputsRef <- newIORef initialInputs - numberedArgsRef <- newIORef [] - pageOutput <- newIORef True - cancelFileSystemWatch <- watchFileSystem eventQueue dir - cancelWatchBranchUpdates <- watchBranchUpdates (readIORef rootRef) - eventQueue - codebase - let patternMap = - Map.fromList - $ validInputs - >>= (\p -> (patternName p, p) : ((, p) <$> aliases p)) - getInput = do - root <- readIORef rootRef - path <- readIORef pathRef - numberedArgs <- readIORef numberedArgsRef - getUserInput patternMap codebase root path numberedArgs - loadSourceFile :: Text -> IO LoadSourceResult - loadSourceFile fname = - if allow $ Text.unpack fname - then - let handle :: IOException -> IO LoadSourceResult - handle e = - case e of - _ | isDoesNotExistError e -> return InvalidSourceNameError - _ -> return LoadError - go = do - contents <- Data.Text.IO.readFile $ Text.unpack fname - return $ LoadSuccess contents - in catch go handle - else return InvalidSourceNameError - notify = notifyUser dir >=> (\o -> - ifM (readIORef pageOutput) - (putPrettyNonempty o) - (putPrettyLnUnpaged o)) - let - awaitInput = do - -- use up buffered input before consulting external events - i <- readIORef initialInputsRef - (case i of - h:t -> writeIORef initialInputsRef t >> pure h - [] -> - -- Race the user input and file watch. 
- Async.race (atomically $ Q.peek eventQueue) getInput >>= \case - Left _ -> do - let e = Left <$> atomically (Q.dequeue eventQueue) - writeIORef pageOutput False - e - x -> do - writeIORef pageOutput True - pure x) `catch` interruptHandler - interruptHandler (asyncExceptionFromException -> Just UserInterrupt) = awaitInput - interruptHandler _ = pure $ Right QuitI - cleanup = do - Runtime.terminate runtime - cancelConfig - cancelFileSystemWatch - cancelWatchBranchUpdates - loop state = do - writeIORef pathRef (view HandleInput.currentPath state) - let free = runStateT (runMaybeT HandleInput.loop) state - - (o, state') <- HandleCommand.commandLine config awaitInput - (writeIORef rootRef) - runtime - notify - (\o -> let (p, args) = notifyNumbered o in - putPrettyNonempty p $> args) - loadSourceFile - codebase - (const Random.getSystemDRG) - branchCache - free - case o of - Nothing -> pure () - Just () -> do - writeIORef numberedArgsRef (HandleInput._numberedArgs state') - loop state' - (`finally` cleanup) - $ loop (HandleInput.loopState0 root initialPath) diff --git a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs deleted file mode 100644 index 4fc65ab260..0000000000 --- a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs +++ /dev/null @@ -1,1977 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-partial-type-signatures #-} - -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PartialTypeSignatures #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ViewPatterns #-} - - -module Unison.CommandLine.OutputMessages where - -import Unison.Prelude hiding (unlessM) - -import Unison.Codebase.Editor.Output -import qualified Unison.Codebase.Editor.Output as E -import qualified Unison.Codebase.Editor.Output as Output -import qualified Unison.Codebase.Editor.TodoOutput as TO -import qualified Unison.Codebase.Editor.SearchResult' as SR' -import qualified 
Unison.Codebase.Editor.Output.BranchDiff as OBD - - -import Control.Lens -import qualified Control.Monad.State.Strict as State -import Data.Bifunctor (first, second) -import Data.List (sort, stripPrefix) -import Data.List.Extra (nubOrdOn, nubOrd, notNull) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Sequence as Seq -import qualified Data.Text as Text -import Data.Text.IO (readFile, writeFile) -import Data.Tuple.Extra (dupe, uncurry3) -import Prelude hiding (readFile, writeFile) -import System.Directory ( canonicalizePath - , doesFileExist - , getHomeDirectory - ) -import qualified Unison.ABT as ABT -import qualified Unison.UnisonFile as UF -import Unison.Codebase.GitError -import qualified Unison.Codebase.Path as Path -import qualified Unison.Codebase.Patch as Patch -import Unison.Codebase.Patch (Patch(..)) -import qualified Unison.Codebase.ShortBranchHash as SBH -import qualified Unison.Codebase.TermEdit as TermEdit -import qualified Unison.Codebase.TypeEdit as TypeEdit -import Unison.CommandLine ( bigproblem - , tip - , note - ) -import Unison.PrettyTerminal ( clearCurrentLine - , putPretty' - ) -import qualified Unison.CommandLine.InputPattern as IP1 -import Unison.CommandLine.InputPatterns (makeExample, makeExample') -import qualified Unison.CommandLine.InputPatterns as IP -import qualified Unison.Builtin.Decls as DD -import qualified Unison.DataDeclaration as DD -import qualified Unison.DeclPrinter as DeclPrinter -import qualified Unison.HashQualified as HQ -import qualified Unison.HashQualified' as HQ' -import Unison.Name (Name) -import qualified Unison.Name as Name -import Unison.NamePrinter (prettyHashQualified, - prettyReference, prettyReferent, - prettyLabeledDependency, - prettyNamedReference, - prettyNamedReferent, - prettyName, prettyShortHash, - styleHashQualified, - styleHashQualified', prettyHashQualified') -import Unison.Names2 (Names'(..), Names0) -import qualified Unison.Names2 as Names -import 
qualified Unison.Names3 as Names -import Unison.Parser (Ann, startingLine) -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Codebase.Runtime as Runtime -import Unison.PrintError ( prettyParseError - , printNoteWithSource - , prettyResolutionFailures - ) -import qualified Unison.Reference as Reference -import Unison.Reference ( Reference ) -import qualified Unison.Referent as Referent -import Unison.Referent ( Referent ) -import qualified Unison.Result as Result -import qualified Unison.Term as Term -import Unison.Term (Term) -import Unison.Type (Type) -import qualified Unison.TermPrinter as TermPrinter -import qualified Unison.TypePrinter as TypePrinter -import qualified Unison.Util.ColorText as CT -import Unison.Util.Monoid ( intercalateMap - , unlessM - ) -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.Relation as R -import Unison.Var (Var) -import qualified Unison.Var as Var -import qualified Unison.Codebase.Editor.SlurpResult as SlurpResult -import Unison.Codebase.Editor.DisplayThing (DisplayThing(MissingThing, BuiltinThing, RegularThing)) -import qualified Unison.Codebase.Editor.Input as Input -import qualified Unison.Hash as Hash -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Codebase.Editor.RemoteRepo as RemoteRepo -import qualified Unison.Util.List as List -import qualified Unison.Util.Monoid as Monoid -import Data.Tuple (swap) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import qualified Unison.ShortHash as SH -import Unison.LabeledDependency as LD -import Unison.Codebase.Editor.RemoteRepo (RemoteRepo) - -type Pretty = P.Pretty P.ColorText - -shortenDirectory :: FilePath -> IO FilePath -shortenDirectory dir = do - home <- getHomeDirectory - pure $ case stripPrefix home dir of - Just d -> "~" <> d - Nothing -> dir - -renderFileName :: FilePath -> IO Pretty -renderFileName dir = P.group . P.blue . 
fromString <$> shortenDirectory dir - -notifyNumbered :: Var v => NumberedOutput v -> (Pretty, NumberedArgs) -notifyNumbered o = case o of - ShowDiffNamespace oldPrefix newPrefix ppe diffOutput -> - showDiffNamespace ShowNumbers ppe oldPrefix newPrefix diffOutput - - ShowDiffAfterDeleteDefinitions ppe diff -> - first (\p -> P.lines - [ p - , "" - , undoTip - ]) (showDiffNamespace ShowNumbers ppe e e diff) - - ShowDiffAfterDeleteBranch bAbs ppe diff -> - first (\p -> P.lines - [ p - , "" - , undoTip - ]) (showDiffNamespace ShowNumbers ppe bAbs bAbs diff) - - ShowDiffAfterModifyBranch b' _ _ (OBD.isEmpty -> True) -> - (P.wrap $ "Nothing changed in" <> prettyPath' b' <> ".", mempty) - ShowDiffAfterModifyBranch b' bAbs ppe diff -> - first (\p -> P.lines - [ P.wrap $ "Here's what changed in" <> prettyPath' b' <> ":" - , "" - , p - , "" - , undoTip - ]) (showDiffNamespace ShowNumbers ppe bAbs bAbs diff) - - ShowDiffAfterMerge _ _ _ (OBD.isEmpty -> True) -> - (P.wrap $ "Nothing changed as a result of the merge.", mempty) - ShowDiffAfterMerge dest' destAbs ppe diffOutput -> - first (\p -> P.lines [ - P.wrap $ "Here's what's changed in " <> prettyPath' dest' <> "after the merge:" - , "" - , p - , "" - , tip $ "You can use " <> IP.makeExample' IP.todo - <> "to see if this generated any work to do in this namespace" - <> "and " <> IP.makeExample' IP.test <> "to run the tests." - <> "Or you can use" <> IP.makeExample' IP.undo <> " or" - <> IP.makeExample' IP.viewReflog <> " to undo the results of this merge." 
- ]) (showDiffNamespace ShowNumbers ppe destAbs destAbs diffOutput) - - ShowDiffAfterMergePropagate dest' destAbs patchPath' ppe diffOutput -> - first (\p -> P.lines [ - P.wrap $ "Here's what's changed in " <> prettyPath' dest' - <> "after applying the patch at " <> P.group (prettyPath' patchPath' <> ":") - , "" - , p - , "" - , tip $ "You can use " - <> IP.makeExample IP.todo [prettyPath' patchPath', prettyPath' dest'] - <> "to see if this generated any work to do in this namespace" - <> "and " <> IP.makeExample' IP.test <> "to run the tests." - <> "Or you can use" <> IP.makeExample' IP.undo <> " or" - <> IP.makeExample' IP.viewReflog <> " to undo the results of this merge." - ]) (showDiffNamespace ShowNumbers ppe destAbs destAbs diffOutput) - - ShowDiffAfterMergePreview dest' destAbs ppe diffOutput -> - first (\p -> P.lines [ - P.wrap $ "Here's what would change in " <> prettyPath' dest' <> "after the merge:" - , "" - , p - ]) (showDiffNamespace ShowNumbers ppe destAbs destAbs diffOutput) - - ShowDiffAfterUndo ppe diffOutput -> - first (\p -> P.lines ["Here's the changes I undid", "", p ]) - (showDiffNamespace ShowNumbers ppe e e diffOutput) - - ShowDiffAfterPull dest' destAbs ppe diff -> - if OBD.isEmpty diff then - ("✅ Looks like " <> prettyPath' dest' <> " is up to date.", mempty) - else - first (\p -> P.lines [ - P.wrap $ "Here's what's changed in " <> prettyPath' dest' <> "after the pull:", "", - p, "", - undoTip - ]) - (showDiffNamespace ShowNumbers ppe destAbs destAbs diff) - ShowDiffAfterCreatePR baseRepo headRepo ppe diff -> - if OBD.isEmpty diff then - (P.wrap $ "Looks like there's no difference between " - <> prettyRemoteNamespace baseRepo - <> "and" - <> prettyRemoteNamespace headRepo <> "." 
- ,mempty) - else first (\p -> - (P.lines - [P.wrap $ "The changes summarized below are available for you to review," - <> "using the following command:" - ,"" - ,P.indentN 2 $ - IP.makeExampleNoBackticks - IP.loadPullRequest [(prettyRemoteNamespace baseRepo) - ,(prettyRemoteNamespace headRepo)] - ,"" - ,p])) (showDiffNamespace HideNumbers ppe e e diff) - -- todo: these numbers aren't going to work, - -- since the content isn't necessarily here. - -- Should we have a mode with no numbers? :P - - ShowDiffAfterCreateAuthor authorNS authorPath' bAbs ppe diff -> - first (\p -> P.lines - [ p - , "" - , tip $ "Add" <> prettyName "License" <> "values for" - <> prettyName (Name.fromSegment authorNS) - <> "under" <> P.group (prettyPath' authorPath' <> ".") - ]) (showDiffNamespace ShowNumbers ppe bAbs bAbs diff) - where - e = Path.absoluteEmpty - undoTip = tip $ "You can use" <> IP.makeExample' IP.undo - <> "or" <> IP.makeExample' IP.viewReflog - <> "to undo this change." - -prettyRemoteNamespace :: (RemoteRepo.RemoteRepo, - Maybe ShortBranchHash, Path.Path) - -> P.Pretty P.ColorText -prettyRemoteNamespace = - P.group . P.text . uncurry3 RemoteRepo.printNamespace - -notifyUser :: forall v . Var v => FilePath -> Output v -> IO Pretty -notifyUser dir o = case o of - Success -> pure $ P.bold "Done." - WarnIncomingRootBranch current hashes -> pure $ - if null hashes then P.wrap $ - "Please let someone know I generated an empty IncomingRootBranch" - <> " event, which shouldn't be possible!" - else P.lines - [ P.wrap $ (if length hashes == 1 then "A" else "Some") - <> "codebase" <> P.plural hashes "root" <> "appeared unexpectedly" - <> "with" <> P.group (P.plural hashes "hash" <> ":") - , "" - , (P.indentN 2 . P.oxfordCommas) - (map prettySBH $ toList hashes) - , "" - , P.wrap $ "and I'm not sure what to do about it." 
- <> "The last root namespace hash that I knew about was:" - , "" - , P.indentN 2 $ prettySBH current - , "" - , P.wrap $ "Now might be a good time to make a backup of your codebase. 😬" - , "" - , P.wrap $ "After that, you might try using the" <> makeExample' IP.forkLocal - <> "command to inspect the namespaces listed above, and decide which" - <> "one you want as your root." - <> "You can also use" <> makeExample' IP.viewReflog <> "to see the" - <> "last few root namespace hashes on record." - , "" - , P.wrap $ "Once you find one you like, you can use the" - <> makeExample' IP.resetRoot <> "command to set it." - ] - LoadPullRequest baseNS headNS basePath headPath mergedPath squashedPath -> pure $ P.lines - [ P.wrap $ "I checked out" <> prettyRemoteNamespace baseNS <> "to" <> P.group (prettyPath' basePath <> ".") - , P.wrap $ "I checked out" <> prettyRemoteNamespace headNS <> "to" <> P.group (prettyPath' headPath <> ".") - , "" - , P.wrap $ "The merged result is in" <> P.group (prettyPath' mergedPath <> ".") - , P.wrap $ "The (squashed) merged result is in" <> P.group (prettyPath' squashedPath <> ".") - , P.wrap $ "Use" <> - IP.makeExample IP.diffNamespace - [prettyPath' basePath, prettyPath' mergedPath] - <> "or" <> - IP.makeExample IP.diffNamespace - [prettyPath' basePath, prettyPath' squashedPath] - <> "to see what's been updated." - , P.wrap $ "Use" <> - IP.makeExample IP.todo - [ prettyPath' (snoc mergedPath "patch") - , prettyPath' mergedPath ] - <> "to see what work is remaining for the merge." - , P.wrap $ "Use" <> - IP.makeExample IP.push - [prettyRemoteNamespace baseNS, prettyPath' mergedPath] <> - "or" <> - IP.makeExample IP.push - [prettyRemoteNamespace baseNS, prettyPath' squashedPath] - <> "to push the changes." 
- ] - - DisplayDefinitions outputLoc ppe types terms -> - displayDefinitions outputLoc ppe types terms - DisplayRendered outputLoc pp -> - displayRendered outputLoc pp - DisplayLinks ppe md types terms -> - if Map.null md then pure $ P.wrap "Nothing to show here. Use the " - <> IP.makeExample' IP.link <> " command to add links from this definition." - else - pure $ intercalateMap "\n\n" go (Map.toList md) - where - go (_key, rs) = - displayDefinitions' ppe (Map.restrictKeys types rs) - (Map.restrictKeys terms rs) - TestResults stats ppe _showSuccess _showFailures oks fails -> case stats of - CachedTests 0 _ -> pure . P.callout "😶" $ "No tests to run." - CachedTests n n' | n == n' -> pure $ - P.lines [ cache, "", displayTestResults True ppe oks fails ] - CachedTests _n m -> pure $ - if m == 0 then "✅ " - else P.indentN 2 $ - P.lines [ "", cache, "", displayTestResults False ppe oks fails, "", "✅ " ] - where - NewlyComputed -> do - clearCurrentLine - pure $ P.lines [ - " " <> P.bold "New test results:", - "", - displayTestResults True ppe oks fails ] - where - cache = P.bold "Cached test results " <> "(`help testcache` to learn more)" - - TestIncrementalOutputStart ppe (n,total) r _src -> do - putPretty' $ P.shown (total - n) <> " tests left to run, current test: " - <> (P.syntaxToColor $ prettyHashQualified (PPE.termName ppe $ Referent.Ref r)) - pure mempty - - TestIncrementalOutputEnd _ppe (_n, _total) _r result -> do - clearCurrentLine - if isTestOk result then putPretty' " ✅ " - else putPretty' " 🚫 " - pure mempty - - MetadataMissingType ppe ref -> pure . P.fatalCallout . P.lines $ [ - P.wrap $ "The metadata value " <> P.red (prettyTermName ppe ref) - <> "is missing a type signature in the codebase.", - "", - P.wrap $ "This might be due to pulling an incomplete" - <> "or invalid codebase, or because files inside the codebase" - <> "are being deleted external to UCM." - ] - MetadataAmbiguous hq _ppe [] -> pure . P.warnCallout . 
- P.wrap $ "I couldn't find any metadata matching " - <> P.syntaxToColor (prettyHashQualified hq) - MetadataAmbiguous _ ppe refs -> pure . P.warnCallout . P.lines $ [ - P.wrap $ "I'm not sure which metadata value you're referring to" - <> "since there are multiple matches:", - "", - P.indentN 2 $ P.spaced (P.blue . prettyTermName ppe <$> refs), - "", - tip "Try again and supply one of the above definitions explicitly." - ] - - EvaluationFailure err -> pure err - SearchTermsNotFound hqs | null hqs -> pure mempty - SearchTermsNotFound hqs -> - pure - $ P.warnCallout "The following names were not found in the codebase. Check your spelling." - <> P.newline - <> (P.syntaxToColor $ P.indent " " (P.lines (prettyHashQualified <$> hqs))) - PatchNotFound _ -> - pure . P.warnCallout $ "I don't know about that patch." - NameNotFound _ -> - pure . P.warnCallout $ "I don't know about that name." - TermNotFound _ -> - pure . P.warnCallout $ "I don't know about that term." - TypeNotFound _ -> - pure . P.warnCallout $ "I don't know about that type." - TermAlreadyExists _ _ -> - pure . P.warnCallout $ "A term by that name already exists." - TypeAlreadyExists _ _ -> - pure . P.warnCallout $ "A type by that name already exists." - PatchAlreadyExists _ -> - pure . P.warnCallout $ "A patch by that name already exists." - BranchEmpty b -> pure . P.warnCallout . P.wrap $ - P.group (either P.shown prettyPath' b) <> "is an empty namespace." - BranchNotEmpty path -> - pure . P.warnCallout $ "I was expecting the namespace " <> prettyPath' path - <> " to be empty for this operation, but it isn't." - CantDelete ppe failed failedDependents -> pure . P.warnCallout $ - P.lines [ - P.wrap "I couldn't delete ", - "", P.indentN 2 $ listOfDefinitions' ppe False failed, - "", - "because it's still being used by these definitions:", - "", P.indentN 2 $ listOfDefinitions' ppe False failedDependents - ] - CantUndo reason -> case reason of - CantUndoPastStart -> pure . 
P.warnCallout $ "Nothing more to undo." - CantUndoPastMerge -> pure . P.warnCallout $ "Sorry, I can't undo a merge (not implemented yet)." - NoMainFunction main ppe ts -> pure . P.callout "😶" $ P.lines [ - P.wrap $ "I looked for a function" <> P.backticked (P.string main) - <> "in the most recently typechecked file and codebase but couldn't find one. It has to have the type:", - "", - P.indentN 2 $ P.lines [ P.string main <> " : " <> TypePrinter.pretty ppe t | t <- ts ] - ] - NoUnisonFile -> do - dir' <- canonicalizePath dir - fileName <- renderFileName dir' - pure . P.callout "😶" $ P.lines - [ P.wrap "There's nothing for me to add right now." - , "" - , P.column2 [(P.bold "Hint:", msg fileName)] ] - where - msg dir = P.wrap - $ "I'm currently watching for definitions in .u files under the" - <> dir - <> "directory. Make sure you've updated something there before using the" - <> makeExample' IP.add <> "or" <> makeExample' IP.update - <> "commands, or use" <> makeExample' IP.load <> "to load a file explicitly." - InvalidSourceName name -> - pure . P.callout "😶" $ P.wrap $ "The file " - <> P.blue (P.shown name) - <> " does not exist or is not a valid source file." - SourceLoadFailed name -> - pure . P.callout "😶" $ P.wrap $ "The file " - <> P.blue (P.shown name) - <> " could not be loaded." - BranchNotFound b -> - pure . P.warnCallout $ "The namespace " <> P.blue (P.shown b) <> " doesn't exist." - CreatedNewBranch path -> pure $ - "☝️ The namespace " <> P.blue (P.shown path) <> " is empty." - -- RenameOutput rootPath oldName newName r -> do - -- nameChange "rename" "renamed" oldName newName r - -- AliasOutput rootPath existingName newName r -> do - -- nameChange "alias" "aliased" existingName newName r - DeletedEverything -> - pure . P.wrap . P.lines $ - ["Okay, I deleted everything except the history." 
- ,"Use " <> IP.makeExample' IP.undo <> " to undo, or " - <> IP.makeExample' IP.mergeBuiltins - <> " to restore the absolute " - <> "basics to the current path."] - DeleteEverythingConfirmation -> - pure . P.warnCallout . P.lines $ - ["Are you sure you want to clear away everything?" - ,"You could use " <> IP.makeExample' IP.cd - <> " to switch to a new namespace instead."] - DeleteBranchConfirmation _uniqueDeletions -> error "todo" - -- let - -- pretty (branchName, (ppe, results)) = - -- header $ listOfDefinitions' ppe False results - -- where - -- header = plural uniqueDeletions id ((P.text branchName <> ":") `P.hang`) - -- - -- in putPrettyLn . P.warnCallout - -- $ P.wrap ("The" - -- <> plural uniqueDeletions "namespace contains" "namespaces contain" - -- <> "definitions that don't exist in any other branches:") - -- <> P.border 2 (mconcat (fmap pretty uniqueDeletions)) - -- <> P.newline - -- <> P.wrap "Please repeat the same command to confirm the deletion." - ListOfDefinitions ppe detailed results -> - listOfDefinitions ppe detailed results - ListOfLinks ppe results -> - listOfLinks ppe [ (name,tm) | (name,_ref,tm) <- results ] - ListNames _len [] [] -> pure . P.callout "😶" $ - P.wrap "I couldn't find anything by that name." - ListNames len types terms -> pure . P.sepNonEmpty "\n\n" $ [ - formatTypes types, formatTerms terms ] - where - formatTerms tms = - P.lines . P.nonEmpty $ P.plural tms (P.blue "Term") : (go <$> tms) where - go (ref, hqs) = P.column2 - [ ("Hash:", P.syntaxToColor (prettyReferent len ref)) - , ("Names: ", P.group (P.spaced (P.bold . P.syntaxToColor . prettyHashQualified' <$> toList hqs))) - ] - formatTypes types = - P.lines . P.nonEmpty $ P.plural types (P.blue "Type") : (go <$> types) where - go (ref, hqs) = P.column2 - [ ("Hash:", P.syntaxToColor (prettyReference len ref)) - , ("Names:", P.group (P.spaced (P.bold . P.syntaxToColor . 
prettyHashQualified' <$> toList hqs))) - ] - -- > names foo - -- Terms: - -- Hash: #asdflkjasdflkjasdf - -- Names: .util.frobnicate foo blarg.mcgee - -- - -- Term (with hash #asldfkjsdlfkjsdf): .util.frobnicate, foo, blarg.mcgee - -- Types (with hash #hsdflkjsdfsldkfj): Optional, Maybe, foo - ListShallow ppe entries -> pure $ - -- todo: make a version of prettyNumberedResult to support 3-columns - if null entries then P.lit "nothing to show" - else numberedEntries entries - where - numberedEntries :: [ShallowListEntry v a] -> P.Pretty P.ColorText - numberedEntries entries = - (P.column3 . fmap f) ([(1::Integer)..] `zip` fmap formatEntry entries) - where - f (i, (p1, p2)) = (P.hiBlack . fromString $ show i <> ".", p1, p2) - formatEntry :: ShallowListEntry v a -> (P.Pretty P.ColorText, P.Pretty P.ColorText) - formatEntry = \case - ShallowTermEntry _r hq ot -> - (P.syntaxToColor . prettyHashQualified' . fmap Name.fromSegment $ hq - , P.lit "(" <> maybe "type missing" (TypePrinter.pretty ppe) ot <> P.lit ")" ) - ShallowTypeEntry r hq -> - (P.syntaxToColor . prettyHashQualified' . fmap Name.fromSegment $ hq - ,isBuiltin r) - ShallowBranchEntry ns count -> - ((P.syntaxToColor . prettyName . Name.fromSegment) ns <> "/" - ,case count of - 1 -> P.lit ("(1 definition)") - _n -> P.lit "(" <> P.shown count <> P.lit " definitions)") - ShallowPatchEntry ns -> - ((P.syntaxToColor . prettyName . Name.fromSegment) ns - ,P.lit "(patch)") - isBuiltin = \case - Reference.Builtin{} -> P.lit "(builtin type)" - Reference.DerivedId{} -> P.lit "(type)" - - SlurpOutput input ppe s -> let - isPast = case input of Input.AddI{} -> True - Input.UpdateI{} -> True - _ -> False - in pure $ SlurpResult.pretty isPast ppe s - - NoExactTypeMatches -> - pure . P.callout "☝️" $ P.wrap "I couldn't find exact type matches, resorting to fuzzy matching..." - TypeParseError src e -> - pure . 
P.fatalCallout $ P.lines [ - P.wrap "I couldn't parse the type you supplied:", - "", - prettyParseError src e - ] - ParseResolutionFailures src es -> pure $ - prettyResolutionFailures src es - TypeHasFreeVars typ -> - pure . P.warnCallout $ P.lines [ - P.wrap "The type uses these names, but I'm not sure what they are:", - P.sep ", " (map (P.text . Var.name) . toList $ ABT.freeVars typ) - ] - ParseErrors src es -> - pure . P.sep "\n\n" $ prettyParseError (Text.unpack src) <$> es - TypeErrors src ppenv notes -> do - let showNote = - intercalateMap "\n\n" (printNoteWithSource ppenv (Text.unpack src)) - . map Result.TypeError - pure . showNote $ notes - Evaluated fileContents ppe bindings watches -> - if null watches then pure "\n" - else - -- todo: hashqualify binding names if necessary to distinguish them from - -- defs in the codebase. In some cases it's fine for bindings to - -- shadow codebase names, but you don't want it to capture them in - -- the decompiled output. - let prettyBindings = P.bracket . P.lines $ - P.wrap "The watch expression(s) reference these definitions:" : "" : - [(P.syntaxToColor $ TermPrinter.prettyBinding ppe (HQ.unsafeFromVar v) b) - | (v, b) <- bindings] - prettyWatches = P.sep "\n\n" [ - watchPrinter fileContents ppe ann kind evald isCacheHit | - (ann,kind,evald,isCacheHit) <- - sortOn (\(a,_,_,_)->a) . 
toList $ watches ] - -- todo: use P.nonempty - in pure $ if null bindings then prettyWatches - else prettyBindings <> "\n" <> prettyWatches - - DisplayConflicts termNamespace typeNamespace -> - pure $ P.sepNonEmpty "\n\n" [ - showConflicts "terms" terms, - showConflicts "types" types - ] - where - terms = R.dom termNamespace - types = R.dom typeNamespace - showConflicts :: Foldable f => Pretty -> f Name -> Pretty - showConflicts thingsName things = - if (null things) then mempty - else P.lines [ - "These " <> thingsName <> " have conflicts: ", "", - P.lines [ (" " <> prettyName x) | x <- toList things ] - ] - -- TODO: Present conflicting TermEdits and TypeEdits - -- if we ever allow users to edit hashes directly. - Typechecked sourceName ppe slurpResult uf -> do - let fileStatusMsg = SlurpResult.pretty False ppe slurpResult - let containsWatchExpressions = notNull $ UF.watchComponents uf - if UF.nonEmpty uf then do - fileName <- renderFileName $ Text.unpack sourceName - pure $ P.linesNonEmpty ([ - if fileStatusMsg == mempty then - P.okCallout $ fileName <> " changed." - else if SlurpResult.isAllDuplicates slurpResult then - P.wrap $ "I found and" - <> P.bold "typechecked" <> "the definitions in " - <> P.group (fileName <> ".") - <> "This file " <> P.bold "has been previously added" <> "to the codebase." - else - P.linesSpaced $ [ - P.wrap $ "I found and" - <> P.bold "typechecked" <> "these definitions in " - <> P.group (fileName <> ".") - <> "If you do an " - <> IP.makeExample' IP.add - <> " or " - <> P.group (IP.makeExample' IP.update <> ",") - <> "here's how your codebase would" - <> "change:" - , P.indentN 2 $ SlurpResult.pretty False ppe slurpResult - ] - ] ++ if containsWatchExpressions then [ - "", - P.wrap $ "Now evaluating any watch expressions" - <> "(lines starting with `>`)... " - <> P.group (P.hiBlack "Ctrl+C cancels.") - ] else []) - else if (null $ UF.watchComponents uf) then pure . 
P.wrap $ - "I loaded " <> P.text sourceName <> " and didn't find anything." - else pure mempty - - TodoOutput names todo -> pure (todoOutput names todo) - GitError input e -> pure $ case e of - CouldntParseRootBranch repo s -> P.wrap $ "I couldn't parse the string" - <> P.red (P.string s) <> "into a namespace hash, when opening the repository at" - <> P.group (prettyRepoBranch repo <> ".") - NoGit -> P.wrap $ - "I couldn't find git. Make sure it's installed and on your path." - CloneException repo msg -> P.wrap $ - "I couldn't clone the repository at" <> prettyRepoBranch repo <> ";" - <> "the error was:" <> (P.indentNAfterNewline 2 . P.group . P.string) msg - PushNoOp repo -> P.wrap $ - "The repository at" <> prettyRepoBranch repo <> "is already up-to-date." - PushException repo msg -> P.wrap $ - "I couldn't push to the repository at" <> prettyRepoRevision repo <> ";" - <> "the error was:" <> (P.indentNAfterNewline 2 . P.group . P.string) msg - UnrecognizableCacheDir uri localPath -> P.wrap $ "A cache directory for" - <> P.backticked (P.text uri) <> "already exists at" - <> P.backticked' (P.string localPath) "," <> "but it doesn't seem to" - <> "be a git repository, so I'm not sure what to do next. Delete it?" - UnrecognizableCheckoutDir uri localPath -> P.wrap $ "I tried to clone" - <> P.backticked (P.text uri) <> "into a cache directory at" - <> P.backticked' (P.string localPath) "," <> "but I can't recognize the" - <> "result as a git repository, so I'm not sure what to do next." - PushDestinationHasNewStuff repo -> - P.callout "⏸" . P.lines $ [ - P.wrap $ "The repository at" <> prettyRepoRevision repo - <> "has some changes I don't know about.", - "", - P.wrap $ "If you want to " <> push <> "you can do:", "", - P.indentN 2 pull, "", - P.wrap $ - "to merge these changes locally," <> - "then try your" <> push <> "again." - ] - where - push = P.group . P.backticked . P.string . IP1.patternName $ IP.patternFromInput input - pull = P.group . 
P.backticked $ IP.inputStringFromInput input - CouldntLoadRootBranch repo hash -> P.wrap - $ "I couldn't load the designated root hash" - <> P.group ("(" <> fromString (Hash.showBase32Hex hash) <> ")") - <> "from the repository at" <> prettyRepoRevision repo - NoRemoteNamespaceWithHash repo sbh -> P.wrap - $ "The repository at" <> prettyRepoRevision repo - <> "doesn't contain a namespace with the hash prefix" - <> (P.blue . P.text . SBH.toText) sbh - RemoteNamespaceHashAmbiguous repo sbh hashes -> P.lines [ - P.wrap $ "The namespace hash" <> prettySBH sbh - <> "at" <> prettyRepoRevision repo - <> "is ambiguous." - <> "Did you mean one of these hashes?", - "", - P.indentN 2 $ P.lines - (prettySBH . SBH.fromHash ((Text.length . SBH.toText) sbh * 2) - <$> Set.toList hashes), - "", - P.wrap "Try again with a few more hash characters to disambiguate." - ] - SomeOtherError msg -> P.callout "‼" . P.lines $ [ - P.wrap "I ran into an error:", "", - P.indentN 2 (P.string msg), "", - P.wrap $ "Check the logging messages above for more info." - ] - ListEdits patch ppe -> do - let - types = Patch._typeEdits patch - terms = Patch._termEdits patch - - prettyTermEdit (r, TermEdit.Deprecate) = - (P.syntaxToColor . prettyHashQualified . PPE.termName ppe . Referent.Ref $ r - , "-> (deprecated)") - prettyTermEdit (r, TermEdit.Replace r' _typing) = - (P.syntaxToColor . prettyHashQualified . PPE.termName ppe . Referent.Ref $ r - , "-> " <> (P.syntaxToColor . prettyHashQualified . PPE.termName ppe . Referent.Ref $ r')) - prettyTypeEdit (r, TypeEdit.Deprecate) = - (P.syntaxToColor . prettyHashQualified $ PPE.typeName ppe r - , "-> (deprecated)") - prettyTypeEdit (r, TypeEdit.Replace r') = - (P.syntaxToColor . prettyHashQualified $ PPE.typeName ppe r - , "-> " <> (P.syntaxToColor . prettyHashQualified . 
PPE.typeName ppe $ r')) - pure $ P.sepNonEmpty "\n\n" [ - if R.null types then mempty - else "Edited Types:" `P.hang` - P.column2 (prettyTypeEdit <$> R.toList types), - if R.null terms then mempty - else "Edited Terms:" `P.hang` - P.column2 (prettyTermEdit <$> R.toList terms), - if R.null types && R.null terms then "This patch is empty." - else tip . P.string $ "To remove entries from a patch, use " - <> IP.deleteTermReplacementCommand <> " or " - <> IP.deleteTypeReplacementCommand <> ", as appropriate." - ] - BustedBuiltins (Set.toList -> new) (Set.toList -> old) -> - -- todo: this could be prettier! Have a nice list like `find` gives, but - -- that requires querying the codebase to determine term types. Probably - -- the only built-in types will be primitive types like `Int`, so no need - -- to look up decl types. - -- When we add builtin terms, they may depend on new derived types, so - -- these derived types should be added to the branch too; but not - -- necessarily ever be automatically deprecated. (A library curator might - -- deprecate them; more work needs to go into the idea of sharing deprecations and stuff. - pure . P.warnCallout . P.lines $ - case (new, old) of - ([],[]) -> error "BustedBuiltins busted, as there were no busted builtins." - ([], old) -> - P.wrap ("This codebase includes some builtins that are considered deprecated. Use the " <> makeExample' IP.updateBuiltins <> " command when you're ready to work on eliminating them from your codebase:") - : "" - : fmap (P.text . Reference.toText) old - (new, []) -> P.wrap ("This version of Unison provides builtins that are not part of your codebase. Use " <> makeExample' IP.updateBuiltins <> " to add them:") - : "" : fmap (P.text . Reference.toText) new - (new@(_:_), old@(_:_)) -> - [ P.wrap - ("Sorry and/or good news! This version of Unison supports a different set of builtins than this codebase uses. 
You can use " - <> makeExample' IP.updateBuiltins - <> " to add the ones you're missing and deprecate the ones I'm missing. 😉" - ) - , "You're missing:" `P.hang` P.lines (fmap (P.text . Reference.toText) new) - , "I'm missing:" `P.hang` P.lines (fmap (P.text . Reference.toText) old) - ] - ListOfPatches patches -> pure $ - if null patches then P.lit "nothing to show" - else numberedPatches patches - where - numberedPatches :: Set Name -> P.Pretty P.ColorText - numberedPatches patches = - (P.column2 . fmap format) ([(1::Integer)..] `zip` (toList patches)) - where - format (i, p) = (P.hiBlack . fromString $ show i <> ".", prettyName p) - ConfiguredMetadataParseError p md err -> - pure . P.fatalCallout . P.lines $ - [ P.wrap $ "I couldn't understand the default metadata that's set for " - <> prettyPath' p <> " in .unisonConfig." - , P.wrap $ "The value I found was" - <> (P.backticked . P.blue . P.string) md - <> "but I encountered the following error when trying to parse it:" - , "" - , err - ] - NoConfiguredGitUrl pp p -> - pure . P.fatalCallout . P.wrap $ - "I don't know where to " <> - pushPull "push to!" "pull from!" pp <> - (if Path.isRoot' p then "" - else "Add a line like `GitUrl." <> P.shown p - <> " = ' to .unisonConfig. " - ) - <> "Type `help " <> pushPull "push" "pull" pp <> - "` for more information." - --- | ConfiguredGitUrlParseError PushPull Path' Text String - ConfiguredGitUrlParseError pp p url err -> - pure . P.fatalCallout . P.lines $ - [ P.wrap $ "I couldn't understand the GitUrl that's set for" - <> prettyPath' p <> "in .unisonConfig" - , P.wrap $ "The value I found was" <> (P.backticked . P.blue . P.text) url - <> "but I encountered the following error when trying to parse it:" - , "" - , P.string err - , "" - , P.wrap $ "Type" <> P.backticked ("help " <> pushPull "push" "pull" pp) - <> "for more information." 
- ] --- | ConfiguredGitUrlIncludesShortBranchHash ShortBranchHash - ConfiguredGitUrlIncludesShortBranchHash pp repo sbh remotePath -> - pure . P.lines $ - [ P.wrap - $ "The `GitUrl.` entry in .unisonConfig for the current path has the value" - <> (P.group . (<>",") . P.blue . P.text) - (RemoteRepo.printNamespace repo (Just sbh) remotePath) - <> "which specifies a namespace hash" - <> P.group (P.blue (prettySBH sbh) <> ".") - , "" - , P.wrap $ - pushPull "I can't push to a specific hash, because it's immutable." - ("It's no use for repeated pulls," - <> "because you would just get the same immutable namespace each time.") - pp - , "" - , P.wrap $ "You can use" - <> P.backticked ( - pushPull "push" "pull" pp - <> " " - <> P.text (RemoteRepo.printNamespace repo Nothing remotePath)) - <> "if you want to" <> pushPull "push onto" "pull from" pp - <> "the latest." - ] - NoBranchWithHash _h -> pure . P.callout "😶" $ - P.wrap $ "I don't know of a namespace with that hash." - NotImplemented -> pure $ P.wrap "That's not implemented yet. Sorry! 😬" - BranchAlreadyExists p -> pure . P.wrap $ - "The namespace" <> prettyPath' p <> "already exists." - LabeledReferenceNotFound hq -> - pure . P.callout "\129300" . P.wrap . P.syntaxToColor $ - "Sorry, I couldn't find anything named" <> prettyHashQualified hq <> "." - LabeledReferenceAmbiguous hashLen hq (LD.partition -> (tps, tms)) -> - pure . P.callout "\129300" . P.lines $ [ - P.wrap "That name is ambiguous. It could refer to any of the following definitions:" - , "" - , P.indentN 2 (P.lines (map qualifyTerm tms ++ map qualifyType tps)) - ] - where - qualifyTerm :: Referent -> P.Pretty P.ColorText - qualifyTerm = P.syntaxToColor . case hq of - HQ.NameOnly n -> prettyNamedReferent hashLen n - HQ.HashQualified n _ -> prettyNamedReferent hashLen n - HQ.HashOnly _ -> prettyReferent hashLen - qualifyType :: Reference -> P.Pretty P.ColorText - qualifyType = P.syntaxToColor . 
case hq of - HQ.NameOnly n -> prettyNamedReference hashLen n - HQ.HashQualified n _ -> prettyNamedReference hashLen n - HQ.HashOnly _ -> prettyReference hashLen - DeleteNameAmbiguous hashLen p tms tys -> - pure . P.callout "\129300" . P.lines $ [ - P.wrap "That name is ambiguous. It could refer to any of the following definitions:" - , "" - , P.indentN 2 (P.lines (map qualifyTerm (Set.toList tms) ++ map qualifyType (Set.toList tys))) - , "" - , P.wrap "You may:" - , "" - , P.indentN 2 . P.bulleted $ - [ P.wrap "Delete one by an unambiguous name, given above." - , P.wrap "Delete them all by re-issuing the previous command." - ] - ] - where - name :: Name - name = Path.toName' (HQ'.toName (Path.unsplitHQ' p)) - qualifyTerm :: Referent -> P.Pretty P.ColorText - qualifyTerm = P.syntaxToColor . prettyNamedReferent hashLen name - qualifyType :: Reference -> P.Pretty P.ColorText - qualifyType = P.syntaxToColor . prettyNamedReference hashLen name - TermAmbiguous _ _ -> pure "That term is ambiguous." - HashAmbiguous h rs -> pure . P.callout "\129300" . P.lines $ [ - P.wrap $ "The hash" <> prettyShortHash h <> "is ambiguous." - <> "Did you mean one of these hashes?", - "", - P.indentN 2 $ P.lines (P.shown <$> Set.toList rs), - "", - P.wrap "Try again with a few more hash characters to disambiguate." - ] - BranchHashAmbiguous h rs -> pure . P.callout "\129300" . P.lines $ [ - P.wrap $ "The namespace hash" <> prettySBH h <> "is ambiguous." - <> "Did you mean one of these hashes?", - "", - P.indentN 2 $ P.lines (prettySBH <$> Set.toList rs), - "", - P.wrap "Try again with a few more hash characters to disambiguate." - ] - BadName n -> - pure . P.wrap $ P.string n <> " is not a kind of name I understand." - TermNotFound' sh -> - pure $ "I could't find a term with hash " - <> (prettyShortHash sh) - TypeNotFound' sh -> - pure $ "I could't find a type with hash " - <> (prettyShortHash sh) - NothingToPatch _patchPath dest -> pure $ - P.callout "😶" . P.wrap - $ "This had no effect. 
Perhaps the patch has already been applied" - <> "or it doesn't intersect with the definitions in" - <> P.group (prettyPath' dest <> ".") - PatchNeedsToBeConflictFree -> - pure . P.wrap $ - "I tried to auto-apply the patch, but couldn't because it contained" - <> "contradictory entries." - PatchInvolvesExternalDependents _ _ -> - pure "That patch involves external dependents." - ShowReflog [] -> pure . P.warnCallout $ "The reflog appears to be empty!" - ShowReflog entries -> pure $ - P.lines [ - P.wrap $ "Here is a log of the root namespace hashes," - <> "starting with the most recent," - <> "along with the command that got us there." - <> "Try:", - "", - -- `head . tail` is safe: entries never has 1 entry, and [] is handled above - let e2 = head . tail $ entries in - P.indentN 2 . P.wrapColumn2 $ [ - (IP.makeExample IP.forkLocal ["2", ".old"], - ""), - (IP.makeExample IP.forkLocal [prettySBH . Output.hash $ e2, ".old"], - "to make an old namespace accessible again,"), - (mempty,mempty), - (IP.makeExample IP.resetRoot [prettySBH . Output.hash $ e2], - "to reset the root namespace and its history to that of the specified" - <> "namespace.") - ], - "", - P.numberedList . fmap renderEntry $ entries - ] - where - renderEntry :: Output.ReflogEntry -> P.Pretty CT.ColorText - renderEntry (Output.ReflogEntry hash reason) = P.wrap $ - P.blue (prettySBH hash) <> " : " <> P.text reason - History _cap history tail -> pure $ - P.lines [ - note $ "The most recent namespace hash is immediately below this message.", "", - P.sep "\n\n" [ go h diff | (h,diff) <- reverse history ], "", - tailMsg - ] - where - tailMsg = case tail of - E.EndOfLog h -> P.lines [ - "□ " <> prettySBH h <> " (start of history)" - ] - E.MergeTail h hs -> P.lines [ - P.wrap $ "This segment of history starts with a merge." <> ex, - "", - "⊙ " <> prettySBH h, - "⑃", - P.lines (prettySBH <$> hs) - ] - E.PageEnd h _n -> P.lines [ - P.wrap $ "There's more history before the versions shown here." 
<> ex, "", - dots, "", - "⊙ " <> prettySBH h, - "" - ] - dots = "⠇" - go hash diff = P.lines [ - "⊙ " <> prettySBH hash, - "", - P.indentN 2 $ prettyDiff diff - ] - ex = "Use" <> IP.makeExample IP.history ["#som3n4m3space"] - <> "to view history starting from a given namespace hash." - StartOfCurrentPathHistory -> pure $ - P.wrap "You're already at the very beginning! 🙂" - PullAlreadyUpToDate ns dest -> pure . P.callout "😶" $ - P.wrap $ prettyPath' dest <> "was already up-to-date with" - <> P.group (prettyRemoteNamespace ns <> ".") - - MergeAlreadyUpToDate src dest -> pure . P.callout "😶" $ - P.wrap $ prettyPath' dest <> "was already up-to-date with" - <> P.group (prettyPath' src <> ".") - PreviewMergeAlreadyUpToDate src dest -> pure . P.callout "😶" $ - P.wrap $ prettyPath' dest <> "is already up-to-date with" - <> P.group (prettyPath' src <> ".") - DumpNumberedArgs args -> pure . P.numberedList $ fmap P.string args - NoConflictsOrEdits -> - pure (P.okCallout "No conflicts or edits in progress.") - NoOp -> pure $ P.string "I didn't make any changes." - DefaultMetadataNotification -> pure $ P.wrap "I added some default metadata." - DumpBitBooster head map -> let - go output [] = output - go output (head : queue) = case Map.lookup head map of - Nothing -> go (renderLine head [] : output) queue - Just tails -> go (renderLine head tails : output) (queue ++ tails) - where - renderHash = take 10 . Text.unpack . Hash.base32Hex . Causal.unRawHash - renderLine head tail = - (renderHash head) ++ "|" ++ intercalateMap " " renderHash tail ++ - case Map.lookup (Hash.base32Hex . Causal.unRawHash $ head) tags of - Just t -> "|tag: " ++ t - Nothing -> "" - -- some specific hashes that we want to label in the output - tags :: Map Text String - tags = Map.fromList . 
fmap swap $ - [ ("unisonbase 2019/8/6", "54s9qjhaonotuo4sp6ujanq7brngk32f30qt5uj61jb461h9fcca6vv5levnoo498bavne4p65lut6k6a7rekaruruh9fsl19agu8j8") - , ("unisonbase 2019/8/5", "focmbmg7ca7ht7opvjaqen58fobu3lijfa9adqp7a1l1rlkactd7okoimpfmd0ftfmlch8gucleh54t3rd1e7f13fgei86hnsr6dt1g") - , ("unisonbase 2019/7/31", "jm2ltsg8hh2b3c3re7aru6e71oepkqlc3skr2v7bqm4h1qgl3srucnmjcl1nb8c9ltdv56dpsgpdur1jhpfs6n5h43kig5bs4vs50co") - , ("unisonbase 2019/7/25", "an1kuqsa9ca8tqll92m20tvrmdfk0eksplgjbda13evdlngbcn5q72h8u6nb86ojr7cvnemjp70h8cq1n95osgid1koraq3uk377g7g") - , ("ucm m1b", "o6qocrqcqht2djicb1gcmm5ct4nr45f8g10m86bidjt8meqablp0070qae2tvutnvk4m9l7o1bkakg49c74gduo9eati20ojf0bendo") - , ("ucm m1, m1a", "auheev8io1fns2pdcnpf85edsddj27crpo9ajdujum78dsncvfdcdu5o7qt186bob417dgmbd26m8idod86080bfivng1edminu3hug") - ] - - in pure $ P.lines [ - P.lines (fmap fromString . reverse . nubOrd $ go [] [head]), - "", - "Paste that output into http://bit-booster.com/graph.html" - ] - ListDependents hqLength ld names missing -> pure $ - if names == mempty && missing == mempty - then c (prettyLabeledDependency hqLength ld) <> " doesn't have any dependents." - else - "Dependents of " <> c (prettyLabeledDependency hqLength ld) <> ":\n\n" <> - (P.indentN 2 (P.numberedColumn2Header num pairs)) - where - num n = P.hiBlack $ P.shown n <> "." - header = (P.hiBlack "Reference", P.hiBlack "Name") - pairs = header : (fmap (first c . second c) $ - [ (p $ Reference.toShortHash r, prettyName n) | (n, r) <- names ] ++ - [ (p $ Reference.toShortHash r, "(no name available)") | r <- toList missing ]) - p = prettyShortHash . SH.take hqLength - c = P.syntaxToColor - -- this definition is identical to the previous one, apart from the word - -- "Dependencies", but undecided about whether or how to refactor - ListDependencies hqLength ld names missing -> pure $ - if names == mempty && missing == mempty - then c (prettyLabeledDependency hqLength ld) <> " doesn't have any dependencies." 
- else - "Dependencies of " <> c (prettyLabeledDependency hqLength ld) <> ":\n\n" <> - (P.indentN 2 (P.numberedColumn2Header num pairs)) - where - num n = P.hiBlack $ P.shown n <> "." - header = (P.hiBlack "Reference", P.hiBlack "Name") - pairs = header : (fmap (first c . second c) $ - [ (p $ Reference.toShortHash r, prettyName n) | (n, r) <- names ] ++ - [ (p $ Reference.toShortHash r, "(no name available)") | r <- toList missing ]) - p = prettyShortHash . SH.take hqLength - c = P.syntaxToColor - DumpUnisonFileHashes hqLength datas effects terms -> - pure . P.syntaxToColor . P.lines $ - (effects <&> \(n,r) -> "ability " <> - prettyHashQualified' (HQ'.take hqLength . HQ'.fromNamedReference n $ Reference.DerivedId r)) <> - (datas <&> \(n,r) -> "type " <> - prettyHashQualified' (HQ'.take hqLength . HQ'.fromNamedReference n $ Reference.DerivedId r)) <> - (terms <&> \(n,r) -> - prettyHashQualified' (HQ'.take hqLength . HQ'.fromNamedReference n $ Reference.DerivedId r)) - - where - _nameChange _cmd _pastTenseCmd _oldName _newName _r = error "todo" - -- do - -- when (not . Set.null $ E.changedSuccessfully r) . putPrettyLn . P.okCallout $ - -- P.wrap $ "I" <> pastTenseCmd <> "the" - -- <> ns (E.changedSuccessfully r) - -- <> P.blue (prettyName oldName) - -- <> "to" <> P.group (P.green (prettyName newName) <> ".") - -- when (not . Set.null $ E.oldNameConflicted r) . putPrettyLn . P.warnCallout $ - -- (P.wrap $ "I couldn't" <> cmd <> "the" - -- <> ns (E.oldNameConflicted r) - -- <> P.blue (prettyName oldName) - -- <> "to" <> P.green (prettyName newName) - -- <> "because of conflicts.") - -- <> "\n\n" - -- <> tip ("Use " <> makeExample' IP.todo <> " to view more information on conflicts and remaining work.") - -- when (not . Set.null $ E.newNameAlreadyExists r) . putPrettyLn . 
P.warnCallout $ - -- (P.wrap $ "I couldn't" <> cmd <> P.blue (prettyName oldName) - -- <> "to" <> P.green (prettyName newName) - -- <> "because the " - -- <> ns (E.newNameAlreadyExists r) - -- <> "already exist(s).") - -- <> "\n\n" - -- <> tip - -- ("Use" <> makeExample IP.rename [prettyName newName, ""] <> "to make" <> prettyName newName <> "available.") --- where --- ns targets = P.oxfordCommas $ --- map (fromString . Names.renderNameTarget) (toList targets) - -prettyPath' :: Path.Path' -> Pretty -prettyPath' p' = - if Path.isCurrentPath p' - then "the current namespace" - else P.blue (P.shown p') - -prettyRelative :: Path.Relative -> Pretty -prettyRelative = P.blue . P.shown - -prettySBH :: IsString s => ShortBranchHash -> P.Pretty s -prettySBH hash = P.group $ "#" <> P.text (SBH.toText hash) - -formatMissingStuff :: (Show tm, Show typ) => - [(HQ.HashQualified, tm)] -> [(HQ.HashQualified, typ)] -> Pretty -formatMissingStuff terms types = - (unlessM (null terms) . P.fatalCallout $ - P.wrap "The following terms have a missing or corrupted type signature:" - <> "\n\n" - <> P.column2 [ (P.syntaxToColor $ prettyHashQualified name, fromString (show ref)) | (name, ref) <- terms ]) <> - (unlessM (null types) . P.fatalCallout $ - P.wrap "The following types weren't found in the codebase:" - <> "\n\n" - <> P.column2 [ (P.syntaxToColor $ prettyHashQualified name, fromString (show ref)) | (name, ref) <- types ]) - -displayDefinitions' :: Var v => Ord a1 - => PPE.PrettyPrintEnvDecl - -> Map Reference.Reference (DisplayThing (DD.Decl v a1)) - -> Map Reference.Reference (DisplayThing (Term v a1)) - -> Pretty -displayDefinitions' ppe0 types terms = P.syntaxToColor $ P.sep "\n\n" (prettyTypes <> prettyTerms) - where - ppeBody r = PPE.declarationPPE ppe0 r - ppeDecl = PPE.unsuffixifiedPPE ppe0 - prettyTerms = map go . Map.toList - -- sort by name - $ Map.mapKeys (first (PPE.termName ppeDecl . Referent.Ref) . dupe) terms - prettyTypes = map go2 . 
Map.toList - $ Map.mapKeys (first (PPE.typeName ppeDecl) . dupe) types - go ((n, r), dt) = - case dt of - MissingThing r -> missing n r - BuiltinThing -> builtin n - RegularThing tm -> TermPrinter.prettyBinding (ppeBody r) n tm - go2 ((n, r), dt) = - case dt of - MissingThing r -> missing n r - BuiltinThing -> builtin n - RegularThing decl -> case decl of - Left d -> DeclPrinter.prettyEffectDecl (ppeBody r) r n d - Right d -> DeclPrinter.prettyDataDecl (ppeBody r) r n d - builtin n = P.wrap $ "--" <> prettyHashQualified n <> " is built-in." - missing n r = P.wrap ( - "-- The name " <> prettyHashQualified n <> " is assigned to the " - <> "reference " <> fromString (show r ++ ",") - <> "which is missing from the codebase.") - <> P.newline - <> tip "You might need to repair the codebase manually." - -displayRendered :: Maybe FilePath -> Pretty -> IO Pretty -displayRendered outputLoc pp = - maybe (pure pp) scratchAndDisplay outputLoc - where - scratchAndDisplay path = do - path' <- canonicalizePath path - prependToFile pp path' - pure (message pp path') - where - prependToFile pp path = do - existingContents <- do - exists <- doesFileExist path - if exists then readFile path - else pure "" - writeFile path . Text.pack . P.toPlain 80 $ - P.lines [ pp, "", P.text existingContents ] - message pp path = - P.callout "☝️" $ P.lines [ - P.wrap $ "I added this to the top of " <> fromString path, - "", - P.indentN 2 pp - ] - -displayDefinitions :: Var v => Ord a1 => - Maybe FilePath - -> PPE.PrettyPrintEnvDecl - -> Map Reference.Reference (DisplayThing (DD.Decl v a1)) - -> Map Reference.Reference (DisplayThing (Term v a1)) - -> IO Pretty -displayDefinitions _outputLoc _ppe types terms | Map.null types && Map.null terms = - pure $ P.callout "😶" "No results to display." 
-displayDefinitions outputLoc ppe types terms = - maybe displayOnly scratchAndDisplay outputLoc - where - displayOnly = pure code - scratchAndDisplay path = do - path' <- canonicalizePath path - prependToFile code path' - pure (message code path') - where - prependToFile code path = do - existingContents <- do - exists <- doesFileExist path - if exists then readFile path - else pure "" - writeFile path . Text.pack . P.toPlain 80 $ - P.lines [ code, "" - , "---- " <> "Anything below this line is ignored by Unison." - , "", P.text existingContents ] - message code path = - P.callout "☝️" $ P.lines [ - P.wrap $ "I added these definitions to the top of " <> fromString path, - "", - P.indentN 2 code, - "", - P.wrap $ - "You can edit them there, then do" <> makeExample' IP.update <> - "to replace the definitions currently in this namespace." - ] - code = displayDefinitions' ppe types terms - -displayTestResults :: Bool -- whether to show the tip - -> PPE.PrettyPrintEnv - -> [(Reference, Text)] - -> [(Reference, Text)] - -> Pretty -displayTestResults showTip ppe oks fails = let - name r = P.text (HQ.toText $ PPE.termName ppe (Referent.Ref r)) - okMsg = - if null oks then mempty - else P.column2 [ (P.green "◉ " <> name r, " " <> P.green (P.text msg)) | (r, msg) <- oks ] - okSummary = - if null oks then mempty - else "✅ " <> P.bold (P.num (length oks)) <> P.green " test(s) passing" - failMsg = - if null fails then mempty - else P.column2 [ (P.red "✗ " <> name r, " " <> P.red (P.text msg)) | (r, msg) <- fails ] - failSummary = - if null fails then mempty - else "🚫 " <> P.bold (P.num (length fails)) <> P.red " test(s) failing" - tipMsg = - if not showTip || (null oks && null fails) then mempty - else tip $ "Use " <> P.blue ("view " <> name (fst $ head (fails ++ oks))) <> "to view the source of a test." - in if null oks && null fails then "😶 No tests available." - else P.sep "\n\n" . P.nonEmpty $ [ - okMsg, failMsg, - P.sep ", " . 
P.nonEmpty $ [failSummary, okSummary], tipMsg] - -unsafePrettyTermResultSig' :: Var v => - PPE.PrettyPrintEnv -> SR'.TermResult' v a -> Pretty -unsafePrettyTermResultSig' ppe = \case - SR'.TermResult' (HQ'.toHQ -> name) (Just typ) _r _aliases -> - head (TypePrinter.prettySignatures' ppe [(name,typ)]) - _ -> error "Don't pass Nothing" - --- produces: --- -- #5v5UtREE1fTiyTsTK2zJ1YNqfiF25SkfUnnji86Lms#0 --- Optional.None, Maybe.Nothing : Maybe a -unsafePrettyTermResultSigFull' :: Var v => - PPE.PrettyPrintEnv -> SR'.TermResult' v a -> Pretty -unsafePrettyTermResultSigFull' ppe = \case - SR'.TermResult' (HQ'.toHQ -> hq) (Just typ) r (Set.map HQ'.toHQ -> aliases) -> - P.lines - [ P.hiBlack "-- " <> greyHash (HQ.fromReferent r) - , P.group $ - P.commas (fmap greyHash $ hq : toList aliases) <> " : " - <> (P.syntaxToColor $ TypePrinter.pretty0 ppe mempty (-1) typ) - , mempty - ] - _ -> error "Don't pass Nothing" - where greyHash = styleHashQualified' id P.hiBlack - -prettyTypeResultHeader' :: Var v => SR'.TypeResult' v a -> Pretty -prettyTypeResultHeader' (SR'.TypeResult' (HQ'.toHQ -> name) dt r _aliases) = - prettyDeclTriple (name, r, dt) - --- produces: --- -- #5v5UtREE1fTiyTsTK2zJ1YNqfiF25SkfUnnji86Lms --- type Optional --- type Maybe -prettyTypeResultHeaderFull' :: Var v => SR'.TypeResult' v a -> Pretty -prettyTypeResultHeaderFull' (SR'.TypeResult' (HQ'.toHQ -> name) dt r (Set.map HQ'.toHQ -> aliases)) = - P.lines stuff <> P.newline - where - stuff = - (P.hiBlack "-- " <> greyHash (HQ.fromReference r)) : - fmap (\name -> prettyDeclTriple (name, r, dt)) - (name : toList aliases) - where greyHash = styleHashQualified' id P.hiBlack - -prettyDeclTriple :: Var v => - (HQ.HashQualified, Reference.Reference, DisplayThing (DD.Decl v a)) - -> Pretty -prettyDeclTriple (name, _, displayDecl) = case displayDecl of - BuiltinThing -> P.hiBlack "builtin " <> P.hiBlue "type " <> P.blue (P.syntaxToColor $ prettyHashQualified name) - MissingThing _ -> mempty -- these need to be handled 
elsewhere - RegularThing decl -> case decl of - Left ed -> P.syntaxToColor $ DeclPrinter.prettyEffectHeader name ed - Right dd -> P.syntaxToColor $ DeclPrinter.prettyDataHeader name dd - -prettyDeclPair :: Var v => - PPE.PrettyPrintEnv -> (Reference, DisplayThing (DD.Decl v a)) - -> Pretty -prettyDeclPair ppe (r, dt) = prettyDeclTriple (PPE.typeName ppe r, r, dt) - -renderNameConflicts :: Set.Set Name -> Set.Set Name -> Pretty -renderNameConflicts conflictedTypeNames conflictedTermNames = - unlessM (null allNames) $ P.callout "❓" . P.sep "\n\n" . P.nonEmpty $ [ - showConflictedNames "types" conflictedTypeNames, - showConflictedNames "terms" conflictedTermNames, - tip $ "This occurs when merging branches that both independently introduce the same name. Use " - <> makeExample IP.view (prettyName <$> take 3 allNames) - <> "to see the conflicting defintions, then use " - <> makeExample' (if (not . null) conflictedTypeNames - then IP.renameType else IP.renameTerm) - <> "to resolve the conflicts." - ] - where - allNames = toList (conflictedTermNames <> conflictedTypeNames) - showConflictedNames things conflictedNames = - unlessM (Set.null conflictedNames) $ - P.wrap ("These" <> P.bold (things <> "have conflicting definitions:")) - `P.hang` P.commas (P.blue . prettyName <$> toList conflictedNames) - -renderEditConflicts :: - PPE.PrettyPrintEnv -> Patch -> Pretty -renderEditConflicts ppe Patch{..} = - unlessM (null editConflicts) . P.callout "❓" . P.sep "\n\n" $ [ - P.wrap $ "These" <> P.bold "definitions were edited differently" - <> "in namespaces that have been merged into this one." - <> "You'll have to tell me what to use as the new definition:", - P.indentN 2 (P.lines (formatConflict <$> editConflicts)) --- , tip $ "Use " <> makeExample IP.resolve [name (head editConflicts), " "] <> " to pick a replacement." -- todo: eventually something with `edit` - ] - where - -- todo: could possibly simplify all of this, but today is a copy/paste day. 
- editConflicts :: [Either (Reference, Set TypeEdit.TypeEdit) (Reference, Set TermEdit.TermEdit)] - editConflicts = - (fmap Left . Map.toList . R.toMultimap . R.filterManyDom $ _typeEdits) <> - (fmap Right . Map.toList . R.toMultimap . R.filterManyDom $ _termEdits) - typeName r = styleHashQualified P.bold (PPE.typeName ppe r) - termName r = styleHashQualified P.bold (PPE.termName ppe (Referent.Ref r)) - formatTypeEdits (r, toList -> es) = P.wrap $ - "The type" <> typeName r <> "was" <> - (if TypeEdit.Deprecate `elem` es - then "deprecated and also replaced with" - else "replaced with") <> - P.oxfordCommas [ typeName r | TypeEdit.Replace r <- es ] - formatTermEdits (r, toList -> es) = P.wrap $ - "The term" <> termName r <> "was" <> - (if TermEdit.Deprecate `elem` es - then "deprecated and also replaced with" - else "replaced with") <> - P.oxfordCommas [ termName r | TermEdit.Replace r _ <- es ] - formatConflict = either formatTypeEdits formatTermEdits - -type Numbered = State.State (Int, Seq.Seq String) - -todoOutput :: Var v => PPE.PrettyPrintEnvDecl -> TO.TodoOutput v a -> Pretty -todoOutput ppe todo = - todoConflicts <> todoEdits - where - ppeu = PPE.unsuffixifiedPPE ppe - ppes = PPE.suffixifiedPPE ppe - (frontierTerms, frontierTypes) = TO.todoFrontier todo - (dirtyTerms, dirtyTypes) = TO.todoFrontierDependents todo - corruptTerms = - [ (PPE.termName ppeu (Referent.Ref r), r) | (r, Nothing) <- frontierTerms ] - corruptTypes = - [ (PPE.typeName ppeu r, r) | (r, MissingThing _) <- frontierTypes ] - goodTerms ts = - [ (PPE.termName ppeu (Referent.Ref r), typ) | (r, Just typ) <- ts ] - todoConflicts = if TO.noConflicts todo then mempty else P.lines . 
P.nonEmpty $ - [ renderEditConflicts ppeu (TO.editConflicts todo) - , renderNameConflicts conflictedTypeNames conflictedTermNames ] - where - -- If a conflict is both an edit and a name conflict, we show it in the edit - -- conflicts section - c :: Names0 - c = removeEditConflicts (TO.editConflicts todo) (TO.nameConflicts todo) - conflictedTypeNames = (R.dom . Names.types) c - conflictedTermNames = (R.dom . Names.terms) c - -- e.g. `foo#a` has been independently updated to `foo#b` and `foo#c`. - -- This means there will be a name conflict: - -- foo -> #b - -- foo -> #c - -- as well as an edit conflict: - -- #a -> #b - -- #a -> #c - -- We want to hide/ignore the name conflicts that are also targets of an - -- edit conflict, so that the edit conflict will be dealt with first. - -- For example, if hash `h` has multiple edit targets { #x, #y, #z, ...}, - -- we'll temporarily remove name conflicts pointing to { #x, #y, #z, ...}. - removeEditConflicts :: Ord n => Patch -> Names' n -> Names' n - removeEditConflicts Patch{..} Names{..} = Names terms' types' where - terms' = R.filterRan (`Set.notMember` conflictedTermEditTargets) terms - types' = R.filterRan (`Set.notMember` conflictedTypeEditTargets) types - conflictedTypeEditTargets :: Set Reference - conflictedTypeEditTargets = - Set.fromList $ toList (R.ran typeEditConflicts) >>= TypeEdit.references - conflictedTermEditTargets :: Set Referent.Referent - conflictedTermEditTargets = - Set.fromList . fmap Referent.Ref - $ toList (R.ran termEditConflicts) >>= TermEdit.references - typeEditConflicts = R.filterDom (`R.manyDom` _typeEdits) _typeEdits - termEditConflicts = R.filterDom (`R.manyDom` _termEdits) _termEdits - - - todoEdits = unlessM (TO.noEdits todo) . P.callout "🚧" . P.sep "\n\n" . P.nonEmpty $ - [ P.wrap ("The namespace has" <> fromString (show (TO.todoScore todo)) - <> "transitive dependent(s) left to upgrade." - <> "Your edit frontier is the dependents of these definitions:") - , P.indentN 2 . 
P.lines $ ( - (prettyDeclPair ppeu <$> toList frontierTypes) ++ - TypePrinter.prettySignatures' ppes (goodTerms frontierTerms) - ) - , P.wrap "I recommend working on them in the following order:" - , P.numberedList $ - let unscore (_score,a,b) = (a,b) - in (prettyDeclPair ppeu . unscore <$> toList dirtyTypes) ++ - TypePrinter.prettySignatures' - ppes - (goodTerms $ unscore <$> dirtyTerms) - , formatMissingStuff corruptTerms corruptTypes - ] - -listOfDefinitions :: - Var v => PPE.PrettyPrintEnv -> E.ListDetailed -> [SR'.SearchResult' v a] -> IO Pretty -listOfDefinitions ppe detailed results = - pure $ listOfDefinitions' ppe detailed results - -listOfLinks :: - Var v => PPE.PrettyPrintEnv -> [(HQ.HashQualified, Maybe (Type v a))] -> IO Pretty -listOfLinks _ [] = pure . P.callout "😶" . P.wrap $ - "No results. Try using the " <> - IP.makeExample IP.link [] <> - "command to add metadata to a definition." -listOfLinks ppe results = pure $ P.lines [ - P.numberedColumn2 num [ - (P.syntaxToColor $ prettyHashQualified hq, ": " <> prettyType typ) | (hq,typ) <- results - ], "", - tip $ "Try using" <> IP.makeExample IP.display ["1"] - <> "to display the first result or" - <> IP.makeExample IP.view ["1"] <> "to view its source." - ] - where - num i = P.hiBlack $ P.shown i <> "." - prettyType Nothing = "❓ (missing a type for this definition)" - prettyType (Just t) = TypePrinter.pretty ppe t - -data ShowNumbers = ShowNumbers | HideNumbers --- | `ppe` is just for rendering type signatures --- `oldPath, newPath :: Path.Absolute` are just for producing fully-qualified --- numbered args -showDiffNamespace :: forall v . 
Var v - => ShowNumbers - -> PPE.PrettyPrintEnv - -> Path.Absolute - -> Path.Absolute - -> OBD.BranchDiffOutput v Ann - -> (Pretty, NumberedArgs) -showDiffNamespace _ _ _ _ diffOutput | OBD.isEmpty diffOutput = - ("The namespaces are identical.", mempty) -showDiffNamespace sn ppe oldPath newPath OBD.BranchDiffOutput{..} = - (P.sepNonEmpty "\n\n" p, toList args) - where - (p, (menuSize, args)) = (`State.runState` (0::Int, Seq.empty)) $ sequence [ - if (not . null) newTypeConflicts - || (not . null) newTermConflicts - then do - prettyUpdatedTypes :: [Pretty] <- traverse prettyUpdateType newTypeConflicts - prettyUpdatedTerms :: [Pretty] <- traverse prettyUpdateTerm newTermConflicts - pure $ P.sepNonEmpty "\n\n" - [ P.red "New name conflicts:" - , P.indentN 2 . P.sepNonEmpty "\n\n" $ prettyUpdatedTypes <> prettyUpdatedTerms - ] - else pure mempty - ,if (not . null) resolvedTypeConflicts - || (not . null) resolvedTermConflicts - then do - prettyUpdatedTypes :: [Pretty] <- traverse prettyUpdateType resolvedTypeConflicts - prettyUpdatedTerms :: [Pretty] <- traverse prettyUpdateTerm resolvedTermConflicts - pure $ P.sepNonEmpty "\n\n" - [ P.bold "Resolved name conflicts:" - , P.indentN 2 . P.sepNonEmpty "\n\n" $ prettyUpdatedTypes <> prettyUpdatedTerms - ] - else pure mempty - ,if (not . null) updatedTypes - || (not . null) updatedTerms - || propagatedUpdates > 0 - || (not . null) updatedPatches - then do - prettyUpdatedTypes :: [Pretty] <- traverse prettyUpdateType updatedTypes - prettyUpdatedTerms :: [Pretty] <- traverse prettyUpdateTerm updatedTerms - prettyUpdatedPatches :: [Pretty] <- traverse (prettySummarizePatch newPath) updatedPatches - pure $ P.sepNonEmpty "\n\n" - [ P.bold "Updates:" - , P.indentNonEmptyN 2 . P.sepNonEmpty "\n\n" $ prettyUpdatedTypes <> prettyUpdatedTerms - , if propagatedUpdates > 0 - then P.indentN 2 - $ P.wrap (P.hiBlack $ "There were " - <> P.shown propagatedUpdates - <> "auto-propagated updates.") - else mempty - , P.indentNonEmptyN 2 . 
P.linesNonEmpty $ prettyUpdatedPatches - ] - else pure mempty - ,if (not . null) addedTypes - || (not . null) addedTerms - || (not . null) addedPatches - then do - prettyAddedTypes :: Pretty <- prettyAddTypes addedTypes - prettyAddedTerms :: Pretty <- prettyAddTerms addedTerms - prettyAddedPatches :: [Pretty] <- traverse (prettySummarizePatch newPath) addedPatches - pure $ P.sepNonEmpty "\n\n" - [ P.bold "Added definitions:" - , P.indentNonEmptyN 2 $ P.linesNonEmpty [prettyAddedTypes, prettyAddedTerms] - , P.indentNonEmptyN 2 $ P.lines prettyAddedPatches - ] - else pure mempty - ,if (not . null) removedTypes - || (not . null) removedTerms - || (not . null) removedPatches - then do - prettyRemovedTypes :: Pretty <- prettyRemoveTypes removedTypes - prettyRemovedTerms :: Pretty <- prettyRemoveTerms removedTerms - prettyRemovedPatches :: [Pretty] <- traverse (prettyNamePatch oldPath) removedPatches - pure $ P.sepNonEmpty "\n\n" - [ P.bold "Removed definitions:" - , P.indentN 2 $ P.linesNonEmpty [ prettyRemovedTypes - , prettyRemovedTerms - , P.linesNonEmpty prettyRemovedPatches ] - ] - else pure mempty - ,if (not . null) renamedTypes - || (not . null) renamedTerms - then do - results <- prettyRenameGroups renamedTypes renamedTerms - pure $ P.sepNonEmpty "\n\n" - [ P.bold "Name changes:" - , P.indentN 2 . P.sepNonEmpty "\n\n" $ results - ] - -- todo: change separator to just '\n' here if all the results are 1 to 1 - else pure mempty - ] - - {- new implementation - 23. X ┐ => (added) 24. X' - 25. X2 ┘ (removed) 26. X2 - -} - prettyRenameGroups :: [OBD.RenameTypeDisplay v a] - -> [OBD.RenameTermDisplay v a] - -> Numbered [Pretty] - prettyRenameGroups types terms = - (<>) <$> traverse (prettyGroup . (over (_1._1) Referent.Ref)) - (types `zip` [0..]) - <*> traverse prettyGroup (terms `zip` [length types ..]) - where - leftNamePad :: Int = foldl1' max $ - map (foldl1' max . map HQ'.nameLength . toList . view _3) terms <> - map (foldl1' max . map HQ'.nameLength . toList . 
view _3) types - prettyGroup :: ((Referent, b, Set HQ'.HashQualified, Set HQ'.HashQualified), Int) - -> Numbered Pretty - prettyGroup ((r, _, olds, news),i) = let - -- [ "peach ┐" - -- , "peach' ┘"] - olds' :: [Numbered Pretty] = - map (\(oldhq, oldp) -> numHQ' oldPath oldhq r <&> (\n -> n <> " " <> oldp)) - . (zip (toList olds)) - . P.boxRight - . map (P.rightPad leftNamePad . phq') - $ toList olds - - added' = toList $ Set.difference news olds - removed' = toList $ Set.difference olds news - -- [ "(added) 24. X'" - -- , "(removed) 26. X2" - -- ] - - news' :: [Numbered Pretty] = - map (number addedLabel) added' ++ map (number removedLabel) removed' - where - addedLabel = "(added)" - removedLabel = "(removed)" - number label name = - numHQ' newPath name r <&> - (\num -> num <> " " <> phq' name <> " " <> label) - - buildTable :: [Numbered Pretty] -> [Numbered Pretty] -> Numbered Pretty - buildTable lefts rights = let - hlefts = if i == 0 then pure (P.bold "Original") : lefts - else lefts - hrights = if i == 0 then pure (P.bold "Changes") : rights else rights - in P.column2UnzippedM @Numbered mempty hlefts hrights - - in buildTable olds' news' - - prettyUpdateType :: OBD.UpdateTypeDisplay v a -> Numbered Pretty - {- - 1. ability Foo#pqr x y - 2. - AllRightsReserved : License - 3. + MIT : License - 4. ability Foo#abc - 5. - apiDocs : License - 6. + MIT : License - -} - prettyUpdateType (Nothing, mdUps) = - P.column2 <$> traverse (mdTypeLine newPath) mdUps - {- - 1. ┌ ability Foo#pqr x y - 2. └ ability Foo#xyz a b - ⧩ - 4. ┌ ability Foo#abc - │ 5. - apiDocs : Doc - │ 6. + MIT : License - 7. └ ability Foo#def - 8. - apiDocs : Doc - 9. + MIT : License - - 1. ┌ foo#abc : Nat -> Nat -> Poop - 2. └ foo#xyz : Nat - ↓ - 4. foo : Poop - 5. 
+ foo.docs : Doc - -} - prettyUpdateType (Just olds, news) = - do - olds <- traverse (mdTypeLine oldPath) [ (name,r,decl,mempty) | (name,r,decl) <- olds ] - news <- traverse (mdTypeLine newPath) news - let (oldnums, olddatas) = unzip olds - let (newnums, newdatas) = unzip news - pure . P.column2 $ - zip (oldnums <> [""] <> newnums) - (P.boxLeft olddatas <> [downArrow] <> P.boxLeft newdatas) - - {- - 13. ┌ability Yyz (+1 metadata) - 14. └ability copies.Yyz (+2 metadata) - -} - prettyAddTypes :: [OBD.AddedTypeDisplay v a] -> Numbered Pretty - prettyAddTypes = fmap P.lines . traverse prettyGroup where - prettyGroup :: OBD.AddedTypeDisplay v a -> Numbered Pretty - prettyGroup (hqmds, r, odecl) = do - pairs <- traverse (prettyLine r odecl) hqmds - let (nums, decls) = unzip pairs - let boxLeft = case hqmds of _:_:_ -> P.boxLeft; _ -> id - pure . P.column2 $ zip nums (boxLeft decls) - prettyLine :: Reference -> Maybe (DD.DeclOrBuiltin v a) -> (HQ'.HashQualified, [OBD.MetadataDisplay v a]) -> Numbered (Pretty, Pretty) - prettyLine r odecl (hq, mds) = do - n <- numHQ' newPath hq (Referent.Ref r) - pure . (n,) $ prettyDecl hq odecl <> case length mds of - 0 -> mempty - c -> " (+" <> P.shown c <> " metadata)" - - prettyAddTerms :: [OBD.AddedTermDisplay v a] -> Numbered Pretty - prettyAddTerms = fmap (P.column3 . mconcat) . traverse prettyGroup . reorderTerms where - reorderTerms = sortOn (not . Referent.isConstructor . view _2) - prettyGroup :: OBD.AddedTermDisplay v a -> Numbered [(Pretty, Pretty, Pretty)] - prettyGroup (hqmds, r, otype) = do - pairs <- traverse (prettyLine r otype) hqmds - let (nums, names, decls) = unzip3 pairs - boxLeft = case hqmds of _:_:_ -> P.boxLeft; _ -> id - pure $ zip3 nums (boxLeft names) decls - prettyLine r otype (hq, mds) = do - n <- numHQ' newPath hq r - pure . 
(n, phq' hq, ) $ ": " <> prettyType otype <> case length mds of - 0 -> mempty - c -> " (+" <> P.shown c <> " metadata)" - - prettySummarizePatch, prettyNamePatch :: Path.Absolute -> OBD.PatchDisplay -> Numbered Pretty - -- 12. patch p (added 3 updates, deleted 1) - prettySummarizePatch prefix (name, patchDiff) = do - n <- numPatch prefix name - let addCount = (R.size . view Patch.addedTermEdits) patchDiff + - (R.size . view Patch.addedTypeEdits) patchDiff - delCount = (R.size . view Patch.removedTermEdits) patchDiff + - (R.size . view Patch.removedTypeEdits) patchDiff - messages = - (if addCount > 0 then ["added " <> P.shown addCount] else []) ++ - (if delCount > 0 then ["deleted " <> P.shown addCount] else []) - message = case messages of - [] -> mempty - x : ys -> " (" <> P.commas (x <> " updates" : ys) <> ")" - pure $ n <> P.bold " patch " <> prettyName name <> message - -- 18. patch q - prettyNamePatch prefix (name, _patchDiff) = do - n <- numPatch prefix name - pure $ n <> P.bold " patch " <> prettyName name - - {- - Removes: - - 10. ┌ oldn'busted : Nat -> Nat -> Poop - 11. └ oldn'busted' - 12. ability BadType - 13. patch defunctThingy - -} - prettyRemoveTypes :: [OBD.RemovedTypeDisplay v a] -> Numbered Pretty - prettyRemoveTypes = fmap P.lines . traverse prettyGroup where - prettyGroup :: OBD.RemovedTypeDisplay v a -> Numbered Pretty - prettyGroup (hqs, r, odecl) = do - lines <- traverse (prettyLine r odecl) hqs - let (nums, decls) = unzip lines - boxLeft = case hqs of _:_:_ -> P.boxLeft; _ -> id - pure . P.column2 $ zip nums (boxLeft decls) - prettyLine r odecl hq = do - n <- numHQ' newPath hq (Referent.Ref r) - pure (n, prettyDecl hq odecl) - - prettyRemoveTerms :: [OBD.RemovedTermDisplay v a] -> Numbered Pretty - prettyRemoveTerms = fmap (P.column3 . mconcat) . traverse prettyGroup . reorderTerms where - reorderTerms = sortOn (not . Referent.isConstructor . 
view _2) - prettyGroup :: OBD.RemovedTermDisplay v a -> Numbered [(Pretty, Pretty, Pretty)] - prettyGroup ([], r, _) = - error $ "trying to remove " <> show r <> " without any names." - prettyGroup (hq1:hqs, r, otype) = do - line1 <- prettyLine1 r otype hq1 - lines <- traverse (prettyLine r) hqs - let (nums, names, decls) = unzip3 (line1:lines) - boxLeft = case hqs of _:_ -> P.boxLeft; _ -> id - pure $ zip3 nums (boxLeft names) decls - prettyLine1 r otype hq = do - n <- numHQ' newPath hq r - pure (n, phq' hq, ": " <> prettyType otype) - prettyLine r hq = do - n <- numHQ' newPath hq r - pure (n, phq' hq, mempty) - - downArrow = P.bold "↓" - mdTypeLine :: Path.Absolute -> OBD.TypeDisplay v a -> Numbered (Pretty, Pretty) - mdTypeLine p (hq, r, odecl, mddiff) = do - n <- numHQ' p hq (Referent.Ref r) - fmap ((n,) . P.linesNonEmpty) . sequence $ - [ pure $ prettyDecl hq odecl - , P.indentN leftNumsWidth <$> prettyMetadataDiff mddiff ] - - -- + 2. MIT : License - -- - 3. AllRightsReserved : License - mdTermLine :: Path.Absolute -> Int -> OBD.TermDisplay v a -> Numbered (Pretty, Pretty) - mdTermLine p namesWidth (hq, r, otype, mddiff) = do - n <- numHQ' p hq r - fmap ((n,) . P.linesNonEmpty) . sequence $ - [ pure $ P.rightPad namesWidth (phq' hq) <> " : " <> prettyType otype - , prettyMetadataDiff mddiff ] - -- , P.indentN 2 <$> prettyMetadataDiff mddiff ] - - prettyUpdateTerm :: OBD.UpdateTermDisplay v a -> Numbered Pretty - prettyUpdateTerm (Nothing, newTerms) = - if null newTerms then error "Super invalid UpdateTermDisplay" else - fmap P.column2 $ traverse (mdTermLine newPath namesWidth) newTerms - where namesWidth = foldl1' max $ fmap (HQ'.nameLength . 
view _1) newTerms - prettyUpdateTerm (Just olds, news) = - fmap P.column2 $ do - olds <- traverse (mdTermLine oldPath namesWidth) [ (name,r,typ,mempty) | (name,r,typ) <- olds ] - news <- traverse (mdTermLine newPath namesWidth) news - let (oldnums, olddatas) = unzip olds - let (newnums, newdatas) = unzip news - pure $ zip (oldnums <> [""] <> newnums) - (P.boxLeft olddatas <> [downArrow] <> P.boxLeft newdatas) - where namesWidth = foldl1' max $ fmap (HQ'.nameLength . view _1) news - <> fmap (HQ'.nameLength . view _1) olds - - prettyMetadataDiff :: OBD.MetadataDiff (OBD.MetadataDisplay v a) -> Numbered Pretty - prettyMetadataDiff OBD.MetadataDiff{..} = P.column2M $ - map (elem oldPath "- ") removedMetadata <> - map (elem newPath "+ ") addedMetadata - where - elem p x (hq, r, otype) = do - num <- numHQ p hq r - pure (x <> num <> " " <> phq hq, ": " <> prettyType otype) - - prettyType = maybe (P.red "type not found") (TypePrinter.pretty ppe) - prettyDecl hq = - maybe (P.red "type not found") - (P.syntaxToColor . DeclPrinter.prettyDeclOrBuiltinHeader (HQ'.toHQ hq)) - phq' :: _ -> Pretty = P.syntaxToColor . prettyHashQualified' - phq :: _ -> Pretty = P.syntaxToColor . prettyHashQualified - -- - -- DeclPrinter.prettyDeclHeader : HQ -> Either - numPatch :: Path.Absolute -> Name -> Numbered Pretty - numPatch prefix name = - addNumberedArg . Name.toString . Name.makeAbsolute $ Path.prefixName prefix name - - numHQ :: Path.Absolute -> HQ.HashQualified -> Referent -> Numbered Pretty - numHQ prefix hq r = addNumberedArg (HQ.toString hq') - where - hq' = HQ.requalify (fmap (Name.makeAbsolute . Path.prefixName prefix) hq) r - - numHQ' :: Path.Absolute -> HQ'.HashQualified -> Referent -> Numbered Pretty - numHQ' prefix hq r = addNumberedArg (HQ'.toString hq') - where - hq' = HQ'.requalify (fmap (Name.makeAbsolute . 
Path.prefixName prefix) hq) r - - addNumberedArg :: String -> Numbered Pretty - addNumberedArg s = case sn of - ShowNumbers -> do - (n, args) <- State.get - State.put (n+1, args Seq.|> s) - pure $ padNumber (n+1) - HideNumbers -> pure mempty - - padNumber :: Int -> Pretty - padNumber n = P.hiBlack . P.rightPad leftNumsWidth $ P.shown n <> "." - - leftNumsWidth = length (show menuSize) + length ("." :: String) - -noResults :: Pretty -noResults = P.callout "😶" $ - P.wrap $ "No results. Check your spelling, or try using tab completion " - <> "to supply command arguments." - -listOfDefinitions' :: Var v - => PPE.PrettyPrintEnv -- for printing types of terms :-\ - -> E.ListDetailed - -> [SR'.SearchResult' v a] - -> Pretty -listOfDefinitions' ppe detailed results = - if null results then noResults - else P.lines . P.nonEmpty $ prettyNumberedResults : - [formatMissingStuff termsWithMissingTypes missingTypes - ,unlessM (null missingBuiltins) . bigproblem $ P.wrap - "I encountered an inconsistency in the codebase; these definitions refer to built-ins that this version of unison doesn't know about:" `P.hang` - P.column2 ( (P.bold "Name", P.bold "Built-in") - -- : ("-", "-") - : fmap (bimap (P.syntaxToColor . prettyHashQualified) - (P.text . 
Referent.toText)) missingBuiltins) - ] - where - prettyNumberedResults = P.numberedList prettyResults - -- todo: group this by namespace - prettyResults = - map (SR'.foldResult' renderTerm renderType) - (filter (not.missingType) results) - where - (renderTerm, renderType) = - if detailed then - (unsafePrettyTermResultSigFull' ppe, prettyTypeResultHeaderFull') - else - (unsafePrettyTermResultSig' ppe, prettyTypeResultHeader') - missingType (SR'.Tm _ Nothing _ _) = True - missingType (SR'.Tp _ (MissingThing _) _ _) = True - missingType _ = False - -- termsWithTypes = [(name,t) | (name, Just t) <- sigs0 ] - -- where sigs0 = (\(name, _, typ) -> (name, typ)) <$> terms - termsWithMissingTypes = - [ (HQ'.toHQ name, r) - | SR'.Tm name Nothing (Referent.Ref (Reference.DerivedId r)) _ <- results ] - missingTypes = nubOrdOn snd $ - [ (HQ'.toHQ name, Reference.DerivedId r) - | SR'.Tp name (MissingThing r) _ _ <- results ] <> - [ (HQ'.toHQ name, r) - | SR'.Tm name Nothing (Referent.toTypeReference -> Just r) _ <- results] - missingBuiltins = results >>= \case - SR'.Tm name Nothing r@(Referent.Ref (Reference.Builtin _)) _ -> [(HQ'.toHQ name,r)] - _ -> [] - -watchPrinter - :: Var v - => Text - -> PPE.PrettyPrintEnv - -> Ann - -> UF.WatchKind - -> Term v () - -> Runtime.IsCacheHit - -> Pretty -watchPrinter src ppe ann kind term isHit = - P.bracket - $ let - lines = Text.lines src - lineNum = fromMaybe 1 $ startingLine ann - lineNumWidth = length (show lineNum) - extra = " " <> replicate (length kind) ' ' -- for the ` | > ` after the line number - line = lines !! 
(lineNum - 1) - addCache p = if isHit then p <> " (cached)" else p - renderTest (Term.App' (Term.Constructor' _ id) (Term.Text' msg)) = - "\n" <> if id == DD.okConstructorId - then addCache - (P.green "✅ " <> P.bold "Passed" <> P.green (P.text msg')) - else if id == DD.failConstructorId - then addCache - (P.red "🚫 " <> P.bold "FAILED" <> P.red (P.text msg')) - else P.red "❓ " <> TermPrinter.pretty ppe term - where - msg' = if Text.take 1 msg == " " then msg - else " " <> msg - - renderTest x = - fromString $ "\n Unison bug: " <> show x <> " is not a test." - in - P.lines - [ fromString (show lineNum) <> " | " <> P.text line - , case (kind, term) of - (UF.TestWatch, Term.Sequence' tests) -> foldMap renderTest tests - _ -> P.lines - [ fromString (replicate lineNumWidth ' ') - <> fromString extra - <> (if isHit then id else P.purple) "⧩" - , P.indentN (lineNumWidth + length extra) - . (if isHit then id else P.bold) - $ TermPrinter.pretty ppe term - ] - ] - -filestatusTip :: Pretty -filestatusTip = tip "Use `help filestatus` to learn more." 
- -prettyDiff :: Names.Diff -> Pretty -prettyDiff diff = let - orig = Names.originalNames diff - adds = Names.addedNames diff - removes = Names.removedNames diff - - addedTerms = [ (n,r) | (n,r) <- R.toList (Names.terms0 adds) - , not $ R.memberRan r (Names.terms0 removes) ] - addedTypes = [ (n,r) | (n,r) <- R.toList (Names.types0 adds) - , not $ R.memberRan r (Names.types0 removes) ] - added = sort (hqTerms ++ hqTypes) - where - hqTerms = [ Names.hqName adds n (Right r) | (n, r) <- addedTerms ] - hqTypes = [ Names.hqName adds n (Left r) | (n, r) <- addedTypes ] - - removedTerms = [ (n,r) | (n,r) <- R.toList (Names.terms0 removes) - , not $ R.memberRan r (Names.terms0 adds) - , Set.notMember n addedTermsSet ] where - addedTermsSet = Set.fromList (map fst addedTerms) - removedTypes = [ (n,r) | (n,r) <- R.toList (Names.types0 removes) - , not $ R.memberRan r (Names.types0 adds) - , Set.notMember n addedTypesSet ] where - addedTypesSet = Set.fromList (map fst addedTypes) - removed = sort (hqTerms ++ hqTypes) - where - hqTerms = [ Names.hqName removes n (Right r) | (n, r) <- removedTerms ] - hqTypes = [ Names.hqName removes n (Left r) | (n, r) <- removedTypes ] - - movedTerms = [ (n,n2) | (n,r) <- R.toList (Names.terms0 removes) - , n2 <- toList (R.lookupRan r (Names.terms adds)) ] - movedTypes = [ (n,n2) | (n,r) <- R.toList (Names.types removes) - , n2 <- toList (R.lookupRan r (Names.types adds)) ] - moved = Name.sortNamed fst . 
nubOrd $ (movedTerms <> movedTypes) - - copiedTerms = List.multimap [ - (n,n2) | (n2,r) <- R.toList (Names.terms0 adds) - , not (R.memberRan r (Names.terms0 removes)) - , n <- toList (R.lookupRan r (Names.terms0 orig)) ] - copiedTypes = List.multimap [ - (n,n2) | (n2,r) <- R.toList (Names.types0 adds) - , not (R.memberRan r (Names.types0 removes)) - , n <- toList (R.lookupRan r (Names.types0 orig)) ] - copied = Name.sortNamed fst $ - Map.toList (Map.unionWith (<>) copiedTerms copiedTypes) - in - P.sepNonEmpty "\n\n" [ - if not $ null added then - P.lines [ - -- todo: split out updates - P.green "+ Adds / updates:", "", - P.indentN 2 . P.wrap $ - P.sep " " (P.syntaxToColor . prettyHashQualified' <$> added) - ] - else mempty, - if not $ null removed then - P.lines [ - P.hiBlack "- Deletes:", "", - P.indentN 2 . P.wrap $ - P.sep " " (P.syntaxToColor . prettyHashQualified' <$> removed) - ] - else mempty, - if not $ null moved then - P.lines [ - P.purple "> Moves:", "", - P.indentN 2 $ - P.column2 $ - (P.hiBlack "Original name", P.hiBlack "New name") : - [ (prettyName n,prettyName n2) | (n, n2) <- moved ] - ] - else mempty, - if not $ null copied then - P.lines [ - P.yellow "= Copies:", "", - P.indentN 2 $ - P.column2 $ - (P.hiBlack "Original name", P.hiBlack "New name(s)") : - [ (prettyName n, P.sep " " (prettyName <$> ns)) - | (n, ns) <- copied ] - ] - else mempty - ] - -prettyTermName :: PPE.PrettyPrintEnv -> Referent -> Pretty -prettyTermName ppe r = P.syntaxToColor $ - prettyHashQualified (PPE.termName ppe r) - -prettyRepoRevision :: RemoteRepo -> Pretty -prettyRepoRevision (RemoteRepo.GitRepo url treeish) = - P.blue (P.text url) <> prettyRevision treeish - where - prettyRevision treeish = - Monoid.fromMaybe $ - treeish <&> \treeish -> "at revision" <> P.blue (P.text treeish) - -prettyRepoBranch :: RemoteRepo -> Pretty -prettyRepoBranch (RemoteRepo.GitRepo url treeish) = - P.blue (P.text url) <> prettyRevision treeish - where - prettyRevision treeish = - 
Monoid.fromMaybe $ - treeish <&> \treeish -> "at branch" <> P.blue (P.text treeish) - -isTestOk :: Term v Ann -> Bool -isTestOk tm = case tm of - Term.Sequence' ts -> all isSuccess ts where - isSuccess (Term.App' (Term.Constructor' ref cid) _) = - cid == DD.okConstructorId && - ref == DD.testResultRef - isSuccess _ = False - _ -> False diff --git a/parser-typechecker/src/Unison/DeclPrinter.hs b/parser-typechecker/src/Unison/DeclPrinter.hs deleted file mode 100644 index b0f6f9ec84..0000000000 --- a/parser-typechecker/src/Unison/DeclPrinter.hs +++ /dev/null @@ -1,187 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.DeclPrinter where - -import Unison.Prelude - -import Data.List ( isPrefixOf ) -import qualified Data.Map as Map -import Unison.DataDeclaration ( DataDeclaration - , EffectDeclaration - , toDataDecl - ) -import qualified Unison.DataDeclaration as DD -import qualified Unison.ConstructorType as CT -import Unison.HashQualified ( HashQualified ) -import qualified Unison.HashQualified as HQ -import qualified Unison.Name as Name -import Unison.NamePrinter ( styleHashQualified'' ) -import Unison.PrettyPrintEnv ( PrettyPrintEnv ) -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Referent as Referent -import Unison.Reference ( Reference(DerivedId) ) -import qualified Unison.Util.SyntaxText as S -import Unison.Util.SyntaxText ( SyntaxText ) -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.TypePrinter as TypePrinter -import Unison.Util.Pretty ( Pretty ) -import qualified Unison.Util.Pretty as P -import Unison.Var ( Var ) -import qualified Unison.Var as Var - -prettyDecl - :: Var v - => PrettyPrintEnv - -> Reference - -> HashQualified - -> DD.Decl v a - -> Pretty SyntaxText -prettyDecl ppe r hq d = case d of - Left e -> prettyEffectDecl ppe r hq e - Right dd -> prettyDataDecl ppe r hq dd - -prettyEffectDecl - :: Var v - => PrettyPrintEnv - -> Reference - -> HashQualified - -> 
EffectDeclaration v a - -> Pretty SyntaxText -prettyEffectDecl ppe r name = prettyGADT ppe r name . toDataDecl - -prettyGADT - :: Var v - => PrettyPrintEnv - -> Reference - -> HashQualified - -> DataDeclaration v a - -> Pretty SyntaxText -prettyGADT env r name dd = P.hang header . P.lines $ constructor <$> zip - [0 ..] - (DD.constructors' dd) - where - constructor (n, (_, _, t)) = - prettyPattern env r name n - <> (fmt S.TypeAscriptionColon " :") - `P.hang` TypePrinter.pretty0 env Map.empty (-1) t - header = prettyEffectHeader name (DD.EffectDeclaration dd) <> (fmt S.ControlKeyword " where") - -prettyPattern - :: PrettyPrintEnv -> Reference -> HashQualified -> Int -> Pretty SyntaxText -prettyPattern env r namespace n = styleHashQualified'' (fmt S.Constructor) - ( HQ.stripNamespace (fromMaybe "" $ Name.toText <$> HQ.toName namespace) - $ PPE.patternName env r n - ) - -prettyDataDecl - :: Var v - => PrettyPrintEnv - -> Reference - -> HashQualified - -> DataDeclaration v a - -> Pretty SyntaxText -prettyDataDecl env r name dd = - (header <>) . P.sep (fmt S.DelimiterChar (" | " `P.orElse` "\n | ")) $ constructor <$> zip - [0 ..] - (DD.constructors' dd) - where - constructor (n, (_, _, (Type.ForallsNamed' _ t))) = constructor' n t - constructor (n, (_, _, t) ) = constructor' n t - constructor' n t = case Type.unArrows t of - Nothing -> prettyPattern env r name n - Just ts -> case fieldNames env r name dd of - Nothing -> P.group . 
P.hang' (prettyPattern env r name n) " " - $ P.spaced (TypePrinter.prettyRaw env Map.empty 10 <$> init ts) - Just fs -> P.group $ (fmt S.DelimiterChar "{ ") - <> P.sep ((fmt S.DelimiterChar ",") <> " " `P.orElse` "\n ") - (field <$> zip fs (init ts)) - <> (fmt S.DelimiterChar " }") - field (fname, typ) = P.group $ styleHashQualified'' (fmt S.Constructor) fname <> - (fmt S.TypeAscriptionColon " :") `P.hang` TypePrinter.prettyRaw env Map.empty (-1) typ - header = prettyDataHeader name dd <> (fmt S.DelimiterChar (" = " `P.orElse` "\n = ")) - --- Comes up with field names for a data declaration which has the form of a --- record, like `type Pt = { x : Int, y : Int }`. Works by generating the --- record accessor terms for the data type, hashing these terms, and then --- checking the `PrettyPrintEnv` for the names of those hashes. If the names for --- these hashes are: --- --- `Pt.x`, `Pt.x.set`, `Pt.x.modify`, `Pt.y`, `Pt.y.set`, `Pt.y.modify` --- --- then this matches the naming convention generated by the parser, and we --- return `x` and `y` as the field names. --- --- This function bails with `Nothing` if the names aren't an exact match for --- the expected record naming convention. -fieldNames - :: forall v a . Var v - => PrettyPrintEnv - -> Reference - -> HashQualified - -> DataDeclaration v a - -> Maybe [HashQualified] -fieldNames env r name dd = case DD.constructors dd of - [(_, typ)] -> let - vars :: [v] - vars = [ Var.freshenId (fromIntegral n) (Var.named "_") | n <- [0..Type.arity typ - 1]] - accessors = DD.generateRecordAccessors (map (,()) vars) (HQ.toVar name) r - hashes = Term.hashComponents (Map.fromList accessors) - names = [ (r, HQ.toString . PPE.termName env . Referent.Ref $ DerivedId r) - | r <- fst <$> Map.elems hashes ] - fieldNames = Map.fromList - [ (r, f) | (r, n) <- names - , typename <- pure (HQ.toString name) - , typename `isPrefixOf` n - -- drop the typename and the following '.' 
- , rest <- pure $ drop (length typename + 1) n - , (f, rest) <- pure $ span (/= '.') rest - , rest `elem` ["",".set",".modify"] ] - in if Map.size fieldNames == length names then - Just [ HQ.unsafeFromString name - | v <- vars - , Just (ref, _) <- [Map.lookup (Var.namespaced [HQ.toVar name, v]) hashes] - , Just name <- [Map.lookup ref fieldNames] ] - else Nothing - _ -> Nothing - -prettyModifier :: DD.Modifier -> Pretty SyntaxText -prettyModifier DD.Structural = mempty -prettyModifier (DD.Unique _uid) = - fmt S.DataTypeModifier "unique" -- <> ("[" <> P.text uid <> "] ") - -prettyDataHeader :: Var v => HashQualified -> DD.DataDeclaration v a -> Pretty SyntaxText -prettyDataHeader name dd = - P.sepNonEmpty " " [ - prettyModifier (DD.modifier dd), - fmt S.DataTypeKeyword "type", - styleHashQualified'' (fmt $ S.HashQualifier name) name, - P.sep " " (fmt S.DataTypeParams . P.text . Var.name <$> DD.bound dd) ] - -prettyEffectHeader :: Var v => HashQualified -> DD.EffectDeclaration v a -> Pretty SyntaxText -prettyEffectHeader name ed = P.sepNonEmpty " " [ - prettyModifier (DD.modifier (DD.toDataDecl ed)), - fmt S.DataTypeKeyword "ability", - styleHashQualified'' (fmt $ S.HashQualifier name) name, - P.sep " " (fmt S.DataTypeParams . P.text . 
Var.name <$> DD.bound (DD.toDataDecl ed)) ] - -prettyDeclHeader - :: Var v - => HashQualified - -> Either (DD.EffectDeclaration v a) (DD.DataDeclaration v a) - -> Pretty SyntaxText -prettyDeclHeader name (Left e) = prettyEffectHeader name e -prettyDeclHeader name (Right d) = prettyDataHeader name d - -prettyDeclOrBuiltinHeader - :: Var v - => HashQualified - -> DD.DeclOrBuiltin v a - -> Pretty SyntaxText -prettyDeclOrBuiltinHeader name (DD.Builtin ctype) = case ctype of - CT.Data -> fmt S.DataTypeKeyword "builtin type " <> styleHashQualified'' (fmt $ S.HashQualifier name) name - CT.Effect -> fmt S.DataTypeKeyword "builtin ability " <> styleHashQualified'' (fmt $ S.HashQualifier name) name -prettyDeclOrBuiltinHeader name (DD.Decl e) = prettyDeclHeader name e - -fmt :: S.Element -> Pretty S.SyntaxText -> Pretty S.SyntaxText -fmt = P.withSyntax diff --git a/parser-typechecker/src/Unison/FileParser.hs b/parser-typechecker/src/Unison/FileParser.hs deleted file mode 100644 index 5cb9972d0a..0000000000 --- a/parser-typechecker/src/Unison/FileParser.hs +++ /dev/null @@ -1,289 +0,0 @@ -{-# Language DeriveTraversable #-} -{-# Language OverloadedStrings #-} - -module Unison.FileParser where - -import Unison.Prelude - -import qualified Unison.ABT as ABT -import Control.Lens -import Control.Monad.Reader (local, asks) -import qualified Data.Map as Map -import Prelude hiding (readFile) -import qualified Text.Megaparsec as P -import Unison.DataDeclaration (DataDeclaration, EffectDeclaration) -import qualified Unison.DataDeclaration as DD -import qualified Unison.Lexer as L -import Unison.Parser -import Unison.Term (Term) -import qualified Unison.Term as Term -import qualified Unison.TermParser as TermParser -import Unison.Type (Type) -import qualified Unison.Type as Type -import qualified Unison.TypeParser as TypeParser -import Unison.UnisonFile (UnisonFile(..), environmentFor) -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.List as List -import Unison.Var 
(Var) -import qualified Unison.Var as Var -import qualified Unison.Names3 as Names -import qualified Unison.Name as Name - -resolutionFailures :: Ord v => [Names.ResolutionFailure v Ann] -> P v x -resolutionFailures es = P.customFailure (ResolutionFailures es) - -file :: forall v . Var v => P v (UnisonFile v Ann) -file = do - _ <- openBlock - -- The file may optionally contain top-level imports, - -- which are parsed and applied to the type decls and term stanzas - (namesStart, imports) <- TermParser.imports <* optional semi - (dataDecls, effectDecls, parsedAccessors) <- declarations - env <- case environmentFor (Names.currentNames namesStart) dataDecls effectDecls of - Right (Right env) -> pure env - Right (Left es) -> P.customFailure $ TypeDeclarationErrors es - Left es -> resolutionFailures (toList es) - let importNames = [(Name.fromVar v, Name.fromVar v2) | (v,v2) <- imports ] - let locals = Names.importing0 importNames (UF.names env) - local (\e -> e { names = Names.push locals namesStart }) $ do - names <- asks names - stanzas0 <- local (\e -> e { names = names }) $ sepBy semi stanza - let stanzas = fmap (TermParser.substImports names imports) <$> stanzas0 - _ <- closeBlock - let (termsr, watchesr) = foldl' go ([], []) stanzas - go (terms, watches) s = case s of - WatchBinding kind _ ((_, v), at) -> - (terms, (kind,(v,Term.generalizeTypeSignatures at)) : watches) - WatchExpression kind guid _ at -> - (terms, (kind, (Var.unnamedTest guid, Term.generalizeTypeSignatures at)) : watches) - Binding ((_, v), at) -> ((v,Term.generalizeTypeSignatures at) : terms, watches) - Bindings bs -> ([(v,Term.generalizeTypeSignatures at) | ((_,v), at) <- bs ] ++ terms, watches) - let (terms, watches) = (reverse termsr, reverse watchesr) - -- local term bindings shadow any same-named thing from the outer codebase scope - let locals = stanzas0 >>= getVars - let curNames = Names.deleteTerms0 (Name.fromVar <$> locals) (Names.currentNames names) - terms <- case List.validate 
(traverse $ Term.bindSomeNames curNames) terms of - Left es -> resolutionFailures (toList es) - Right terms -> pure terms - watches <- case List.validate (traverse . traverse $ Term.bindSomeNames curNames) watches of - Left es -> resolutionFailures (toList es) - Right ws -> pure ws - let toPair (tok, _) = (L.payload tok, ann tok) - accessors = - [ DD.generateRecordAccessors (toPair <$> fields) (L.payload typ) r - | (typ, fields) <- parsedAccessors - , Just (r,_) <- [Map.lookup (L.payload typ) (UF.datas env)] - ] - uf = UnisonFileId (UF.datasId env) (UF.effectsId env) (terms <> join accessors) - (List.multimap watches) - pure uf - --- A stanza is either a watch expression like: --- > 1 + x --- > z = x + 1 --- Or it is a binding like: --- foo : Nat -> Nat --- foo x = x + 42 --- Or it is a namespace like: --- namespace Woot where --- x = 42 --- y = 17 --- which parses as [(Woot.x, 42), (Woot.y, 17)] - -data Stanza v term - = WatchBinding UF.WatchKind Ann ((Ann, v), term) - | WatchExpression UF.WatchKind Text Ann term - | Binding ((Ann, v), term) - | Bindings [((Ann, v), term)] deriving (Foldable, Traversable, Functor) - -getVars :: Var v => Stanza v term -> [v] -getVars = \case - WatchBinding _ _ ((_,v), _) -> [v] - WatchExpression _ guid _ _ -> [Var.unnamedTest guid] - Binding ((_,v), _) -> [v] - Bindings bs -> [ v | ((_,v), _) <- bs ] - -stanza :: Var v => P v (Stanza v (Term v Ann)) -stanza = watchExpression <|> unexpectedAction <|> binding <|> namespace - where - unexpectedAction = failureIf (TermParser.blockTerm $> getErr) binding - getErr = do - t <- anyToken - t2 <- optional anyToken - P.customFailure $ DidntExpectExpression t t2 - watchExpression = do - (kind, guid, ann) <- watched - _ <- closed - msum [ - WatchBinding kind ann <$> TermParser.binding, - WatchExpression kind guid ann <$> TermParser.blockTerm ] - binding = Binding <$> TermParser.binding - namespace = tweak <$> TermParser.namespaceBlock where - tweak ns = Bindings (TermParser.toBindings [ns]) - 
-watched :: Var v => P v (UF.WatchKind, Text, Ann) -watched = P.try $ do - kind <- optional wordyIdString - guid <- uniqueName 10 - op <- optional (L.payload <$> P.lookAhead symbolyIdString) - guard (op == Just ">") - tok <- anyToken - guard $ maybe True (`L.touches` tok) kind - pure (maybe UF.RegularWatch L.payload kind, guid, maybe mempty ann kind <> ann tok) - -closed :: Var v => P v () -closed = P.try $ do - op <- optional (L.payload <$> P.lookAhead closeBlock) - case op of Just () -> P.customFailure EmptyWatch - _ -> pure () - --- The parsed form of record accessors, as in: --- --- type Additive a = { zero : a, (+) : a -> a -> a } --- --- The `Token v` is the variable name and location (here `zero` and `(+)`) of --- each field, and the type is the type of that field -type Accessors v = [(L.Token v, [(L.Token v, Type v Ann)])] - -declarations :: Var v => P v - (Map v (DataDeclaration v Ann), - Map v (EffectDeclaration v Ann), - Accessors v) -declarations = do - declarations <- many $ declaration <* optional semi - let (dataDecls0, effectDecls) = partitionEithers declarations - dataDecls = [(a,b) | (a,b,_) <- dataDecls0 ] - multimap :: Ord k => [(k,v)] -> Map k [v] - multimap = foldl' mi Map.empty - mi m (k,v) = Map.insertWith (++) k [v] m - mds = multimap dataDecls - mes = multimap effectDecls - mdsBad = Map.filter (\xs -> length xs /= 1) mds - mesBad = Map.filter (\xs -> length xs /= 1) mes - if Map.null mdsBad && Map.null mesBad then - pure (Map.fromList dataDecls, - Map.fromList effectDecls, - join . map (view _3) $ dataDecls0) - else - P.customFailure . DuplicateTypeNames $ - [ (v, DD.annotation <$> ds) | (v, ds) <- Map.toList mdsBad ] <> - [ (v, DD.annotation . 
DD.toDataDecl <$> es) | (v, es) <- Map.toList mesBad ] - -modifier :: Var v => P v (L.Token DD.Modifier) -modifier = do - o <- optional (openBlockWith "unique") - case o of - Nothing -> fmap (const DD.Structural) <$> P.lookAhead anyToken - Just tok -> do - uid <- do - o <- optional (reserved "[" *> wordyIdString <* reserved "]") - case o of - Nothing -> uniqueName 32 - Just uid -> pure (fromString . L.payload $ uid) - pure (DD.Unique uid <$ tok) - -declaration :: Var v - => P v (Either (v, DataDeclaration v Ann, Accessors v) - (v, EffectDeclaration v Ann)) -declaration = do - mod <- modifier - fmap Right (effectDeclaration mod) <|> fmap Left (dataDeclaration mod) - -dataDeclaration - :: forall v - . Var v - => L.Token DD.Modifier - -> P v (v, DataDeclaration v Ann, Accessors v) -dataDeclaration mod = do - _ <- fmap void (reserved "type") <|> openBlockWith "type" - (name, typeArgs) <- - (,) <$> TermParser.verifyRelativeVarName prefixDefinitionName - <*> many (TermParser.verifyRelativeVarName prefixDefinitionName) - let typeArgVs = L.payload <$> typeArgs - eq <- reserved "=" - let - -- go gives the type of the constructor, given the types of - -- the constructor arguments, e.g. Cons becomes forall a . a -> List a -> List a - go :: L.Token v -> [Type v Ann] -> (Ann, v, Type v Ann) - go ctorName ctorArgs = let - arrow i o = Type.arrow (ann i <> ann o) i o - app f arg = Type.app (ann f <> ann arg) f arg - -- ctorReturnType e.g. `Optional a` - ctorReturnType = foldl' app (tok Type.var name) (tok Type.var <$> typeArgs) - -- ctorType e.g. 
`a -> Optional a` - -- or just `Optional a` in the case of `None` - ctorType = foldr arrow ctorReturnType ctorArgs - ctorAnn = ann ctorName <> - (if null ctorArgs then mempty else ann (last ctorArgs)) - in (ann ctorName, Var.namespaced [L.payload name, L.payload ctorName], - Type.foralls ctorAnn typeArgVs ctorType) - prefixVar = TermParser.verifyRelativeVarName prefixDefinitionName - dataConstructor = go <$> prefixVar <*> many TypeParser.valueTypeLeaf - record = do - _ <- openBlockWith "{" - fields <- sepBy1 (reserved "," <* optional semi) $ - liftA2 (,) (prefixVar <* reserved ":") TypeParser.valueType - _ <- closeBlock - pure ([go name (snd <$> fields)], [(name, fields)]) - (constructors, accessors) <- - msum [record, (,[]) <$> sepBy (reserved "|") dataConstructor] - _ <- closeBlock - let -- the annotation of the last constructor if present, - -- otherwise ann of name - closingAnn :: Ann - closingAnn = last (ann eq : ((\(_,_,t) -> ann t) <$> constructors)) - pure (L.payload name, - DD.mkDataDecl' (L.payload mod) (ann mod <> closingAnn) typeArgVs constructors, - accessors) - -effectDeclaration - :: Var v => L.Token DD.Modifier -> P v (v, EffectDeclaration v Ann) -effectDeclaration mod = do - _ <- fmap void (reserved "ability") <|> openBlockWith "ability" - name <- TermParser.verifyRelativeVarName prefixDefinitionName - typeArgs <- many (TermParser.verifyRelativeVarName prefixDefinitionName) - let typeArgVs = L.payload <$> typeArgs - blockStart <- openBlockWith "where" - constructors <- sepBy semi (constructor typeArgs name) - -- `ability` opens a block, as does `where` - _ <- closeBlock <* closeBlock - let closingAnn = - last $ ann blockStart : ((\(_, _, t) -> ann t) <$> constructors) - pure - ( L.payload name - , DD.mkEffectDecl' (L.payload mod) - (ann mod <> closingAnn) - typeArgVs - constructors - ) - where - constructor - :: Var v => [L.Token v] -> L.Token v -> P v (Ann, v, Type v Ann) - constructor typeArgs name = - explodeToken - <$> 
TermParser.verifyRelativeVarName prefixDefinitionName - <* reserved ":" - <*> ( Type.generalizeLowercase mempty - . ensureEffect - <$> TypeParser.computationType - ) - where - explodeToken v t = (ann v, Var.namespaced [L.payload name, L.payload v], t) - -- If the effect is not syntactically present in the constructor types, - -- add them after parsing. - ensureEffect t = case t of - Type.Effect' _ _ -> modEffect t - x -> Type.editFunctionResult modEffect x - modEffect t = case t of - Type.Effect' es t -> go es t - t -> go [] t - toTypeVar t = Type.av' (ann t) (Var.name $ L.payload t) - headIs t v = case t of - Type.Apps' (Type.Var' x) _ -> x == v - Type.Var' x -> x == v - _ -> False - go es t = - let es' = if any (`headIs` L.payload name) es - then es - else Type.apps' (toTypeVar name) (toTypeVar <$> typeArgs) : es - in Type.cleanupAbilityLists $ Type.effect (ABT.annotation t) es' t diff --git a/parser-typechecker/src/Unison/FileParsers.hs b/parser-typechecker/src/Unison/FileParsers.hs deleted file mode 100644 index aee5ea3546..0000000000 --- a/parser-typechecker/src/Unison/FileParsers.hs +++ /dev/null @@ -1,196 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE UnicodeSyntax #-} - -module Unison.FileParsers where - -import Unison.Prelude - -import Control.Lens (view, _3) -import qualified Unison.Parser as Parser -import Control.Monad.State (evalStateT) -import Control.Monad.Writer (tell) -import Data.Bifunctor ( first ) -import qualified Data.Foldable as Foldable -import qualified Data.Map as Map -import Data.List (partition) -import qualified Data.Set as Set -import qualified Data.Sequence as Seq -import Data.Text (unpack) -import qualified Unison.ABT as ABT -import qualified Unison.Blank as Blank -import qualified Unison.Name as Name -import qualified Unison.Names3 as Names -import Unison.Parser (Ann) -import qualified Unison.Parsers as Parsers -import qualified Unison.Referent as Referent -import Unison.Reference 
(Reference) -import Unison.Result (Note (..), Result, pattern Result, ResultT, CompilerBug(..)) -import qualified Unison.Result as Result -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.Typechecker as Typechecker -import qualified Unison.Typechecker.TypeLookup as TL -import qualified Unison.Typechecker.Context as Context -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.List as List -import qualified Unison.Util.Relation as Rel -import Unison.Var (Var) -import qualified Unison.Var as Var -import Unison.Names3 (Names0) - -type Term v = Term.Term v Ann -type Type v = Type.Type v Ann -type UnisonFile v = UF.UnisonFile v Ann -type Result' v = Result (Seq (Note v Ann)) - -convertNotes :: Ord v => Typechecker.Notes v ann -> Seq (Note v ann) -convertNotes (Typechecker.Notes bugs es is) = - (CompilerBug . TypecheckerBug <$> bugs) <> (TypeError <$> es) <> (TypeInfo <$> Seq.fromList is') where - is' = snd <$> List.uniqueBy' f ([(1::Word)..] 
`zip` Foldable.toList is) - f (_, Context.TopLevelComponent cs) = Right [ v | (v,_,_) <- cs ] - f (i, _) = Left i - -- each round of TDNR emits its own TopLevelComponent notes, so we remove - -- duplicates (based on var name and location), preferring the later note as - -- that will have the latest typechecking info - -parseAndSynthesizeFile - :: (Var v, Monad m) - => [Type v] - -> (Set Reference -> m (TL.TypeLookup v Ann)) - -> Parser.ParsingEnv - -> FilePath - -> Text - -> ResultT - (Seq (Note v Ann)) - m - (Either Names0 (UF.TypecheckedUnisonFile v Ann)) -parseAndSynthesizeFile ambient typeLookupf env filePath src = do - uf <- Result.fromParsing $ Parsers.parseFile filePath (unpack src) env - let names0 = Names.currentNames (Parser.names env) - (tm, tdnrMap, typeLookup) <- resolveNames typeLookupf names0 uf - let (Result notes' r) = synthesizeFile ambient typeLookup tdnrMap uf tm - tell notes' $> maybe (Left (UF.toNames uf )) Right r - -type TDNRMap v = Map Typechecker.Name [Typechecker.NamedReference v Ann] - -resolveNames - :: (Var v, Monad m) - => (Set Reference -> m (TL.TypeLookup v Ann)) - -> Names.Names0 - -> UnisonFile v - -> ResultT - (Seq (Note v Ann)) - m - (Term v, TDNRMap v, TL.TypeLookup v Ann) -resolveNames typeLookupf preexistingNames uf = do - let tm = UF.typecheckingTerm uf - deps = Term.dependencies tm - possibleDeps = [ (Name.toText name, Var.name v, r) | - (name, r) <- Rel.toList (Names.terms0 preexistingNames), - v <- Set.toList (Term.freeVars tm), - Name.unqualified name == Name.unqualified (Name.fromVar v) ] - possibleRefs = Referent.toReference . view _3 <$> possibleDeps - tl <- lift . lift . 
fmap (UF.declsToTypeLookup uf <>) - $ typeLookupf (deps <> Set.fromList possibleRefs) - let fqnsByShortName = List.multimap $ - [ (shortname, nr) | - (name, shortname, r) <- possibleDeps, - typ <- toList $ TL.typeOfReferent tl r, - let nr = Typechecker.NamedReference name typ (Right r) ] <> - [ (shortname, nr) | - (name, r) <- Rel.toList (Names.terms0 $ UF.toNames uf), - typ <- toList $ TL.typeOfReferent tl r, - let shortname = Name.toText $ Name.unqualified name, - let nr = Typechecker.NamedReference (Name.toText name) typ (Right r) ] - pure (tm, fqnsByShortName, tl) - -synthesizeFile' - :: forall v - . Var v - => [Type v] - -> TL.TypeLookup v Ann - -> UnisonFile v - -> Result (Seq (Note v Ann)) (UF.TypecheckedUnisonFile v Ann) -synthesizeFile' ambient tl uf = - synthesizeFile ambient tl mempty uf $ UF.typecheckingTerm uf - -synthesizeFile - :: forall v - . Var v - => [Type v] - -> TL.TypeLookup v Ann - -> TDNRMap v - -> UnisonFile v - -> Term v - -> Result (Seq (Note v Ann)) (UF.TypecheckedUnisonFile v Ann) -synthesizeFile ambient tl fqnsByShortName uf term = do - let -- substitute Blanks for any remaining free vars in UF body - tdnrTerm = Term.prepareTDNR term - env0 = Typechecker.Env ambient tl fqnsByShortName - Result notes mayType = - evalStateT (Typechecker.synthesizeAndResolve env0) tdnrTerm - -- If typechecking succeeded, reapply the TDNR decisions to user's term: - Result (convertNotes notes) mayType >>= \_typ -> do - let infos = Foldable.toList $ Typechecker.infos notes - (topLevelComponents :: [[(v, Term v, Type v)]]) <- - let - topLevelBindings :: Map v (Term v) - topLevelBindings = Map.mapKeys Var.reset $ extractTopLevelBindings tdnrTerm - extractTopLevelBindings (Term.LetRecNamedAnnotatedTop' True _ bs body) = - Map.fromList (first snd <$> bs) <> extractTopLevelBindings body - extractTopLevelBindings _ = Map.empty - tlcsFromTypechecker = - List.uniqueBy' (fmap vars) - [ t | Context.TopLevelComponent t <- infos ] - where vars (v, _, _) = v - 
strippedTopLevelBinding (v, typ, redundant) = do - tm <- case Map.lookup v topLevelBindings of - Nothing -> - Result.compilerBug $ Result.TopLevelComponentNotFound v term - Just (Term.Ann' x _) | redundant -> pure x - Just x -> pure x - -- The Var.reset removes any freshening added during typechecking - pure (Var.reset v, tm, typ) - in - -- use tlcsFromTypechecker to inform annotation-stripping decisions - traverse (traverse strippedTopLevelBinding) tlcsFromTypechecker - let doTdnr = applyTdnrDecisions infos - doTdnrInComponent (v, t, tp) = (\t -> (v, t, tp)) <$> doTdnr t - _ <- doTdnr tdnrTerm - tdnredTlcs <- (traverse . traverse) doTdnrInComponent topLevelComponents - let (watches', terms') = partition isWatch tdnredTlcs - isWatch = all (\(v,_,_) -> Set.member v watchedVars) - watchedVars = Set.fromList [ v | (v, _) <- UF.allWatches uf ] - tlcKind [] = error "empty TLC, should never occur" - tlcKind tlc@((v,_,_):_) = let - hasE k = - elem v . fmap fst $ Map.findWithDefault [] k (UF.watches uf) - in case Foldable.find hasE (Map.keys $ UF.watches uf) of - Nothing -> error "wat" - Just kind -> (kind, tlc) - pure $ UF.typecheckedUnisonFile - (UF.dataDeclarationsId uf) - (UF.effectDeclarationsId uf) - terms' - (map tlcKind watches') - where - applyTdnrDecisions - :: [Context.InfoNote v Ann] - -> Term v - -> Result' v (Term v) - applyTdnrDecisions infos tdnrTerm = foldM go tdnrTerm decisions - where - -- UF data/effect ctors + builtins + TLC Term.vars - go term _decision@(shortv, loc, replacement) = - ABT.visit (resolve shortv loc replacement) term - decisions = - [ (v, loc, replacement) | Context.Decision v loc replacement <- infos ] - -- resolve (v,loc) in a matching Blank to whatever `fqn` maps to in `names` - resolve shortv loc replacement t = case t of - Term.Blank' (Blank.Recorded (Blank.Resolve loc' name)) - | loc' == loc && Var.nameStr shortv == name -> - -- loc of replacement already chosen correctly by whatever made the - -- Decision - pure . 
pure $ replacement - _ -> Nothing diff --git a/parser-typechecker/src/Unison/Lexer.hs b/parser-typechecker/src/Unison/Lexer.hs deleted file mode 100644 index ff3918cfae..0000000000 --- a/parser-typechecker/src/Unison/Lexer.hs +++ /dev/null @@ -1,777 +0,0 @@ -{-# LANGUAGE OverloadedLists #-} -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveTraversable #-} - -module Unison.Lexer where - -import Unison.Prelude - -import Control.Lens.TH (makePrisms) -import qualified Control.Monad.State as S -import Data.Char -import Data.List -import qualified Data.List.NonEmpty as Nel -import Unison.Util.Monoid (intercalateMap) -import qualified Data.Map.Strict as Map -import qualified Data.Set as Set -import GHC.Exts (sortWith) -import Text.Megaparsec.Error (ShowToken(..)) -import Unison.ShortHash ( ShortHash ) -import qualified Unison.ShortHash as SH - -data Err - = InvalidWordyId String - | InvalidSymbolyId String - | InvalidShortHash String - | Both Err Err - | MissingFractional String -- ex `1.` rather than `1.04` - | MissingExponent String -- ex `1e` rather than `1e3` - | UnknownLexeme - | TextLiteralMissingClosingQuote String - | InvalidEscapeCharacter Char - | LayoutError - | CloseWithoutMatchingOpen String String -- open, close - deriving (Eq,Ord,Show) -- richer algebra - --- Design principle: --- `[Lexeme]` should be sufficient information for parsing without --- further knowledge of spacing or indentation levels --- any knowledge of comments -data Lexeme - = Open String -- start of a block - | Semi IsVirtual -- separator between elements of a block - | Close -- end of a block - | Reserved String -- reserved tokens such as `{`, `(`, `type`, `of`, etc - | Textual String -- text literals, `"foo bar"` - | Character Char -- character literals, `?X` - | Backticks String (Maybe ShortHash) -- an identifier in backticks - | WordyId String (Maybe ShortHash) -- a (non-infix) identifier 
- | SymbolyId String (Maybe ShortHash) -- an infix identifier - | Blank String -- a typed hole or placeholder - | Numeric String -- numeric literals, left unparsed - | Hash ShortHash -- hash literals - | Err Err - deriving (Eq,Show,Ord) - -type IsVirtual = Bool -- is it a virtual semi or an actual semi? - -makePrisms ''Lexeme - -simpleWordyId :: String -> Lexeme -simpleWordyId = flip WordyId Nothing - -simpleSymbolyId :: String -> Lexeme -simpleSymbolyId = flip SymbolyId Nothing - -data Token a = Token { - payload :: a, - start :: Pos, - end :: Pos -} deriving (Eq, Ord, Show, Functor) - -notLayout :: Token Lexeme -> Bool -notLayout t = case payload t of - Close -> False - Semi _ -> False - Open _ -> False - _ -> True - -instance ShowToken (Token Lexeme) where - showTokens xs = - join . Nel.toList . S.evalState (traverse go xs) . end $ Nel.head xs - where - go :: Token Lexeme -> S.State Pos String - go tok = do - prev <- S.get - S.put $ end tok - pure $ pad prev (start tok) ++ pretty (payload tok) - pretty (Open s) = s - pretty (Reserved w) = w - pretty (Textual t) = '"' : t ++ ['"'] - pretty (Character c) = - case showEscapeChar c of - Just c -> "?\\" ++ [c] - Nothing -> '?' 
: [c] - pretty (Backticks n h) = - '`' : n ++ (toList h >>= SH.toString) ++ ['`'] - pretty (WordyId n h) = n ++ (toList h >>= SH.toString) - pretty (SymbolyId n h) = n ++ (toList h >>= SH.toString) - pretty (Blank s) = "_" ++ s - pretty (Numeric n) = n - pretty (Hash sh) = show sh - pretty (Err e) = show e - pretty Close = "" - pretty (Semi True) = "" - pretty (Semi False) = ";" - pad (Pos line1 col1) (Pos line2 col2) = - if line1 == line2 - then replicate (col2 - col1) ' ' - else replicate (line2 - line1) '\n' ++ replicate col2 ' ' - -instance Applicative Token where - pure a = Token a (Pos 0 0) (Pos 0 0) - Token f start _ <*> Token a _ end = Token (f a) start end - -type Line = Int -type Column = Int - -data Pos = Pos {-# Unpack #-} !Line {-# Unpack #-} !Column deriving (Eq,Ord,Show) - -instance Semigroup Pos where (<>) = mappend - -instance Monoid Pos where - mempty = Pos 0 0 - Pos line col `mappend` Pos line2 col2 = - if line2 == 0 then Pos line (col + col2) - else Pos (line + line2) col2 - -line :: Pos -> Line -line (Pos line _) = line - -column :: Pos -> Column -column (Pos _ column) = column - --- `True` if the tokens are adjacent, with no space separating the two -touches :: Token a -> Token b -> Bool -touches (end -> t) (start -> t2) = - line t == line t2 && column t == column t2 - -type BlockName = String -type Layout = [(BlockName,Column)] - -top :: Layout -> Column -top [] = 1 -top ((_,h):_) = h - --- todo: make Layout a NonEmpty -topBlockName :: Layout -> Maybe BlockName -topBlockName [] = Nothing -topBlockName ((name,_):_) = Just name - -topHasClosePair :: Layout -> Bool -topHasClosePair [] = False -topHasClosePair ((name,_):_) = name == "{" || name == "(" - -findNearest :: Layout -> Set BlockName -> Maybe BlockName -findNearest l ns = - case topBlockName l of - Just n -> if Set.member n ns then Just n else findNearest (pop l) ns - Nothing -> Nothing - -pop :: [a] -> [a] -pop = drop 1 - -topLeftCorner :: Pos -topLeftCorner = Pos 1 1 - -data T a = T a 
[T a] [a] | L a deriving (Functor, Foldable, Traversable) - -headToken :: T a -> a -headToken (T a _ _) = a -headToken (L a) = a - -instance Show a => Show (T a) where - show (L a) = show a - show (T open mid close) = - show open ++ "\n" - ++ indent " " (intercalateMap "\n" show mid) ++ "\n" - ++ intercalateMap "" show close - where - indent by s = by ++ (s >>= go by) - go by '\n' = '\n' : by - go _ c = [c] - -reorderTree :: ([T a] -> [T a]) -> T a -> T a -reorderTree _ l@(L _) = l -reorderTree f (T open mid close) = T open (f (reorderTree f <$> mid)) close - -tree :: [Token Lexeme] -> T (Token Lexeme) -tree toks = one toks const where - one (open@(payload -> Open _) : ts) k = many (T open) [] ts k - one (t : ts) k = k (L t) ts - one [] k = k lastErr [] where - lastErr = case drop (length toks - 1) toks of - [] -> L (Token (Err LayoutError) topLeftCorner topLeftCorner) - (t : _) -> L $ t { payload = Err LayoutError } - - many open acc [] k = k (open (reverse acc) []) [] - many open acc (t@(payload -> Close) : ts) k = k (open (reverse acc) [t]) ts - many open acc ts k = one ts $ \t ts -> many open (t:acc) ts k - -stanzas :: [T (Token Lexeme)] -> [[T (Token Lexeme)]] -stanzas = go [] where - go acc [] = [reverse acc] - go acc (t:ts) = case payload $ headToken t of - Semi _ -> reverse (t : acc) : go [] ts - _ -> go (t:acc) ts - --- Moves type and effect declarations to the front of the token stream --- and move `use` statements to the front of each block -reorder :: [T (Token Lexeme)] -> [T (Token Lexeme)] -reorder = join . sortWith f . 
stanzas - where - f [] = 3 :: Int - f (t : _) = case payload $ headToken t of - Open "type" -> 1 - Open "unique" -> 1 - Open "ability" -> 1 - Reserved "use" -> 0 - _ -> 3 :: Int - -lexer :: String -> String -> [Token Lexeme] -lexer scope rem = - let t = tree $ lexer0 scope rem - -- after reordering can end up with trailing semicolon at the end of - -- a block, which we remove with this pass - fixup ((payload -> Semi _) : t@(payload -> Close) : tl) = t : fixup tl - fixup [] = [] - fixup (h : t) = h : fixup t - in fixup . toList $ reorderTree reorder t - -lexer0 :: String -> String -> [Token Lexeme] -lexer0 scope rem = - tweak $ Token (Open scope) topLeftCorner topLeftCorner - : pushLayout scope [] topLeftCorner rem - where - -- hacky postprocessing pass to do some cleanup of stuff that's annoying to - -- fix without adding more state to the lexer: - -- - 1+1 lexes as [1, +1], convert this to [1, +, 1] - -- - when a semi followed by a virtual semi, drop the virtual, lets you - -- write - -- foo x = action1; - -- 2 - tweak [] = [] - tweak (h@(payload -> Semi False):(payload -> Semi True):t) = h : tweak t - tweak (h@(payload -> Reserved _):t) = h : tweak t - tweak (t1:t2@(payload -> Numeric num):rem) - | notLayout t1 && touches t1 t2 && isSigned num = - t1 : Token (SymbolyId (take 1 num) Nothing) - (start t2) - (inc $ start t2) - : Token (Numeric (drop 1 num)) (inc $ start t2) (end t2) - : tweak rem - tweak (h:t) = h : tweak t - isSigned num = all (\ch -> ch == '-' || ch == '+') $ take 1 num - -- skip whitespace and comments - goWhitespace :: Layout -> Pos -> String -> [Token Lexeme] - goWhitespace l pos rem = span' isSpace rem $ \case - (_spaces, '-':'-':'-':_rem) -> popLayout0 l pos [] - (spaces, '-':'-':rem) -> spanThru' (/= '\n') rem $ \(ignored, rem) -> - goWhitespace l (incBy ('-':'-':ignored) . 
incBy spaces $ pos) rem - (spaces, rem) -> popLayout l (incBy spaces pos) rem - - popLayout :: Layout -> Pos -> String -> [Token Lexeme] - popLayout l pos rem = case matchKeyword' layoutCloseAndOpenKeywords rem of - Nothing -> case matchKeyword' layoutCloseOnlyKeywords rem of - Nothing -> popLayout0 l pos rem - Just (kw, rem) -> - let end = incBy kw pos - in Token Close pos end - : Token (Reserved kw) pos end - : goWhitespace (pop l) (incBy kw pos) rem - Just (kw, rem) -> - let kw' = layoutCloseAndOpenKeywordMap kw l in - case closes (openingKeyword kw') kw' l pos of - (Nothing, ts) -> ts ++ recover l (incBy kw pos) rem - (Just l, ts) -> - let end = incBy kw pos - in ts ++ [Token (Open kw) pos end] ++ pushLayout kw' l end rem - - -- Examine current column and pop the layout stack - -- and emit `Semi` / `Close` tokens as needed - popLayout0 :: Layout -> Pos -> String -> [Token Lexeme] - popLayout0 l p [] = replicate (length l) $ Token Close p p - popLayout0 l p@(Pos _ c2) rem - | top l == c2 = Token (Semi True) p p : go l p rem - | top l < c2 || topHasClosePair l = go l p rem - | top l > c2 = Token Close p p : popLayout0 (pop l) p rem - | otherwise = error "impossible" - - -- todo: is there a reason we want this to be more than just: - -- go1 (top l + 1 : l) pos rem - -- looks for the next non whitespace, non-comment character, and - -- pushes its column onto the layout stack - pushLayout :: BlockName -> Layout -> Pos -> String -> [Token Lexeme] - pushLayout b l pos rem = span' isSpace rem $ \case - (_spaces, '-':'-':'-':_rem) -> - -- short circuit - everything after `---` is ignored - popLayout0 ((b,column pos):l) pos [] - (spaces, '-':'-':rem) -> spanThru' (/= '\n') rem $ \(ignored, rem) -> - pushLayout b l (incBy ('-':'-':ignored) . 
incBy spaces $ pos) rem - (spaces, rem) -> - let topcol = top l - pos' = incBy spaces pos - col' = column pos' - in - if b == "=" && col' <= topcol then - -- force closing by introducing a fake col +1 layout - popLayout0 ((b, col' + 1) : l) pos' rem - else - go ((b, col') : l) pos' rem - - -- Figure out how many elements must be popped from the layout stack - -- before finding a matching `Open` token - findClose :: String -> Layout -> Maybe Int - findClose _ [] = Nothing - findClose s ((h,_):tl) = if s == h then Just 1 else (1+) <$> findClose s tl - - -- Closes a layout block with the given open/close pair, e.g `close "(" ")"` - close :: String -> String -> Layout -> Pos -> String -> [Token Lexeme] - close open close l pos rem = case findClose open l of - Nothing -> [Token (Err $ CloseWithoutMatchingOpen open close) pos pos] - Just n -> - let closes = replicate n $ Token Close pos (incBy close pos) - in closes ++ goWhitespace (drop n l) (incBy close pos) rem - - -- If the close is well-formed, returns a new layout stack and the correct - -- number of `Close` tokens. If the close isn't well-formed (has no match), - -- `Nothing` is returned along an error token. - closes :: String -> String -> Layout -> Pos - -> (Maybe Layout, [Token Lexeme]) - closes open close l pos = case findClose open l of - Nothing -> (Nothing, - [Token (Err $ CloseWithoutMatchingOpen open close) pos (incBy close pos)]) - Just n -> - (Just $ drop n l, replicate n $ Token Close pos (incBy close pos)) - - -- assuming we've dealt with whitespace and layout, read a token - go :: Layout -> Pos -> String -> [Token Lexeme] - go l pos rem = case rem of - [] -> popLayout0 l pos [] - '?' : '\\' : c : rem -> - case parseEscapeChar c of - Just c -> - let end = inc $ inc $ inc pos in - Token (Character c) pos end : goWhitespace l end rem - Nothing -> - [Token (Err $ InvalidEscapeCharacter c) pos pos] - '?' 
: c : rem -> - let end = inc $ inc pos in - Token (Character c) pos end : goWhitespace l end rem - '[' : ':' : rem -> - let end = inc . inc $ pos in - Token (Open "[:") pos (inc . inc $ pos) : lexDoc l end rem - -- '{' and '(' both introduce a block, which is closed by '}' and ')' - -- The lexer doesn't distinguish among closing blocks: all the ways of - -- closing a block emit the same sort of token, `Close`. - -- - -- Note: within {}'s, `->` does not open a block, since `->` is used - -- inside request patterns like `{State.set s -> k}` - '{' : rem -> Token (Open "{") pos (inc pos) : pushLayout "{" l (inc pos) rem - '}' : rem -> close "{" "}" l pos rem - '(' : rem -> Token (Open "(") pos (inc pos) : pushLayout "(" l (inc pos) rem - ')' : rem -> close "(" ")" l pos rem - ';' : rem -> Token (Semi False) pos (inc pos) : goWhitespace l (inc pos) rem - ch : rem | Set.member ch delimiters -> - Token (Reserved [ch]) pos (inc pos) : goWhitespace l (inc pos) rem - op : rem@(c : _) - | isDelayOrForce op - && (isSpace c || isAlphaNum c - || Set.member c delimiters || isDelayOrForce c) -> - Token (Reserved [op]) pos (inc pos) : goWhitespace l (inc pos) rem - ':' : rem@(c : _) | isSpace c || isAlphaNum c -> - Token (Reserved ":") pos (inc pos) : goWhitespace l (inc pos) rem - '@' : rem -> - Token (Reserved "@") pos (inc pos) : goWhitespace l (inc pos) rem - '_' : rem | hasSep rem -> - Token (Blank "") pos (inc pos) : goWhitespace l (inc pos) rem - '_' : (wordyId -> Right (id, rem)) -> - let pos' = incBy id $ inc pos - in Token (Blank id) pos pos' : goWhitespace l pos' rem - '&' : '&' : rem -> - let end = incBy "&&" pos - in Token (Reserved "&&") pos end : goWhitespace l end rem - '|' : '|' : rem -> - let end = incBy "||" pos - in Token (Reserved "||") pos end : goWhitespace l end rem - '|' : c : rem | isSpace c || isAlphaNum c -> - Token (Reserved "|") pos (inc pos) : goWhitespace l (inc pos) (c:rem) - '=' : rem@(c : _) | isSpace c || isAlphaNum c -> - let end = inc pos - in 
case topBlockName l of - -- '=' does not open a layout block if within a type declaration - Just "type" -> Token (Reserved "=") pos end : goWhitespace l end rem - Just "unique" -> Token (Reserved "=") pos end : goWhitespace l end rem - Just _ -> Token (Open "=") pos end : pushLayout "=" l end rem - Nothing -> Token (Err LayoutError) pos pos : recover l pos rem - '-' : '>' : rem@(c : _) - | isSpace c || isAlphaNum c || Set.member c delimiters -> - let end = incBy "->" pos - in case topBlockName l of - Just "match-with" -> -- `->` opens a block when pattern-matching only - Token (Open "->") pos end : pushLayout "->" l end rem - Just "cases" -> -- `->` opens a block when pattern-matching only - Token (Open "->") pos end : pushLayout "->" l end rem - Just _ -> Token (Reserved "->") pos end : goWhitespace l end rem - Nothing -> Token (Err LayoutError) pos pos : recover l pos rem - - -- string literals and backticked identifiers - '"' : rem -> case splitStringLit rem of - Right (delta, lit, rem) -> let end = pos <> delta in - Token (Textual lit) pos end : goWhitespace l end rem - Left (TextLiteralMissingClosingQuote _) -> - [Token (Err $ TextLiteralMissingClosingQuote rem) pos pos] - Left err -> [Token (Err err) pos pos] - '`' : rem -> case wordyId rem of - Left e -> Token (Err e) pos pos : recover l pos rem - Right (id, rem) -> - if ['#'] `isPrefixOf` rem then - case shortHash rem of - Left e -> Token (Err e) pos pos : recover l pos rem - Right (h, rem) -> - let end = inc . incBy id . incBy (SH.toString h) . inc $ pos - in Token (Backticks id (Just h)) pos end - : goWhitespace l end (pop rem) - else - let end = inc . incBy id . 
inc $ pos - in Token (Backticks id Nothing) pos end - : goWhitespace l end (pop rem) - - rem@('#' : _) -> case shortHash rem of - Left e -> Token (Err e) pos pos : recover l pos rem - Right (h, rem) -> - let end = incBy (SH.toString h) pos - in Token (Hash h) pos end : goWhitespace l end rem - -- keywords and identifiers - (symbolyId -> Right (id, rem')) -> case numericLit rem of - Right (Just (num, rem)) -> - let end = incBy num pos - in Token (Numeric num) pos end : goWhitespace l end rem - _ -> if ['#'] `isPrefixOf` rem' then - case shortHash rem' of - Left e -> Token (Err e) pos pos : recover l pos rem' - Right (h, rem) -> - let end = incBy id . incBy (SH.toString h) $ pos - in Token (SymbolyId id (Just h)) pos end - : goWhitespace l end rem - else - let end = incBy id pos - in Token (SymbolyId id Nothing) pos end : goWhitespace l end rem' - (wordyId -> Right (id, rem)) -> - if ['#'] `isPrefixOf` rem then - case shortHash rem of - Left e -> Token (Err e) pos pos : recover l pos rem - Right (h, rem) -> - let end = incBy id . 
incBy (SH.toString h) $ pos - in Token (WordyId id (Just h)) pos end - : goWhitespace l end rem - else let end = incBy id pos - in Token (WordyId id Nothing) pos end : goWhitespace l end rem - (matchKeyword -> Just (kw,rem)) -> - let end = incBy kw pos in - case kw of - -- `unique type` lexes as [Open "unique", Reserved "type"] - -- `type` lexes as [Open "type"] - -- `unique ability` lexes as [Open "unique", Reserved "ability"] - -- `ability` lexes as [Open "ability"] - kw@"unique" -> - Token (Open kw) pos end - : goWhitespace ((kw, column $ inc pos) : l) end rem - kw@"ability" | topBlockName l /= Just "unique" -> - Token (Open kw) pos end - : goWhitespace ((kw, column $ inc pos) : l) end rem - kw@"type" | topBlockName l /= Just "unique" -> - Token (Open kw) pos end - : goWhitespace ((kw, column $ inc pos) : l) end rem - kw | Set.member kw layoutKeywords -> - Token (Open kw) pos end : pushLayout kw l end rem - | otherwise -> Token (Reserved kw) pos end : goWhitespace l end rem - - -- numeric literals - rem -> case numericLit rem of - Right (Just (num, rem)) -> - let end = incBy num pos in Token (Numeric num) pos end : goWhitespace l end rem - Right Nothing -> Token (Err UnknownLexeme) pos pos : recover l pos rem - Left e -> Token (Err e) pos pos : recover l pos rem - - lexDoc l pos rem = case span (\c -> isSpace c && not (c == '\n')) rem of - (spaces,rem) -> docBlob l pos' rem pos' [] - where pos' = incBy spaces pos - - docBlob l pos rem blobStart acc = case rem of - '@' : (hqToken (inc pos) -> Just (tok, rem)) -> - let pos' = inc $ end tok in - Token (Textual (reverse acc)) blobStart pos : - tok : - docBlob l pos' rem pos' [] - '@' : (docType (inc pos) -> Just (typTok, pos', rem)) -> - Token (Textual (reverse acc)) blobStart pos : case rem of - (hqToken pos' -> Just (tok, rem)) -> - let pos'' = inc (end tok) in - typTok : tok : docBlob l pos'' rem pos' [] - _ -> recover l pos rem - '\\' : '@' : rem -> docBlob l (incBy "\\@" pos) rem blobStart ('@':acc) - '\\' : 
':' : ']' : rem -> docBlob l (incBy "\\:]" pos) rem blobStart (']':':':acc) - ':' : ']' : rem -> - let pos' = inc . inc $ pos in - (if null acc then id - else (Token (Textual (reverse - $ dropWhile (\c -> isSpace c && not (c == '\n')) acc)) blobStart pos :)) $ - Token Close pos pos' : goWhitespace l pos' rem - [] -> recover l pos rem - ch : rem -> docBlob l (incBy [ch] pos) rem blobStart (ch:acc) - - docType :: Pos -> String -> Maybe (Token Lexeme, Pos, String) - docType pos rem = case rem of - -- this crazy one liner parses [], as a pattern match - '[' : (span (/= ']') -> (typ, ']' : (span isSpace -> (spaces, rem)))) -> - -- advance past [, , ], - let pos' = incBy typ . inc . incBy spaces . inc $ pos in - -- the reserved token doesn't include the `[]` chars - Just (Token (Reserved typ) (inc pos) (incBy typ . inc $ pos), pos', rem) - _ -> Nothing - - hqToken :: Pos -> String -> Maybe (Token Lexeme, String) - hqToken pos rem = case rem of - (shortHash -> Right (h, rem)) -> - Just (Token (Hash h) pos (incBy (SH.toString h) pos), rem) - (wordyId -> Right (id, rem)) -> case rem of - (shortHash -> Right (h, rem)) -> - Just (Token (WordyId id $ Just h) pos (incBy id . incBy (SH.toString h) $ pos), rem) - _ -> Just (Token (WordyId id Nothing) pos (incBy id pos), rem) - (symbolyId -> Right (id, rem)) -> case rem of - (shortHash -> Right (h, rem)) -> - Just (Token (SymbolyId id $ Just h) pos (incBy id . incBy (SH.toString h) $ pos), rem) - _ -> Just (Token (SymbolyId id Nothing) pos (incBy id pos), rem) - _ -> Nothing - - recover _l _pos _rem = [] - -isDelayOrForce :: Char -> Bool -isDelayOrForce op = op == '\''|| op == '!' 
- -matchKeyword :: String -> Maybe (String,String) -matchKeyword = matchKeyword' keywords - -matchKeyword' :: Set String -> String -> Maybe (String,String) -matchKeyword' keywords s = case break isSep s of - (kw, rem) | Set.member kw keywords -> Just (kw, rem) - _ -> Nothing - --- Split into a string literal and the remainder, and a delta which includes --- both the starting and ending `"` character --- The input string should only start with a '"' if the string literal is empty -splitStringLit :: String -> Either Err (Pos, String, String) -splitStringLit = go (inc mempty) "" where - -- n tracks the raw character delta of this literal - go !n !acc ('\\':s:rem) = case parseEscapeChar s of - Just e -> go (inc . inc $ n) (e:acc) rem - Nothing -> Left $ InvalidEscapeCharacter s - go !n !acc ('"':rem) = Right (inc n, reverse acc, rem) - go !n !acc (x:rem) = go (inc n) (x:acc) rem - go _ _ [] = Left $ TextLiteralMissingClosingQuote "" - --- Mapping between characters and their escape codes. Use parse/showEscapeChar --- to convert. -escapeChars :: [(Char, Char)] -escapeChars = - [ ('0', '\0') - , ('a', '\a') - , ('b', '\b') - , ('f', '\f') - , ('n', '\n') - , ('r', '\r') - , ('t', '\t') - , ('v', '\v') - , ('s', ' ') - , ('\'', '\'') - , ('"', '"') - , ('\\', '\\') - ] - --- Map a escape symbol to it's character literal -parseEscapeChar :: Char -> Maybe Char -parseEscapeChar c = - Map.lookup c (Map.fromList escapeChars) - --- Inverse of parseEscapeChar; map a character to its escaped version: -showEscapeChar :: Char -> Maybe Char -showEscapeChar c = - Map.lookup c (Map.fromList [(x, y) | (y, x) <- escapeChars]) - -numericLit :: String -> Either Err (Maybe (String,String)) -numericLit = go - where - go ('+':s) = go2 "+" s - go ('-':s) = go2 "-" s - go s = go2 "" s - go2 sign s = case span isDigit s of - (num@(_:_), []) -> pure $ pure (sign ++ num, []) - (num@(_:_), '.':rem) -> case span isDigit rem of - (fractional@(_:_), []) -> - pure $ pure (sign ++ num ++ "." 
++ fractional, []) - (fractional@(_:_), c:rem) - | c `elem` "eE" -> goExp (sign ++ num ++ "." ++ fractional) rem - | isSep c -> pure $ pure (sign ++ num ++ "." ++ fractional, c:rem) - | otherwise -> pure Nothing - ([], _) -> Left (MissingFractional (sign ++ num ++ ".")) - (num@(_:_), c:rem) | c `elem` "eE" -> goExp (sign ++ num) rem - (num@(_:_), c:rem) -> pure $ pure (sign ++ num, c:rem) - ([], _) -> pure Nothing - goExp signNum rem = case rem of - ('+':s) -> goExp' signNum "+" s - ('-':s) -> goExp' signNum "-" s - s -> goExp' signNum "" s - goExp' signNum expSign exp = case span isDigit exp of - (_:_, []) -> - pure $ pure (signNum ++ "e" ++ expSign ++ exp, []) - (exp'@(_:_), c:rem) - | isSep c -> pure $ pure (signNum ++ "e" ++ expSign ++ exp', c:rem) - | otherwise -> pure Nothing - ([], _) -> Left (MissingExponent (signNum ++ "e" ++ expSign)) - -isSep :: Char -> Bool -isSep c = isSpace c || Set.member c delimiters - -hasSep :: String -> Bool -hasSep [] = True -hasSep (ch:_) = isSep ch - --- Not a keyword, '.' delimited list of wordyId0 (should not include a trailing '.') -wordyId0 :: String -> Either Err (String, String) -wordyId0 s = span' wordyIdChar s $ \case - (id@(ch:_), rem) | not (Set.member id keywords) - && wordyIdStartChar ch - -> Right (id, rem) - (id, _rem) -> Left (InvalidWordyId id) - -wordyIdStartChar :: Char -> Bool -wordyIdStartChar ch = isAlpha ch || isEmoji ch || ch == '_' - -wordyIdChar :: Char -> Bool -wordyIdChar ch = - isAlphaNum ch || isEmoji ch || ch `elem` "_!'" - -isEmoji :: Char -> Bool -isEmoji c = c >= '\x1F300' && c <= '\x1FAFF' - -symbolyId :: String -> Either Err (String, String) -symbolyId r@('.':s) - | s == "" = symbolyId0 r -- - | isSpace (head s) = symbolyId0 r -- lone dot treated as an operator - | isDelimiter (head s) = symbolyId0 r -- - | otherwise = (\(s, rem) -> ('.':s, rem)) <$> symbolyId' s -symbolyId s = symbolyId' s - --- Is a '.' 
delimited list of wordyId, with a final segment of `symbolyId0` -symbolyId' :: String -> Either Err (String, String) -symbolyId' s = case wordyId0 s of - Left _ -> symbolyId0 s - Right (wid, '.':rem) -> case symbolyId rem of - Left e -> Left e - Right (rest, rem) -> Right (wid <> "." <> rest, rem) - Right (w,_) -> Left (InvalidSymbolyId w) - -wordyId :: String -> Either Err (String, String) -wordyId ('.':s) = (\(s,rem) -> ('.':s,rem)) <$> wordyId' s -wordyId s = wordyId' s - --- Is a '.' delimited list of wordyId -wordyId' :: String -> Either Err (String, String) -wordyId' s = case wordyId0 s of - Left e -> Left e - Right (wid, '.':rem@(ch:_)) | wordyIdStartChar ch -> case wordyId rem of - Left e -> Left e - Right (rest, rem) -> Right (wid <> "." <> rest, rem) - Right (w,rem) -> Right (w,rem) - --- Is a `ShortHash` -shortHash :: String -> Either Err (ShortHash, String) -shortHash s = case SH.fromString potentialHash of - Nothing -> Left (InvalidShortHash potentialHash) - Just x -> Right (x, rem) - where (potentialHash, rem) = break ((||) <$> isSpace <*> (== '`')) s - --- Returns either an error or an id and a remainder -symbolyId0 :: String -> Either Err (String, String) -symbolyId0 s = span' symbolyIdChar s $ \case - (id@(_:_), rem) | not (Set.member id reservedOperators) -> Right (id, rem) - (id, _rem) -> Left (InvalidSymbolyId id) - -symbolyIdChar :: Char -> Bool -symbolyIdChar ch = Set.member ch symbolyIdChars - -symbolyIdChars :: Set Char -symbolyIdChars = Set.fromList "!$%^&*-=+<>.~\\/|:" - -keywords :: Set String -keywords = Set.fromList [ - "if", "then", "else", "forall", "∀", - "handle", "with", "unique", - "where", "use", - "true", "false", - "type", "ability", "alias", "typeLink", "termLink", - "let", "namespace", "match", "cases"] - --- These keywords introduce a layout block -layoutKeywords :: Set String -layoutKeywords = - Set.fromList [ - "if", "handle", "let", "where", "match", "cases" - ] - --- These keywords end a layout block and begin another 
layout block -layoutCloseAndOpenKeywords :: Set String -layoutCloseAndOpenKeywords = Set.fromList ["then", "else", "with"] - --- Use a transformed block name to disambiguate certain keywords -layoutCloseAndOpenKeywordMap :: String -- close-and-open keyword - -> Layout -- layout - -> BlockName -- transformed blockname for keyword -layoutCloseAndOpenKeywordMap "with" l = - case findNearest l (Set.fromList ["handle", "match"]) of - Just "match" -> "match-with" - Just "handle" -> "handle-with" - _ -> "with" -layoutCloseAndOpenKeywordMap kw _ = kw - -openingKeyword :: BlockName -> String -openingKeyword "then" = "if" -openingKeyword "else" = "then" -openingKeyword "with" = "match or handle" -- hack!! -openingKeyword "match-with" = "match" -openingKeyword "handle-with" = "handle" -openingKeyword kw = error $ "Not sure what the opening keyword is for: " <> kw - --- These keywords end a layout block -layoutCloseOnlyKeywords :: Set String -layoutCloseOnlyKeywords = Set.fromList ["}"] - -delimiters :: Set Char -delimiters = Set.fromList "()[]{},?;" - -isDelimiter :: Char -> Bool -isDelimiter ch = Set.member ch delimiters - -reservedOperators :: Set String -reservedOperators = Set.fromList ["->", ":", "&&", "||"] - -inc :: Pos -> Pos -inc (Pos line col) = Pos line (col + 1) - -incBy :: String -> Pos -> Pos -incBy rem pos@(Pos line col) = case rem of - [] -> pos - '\r':rem -> incBy rem $ Pos line col - '\n':rem -> incBy rem $ Pos (line + 1) 1 - _:rem -> incBy rem $ Pos line (col + 1) - -debugLex'' :: [Token Lexeme] -> String -debugLex'' = show . fmap payload . tree - -debugLex' :: String -> String -debugLex' = debugLex'' . lexer "debugLex" - -debugLex''' :: String -> String -> String -debugLex''' s = debugLex'' . 
lexer s - -span' :: (a -> Bool) -> [a] -> (([a],[a]) -> r) -> r -span' f a k = k (span f a) - -spanThru' :: (a -> Bool) -> [a] -> (([a],[a]) -> r) -> r -spanThru' f a k = case span f a of - (l, []) -> k (l, []) - (l, lz:r) -> k (l ++ [lz], r) diff --git a/parser-typechecker/src/Unison/NamePrinter.hs b/parser-typechecker/src/Unison/NamePrinter.hs deleted file mode 100644 index d86d94d3a6..0000000000 --- a/parser-typechecker/src/Unison/NamePrinter.hs +++ /dev/null @@ -1,81 +0,0 @@ -module Unison.NamePrinter where - -import Unison.Prelude - -import qualified Unison.HashQualified as HQ -import qualified Unison.HashQualified' as HQ' -import Unison.LabeledDependency (LabeledDependency) -import qualified Unison.LabeledDependency as LD -import Unison.Name (Name) -import qualified Unison.Name as Name -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import Unison.ShortHash (ShortHash) -import qualified Unison.ShortHash as SH -import Unison.Util.SyntaxText (SyntaxText) -import qualified Unison.Util.SyntaxText as S -import Unison.Util.Pretty (Pretty) -import qualified Unison.Util.Pretty as PP - -prettyName :: IsString s => Name -> Pretty s -prettyName = PP.text . Name.toText - -prettyHashQualified :: HQ.HashQualified -> Pretty SyntaxText -prettyHashQualified hq = styleHashQualified' id (fmt $ S.HashQualifier hq) hq - -prettyHashQualified' :: HQ'.HashQualified -> Pretty SyntaxText -prettyHashQualified' = prettyHashQualified . HQ'.toHQ - -prettyHashQualified0 :: IsString s => HQ.HashQualified -> Pretty s -prettyHashQualified0 = PP.text . HQ.toText - --- | Pretty-print a reference as a name and the given number of characters of --- its hash. -prettyNamedReference :: Int -> Name -> Reference -> Pretty SyntaxText -prettyNamedReference len name = - prettyHashQualified . HQ.take len . HQ.fromNamedReference name - --- | Pretty-print a referent as a name and the given number of characters of its --- hash. 
-prettyNamedReferent :: Int -> Name -> Referent -> Pretty SyntaxText -prettyNamedReferent len name = - prettyHashQualified . HQ.take len . HQ.fromNamedReferent name - --- | Pretty-print a reference as the given number of characters of its hash. -prettyReference :: Int -> Reference -> Pretty SyntaxText -prettyReference len = - prettyHashQualified . HQ.take len . HQ.fromReference - --- | Pretty-print a referent as the given number of characters of its hash. -prettyReferent :: Int -> Referent -> Pretty SyntaxText -prettyReferent len = - prettyHashQualified . HQ.take len . HQ.fromReferent - -prettyLabeledDependency :: Int -> LabeledDependency -> Pretty SyntaxText -prettyLabeledDependency len = LD.fold (prettyReference len) (prettyReferent len) - -prettyShortHash :: IsString s => ShortHash -> Pretty s -prettyShortHash = fromString . SH.toString - -styleHashQualified :: - IsString s => (Pretty s -> Pretty s) -> HQ.HashQualified -> Pretty s -styleHashQualified style hq = styleHashQualified' style id hq - -styleHashQualified' :: - IsString s => (Pretty s -> Pretty s) - -> (Pretty s -> Pretty s) - -> HQ.HashQualified - -> Pretty s -styleHashQualified' nameStyle hashStyle = \case - HQ.NameOnly n -> nameStyle (prettyName n) - HQ.HashOnly h -> hashStyle (prettyShortHash h) - HQ.HashQualified n h -> - PP.group $ nameStyle (prettyName n) <> hashStyle (prettyShortHash h) - -styleHashQualified'' :: (Pretty SyntaxText -> Pretty SyntaxText) - -> HQ.HashQualified - -> Pretty SyntaxText -styleHashQualified'' nameStyle hq = styleHashQualified' nameStyle (fmt $ S.HashQualifier hq) hq - -fmt :: S.Element -> Pretty S.SyntaxText -> Pretty S.SyntaxText -fmt = PP.withSyntax diff --git a/parser-typechecker/src/Unison/Parser.hs b/parser-typechecker/src/Unison/Parser.hs deleted file mode 100644 index ad874269fb..0000000000 --- a/parser-typechecker/src/Unison/Parser.hs +++ /dev/null @@ -1,443 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE TypeFamilies #-} -{-# LANGUAGE ViewPatterns 
#-} - -module Unison.Parser where - -import Unison.Prelude - -import qualified Crypto.Random as Random -import Data.Bytes.Put (runPutS) -import Data.Bytes.Serial ( serialize ) -import Data.Bytes.VarInt ( VarInt(..) ) -import Data.Bifunctor (bimap) -import qualified Data.Char as Char -import Data.List.NonEmpty (NonEmpty (..)) --- import Data.Maybe -import qualified Data.Set as Set -import qualified Data.Text as Text -import Data.Typeable (Proxy (..)) -import Text.Megaparsec (runParserT) -import qualified Text.Megaparsec as P -import qualified Text.Megaparsec.Char as P -import qualified Unison.ABT as ABT -import qualified Unison.Hash as Hash -import qualified Unison.HashQualified as HQ -import qualified Unison.Lexer as L -import Unison.Pattern (Pattern) -import qualified Unison.Pattern as Pattern -import Unison.Term (MatchCase (..)) -import Unison.Var (Var) -import qualified Unison.Var as Var -import qualified Unison.UnisonFile as UF -import Unison.Name as Name -import Unison.Names3 (Names) -import qualified Unison.Names3 as Names -import Control.Monad.Reader.Class (asks) -import qualified Unison.Hashable as Hashable -import Unison.Referent (Referent) -import Unison.Reference (Reference) - -debug :: Bool -debug = False - -type P v = P.ParsecT (Error v) Input ((->) ParsingEnv) -type Token s = P.Token s -type Err v = P.ParseError (Token Input) (Error v) - -data ParsingEnv = - ParsingEnv { uniqueNames :: UniqueName - , names :: Names - } - -newtype UniqueName = UniqueName (L.Pos -> Int -> Maybe Text) - -instance Semigroup UniqueName where (<>) = mappend -instance Monoid UniqueName where - mempty = UniqueName (\_ _ -> Nothing) - mappend (UniqueName f) (UniqueName g) = - UniqueName $ \pos len -> f pos len <|> g pos len - - -uniqueBase32Namegen :: forall gen. 
Random.DRG gen => gen -> UniqueName -uniqueBase32Namegen rng = - UniqueName $ \pos lenInBase32Hex -> go pos lenInBase32Hex rng - where - -- if the identifier starts with a number, try again, since - -- we want the name to work as a valid wordyId - go :: L.Pos -> Int -> gen -> Maybe Text - go pos lenInBase32Hex rng0 = let - (bytes,rng) = Random.randomBytesGenerate 32 rng0 - posBytes = runPutS $ do - serialize $ VarInt (L.line pos) - serialize $ VarInt (L.column pos) - h = Hashable.accumulate' $ bytes <> posBytes - b58 = Hash.base32Hex h - in if Char.isDigit (Text.head b58) then go pos lenInBase32Hex rng - else Just . Text.take lenInBase32Hex $ b58 - - -uniqueName :: Var v => Int -> P v Text -uniqueName lenInBase32Hex = do - UniqueName mkName <- asks uniqueNames - pos <- L.start <$> P.lookAhead anyToken - let none = Hash.base32Hex . Hash.fromBytes . encodeUtf8 . Text.pack $ show pos - pure . fromMaybe none $ mkName pos lenInBase32Hex - -data Error v - = SignatureNeedsAccompanyingBody (L.Token v) - | DisallowedAbsoluteName (L.Token Name) - | EmptyBlock (L.Token String) - | UnknownAbilityConstructor (L.Token HQ.HashQualified) (Set (Reference, Int)) - | UnknownDataConstructor (L.Token HQ.HashQualified) (Set (Reference, Int)) - | UnknownTerm (L.Token HQ.HashQualified) (Set Referent) - | UnknownType (L.Token HQ.HashQualified) (Set Reference) - | UnknownId (L.Token HQ.HashQualified) (Set Referent) (Set Reference) - | ExpectedBlockOpen String (L.Token L.Lexeme) - | EmptyWatch - | UseInvalidPrefixSuffix (Either (L.Token Name) (L.Token Name)) (Maybe [L.Token Name]) - | UseEmpty (L.Token String) -- an empty `use` statement - | DidntExpectExpression (L.Token L.Lexeme) (Maybe (L.Token L.Lexeme)) - | TypeDeclarationErrors [UF.Error v Ann] - | ResolutionFailures [Names.ResolutionFailure v Ann] - | DuplicateTypeNames [(v, [Ann])] - | DuplicateTermNames [(v, [Ann])] - deriving (Show, Eq, Ord) - -data Ann - = Intrinsic -- { sig :: String, start :: L.Pos, end :: L.Pos } - | External - 
| Ann { start :: L.Pos, end :: L.Pos } - deriving (Eq, Ord, Show) - -startingLine :: Ann -> Maybe L.Line -startingLine (Ann (L.line -> line) _) = Just line -startingLine _ = Nothing - -instance Monoid Ann where - mempty = External - mappend = (<>) - -instance Semigroup Ann where - Ann s1 _ <> Ann _ e2 = Ann s1 e2 - -- If we have a concrete location from a file, use it - External <> a = a - a <> External = a - Intrinsic <> a = a - a <> Intrinsic = a - -tokenToPair :: L.Token a -> (Ann, a) -tokenToPair t = (ann t, L.payload t) - -newtype Input = Input { inputStream :: [L.Token L.Lexeme] } - deriving (Eq, Ord, Show) - -instance P.Stream Input where - type Token Input = L.Token L.Lexeme - type Tokens Input = Input - - tokenToChunk pxy = P.tokensToChunk pxy . pure - - tokensToChunk _ = Input - - chunkToTokens _ = inputStream - - chunkLength pxy = length . P.chunkToTokens pxy - - chunkEmpty pxy = null . P.chunkToTokens pxy - - positionAt1 _ sp t = setPos sp (L.start t) - - positionAtN pxy sp = - maybe sp (setPos sp . L.start) . listToMaybe . P.chunkToTokens pxy - - advance1 _ _ cp = setPos cp . L.end - - advanceN _ _ cp = setPos cp . L.end . last . inputStream - - take1_ (P.chunkToTokens proxy -> []) = Nothing - take1_ (P.chunkToTokens proxy -> t:ts) = Just (t, P.tokensToChunk proxy ts) - take1_ _ = error "Unpossible" - - takeN_ n (P.chunkToTokens proxy -> []) | n > 0 = Nothing - takeN_ n ts = - Just - . join bimap (P.tokensToChunk proxy) - . splitAt n $ P.chunkToTokens proxy ts - - takeWhile_ p = join bimap (P.tokensToChunk proxy) . span p . inputStream - -setPos :: P.SourcePos -> L.Pos -> P.SourcePos -setPos sp lp = - P.SourcePos (P.sourceName sp) (P.mkPos $ L.line lp) (P.mkPos $ L.column lp) - -class Annotated a where - ann :: a -> Ann - -instance Annotated Ann where - ann = id - -instance Annotated (L.Token a) where - ann (L.Token _ s e) = Ann s e - -instance Annotated a => Annotated (ABT.Term f v a) where - ann = ann . 
ABT.annotation - -instance Annotated a => Annotated (Pattern a) where - ann = ann . Pattern.loc - -instance (Annotated a, Annotated b) => Annotated (MatchCase a b) where - ann (MatchCase p _ b) = ann p <> ann b - -label :: (Ord v, Show a) => String -> P v a -> P v a -label = P.label --- label = P.dbg - -traceRemainingTokens :: Ord v => String -> P v () -traceRemainingTokens label = do - remainingTokens <- lookAhead $ many anyToken - let - _ = - trace ("REMAINDER " ++ label ++ ":\n" ++ L.debugLex'' remainingTokens) () - pure () - -mkAnn :: (Annotated a, Annotated b) => a -> b -> Ann -mkAnn x y = ann x <> ann y - -tok :: (Ann -> a -> b) -> L.Token a -> b -tok f (L.Token a start end) = f (Ann start end) a - -peekAny :: Ord v => P v (L.Token L.Lexeme) -peekAny = P.lookAhead P.anyChar - -lookAhead :: Ord v => P v a -> P v a -lookAhead = P.lookAhead - -anyToken :: Ord v => P v (L.Token L.Lexeme) -anyToken = P.anyChar - -failCommitted :: Ord v => Error v -> P v x -failCommitted e = do - void anyToken <|> void P.eof - P.customFailure e - -proxy :: Proxy Input -proxy = Proxy - -root :: Ord v => P v a -> P v a -root p = (openBlock *> p) <* closeBlock <* P.eof - --- | -rootFile :: Ord v => P v a -> P v a -rootFile p = p <* P.eof - -run' :: Ord v => P v a -> String -> String -> ParsingEnv -> Either (Err v) a -run' p s name env = - let lex = if debug - then L.lexer name (trace (L.debugLex''' "lexer receives" s) s) - else L.lexer name s - pTraced = traceRemainingTokens "parser receives" *> p - env' = env { names = Names.suffixify (names env) } - in runParserT pTraced name (Input lex) env' - -run :: Ord v => P v a -> String -> ParsingEnv -> Either (Err v) a -run p s = run' p s "" - --- Virtual pattern match on a lexeme. -queryToken :: Ord v => (L.Lexeme -> Maybe a) -> P v (L.Token a) -queryToken f = P.token go Nothing - where go t@(f . 
L.payload -> Just s) = Right $ fmap (const s) t - go x = Left (pure (P.Tokens (x:|[])), Set.empty) - --- Consume a block opening and return the string that opens the block. -openBlock :: Ord v => P v (L.Token String) -openBlock = queryToken getOpen - where - getOpen (L.Open s) = Just s - getOpen _ = Nothing - -openBlockWith :: Ord v => String -> P v (L.Token ()) -openBlockWith s = void <$> P.satisfy ((L.Open s ==) . L.payload) - --- Match a particular lexeme exactly, and consume it. -matchToken :: Ord v => L.Lexeme -> P v (L.Token L.Lexeme) -matchToken x = P.satisfy ((==) x . L.payload) - --- The package name that refers to the root, literally just `.` -importDotId :: Ord v => P v (L.Token Name) -importDotId = queryToken go where - go (L.SymbolyId "." Nothing) = Just (Name.fromString ".") - go _ = Nothing - --- Consume a virtual semicolon -semi :: Ord v => P v (L.Token ()) -semi = queryToken go where - go (L.Semi _) = Just () - go _ = Nothing - --- Consume the end of a block -closeBlock :: Ord v => P v (L.Token ()) -closeBlock = void <$> matchToken L.Close - -wordyPatternName :: Var v => P v (L.Token v) -wordyPatternName = queryToken $ \case - L.WordyId s Nothing -> Just $ Var.nameds s - _ -> Nothing - --- Parse an prefix identifier e.g. Foo or (+), discarding any hash -prefixDefinitionName :: Var v => P v (L.Token v) -prefixDefinitionName = - wordyDefinitionName <|> parenthesize symbolyDefinitionName - --- Parse a wordy identifier e.g. Foo, discarding any hash -wordyDefinitionName :: Var v => P v (L.Token v) -wordyDefinitionName = queryToken $ \case - L.WordyId s _ -> Just $ Var.nameds s - L.Blank s -> Just $ Var.nameds ("_" <> s) - _ -> Nothing - --- Parse a wordyId as a String, rejecting any hash -wordyIdString :: Ord v => P v (L.Token String) -wordyIdString = queryToken $ \case - L.WordyId s Nothing -> Just s - _ -> Nothing - --- Parse a wordyId as a Name, rejecting any hash -importWordyId :: Ord v => P v (L.Token Name) -importWordyId = (fmap . 
fmap) Name.fromString wordyIdString - --- The `+` in: use Foo.bar + as a Name -importSymbolyId :: Ord v => P v (L.Token Name) -importSymbolyId = (fmap . fmap) Name.fromString symbolyIdString - --- Parse a symbolyId as a String, rejecting any hash -symbolyIdString :: Ord v => P v (L.Token String) -symbolyIdString = queryToken $ \case - L.SymbolyId s Nothing -> Just s - _ -> Nothing - --- Parse an infix id e.g. + or `cons`, discarding any hash -infixDefinitionName :: Var v => P v (L.Token v) -infixDefinitionName = symbolyDefinitionName <|> backticked where - backticked :: Var v => P v (L.Token v) - backticked = queryToken $ \case - L.Backticks s _ -> Just $ Var.nameds s - _ -> Nothing - --- Parse a symboly ID like >>= or &&, discarding any hash -symbolyDefinitionName :: Var v => P v (L.Token v) -symbolyDefinitionName = queryToken $ \case - L.SymbolyId s _ -> Just $ Var.nameds s - _ -> Nothing - -parenthesize :: Ord v => P v a -> P v a -parenthesize p = P.try (openBlockWith "(" *> p) <* closeBlock - -hqPrefixId, hqInfixId :: Ord v => P v (L.Token HQ.HashQualified) -hqPrefixId = hqWordyId_ <|> parenthesize hqSymbolyId_ -hqInfixId = hqSymbolyId_ <|> hqBacktickedId_ - --- Parse a hash-qualified alphanumeric identifier -hqWordyId_ :: Ord v => P v (L.Token HQ.HashQualified) -hqWordyId_ = queryToken $ \case - L.WordyId "" (Just h) -> Just $ HQ.HashOnly h - L.WordyId s (Just h) -> Just $ HQ.HashQualified (Name.fromString s) h - L.WordyId s Nothing -> Just $ HQ.NameOnly (Name.fromString s) - L.Hash h -> Just $ HQ.HashOnly h - L.Blank s | not (null s) -> Just $ HQ.NameOnly (Name.fromString ("_" <> s)) - _ -> Nothing - --- Parse a hash-qualified symboly ID like >>=#foo or && -hqSymbolyId_ :: Ord v => P v (L.Token HQ.HashQualified) -hqSymbolyId_ = queryToken $ \case - L.SymbolyId "" (Just h) -> Just $ HQ.HashOnly h - L.SymbolyId s (Just h) -> Just $ HQ.HashQualified (Name.fromString s) h - L.SymbolyId s Nothing -> Just $ HQ.NameOnly (Name.fromString s) - _ -> Nothing - 
-hqBacktickedId_ :: Ord v => P v (L.Token HQ.HashQualified) -hqBacktickedId_ = queryToken $ \case - L.Backticks "" (Just h) -> Just $ HQ.HashOnly h - L.Backticks s (Just h) -> Just $ HQ.HashQualified (Name.fromString s) h - L.Backticks s Nothing -> Just $ HQ.NameOnly (Name.fromString s) - _ -> Nothing - --- Parse a reserved word -reserved :: Ord v => String -> P v (L.Token String) -reserved w = label w $ queryToken getReserved - where getReserved (L.Reserved w') | w == w' = Just w - getReserved _ = Nothing - --- Parse a placeholder or typed hole -blank :: Ord v => P v (L.Token String) -blank = label "blank" $ queryToken getBlank - where getBlank (L.Blank s) = Just ('_' : s) - getBlank _ = Nothing - -numeric :: Ord v => P v (L.Token String) -numeric = queryToken getNumeric - where getNumeric (L.Numeric s) = Just s - getNumeric _ = Nothing - -sepBy :: Ord v => P v a -> P v b -> P v [b] -sepBy sep pb = P.sepBy pb sep - -sepBy1 :: Ord v => P v a -> P v b -> P v [b] -sepBy1 sep pb = P.sepBy1 pb sep - -character :: Ord v => P v (L.Token Char) -character = queryToken getChar - where getChar (L.Character c) = Just c - getChar _ = Nothing - -string :: Ord v => P v (L.Token Text) -string = queryToken getString - where getString (L.Textual s) = Just (Text.pack s) - getString _ = Nothing - -tupleOrParenthesized :: Ord v => P v a -> (Ann -> a) -> (a -> a -> a) -> P v a -tupleOrParenthesized p unit pair = do - open <- openBlockWith "(" - es <- sepBy (reserved "," *> optional semi) p - close <- optional semi *> closeBlock - pure $ go es open close - where - go [t] _ _ = t - go as s e = foldr pair (unit (ann s <> ann e)) as - -seq :: Ord v => (Ann -> [a] -> a) -> P v a -> P v a -seq f p = f' <$> reserved "[" <*> elements <*> trailing - where - f' open elems close = f (ann open <> ann close) elems - trailing = optional semi *> reserved "]" - sep = P.try $ optional semi *> reserved "," <* optional semi - elements = sepBy sep p - -chainr1 :: Ord v => P v a -> P v (a -> a -> a) -> P v 
a -chainr1 p op = go1 where - go1 = p >>= go2 - go2 hd = do { op <- op; op hd <$> go1 } <|> pure hd - --- Parse `p` 1+ times, combining with `op` -chainl1 :: Ord v => P v a -> P v (a -> a -> a) -> P v a -chainl1 p op = foldl (flip ($)) <$> p <*> P.many (flip <$> op <*> p) - --- If `p` would succeed, this fails uncommitted. --- Otherwise, `failIfOk` used to produce the output -failureIf :: Ord v => P v (P v b) -> P v a -> P v b -failureIf failIfOk p = do - dontwant <- P.try . P.lookAhead $ failIfOk - p <- P.try $ P.lookAhead (optional p) - when (isJust p) $ fail "failureIf" - dontwant - --- Gives this var an id based on its position - a useful trick to --- obtain a variable whose id won't match any other id in the file --- `positionalVar a Var.missingResult` -positionalVar :: (Annotated a, Var v) => a -> v -> v -positionalVar a v = - let s = start (ann a) - line = fromIntegral $ L.line s - col = fromIntegral $ L.column s - -- this works as long as no lines more than 50k characters - in Var.freshenId (line * 50000 + col) v diff --git a/parser-typechecker/src/Unison/Parsers.hs b/parser-typechecker/src/Unison/Parsers.hs deleted file mode 100644 index be787c3d1d..0000000000 --- a/parser-typechecker/src/Unison/Parsers.hs +++ /dev/null @@ -1,88 +0,0 @@ -module Unison.Parsers where - -import Unison.Prelude - -import qualified Data.Text as Text -import Data.Text.IO ( readFile ) -import Prelude hiding ( readFile ) -import qualified Unison.Names3 as Names -import qualified Unison.Builtin as Builtin -import qualified Unison.FileParser as FileParser -import Unison.Parser ( Ann ) -import qualified Unison.Parser as Parser -import Unison.PrintError ( prettyParseError - , defaultWidth ) -import Unison.Symbol ( Symbol ) -import Unison.Term ( Term ) -import qualified Unison.TermParser as TermParser -import Unison.Type ( Type ) -import qualified Unison.TypeParser as TypeParser -import Unison.UnisonFile ( UnisonFile ) -import qualified Unison.Util.Pretty as Pr -import Unison.Var ( Var 
) - -unsafeGetRightFrom :: (Var v, Show v) => String -> Either (Parser.Err v) a -> a -unsafeGetRightFrom src = - either (error . Pr.toANSI defaultWidth . prettyParseError src) id - -parse - :: Var v - => Parser.P v a - -> String - -> Parser.ParsingEnv - -> Either (Parser.Err v) a -parse p = Parser.run (Parser.root p) - -parseTerm - :: Var v - => String - -> Parser.ParsingEnv - -> Either (Parser.Err v) (Term v Ann) -parseTerm = parse TermParser.term - -parseType - :: Var v - => String - -> Parser.ParsingEnv - -> Either (Parser.Err v) (Type v Ann) -parseType = Parser.run (Parser.root TypeParser.valueType) - -parseFile - :: Var v - => FilePath - -> String - -> Parser.ParsingEnv - -> Either (Parser.Err v) (UnisonFile v Ann) -parseFile filename s = Parser.run' (Parser.rootFile FileParser.file) s filename - -readAndParseFile - :: Var v - => Parser.ParsingEnv - -> FilePath - -> IO (Either (Parser.Err v) (UnisonFile v Ann)) -readAndParseFile penv fileName = do - txt <- readFile fileName - let src = Text.unpack txt - pure $ parseFile fileName src penv - -unsafeParseTerm :: Var v => String -> Parser.ParsingEnv -> Term v Ann -unsafeParseTerm s = fmap (unsafeGetRightFrom s) . parseTerm $ s - -unsafeReadAndParseFile - :: Parser.ParsingEnv -> FilePath -> IO (UnisonFile Symbol Ann) -unsafeReadAndParseFile penv fileName = do - txt <- readFile fileName - let str = Text.unpack txt - pure . 
unsafeGetRightFrom str $ parseFile fileName str penv - -unsafeParseFileBuiltinsOnly - :: FilePath -> IO (UnisonFile Symbol Ann) -unsafeParseFileBuiltinsOnly = - unsafeReadAndParseFile $ Parser.ParsingEnv - mempty - (Names.Names Builtin.names0 mempty) - -unsafeParseFile - :: String -> Parser.ParsingEnv -> UnisonFile Symbol Ann -unsafeParseFile s pEnv = unsafeGetRightFrom s $ parseFile "" s pEnv - diff --git a/parser-typechecker/src/Unison/Path.hs b/parser-typechecker/src/Unison/Path.hs deleted file mode 100644 index 5ce88ed774..0000000000 --- a/parser-typechecker/src/Unison/Path.hs +++ /dev/null @@ -1,54 +0,0 @@ --- | --- Provides a typeclass for a general concept of a path into --- a treelike structure. We have a root or empty path, paths --- may be concatenated, and a pair of paths may be factored into --- paths relative to their lowest common ancestor in the tree. - -module Unison.Path where - -import Unison.Prelude - --- | Satisfies: --- * `extend root p == p` and `extend p root == p` --- * `extend` is associative, `extend (extend p1 p2) p3 == extend p1 (extend p2 p3)` --- * `lca root p == root` and `lca p root == root` --- * `case factor p p2 of (r,p',p2') -> extend r p' == p && extend r p2' == p2` -class Path p where - -- | The root or empty path - root :: p - -- | Concatenate two paths - extend :: p -> p -> p - -- | Extract the lowest common ancestor and the path from the LCA to each argument - factor :: p -> p -> (p,(p,p)) - -- | Satisfies `factor (parent p) p == (parent p, (root, tl)` and - -- `extend (parent p) tl == p` - parent :: p -> p - --- | Compute the lowest common ancestor of two paths -lca :: Path p => p -> p -> p -lca p p2 = fst (factor p p2) - --- | `isSubpath p1 p2` is true if `p2 == extend p1 x` for some `x` -isSubpath :: (Eq p, Path p) => p -> p -> Bool -isSubpath p1 p2 = lca p1 p2 == p1 - -instance Eq a => Path (Maybe a) where - root = Nothing - extend = (<|>) - parent _ = Nothing - factor p1 p2 | p1 == p2 = (p1, (Nothing, Nothing)) - factor 
p1 p2 = (Nothing, (p1,p2)) - -instance Eq a => Path [a] where - root = [] - extend = (++) - parent p | null p = [] - parent p = init p - factor p1 p2 = (take shared p1, (drop shared p1, drop shared p2)) - where shared = length (takeWhile id $ zipWith (==) p1 p2) - -instance Path () where - root = () - parent _ = () - extend _ _ = () - factor u _ = (u,(u,u)) diff --git a/parser-typechecker/src/Unison/PrettyPrintEnv.hs b/parser-typechecker/src/Unison/PrettyPrintEnv.hs deleted file mode 100644 index b20802cb90..0000000000 --- a/parser-typechecker/src/Unison/PrettyPrintEnv.hs +++ /dev/null @@ -1,142 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.PrettyPrintEnv where - -import Unison.Prelude - -import Unison.HashQualified ( HashQualified ) -import Unison.Name ( Name ) -import Unison.Names3 ( Names ) -import Unison.Reference ( Reference ) -import Unison.Referent ( Referent ) -import Unison.Util.List (safeHead) -import qualified Data.Map as Map -import qualified Unison.HashQualified as HQ -import qualified Unison.Name as Name -import qualified Unison.Names3 as Names -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.ConstructorType as CT -import qualified Unison.HashQualified' as HQ' -import qualified Data.Set as Set - -data PrettyPrintEnv = PrettyPrintEnv { - -- names for terms, constructors, and requests - terms :: Referent -> Maybe HashQualified, - -- names for types - types :: Reference -> Maybe HashQualified } - -patterns :: PrettyPrintEnv -> Reference -> Int -> Maybe HashQualified -patterns ppe r cid = terms ppe (Referent.Con r cid CT.Data) - <|>terms ppe (Referent.Con r cid CT.Effect) - -instance Show PrettyPrintEnv where - show _ = "PrettyPrintEnv" - -fromNames :: Int -> Names -> PrettyPrintEnv -fromNames len names = PrettyPrintEnv terms' types' where - terms' r = shortestName . Set.map HQ'.toHQ $ Names.termName len r names - types' r = shortestName . 
Set.map HQ'.toHQ $ Names.typeName len r names - shortestName ns = safeHead $ HQ.sortByLength (toList ns) - -fromSuffixNames :: Int -> Names -> PrettyPrintEnv -fromSuffixNames len names = fromNames len (Names.suffixify names) - -fromNamesDecl :: Int -> Names -> PrettyPrintEnvDecl -fromNamesDecl len names = - PrettyPrintEnvDecl (fromNames len names) (fromSuffixNames len names) - --- A pair of PrettyPrintEnvs: --- - suffixifiedPPE uses the shortest unique suffix --- - unsuffixifiedPPE uses the shortest full name --- --- Generally, we want declarations LHS (the `x` in `x = 23`) to use the --- unsuffixified names, so the LHS is an accurate description of where in the --- namespace the definition lives. For everywhere else, we can use the --- suffixified version. -data PrettyPrintEnvDecl = PrettyPrintEnvDecl { - unsuffixifiedPPE :: PrettyPrintEnv, - suffixifiedPPE :: PrettyPrintEnv - } deriving Show - --- declarationPPE uses the full name for references that are --- part the same cycle as the input reference, used to ensures --- recursive definitions are printed properly, for instance: --- --- foo.bar x = foo.bar x --- and not --- foo.bar x = bar x -declarationPPE :: PrettyPrintEnvDecl -> Reference -> PrettyPrintEnv -declarationPPE ppe rd = PrettyPrintEnv tm ty where - comp = Reference.members (Reference.componentFor rd) - tm r0@(Referent.Ref r) = if Set.member r comp - then terms (unsuffixifiedPPE ppe) r0 - else terms (suffixifiedPPE ppe) r0 - tm r = terms (suffixifiedPPE ppe) r - ty r = if Set.member r comp then types (unsuffixifiedPPE ppe) r - else types (suffixifiedPPE ppe) r - --- Left-biased union of environments -unionLeft :: PrettyPrintEnv -> PrettyPrintEnv -> PrettyPrintEnv -unionLeft e1 e2 = PrettyPrintEnv - (\r -> terms e1 r <|> terms e2 r) - (\r -> types e1 r <|> types e2 r) - -assignTermName :: Referent -> HashQualified -> PrettyPrintEnv -> PrettyPrintEnv -assignTermName r name = (fromTermNames [(r,name)] `unionLeft`) - -fromTypeNames :: 
[(Reference,HashQualified)] -> PrettyPrintEnv -fromTypeNames types = let - m = Map.fromList types - in PrettyPrintEnv (const Nothing) (`Map.lookup` m) - -fromTermNames :: [(Referent,HashQualified)] -> PrettyPrintEnv -fromTermNames tms = let - m = Map.fromList tms - in PrettyPrintEnv (`Map.lookup` m) (const Nothing) - --- todo: these need to be a dynamic length, but we need additional info -todoHashLength :: Int -todoHashLength = 10 - -termName :: PrettyPrintEnv -> Referent -> HashQualified -termName env r = - fromMaybe (HQ.take todoHashLength $ HQ.fromReferent r) (terms env r) - -typeName :: PrettyPrintEnv -> Reference -> HashQualified -typeName env r = - fromMaybe (HQ.take todoHashLength $ HQ.fromReference r) (types env r) - -patternName :: PrettyPrintEnv -> Reference -> Int -> HashQualified -patternName env r cid = - case patterns env r cid of - Just name -> name - Nothing -> HQ.take todoHashLength $ HQ.fromPattern r cid - -instance Monoid PrettyPrintEnv where - mempty = PrettyPrintEnv (const Nothing) (const Nothing) - mappend = unionLeft -instance Semigroup PrettyPrintEnv where - (<>) = mappend - --- Type aliases relating to Fully-Qualified Names, e.g. 'Acme.API.foo' --- Used primarily by the FQN elision code - see TermPrinter.PrintAnnotation. - --- Note that a Suffix can include dots. -type Suffix = Text --- Each member of a Prefix list is dot-free. -type Prefix = [Text] --- Keys are FQNs, values are shorter names which are equivalent, thanks to use --- statements that are in scope. -type Imports = Map Name Suffix - --- Give the shortened version of an FQN, if there's been a `use` statement for that FQN. -elideFQN :: Imports -> HQ.HashQualified -> HQ.HashQualified -elideFQN imports hq = - let hash = HQ.toHash hq - name' = do name <- HQ.toName hq - let hit = fmap Name.unsafeFromText (Map.lookup name imports) - -- Cut out the "const id $" to get tracing of FQN elision attempts. 
- let t = const id $ trace ("hit: " ++ show hit ++ " finding: " ++ show hq ++ " in imports: " ++ show imports) - t (pure $ fromMaybe name hit) - in HQ.fromNameHash name' hash diff --git a/parser-typechecker/src/Unison/PrettyTerminal.hs b/parser-typechecker/src/Unison/PrettyTerminal.hs deleted file mode 100644 index bcedc524bf..0000000000 --- a/parser-typechecker/src/Unison/PrettyTerminal.hs +++ /dev/null @@ -1,51 +0,0 @@ -module Unison.PrettyTerminal where - -import Unison.Util.Less (less) -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.ColorText as CT -import qualified System.Console.Terminal.Size as Terminal -import Data.List (dropWhileEnd) -import Data.Char (isSpace) - -stripSurroundingBlanks :: String -> String -stripSurroundingBlanks s = unlines (dropWhile isBlank . dropWhileEnd isBlank $ lines s) where - isBlank line = all isSpace line - --- like putPrettyLn' but prints a blank line before and after. -putPrettyLn :: P.Pretty CT.ColorText -> IO () -putPrettyLn p | p == mempty = pure () -putPrettyLn p = do - width <- getAvailableWidth - less . P.toANSI width $ P.border 2 p - -putPrettyLnUnpaged :: P.Pretty CT.ColorText -> IO () -putPrettyLnUnpaged p | p == mempty = pure () -putPrettyLnUnpaged p = do - width <- getAvailableWidth - putStrLn . P.toANSI width $ P.border 2 p - -putPrettyLn' :: P.Pretty CT.ColorText -> IO () -putPrettyLn' p | p == mempty = pure () -putPrettyLn' p = do - width <- getAvailableWidth - less . P.toANSI width $ p - -clearCurrentLine :: IO () -clearCurrentLine = do - width <- getAvailableWidth - putStr "\r" - putStr . replicate width $ ' ' - putStr "\r" - -putPretty' :: P.Pretty CT.ColorText -> IO () -putPretty' p = do - width <- getAvailableWidth - putStr . 
P.toANSI width $ p - -getAvailableWidth :: IO Int -getAvailableWidth = - maybe 80 (\s -> 100 `min` Terminal.width s) <$> Terminal.size - -putPrettyNonempty :: P.Pretty P.ColorText -> IO () -putPrettyNonempty msg = do - if msg == mempty then pure () else putPrettyLn msg diff --git a/parser-typechecker/src/Unison/PrintError.hs b/parser-typechecker/src/Unison/PrintError.hs deleted file mode 100644 index dd73a2371d..0000000000 --- a/parser-typechecker/src/Unison/PrintError.hs +++ /dev/null @@ -1,1243 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.PrintError where - -import Unison.Prelude - -import Control.Lens ((%~)) -import Control.Lens.Tuple (_1, _2, _3) -import Data.List (intersperse) -import Data.List.Extra (nubOrd) -import qualified Data.List.NonEmpty as Nel -import qualified Data.Map as Map -import Data.Sequence (Seq (..)) -import qualified Data.Set as Set -import qualified Data.Text as Text -import Data.Void (Void) -import qualified Text.Megaparsec as P -import qualified Unison.ABT as ABT -import Unison.Builtin.Decls (pattern TupleType') -import qualified Unison.HashQualified as HQ -import Unison.Kind (Kind) -import qualified Unison.Kind as Kind -import qualified Unison.Lexer as L -import Unison.Parser (Ann (..), Annotated, ann) -import qualified Unison.Parser as Parser -import qualified Unison.Reference as R -import Unison.Referent (Referent) -import Unison.Result (Note (..)) -import qualified Unison.Settings as Settings -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.Typechecker.Context as C -import Unison.Typechecker.TypeError -import qualified Unison.Typechecker.TypeVar as TypeVar -import qualified Unison.UnisonFile as UF -import Unison.Util.AnnotatedText (AnnotatedText) -import qualified Unison.Util.AnnotatedText as AT -import Unison.Util.ColorText (Color) -import qualified 
Unison.Util.ColorText as Color -import Unison.Util.Monoid (intercalateMap) -import Unison.Util.Range (Range (..), startingLine) -import Unison.Var (Var) -import qualified Unison.Var as Var -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.TermPrinter as TermPrinter -import qualified Unison.Util.Pretty as Pr -import Unison.Util.Pretty (Pretty, ColorText) -import qualified Unison.Names3 as Names -import qualified Unison.Name as Name -import Unison.HashQualified (HashQualified) -import Unison.Type (Type) -import Unison.NamePrinter (prettyHashQualified0) - -type Env = PPE.PrettyPrintEnv - -pattern Code = Color.Blue -pattern Type1 = Color.HiBlue -pattern Type2 = Color.Green -pattern ErrorSite = Color.HiRed -pattern TypeKeyword = Color.Yellow -pattern AbilityKeyword = Color.Green -pattern Identifier = Color.Bold - -defaultWidth :: Pr.Width -defaultWidth = 60 - -fromOverHere' - :: Ord a - => String - -> [Maybe (Range, a)] - -> [Maybe (Range, a)] - -> Pretty (AnnotatedText a) -fromOverHere' s spots0 removing = - fromOverHere s (catMaybes spots0) (catMaybes removing) - -fromOverHere - :: Ord a => String -> [(Range, a)] -> [(Range, a)] -> Pretty (AnnotatedText a) -fromOverHere src spots0 removing = - let spots = toList $ Set.fromList spots0 Set.\\ Set.fromList removing - in case length spots of - 0 -> mempty - 1 -> "\n from right here:\n\n" <> showSource src spots - _ -> "\n from these spots, respectively:\n\n" <> showSource src spots - -showTypeWithProvenance - :: (Var v, Annotated a, Ord style) - => Env - -> String - -> style - -> Type v a - -> Pretty (AnnotatedText style) -showTypeWithProvenance env src color typ = - style color (renderType' env typ) - <> ".\n" - <> fromOverHere' src [styleAnnotated color typ] [] - -styleAnnotated :: Annotated a => sty -> a -> Maybe (Range, sty) -styleAnnotated sty a = (, sty) <$> rangeForAnnotated a - -style :: s -> String -> Pretty (AnnotatedText s) -style sty str = Pr.lit . 
AT.annotate sty $ fromString str - -stylePretty :: Color -> Pretty ColorText -> Pretty ColorText -stylePretty sty str = Pr.map (AT.annotate sty) str - -describeStyle :: Color -> Pretty ColorText -describeStyle ErrorSite = "in " <> style ErrorSite "red" -describeStyle Type1 = "in " <> style Type1 "blue" -describeStyle Type2 = "in " <> style Type2 "green" -describeStyle _ = "" - --- Render an informational typechecking note -renderTypeInfo - :: forall v loc sty - . (Var v, Annotated loc, Ord loc, Show loc) - => TypeInfo v loc - -> Env - -> Pretty (AnnotatedText sty) -renderTypeInfo i env = case i of - TopLevelComponent {..} -> case definitions of - [def] -> - Pr.wrap "🌟 I found and typechecked a definition:" <> Pr.newline <> mconcat - (renderOne def) - [] -> mempty - _ -> - Pr.wrap "🎁 These mutually dependent definitions typechecked:" - <> Pr.newline - <> intercalateMap Pr.newline (foldMap ("\t" <>) . renderOne) definitions - where - renderOne :: IsString s => (v, Type v loc, RedundantTypeAnnotation) -> [s] - renderOne (v, typ, _) = - [fromString . Text.unpack $ Var.name v, " : ", renderType' env typ] - - --- Render a type error -renderTypeError - :: forall v loc - . 
(Var v, Annotated loc, Ord loc, Show loc) - => TypeError v loc - -> Env - -> String - -> Pretty ColorText -renderTypeError e env src = case e of - BooleanMismatch {..} -> mconcat - [ Pr.wrap $ mconcat - [ preamble - , " " - , style Type1 "Boolean" - , ", but this one is " - , style Type2 (renderType' env foundType) - , ":" - ] - , Pr.lineSkip - , showSourceMaybes src [siteS] - , fromOverHere' src [typeS] [siteS] - , debugNoteLoc $ mconcat - [ "loc debug:" - , "\n mismatchSite: " - , annotatedToEnglish mismatchSite - , "\n foundType: " - , annotatedToEnglish foundType - , "\n" - ] - , debugSummary note - ] - where - siteS = styleAnnotated Type2 mismatchSite - typeS = styleAnnotated Type2 foundType - preamble = case getBooleanMismatch of - CondMismatch -> - "The condition for an " - <> style ErrorSite "if" - <> "-expression has to be" - AndMismatch -> - "The arguments to " <> style ErrorSite "and" <> " have to be" - OrMismatch -> - "The arguments to " <> style ErrorSite "or" <> " have to be" - GuardMismatch -> - "The guard expression for a " - <> style ErrorSite "match" - <> "/" - <> style ErrorSite "with" - <> " has to be" - - ExistentialMismatch {..} -> mconcat - [ Pr.wrap $ mconcat - [ preamble - , " " - , "Here, one is " - , style Type1 (renderType' env expectedType) - , " and another is " - , style Type2 (renderType' env foundType) - , ":"] - , Pr.lineSkip - , showSourceMaybes src [mismatchSiteS, expectedLocS] - , fromOverHere' src - [expectedTypeS, mismatchedTypeS] - [mismatchSiteS, expectedLocS] - , intLiteralSyntaxTip mismatchSite expectedType - , debugNoteLoc $ mconcat - [ "\nloc debug:" - , "\n mismatchSite: " - , annotatedToEnglish mismatchSite - , "\n foundType: " - , annotatedToEnglish foundType - , "\n expectedType: " - , annotatedToEnglish expectedType - , "\n expectedLoc: " - , annotatedToEnglish expectedLoc - , "\n" - ] - , debugSummary note - ] - where - mismatchedTypeS = styleAnnotated Type2 foundType - mismatchSiteS = styleAnnotated Type2 
mismatchSite - expectedTypeS = styleAnnotated Type1 expectedType - expectedLocS = styleAnnotated Type1 expectedLoc - preamble = case getExistentialMismatch of - IfBody -> mconcat - [ "The " - , style ErrorSite "else" - , " clause of an " - , style ErrorSite "if" - , " expression needs to have the same type as the " - , style ErrorSite "then" - , " clause." - ] - VectorBody -> "The elements of a vector all need to have the same type." - CaseBody -> mconcat - [ "Each case of a " - , style ErrorSite "match" - , "/" - , style ErrorSite "with" - , " expression " - , "need to have the same type." - ] - NotFunctionApplication {..} -> mconcat - [ "This looks like a function call, but with a " - , style Type1 (renderType' env ft) - , " where the function should be. Are you missing an operator?\n\n" - , annotatedAsStyle Type1 src f - , debugSummary note - ] - FunctionApplication {..} - -> let - fte = Type.removePureEffects ft - fteFreeVars = Set.map TypeVar.underlying $ ABT.freeVars fte - showVar (v, _t) = Set.member v fteFreeVars - solvedVars' = filter showVar solvedVars - in - mconcat - [ "The " - , ordinal argNum - , " argument to the function " - , style ErrorSite (renderTerm env f) - , " is " - , style Type2 (renderType' env foundType) - , ", but I was expecting " - , style Type1 (renderType' env expectedType) - , ":\n\n" - , showSourceMaybes src - [ (, Type1) <$> rangeForAnnotated expectedType - , (, Type2) <$> rangeForAnnotated foundType - , (, Type2) <$> rangeForAnnotated arg - , (, ErrorSite) <$> rangeForAnnotated f ] - , intLiteralSyntaxTip arg expectedType - -- todo: factor this out and use in ExistentialMismatch and any other - -- "recursive subtypes" situations - , case leafs of - Nothing -> mempty - Just (foundLeaf, expectedLeaf) -> mconcat - [ "\n" - , "More specifically, I found " - , style Type2 (renderType' env foundLeaf) - , " where I was expecting " - , style Type1 (renderType' env expectedLeaf) - , ":\n\n" - , showSourceMaybes - src - [ (, Type1) <$> 
rangeForAnnotated expectedLeaf - , (, Type2) <$> rangeForAnnotated foundLeaf - ] - ] - , case solvedVars' of - _ : _ -> - let - go :: (v, C.Type v loc) -> Pretty ColorText - go (v, t) = mconcat - [ " " - , renderVar v - , " = " - , style ErrorSite (renderType' env t) - , ", from here:\n\n" - , showSourceMaybes - src - [(, ErrorSite) <$> rangeForAnnotated t] - , "\n" - ] - in - mconcat - [ "\n" - , "because the " - , style ErrorSite (renderTerm env f) - , " function has type" - , "\n\n" - , " " - , renderType' env fte - , "\n\n" - , "where:" - , "\n\n" - , mconcat (go <$> solvedVars') - ] - [] -> mempty - , debugNoteLoc - . mconcat - $ [ "\nloc debug:" - , style ErrorSite "\n f: " - , annotatedToEnglish f - , style Type2 "\n foundType: " - , annotatedToEnglish foundType - , style Type1 "\n expectedType: " - , annotatedToEnglish expectedType - -- , "\n expectedLoc: ", annotatedToEnglish expectedLoc - ] - , debugSummary note - ] - Mismatch {..} -> mconcat - [ "I found a value of type " - , style Type1 (renderType' env foundLeaf) - , " where I expected to find one of type " - , style Type2 (renderType' env expectedLeaf) - , ":\n\n" - , showSourceMaybes - src - [ -- these are overwriting the colored ranges for some reason? - -- (,Color.ForceShow) <$> rangeForAnnotated mismatchSite - -- , (,Color.ForceShow) <$> rangeForType foundType - -- , (,Color.ForceShow) <$> rangeForType expectedType - -- , - (, Type1) <$> rangeForAnnotated mismatchSite - , (, Type2) <$> rangeForAnnotated expectedLeaf - ] - , fromOverHere' src - [styleAnnotated Type1 foundLeaf] - [styleAnnotated Type1 mismatchSite] - , intLiteralSyntaxTip mismatchSite expectedType - , debugNoteLoc - . 
mconcat - $ [ "\nloc debug:" - , "\n mismatchSite: " - , annotatedToEnglish mismatchSite - , "\n foundType: " - , annotatedToEnglish foundType - , "\n foundLeaf: " - , annotatedToEnglish foundLeaf - , "\n expectedType: " - , annotatedToEnglish expectedType - , "\n expectedLeaf: " - , annotatedToEnglish expectedLeaf - , "\n" - ] - , debugSummary note - ] - AbilityCheckFailure {..} -> mconcat - [ "The expression " - , describeStyle ErrorSite - , " " - , case toList requested of - [] -> error "unpossible" - [e] -> "needs the {" <> renderType' env e <> "} ability," - requested -> - " needs these abilities: {" - <> commas (renderType' env) requested - <> "}," - , " but " - , case toList ambient of - [] -> "this location does not have access to any abilities." - [e] -> - "this location only has access to the {" - <> renderType' env e - <> "} ability," - ambient -> - "this location only has access to these abilities: " - <> "{" - <> commas (renderType' env) ambient - <> "}" - , "\n\n" - , annotatedAsErrorSite src abilityCheckFailureSite - , debugSummary note - ] - UnguardedLetRecCycle vs locs _ -> mconcat - [ "These definitions depend on each other cyclically but aren't guarded " - , "by a lambda: " <> intercalateMap ", " renderVar vs - , "\n" - , showSourceMaybes src [ (,ErrorSite) <$> rangeForAnnotated loc | loc <- locs ]] - - UnknownType {..} -> mconcat [ - if ann typeSite == Intrinsic then - "I don't know about the builtin type " <> style ErrorSite (renderVar unknownTypeV) <> ". " - else if ann typeSite == External then - "I don't know about the type " <> style ErrorSite (renderVar unknownTypeV) <> ". " - else - "I don't know about the type " <> style ErrorSite (renderVar unknownTypeV) <> ":\n" - <> annotatedAsErrorSite src typeSite - , "Make sure it's imported and spelled correctly." 
- ] - UnknownTerm {..} -> - let (correct, wrongTypes, wrongNames) = - foldr sep id suggestions ([], [], []) - sep (C.Suggestion name typ _ match) r = - case match of - C.Exact -> (_1 %~ ((name, typ) :)) . r - C.WrongType -> (_2 %~ ((name, typ) :)) . r - C.WrongName -> (_3 %~ ((name, typ) :)) . r - in mconcat - [ "I'm not sure what " - , style ErrorSite (Var.nameStr unknownTermV) - , " means at " - , annotatedToEnglish termSite - , "\n\n" - , annotatedAsErrorSite src termSite - , case expectedType of - Type.Var' (TypeVar.Existential _ _) -> "\nThere are no constraints on its type." - _ -> - "\nWhatever it is, it has a type that conforms to " - <> style Type1 (renderType' env expectedType) - <> ".\n" - -- ++ showTypeWithProvenance env src Type1 expectedType - , case correct of - [] -> case wrongTypes of - [] -> case wrongNames of - [] -> mempty - wrongs -> formatWrongs wrongNameText wrongs - wrongs -> formatWrongs wrongTypeText wrongs - suggs -> mconcat - [ "I found some terms in scope that have matching names and types. 
" - , "Maybe you meant one of these:\n\n" - , intercalateMap "\n" formatSuggestion suggs - ] - ] - DuplicateDefinitions {..} -> - mconcat - [ Pr.wrap $ mconcat - [ "I found" - , Pr.shown (length defns) - , names - , "with multiple definitions:" - ] - , Pr.lineSkip - , Pr.spaced ((\(v, _locs) -> renderVar v) <$> defns) - , debugSummary note - ] - where - names = - case defns of - _ Nel.:| [] -> "name" - _ -> "names" - Other (C.cause -> C.HandlerOfUnexpectedType loc typ) -> - Pr.lines [ - Pr.wrap "The handler used here", "", - annotatedAsErrorSite src loc, - Pr.wrap $ - "has type " <> stylePretty ErrorSite (Pr.group (renderType' env typ)) - <> "but I'm expecting a function of the form" - <> Pr.group (Pr.blue (renderType' env exHandler) <> ".") - ] - where - exHandler :: C.Type v loc - exHandler = fmap (const loc) $ - Type.arrow () - (Type.apps' (Type.ref () Type.effectRef) - [Type.var () (Var.named "e"), Type.var () (Var.named "a") ]) - (Type.var () (Var.named "o")) - - Other note -> mconcat - [ "Sorry, you hit an error we didn't make a nice message for yet.\n\n" - , "Here is a summary of the Note:\n" - , summary note - ] - where - wrongTypeText pl = mconcat - [ "I found " - , pl "a term" "some terms" - , " in scope with " - , pl "a " "" - , "matching name" - , pl "" "s" - , " but " - , pl "a " "" - , "different type" - , pl "" "s" - , ". " - , "If " - , pl "this" "one of these" - , " is what you meant, try using the fully qualified name and I might " - , "be able to give you a more illuminating error message: \n\n" - ] - wrongNameText pl = mconcat - [ "I found " - , pl "a term" "some terms" - , " in scope with " - , pl "a " "" - , "matching type" - , pl "" "s" - , " but " - , pl "a " "" - , "different name" - , pl "" "s" - , ". 
" - , "Maybe you meant " - , pl "this" "one of these" - , ":\n\n" - ] - formatSuggestion :: (Text, C.Type v loc) -> Pretty ColorText - formatSuggestion (name, typ) = - " - " <> fromString (Text.unpack name) <> " : " <> renderType' env typ - formatWrongs txt wrongs = - let sz = length wrongs - pl a b = if sz == 1 then a else b - in mconcat [txt pl, intercalateMap "\n" formatSuggestion wrongs] - ordinal :: (IsString s) => Int -> s - ordinal n = fromString $ show n ++ case last (show n) of - '1' -> "st" - '2' -> "nd" - '3' -> "rd" - _ -> "th" - debugNoteLoc a = if Settings.debugNoteLoc then a else mempty - debugSummary :: C.ErrorNote v loc -> Pretty ColorText - debugSummary note = - if Settings.debugNoteSummary then summary note else mempty - summary :: C.ErrorNote v loc -> Pretty ColorText - summary note = mconcat - [ "\n" - , " simple cause:\n" - , " " - , simpleCause (C.cause note) - , "\n" - , case toList (C.path note) of - [] -> " path: (empty)\n" - l -> " path:\n" <> mconcat (simplePath <$> l) - ] - simplePath :: C.PathElement v loc -> Pretty ColorText - simplePath e = " " <> simplePath' e <> "\n" - simplePath' :: C.PathElement v loc -> Pretty ColorText - simplePath' = \case - C.InSynthesize e -> "InSynthesize e=" <> renderTerm env e - C.InSubtype t1 t2 -> - "InSubtype t1=" <> renderType' env t1 <> ", t2=" <> renderType' env t2 - C.InCheck e t -> - "InCheck e=" <> renderTerm env e <> "," <> " t=" <> renderType' env t - C.InInstantiateL v t -> - "InInstantiateL v=" <> renderVar v <> ", t=" <> renderType' env t - C.InInstantiateR t v -> - "InInstantiateR t=" <> renderType' env t <> " v=" <> renderVar v - C.InSynthesizeApp t e n -> - "InSynthesizeApp t=" - <> renderType' env t - <> ", e=" - <> renderTerm env e - <> ", n=" - <> fromString (show n) - C.InFunctionCall vs f ft es -> - "InFunctionCall vs=[" - <> commas renderVar vs - <> "]" - <> ", f=" - <> renderTerm env f - <> ", ft=" - <> renderType' env ft - <> ", es=[" - <> commas (renderTerm env) es - <> "]" - 
C.InIfCond -> "InIfCond" - C.InIfBody loc -> "InIfBody thenBody=" <> annotatedToEnglish loc - C.InAndApp -> "InAndApp" - C.InOrApp -> "InOrApp" - C.InVectorApp loc -> "InVectorApp firstTerm=" <> annotatedToEnglish loc - C.InMatch loc -> "InMatch firstBody=" <> annotatedToEnglish loc - C.InMatchGuard -> "InMatchGuard" - C.InMatchBody -> "InMatchBody" - simpleCause :: C.Cause v loc -> Pretty ColorText - simpleCause = \case - C.TypeMismatch c -> - mconcat ["TypeMismatch\n", " context:\n", renderContext env c] - C.HandlerOfUnexpectedType loc typ -> - mconcat ["HandlerOfUnexpectedType\n", Pr.shown loc, "type:\n", renderType' env typ ] - C.IllFormedType c -> - mconcat ["IllFormedType\n", " context:\n", renderContext env c] - C.UnguardedLetRecCycle vs _ts -> - "Unguarded cycle of definitions: " <> - foldMap renderVar vs - C.UnknownSymbol loc v -> mconcat - [ "UnknownSymbol: " - , annotatedToEnglish loc - , " " <> renderVar v - , "\n\n" - , annotatedAsErrorSite src loc - ] - C.UnknownTerm loc v suggestions typ -> mconcat - [ "UnknownTerm: " - , annotatedToEnglish loc - , " " - , renderVar v - , "\n\n" - , annotatedAsErrorSite src loc - , "Suggestions: " - , mconcat (renderSuggestion env <$> suggestions) - , "\n\n" - , "Type: " - , renderType' env typ - ] - C.AbilityCheckFailure ambient requested c -> mconcat - [ "AbilityCheckFailure: " - , "ambient={" - , commas (renderType' env) ambient - , "} requested={" - , commas (renderType' env) requested - , "}\n" - , renderContext env c - ] - C.EffectConstructorWrongArgCount e a r cid -> mconcat - [ "EffectConstructorWrongArgCount:" - , " expected=" - , (fromString . show) e - , ", actual=" - , (fromString . 
show) a - , ", reference=" - , showConstructor env r cid - ] - C.MalformedEffectBind ctorType ctorResult es -> mconcat - [ "MalformedEffectBind: " - , " ctorType=" - , renderType' env ctorType - , " ctorResult=" - , renderType' env ctorResult - , " effects=" - , fromString (show es) - ] - C.PatternArityMismatch loc typ args -> mconcat - [ "PatternArityMismatch:\n" - , " loc=" - , annotatedToEnglish loc - , "\n" - , " typ=" - , renderType' env typ - , "\n" - , " args=" - , fromString (show args) - , "\n" - ] - C.DuplicateDefinitions vs -> - let go :: (v, [loc]) -> Pretty (AnnotatedText a) - go (v, locs) = - "[" - <> renderVar v - <> mconcat (intersperse " : " $ annotatedToEnglish <$> locs) - <> "]" - in "DuplicateDefinitions:" <> mconcat (go <$> Nel.toList vs) - C.ConcatPatternWithoutConstantLength loc typ -> mconcat - [ "ConcatPatternWithoutConstantLength:\n" - , " loc=" - , annotatedToEnglish loc - , "\n" - , " typ=" - , renderType' env typ - , "\n" - ] - -renderContext - :: (Var v, Ord loc) => Env -> C.Context v loc -> Pretty (AnnotatedText a) -renderContext env ctx@(C.Context es) = " Γ\n " - <> intercalateMap "\n " (showElem ctx . fst) (reverse es) - where - shortName :: (Var v, IsString loc) => v -> loc - shortName = fromString . Text.unpack . 
Var.name - showElem - :: (Var v, Ord loc) - => C.Context v loc - -> C.Element v loc - -> Pretty (AnnotatedText a) - showElem _ctx (C.Var v) = case v of - TypeVar.Universal x -> "@" <> renderVar x - TypeVar.Existential _ x -> "'" <> renderVar x - showElem ctx (C.Solved _ v (Type.Monotype t)) = - "'" <> shortName v <> " = " <> renderType' env (C.apply ctx t) - showElem ctx (C.Ann v t) = - shortName v <> " : " <> renderType' env (C.apply ctx t) - showElem _ (C.Marker v) = "|" <> shortName v <> "|" - -renderTerm :: (IsString s, Var v) => Env -> C.Term v loc -> s -renderTerm env e = - let s = Color.toPlain $ TermPrinter.pretty' (Just 80) env (TypeVar.lowerTerm e) - in if length s > Settings.renderTermMaxLength - then fromString (take Settings.renderTermMaxLength s <> "...") - else fromString s - --- | renders a type with no special styling -renderType' :: (IsString s, Var v) => Env -> Type v loc -> s -renderType' env typ = - fromString . Pr.toPlain defaultWidth $ renderType env (const id) typ - --- | `f` may do some styling based on `loc`. --- | You can pass `(const id)` if no styling is needed, or call `renderType'`. 
-renderType - :: Var v - => Env - -> (loc -> Pretty (AnnotatedText a) -> Pretty (AnnotatedText a)) - -> Type v loc - -> Pretty (AnnotatedText a) -renderType env f t = renderType0 env f (0 :: Int) (Type.removePureEffects t) - where - wrap :: (IsString a, Semigroup a) => a -> a -> Bool -> a -> a - wrap start end test s = if test then start <> s <> end else s - paren = wrap "(" ")" - curly = wrap "{" "}" - renderType0 env f p t = f (ABT.annotation t) $ case t of - Type.Ref' r -> showTypeRef env r - Type.Arrow' i (Type.Effect1' e o) -> - paren (p >= 2) $ go 2 i <> " ->{" <> go 1 e <> "} " <> go 1 o - Type.Arrow' i o -> paren (p >= 2) $ go 2 i <> " -> " <> go 1 o - Type.Ann' t k -> paren True $ go 1 t <> " : " <> renderKind k - TupleType' ts -> paren True $ commas (go 0) ts - Type.Apps' (Type.Ref' (R.Builtin "Sequence")) [arg] -> - "[" <> go 0 arg <> "]" - Type.Apps' f' args -> paren (p >= 3) $ spaces (go 3) (f' : args) - Type.Effects' es -> curly (p >= 3) $ commas (go 0) es - Type.Effect' es t -> case es of - [] -> go p t - _ -> "{" <> commas (go 0) es <> "} " <> go 3 t - Type.Effect1' e t -> paren (p >= 3) $ "{" <> go 0 e <> "}" <> go 3 t - Type.ForallsNamed' vs body -> - paren (p >= 1) $ if not Settings.debugRevealForalls - then go 0 body - else "forall " <> spaces renderVar vs <> " . 
" <> go 1 body - Type.Var' v -> renderVar v - _ -> error $ "pattern match failure in PrintError.renderType " ++ show t - where go = renderType0 env f - -renderSuggestion - :: (IsString s, Semigroup s, Var v) => Env -> C.Suggestion v loc -> s -renderSuggestion env sug = - fromString (Text.unpack $ C.suggestionName sug) <> " : " <> renderType' - env - (C.suggestionType sug) - -spaces :: (IsString a, Monoid a) => (b -> a) -> [b] -> a -spaces = intercalateMap " " - -arrows :: (IsString a, Monoid a) => (b -> a) -> [b] -> a -arrows = intercalateMap " ->" - -commas :: (IsString a, Monoid a) => (b -> a) -> [b] -> a -commas = intercalateMap ", " - -renderVar :: (IsString a, Var v) => v -> a -renderVar = fromString . Text.unpack . Var.name - -renderVar' :: (Var v, Annotated a) => Env -> C.Context v a -> v -> String -renderVar' env ctx v = case C.lookupSolved ctx v of - Nothing -> "unsolved" - Just t -> renderType' env $ Type.getPolytype t - -prettyVar :: Var v => v -> Pretty ColorText -prettyVar = Pr.text . Var.name - -renderKind :: Kind -> Pretty (AnnotatedText a) -renderKind Kind.Star = "*" -renderKind (Kind.Arrow k1 k2) = renderKind k1 <> " -> " <> renderKind k2 - -showTermRef :: IsString s => Env -> Referent -> s -showTermRef env r = fromString . HQ.toString $ PPE.termName env r - -showTypeRef :: IsString s => Env -> R.Reference -> s -showTypeRef env r = fromString . HQ.toString $ PPE.typeName env r - --- todo: do something different/better if cid not found -showConstructor :: IsString s => Env -> R.Reference -> Int -> s -showConstructor env r cid = fromString . 
HQ.toString $ - PPE.patternName env r cid - -styleInOverallType - :: (Var v, Annotated a, Eq a) - => Env - -> C.Type v a - -> C.Type v a - -> Color - -> Pretty ColorText -styleInOverallType e overallType leafType c = renderType e f overallType - where - f loc s = if loc == ABT.annotation leafType then Color.style c <$> s else s - -_posToEnglish :: IsString s => L.Pos -> s -_posToEnglish (L.Pos l c) = - fromString $ "Line " ++ show l ++ ", Column " ++ show c - -rangeForToken :: L.Token a -> Range -rangeForToken t = Range (L.start t) (L.end t) - -rangeToEnglish :: IsString s => Range -> s -rangeToEnglish (Range (L.Pos l c) (L.Pos l' c')) = - fromString - $ let showColumn = True - in - if showColumn - then if l == l' - then if c == c' - then "line " ++ show l ++ ", column " ++ show c - else "line " ++ show l ++ ", columns " ++ show c ++ "-" ++ show c' - else - "line " - ++ show l - ++ ", column " - ++ show c - ++ " through " - ++ "line " - ++ show l' - ++ ", column " - ++ show c' - else if l == l' - then "line " ++ show l - else "lines " ++ show l ++ "—" ++ show l' - -annotatedToEnglish :: (Annotated a, IsString s) => a -> s -annotatedToEnglish a = case ann a of - Intrinsic -> "an intrinsic" - External -> "an external" - Ann start end -> rangeToEnglish $ Range start end - - -rangeForAnnotated :: Annotated a => a -> Maybe Range -rangeForAnnotated a = case ann a of - Intrinsic -> Nothing - External -> Nothing - Ann start end -> Just $ Range start end - -showLexerOutput :: Bool -showLexerOutput = False - -renderNoteAsANSI - :: (Var v, Annotated a, Show a, Ord a) - => Pr.Width - -> Env - -> String - -> Note v a - -> String -renderNoteAsANSI w e s n = Pr.toANSI w $ printNoteWithSource e s n - -renderParseErrorAsANSI :: Var v => Pr.Width -> String -> Parser.Err v -> String -renderParseErrorAsANSI w src = Pr.toANSI w . 
prettyParseError src - -printNoteWithSource - :: (Var v, Annotated a, Show a, Ord a) - => Env - -> String - -> Note v a - -> Pretty ColorText -printNoteWithSource env _s (TypeInfo n) = prettyTypeInfo n env -printNoteWithSource _env s (Parsing e) = prettyParseError s e -printNoteWithSource env s (TypeError e) = prettyTypecheckError e env s -printNoteWithSource _env _s (NameResolutionFailures _es) = undefined -printNoteWithSource _env s (InvalidPath path term) = - fromString ("Invalid Path: " ++ show path ++ "\n") - <> annotatedAsErrorSite s term -printNoteWithSource _env s (UnknownSymbol v a) = - fromString ("Unknown symbol `" ++ Text.unpack (Var.name v) ++ "`\n\n") - <> annotatedAsErrorSite s a -printNoteWithSource _env _s (CompilerBug c) = - fromString $ "Compiler bug: " <> show c - -_printPosRange :: String -> L.Pos -> L.Pos -> String -_printPosRange s (L.Pos startLine startCol) _end = - -- todo: multi-line ranges - -- todo: ranges - _printArrowsAtPos s startLine startCol - -_printArrowsAtPos :: String -> Int -> Int -> String -_printArrowsAtPos s line column = - let lineCaret s i = s ++ if i == line then "\n" ++ columnCaret else "" - columnCaret = replicate (column - 1) '-' ++ "^" - source = unlines (uncurry lineCaret <$> lines s `zip` [1 ..]) - in source - --- Wow, epic view pattern for picking out a lexer error -pattern LexerError ts e <- Just (P.Tokens (firstLexerError -> Just (ts, e))) - -firstLexerError :: Foldable t => t (L.Token L.Lexeme) -> Maybe ([L.Token L.Lexeme], L.Err) -firstLexerError (toList -> ts@((L.payload -> L.Err e) : _)) = Just (ts, e) -firstLexerError _ = Nothing - -prettyParseError - :: forall v - . 
Var v - => String - -> Parser.Err v - -> Pretty ColorText -prettyParseError s = \case - P.TrivialError _ (LexerError ts (L.CloseWithoutMatchingOpen open close)) _ -> - "❗️ I found a closing " <> style ErrorSite (fromString close) <> - " here without a matching " <> style ErrorSite (fromString open) <> ".\n\n" <> - showSource s ((\t -> (rangeForToken t, ErrorSite)) <$> ts) - P.TrivialError sp unexpected expected - -> fromString - (P.parseErrorPretty @_ @Void (P.TrivialError sp unexpected expected)) - <> (case unexpected of - Just (P.Tokens (toList -> ts)) -> case ts of - [] -> mempty - _ -> showSource s $ (\t -> (rangeForToken t, ErrorSite)) <$> ts - _ -> mempty - ) - <> lexerOutput - P.FancyError _sp fancyErrors -> - mconcat (go' <$> Set.toList fancyErrors) <> lexerOutput - where - go' :: P.ErrorFancy (Parser.Error v) -> Pretty ColorText - go' (P.ErrorFail s) = - "The parser failed with this message:\n" <> fromString s - go' (P.ErrorIndentation ordering indent1 indent2) = mconcat - [ "The parser was confused by the indentation.\n" - , "It was expecting the reference level (" - , fromString (show indent1) - , ")\nto be " - , fromString (show ordering) - , " than/to the actual level (" - , fromString (show indent2) - , ").\n" - ] - go' (P.ErrorCustom e) = go e - errorVar v = style ErrorSite . fromString . Text.unpack $ Var.name v - go :: Parser.Error v -> Pretty ColorText - -- | UseInvalidPrefixSuffix (Either (L.Token Name) (L.Token Name)) (Maybe [L.Token Name]) - go (Parser.UseEmpty tok) = msg where - msg = Pr.indentN 2 . Pr.callout "😶" $ Pr.lines [ - Pr.wrap $ "I was expecting something after the " <> Pr.hiRed "use" <> "keyword", "", - tokenAsErrorSite s tok, - useExamples - ] - go (Parser.UseInvalidPrefixSuffix prefix suffix) = msg where - msg :: Pretty ColorText - msg = Pr.indentN 2 . Pr.blockedCallout . 
Pr.lines $ case (prefix, suffix) of - (Left tok, Just _) -> [ - Pr.wrap "The first argument of a `use` statement can't be an operator name:", "", - tokenAsErrorSite s tok, - useExamples - ] - (tok0, Nothing) -> let tok = either id id tok0 in [ - Pr.wrap $ "I was expecting something after " <> Pr.hiRed "here:", "", - tokenAsErrorSite s tok, - case Name.parent (L.payload tok) of - Nothing -> useExamples - Just parent -> Pr.wrap $ - "You can write" <> - Pr.group (Pr.blue $ "use " <> Pr.shown parent <> " " - <> Pr.shown (Name.unqualified (L.payload tok))) <> - "to introduce " <> Pr.backticked (Pr.shown (Name.unqualified (L.payload tok))) <> - "as a local alias for " <> Pr.backticked (Pr.shown (L.payload tok)) - ] - (Right tok, _) -> [ -- this is unpossible but rather than bomb, nice msg - "You found a Unison bug 🐞 here:", "", - tokenAsErrorSite s tok, - Pr.wrap $ - "This looks like a valid `use` statement," <> - "but the parser didn't recognize it. This is a Unison bug." - ] - go (Parser.DisallowedAbsoluteName t) = msg where - msg :: Pretty ColorText - msg = Pr.indentN 2 $ Pr.fatalCallout $ Pr.lines [ - Pr.wrap $ "I don't currently support creating definitions that start with" - <> Pr.group (Pr.blue "'.'" <> ":"), - "", - tokenAsErrorSite s t, - Pr.wrap $ "Use " <> Pr.blue "help messages.disallowedAbsolute" <> "to learn more.", - "" - ] - go (Parser.DuplicateTypeNames ts) = intercalateMap "\n\n" showDup ts where - showDup (v, locs) = - "I found multiple types with the name " <> errorVar v <> ":\n\n" <> - annotatedsStartingLineAsStyle ErrorSite s locs - go (Parser.DuplicateTermNames ts) = - Pr.fatalCallout $ intercalateMap "\n\n" showDup ts - where - showDup (v, locs) = Pr.lines [ - Pr.wrap $ - "I found multiple bindings with the name " <> Pr.group (errorVar v <> ":"), - annotatedsStartingLineAsStyle ErrorSite s locs - ] - go (Parser.TypeDeclarationErrors es) = let - unknownTypes = [ (v, a) | UF.UnknownType v a <- es ] - dupDataAndAbilities = [ (v, a, a2) | 
UF.DupDataAndAbility v a a2 <- es ] - unknownTypesMsg = - mconcat [ "I don't know about the type(s) " - , intercalateMap ", " errorVar (nubOrd $ fst <$> unknownTypes) - , ":\n\n" - , annotatedsAsStyle ErrorSite s (snd <$> unknownTypes) - ] - dupDataAndAbilitiesMsg = intercalateMap "\n\n" dupMsg dupDataAndAbilities - dupMsg (v, a, a2) = - mconcat [ "I found two types called " <> errorVar v <> ":" - , "\n\n" - , annotatedsStartingLineAsStyle ErrorSite s [a, a2]] - in if null unknownTypes - then dupDataAndAbilitiesMsg - else if null dupDataAndAbilities then unknownTypesMsg - else unknownTypesMsg <> "\n\n" <> dupDataAndAbilitiesMsg - go (Parser.DidntExpectExpression _tok (Just t@(L.payload -> L.SymbolyId "::" Nothing))) - = mconcat - [ "This looks like the start of an expression here but I was expecting a binding." - , "\nDid you mean to use a single " <> style Code ":" - , " here for a type signature?" - , "\n\n" - , tokenAsErrorSite s t - ] - go (Parser.DidntExpectExpression tok _nextTok) = mconcat - [ "This looks like the start of an expression here \n\n" - , tokenAsErrorSite s tok - , "\nbut at the file top-level, I expect one of the following:" - , "\n" - , "\n - A binding, like " <> t <> style Code " = 42" <> " OR" - , "\n " <> t <> style Code " : Nat" - , "\n " <> t <> style Code " = 42" - , "\n - A watch expression, like " <> style Code "> " <> t <> style Code - " + 1" - , "\n - An `ability` declaration, like " - <> style Code "ability Foo where ..." - , "\n - A `type` declaration, like " - <> style Code "type Optional a = None | Some a" - , "\n - A `namespace` declaration, like " - <> style Code "namespace Seq where ..." 
- , "\n" - ] - where t = style Code (fromString (P.showTokens (pure tok))) - go (Parser.ExpectedBlockOpen blockName tok@(L.payload -> L.Close)) = mconcat - [ "I was expecting an indented block following the " <> - "`" <> fromString blockName <> "` keyword\n" - , "but instead found an outdent:\n\n" - , tokenAsErrorSite s tok ] -- todo: @aryairani why is this displaying weirdly? - go (Parser.ExpectedBlockOpen blockName tok) = mconcat - [ "I was expecting an indented block following the " <> - "`" <> fromString blockName <> "` keyword\n" - , "but instead found this token:\n" - , tokenAsErrorSite s tok ] - go (Parser.SignatureNeedsAccompanyingBody tok) = mconcat - [ "You provided a type signature, but I didn't find an accompanying\n" - , "binding after it. Could it be a spelling mismatch?\n" - , tokenAsErrorSite s tok - ] - go (Parser.EmptyBlock tok) = mconcat - [ "I expected a block after this (" - , describeStyle ErrorSite - , "), " - , "but there wasn't one. Maybe check your indentation:\n" - , tokenAsErrorSite s tok - ] - go Parser.EmptyWatch = - "I expected a non-empty watch expression and not just \">\"" - go (Parser.UnknownAbilityConstructor tok _referents) = unknownConstructor "ability" tok - go (Parser.UnknownDataConstructor tok _referents) = unknownConstructor "data" tok - go (Parser.UnknownId tok referents references) = Pr.lines - [ if missing then - "I couldn't resolve the reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> "." - else - "The reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> " was ambiguous." - , "" - , tokenAsErrorSite s $ HQ.toString <$> tok - , if missing then "Make sure it's spelled correctly." - else "Try hash-qualifying the term you meant to reference." - ] - where missing = Set.null referents && Set.null references - go (Parser.UnknownTerm tok referents) = Pr.lines - [ if Set.null referents then - "I couldn't find a term for " <> style ErrorSite (HQ.toString (L.payload tok)) <> "." 
- else - "The term reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> " was ambiguous." - , "" - , tokenAsErrorSite s $ HQ.toString <$> tok - , if missing then "Make sure it's spelled correctly." - else "Try hash-qualifying the term you meant to reference." - ] - where - missing = Set.null referents - go (Parser.UnknownType tok referents) = Pr.lines - [ if Set.null referents then - "I couldn't find a type for " <> style ErrorSite (HQ.toString (L.payload tok)) <> "." - else - "The type reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> " was ambiguous." - , "" - , tokenAsErrorSite s $ HQ.toString <$> tok - , if missing then "Make sure it's spelled correctly." - else "Try hash-qualifying the type you meant to reference." - ] - where - missing = Set.null referents - go (Parser.ResolutionFailures failures) = - Pr.border 2 . prettyResolutionFailures s $ failures - unknownConstructor - :: String -> L.Token HashQualified -> Pretty ColorText - unknownConstructor ctorType tok = Pr.lines [ - (Pr.wrap . mconcat) [ "I don't know about any " - , fromString ctorType - , " constructor named " - , Pr.group ( - stylePretty ErrorSite (prettyHashQualified0 (L.payload tok)) <> - "." 
- ) - , "Maybe make sure it's correctly spelled and that you've imported it:" - ] - , "" - , tokenAsErrorSite s tok - ] - lexerOutput :: Pretty (AnnotatedText a) - lexerOutput = if showLexerOutput - then "\nLexer output:\n" <> fromString (L.debugLex' s) - else mempty - -annotatedAsErrorSite - :: Annotated a => String -> a -> Pretty ColorText -annotatedAsErrorSite = annotatedAsStyle ErrorSite - -annotatedAsStyle - :: (Ord style, Annotated a) - => style - -> String - -> a - -> Pretty (AnnotatedText style) -annotatedAsStyle style s ann = - showSourceMaybes s [(, style) <$> rangeForAnnotated ann] - -annotatedsAsErrorSite :: (Annotated a) => String -> [a] -> Pretty ColorText -annotatedsAsErrorSite = annotatedsAsStyle ErrorSite - -annotatedsAsStyle :: (Annotated a) => Color -> String -> [a] -> Pretty ColorText -annotatedsAsStyle style src as = - showSourceMaybes src [ (, style) <$> rangeForAnnotated a | a <- as ] - -annotatedsStartingLineAsStyle - :: (Annotated a) => Color -> String -> [a] -> Pretty ColorText -annotatedsStartingLineAsStyle style src as = showSourceMaybes - src - [ (, style) <$> (startingLine <$> rangeForAnnotated a) | a <- as ] - -tokenAsErrorSite :: String -> L.Token a -> Pretty ColorText -tokenAsErrorSite src tok = showSource1 src (rangeForToken tok, ErrorSite) - -tokensAsErrorSite :: String -> [L.Token a] -> Pretty ColorText -tokensAsErrorSite src ts = - showSource src [(rangeForToken t, ErrorSite) | t <- ts ] - -showSourceMaybes - :: Ord a => String -> [Maybe (Range, a)] -> Pretty (AnnotatedText a) -showSourceMaybes src annotations = showSource src $ catMaybes annotations - -showSource :: Ord a => String -> [(Range, a)] -> Pretty (AnnotatedText a) -showSource src annotations = Pr.lit . 
AT.condensedExcerptToText 6 $ AT.markup - (fromString src) - (Map.fromList annotations) - -showSource1 :: Ord a => String -> (Range, a) -> Pretty (AnnotatedText a) -showSource1 src annotation = showSource src [annotation] - -findTerm :: Seq (C.PathElement v loc) -> Maybe loc -findTerm = go - where - go (C.InSynthesize t :<| _) = Just $ ABT.annotation t - go (C.InCheck t _ :<| _) = Just $ ABT.annotation t - go (C.InSynthesizeApp _ t _ :<| _) = Just $ ABT.annotation t - go (_ :<| t) = go t - go Empty = Nothing - -prettyTypecheckError - :: (Var v, Ord loc, Show loc, Parser.Annotated loc) - => C.ErrorNote v loc - -> Env - -> String - -> Pretty ColorText -prettyTypecheckError = renderTypeError . typeErrorFromNote - -prettyTypeInfo - :: (Var v, Ord loc, Show loc, Parser.Annotated loc) - => C.InfoNote v loc - -> Env - -> Pretty ColorText -prettyTypeInfo n e = - maybe "" (`renderTypeInfo` e) (typeInfoFromNote n) - -intLiteralSyntaxTip - :: C.Term v loc -> C.Type v loc -> Pretty ColorText -intLiteralSyntaxTip term expectedType = case (term, expectedType) of - (Term.Nat' n, Type.Ref' r) | r == Type.intRef -> - "\nTip: Use the syntax " - <> style Type2 ("+" <> show n) - <> " to produce an " - <> style Type2 "Int" - <> "." 
- _ -> "" - -prettyResolutionFailures - :: (Annotated a, Var v) - => String - -> [Names.ResolutionFailure v a] - -> Pretty ColorText -prettyResolutionFailures s failures = Pr.callout "❓" $ Pr.linesNonEmpty - [ Pr.wrap - ("I couldn't resolve any of" <> style ErrorSite "these" <> "symbols:") - , "" - , annotatedsAsErrorSite s - $ [ a | Names.TermResolutionFailure _ a _ <- failures ] - ++ [ a | Names.TypeResolutionFailure _ a _ <- failures ] - , let - conflicts = - nubOrd - $ [ v - | Names.TermResolutionFailure v _ s <- failures - , Set.size s > 1 - ] - ++ [ v - | Names.TypeResolutionFailure v _ s <- failures - , Set.size s > 1 - ] - allVars = - nubOrd - $ [ v | Names.TermResolutionFailure v _ _ <- failures ] - ++ [ v | Names.TypeResolutionFailure v _ _ <- failures ] - in - "Using these fully qualified names:" - `Pr.hang` Pr.spaced (prettyVar <$> allVars) - <> "\n" - <> if null conflicts - then "" - else Pr.spaced (prettyVar <$> conflicts) - <> Pr.bold " are currently conflicted symbols" - ] - -useExamples :: Pretty ColorText -useExamples = Pr.lines [ - "Here's a few examples of valid `use` statements:", "", - Pr.indentN 2 . Pr.column2 $ - [ (Pr.blue "use math sqrt", Pr.wrap "Introduces `sqrt` as a local alias for `math.sqrt`") - , (Pr.blue "use List :+", Pr.wrap "Introduces `:+` as a local alias for `List.:+`.") - , (Pr.blue "use .foo bar.baz", Pr.wrap "Introduces `bar.baz` as a local alias for the absolute name `.foo.bar.baz`") ] - ] diff --git a/parser-typechecker/src/Unison/Result.hs b/parser-typechecker/src/Unison/Result.hs deleted file mode 100644 index c0569c9113..0000000000 --- a/parser-typechecker/src/Unison/Result.hs +++ /dev/null @@ -1,90 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE Rank2Types #-} - -module Unison.Result where - -import Unison.Prelude - -import Control.Monad.Except ( ExceptT(..) 
) -import Data.Functor.Identity -import qualified Control.Monad.Fail as Fail -import qualified Control.Monad.Morph as Morph -import Control.Monad.Writer ( WriterT(..) - , runWriterT - , MonadWriter(..) - ) -import Unison.Name ( Name ) -import qualified Unison.Parser as Parser -import Unison.Paths ( Path ) -import Unison.Term ( Term ) -import qualified Unison.Typechecker.Context as Context -import Control.Error.Util ( note) -import qualified Unison.Names3 as Names - -type Result notes = ResultT notes Identity - -type ResultT notes f = MaybeT (WriterT notes f) - -data Note v loc - = Parsing (Parser.Err v) - | NameResolutionFailures [Names.ResolutionFailure v loc] - | InvalidPath Path (Term v loc) -- todo: move me! - | UnknownSymbol v loc - | TypeError (Context.ErrorNote v loc) - | TypeInfo (Context.InfoNote v loc) - | CompilerBug (CompilerBug v loc) - deriving Show - -data CompilerBug v loc - = TopLevelComponentNotFound v (Term v loc) - | ResolvedNameNotFound v loc Name - | TypecheckerBug (Context.CompilerBug v loc) - deriving Show - -result :: Result notes a -> Maybe a -result (Result _ may) = may - -pattern Result notes may = MaybeT (WriterT (Identity (may, notes))) -{-# COMPLETE Result #-} - -isSuccess :: Functor f => ResultT note f a -> f Bool -isSuccess = (isJust . fst <$>) . runResultT - -isFailure :: Functor f => ResultT note f a -> f Bool -isFailure = (isNothing . fst <$>) . runResultT - -toMaybe :: Functor f => ResultT note f a -> f (Maybe a) -toMaybe = (fst <$>) . runResultT - -runResultT :: ResultT notes f a -> f (Maybe a, notes) -runResultT = runWriterT . runMaybeT - --- Returns the `Result` in the `f` functor. -getResult :: Functor f => ResultT notes f a -> f (Result notes a) -getResult r = uncurry (flip Result) <$> runResultT r - -toEither :: Functor f => ResultT notes f a -> ExceptT notes f a -toEither r = ExceptT (go <$> runResultT r) - where go (may, notes) = note notes may - -tell1 :: Monad f => note -> ResultT (Seq note) f () -tell1 = tell . 
pure - -fromParsing - :: Monad f => Either (Parser.Err v) a -> ResultT (Seq (Note v loc)) f a -fromParsing (Left e) = do - tell1 $ Parsing e - Fail.fail "" -fromParsing (Right a) = pure a - -tellAndFail :: Monad f => note -> ResultT (Seq note) f a -tellAndFail note = tell1 note *> Fail.fail "Elegantly and responsibly" - -compilerBug :: Monad f => CompilerBug v loc -> ResultT (Seq (Note v loc)) f a -compilerBug = tellAndFail . CompilerBug - -hoist - :: (Monad f, Monoid notes) - => (forall a. f a -> g a) - -> ResultT notes f b -> ResultT notes g b -hoist morph = Morph.hoist (Morph.hoist morph) diff --git a/parser-typechecker/src/Unison/Runtime/ANF.hs b/parser-typechecker/src/Unison/Runtime/ANF.hs deleted file mode 100644 index b0ae353743..0000000000 --- a/parser-typechecker/src/Unison/Runtime/ANF.hs +++ /dev/null @@ -1,1408 +0,0 @@ -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE ViewPatterns #-} -{-# Language OverloadedStrings #-} -{-# Language PatternGuards #-} -{-# Language PatternSynonyms #-} -{-# Language ScopedTypeVariables #-} -{-# Language GeneralizedNewtypeDeriving #-} - -module Unison.Runtime.ANF - ( optimize - , fromTerm - , fromTerm' - , term - , minimizeCyclesOrCrash - , pattern TVar - , pattern TLit - , pattern TApp - , pattern TApv - , pattern TCom - , pattern TCon - , pattern TKon - , pattern TReq - , pattern TPrm - , pattern TIOp - , pattern THnd - , pattern TLet - , pattern TFrc - , pattern TLets - , pattern TName - , pattern TBind - , pattern TTm - , pattern TBinds - , pattern TBinds' - , pattern TShift - , pattern TMatch - , Mem(..) - , Lit(..) - , SuperNormal(..) - , SuperGroup(..) - , POp(..) - , IOp(..) - , close - , saturate - , float - , lamLift - , ANormalBF(..) - , ANormalTF(.., AApv, ACom, ACon, AKon, AReq, APrm, AIOp) - , ANormal - , ANormalT - , RTag - , CTag - , Tag(..) - , packTags - , unpackTags - , ANFM - , Branched(..) - , Func(..) 
- , superNormalize - , anfTerm - , sink - , prettyGroup - ) where - -import Unison.Prelude - -import Control.Monad.Reader (ReaderT(..), asks, local) -import Control.Monad.State (State, runState, MonadState(..), modify, gets) -import Control.Lens (snoc, unsnoc) - -import Data.Bifunctor (Bifunctor(..)) -import Data.Bifoldable (Bifoldable(..)) -import Data.Bits ((.&.), (.|.), shiftL, shiftR) -import Data.List hiding (and,or) -import Prelude hiding (abs,and,or,seq) -import qualified Prelude -import Unison.Term hiding (resolve, fresh, float) -import Unison.Var (Var, typed) -import Unison.Util.EnumContainers as EC -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Unison.ABT as ABT -import qualified Unison.ABT.Normalized as ABTN -import qualified Unison.Term as Term -import qualified Unison.Type as Ty -import qualified Unison.Builtin.Decls as Ty (unitRef,seqViewRef) -import qualified Unison.Var as Var -import Unison.Typechecker.Components (minimize') -import Unison.Pattern (SeqOp(..)) -import qualified Unison.Pattern as P -import Unison.Reference (Reference(..)) -import Unison.Referent (Referent) - -newtype ANF v a = ANF_ { term :: Term v a } - --- Replace all lambdas with free variables with closed lambdas. --- Works by adding a parameter for each free variable. These --- synthetic parameters are added before the existing lambda params. --- For example, `(x -> x + y + z)` becomes `(y z x -> x + y + z) y z`. --- As this replacement has the same type as the original lambda, it --- can be done as a purely local transformation, without updating any --- call sites of the lambda. --- --- The transformation is shallow and doesn't transform the body of --- lambdas it finds inside of `t`. 
-lambdaLift :: (Var v, Semigroup a) => (v -> v) -> Term v a -> Term v a -lambdaLift liftVar t = result where - result = ABT.visitPure go t - go t@(LamsNamed' vs body) = Just $ let - fvs = ABT.freeVars t - fvsLifted = [ (v, liftVar v) | v <- toList fvs ] - a = ABT.annotation t - subs = [(v, var a v') | (v,v') <- fvsLifted ] - in if Set.null fvs then lam' a vs body -- `lambdaLift body` would make transform deep - else apps' (lam' a (map snd fvsLifted ++ vs) (ABT.substs subs body)) - (snd <$> subs) - go _ = Nothing - -closure :: Var v => Map v (Set v, Set v) -> Map v (Set v) -closure m0 = trace (snd <$> m0) - where - refs = fst <$> m0 - - expand acc fvs rvs - = fvs <> foldMap (\r -> Map.findWithDefault mempty r acc) rvs - - trace acc - | acc == acc' = acc - | otherwise = trace acc' - where - acc' = Map.intersectionWith (expand acc) acc refs - -expandRec - :: (Var v, Monoid a) - => Set v - -> [(v, Term v a)] - -> [(v, Term v a)] -expandRec keep vbs = mkSub <$> fvl - where - mkSub (v, fvs) = (v, apps' (var mempty v) (var mempty <$> fvs)) - - fvl = Map.toList - . fmap (Set.toList) - . closure - $ Set.partition (`Set.member` keep) - . ABT.freeVars - <$> Map.fromList vbs - -expandSimple - :: (Var v, Monoid a) - => Set v - -> (v, Term v a) - -> (v, Term v a) -expandSimple keep (v, bnd) = (v, apps' (var a v) evs) - where - a = ABT.annotation bnd - fvs = ABT.freeVars bnd - evs = map (var a) . Set.toList $ Set.difference fvs keep - - -abstract :: (Var v) => Set v -> Term v a -> Term v a -abstract keep bnd = lam' a evs bnd - where - a = ABT.annotation bnd - fvs = ABT.freeVars bnd - evs = Set.toList $ Set.difference fvs keep - -enclose - :: (Var v, Monoid a) - => Set v - -> (Set v -> Term v a -> Term v a) - -> Term v a - -> Maybe (Term v a) -enclose keep rec (LetRecNamedTop' top vbs bd) - = Just $ letRec' top lvbs lbd - where - xpnd = expandRec keep' vbs - keep' = Set.union keep . Set.fromList . map fst $ vbs - lvbs = (map.fmap) (rec keep' . abstract keep' . 
ABT.substs xpnd) vbs - lbd = rec keep' . ABT.substs xpnd $ bd --- will be lifted, so keep this variable -enclose keep rec (Let1NamedTop' top v b@(LamsNamed' vs bd) e) - = Just . let1' top [(v, lamb)] . rec (Set.insert v keep) - $ ABT.subst v av e - where - (_, av) = expandSimple keep (v, b) - keep' = Set.difference keep $ Set.fromList vs - fvs = ABT.freeVars b - evs = Set.toList $ Set.difference fvs keep - a = ABT.annotation b - lbody = rec keep' bd - lamb = lam' a (evs ++ vs) lbody -enclose keep rec t@(LamsNamed' vs body) - = Just $ if null evs then lamb else apps' lamb $ map (var a) evs - where - -- remove shadowed variables - keep' = Set.difference keep $ Set.fromList vs - fvs = ABT.freeVars t - evs = Set.toList $ Set.difference fvs keep - a = ABT.annotation t - lbody = rec keep' body - lamb = lam' a (evs ++ vs) lbody -enclose keep rec t@(Handle' h body) - | isStructured body - = Just . handle (ABT.annotation t) h $ apps' lamb args - where - fvs = ABT.freeVars body - evs = Set.toList $ Set.difference fvs keep - a = ABT.annotation body - lbody = rec keep body - fv = Var.freshIn fvs $ typed Var.Eta - args | null evs = [constructor a Ty.unitRef 0] - | otherwise = var a <$> evs - lamb | null evs = lam' a [fv] lbody - | otherwise = lam' a evs lbody -enclose _ _ _ = Nothing - -isStructured :: Var v => Term v a -> Bool -isStructured (Var' _) = False -isStructured (Lam' _) = False -isStructured (Nat' _) = False -isStructured (Int' _) = False -isStructured (Float' _) = False -isStructured (Text' _) = False -isStructured (Char' _) = False -isStructured (Constructor' _ _) = False -isStructured (Apps' Constructor'{} args) = any isStructured args -isStructured (If' b t f) - = isStructured b || isStructured t || isStructured f -isStructured (And' l r) = isStructured l || isStructured r -isStructured (Or' l r) = isStructured l || isStructured r -isStructured _ = True - -close :: (Var v, Monoid a) => Set v -> Term v a -> Term v a -close keep tm = ABT.visitPure (enclose keep 
close) tm - -type FloatM v a r = State (Set v, [(v, Term v a)]) r - -freshFloat :: Var v => Set v -> v -> v -freshFloat avoid (Var.freshIn avoid -> v0) - = case Var.typeOf v0 of - Var.User nm - | v <- typed (Var.User $ nm <> w) , v `Set.notMember` avoid - -> v - | otherwise - -> freshFloat (Set.insert v0 avoid) v0 - _ -> v0 - where - w = Text.pack . show $ Var.freshId v0 - -letFloater - :: (Var v, Monoid a) - => (Term v a -> FloatM v a (Term v a)) - -> [(v, Term v a)] -> Term v a - -> FloatM v a (Term v a) -letFloater rec vbs e = do - cvs <- gets fst - let shadows = [ (v, freshFloat cvs v) - | (v, _) <- vbs, Set.member v cvs ] - shadowMap = Map.fromList shadows - rn v = Map.findWithDefault v v shadowMap - shvs = Set.fromList $ map (rn.fst) vbs - modify (first $ (<>shvs)) - fvbs <- traverse (\(v, b) -> (,) (rn v) <$> rec' (ABT.changeVars shadowMap b)) vbs - modify (second (++ fvbs)) - pure $ ABT.changeVars shadowMap e - where - rec' b@(LamsNamed' vs bd) = lam' (ABT.annotation b) vs <$> rec bd - rec' b = rec b - -lamFloater - :: (Var v, Monoid a) - => Maybe v -> a -> [v] -> Term v a -> FloatM v a v -lamFloater mv a vs bd - = state $ \(cvs, ctx) -> - let v = ABT.freshIn cvs $ fromMaybe (typed Var.Float) mv - in (v, (Set.insert v cvs, ctx <> [(v, lam' a vs bd)])) - -floater - :: (Var v, Monoid a) - => (Term v a -> FloatM v a (Term v a)) - -> Term v a -> Maybe (FloatM v a (Term v a)) -floater rec (LetRecNamed' vbs e) = Just $ letFloater rec vbs e >>= rec -floater rec (Let1Named' v b e) - | LamsNamed' vs bd <- b - = Just $ rec bd - >>= lamFloater (Just v) a vs - >>= \lv -> rec $ ABT.changeVars (Map.singleton v lv) e - where a = ABT.annotation b -floater rec tm@(LamsNamed' vs bd) = Just $ do - bd <- rec bd - lv <- lamFloater Nothing a vs bd - pure $ var a lv - where a = ABT.annotation tm -floater _ _ = Nothing - -float :: (Var v, Monoid a) => Term v a -> Term v a -float tm = case runState (go tm) (Set.empty, []) of - (bd, (_, ctx)) -> letRec' True ctx bd - where - go = 
ABT.visit $ floater go - -- tm | LetRecNamedTop' _ vbs e <- tm0 - -- , (pre, rec, post) <- reduceCycle vbs - -- = let1' False pre . letRec' False rec . let1' False post $ e - -- | otherwise = tm0 - -deannotate :: Var v => Term v a -> Term v a -deannotate = ABT.visitPure $ \case - Ann' c _ -> Just $ deannotate c - _ -> Nothing - -lamLift :: (Var v, Monoid a) => Term v a -> Term v a -lamLift = float . close Set.empty . deannotate - -saturate - :: (Var v, Monoid a) - => Map (Reference,Int) Int -> Term v a -> Term v a -saturate dat = ABT.visitPure $ \case - Apps' f@(Constructor' r t) args -> sat r t f args - Apps' f@(Request' r t) args -> sat r t f args - f@(Constructor' r t) -> sat r t f [] - f@(Request' r t) -> sat r t f [] - _ -> Nothing - where - frsh avoid _ = - let v = Var.freshIn avoid $ typed Var.Eta - in (Set.insert v avoid, v) - sat r t f args = case Map.lookup (r,t) dat of - Just n - | m < n - , vs <- snd $ mapAccumL frsh fvs [1..n-m] - , nargs <- var mempty <$> vs - -> Just . lam' mempty vs . apps' f $ args' ++ nargs - | m > n - , (sargs, eargs) <- splitAt n args' - , sv <- Var.freshIn fvs $ typed Var.Eta - -> Just - . let1' False [(sv,apps' f sargs)] - $ apps' (var mempty sv) eargs - _ -> Just (apps' f args') - where - m = length args - fvs = foldMap freeVars args - args' = saturate dat <$> args - -optimize :: forall a v . 
(Semigroup a, Var v) => Term v a -> Term v a -optimize t = go t where - ann = ABT.annotation - go (Let1' b body) | canSubstLet b body = go (ABT.bind body b) - go e@(App' f arg) = case go f of - Lam' f -> go (ABT.bind f arg) - f -> app (ann e) f (go arg) - go (If' (Boolean' False) _ f) = go f - go (If' (Boolean' True) t _) = go t - -- todo: can simplify match expressions - go e@(ABT.Var' _) = e - go e@(ABT.Tm' f) = case e of - Lam' _ -> e -- optimization is shallow - don't descend into lambdas - _ -> ABT.tm' (ann e) (go <$> f) - go e@(ABT.out -> ABT.Cycle body) = ABT.cycle' (ann e) (go body) - go e@(ABT.out -> ABT.Abs v body) = ABT.abs' (ann e) v (go body) - go e = e - - -- test for whether an expression `let x = y in body` can be - -- reduced by substituting `y` into `body`. We only substitute - -- when `y` is a variable or a primitive, otherwise this might - -- end up duplicating evaluation or changing the order that - -- effects are evaluated - canSubstLet expr _body - | isLeaf expr = True - -- todo: if number of occurrences of the binding is 1 and the - -- binding is pure, okay to substitute - | otherwise = False - -isLeaf :: ABT.Term (F typeVar typeAnn patternAnn) v a -> Bool -isLeaf (Var' _) = True -isLeaf (Int' _) = True -isLeaf (Float' _) = True -isLeaf (Nat' _) = True -isLeaf (Text' _) = True -isLeaf (Boolean' _) = True -isLeaf (Constructor' _ _) = True -isLeaf (TermLink' _) = True -isLeaf (TypeLink' _) = True -isLeaf _ = False - -minimizeCyclesOrCrash :: Var v => Term v a -> Term v a -minimizeCyclesOrCrash t = case minimize' t of - Right t -> t - Left e -> error $ "tried to minimize let rec with duplicate definitions: " - ++ show (fst <$> toList e) - -fromTerm' :: (Monoid a, Var v) => (v -> v) -> Term v a -> Term v a -fromTerm' liftVar t = term (fromTerm liftVar t) - -fromTerm :: forall a v . 
(Monoid a, Var v) => (v -> v) -> Term v a -> ANF v a -fromTerm liftVar t = ANF_ (go $ lambdaLift liftVar t) where - ann = ABT.annotation - isRef (Ref' _) = True - isRef _ = False - fixup :: Set v -- if we gotta create new vars, avoid using these - -> ([Term v a] -> Term v a) -- do this with ANF'd args - -> [Term v a] -- the args (not all in ANF already) - -> Term v a -- the ANF'd term - fixup used f args = let - args' = Map.fromList $ toVar =<< (args `zip` [0..]) - toVar (b, i) | isLeaf b = [] - | otherwise = [(i, Var.freshIn used (Var.named . Text.pack $ "arg" ++ show i))] - argsANF = map toANF (args `zip` [0..]) - toANF (b,i) = maybe b (var (ann b)) $ Map.lookup i args' - addLet (b,i) body = maybe body (\v -> let1' False [(v,go b)] body) (Map.lookup i args') - in foldr addLet (f argsANF) (args `zip` [(0::Int)..]) - go :: Term v a -> Term v a - go e@(Apps' f args) - | (isRef f || isLeaf f) && all isLeaf args = e - | not (isRef f || isLeaf f) = - let f' = ABT.fresh e (Var.named "f") - in let1' False [(f', go f)] (go $ apps' (var (ann f) f') args) - | otherwise = fixup (ABT.freeVars e) (apps' f) args - go e@(Handle' h body) - | isLeaf h = handle (ann e) h (go body) - | otherwise = let h' = ABT.fresh e (Var.named "handler") - in let1' False [(h', go h)] (handle (ann e) (var (ann h) h') (go body)) - go e@(If' cond t f) - | isLeaf cond = iff (ann e) cond (go t) (go f) - | otherwise = let cond' = ABT.fresh e (Var.named "cond") - in let1' False [(cond', go cond)] (iff (ann e) (var (ann cond) cond') (go t) (go f)) - go e@(Match' scrutinee cases) - | isLeaf scrutinee = match (ann e) scrutinee (fmap go <$> cases) - | otherwise = let scrutinee' = ABT.fresh e (Var.named "scrutinee") - in let1' False [(scrutinee', go scrutinee)] - (match (ann e) - (var (ann scrutinee) scrutinee') - (fmap go <$> cases)) - -- MatchCase RHS, shouldn't conflict with LetRec - go (ABT.Abs1NA' avs t) = ABT.absChain' avs (go t) - go e@(And' x y) - | isLeaf x = and (ann e) x (go y) - | otherwise = - 
let x' = ABT.fresh e (Var.named "argX") - in let1' False [(x', go x)] (and (ann e) (var (ann x) x') (go y)) - go e@(Or' x y) - | isLeaf x = or (ann e) x (go y) - | otherwise = - let x' = ABT.fresh e (Var.named "argX") - in let1' False [(x', go x)] (or (ann e) (var (ann x) x') (go y)) - go e@(Var' _) = e - go e@(Int' _) = e - go e@(Nat' _) = e - go e@(Float' _) = e - go e@(Boolean' _) = e - go e@(Text' _) = e - go e@(Char' _) = e - go e@(Blank' _) = e - go e@(Ref' _) = e - go e@(TermLink' _) = e - go e@(TypeLink' _) = e - go e@(RequestOrCtor' _ _) = e - go e@(Lam' _) = e -- ANF conversion is shallow - - -- don't descend into closed lambdas - go (Let1Named' v b e) = let1' False [(v, go b)] (go e) - -- top = False because we don't care to emit typechecker notes about TLDs - go (LetRecNamed' bs e) = letRec' False (fmap (second go) bs) (go e) - go e@(Sequence' vs) = - if all isLeaf vs then e - else fixup (ABT.freeVars e) (seq (ann e)) (toList vs) - go e@(Ann' tm typ) = Term.ann (ann e) (go tm) typ - go e = error $ "ANF.term: I thought we got all of these\n" <> show e - -data Mem = UN | BX deriving (Eq,Ord,Show,Enum) - --- Context entries with evaluation strategy -data CTE v s - = ST [v] [Mem] s - | LZ v (Either Word64 v) [v] - deriving (Show) - -pattern ST1 v m s = ST [v] [m] s - -data ANormalBF v e - = ALet [Mem] (ANormalTF v e) e - | AName (Either Word64 v) [v] e - | ATm (ANormalTF v e) - deriving (Show) - -data ANormalTF v e - = ALit Lit - | AMatch v (Branched e) - | AShift RTag e - | AHnd [RTag] v e - | AApp (Func v) [v] - | AFrc v - | AVar v - deriving (Show) - --- Types representing components that will go into the runtime tag of --- a data type value. RTags correspond to references, while CTags --- correspond to constructors. 
-newtype RTag = RTag Word64 deriving (Eq,Ord,Show,Read,EC.EnumKey) -newtype CTag = CTag Word16 deriving (Eq,Ord,Show,Read,EC.EnumKey) - -class Tag t where rawTag :: t -> Word64 -instance Tag RTag where rawTag (RTag w) = w -instance Tag CTag where rawTag (CTag w) = fromIntegral w - -packTags :: RTag -> CTag -> Word64 -packTags (RTag rt) (CTag ct) = ri .|. ci - where - ri = rt `shiftL` 16 - ci = fromIntegral ct - -unpackTags :: Word64 -> (RTag, CTag) -unpackTags w = (RTag $ w `shiftR` 16, CTag . fromIntegral $ w .&. 0xFFFF) - -ensureRTag :: (Ord n, Show n, Num n) => String -> n -> r -> r -ensureRTag s n x - | n > 0xFFFFFFFFFFFF = error $ s ++ "@RTag: too large: " ++ show n - | otherwise = x - -ensureCTag :: (Ord n, Show n, Num n) => String -> n -> r -> r -ensureCTag s n x - | n > 0xFFFF = error $ s ++ "@CTag: too large: " ++ show n - | otherwise = x - -instance Enum RTag where - toEnum i = ensureRTag "toEnum" i . RTag $ toEnum i - fromEnum (RTag w) = fromEnum w - -instance Enum CTag where - toEnum i = ensureCTag "toEnum" i . CTag $ toEnum i - fromEnum (CTag w) = fromEnum w - -instance Num RTag where - fromInteger i = ensureRTag "fromInteger" i . RTag $ fromInteger i - (+) = error "RTag: +" - (*) = error "RTag: *" - abs = error "RTag: abs" - signum = error "RTag: signum" - negate = error "RTag: negate" - -instance Num CTag where - fromInteger i = ensureCTag "fromInteger" i . 
CTag $ fromInteger i - (+) = error "CTag: +" - (*) = error "CTag: *" - abs = error "CTag: abs" - signum = error "CTag: signum" - negate = error "CTag: negate" - -instance Functor (ANormalBF v) where - fmap f (ALet m bn bo) = ALet m (f <$> bn) $ f bo - fmap f (AName n as bo) = AName n as $ f bo - fmap f (ATm tm) = ATm $ f <$> tm - -instance Bifunctor ANormalBF where - bimap f g (ALet m bn bo) = ALet m (bimap f g bn) $ g bo - bimap f g (AName n as bo) = AName (f <$> n) (f <$> as) $ g bo - bimap f g (ATm tm) = ATm (bimap f g tm) - -instance Bifoldable ANormalBF where - bifoldMap f g (ALet _ b e) = bifoldMap f g b <> g e - bifoldMap f g (AName n as e) = foldMap f n <> foldMap f as <> g e - bifoldMap f g (ATm e) = bifoldMap f g e - -instance Functor (ANormalTF v) where - fmap _ (AVar v) = AVar v - fmap _ (ALit l) = ALit l - fmap f (AMatch v br) = AMatch v $ f <$> br - fmap f (AHnd rs h e) = AHnd rs h $ f e - fmap f (AShift i e) = AShift i $ f e - fmap _ (AFrc v) = AFrc v - fmap _ (AApp f args) = AApp f args - -instance Bifunctor ANormalTF where - bimap f _ (AVar v) = AVar (f v) - bimap _ _ (ALit l) = ALit l - bimap f g (AMatch v br) = AMatch (f v) $ fmap g br - bimap f g (AHnd rs v e) = AHnd rs (f v) $ g e - bimap _ g (AShift i e) = AShift i $ g e - bimap f _ (AFrc v) = AFrc (f v) - bimap f _ (AApp fu args) = AApp (fmap f fu) $ fmap f args - -instance Bifoldable ANormalTF where - bifoldMap f _ (AVar v) = f v - bifoldMap _ _ (ALit _) = mempty - bifoldMap f g (AMatch v br) = f v <> foldMap g br - bifoldMap f g (AHnd _ h e) = f h <> g e - bifoldMap _ g (AShift _ e) = g e - bifoldMap f _ (AFrc v) = f v - bifoldMap f _ (AApp func args) = foldMap f func <> foldMap f args - -matchLit :: Term v a -> Maybe Lit -matchLit (Int' i) = Just $ I i -matchLit (Nat' n) = Just $ N n -matchLit (Float' f) = Just $ F f -matchLit (Text' t) = Just $ T t -matchLit (Char' c) = Just $ C c -matchLit _ = Nothing - -pattern Lit' l <- (matchLit -> Just l) -pattern TLet v m bn bo = ABTN.TTm (ALet [m] 
bn (ABTN.TAbs v bo)) -pattern TLets vs ms bn bo = ABTN.TTm (ALet ms bn (ABTN.TAbss vs bo)) -pattern TName v f as bo = ABTN.TTm (AName f as (ABTN.TAbs v bo)) -pattern TTm e = ABTN.TTm (ATm e) -{-# complete TLets, TName, TTm #-} - -pattern TLit l = TTm (ALit l) - -pattern TApp f args = TTm (AApp f args) -pattern AApv v args = AApp (FVar v) args -pattern TApv v args = TApp (FVar v) args -pattern ACom r args = AApp (FComb r) args -pattern TCom r args = TApp (FComb r) args -pattern ACon r t args = AApp (FCon r t) args -pattern TCon r t args = TApp (FCon r t) args -pattern AKon v args = AApp (FCont v) args -pattern TKon v args = TApp (FCont v) args -pattern AReq r t args = AApp (FReq r t) args -pattern TReq r t args = TApp (FReq r t) args -pattern APrm p args = AApp (FPrim (Left p)) args -pattern TPrm p args = TApp (FPrim (Left p)) args -pattern AIOp p args = AApp (FPrim (Right p)) args -pattern TIOp p args = TApp (FPrim (Right p)) args - -pattern THnd rs h b = TTm (AHnd rs h b) -pattern TShift i v e = TTm (AShift i (ABTN.TAbs v e)) -pattern TMatch v cs = TTm (AMatch v cs) -pattern TFrc v = TTm (AFrc v) -pattern TVar v = TTm (AVar v) - -{-# complete - TLet, TName, TVar, TApp, TFrc, TLit, THnd, TShift, TMatch - #-} -{-# complete - TLet, TName, - TVar, TFrc, - TApv, TCom, TCon, TKon, TReq, TPrm, TIOp, - TLit, THnd, TShift, TMatch - #-} - -bind :: Var v => Cte v -> ANormal v -> ANormal v -bind (ST us ms bu) = TLets us ms bu -bind (LZ u f as) = TName u f as - -unbind :: Var v => ANormal v -> Maybe (Cte v, ANormal v) -unbind (TLets us ms bu bd) = Just (ST us ms bu, bd) -unbind (TName u f as bd) = Just (LZ u f as, bd) -unbind _ = Nothing - -unbinds :: Var v => ANormal v -> (Ctx v, ANormal v) -unbinds (TLets us ms bu (unbinds -> (ctx, bd))) = (ST us ms bu:ctx, bd) -unbinds (TName u f as (unbinds -> (ctx, bd))) = (LZ u f as:ctx, bd) -unbinds tm = ([], tm) - -unbinds' :: Var v => ANormal v -> (Ctx v, ANormalT v) -unbinds' (TLets us ms bu (unbinds' -> (ctx, bd))) = (ST us ms 
bu:ctx, bd) -unbinds' (TName u f as (unbinds' -> (ctx, bd))) = (LZ u f as:ctx, bd) -unbinds' (TTm tm) = ([], tm) - -pattern TBind bn bd <- (unbind -> Just (bn, bd)) - where TBind bn bd = bind bn bd - -pattern TBinds :: Var v => Ctx v -> ANormal v -> ANormal v -pattern TBinds ctx bd <- (unbinds -> (ctx, bd)) - where TBinds ctx bd = foldr bind bd ctx - -pattern TBinds' :: Var v => Ctx v -> ANormalT v -> ANormal v -pattern TBinds' ctx bd <- (unbinds' -> (ctx, bd)) - where TBinds' ctx bd = foldr bind (TTm bd) ctx - -{-# complete TBinds' #-} - -data SeqEnd = SLeft | SRight - deriving (Eq, Ord, Enum, Show) - -data Branched e - = MatchIntegral (EnumMap Word64 e) (Maybe e) - | MatchText (Map.Map Text e) (Maybe e) - | MatchRequest (EnumMap RTag (EnumMap CTag ([Mem], e))) e - | MatchEmpty - | MatchData Reference (EnumMap CTag ([Mem], e)) (Maybe e) - | MatchSum (EnumMap Word64 ([Mem], e)) - deriving (Show, Functor, Foldable, Traversable) - -data BranchAccum v - = AccumEmpty - | AccumIntegral - Reference - (Maybe (ANormal v)) - (EnumMap Word64 (ANormal v)) - | AccumText - (Maybe (ANormal v)) - (Map.Map Text (ANormal v)) - | AccumDefault (ANormal v) - | AccumRequest - (EnumMap RTag (EnumMap CTag ([Mem],ANormal v))) - (Maybe (ANormal v)) - | AccumData - Reference - (Maybe (ANormal v)) - (EnumMap CTag ([Mem],ANormal v)) - | AccumSeqEmpty (ANormal v) - | AccumSeqView - SeqEnd - (Maybe (ANormal v)) -- empty - (ANormal v) -- cons/snoc - | AccumSeqSplit - SeqEnd - Int -- split at - (Maybe (ANormal v)) -- default - (ANormal v) -- split - -instance Semigroup (BranchAccum v) where - AccumEmpty <> r = r - l <> AccumEmpty = l - AccumIntegral rl dl cl <> AccumIntegral rr dr cr - | rl == rr = AccumIntegral rl (dl <|> dr) $ cl <> cr - AccumText dl cl <> AccumText dr cr - = AccumText (dl <|> dr) (cl <> cr) - AccumData rl dl cl <> AccumData rr dr cr - | rl == rr = AccumData rl (dl <|> dr) (cl <> cr) - AccumDefault dl <> AccumIntegral r _ cr - = AccumIntegral r (Just dl) cr - AccumDefault dl <> 
AccumText _ cr - = AccumText (Just dl) cr - AccumDefault dl <> AccumData rr _ cr - = AccumData rr (Just dl) cr - AccumIntegral r dl cl <> AccumDefault dr - = AccumIntegral r (dl <|> Just dr) cl - AccumText dl cl <> AccumDefault dr - = AccumText (dl <|> Just dr) cl - AccumData rl dl cl <> AccumDefault dr - = AccumData rl (dl <|> Just dr) cl - AccumRequest hl dl <> AccumRequest hr dr - = AccumRequest hm $ dl <|> dr - where - hm = EC.unionWith (<>) hl hr - l@(AccumSeqEmpty _) <> AccumSeqEmpty _ = l - AccumSeqEmpty eml <> AccumSeqView er _ cnr - = AccumSeqView er (Just eml) cnr - AccumSeqView el eml cnl <> AccumSeqEmpty emr - = AccumSeqView el (eml <|> Just emr) cnl - AccumSeqView el eml cnl <> AccumSeqView er emr _ - | el /= er - = error "AccumSeqView: trying to merge views of opposite ends" - | otherwise = AccumSeqView el (eml <|> emr) cnl - AccumSeqView _ _ _ <> AccumDefault _ - = error "seq views may not have defaults" - AccumDefault _ <> AccumSeqView _ _ _ - = error "seq views may not have defaults" - AccumSeqSplit el nl dl bl <> AccumSeqSplit er nr dr _ - | el /= er - = error "AccumSeqSplit: trying to merge splits at opposite ends" - | nl /= nr - = error - "AccumSeqSplit: trying to merge splits at different positions" - | otherwise - = AccumSeqSplit el nl (dl <|> dr) bl - AccumDefault dl <> AccumSeqSplit er nr _ br - = AccumSeqSplit er nr (Just dl) br - AccumSeqSplit el nl dl bl <> AccumDefault dr - = AccumSeqSplit el nl (dl <|> Just dr) bl - _ <> _ = error $ "cannot merge data cases for different types" - -instance Monoid (BranchAccum e) where - mempty = AccumEmpty - -data Func v - -- variable - = FVar v - -- top-level combinator - | FComb !Word64 - -- continuation jump - | FCont v - -- data constructor - | FCon !RTag !CTag - -- ability request - | FReq !RTag !CTag - -- prim op - | FPrim (Either POp IOp) - deriving (Show, Functor, Foldable, Traversable) - -data Lit - = I Int64 - | N Word64 - | F Double - | T Text - | C Char - | LM Referent - | LY Reference - 
deriving (Show) - -litRef :: Lit -> Reference -litRef (I _) = Ty.intRef -litRef (N _) = Ty.natRef -litRef (F _) = Ty.floatRef -litRef (T _) = Ty.textRef -litRef (C _) = Ty.charRef -litRef (LM _) = Ty.termLinkRef -litRef (LY _) = Ty.typeLinkRef - -data POp - -- Int - = ADDI | SUBI | MULI | DIVI -- +,-,*,/ - | SGNI | NEGI | MODI -- sgn,neg,mod - | POWI | SHLI | SHRI -- pow,shiftl,shiftr - | INCI | DECI | LEQI | EQLI -- inc,dec,<=,== - -- Nat - | ADDN | SUBN | MULN | DIVN -- +,-,*,/ - | MODN | TZRO | LZRO -- mod,trailing/leadingZeros - | POWN | SHLN | SHRN -- pow,shiftl,shiftr - | ANDN | IORN | XORN | COMN -- and,or,xor,complement - | INCN | DECN | LEQN | EQLN -- inc,dec,<=,== - -- Float - | ADDF | SUBF | MULF | DIVF -- +,-,*,/ - | MINF | MAXF | LEQF | EQLF -- min,max,<=,== - | POWF | EXPF | SQRT | LOGF -- pow,exp,sqrt,log - | LOGB -- logBase - | ABSF | CEIL | FLOR | TRNF -- abs,ceil,floor,truncate - | RNDF -- round - -- Trig - | COSF | ACOS | COSH | ACSH -- cos,acos,cosh,acosh - | SINF | ASIN | SINH | ASNH -- sin,asin,sinh,asinh - | TANF | ATAN | TANH | ATNH -- tan,atan,tanh,atanh - | ATN2 -- atan2 - -- Text - | CATT | TAKT | DRPT | SIZT -- ++,take,drop,size - | UCNS | USNC | EQLT | LEQT -- uncons,unsnoc,==,<= - | PAKT | UPKT -- pack,unpack - -- Sequence - | CATS | TAKS | DRPS | SIZS -- ++,take,drop,size - | CONS | SNOC | IDXS | BLDS -- cons,snoc,at,build - | VWLS | VWRS | SPLL | SPLR -- viewl,viewr,splitl,splitr - -- Bytes - | PAKB | UPKB | TAKB | DRPB -- pack,unpack,take,drop - | IDXB | SIZB | FLTB | CATB -- index,size,flatten,append - -- Conversion - | ITOF | NTOF | ITOT | NTOT - | TTOI | TTON | TTOF | FTOT - -- Concurrency - | FORK - -- Universal operations - | EQLU | CMPU | EROR - -- Debug - | PRNT | INFO - deriving (Show,Eq,Ord) - -data IOp - = OPENFI | CLOSFI | ISFEOF | ISFOPN - | ISSEEK | SEEKFI | POSITN | STDHND - | GBUFFR | SBUFFR - | GTLINE | GTTEXT | PUTEXT - | SYTIME | GTMPDR | GCURDR | SCURDR - | DCNTNS | FEXIST | ISFDIR - | CRTDIR | REMDIR | RENDIR - | 
REMOFI | RENAFI | GFTIME | GFSIZE - | SRVSCK | LISTEN | CLISCK | CLOSCK - | SKACPT | SKSEND | SKRECV - | THKILL | THDELY - deriving (Show,Eq,Ord) - -type ANormal = ABTN.Term ANormalBF -type ANormalT v = ANormalTF v (ANormal v) - -type Cte v = CTE v (ANormalT v) -type Ctx v = [Cte v] - --- Should be a completely closed term -data SuperNormal v - = Lambda { conventions :: [Mem], bound :: ANormal v } - deriving (Show) -data SuperGroup v - = Rec - { group :: [(v, SuperNormal v)] - , entry :: SuperNormal v - } deriving (Show) - -type ANFM v - = ReaderT (Set v, Reference -> Word64, Reference -> RTag) - (State (Word64, [(v, SuperNormal v)])) - -resolveTerm :: Reference -> ANFM v Word64 -resolveTerm r = asks $ \(_, rtm, _) -> rtm r - -resolveType :: Reference -> ANFM v RTag -resolveType r = asks $ \(_, _, rty) -> rty r - -groupVars :: ANFM v (Set v) -groupVars = asks $ \(grp, _, _) -> grp - -bindLocal :: Ord v => [v] -> ANFM v r -> ANFM v r -bindLocal vs - = local $ \(gr, rw, rt) -> (gr Set.\\ Set.fromList vs, rw, rt) - -freshANF :: Var v => Word64 -> v -freshANF fr = Var.freshenId fr $ typed Var.ANFBlank - -fresh :: Var v => ANFM v v -fresh = state $ \(fr, cs) -> (freshANF fr, (fr+1, cs)) - -contextualize :: Var v => ANormalT v -> ANFM v (Ctx v, v) -contextualize (AVar cv) = do - gvs <- groupVars - if cv `Set.notMember` gvs - then pure ([], cv) - else do fresh <&> \bv -> ([ST1 bv BX $ AApv cv []], bv) -contextualize tm = fresh <&> \fv -> ([ST1 fv BX tm], fv) - -record :: Var v => (v, SuperNormal v) -> ANFM v () -record p = modify $ \(fr, to) -> (fr, p:to) - -superNormalize - :: Var v - => (Reference -> Word64) - -> (Reference -> RTag) - -> Term v a - -> SuperGroup v -superNormalize rtm rty tm = Rec l c - where - (bs, e) | LetRecNamed' bs e <- tm = (bs, e) - | otherwise = ([], tm) - grp = Set.fromList $ fst <$> bs - comp = traverse_ superBinding bs *> toSuperNormal e - subc = runReaderT comp (grp, rtm, rty) - (c, (_,l)) = runState subc (0, []) - -superBinding :: Var v => 
(v, Term v a) -> ANFM v () -superBinding (v, tm) = do - nf <- toSuperNormal tm - modify $ \(cvs, ctx) -> (cvs, (v,nf):ctx) - -toSuperNormal :: Var v => Term v a -> ANFM v (SuperNormal v) -toSuperNormal tm = do - grp <- groupVars - if not . Set.null . (Set.\\ grp) $ freeVars tm - then error $ "free variables in supercombinator: " ++ show tm - else Lambda (BX<$vs) . ABTN.TAbss vs <$> bindLocal vs (anfTerm body) - where - (vs, body) = fromMaybe ([], tm) $ unLams' tm - -anfTerm :: Var v => Term v a -> ANFM v (ANormal v) -anfTerm tm = uncurry TBinds' <$> anfBlock tm - -floatableCtx :: Ctx v -> Bool -floatableCtx = all p - where - p (LZ _ _ _) = True - p (ST _ _ tm) = q tm - q (ALit _) = True - q (AVar _) = True - q (ACon _ _ _) = True - q _ = False - -anfBlock :: Var v => Term v a -> ANFM v (Ctx v, ANormalT v) -anfBlock (Var' v) = pure ([], AVar v) -anfBlock (If' c t f) = do - (cctx, cc) <- anfBlock c - cf <- anfTerm f - ct <- anfTerm t - (cx, v) <- contextualize cc - let cases = MatchData - (Builtin $ Text.pack "Boolean") - (EC.mapSingleton 0 ([], cf)) - (Just ct) - pure (cctx ++ cx, AMatch v cases) -anfBlock (And' l r) = do - (lctx, vl) <- anfArg l - (rctx, vr) <- anfArg r - i <- resolveTerm $ Builtin "Boolean.and" - pure (lctx ++ rctx, ACom i [vl, vr]) -anfBlock (Or' l r) = do - (lctx, vl) <- anfArg l - (rctx, vr) <- anfArg r - i <- resolveTerm $ Builtin "Boolean.or" - pure (lctx ++ rctx, ACom i [vl, vr]) -anfBlock (Handle' h body) - = anfArg h >>= \(hctx, vh) -> - anfBlock body >>= \case - (ctx, ACom f as) | floatableCtx ctx -> do - v <- fresh - pure (hctx ++ ctx ++ [LZ v (Left f) as], AApp (FVar vh) [v]) - (ctx, AApv f as) | floatableCtx ctx -> do - v <- fresh - pure (hctx ++ ctx ++ [LZ v (Right f) as], AApp (FVar vh) [v]) - (ctx, AVar v) | floatableCtx ctx -> do - pure (hctx ++ ctx, AApp (FVar vh) [v]) - p@(_, _) -> - error $ "handle body should be a simple call: " ++ show p -anfBlock (Match' scrut cas) = do - (sctx, sc) <- anfBlock scrut - (cx, v) <- 
contextualize sc - brn <- anfCases v cas - case brn of - AccumDefault (TBinds' dctx df) -> do - pure (sctx ++ cx ++ dctx, df) - AccumRequest _ Nothing -> - error "anfBlock: AccumRequest without default" - AccumRequest abr (Just df) -> do - (r, vs) <- do - r <- fresh - v <- fresh - gvs <- groupVars - let hfb = ABTN.TAbs v . TMatch v $ MatchRequest abr df - hfvs = Set.toList $ ABTN.freeVars hfb `Set.difference` gvs - record (r, Lambda (BX <$ hfvs ++ [v]) . ABTN.TAbss hfvs $ hfb) - pure (r, hfvs) - hv <- fresh - let msc | [ST1 _ BX tm] <- cx = tm - | [ST _ _ _] <- cx = error "anfBlock: impossible" - | otherwise = AFrc v - pure ( sctx ++ [LZ hv (Right r) vs] - , AHnd (EC.keys abr) hv . TTm $ msc - ) - AccumText df cs -> - pure (sctx ++ cx, AMatch v $ MatchText cs df) - AccumIntegral r df cs -> do - i <- fresh - let dcs = MatchData r - (EC.mapSingleton 0 ([UN], ABTN.TAbss [i] ics)) - Nothing - ics = TMatch i $ MatchIntegral cs df - pure (sctx ++ cx, AMatch v dcs) - AccumData r df cs -> - pure (sctx ++ cx, AMatch v $ MatchData r cs df) - AccumSeqEmpty _ -> - error "anfBlock: non-exhaustive AccumSeqEmpty" - AccumSeqView en (Just em) bd -> do - r <- fresh - op <- case en of - SLeft -> resolveTerm $ Builtin "List.viewl" - _ -> resolveTerm $ Builtin "List.viewr" - pure ( sctx ++ cx ++ [ST1 r BX (ACom op [v])] - , AMatch r - $ MatchData Ty.seqViewRef - (EC.mapFromList - [ (0, ([], em)) - , (1, ([BX,BX], bd)) - ] - ) - Nothing - ) - AccumSeqView {} -> - error "anfBlock: non-exhaustive AccumSeqView" - AccumSeqSplit en n mdf bd -> do - i <- fresh - r <- fresh - t <- fresh - pure ( sctx ++ cx ++ [lit i, split i r] - , AMatch r . MatchSum $ mapFromList - [ (0, ([], df t)) - , (1, ([BX,BX], bd)) - ]) - where - op | SLeft <- en = SPLL - | otherwise = SPLR - lit i = ST1 i UN (ALit . 
N $ fromIntegral n) - split i r = ST1 r UN (APrm op [i,v]) - df t - = fromMaybe - ( TLet t BX (ALit (T "non-exhaustive split")) - $ TPrm EROR [t]) - mdf - AccumEmpty -> pure (sctx ++ cx, AMatch v MatchEmpty) -anfBlock (Let1Named' v b e) - = anfBlock b >>= \(bctx, cb) -> bindLocal [v] $ do - (ectx, ce) <- anfBlock e - pure (bctx ++ ST1 v BX cb : ectx, ce) -anfBlock (Apps' f args) = do - (fctx, cf) <- anfFunc f - (actx, cas) <- anfArgs args - pure (fctx ++ actx, AApp cf cas) -anfBlock (Constructor' r t) - = resolveType r <&> \rt -> ([], ACon rt (toEnum t) []) -anfBlock (Request' r t) = do - r <- resolveType r - pure ([], AReq r (toEnum t) []) -anfBlock (Boolean' b) = - resolveType Ty.booleanRef <&> \rt -> - ([], ACon rt (if b then 1 else 0) []) -anfBlock (Lit' l@(T _)) = - pure ([], ALit l) -anfBlock (Lit' l) = do - lv <- fresh - rt <- resolveType $ litRef l - pure ([ST1 lv UN $ ALit l], ACon rt 0 [lv]) -anfBlock (Ref' r) = - resolveTerm r <&> \n -> ([], ACom n []) -anfBlock (Blank' _) = do - ev <- fresh - pure ([ST1 ev BX (ALit (T "Blank"))], APrm EROR [ev]) -anfBlock (TermLink' r) = pure ([], ALit (LM r)) -anfBlock (TypeLink' r) = pure ([], ALit (LY r)) -anfBlock (Sequence' as) = fmap (APrm BLDS) <$> anfArgs tms - where - tms = toList as -anfBlock t = error $ "anf: unhandled term: " ++ show t - --- Note: this assumes that patterns have already been translated --- to a state in which every case matches a single layer of data, --- with no guards, and no variables ignored. This is not checked --- completely. -anfInitCase - :: Var v - => v - -> MatchCase p (Term v a) - -> ANFM v (BranchAccum v) -anfInitCase u (MatchCase p guard (ABT.AbsN' vs bd)) - | Just _ <- guard = error "anfInitCase: unexpected guard" - | P.Unbound _ <- p - , [] <- vs - = AccumDefault <$> anfBody bd - | P.Var _ <- p - , [v] <- vs - = AccumDefault . 
ABTN.rename v u <$> anfBody bd - | P.Var _ <- p - = error $ "vars: " ++ show (length vs) - | P.Int _ (fromIntegral -> i) <- p - = AccumIntegral Ty.intRef Nothing . EC.mapSingleton i <$> anfBody bd - | P.Nat _ i <- p - = AccumIntegral Ty.natRef Nothing . EC.mapSingleton i <$> anfBody bd - | P.Boolean _ b <- p - , t <- if b then 1 else 0 - = AccumData Ty.booleanRef Nothing - . EC.mapSingleton t . ([],) <$> anfBody bd - | P.Text _ t <- p - , [] <- vs - = AccumText Nothing . Map.singleton t <$> anfBody bd - | P.Constructor _ r t ps <- p = do - us <- expandBindings ps vs - AccumData r Nothing - . EC.mapSingleton (toEnum t) - . (BX<$us,) - . ABTN.TAbss us - <$> anfBody bd - | P.EffectPure _ q <- p = do - us <- expandBindings [q] vs - AccumRequest mempty . Just . ABTN.TAbss us <$> anfBody bd - | P.EffectBind _ r t ps pk <- p = do - exp <- expandBindings (snoc ps pk) vs - let (us, uk) - = maybe (error "anfInitCase: unsnoc impossible") id - $ unsnoc exp - n <- resolveType r - jn <- resolveTerm $ Builtin "jumpCont" - kf <- fresh - flip AccumRequest Nothing - . EC.mapSingleton n - . EC.mapSingleton (toEnum t) - . (BX<$us,) - . ABTN.TAbss us - . TShift n kf - . TName uk (Left jn) [kf] - <$> anfBody bd - | P.SequenceLiteral _ [] <- p - = AccumSeqEmpty <$> anfBody bd - | P.SequenceOp _ l op r <- p - , Concat <- op - , P.SequenceLiteral p ll <- l = do - us <- expandBindings [P.Var p, r] vs - AccumSeqSplit SLeft (length ll) Nothing - . ABTN.TAbss us - <$> anfBody bd - | P.SequenceOp _ l op r <- p - , Concat <- op - , P.SequenceLiteral p rl <- r = do - us <- expandBindings [l, P.Var p] vs - AccumSeqSplit SLeft (length rl) Nothing - . ABTN.TAbss us - <$> anfBody bd - | P.SequenceOp _ l op r <- p = do - us <- expandBindings [l,r] vs - let dir = case op of Cons -> SLeft ; _ -> SRight - AccumSeqView dir Nothing . 
ABTN.TAbss us <$> anfBody bd - where - anfBody tm = bindLocal vs $ anfTerm tm -anfInitCase _ (MatchCase p _ _) - = error $ "anfInitCase: unexpected pattern: " ++ show p - -expandBindings' - :: Var v - => Word64 - -> [P.Pattern p] - -> [v] - -> Either String (Word64, [v]) -expandBindings' fr [] [] = Right (fr, []) -expandBindings' fr (P.Unbound _:ps) vs - = fmap (u :) <$> expandBindings' (fr+1) ps vs - where u = freshANF fr -expandBindings' fr (P.Var _:ps) (v:vs) - = fmap (v :) <$> expandBindings' fr ps vs -expandBindings' _ [] (_:_) - = Left "expandBindings': more bindings than expected" -expandBindings' _ (_:_) [] - = Left "expandBindings': more patterns than expected" -expandBindings' _ _ _ - = Left $ "expandBindings': unexpected pattern" - -expandBindings :: Var v => [P.Pattern p] -> [v] -> ANFM v [v] -expandBindings ps vs - = state $ \(fr,co) -> case expandBindings' fr ps vs of - Left err -> error $ err ++ " " ++ show (ps, vs) - Right (fr,l) -> (l, (fr,co)) - -anfCases - :: Var v - => v - -> [MatchCase p (Term v a)] - -> ANFM v (BranchAccum v) -anfCases u = fmap fold . traverse (anfInitCase u) - -anfFunc :: Var v => Term v a -> ANFM v (Ctx v, Func v) -anfFunc (Var' v) = pure ([], FVar v) -anfFunc (Ref' r) - = resolveTerm r <&> \n -> ([], FComb n) -anfFunc (Constructor' r t) - = resolveType r <&> \rt -> ([], FCon rt $ toEnum t) -anfFunc (Request' r t) - = resolveType r <&> \rt -> ([], FReq rt $ toEnum t) -anfFunc tm = do - (fctx, ctm) <- anfBlock tm - (cx, v) <- contextualize ctm - pure (fctx ++ cx, FVar v) - -anfArg :: Var v => Term v a -> ANFM v (Ctx v, v) -anfArg tm = do - (ctx, ctm) <- anfBlock tm - (cx, v) <- contextualize ctm - pure (ctx ++ cx, v) - -anfArgs :: Var v => [Term v a] -> ANFM v (Ctx v, [v]) -anfArgs tms = first concat . 
unzip <$> traverse anfArg tms - -sink :: Var v => v -> Mem -> ANormalT v -> ANormal v -> ANormal v -sink v mtm tm = dive $ freeVarsT tm - where - frsh l r = Var.freshIn (l <> r) $ Var.typed Var.ANFBlank - - directVars = bifoldMap Set.singleton (const mempty) - freeVarsT = bifoldMap Set.singleton ABTN.freeVars - - dive _ exp | v `Set.notMember` ABTN.freeVars exp = exp - dive avoid exp@(TName u f as bo) - | v `elem` as - = let w = frsh avoid (ABTN.freeVars exp) - in TLet w mtm tm $ ABTN.rename v w exp - | otherwise - = TName u f as (dive avoid' bo) - where avoid' = Set.insert u avoid - dive avoid exp@(TLets us ms bn bo) - | v `Set.member` directVars bn -- we need to stop here - = let w = frsh avoid (ABTN.freeVars exp) - in TLet w mtm tm $ ABTN.rename v w exp - | otherwise - = TLets us ms bn' $ dive avoid' bo - where - avoid' = Set.fromList us <> avoid - bn' | v `Set.notMember` freeVarsT bn = bn - | otherwise = dive avoid' <$> bn - dive avoid exp@(TTm tm) - | v `Set.member` directVars tm -- same as above - = let w = frsh avoid (ABTN.freeVars exp) - in TLet w mtm tm $ ABTN.rename v w exp - | otherwise = TTm $ dive avoid <$> tm - -indent :: Int -> ShowS -indent ind = showString (replicate (ind*2) ' ') - -prettyGroup :: Var v => SuperGroup v -> ShowS -prettyGroup (Rec grp ent) - = showString "let rec\n" - . foldr f id grp - . showString "entry" - . prettySuperNormal 1 ent - where - f (v,sn) r = indent 1 . pvar v - . prettySuperNormal 2 sn . showString "\n" . r - -pvar :: Var v => v -> ShowS -pvar v = showString . Text.unpack $ Var.name v - -prettyVars :: Var v => [v] -> ShowS -prettyVars - = foldr (\v r -> showString " " . pvar v . r) id - -prettyLVars :: Var v => [Mem] -> [v] -> ShowS -prettyLVars [] [] = showString " " -prettyLVars (c:cs) (v:vs) - = showString " " - . showParen True (pvar v . showString ":" . shows c) - . 
prettyLVars cs vs - -prettyLVars [] (_:_) = error "more variables than conventions" -prettyLVars (_:_) [] = error "more conventions than variables" - -prettyRBind :: Var v => [v] -> ShowS -prettyRBind [] = showString "()" -prettyRBind [v] = pvar v -prettyRBind (v:vs) - = showParen True - $ pvar v . foldr (\v r -> shows v . showString "," . r) id vs - -prettySuperNormal :: Var v => Int -> SuperNormal v -> ShowS -prettySuperNormal ind (Lambda ccs (ABTN.TAbss vs tm)) - = prettyLVars ccs vs - . showString "=" - . prettyANF False (ind+1) tm - -prettyANF :: Var v => Bool -> Int -> ANormal v -> ShowS -prettyANF m ind tm = case tm of - TLets vs _ bn bo - -> showString "\n" - . indent ind - . prettyRBind vs - . showString " =" - . prettyANFT False (ind+1) bn - . prettyANF True ind bo - TName v f vs bo - -> showString "\n" - . indent ind - . prettyRBind [v] - . showString " := " - . prettyLZF f - . prettyVars vs - . prettyANF True ind bo - TTm tm - -> prettyANFT m ind tm - _ -> shows tm - -prettySpace :: Bool -> Int -> ShowS -prettySpace False _ = showString " " -prettySpace True ind = showString "\n" . indent ind - -prettyANFT :: Var v => Bool -> Int -> ANormalT v -> ShowS -prettyANFT m ind tm = prettySpace m ind . case tm of - ALit l -> shows l - AFrc v -> showString "!" . pvar v - AVar v -> pvar v - AApp f vs -> prettyFunc f . prettyVars vs - AMatch v bs - -> showString "match " - . pvar v . showString " with" - . prettyBranches (ind+1) bs - AShift r (ABTN.TAbss vs bo) - -> showString "shift[" . shows r . showString "]" - . prettyVars vs . showString "." - . prettyANF False (ind+1) bo - AHnd rs v bo - -> showString "handle" . prettyTags rs - . prettyANF False (ind+1) bo - . showString " with " . pvar v - -prettyLZF :: Var v => Either Word64 v -> ShowS -prettyLZF (Left w) = showString "ENV(" . shows w . showString ") " -prettyLZF (Right v) = pvar v . showString " " - -prettyTags :: [RTag] -> ShowS -prettyTags [] = showString "{}" -prettyTags (r:rs) - = showString "{" . 
shows r - . foldr (\t r -> shows t . showString "," . r) id rs - . showString "}" - -prettyFunc :: Var v => Func v -> ShowS -prettyFunc (FVar v) = pvar v . showString " " -prettyFunc (FCont v) = pvar v . showString " " -prettyFunc (FComb w) = showString "ENV(" . shows w . showString ")" -prettyFunc (FCon r t) - = showString "CON(" - . shows r . showString "," . shows t - . showString ")" -prettyFunc (FReq r t) - = showString "REQ(" - . shows r . showString "," . shows t - . showString ")" -prettyFunc (FPrim op) = either shows shows op . showString " " - -prettyBranches :: Var v => Int -> Branched (ANormal v) -> ShowS -prettyBranches ind bs = case bs of - MatchEmpty -> showString "{}" - MatchIntegral bs df - -> maybe id (\e -> prettyCase ind (showString "_") e id) df - . foldr (uncurry $ prettyCase ind . shows) id (mapToList bs) - MatchText bs df - -> maybe id (\e -> prettyCase ind (showString "_") e id) df - . foldr (uncurry $ prettyCase ind . shows) id (Map.toList bs) - MatchData _ bs df - -> maybe id (\e -> prettyCase ind (showString "_") e id) df - . foldr (uncurry $ prettyCase ind . shows) id - (mapToList $ snd <$> bs) - MatchRequest bs df - -> foldr (\(r,m) s -> - foldr (\(c,e) -> prettyCase ind (prettyReq r c) e) - s (mapToList $ snd <$> m)) - (prettyCase ind (prettyReq 0 0) df id) (mapToList bs) - MatchSum bs - -> foldr (uncurry $ prettyCase ind . shows) id - (mapToList $ snd <$> bs) - -- _ -> error "prettyBranches: todo" - where - prettyReq r c - = showString "REQ(" - . shows r . showString "," . shows c - . showString ")" - -prettyCase :: Var v => Int -> ShowS -> ANormal v -> ShowS -> ShowS -prettyCase ind sc (ABTN.TAbss vs e) r - = showString "\n" . indent ind . sc . prettyVars vs - . showString " ->" . prettyANF False (ind+1) e . 
r diff --git a/parser-typechecker/src/Unison/Runtime/Builtin.hs b/parser-typechecker/src/Unison/Runtime/Builtin.hs deleted file mode 100644 index b09218f869..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Builtin.hs +++ /dev/null @@ -1,1200 +0,0 @@ -{-# language RankNTypes #-} -{-# language ViewPatterns #-} -{-# language PatternGuards #-} -{-# language TypeApplications #-} -{-# language OverloadedStrings #-} -{-# language ScopedTypeVariables #-} -{-# language FunctionalDependencies #-} - -module Unison.Runtime.Builtin - ( builtinLookup - , builtinTermNumbering - , builtinTypeNumbering - , builtinTermBackref - , builtinTypeBackref - , numberedTermLookup - ) where - -import Unison.ABT.Normalized hiding (TTm) -import Unison.Reference -import Unison.Runtime.ANF -import Unison.Var -import Unison.Symbol -import Unison.Runtime.IOSource - -import qualified Unison.Type as Ty -import qualified Unison.Builtin.Decls as Ty - -import Unison.Util.EnumContainers as EC - -import Data.Word (Word64) - -import Data.Set (Set, insert) - -import Data.Map (Map) -import qualified Data.Map as Map - -freshes :: Var v => Int -> [v] -freshes = freshes' mempty - -freshes' :: Var v => Set v -> Int -> [v] -freshes' avoid0 = go avoid0 [] - where - go _ vs 0 = vs - go avoid vs n - = let v = freshIn avoid $ typed ANFBlank - in go (insert v avoid) (v:vs) (n-1) - -boolTag, intTag, natTag, floatTag, charTag :: RTag -boolTag = rtag Ty.booleanRef -intTag = rtag Ty.intRef -natTag = rtag Ty.natRef -floatTag = rtag Ty.floatRef -charTag = rtag Ty.charRef - -optionTag, eitherTag, pairTag, seqViewTag :: RTag -optionTag = rtag Ty.optionalRef -eitherTag = rtag eitherReference -pairTag = rtag Ty.pairRef -seqViewTag = rtag Ty.seqViewRef - -fls, tru :: Var v => ANormal v -fls = TCon boolTag 0 [] -tru = TCon boolTag 1 [] - -boolift :: Var v => v -> ANormalT v -boolift v - = AMatch v $ MatchIntegral (mapFromList [(0,fls), (1,tru)]) Nothing - -notlift :: Var v => v -> ANormalT v -notlift v - = AMatch v $ 
MatchIntegral (mapFromList [(1,fls), (0,tru)]) Nothing - -unbox :: Var v => v -> Reference -> v -> ANormal v -> ANormal v -unbox v0 r v b - = TMatch v0 - $ MatchData r (mapSingleton 0 $ ([UN], TAbs v b)) Nothing - -unenum :: Var v => Int -> v -> Reference -> v -> ANormal v -> ANormal v -unenum n v0 r v nx - = TMatch v0 $ MatchData r cases Nothing - where - mkCase i = (toEnum i, ([], TLet v UN (ALit . I $ fromIntegral i) nx)) - cases = mapFromList . fmap mkCase $ [0..n-1] - -unop0 :: Var v => Int -> ([v] -> ANormal v) -> SuperNormal v -unop0 n f - = Lambda [BX] - . TAbss [x0] - $ f xs - where - xs@(x0:_) = freshes (1+n) - -binop0 :: Var v => Int -> ([v] -> ANormal v) -> SuperNormal v -binop0 n f - = Lambda [BX,BX] - . TAbss [x0,y0] - $ f xs - where - xs@(x0:y0:_) = freshes (2+n) - -unop :: Var v => POp -> Reference -> SuperNormal v -unop pop rf = unop' pop rf rf - -unop' :: Var v => POp -> Reference -> Reference -> SuperNormal v -unop' pop rfi rfo - = unop0 2 $ \[x0,x,r] - -> unbox x0 rfi x - . TLet r UN (APrm pop [x]) - $ TCon (rtag rfo) 0 [r] - -binop :: Var v => POp -> Reference -> SuperNormal v -binop pop rf = binop' pop rf rf rf - -binop' - :: Var v - => POp - -> Reference -> Reference -> Reference - -> SuperNormal v -binop' pop rfx rfy rfr - = binop0 3 $ \[x0,y0,x,y,r] - -> unbox x0 rfx x - . unbox y0 rfy y - . TLet r UN (APrm pop [x,y]) - $ TCon (rtag rfr) 0 [r] - -cmpop :: Var v => POp -> Reference -> SuperNormal v -cmpop pop rf - = binop0 3 $ \[x0,y0,x,y,b] - -> unbox x0 rf x - . unbox y0 rf y - . TLet b UN (APrm pop [x,y]) - $ TTm $ boolift b - -cmpopb :: Var v => POp -> Reference -> SuperNormal v -cmpopb pop rf - = binop0 3 $ \[x0,y0,x,y,b] - -> unbox x0 rf x - . unbox y0 rf y - . TLet b UN (APrm pop [y,x]) - $ TTm $ boolift b - -cmpopn :: Var v => POp -> Reference -> SuperNormal v -cmpopn pop rf - = binop0 3 $ \[x0,y0,x,y,b] - -> unbox x0 rf x - . unbox y0 rf y - . 
TLet b UN (APrm pop [x,y]) - $ TTm $ notlift b - -cmpopbn :: Var v => POp -> Reference -> SuperNormal v -cmpopbn pop rf - = binop0 3 $ \[x0,y0,x,y,b] - -> unbox x0 rf x - . unbox y0 rf y - . TLet b UN (APrm pop [y,x]) - $ TTm $ notlift b - -addi,subi,muli,divi,modi,shli,shri,powi :: Var v => SuperNormal v -addi = binop ADDI Ty.intRef -subi = binop SUBI Ty.intRef -muli = binop MULI Ty.intRef -divi = binop DIVI Ty.intRef -modi = binop MODI Ty.intRef -shli = binop' SHLI Ty.intRef Ty.natRef Ty.intRef -shri = binop' SHRI Ty.intRef Ty.natRef Ty.intRef -powi = binop' POWI Ty.intRef Ty.natRef Ty.intRef - -addn,subn,muln,divn,modn,shln,shrn,pown :: Var v => SuperNormal v -addn = binop ADDN Ty.natRef -subn = binop' SUBN Ty.natRef Ty.natRef Ty.intRef -muln = binop MULN Ty.natRef -divn = binop DIVN Ty.natRef -modn = binop MODN Ty.natRef -shln = binop SHLN Ty.natRef -shrn = binop SHRN Ty.natRef -pown = binop POWN Ty.natRef - -eqi, eqn, lti, ltn, lei, len :: Var v => SuperNormal v -eqi = cmpop EQLI Ty.intRef -lti = cmpopbn LEQI Ty.intRef -lei = cmpop LEQI Ty.intRef -eqn = cmpop EQLN Ty.natRef -ltn = cmpopbn LEQN Ty.natRef -len = cmpop LEQN Ty.natRef - -gti, gtn, gei, gen :: Var v => SuperNormal v -gti = cmpopn LEQI Ty.intRef -gei = cmpopb LEQI Ty.intRef -gtn = cmpopn LEQN Ty.intRef -gen = cmpopb LEQN Ty.intRef - -neqi, neqn :: Var v => SuperNormal v -neqi = cmpopn EQLI Ty.intRef -neqn = cmpopn EQLN Ty.intRef - -inci, incn :: Var v => SuperNormal v -inci = unop INCI Ty.intRef -incn = unop INCN Ty.natRef - -sgni, negi :: Var v => SuperNormal v -sgni = unop SGNI Ty.intRef -negi = unop NEGI Ty.intRef - -lzeron, tzeron, lzeroi, tzeroi :: Var v => SuperNormal v -lzeron = unop LZRO Ty.natRef -tzeron = unop TZRO Ty.natRef -lzeroi = unop' LZRO Ty.intRef Ty.natRef -tzeroi = unop' TZRO Ty.intRef Ty.natRef - -andn, orn, xorn, compln :: Var v => SuperNormal v -andn = binop ANDN Ty.natRef -orn = binop IORN Ty.natRef -xorn = binop XORN Ty.natRef -compln = unop COMN Ty.natRef - -addf, subf, 
mulf, divf, powf, sqrtf, logf, logbf - :: Var v => SuperNormal v -addf = binop ADDF Ty.floatRef -subf = binop SUBF Ty.floatRef -mulf = binop MULF Ty.floatRef -divf = binop DIVF Ty.floatRef -powf = binop POWF Ty.floatRef -sqrtf = unop SQRT Ty.floatRef -logf = unop LOGF Ty.floatRef -logbf = binop LOGB Ty.floatRef - -expf, absf :: Var v => SuperNormal v -expf = unop EXPF Ty.floatRef -absf = unop ABSF Ty.floatRef - -cosf, sinf, tanf, acosf, asinf, atanf :: Var v => SuperNormal v -cosf = unop COSF Ty.floatRef -sinf = unop SINF Ty.floatRef -tanf = unop TANF Ty.floatRef -acosf = unop ACOS Ty.floatRef -asinf = unop ASIN Ty.floatRef -atanf = unop ATAN Ty.floatRef - -coshf, sinhf, tanhf, acoshf, asinhf, atanhf, atan2f - :: Var v => SuperNormal v -coshf = unop COSH Ty.floatRef -sinhf = unop SINH Ty.floatRef -tanhf = unop TANH Ty.floatRef -acoshf = unop ACSH Ty.floatRef -asinhf = unop ASNH Ty.floatRef -atanhf = unop ATNH Ty.floatRef -atan2f = binop ATN2 Ty.floatRef - -ltf, gtf, lef, gef, eqf, neqf :: Var v => SuperNormal v -ltf = cmpopbn LEQF Ty.floatRef -gtf = cmpopn LEQF Ty.floatRef -lef = cmpop LEQF Ty.floatRef -gef = cmpopb LEQF Ty.floatRef -eqf = cmpop EQLF Ty.floatRef -neqf = cmpopn EQLF Ty.floatRef - -minf, maxf :: Var v => SuperNormal v -minf = binop MINF Ty.floatRef -maxf = binop MAXF Ty.floatRef - -ceilf, floorf, truncf, roundf, i2f, n2f :: Var v => SuperNormal v -ceilf = unop' CEIL Ty.floatRef Ty.intRef -floorf = unop' FLOR Ty.floatRef Ty.intRef -truncf = unop' TRNF Ty.floatRef Ty.intRef -roundf = unop' RNDF Ty.floatRef Ty.intRef -i2f = unop' ITOF Ty.intRef Ty.floatRef -n2f = unop' NTOF Ty.natRef Ty.floatRef - -trni :: Var v => SuperNormal v -trni = unop0 3 $ \[x0,x,z,b] - -> unbox x0 Ty.intRef x - . TLet z UN (ALit $ I 0) - . TLet b UN (APrm LEQI [x, z]) - . 
TMatch b - $ MatchIntegral - (mapSingleton 1 $ TCon natTag 0 [z]) - (Just $ TCon natTag 0 [x]) - -modular :: Var v => POp -> (Bool -> ANormal v) -> SuperNormal v -modular pop ret - = unop0 3 $ \[x0,x,m,t] - -> unbox x0 Ty.intRef x - . TLet t UN (ALit $ I 2) - . TLet m UN (APrm pop [x,t]) - . TMatch m - $ MatchIntegral - (mapSingleton 1 $ ret True) - (Just $ ret False) - -evni, evnn, oddi, oddn :: Var v => SuperNormal v -evni = modular MODI (\b -> if b then fls else tru) -oddi = modular MODI (\b -> if b then tru else fls) -evnn = modular MODN (\b -> if b then fls else tru) -oddn = modular MODN (\b -> if b then tru else fls) - -dropn :: Var v => SuperNormal v -dropn = binop0 4 $ \[x0,y0,x,y,b,r] - -> unbox x0 Ty.natRef x - . unbox y0 Ty.natRef y - . TLet b UN (APrm LEQN [x,y]) - . TLet r UN - (AMatch b $ MatchIntegral - (mapSingleton 1 $ TLit $ N 0) - (Just $ TPrm SUBN [x,y])) - $ TCon (rtag Ty.natRef) 0 [r] - -appendt, taket, dropt, sizet, unconst, unsnoct :: Var v => SuperNormal v -appendt = binop0 0 $ \[x,y] -> TPrm CATT [x,y] -taket = binop0 1 $ \[x0,y,x] - -> unbox x0 Ty.natRef x - $ TPrm TAKT [x,y] -dropt = binop0 1 $ \[x0,y,x] - -> unbox x0 Ty.natRef x - $ TPrm DRPT [x,y] -sizet = unop0 1 $ \[x,r] - -> TLet r UN (APrm SIZT [x]) - $ TCon (rtag Ty.natRef) 0 [r] -unconst = unop0 5 $ \[x,t,c0,c,y,p] - -> TLet t UN (APrm UCNS [x]) - . TMatch t . MatchSum $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([UN,BX], TAbss [c0,y] - . TLet c BX (ACon charTag 0 [c0]) - . TLet p BX (ACon pairTag 0 [c,y]) - $ TCon optionTag 1 [p])) - ] -unsnoct = unop0 5 $ \[x,t,c0,c,y,p] - -> TLet t UN (APrm USNC [x]) - . TMatch t . MatchSum $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([BX,UN], TAbss [y,c0] - . TLet c BX (ACon charTag 0 [c0]) - . 
TLet p BX (ACon pairTag 0 [y,c]) - $ TCon optionTag 1 [p])) - ] - -appends, conss, snocs :: Var v => SuperNormal v -appends = binop0 0 $ \[x,y] -> TPrm CATS [x,y] -conss = binop0 0 $ \[x,y] -> TPrm CONS [x,y] -snocs = binop0 0 $ \[x,y] -> TPrm SNOC [x,y] - -takes, drops, sizes, ats, emptys :: Var v => SuperNormal v -takes = binop0 1 $ \[x0,y,x] - -> unbox x0 Ty.natRef x - $ TPrm TAKS [x,y] -drops = binop0 1 $ \[x0,y,x] - -> unbox x0 Ty.natRef x - $ TPrm DRPS [x,y] -sizes = unop0 1 $ \[x,r] - -> TLet r UN (APrm SIZS [x]) - $ TCon natTag 0 [r] -ats = binop0 3 $ \[x0,y,x,t,r] - -> unbox x0 Ty.natRef x - . TLet t UN (APrm IDXS [x,y]) - . TMatch t . MatchSum $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([BX], TAbs r $ TCon optionTag 1 [r])) - ] -emptys = Lambda [] $ TPrm BLDS [] - -viewls, viewrs :: Var v => SuperNormal v -viewls = unop0 3 $ \[s,u,h,t] - -> TLet u UN (APrm VWLS [s]) - . TMatch u . MatchSum $ mapFromList - [ (0, ([], TCon seqViewTag 0 [])) - , (1, ([BX,BX], TAbss [h,t] $ TCon seqViewTag 1 [h,t])) - ] -viewrs = unop0 3 $ \[s,u,i,l] - -> TLet u UN (APrm VWRS [s]) - . TMatch u . MatchSum $ mapFromList - [ (0, ([], TCon seqViewTag 0 [])) - , (1, ([BX,BX], TAbss [i,l] $ TCon seqViewTag 1 [i,l])) - ] - -eqt, neqt, leqt, geqt, lesst, great :: Var v => SuperNormal v -eqt = binop0 1 $ \[x,y,b] - -> TLet b UN (APrm EQLT [x,y]) - . TTm $ boolift b -neqt = binop0 1 $ \[x,y,b] - -> TLet b UN (APrm EQLT [x,y]) - . TTm $ notlift b -leqt = binop0 1 $ \[x,y,b] - -> TLet b UN (APrm LEQT [x,y]) - . TTm $ boolift b -geqt = binop0 1 $ \[x,y,b] - -> TLet b UN (APrm LEQT [y,x]) - . TTm $ boolift b -lesst = binop0 1 $ \[x,y,b] - -> TLet b UN (APrm LEQT [y,x]) - . TTm $ notlift b -great = binop0 1 $ \[x,y,b] - -> TLet b UN (APrm LEQT [x,y]) - . 
TTm $ notlift b - -packt, unpackt :: Var v => SuperNormal v -packt = unop0 0 $ \[s] -> TPrm PAKT [s] -unpackt = unop0 0 $ \[t] -> TPrm UPKT [t] - -packb, unpackb, emptyb, appendb :: Var v => SuperNormal v -packb = unop0 0 $ \[s] -> TPrm PAKB [s] -unpackb = unop0 0 $ \[b] -> TPrm UPKB [b] -emptyb - = Lambda [] - . TLet es BX (APrm BLDS []) - $ TPrm PAKB [es] - where - [es] = freshes 1 -appendb = binop0 0 $ \[x,y] -> TPrm CATB [x,y] - -takeb, dropb, atb, sizeb, flattenb :: Var v => SuperNormal v -takeb = binop0 1 $ \[n0,b,n] - -> unbox n0 Ty.natRef n - $ TPrm TAKB [n,b] - -dropb = binop0 1 $ \[n0,b,n] - -> unbox n0 Ty.natRef n - $ TPrm DRPB [n,b] - -atb = binop0 4 $ \[n0,b,n,t,r0,r] - -> unbox n0 Ty.natRef n - . TLet t UN (APrm IDXB [n,b]) - . TMatch t . MatchSum $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([UN], TAbs r0 - . TLet r BX (ACon natTag 0 [r0]) - $ TCon optionTag 1 [r])) - ] - -sizeb = unop0 1 $ \[b,n] - -> TLet n UN (APrm SIZB [b]) - $ TCon natTag 0 [n] - -flattenb = unop0 0 $ \[b] -> TPrm FLTB [b] - -i2t, n2t, f2t :: Var v => SuperNormal v -i2t = unop0 1 $ \[n0,n] - -> unbox n0 Ty.intRef n - $ TPrm ITOT [n] -n2t = unop0 1 $ \[n0,n] - -> unbox n0 Ty.natRef n - $ TPrm NTOT [n] -f2t = unop0 1 $ \[f0,f] - -> unbox f0 Ty.floatRef f - $ TPrm FTOT [f] - -t2i, t2n, t2f :: Var v => SuperNormal v -t2i = unop0 3 $ \[x,t,n0,n] - -> TLet t UN (APrm TTOI [x]) - . TMatch t . MatchSum $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([UN], TAbs n0 - . TLet n BX (ACon intTag 0 [n0]) - $ TCon optionTag 1 [n])) - ] -t2n = unop0 3 $ \[x,t,n0,n] - -> TLet t UN (APrm TTON [x]) - . TMatch t . MatchSum $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([UN], TAbs n0 - . TLet n BX (ACon natTag 0 [n0]) - $ TCon optionTag 1 [n])) - ] -t2f = unop0 3 $ \[x,t,f0,f] - -> TLet t UN (APrm TTOF [x]) - . TMatch t . MatchSum $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([UN], TAbs f0 - . 
TLet f BX (ACon floatTag 0 [f0]) - $ TCon optionTag 1 [f])) - ] - -equ :: Var v => SuperNormal v -equ = binop0 1 $ \[x,y,b] - -> TLet b UN (APrm EQLU [x,y]) - . TTm $ boolift b - -cmpu :: Var v => SuperNormal v -cmpu = binop0 2 $ \[x,y,c,i] - -> TLet c UN (APrm CMPU [x,y]) - . TLet i UN (APrm DECI [c]) - $ TCon intTag 0 [i] - -ltu :: Var v => SuperNormal v -ltu = binop0 1 $ \[x,y,c] - -> TLet c UN (APrm CMPU [x,y]) - . TMatch c - $ MatchIntegral - (mapFromList [ (0, TCon boolTag 1 []) ]) - (Just $ TCon boolTag 0 []) - -gtu :: Var v => SuperNormal v -gtu = binop0 1 $ \[x,y,c] - -> TLet c UN (APrm CMPU [x,y]) - . TMatch c - $ MatchIntegral - (mapFromList [ (2, TCon boolTag 1 []) ]) - (Just $ TCon boolTag 0 []) - -geu :: Var v => SuperNormal v -geu = binop0 1 $ \[x,y,c] - -> TLet c UN (APrm CMPU [x,y]) - . TMatch c - $ MatchIntegral - (mapFromList [ (0, TCon boolTag 0 []) ]) - (Just $ TCon boolTag 1 []) - -leu :: Var v => SuperNormal v -leu = binop0 1 $ \[x,y,c] - -> TLet c UN (APrm CMPU [x,y]) - . TMatch c - $ MatchIntegral - (mapFromList [ (2, TCon boolTag 0 []) ]) - (Just $ TCon boolTag 1 []) - -notb :: Var v => SuperNormal v -notb = unop0 0 $ \[b] - -> TMatch b . flip (MatchData Ty.booleanRef) Nothing - $ mapFromList [ (0, ([], tru)), (1, ([], fls)) ] - -orb :: Var v => SuperNormal v -orb = binop0 0 $ \[p,q] - -> TMatch p . flip (MatchData Ty.booleanRef) Nothing - $ mapFromList [ (1, ([], tru)), (0, ([], TVar q)) ] - -andb :: Var v => SuperNormal v -andb = binop0 0 $ \[p,q] - -> TMatch p . flip (MatchData Ty.booleanRef) Nothing - $ mapFromList [ (0, ([], fls)), (1, ([], TVar q)) ] - --- unsafeCoerce, used for numeric types where conversion is a --- no-op on the representation. Ideally this will be inlined and --- eliminated so that no instruction is necessary. 
-cast :: Var v => Reference -> Reference -> SuperNormal v -cast ri ro - = unop0 1 $ \[x0,x] - -> unbox x0 ri x - $ TCon (rtag ro) 0 [x] - -jumpk :: Var v => SuperNormal v -jumpk = binop0 0 $ \[k,a] -> TKon k [a] - -bug :: Var v => SuperNormal v -bug = unop0 0 $ \[x] -> TPrm EROR [x] - -watch :: Var v => SuperNormal v -watch - = binop0 0 $ \[t,v] - -> TLets [] [] (APrm PRNT [t]) - $ TVar v - -type IOOP = forall v. Var v => Set v -> ([Mem], ANormal v) - -io'error'result0 - :: Var v - => IOp -> [v] - -> v -> [Mem] -> [v] -> v - -> ANormal v -> ANormal v -io'error'result0 ins args ior ccs vs e nx - = TLet ior UN (AIOp ins args) - . TMatch ior . MatchSum - $ mapFromList - [ (0, ([BX], TAbs e $ TCon eitherTag 0 [e])) - , (1, (ccs, TAbss vs nx)) - ] - -io'error'result'let - :: Var v - => IOp -> [v] - -> v -> [Mem] -> [v] -> v -> v -> ANormalT v - -> ANormal v -io'error'result'let ins args ior ccs vs e r m - = io'error'result0 ins args ior ccs vs e - . TLet r BX m - $ TCon eitherTag 1 [r] - -io'error'result'direct - :: Var v - => IOp -> [v] - -> v -> v -> v - -> ANormal v -io'error'result'direct ins args ior e r - = io'error'result0 ins args ior [BX] [r] e - $ TCon eitherTag 1 [r] - -io'error'result'unit - :: Var v - => IOp -> [v] - -> v -> v -> v - -> ANormal v -io'error'result'unit ins args ior e r - = io'error'result'let ins args ior [] [] e r - $ ACon (rtag Ty.unitRef) 0 [] - -io'error'result'bool - :: Var v - => IOp -> [v] - -> v -> (v -> ANormalT v) -> v -> v -> v -> ANormal v -io'error'result'bool ins args ior encode b e r - = io'error'result'let ins args ior [UN] [b] e r - $ encode b - -open'file :: IOOP -open'file avoid - = ([BX,BX],) - . TAbss [fp,m0] - . unenum 4 m0 ioModeReference m - $ io'error'result'direct OPENFI [fp,m] ior e r - where - [m0,fp,m,ior,e,r] = freshes' avoid 6 - -close'file :: IOOP -close'file avoid - = ([BX],) - . 
TAbss [h] - $ io'error'result'unit CLOSFI [h] ior e r - where - [h,ior,e,r] = freshes' avoid 4 - -is'file'eof :: IOOP -is'file'eof avoid - = ([BX],) - . TAbss [h] - $ io'error'result'bool ISFEOF [h] ior boolift b e r - where - [h,b,ior,e,r] = freshes' avoid 5 - -is'file'open :: IOOP -is'file'open avoid - = ([BX],) - . TAbss [h] - $ io'error'result'bool ISFOPN [h] ior boolift b e r - where - [h,b,ior,e,r] = freshes' avoid 5 - -is'seekable :: IOOP -is'seekable avoid - = ([BX],) - . TAbss [h] - $ io'error'result'bool ISSEEK [h] ior boolift b e r - where - [h,b,ior,e,r] = freshes' avoid 5 - -standard'handle :: IOOP -standard'handle avoid - = ([BX],) - . TAbss [n0] - . unbox n0 Ty.natRef n - . TLet r UN (AIOp STDHND [n]) - . TMatch r . MatchSum - $ mapFromList - [ (0, ([], TCon optionTag 0 [])) - , (1, ([BX], TAbs h $ TCon optionTag 1 [h])) - ] - where - [n0,n,h,r] = freshes' avoid 4 - -seek'handle :: IOOP -seek'handle avoid - = ([BX,BX,BX],) - . TAbss [h,sm0,po0] - . unenum 3 sm0 seekModeReference sm - . unbox po0 Ty.natRef po - $ io'error'result'unit SEEKFI [h,sm,po] ior e r - where - [sm0,po0,h,sm,po,ior,e,r] = freshes' avoid 8 - -handle'position :: IOOP -handle'position avoid - = ([BX],) - . TAbss [h] - . io'error'result'let POSITN [h] ior [UN] [i] e r - $ (ACon (rtag Ty.intRef) 0 [i]) - where - [h,i,ior,e,r] = freshes' avoid 5 - -get'buffering :: IOOP -get'buffering avoid - = ([BX],) - . TAbss [h] - . io'error'result'let GBUFFR [h] ior [UN] [bu] e r - . AMatch bu . 
MatchSum - $ mapFromList - [ (0, ([], TCon (rtag Ty.optionalRef) 0 [])) - , (1, ([], line)) - , (2, ([], block'nothing)) - , (3, ([UN], TAbs n $ block'n)) - ] - where - [h,bu,ior,e,r,m,n,b] = freshes' avoid 8 - final = TCon (rtag Ty.optionalRef) 1 [b] - block = TLet b BX (ACon (rtag bufferModeReference) 1 [m]) $ final - - line - = TLet b BX (ACon (rtag bufferModeReference) 0 []) $ final - block'nothing - = TLet m BX (ACon (rtag Ty.optionalRef) 0 []) - $ block - block'n - = TLet m BX (ACon (rtag Ty.optionalRef) 1 [n]) - $ block - -set'buffering :: IOOP -set'buffering avoid - = ([BX,BX],) - . TAbss [h,bm0] - . TMatch bm0 . flip (MatchData Ty.optionalRef) Nothing - $ mapFromList - [ (0, ([], none'branch)) - , (1, ([BX], TAbs bm just'branch'0)) - ] - where - [t,ior,e,r,h,mbs,bs0,bs,bm0,bm] = freshes' avoid 10 - none'branch - = TLet t UN (ALit $ I 0) - $ io'error'result'unit SBUFFR [h,t] ior e r - just'branch'0 - = TMatch bm . flip (MatchData bufferModeReference) Nothing - $ mapFromList - [ (0, ([] - , TLet t UN (ALit $ I 1) - $ io'error'result'unit SBUFFR [h,t] ior e r - )) - , (1, ([BX], TAbs mbs just'branch'1)) - ] - just'branch'1 - = TMatch mbs - . flip (MatchData Ty.optionalRef) Nothing - $ mapFromList - [ (0, ([] - , TLet t UN (ALit $ I 2) - $ io'error'result'unit SBUFFR [h,t] ior e r)) - , (1, ([BX] - , TAbs bs0 - . unbox bs0 Ty.natRef bs - . TLet t UN (ALit $ I 3) - $ io'error'result'unit SBUFFR [h,t,bs] ior e r)) - ] - -get'line :: IOOP -get'line avoid - = ([BX],) - . TAbss [h] - $ io'error'result'direct GTLINE [h] ior e r - where - [h,ior,e,r] = freshes' avoid 4 - -get'text :: IOOP -get'text avoid - = ([BX],) - . TAbss [h] - $ io'error'result'direct GTTEXT [h] ior e r - where - [h,ior,e,r] = freshes' avoid 4 - -put'text :: IOOP -put'text avoid - = ([BX,BX],) - . TAbss [h,tx] - $ io'error'result'direct PUTEXT [h,tx] ior e r - where - [h,tx,ior,e,r] = freshes' avoid 5 - -system'time :: IOOP -system'time avoid - = ([],) - . 
io'error'result'let SYTIME [] ior [UN] [n] e r - $ ACon (rtag Ty.natRef) 0 [n] - where - [n,ior,e,r] = freshes' avoid 4 - -get'temp'directory :: IOOP -get'temp'directory avoid - = ([],) - . io'error'result'let GTMPDR [] ior [BX] [t] e r - $ ACon (rtag filePathReference) 0 [t] - where - [t,ior,e,r] = freshes' avoid 4 - -get'current'directory :: IOOP -get'current'directory avoid - = ([],) - . io'error'result'let GCURDR [] ior [BX] [t] e r - $ ACon (rtag filePathReference) 0 [r] - where - [t,e,r,ior] = freshes' avoid 4 - -set'current'directory :: IOOP -set'current'directory avoid - = ([BX],) - . TAbs fp - $ io'error'result'unit SCURDR [fp] ior e r - where - [fp,ior,e,r] = freshes' avoid 4 - --- directory'contents --- DCNTNS --- directoryContents_ : io.FilePath -> Either io.Error [io.FilePath] - - -file'exists :: IOOP -file'exists avoid - = ([BX],) - . TAbs fp - $ io'error'result'bool FEXIST [fp] ior boolift b e r - where - [fp,b,ior,e,r] = freshes' avoid 5 - -is'directory :: IOOP -is'directory avoid - = ([BX],) - . TAbs fp - $ io'error'result'bool ISFDIR [fp] ior boolift b e r - where - [fp,b,ior,e,r] = freshes' avoid 5 - -create'directory :: IOOP -create'directory avoid - = ([BX],) - . TAbs fp - $ io'error'result'unit CRTDIR [fp] ior e r - where - [fp,ior,e,r] = freshes' avoid 4 - -remove'directory :: IOOP -remove'directory avoid - = ([BX],) - . TAbs fp - $ io'error'result'unit REMDIR [fp] ior e r - where - [fp,ior,e,r] = freshes' avoid 4 - -rename'directory :: IOOP -rename'directory avoid - = ([BX,BX],) - . TAbss [from,to] - $ io'error'result'unit RENDIR [from,to] ior e r - where - [from,to,ior,e,r] = freshes' avoid 5 - -remove'file :: IOOP -remove'file avoid - = ([BX],) - . TAbs fp - $ io'error'result'unit REMOFI [fp] ior e r - where - [fp,ior,e,r] = freshes' avoid 4 - -rename'file :: IOOP -rename'file avoid - = ([BX,BX],) - . 
TAbss [from,to] - $ io'error'result'unit RENAFI [from,to] ior e r - where - [from,to,ior,e,r] = freshes' avoid 5 - -get'file'timestamp :: IOOP -get'file'timestamp avoid - = ([BX],) - . TAbs fp - . io'error'result'let GFTIME [fp] ior [UN] [n] e r - $ ACon (rtag Ty.natRef) 0 [n] - where - [fp,n,ior,e,r] = freshes' avoid 5 - -get'file'size :: IOOP -get'file'size avoid - = ([BX],) - . TAbs fp - . io'error'result'let GFSIZE [fp] ior [UN] [n] e r - $ ACon (rtag Ty.natRef) 0 [n] - where - [fp,n,ior,e,r] = freshes' avoid 5 - -server'socket :: IOOP -server'socket avoid - = ([BX,BX],) - . TAbss [mhn,sn] - . TMatch mhn . flip (MatchData Ty.optionalRef) Nothing - $ mapFromList - [ (0, ([], none'branch)) - , (1, ([BX], TAbs hn just'branch)) - ] - where - [mhn,sn,hn,t,ior,e,r] = freshes' avoid 7 - none'branch - = TLet t UN (ALit $ I 0) - $ io'error'result'direct SRVSCK [t,sn] ior e r - just'branch - = TLet t UN (ALit $ I 1) - $ io'error'result'direct SRVSCK [t,hn,sn] ior e r - -listen :: IOOP -listen avoid - = ([BX],) - . TAbs sk - $ io'error'result'direct LISTEN [sk] ior e r - where - [sk,ior,e,r] = freshes' avoid 4 - -client'socket :: IOOP -client'socket avoid - = ([BX,BX],) - . TAbss [hn,sn] - $ io'error'result'direct CLISCK [hn,sn] ior e r - where - [hn,sn,r,ior,e] = freshes' avoid 5 - -close'socket :: IOOP -close'socket avoid - = ([BX,BX],) - . TAbs sk - $ io'error'result'unit CLOSCK [sk] ior e r - where - [sk,ior,e,r] = freshes' avoid 4 - -socket'accept :: IOOP -socket'accept avoid - = ([BX],) - . TAbs sk - $ io'error'result'direct SKACPT [sk] ior e r - where - [sk,r,e,ior] = freshes' avoid 4 - -socket'send :: IOOP -socket'send avoid - = ([BX,BX],) - . TAbss [sk,by] - $ io'error'result'unit SKSEND [sk,by] ior e r - where - [sk,by,ior,e,r] = freshes' avoid 5 - -socket'receive :: IOOP -socket'receive avoid - = ([BX,BX],) - . TAbss [sk,n0] - . unbox n0 Ty.natRef n - . io'error'result'let SKRECV [sk,n] ior [UN] [mt] e r - . AMatch mt . 
MatchSum - $ mapFromList - [ (0, ([], TCon (rtag Ty.optionalRef) 0 [])) - , (1, ([BX], TAbs b $ TCon (rtag Ty.optionalRef) 1 [b])) - ] - where - [n0,sk,n,ior,e,r,b,mt] = freshes' avoid 8 - -fork'comp :: IOOP -fork'comp avoid - = ([BX],) - . TAbs lz - $ TPrm FORK [lz] - where - [lz] = freshes' avoid 3 - -builtinLookup :: Var v => Map.Map Reference (SuperNormal v) -builtinLookup - = Map.fromList - $ map (\(t, f) -> (Builtin t, f)) - [ ("Int.+", addi) - , ("Int.-", subi) - , ("Int.*", muli) - , ("Int./", divi) - , ("Int.mod", modi) - , ("Int.==", eqi) - , ("Int.!=", neqi) - , ("Int.<", lti) - , ("Int.<=", lei) - , ("Int.>", gti) - , ("Int.>=", gei) - , ("Int.increment", inci) - , ("Int.signum", sgni) - , ("Int.negate", negi) - , ("Int.truncate0", trni) - , ("Int.isEven", evni) - , ("Int.isOdd", oddi) - , ("Int.shiftLeft", shli) - , ("Int.shiftRight", shri) - , ("Int.trailingZeros", tzeroi) - , ("Int.leadingZeros", lzeroi) - , ("Int.pow", powi) - , ("Int.toText", i2t) - , ("Int.fromText", t2i) - , ("Int.toFloat", i2f) - - , ("Nat.+", addn) - , ("Nat.-", subn) - , ("Nat.sub", subn) - , ("Nat.*", muln) - , ("Nat./", divn) - , ("Nat.mod", modn) - , ("Nat.==", eqn) - , ("Int.!=", neqn) - , ("Nat.<", ltn) - , ("Nat.<=", len) - , ("Nat.>", gtn) - , ("Nat.>=", gen) - , ("Nat.increment", incn) - , ("Nat.isEven", evnn) - , ("Nat.isOdd", oddn) - , ("Nat.shiftLeft", shln) - , ("Nat.shiftRight", shrn) - , ("Nat.trailingZeros", tzeron) - , ("Nat.leadingZeros", lzeron) - , ("Nat.and", andn) - , ("Nat.or", orn) - , ("Nat.xor", xorn) - , ("Nat.complement", compln) - , ("Nat.pow", pown) - , ("Nat.drop", dropn) - , ("Nat.toInt", cast Ty.natRef Ty.intRef) - , ("Nat.toFloat", n2f) - , ("Nat.toText", n2t) - , ("Nat.fromText", t2n) - - , ("Float.+", addf) - , ("Float.-", subf) - , ("Float.*", mulf) - , ("Float./", divf) - , ("Float.pow", powf) - , ("Float.log", logf) - , ("Float.logBase", logbf) - , ("Float.sqrt", sqrtf) - - , ("Float.min", minf) - , ("Float.max", maxf) - - , ("Float.<", 
ltf) - , ("Float.>", gtf) - , ("Float.<=", lef) - , ("Float.>=", gef) - , ("Float.==", eqf) - , ("Float.!=", neqf) - - , ("Float.acos", acosf) - , ("Float.asin", asinf) - , ("Float.atan", atanf) - , ("Float.cos", cosf) - , ("Float.sin", sinf) - , ("Float.tan", tanf) - - , ("Float.acosh", acoshf) - , ("Float.asinh", asinhf) - , ("Float.atanh", atanhf) - , ("Float.cosh", coshf) - , ("Float.sinh", sinhf) - , ("Float.tanh", tanhf) - - , ("Float.exp", expf) - , ("Float.abs", absf) - - , ("Float.ceiling", ceilf) - , ("Float.floor", floorf) - , ("Float.round", roundf) - , ("Float.truncate", truncf) - , ("Float.atan2", atan2f) - - , ("Float.toText", f2t) - , ("Float.fromText", t2f) - - -- text - , ("Text.empty", Lambda [] $ TLit (T "")) - , ("Text.++", appendt) - , ("Text.take", taket) - , ("Text.drop", dropt) - , ("Text.size", sizet) - , ("Text.==", eqt) - , ("Text.!=", neqt) - , ("Text.<=", leqt) - , ("Text.>=", geqt) - , ("Text.<", lesst) - , ("Text.>", great) - , ("Text.uncons", unconst) - , ("Text.unsnoc", unsnoct) - , ("Text.toCharList", unpackt) - , ("Text.fromCharList", packt) - - , ("Boolean.not", notb) - , ("Boolean.or", orb) - , ("Boolean.and", andb) - - , ("bug", bug) - , ("todo", bug) - , ("Debug.watch", watch) - - , ("Char.toNat", cast Ty.charRef Ty.natRef) - , ("Char.fromNat", cast Ty.natRef Ty.charRef) - - , ("Bytes.empty", emptyb) - , ("Bytes.fromList", packb) - , ("Bytes.toList", unpackb) - , ("Bytes.++", appendb) - , ("Bytes.take", takeb) - , ("Bytes.drop", dropb) - , ("Bytes.at", atb) - , ("Bytes.size", sizeb) - , ("Bytes.flatten", flattenb) - - , ("List.take", takes) - , ("List.drop", drops) - , ("List.size", sizes) - , ("List.++", appends) - , ("List.at", ats) - , ("List.cons", conss) - , ("List.snoc", snocs) - , ("List.empty", emptys) - , ("List.viewl", viewls) - , ("List.viewr", viewrs) --- --- , B "Debug.watch" $ forall1 "a" (\a -> text --> a --> a) - , ("Universal.==", equ) - , ("Universal.compare", cmpu) - , ("Universal.>", gtu) - , 
("Universal.<", ltu) - , ("Universal.>=", geu) - , ("Universal.<=", leu) - - , ("jumpCont", jumpk) - - , ("IO.openFile", ioComb open'file) - , ("IO.closeFile", ioComb close'file) - , ("IO.isFileEOF", ioComb is'file'eof) - , ("IO.isFileOpen", ioComb is'file'open) - , ("IO.isSeekable", ioComb is'seekable) - , ("IO.seekHandle", ioComb seek'handle) - , ("IO.handlePosition", ioComb handle'position) - , ("IO.getBuffering", ioComb get'buffering) - , ("IO.setBuffering", ioComb set'buffering) - , ("IO.getLine", ioComb get'line) - , ("IO.getText", ioComb get'text) - , ("IO.putText", ioComb put'text) - , ("IO.systemTime", ioComb system'time) - , ("IO.getTempDirectory", ioComb get'temp'directory) - , ("IO.getCurrentDirectory", ioComb get'current'directory) - , ("IO.setCurrentDirectory", ioComb set'current'directory) - , ("IO.fileExists", ioComb file'exists) - , ("IO.isDirectory", ioComb is'directory) - , ("IO.createDirectory", ioComb create'directory) - , ("IO.removeDirectory", ioComb remove'directory) - , ("IO.renameDirectory", ioComb rename'directory) - , ("IO.removeFile", ioComb remove'file) - , ("IO.renameFile", ioComb rename'file) - , ("IO.getFileTimestamp", ioComb get'file'timestamp) - , ("IO.getFileSize", ioComb get'file'size) - , ("IO.serverSocket", ioComb server'socket) - , ("IO.listen", ioComb listen) - , ("IO.clientSocket", ioComb client'socket) - , ("IO.closeSocket", ioComb close'socket) - , ("IO.socketAccept", ioComb socket'accept) - , ("IO.socketSend", ioComb socket'send) - , ("IO.socketReceive", ioComb socket'receive) - , ("IO.forkComp", ioComb fork'comp) - , ("IO.stdHandle", ioComb standard'handle) - ] - -ioComb :: Var v => IOOP -> SuperNormal v -ioComb ioop = uncurry Lambda (ioop mempty) - -typeReferences :: [(Reference, RTag)] -typeReferences - = zip - [ Ty.natRef - , Ty.optionalRef - , Ty.unitRef - , Ty.pairRef - , Ty.booleanRef - , Ty.intRef - , Ty.floatRef - , Ty.booleanRef - , Ty.textRef - , Ty.charRef - , eitherReference - , filePathReference - , 
bufferModeReference - , Ty.effectRef - , Ty.vectorRef - , Ty.seqViewRef - ] [1..] - -numberedTermLookup :: Var v => EnumMap Word64 (SuperNormal v) -numberedTermLookup - = mapFromList . zip [1..] . Map.elems $ builtinLookup - -rtag :: Reference -> RTag -rtag r | Just x <- Map.lookup r builtinTypeNumbering = x - | otherwise = error $ "rtag: unknown reference: " ++ show r - - -builtinTermNumbering :: Map Reference Word64 -builtinTermNumbering - = Map.fromList (zip (Map.keys $ builtinLookup @Symbol) [1..]) - -builtinTermBackref :: EnumMap Word64 Reference -builtinTermBackref - = mapFromList . zip [1..] . Map.keys $ builtinLookup @Symbol - -builtinTypeNumbering :: Map Reference RTag -builtinTypeNumbering = Map.fromList typeReferences - -builtinTypeBackref :: EnumMap RTag Reference -builtinTypeBackref = mapFromList $ swap <$> typeReferences - where swap (x, y) = (y, x) diff --git a/parser-typechecker/src/Unison/Runtime/Debug.hs b/parser-typechecker/src/Unison/Runtime/Debug.hs deleted file mode 100644 index a049fdbcf6..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Debug.hs +++ /dev/null @@ -1,52 +0,0 @@ - -module Unison.Runtime.Debug - ( traceComb - , traceCombs - , tracePretty - , tracePrettyGroup - ) where - -import Data.Word - -import qualified Unison.Term as Tm -import Unison.Var (Var) -import Unison.PrettyPrintEnv (PrettyPrintEnv) -import Unison.TermPrinter (pretty) -import Unison.Util.Pretty (toANSI) -import Unison.Util.EnumContainers - -import Unison.Runtime.ANF -import Unison.Runtime.MCode - -import Debug.Trace - -type Term v = Tm.Term v () - -traceComb :: Bool -> Word64 -> Comb -> Bool -traceComb False _ _ = True -traceComb True w c = trace (prettyComb w c "\n") True - -traceCombs - :: Bool - -> (Comb, EnumMap Word64 Comb, Word64) - -> (Comb, EnumMap Word64 Comb, Word64) -traceCombs False c = c -traceCombs True c = trace (prettyCombs c "") c - -tracePretty - :: Var v - => PrettyPrintEnv - -> Bool - -> Term v - -> Term v -tracePretty _ False tm = tm 
-tracePretty ppe True tm = trace (toANSI 50 $ pretty ppe tm) tm - -tracePrettyGroup - :: Var v - => Bool - -> SuperGroup v - -> SuperGroup v -tracePrettyGroup False g = g -tracePrettyGroup True g = trace (prettyGroup g "") g - diff --git a/parser-typechecker/src/Unison/Runtime/Decompile.hs b/parser-typechecker/src/Unison/Runtime/Decompile.hs deleted file mode 100644 index 0b77588eb7..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Decompile.hs +++ /dev/null @@ -1,120 +0,0 @@ -{-# language PatternGuards #-} -{-# language TupleSections #-} -{-# language PatternSynonyms #-} - -module Unison.Runtime.Decompile - ( decompile ) where - -import Prelude hiding (seq) - -import Data.String (fromString) -import Data.Sequence (Seq) -import Data.Word (Word64) - -import Unison.ABT (absChain, substs, pattern AbsN') -import Unison.Term - ( Term - , nat, int, char, float, boolean, constructor, app, apps', text - , seq, seq', builtin - ) -import Unison.Type - ( natRef, intRef, charRef, floatRef, booleanRef, vectorRef - ) -import Unison.Var (Var) -import Unison.Reference (Reference) - -import Unison.Runtime.ANF (RTag, CTag, Tag(..)) -import Unison.Runtime.Foreign - (Foreign, unwrapText, unwrapBytes, maybeUnwrapForeign) -import Unison.Runtime.Stack - (Closure(..), pattern DataC, pattern PApV, IComb(..)) - -import Unison.Codebase.Runtime (Error) -import Unison.Util.Pretty (lit) - -import qualified Unison.Util.Bytes as By - -import Unsafe.Coerce -- for Int -> Double - -con :: Var v => Reference -> CTag -> Term v () -con rf ct = constructor () rf . fromIntegral $ rawTag ct - -err :: String -> Either Error a -err = Left . lit . 
fromString - -decompile - :: Var v - => (RTag -> Maybe Reference) - -> (Word64 -> Maybe (Term v ())) - -> Closure - -> Either Error (Term v ()) -decompile tyRef _ (DataC rt ct [] []) - | Just rf <- tyRef rt - , rf == booleanRef - = boolean () <$> tag2bool ct -decompile tyRef _ (DataC rt ct [i] []) - | Just rf <- tyRef rt - = decompileUnboxed rf ct i -decompile tyRef topTerms (DataC rt ct [] bs) - | Just rf <- tyRef rt - = apps' (con rf ct) <$> traverse (decompile tyRef topTerms) bs -decompile tyRef topTerms (PApV (IC rt _) [] bs) - | Just t <- topTerms rt - = substitute t <$> traverse (decompile tyRef topTerms) bs - | otherwise - = err "reference to unknown combinator" -decompile _ _ cl@(PAp _ _ _) - = err $ "cannot decompile a partial application to unboxed values: " - ++ show cl -decompile _ _ (DataC{}) - = err "cannot decompile data type with multiple unboxed fields" -decompile _ _ BlackHole = err "exception" -decompile _ _ (Captured{}) = err "decompiling a captured continuation" -decompile tyRef topTerms (Foreign f) = decompileForeign tyRef topTerms f - -tag2bool :: CTag -> Either Error Bool -tag2bool c = case rawTag c of - 0 -> Right False - 1 -> Right True - _ -> err "bad boolean tag" - -substitute :: Var v => Term v () -> [Term v ()] -> Term v () -substitute (AbsN' vs bd) ts = align [] vs ts - where - align vts (v:vs) (t:ts) = align ((v,t):vts) vs ts - align vts vs [] = substs vts (absChain vs bd) - -- this should not happen - align vts [] ts = apps' (substs vts bd) ts --- TODO: these aliases are not actually very conveniently written -substitute _ _ = error "impossible" - -decompileUnboxed - :: Var v => Reference -> CTag -> Int -> Either Error (Term v ()) -decompileUnboxed r _ i - | r == natRef = pure . nat () $ fromIntegral i - | r == intRef = pure . int () $ fromIntegral i - | r == floatRef = pure . float () $ unsafeCoerce i - | r == charRef = pure . 
char () $ toEnum i -decompileUnboxed r _ _ - = err $ "cannot decompile unboxed data type with reference: " ++ show r - -decompileForeign - :: Var v - => (RTag -> Maybe Reference) - -> (Word64 -> Maybe (Term v ())) - -> Foreign - -> Either Error (Term v ()) -decompileForeign tyRef topTerms f - | Just t <- unwrapText f = Right $ text () t - | Just b <- unwrapBytes f = Right $ decompileBytes b - | Just s <- unwrapSeq f - = seq' () <$> traverse (decompile tyRef topTerms) s -decompileForeign _ _ _ = err "cannot decompile Foreign" - -decompileBytes :: Var v => By.Bytes -> Term v () -decompileBytes - = app () (builtin () $ fromString "Bytes.fromList") - . seq () . fmap (nat () . fromIntegral) . By.toWord8s - -unwrapSeq :: Foreign -> Maybe (Seq Closure) -unwrapSeq = maybeUnwrapForeign vectorRef diff --git a/parser-typechecker/src/Unison/Runtime/Foreign.hs b/parser-typechecker/src/Unison/Runtime/Foreign.hs deleted file mode 100644 index c7290b0d13..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Foreign.hs +++ /dev/null @@ -1,205 +0,0 @@ -{-# language GADTs #-} -{-# language BangPatterns #-} -{-# language PatternGuards #-} - -module Unison.Runtime.Foreign - ( Foreign(..) - , ForeignArgs - , ForeignRslt - , ForeignFunc(..) 
- , unwrapForeign - , maybeUnwrapForeign - , foreign0 - , foreign1 - , foreign2 - , foreign3 - , wrapText - , unwrapText - , wrapBytes - , unwrapBytes - ) where - -import GHC.Stack (HasCallStack) - -import Data.Bifunctor - -import Control.Concurrent (ThreadId) -import Data.Text (Text,unpack) -import Network.Socket (Socket) -import System.IO (BufferMode(..), SeekMode, Handle, IOMode) -import Unison.Util.Bytes (Bytes) -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import qualified Unison.Type as Ty - -import Unsafe.Coerce - -data Foreign where - Wrap :: Reference -> e -> Foreign - -wrapText :: Text -> Foreign -wrapText = Wrap Ty.textRef - -wrapBytes :: Bytes -> Foreign -wrapBytes = Wrap Ty.bytesRef - -unwrapText :: Foreign -> Maybe Text -unwrapText (Wrap r v) - | r == Ty.textRef = Just $ unsafeCoerce v - | otherwise = Nothing - -unwrapBytes :: Foreign -> Maybe Bytes -unwrapBytes (Wrap r v) - | r == Ty.bytesRef = Just $ unsafeCoerce v - | otherwise = Nothing - -promote :: (a -> a -> r) -> b -> c -> r -promote (~~) x y = unsafeCoerce x ~~ unsafeCoerce y - -ref2eq :: Reference -> Maybe (a -> b -> Bool) -ref2eq r - | r == Ty.textRef = Just $ promote ((==) @Text) - | r == Ty.termLinkRef = Just $ promote ((==) @Referent) - | r == Ty.typeLinkRef = Just $ promote ((==) @Reference) - | otherwise = Nothing - -ref2cmp :: Reference -> Maybe (a -> b -> Ordering) -ref2cmp r - | r == Ty.textRef = Just $ promote (compare @Text) - | r == Ty.termLinkRef = Just $ promote (compare @Referent) - | r == Ty.typeLinkRef = Just $ promote (compare @Reference) - | otherwise = Nothing - -instance Eq Foreign where - Wrap rl t == Wrap rr u - | rl == rr , Just (~~) <- ref2eq rl = t ~~ u - _ == _ = error "Eq Foreign" - -instance Ord Foreign where - Wrap rl t `compare` Wrap rr u - | rl == rr, Just cmp <- ref2cmp rl = cmp t u - compare _ _ = error "Ord Foreign" - -instance Show Foreign where - showsPrec p !(Wrap r _) - = showParen (p>9) - $ showString "Wrap " . 
showsPrec 10 r . showString " _" - -type ForeignArgs = [Foreign] -type ForeignRslt = [Either Int Foreign] - -newtype ForeignFunc = FF (ForeignArgs -> IO ForeignRslt) - -instance Show ForeignFunc where - show _ = "ForeignFunc" -instance Eq ForeignFunc where - _ == _ = error "Eq ForeignFunc" -instance Ord ForeignFunc where - compare _ _ = error "Ord ForeignFunc" - -decodeForeignEnum :: Enum a => [Foreign] -> (a,[Foreign]) -decodeForeignEnum = first toEnum . decodeForeign - -class ForeignConvention a where - decodeForeign :: [Foreign] -> (a, [Foreign]) - decodeForeign (f:fs) = (unwrapForeign f, fs) - decodeForeign _ = foreignCCError - -instance ForeignConvention Int -instance ForeignConvention Text -instance ForeignConvention Bytes -instance ForeignConvention Handle -instance ForeignConvention Socket -instance ForeignConvention ThreadId - -instance ForeignConvention FilePath where - decodeForeign = first unpack . decodeForeign -instance ForeignConvention SeekMode where - decodeForeign = decodeForeignEnum -instance ForeignConvention IOMode where - decodeForeign = decodeForeignEnum - -instance ForeignConvention a => ForeignConvention (Maybe a) where - decodeForeign (f:fs) - | 0 <- unwrapForeign f = (Nothing, fs) - | 1 <- unwrapForeign f - , (x, fs) <- decodeForeign fs = (Just x, fs) - decodeForeign _ = foreignCCError - -instance (ForeignConvention a, ForeignConvention b) - => ForeignConvention (a,b) - where - decodeForeign fs - | (x,fs) <- decodeForeign fs - , (y,fs) <- decodeForeign fs - = ((x,y), fs) - -instance ( ForeignConvention a - , ForeignConvention b - , ForeignConvention c - ) - => ForeignConvention (a,b,c) - where - decodeForeign fs - | (x, fs) <- decodeForeign fs - , (y, fs) <- decodeForeign fs - , (z, fs) <- decodeForeign fs - = ((x,y,z), fs) - -instance ForeignConvention BufferMode where - decodeForeign (f:fs) - | 0 <- unwrapForeign f = (NoBuffering,fs) - | 1 <- unwrapForeign f = (LineBuffering,fs) - | 2 <- unwrapForeign f = (BlockBuffering Nothing, fs) - 
| 3 <- unwrapForeign f - , (n,fs) <- decodeForeign fs - = (BlockBuffering $ Just n, fs) - decodeForeign _ = foreignCCError - -foreignCCError :: HasCallStack => a -foreignCCError = error "mismatched foreign calling convention" - -unwrapForeign :: Foreign -> a -unwrapForeign (Wrap _ e) = unsafeCoerce e - -maybeUnwrapForeign :: Reference -> Foreign -> Maybe a -maybeUnwrapForeign rt (Wrap r e) - | rt == r = Just (unsafeCoerce e) - | otherwise = Nothing - -foreign0 :: IO [Either Int Foreign] -> ForeignFunc -foreign0 e = FF $ \[] -> e - -foreign1 - :: ForeignConvention a - => (a -> IO [Either Int Foreign]) - -> ForeignFunc -foreign1 f = FF $ \case - fs | (x,[]) <- decodeForeign fs - -> f x - | otherwise -> foreignCCError - -foreign2 - :: ForeignConvention a - => ForeignConvention b - => (a -> b -> IO [Either Int Foreign]) - -> ForeignFunc -foreign2 f = FF $ \case - fs | (x,fs) <- decodeForeign fs - , (y,[]) <- decodeForeign fs - -> f x y - | otherwise -> foreignCCError - -foreign3 - :: ForeignConvention a - => ForeignConvention b - => ForeignConvention c - => (a -> b -> c -> IO [Either Int Foreign]) - -> ForeignFunc -foreign3 f = FF $ \case - fs | (x,fs) <- decodeForeign fs - , (y,fs) <- decodeForeign fs - , (z,[]) <- decodeForeign fs - -> f x y z - | otherwise -> foreignCCError - diff --git a/parser-typechecker/src/Unison/Runtime/IOSource.hs b/parser-typechecker/src/Unison/Runtime/IOSource.hs deleted file mode 100644 index 519c620bdf..0000000000 --- a/parser-typechecker/src/Unison/Runtime/IOSource.hs +++ /dev/null @@ -1,559 +0,0 @@ -{-# Language ViewPatterns #-} -{-# Language PatternSynonyms #-} -{-# LANGUAGE OverloadedStrings #-} -{-# Language QuasiQuotes #-} - -module Unison.Runtime.IOSource where - -import Unison.Prelude - -import Control.Lens (view, _1) -import Control.Monad.Identity (runIdentity, Identity) -import Data.List (elemIndex, genericIndex) -import Text.RawString.QQ (r) -import Unison.Codebase.CodeLookup (CodeLookup(..)) -import Unison.FileParsers 
(parseAndSynthesizeFile) -import Unison.Parser (Ann(..)) -import Unison.Symbol (Symbol) -import qualified Data.Map as Map -import qualified Unison.Builtin as Builtin -import qualified Unison.Codebase.CodeLookup as CL -import qualified Unison.DataDeclaration as DD -import qualified Unison.Parser as Parser -import qualified Unison.Reference as R -import qualified Unison.Result as Result -import qualified Unison.Typechecker.TypeLookup as TL -import qualified Unison.UnisonFile as UF -import qualified Unison.Var as Var -import qualified Unison.Names3 as Names - -typecheckedFile :: UF.TypecheckedUnisonFile Symbol Ann -typecheckedFile = typecheckedFile' - -typecheckedFile' :: forall v. Var.Var v => UF.TypecheckedUnisonFile v Ann -typecheckedFile' = let - tl :: a -> Identity (TL.TypeLookup v Ann) - tl = const $ pure (External <$ Builtin.typeLookup) - env = Parser.ParsingEnv mempty (Names.Names Builtin.names0 mempty) - r = parseAndSynthesizeFile [] tl env "" source - in case runIdentity $ Result.runResultT r of - (Nothing, notes) -> error $ "parsing failed: " <> show notes - (Just Left{}, notes) -> error $ "typechecking failed" <> show notes - (Just (Right file), _) -> file - -typecheckedFileTerms :: Map.Map Symbol R.Reference -typecheckedFileTerms = view _1 <$> UF.hashTerms typecheckedFile - -termNamed :: String -> R.Reference -termNamed s = fromMaybe (error $ "No builtin term called: " <> s) - $ Map.lookup (Var.nameds s) typecheckedFileTerms - -codeLookup :: CodeLookup Symbol Identity Ann -codeLookup = CL.fromUnisonFile $ UF.discardTypes typecheckedFile - -typeNamedId :: String -> R.Id -typeNamedId s = - case Map.lookup (Var.nameds s) (UF.dataDeclarationsId' typecheckedFile) of - Nothing -> error $ "No builtin type called: " <> s - Just (r, _) -> r - -typeNamed :: String -> R.Reference -typeNamed = R.DerivedId . 
typeNamedId - -abilityNamedId :: String -> R.Id -abilityNamedId s = - case Map.lookup (Var.nameds s) (UF.effectDeclarationsId' typecheckedFile) of - Nothing -> error $ "No builtin ability called: " <> s - Just (r, _) -> r - -ioHash :: R.Id -ioHash = abilityNamedId "io.IO" - -ioReference, bufferModeReference, eitherReference, ioModeReference, optionReference, errorReference, errorTypeReference, seekModeReference, threadIdReference, socketReference, handleReference, epochTimeReference, isTestReference, isPropagatedReference, filePathReference, hostNameReference, serviceNameReference - :: R.Reference -ioReference = R.DerivedId ioHash -bufferModeReference = typeNamed "io.BufferMode" -eitherReference = typeNamed "Either" -ioModeReference = typeNamed "io.Mode" -optionReference = typeNamed "Optional" -errorReference = typeNamed "io.Error" -errorTypeReference = typeNamed "io.ErrorType" -seekModeReference = typeNamed "io.SeekMode" -threadIdReference = typeNamed "io.ThreadId" -socketReference = typeNamed "io.Socket" -handleReference = typeNamed "io.Handle" -epochTimeReference = typeNamed "io.EpochTime" -isTestReference = typeNamed "IsTest" -isPropagatedReference = typeNamed "IsPropagated" -filePathReference = typeNamed "io.FilePath" -hostNameReference = typeNamed "io.HostName" -serviceNameReference = typeNamed "io.ServiceName" - -isTest :: (R.Reference, R.Reference) -isTest = (isTestReference, termNamed "metadata.isTest") - -isPropagatedValue :: R.Reference -isPropagatedValue = termNamed "metadata.isPropagated" - -eitherLeftId, eitherRightId, someId, noneId, ioErrorId, handleId, socketId, threadIdId, epochTimeId, bufferModeLineId, bufferModeBlockId, filePathId :: DD.ConstructorId -eitherLeftId = constructorNamed eitherReference "Either.Left" -eitherRightId = constructorNamed eitherReference "Either.Right" -someId = constructorNamed optionReference "Optional.Some" -noneId = constructorNamed optionReference "Optional.None" -ioErrorId = constructorNamed errorReference 
"io.Error.Error" -handleId = constructorNamed handleReference "io.Handle.Handle" -socketId = constructorNamed socketReference "io.Socket.Socket" -threadIdId = constructorNamed threadIdReference "io.ThreadId.ThreadId" -epochTimeId = constructorNamed epochTimeReference "io.EpochTime.EpochTime" -bufferModeLineId = constructorNamed bufferModeReference "io.BufferMode.Line" -bufferModeBlockId = constructorNamed bufferModeReference "io.BufferMode.Block" -filePathId = constructorNamed filePathReference "io.FilePath.FilePath" - -mkErrorType :: Text -> DD.ConstructorId -mkErrorType = constructorNamed errorTypeReference - -alreadyExistsId, noSuchThingId, resourceBusyId, resourceExhaustedId, eofId, illegalOperationId, permissionDeniedId, userErrorId - :: DD.ConstructorId -alreadyExistsId = mkErrorType "io.ErrorType.AlreadyExists" -noSuchThingId = mkErrorType "io.ErrorType.NoSuchThing" -resourceBusyId = mkErrorType "io.ErrorType.ResourceBusy" -resourceExhaustedId = mkErrorType "io.ErrorType.ResourceExhausted" -eofId = mkErrorType "io.ErrorType.EOF" -illegalOperationId = mkErrorType "io.ErrorType.IllegalOperation" -permissionDeniedId = mkErrorType "io.ErrorType.PermissionDenied" -userErrorId = mkErrorType "io.ErrorType.UserError" - -constructorNamed :: R.Reference -> Text -> DD.ConstructorId -constructorNamed ref name = - case runIdentity . getTypeDeclaration codeLookup $ R.unsafeId ref of - Nothing -> - error - $ "There's a bug in the Unison runtime. Couldn't find type " - <> show ref - Just decl -> - fromMaybe - ( error - $ "Unison runtime bug. The type " - <> show ref - <> " has no constructor named " - <> show name - ) - . elemIndex name - . DD.constructorNames - $ DD.asDataDecl decl - -constructorName :: R.Reference -> DD.ConstructorId -> Text -constructorName ref cid = - case runIdentity . getTypeDeclaration codeLookup $ R.unsafeId ref of - Nothing -> - error - $ "There's a bug in the Unison runtime. 
Couldn't find type " - <> show ref - Just decl -> genericIndex (DD.constructorNames $ DD.asDataDecl decl) cid - --- .. todo - fill in the rest of these - -source :: Text -source = fromString [r| - -type Either a b = Left a | Right b - -type Optional a = None | Some a - -unique[b28d929d0a73d2c18eac86341a3bb9399f8550c11b5f35eabb2751e6803ccc20] type - IsPropagated = IsPropagated - -d1 Doc.++ d2 = - use Doc - match (d1,d2) with - (Join ds, Join ds2) -> Join (ds Sequence.++ ds2) - (Join ds, _) -> Join (ds `Sequence.snoc` d2) - (_, Join ds) -> Join (d1 `Sequence.cons` ds) - _ -> Join [d1,d2] - -unique[q1905679b27a97a4098bc965574da880c1074183a2c55ff1d481619c7fb8a1e1] type - Author = { guid : GUID, name : Text } - -unique[ee1c051034fa0671ea66e7c708ba552003bd3cf657bd28bf0051f1f8cdfcba53] type - CopyrightHolder = { guid : GUID, name : Text} - -unique[bed6724af0d5f47f80cdea1b6023d35f120137ee0556e57154a9fc8b62fe5fed] type - License = { copyrightHolders : [CopyrightHolder] - , years : [Year] - , licenseType : LicenseType } - --- Use `Doc` here to get nice text-wrapping when viewing --- and to avoid needing to stick hard line breaks in the license -unique[d875fa1ea7ef3adf8e29417c6c8b01a1830c4c6bd10dcca9d4196388462e0b7a] type LicenseType = LicenseType Doc - -unique[cb8469a1b41a63655062226556eaccf06129a2641af61fe7edef9c485c94a870] type GUID = GUID Bytes - --- Common era years -unique[u9ae6694152966cf1b0c1f4ad901a77e1acd7bbe16595fd27b07435ac45dab05] type Year = Year Nat - --- This is linked to definitions that are considered tests -unique[e6dca08b40458b03ca1660cfbdaecaa7279b42d18257898b5fd1c34596aac36f] type - IsTest = IsTest - --- Create references for these that can be used as metadata. --- (Reminder: Metadata is references, not values.) -metadata.isTest = IsTest.IsTest -metadata.isPropagated = IsPropagated.IsPropagated - --- Handles are unique identifiers. --- The implementation of IO in the runtime will supply Haskell --- file handles and map those to Unison handles. 
--- A pure implementation of I/O might use some kind of pure supply --- of unique IDs instead. -unique[d4597403ec40fd4fbee57c62b8096f9c3d382dff01f20108546fe3530a927e86] type - io.Handle = Handle Text - --- Ditto for sockets -unique[e1d94401fde8b2546d6dfc54e93f11e6a9285a7ea765d3255da19122a42715d3] type - io.Socket = Socket Text - --- Builtin handles: standard in, out, error - -use io Error Mode Handle IO Socket ThreadId HostName FilePath EpochTime - BufferMode SeekMode ServiceName - -use io.Handle Handle - -namespace io where - stdin : Handle - stdin = Handle "stdin" - - stdout : Handle - stdout = Handle "stdout" - - stderr : Handle - stderr = Handle "stderr" - - -- Throw an I/O error on the left as an effect in `IO` - rethrow : (Either io.Error a) -> {IO} a - rethrow x = match x with - Either.Left e -> io.IO.throw e - Either.Right a -> a - - -- Print a line to the standard output - printLine : Text ->{IO} () - printLine t = - putText stdout t - putText stdout "\n" - - -- Read a line from the standard input - readLine : '{IO} Text - readLine = '(getLine stdin) - - -- Built-ins - - -- Open a named file in the given mode, yielding an open file handle - openFile : FilePath -> Mode ->{IO} Handle - openFile f m = rethrow (io.IO.openFile_ f m) - - -- Close an open file handle - closeFile : Handle ->{IO} () - closeFile f = rethrow (io.IO.closeFile_ f) - - -- Check whether a file handle has reached the end of the file - isFileEOF : Handle ->{IO} Boolean - isFileEOF h = rethrow (io.IO.isFileEOF_ h) - - -- Check whether a file handle is open - isFileOpen : Handle ->{IO} Boolean - isFileOpen h = rethrow (io.IO.isFileOpen_ h) - - -- Get a line of text from a text file handle - getLine : Handle ->{IO} Text - getLine h = rethrow (io.IO.getLine_ h) - - -- Get the entire contents of a file as a single block of text - getText : Handle ->{IO} Text - getText h = rethrow (io.IO.getText_ h) - - -- Write some text to a file - putText : Handle -> Text ->{IO} () - putText h t = rethrow 
(io.IO.putText_ h t) - - -- Get epoch system time - systemTime : '{IO} EpochTime - systemTime = '(rethrow (io.IO.systemTime_)) - - -- Does the file handle support `seek`? - isSeekable : Handle -> {IO} Boolean - isSeekable h = rethrow (io.IO.isSeekable_ h) - - -- Seek to a position in a file handle - seek : Handle -> SeekMode -> Int ->{IO} () - seek h m i = rethrow (io.IO.seek_ h m i) - - -- Ask for the position of a file handle - position : Handle ->{IO} Int - position h = rethrow (io.IO.position_ h) - - -- Get the buffer mode of a file handle - getBuffering : Handle ->{IO} (Optional BufferMode) - getBuffering h = rethrow (io.IO.getBuffering_ h) - - -- Set the buffer mode for a file handle - setBuffering : Handle -> Optional BufferMode ->{IO} () - setBuffering h bm = rethrow (io.IO.setBuffering_ h bm) - - -- Get the path to a temporary directory managed by the operating system - getTemporaryDirectory : '{IO} FilePath - getTemporaryDirectory = '(rethrow (io.IO.getTemporaryDirectory_)) - - -- Get the current working directory - getCurrentDirectory : '{IO} FilePath - getCurrentDirectory = '(rethrow (io.IO.getCurrentDirectory_)) - - -- Set the current working directory - setCurrentDirectory : FilePath -> {IO} () - setCurrentDirectory d = rethrow (io.IO.setCurrentDirectory_ d) - - -- List the contents of a directory - directoryContents : FilePath -> {IO} [FilePath] - directoryContents d = rethrow (io.IO.directoryContents_ d) - - -- Check if a path exists - fileExists : FilePath -> {IO} Boolean - fileExists d = rethrow (io.IO.fileExists_ d) - - -- Check if a path is a directory - isDirectory : FilePath -> {IO} Boolean - isDirectory d = rethrow (io.IO.isDirectory_ d) - - -- Create a directory at the given path, including parent directories - createDirectory : FilePath -> {IO} () - createDirectory d = rethrow (io.IO.createDirectory_ d) - - -- Remove the directory at the given path - removeDirectory : FilePath -> {IO} () - removeDirectory d = rethrow (io.IO.removeDirectory_ 
d) - - -- Move a directory from one path to another - renameDirectory : FilePath -> FilePath -> {IO} () - renameDirectory from to = rethrow (io.IO.renameDirectory_ from to) - - -- Remove a file from the file system - removeFile : FilePath -> {IO} () - removeFile d = rethrow (io.IO.removeFile_ d) - - -- Move a file from one path to another - renameFile : FilePath -> FilePath -> {IO} () - renameFile from to = rethrow (io.IO.renameFile_ from to) - - -- Get the timestamp of a file - getFileTimestamp : FilePath -> {IO} EpochTime - getFileTimestamp d = rethrow (io.IO.getFileTimestamp_ d) - - -- Get the size of a file in bytes - getFileSize : FilePath -> {IO} Nat - getFileSize d = rethrow (io.IO.getFileSize_ d) - - -- Create a socket bound to the given local port/service. - -- If a hostname is not given, this will use any available host. - serverSocket : Optional HostName -> ServiceName -> {IO} Socket - serverSocket host service = rethrow (io.IO.serverSocket_ host service) - - -- Start listening for connections on the given socket. - listen : Socket -> {IO} () - listen s = rethrow (io.IO.listen_ s) - - -- Create a socket connected to the given remote address. - clientSocket : HostName -> ServiceName -> {IO} Socket - clientSocket host service = rethrow (io.IO.clientSocket_ host service) - - -- Close a socket and all connections to it. - closeSocket : Socket -> {IO} () - closeSocket s = rethrow (io.IO.closeSocket_ s) - - -- Accept a connection on a socket. - -- Returns a socket that can send and receive data on a new connection - accept : Socket -> {IO} Socket - accept s = rethrow (io.IO.accept_ s) - - -- Send some bytes to a socket. - send : Socket -> Bytes -> {IO} () - send s bs = rethrow (io.IO.send_ s bs) - - -- Read the specified number of bytes from a socket. - receive : Socket -> Nat ->{IO} (Optional Bytes) - receive s n = rethrow (io.IO.receive_ s n) - - -- Fork a new thread. 
- fork : '{IO} a -> {IO} ThreadId - fork a = rethrow (io.IO.fork_ a) - - -- Kill a running thread. - kill : ThreadId -> {IO} () - kill t = rethrow (io.IO.kill_ t) - - -- Suspend the current thread for a number of microseconds. - delay : Nat -> {IO} () - delay n = rethrow (io.IO.delay_ n) - - -- Safely acquire and release a resource - bracket : '{IO} a -> (a ->{IO} b) -> (a ->{IO} c) -> {IO} c - bracket acquire release what = rethrow (io.IO.bracket_ acquire release what) - - -- Run the given computation, and if it throws an error - -- handle the error with the given handler. - -- catch : '{IO} a -> (io.Error ->{IO} a) ->{IO} a - -- catch c h = - -- k io = match io with - -- { IO.throw e } -> h e - -- x -> x - -- handle k in c - --- IO Modes from the Haskell API -type io.Mode = Read | Write | Append | ReadWrite - --- IO error types from the Haskell API -unique[bb57f367a3740d4a1608b9e0eee14fd744ec9e368f1529550cb436ef56c0b268] type - io.ErrorType - = AlreadyExists - | NoSuchThing - | ResourceBusy - | ResourceExhausted - | EOF - | IllegalOperation - | PermissionDenied - | UserError - -unique[b5c578f0a9977ed54a5a12b580dc6b0b2ba37bc3f517f48d1b3285a7f3e8c6bc] type - io.ErrorLocation = ErrorLocation Text -unique[e6ca048b6bf540f93617c0ef9506afcbb490427a9581a01d51ffad39cdf2c554] type - io.ErrorDescription = ErrorDescription Text -unique[d5d61b0a65f1d448dbdeed8af688f0bdbab6b3f775400da370eb5bfc34e428d5] type - io.FilePath = FilePath Text - -type io.Error = Error io.ErrorType Text - -unique[cad7ab802bd143f0b674155c9caf18dde7145d16867a02659534d7bb01a5e287] type - io.SeekMode = Absolute | Relative | FromEnd - --- If the buffer size is not specified, --- use an implementation-specific size. 
-unique[e65de145a461a771de93d6c7885acae28552d77f8ae460bc8bf5de6f2a15ff77] type - io.BufferMode = Line | Block (Optional Nat) - -unique[e1f48f31982a720ae895c0bf4e6ea9a950f5c00d3a73101ad31e63461b7beded] type - io.EpochTime = EpochTime Nat - --- Either a host name e.g., "unisonweb.org" or a numeric host address --- string consisting of a dotted decimal IPv4 address --- e.g., "192.168.0.1". -unique[c7279b501764751edc66f1f7b532e68354fc4704c9eb1ed201f01c894cdd86f4] type - io.HostName = HostName Text - --- For example a port number like "8080" -unique[ee4ff0bda526b0513e4c7b7387b39811ce57938ddb31a77fdb0ff00ee2717c33] type - io.ServiceName = ServiceName Text - -unique[a38186de35c9fcd29d2b359b2148f9f890732413d91575af39d025fcded67e89] type - io.ThreadId = ThreadId Text - -ability io.IO where - - -- Basic file IO - openFile_ : io.FilePath -> io.Mode -> (Either io.Error io.Handle) - closeFile_ : io.Handle -> (Either io.Error ()) - isFileEOF_ : io.Handle -> (Either io.Error Boolean) - isFileOpen_ : io.Handle -> (Either io.Error Boolean) - - -- Text input and output - - --getChar : io.Handle -> Char - getLine_ : io.Handle -> (Either io.Error Text) - -- Get the entire contents of the file as text - getText_ : io.Handle -> (Either io.Error Text) - -- putChar : io.Handle -> Char -> () - putText_ : io.Handle -> Text -> (Either io.Error ()) - - -- Throw an error as an `io.IO` effect - throw : io.Error -> a - - -- File positioning - isSeekable_ : io.Handle -> (Either io.Error Boolean) - seek_ : io.Handle -> io.SeekMode -> Int -> (Either io.Error ()) - position_ : io.Handle -> (Either io.Error Int) - - -- File buffering - getBuffering_ : io.Handle -> Either io.Error (Optional io.BufferMode) - setBuffering_ : io.Handle -> Optional io.BufferMode -> (Either io.Error ()) - - -- Should we expose mutable arrays for byte buffering? - -- Inclined to say no, although that sounds a lot like - -- a decision to just be slow. - -- We'll need a byte buffer manipulation library in that case. 
- - -- getBytes : io.Handle -> Nat -> Bytes - -- putBytes : io.Handle -> Bytes -> () - - -- getBytes : io.Handle -> Nat -> ByteArray -> Nat - -- putBytes : io.Handle -> Nat -> ByteArray -> () - - systemTime_ : (Either io.Error io.EpochTime) - - -- File system operations - getTemporaryDirectory_ : (Either io.Error io.FilePath) - getCurrentDirectory_ : (Either io.Error io.FilePath) - setCurrentDirectory_ : io.FilePath -> (Either io.Error ()) - directoryContents_ : io.FilePath -> Either io.Error [io.FilePath] - fileExists_ : io.FilePath -> (Either io.Error Boolean) - isDirectory_ : io.FilePath -> (Either io.Error Boolean) - createDirectory_ : io.FilePath -> (Either io.Error ()) - removeDirectory_ : io.FilePath -> (Either io.Error ()) - renameDirectory_ : io.FilePath -> io.FilePath -> (Either io.Error ()) - removeFile_ : io.FilePath -> (Either io.Error ()) - renameFile_ : io.FilePath -> io.FilePath -> (Either io.Error ()) - getFileTimestamp_ : io.FilePath -> (Either io.Error io.EpochTime) - getFileSize_ : io.FilePath -> (Either io.Error Nat) - - -- Simple TCP Networking - - -- Create a socket bound to the given local address. - -- If a hostname is not given, this will use any available host. - serverSocket_ : Optional io.HostName -> - io.ServiceName -> (Either io.Error io.Socket) - -- Start listening for connections - listen_ : io.Socket -> (Either io.Error ()) - - -- Create a socket connected to the given remote address - clientSocket_ : io.HostName -> - io.ServiceName -> (Either io.Error io.Socket) - - closeSocket_ : io.Socket -> (Either io.Error ()) - - --socketToio.Handle : Socket -> Mode -> (Either io.Error io.Handle) - --handleToSocket : io.Handle -> (Either io.Error Socket) - - -- Accept a connection on a socket. - -- Returns a socket that can send and receive data on a new connection - accept_ : io.Socket -> (Either io.Error io.Socket) - - -- Send some bytes to a socket. 
- send_ : io.Socket -> Bytes -> (Either io.Error ()) - - -- Read the spefified number of bytes from the socket. - receive_ : io.Socket -> Nat -> (Either io.Error (Optional Bytes)) - - -- scatter/gather mode network I/O - -- sendMany : Socket -> [Bytes] -> Int - - -- Threading -- - - -- Fork a thread - fork_ : '{io.IO} a -> (Either io.Error io.ThreadId) - - -- Kill a running thread - kill_ : io.ThreadId -> (Either io.Error ()) - - -- Suspend the current thread for a number of microseconds. - delay_ : Nat -> (Either io.Error ()) - - -- Safely acquire and release a resource - bracket_ : '{io.IO} a -> (a ->{io.IO} b) -> (a ->{io.IO} c) ->{io.IO} (Either io.Error c) - -|] diff --git a/parser-typechecker/src/Unison/Runtime/IR.hs b/parser-typechecker/src/Unison/Runtime/IR.hs deleted file mode 100644 index b24d804bb5..0000000000 --- a/parser-typechecker/src/Unison/Runtime/IR.hs +++ /dev/null @@ -1,1196 +0,0 @@ -{-# Language DeriveFoldable #-} -{-# Language DeriveTraversable #-} -{-# Language OverloadedStrings #-} -{-# Language PartialTypeSignatures #-} -{-# Language StrictData #-} -{-# Language ViewPatterns #-} -{-# Language PatternSynonyms #-} - -module Unison.Runtime.IR where - -import Unison.Prelude - -import Control.Monad.State.Strict (StateT, gets, modify, runStateT) -import Data.Bifunctor (first, second) -import Data.IORef -import Unison.Hash (Hash) -import Unison.NamePrinter (prettyHashQualified0) -import Unison.Referent (Referent) -import Unison.Symbol (Symbol) -import Unison.Util.CyclicEq (CyclicEq, cyclicEq) -import Unison.Util.CyclicOrd (CyclicOrd, cyclicOrd) -import Unison.Util.Monoid (intercalateMap) -import Unison.Var (Var) -import qualified Data.Map as Map -import qualified Data.Sequence as Sequence -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import qualified Unison.Builtin.Decls as DD -import qualified Unison.PatternCompat as Pattern -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Reference as R -import 
qualified Unison.Runtime.ANF as ANF -import qualified Unison.Term as Term -import qualified Unison.TermPrinter as TP -import qualified Unison.Util.Bytes as Bytes -import qualified Unison.Util.ColorText as CT -import qualified Unison.Util.CycleTable as CyT -import qualified Unison.Util.CyclicOrd as COrd -import qualified Unison.Util.Pretty as P -import qualified Unison.Var as Var - -type Pos = Int -type Arity = Int -type ConstructorId = Int -type Term v = Term.Term v () - -data CompilationEnv e cont - = CompilationEnv { toIR' :: Map R.Reference (IR e cont) - , constructorArity' :: Map (R.Reference, Int) Int } - -toIR :: CompilationEnv e cont -> R.Reference -> Maybe (IR e cont) -toIR = flip Map.lookup . toIR' - -constructorArity :: CompilationEnv e cont -> R.Reference -> Int -> Maybe Int -constructorArity e r i = Map.lookup (r,i) $ constructorArity' e - --- SymbolC = Should this variable be compiled as a LazySlot? -data SymbolC = - SymbolC { isLazy :: Bool - , underlyingSymbol :: Symbol - }-- deriving Show -instance Show SymbolC where - show (SymbolC lazy s) = (if lazy then "'" else "") <> show s - -makeLazy :: SymbolC -> SymbolC -makeLazy s = s { isLazy = True } - -toSymbolC :: Symbol -> SymbolC -toSymbolC = SymbolC False - --- Values, in normal form -type RefID = Int - -data Value e cont - = I Int64 | F Double | N Word64 | B Bool | T Text | C Char | Bs Bytes.Bytes - | TermLink Referent - | TypeLink R.Reference - | Lam Arity (UnderapplyStrategy e cont) (IR e cont) - | Data R.Reference ConstructorId [Value e cont] - | Sequence (Sequence.Seq (Value e cont)) - | Ref RefID Symbol (IORef (Value e cont)) - | Pure (Value e cont) - | Requested (Req e cont) - | Cont cont - | UninitializedLetRecSlot Symbol [(Symbol, IR e cont)] (IR e cont) - -instance (Eq cont, Eq e) => Eq (Value e cont) where - I x == I y = x == y - F x == F y = x == y - N x == N y = x == y - B x == B y = x == y - T x == T y = x == y - C x == C y = x == y - Bs x == Bs y = x == y - Lam n us _ == Lam n2 us2 _ 
= n == n2 && us == us2 - Data r1 cid1 vs1 == Data r2 cid2 vs2 = r1 == r2 && cid1 == cid2 && vs1 == vs2 - Sequence vs == Sequence vs2 = vs == vs2 - Ref _ _ io1 == Ref _ _ io2 = io1 == io2 - Pure x == Pure y = x == y - Requested r1 == Requested r2 = r1 == r2 - Cont k1 == Cont k2 = k1 == k2 - TermLink r1 == TermLink r2 = r1 == r2 - TypeLink r1 == TypeLink r2 = r1 == r2 - _ == _ = False - -instance (Eq cont, Eq e) => Eq (UnderapplyStrategy e cont) where - FormClosure h _ vs == FormClosure h2 _ vs2 = h == h2 && vs == vs2 - Specialize h _ vs == Specialize h2 _ vs2 = h == h2 && vs == vs2 - _ == _ = False - --- would have preferred to make pattern synonyms -maybeToOptional :: Maybe (Value e cont) -> Value e cont -maybeToOptional = \case - Just a -> Data DD.optionalRef 1 [a] - Nothing -> Data DD.optionalRef 0 [] - -unit :: Value e cont -unit = Data DD.unitRef 0 [] - -pair :: (Value e cont, Value e cont) -> Value e cont -pair (a, b) = Data DD.pairRef 0 [a, b] - -tuple :: [Value e cont] -> Value e cont -tuple [v] = v -tuple vs = foldr (curry pair) unit vs - --- When a lambda is underapplied, for instance, `(x y -> x) 19`, we can do --- one of two things: we can substitute away the arguments that have --- been applied, in this example, creating the lambda `x -> 19`. This --- is called specialization and requires recompiling the lambda with its new --- body. --- --- The other option is to just stash the arguments until the rest of the --- args are supplied later. This keeps the original lambda around and --- doesn't involve recompiling. This would just create the closure --- `((x y -> x) 19)`, which when given one more arg, would call the original --- `x y -> x` function with both arguments. 
--- --- Specialization can be done for any Unison term definition, like --- --- blah x y = x + y --- --- Closure formation is used for: --- --- * builtin functions --- * constructor functions --- --- The reason is that builtins and constructor functions don't have a body --- with variables that we could substitute - the functions only compute --- to anything when all the arguments are available. - -data UnderapplyStrategy e cont - = FormClosure Hash (Term SymbolC) [Value e cont] -- head is the latest argument - | Specialize Hash (Term SymbolC) [(SymbolC, Value e cont)] -- same - deriving (Show) - -decompileUnderapplied :: (External e, External cont) => UnderapplyStrategy e cont -> DS (Term Symbol) -decompileUnderapplied u = case u of -- todo: consider unlambda-lifting here - FormClosure _ lam vals -> - Term.apps' (Term.vmap underlyingSymbol lam) . reverse <$> - traverse decompileImpl vals - Specialize _ lam symvals -> do - lam <- Term.apps' (Term.vmap underlyingSymbol lam) . reverse <$> - traverse (decompileImpl . 
snd) symvals - pure $ Term.betaReduce lam - --- Patterns - for now this follows Unison.Pattern exactly, but --- we may switch to more efficient runtime representation of patterns -data Pattern - = PatternI Int64 | PatternF Double | PatternN Word64 | PatternB Bool | PatternT Text | PatternC Char - | PatternData R.Reference ConstructorId [Pattern] - | PatternSequenceLiteral [Pattern] - | PatternSequenceCons Pattern Pattern - | PatternSequenceSnoc Pattern Pattern - -- `Either Int Int` here represents the known constant length of either - -- the left or right side of a sequence concat operation - | PatternSequenceConcat (Either Int Int) Pattern Pattern - | PatternPure Pattern - | PatternBind R.Reference ConstructorId [Pattern] Pattern - | PatternAs Pattern - | PatternIgnore - | PatternVar deriving (Eq,Show) - --- Leaf level instructions - these return immediately without using any stack -data Z e cont - = Slot Pos - | LazySlot Pos - | Val (Value e cont) - | External e - deriving (Eq) - --- The `Set Int` is the set of de bruijn indices that are free in the body --- of `Let` instructions. 
-type IR e cont = IR' (Set Int) (Z e cont) - --- Computations - evaluation reduces these to values -data IR' ann z - = Leaf z - -- Ints - | AddI z z | SubI z z | MultI z z | DivI z z - | GtI z z | LtI z z | GtEqI z z | LtEqI z z | EqI z z - | SignumI z | NegateI z | Truncate0I z | ModI z z - | PowI z z | ShiftLI z z | ShiftRI z z | BitAndI z z - | BitOrI z z | BitXorI z z | ComplementI z | LeadZeroI z - | TrailZeroI z - -- Nats - | AddN z z | DropN z z | SubN z z | MultN z z | DivN z z - | GtN z z | LtN z z | GtEqN z z | LtEqN z z | EqN z z - | ModN z z | ToIntN z | PowN z z | ShiftLN z z | ShiftRN z z - | BitOrN z z | BitXorN z z | BitAndN z z | ComplementN z - | LeadZeroN z | TrailZeroN z - -- Floats - | AddF z z | SubF z z | MultF z z | DivF z z - | GtF z z | LtF z z | GtEqF z z | LtEqF z z | EqF z z - -- Universals - | EqU z z -- universal equality - | CompareU z z -- universal ordering - -- Debugging/Utilities - | Todo z - | Bug z - -- Control flow - - -- `Let` has an `ann` associated with it, e.g `ann = Set Int` which is the - -- set of "free" stack slots referenced by the body of the `let` - | Let Symbol (IR' ann z) (IR' ann z) ann - | LetRec [(Symbol, IR' ann z)] (IR' ann z) - | MakeSequence [z] - | Apply (IR' ann z) [z] - | Construct R.Reference ConstructorId [z] - | Request R.Reference ConstructorId [z] - | Handle z (IR' ann z) - | If z (IR' ann z) (IR' ann z) - | And z (IR' ann z) - | Or z (IR' ann z) - | Not z - -- pattern, optional guard, rhs - | Match z [(Pattern, [Symbol], Maybe (IR' ann z), IR' ann z)] - deriving (Functor,Foldable,Traversable,Eq,Show) - -prettyZ :: PPE.PrettyPrintEnv - -> (e -> P.Pretty String) - -> (cont -> P.Pretty String) - -> Z e cont - -> P.Pretty String -prettyZ ppe prettyE prettyCont z = case z of - Slot i -> "@" <> P.shown i - LazySlot i -> "'@" <> P.shown i - Val v -> prettyValue ppe prettyE prettyCont v - External e -> "External" `P.hang` prettyE e - -prettyIR :: PPE.PrettyPrintEnv - -> (e -> P.Pretty String) - -> (cont -> 
P.Pretty String) - -> IR e cont - -> P.Pretty String -prettyIR ppe prettyE prettyCont = pir - where - unlets (Let s hd tl _) = (Just s, hd) : unlets tl - unlets e = [(Nothing, e)] - pz = prettyZ ppe prettyE prettyCont - pir ir = case ir of - Leaf z -> pz z - AddI a b -> P.parenthesize $ "AddI" `P.hang` P.spaced [pz a, pz b] - SubI a b -> P.parenthesize $ "SubI" `P.hang` P.spaced [pz a, pz b] - MultI a b -> P.parenthesize $ "MultI" `P.hang` P.spaced [pz a, pz b] - DivI a b -> P.parenthesize $ "DivI" `P.hang` P.spaced [pz a, pz b] - GtI a b -> P.parenthesize $ "GtI" `P.hang` P.spaced [pz a, pz b] - LtI a b -> P.parenthesize $ "LtI" `P.hang` P.spaced [pz a, pz b] - GtEqI a b -> P.parenthesize $ "GtEqI" `P.hang` P.spaced [pz a, pz b] - LtEqI a b -> P.parenthesize $ "LtEqI" `P.hang` P.spaced [pz a, pz b] - EqI a b -> P.parenthesize $ "EqI" `P.hang` P.spaced [pz a, pz b] - SignumI a -> P.parenthesize $ "SignumI" `P.hang` P.spaced [pz a] - NegateI a -> P.parenthesize $ "NegateI" `P.hang` P.spaced [pz a] - Truncate0I a -> P.parenthesize $ "Truncate0I" `P.hang` P.spaced [pz a] - ModI a b -> P.parenthesize $ "ModI" `P.hang` P.spaced [pz a, pz b] - PowI a b -> P.parenthesize $ "PowI" `P.hang` P.spaced [pz a, pz b] - ShiftRI a b -> P.parenthesize $ "ShiftRI" `P.hang` P.spaced [pz a, pz b] - ShiftLI a b -> P.parenthesize $ "ShiftLI" `P.hang` P.spaced [pz a, pz b] - BitAndI a b -> P.parenthesize $ "BitAndI" `P.hang` P.spaced [pz a, pz b] - BitOrI a b -> P.parenthesize $ "BitOrI" `P.hang` P.spaced [pz a, pz b] - BitXorI a b -> P.parenthesize $ "BitXorI" `P.hang` P.spaced [pz a, pz b] - ComplementI a -> P.parenthesize $ "ComplementI" `P.hang` P.spaced [pz a] - LeadZeroI a -> P.parenthesize $ "LeadZeroI" `P.hang` P.spaced [pz a] - TrailZeroI a -> P.parenthesize $ "TrailZeroI" `P.hang` P.spaced [pz a] - - AddN a b -> P.parenthesize $ "AddN" `P.hang` P.spaced [pz a, pz b] - SubN a b -> P.parenthesize $ "SubN" `P.hang` P.spaced [pz a, pz b] - DropN a b -> P.parenthesize $ "DropN" 
`P.hang` P.spaced [pz a, pz b] - MultN a b -> P.parenthesize $ "MultN" `P.hang` P.spaced [pz a, pz b] - DivN a b -> P.parenthesize $ "DivN" `P.hang` P.spaced [pz a, pz b] - GtN a b -> P.parenthesize $ "GtN" `P.hang` P.spaced [pz a, pz b] - LtN a b -> P.parenthesize $ "LtN" `P.hang` P.spaced [pz a, pz b] - GtEqN a b -> P.parenthesize $ "GtEqN" `P.hang` P.spaced [pz a, pz b] - LtEqN a b -> P.parenthesize $ "LtEqN" `P.hang` P.spaced [pz a, pz b] - EqN a b -> P.parenthesize $ "EqN" `P.hang` P.spaced [pz a, pz b] - ModN a b -> P.parenthesize $ "ModN" `P.hang` P.spaced [pz a, pz b] - ToIntN a -> P.parenthesize $ "ToIntN" `P.hang` P.spaced [pz a] - PowN a b -> P.parenthesize $ "PowN" `P.hang` P.spaced [pz a, pz b] - ShiftLN a b -> P.parenthesize $ "ShiftLN" `P.hang` P.spaced [pz a, pz b] - ShiftRN a b -> P.parenthesize $ "ShiftRN" `P.hang` P.spaced [pz a, pz b] - BitAndN a b -> P.parenthesize $ "BitAndN" `P.hang` P.spaced [pz a, pz b] - BitOrN a b -> P.parenthesize $ "BitOrN" `P.hang` P.spaced [pz a, pz b] - BitXorN a b -> P.parenthesize $ "BitXorN" `P.hang` P.spaced [pz a, pz b] - ComplementN a -> P.parenthesize $ "ComplementN" `P.hang` P.spaced [pz a] - LeadZeroN a -> P.parenthesize $ "LeadZeroN" `P.hang` P.spaced [pz a] - TrailZeroN a -> P.parenthesize $ "TrailZeroN" `P.hang` P.spaced [pz a] - - AddF a b -> P.parenthesize $ "AddF" `P.hang` P.spaced [pz a, pz b] - SubF a b -> P.parenthesize $ "SubF" `P.hang` P.spaced [pz a, pz b] - MultF a b -> P.parenthesize $ "MultF" `P.hang` P.spaced [pz a, pz b] - DivF a b -> P.parenthesize $ "DivF" `P.hang` P.spaced [pz a, pz b] - GtF a b -> P.parenthesize $ "GtF" `P.hang` P.spaced [pz a, pz b] - LtF a b -> P.parenthesize $ "LtF" `P.hang` P.spaced [pz a, pz b] - GtEqF a b -> P.parenthesize $ "GtEqF" `P.hang` P.spaced [pz a, pz b] - LtEqF a b -> P.parenthesize $ "LtEqF" `P.hang` P.spaced [pz a, pz b] - EqF a b -> P.parenthesize $ "EqF" `P.hang` P.spaced [pz a, pz b] - EqU a b -> P.parenthesize $ "EqU" `P.hang` P.spaced [pz a, pz b] 
- CompareU a b -> P.parenthesize $ "CompareU" `P.hang` P.spaced [pz a, pz b] - Bug a -> P.parenthesize $ "Bug" `P.hang` P.spaced [pz a] - Todo a -> P.parenthesize $ "Todo" `P.hang` P.spaced [pz a] - ir@Let{} -> - P.group $ "let" `P.hang` P.lines (blockElem <$> block) - where - block = unlets ir - blockElem (Nothing, binding) = pir binding - blockElem (Just name, binding) = - (P.shown name <> " =") `P.hang` pir binding - LetRec bs body -> P.group $ "letrec" `P.hang` P.lines ls - where - blockElem (Nothing, binding) = pir binding - blockElem (Just name, binding) = - (P.shown name <> " =") `P.hang` pir binding - ls = fmap blockElem $ [ (Just n, ir) | (n,ir) <- bs ] - ++ [(Nothing, body)] - MakeSequence vs -> P.group $ - P.surroundCommas "[" "]" (pz <$> vs) - Apply fn args -> P.parenthesize $ pir fn `P.hang` P.spaced (pz <$> args) - Construct r cid args -> P.parenthesize $ - ("Construct " <> prettyHashQualified0 (PPE.patternName ppe r cid)) - `P.hang` - P.surroundCommas "[" "]" (pz <$> args) - Request r cid args -> P.parenthesize $ - ("Request " <> prettyHashQualified0 (PPE.patternName ppe r cid)) - `P.hang` - P.surroundCommas "[" "]" (pz <$> args) - Handle h body -> P.parenthesize $ - P.group ("Handle " <> pz h) `P.hang` pir body - If cond t f -> P.parenthesize $ - ("If " <> pz cond) `P.hang` P.spaced [pir t, pir f] - And x y -> P.parenthesize $ "And" `P.hang` P.spaced [pz x, pir y] - Or x y -> P.parenthesize $ "Or" `P.hang` P.spaced [pz x, pir y] - Not x -> P.parenthesize $ "Not" `P.hang` pz x - Match scrute cases -> P.parenthesize $ - P.group ("Match " <> pz scrute) `P.hang` P.lines (pcase <$> cases) - where - pcase (pat, vs, guard, rhs) = let - lhs = P.spaced . 
P.nonEmpty $ - [ P.parenthesize (P.shown pat), P.shown vs, maybe mempty pir guard ] - in (lhs <> " ->" `P.hang` pir rhs) - -prettyValue :: PPE.PrettyPrintEnv - -> (e -> P.Pretty String) - -> (cont -> P.Pretty String) - -> Value e cont - -> P.Pretty String -prettyValue ppe prettyE prettyCont = pv - where - pv v = case v of - I i -> (if i >= 0 then "+" else "" ) <> P.string (show i) - F d -> P.shown d - N n -> P.shown n - B b -> if b then "true" else "false" - T t -> P.shown t - C c -> P.shown c - Bs bs -> P.shown bs - TermLink r -> P.parenthesize $ - ("TermLink " <> prettyHashQualified0 (PPE.termName ppe r)) - TypeLink r -> P.parenthesize $ - ("TypeLink " <> prettyHashQualified0 (PPE.typeName ppe r)) - Lam arity _u b -> P.parenthesize $ - ("Lambda " <> P.string (show arity)) `P.hang` - prettyIR ppe prettyE prettyCont b - Data r cid vs -> P.parenthesize $ - ("Data " <> prettyHashQualified0 (PPE.patternName ppe r cid)) `P.hang` - P.surroundCommas "[" "]" (pv <$> vs) - Sequence vs -> P.surroundCommas "[" "]" (pv <$> vs) - Ref id name _ -> P.parenthesize $ - P.sep " " ["Ref", P.shown id, P.shown name] - Pure v -> P.surroundCommas "{" "}" [pv v] - Requested (Req r cid vs cont) -> P.parenthesize $ - ("Request " <> prettyHashQualified0 (PPE.patternName ppe r cid)) - `P.hang` - P.spaced [ - P.surroundCommas "[" "]" (pv <$> vs), - prettyCont cont - ] - Cont k -> P.parenthesize $ "Cont" `P.hang` prettyCont k - UninitializedLetRecSlot s _ _ -> P.parenthesize $ - "Uninitialized " <> P.shown s - --- Contains the effect ref and ctor id, the args, and the continuation --- which expects the result at the top of the stack -data Req e cont = Req R.Reference ConstructorId [Value e cont] cont - deriving (Eq,Show) - --- Annotate all `z` values with the number of outer bindings, useful for --- tracking free variables or converting away from debruijn indexing. --- Currently used as an implementation detail by `specializeIR`. 
-annotateDepth :: IR' a z -> IR' a (z, Int) -annotateDepth = go 0 where - go depth ir = case ir of - -- Only the binders modify the depth - Let v b body ann -> Let v (go depth b) (go (depth + 1) body) ann - LetRec bs body -> let - depth' = depth + length bs - in LetRec (second (go depth') <$> bs) (go depth' body) - Match scrute cases -> Match (scrute, depth) (tweak <$> cases) where - tweak (pat, boundVars, guard, rhs) = let - depth' = depth + length boundVars - in (pat, boundVars, go depth' <$> guard, go depth' rhs) - -- All the other cases just leave depth alone and recurse - Apply f args -> Apply (go depth f) ((,depth) <$> args) - Handle f body -> Handle (f,depth) (go depth body) - If c a b -> If (c,depth) (go depth a) (go depth b) - And a b -> And (a,depth) (go depth b) - Or a b -> Or (a,depth) (go depth b) - ir -> (,depth) <$> ir - --- Given an environment mapping of de bruijn indices to values, specialize --- the given `IR` by replacing slot lookups with the provided values. -specializeIR :: Map Int (Value e cont) -> IR' a (Z e cont) -> IR' a (Z e cont) -specializeIR env ir = let - ir' = annotateDepth ir - go (s@(Slot i), depth) = maybe s Val $ Map.lookup (i - depth) env - go (s@(LazySlot i), depth) = maybe s Val $ Map.lookup (i - depth) env - go (s,_) = s - in go <$> ir' - -compile :: (Show e, Show cont) => CompilationEnv e cont -> Term Symbol -> IR e cont -compile env t = compile0 env [] - (ABT.rewriteDown ANF.minimizeCyclesOrCrash $ Term.vmap toSymbolC t) - -freeVars :: [(SymbolC,a)] -> Term SymbolC -> Set SymbolC -freeVars bound t = - -- let fv = trace "free:" . traceShowId $ ABT.freeVars t - -- bv = trace "bound:" . traceShowId $ Set.fromList (fst <$> bound) - -- in trace "difference:" . traceShowId $ fv `Set.difference` bv - ABT.freeVars t `Set.difference` Set.fromList (fst <$> bound) - --- Main compilation function - converts an arbitrary term to an `IR`. 
--- Takes a way of resolving `Reference`s and an environment of variables, --- some of which may already be precompiled to `V`s. (This occurs when --- recompiling a function that is being partially applied) -compile0 - :: (Show e, Show cont) - => CompilationEnv e cont - -> [(SymbolC, Maybe (Value e cont))] - -> Term SymbolC - -> IR e cont -compile0 env bound t = - if Set.null fvs then - -- Annotates the term with this [(SymbolC, Maybe (Value e))] - -- where a `Just v` indicates an immediate value, and `Nothing` indicates - -- a stack lookup is needed at the stack index equal to the symbol's index. - -- ABT.annotateBound' produces an initial annotation consisting of the a - -- stack of bound variables, with the innermost bound variable at the top. - -- We tag each of these with `Nothing`, and then tack on the immediates at - -- the end. Their indices don't correspond to stack positions (although - -- they may reflect shadowing). - let wrangle vars = ((,Nothing) <$> vars) ++ bound - t0 = ANF.fromTerm' makeLazy t - _msg = "ANF form:\n" <> - TP.pretty' (Just 80) mempty t0 <> - "\n---------" - in go (wrangle <$> ABT.annotateBound' t0) - else - error $ "can't compile a term with free variables: " ++ show (toList fvs) - where - fvs = freeVars bound t - go t = case t of - Term.Nat' n -> Leaf . Val . N $ n - Term.Int' n -> Leaf . Val . I $ n - Term.Float' n -> Leaf . Val . F $ n - Term.Boolean' n -> Leaf . Val . B $ n - Term.Text' n -> Leaf . Val . T $ n - Term.Char' n -> Leaf . Val . C $ n - Term.TermLink' r -> Leaf . Val . TermLink $ r - Term.TypeLink' r -> Leaf . Val . TypeLink $ r - Term.And' x y -> And (toZ "and" t x) (go y) - Term.LamsNamed' vs body -> Leaf . 
Val $ - Lam (length vs) - (Specialize (ABT.hash t) (void t) []) - (compile0 env (ABT.annotation body) (void body)) - Term.Or' x y -> Or (toZ "or" t x) (go y) - Term.Let1Named' v b body -> Let (underlyingSymbol v) (go b) (go body) (freeSlots body) - Term.LetRecNamed' bs body -> - LetRec ((\(v,b) -> (underlyingSymbol v, go b)) <$> bs) (go body) - Term.Constructor' r cid -> ctorIR con (Term.constructor()) r cid where - con 0 r cid [] = Leaf . Val $ Data r cid [] - con _ r cid args = Construct r cid args - Term.Request' r cid -> ctorIR (const Request) (Term.request()) r cid - Term.Apps' f args -> Apply (go f) (map (toZ "apply-args" t) args) - Term.Handle' h body -> Handle (toZ "handle" t h) (go body) - Term.Ann' e _ -> go e - Term.Match' scrutinee cases -> - Match (toZ "match" t scrutinee) (compileCase <$> cases) - ABT.Abs1NA' _ body -> go body - Term.If' cond ifT ifF -> If (toZ "cond" t cond) (go ifT) (go ifF) - Term.Var' _ -> Leaf $ toZ "var" t t - Term.Ref' r -> case toIR env r of - Nothing -> error $ reportBug "B8920912182" msg where - msg = "The program being compiled referenced this definition " <> - show r <> "\nbut the compilation environment has no compiled form for this reference." - Just ir -> ir - Term.Sequence' vs -> MakeSequence . toList . fmap (toZ "sequence" t) $ vs - _ -> error $ "TODO - don't know how to compile this term:\n" - <> (CT.toPlain . P.render 80 . 
TP.pretty mempty $ void t) - where - compileVar _ v [] = unknown v - compileVar i v ((v',o):tl) - | v == v' = case o of - Nothing | isLazy v -> LazySlot i - | otherwise -> Slot i - Just v -> Val v - | isJust o = compileVar i v tl - | otherwise = compileVar (i + 1) v tl - - -- freeSlots :: _ -> Set Int - freeSlots t = let - vars = ABT.freeVars t - env = ABT.annotation t - in Set.fromList $ toList vars >>= \v -> case compileVar 0 v env of - Slot i -> [i] - LazySlot i -> [i] - _ -> [] - - ctorIR :: (Int -> R.Reference -> Int -> [Z e cont] -> IR e cont) - -> (R.Reference -> Int -> Term SymbolC) - -> R.Reference -> Int -> IR e cont - ctorIR con src r cid = case constructorArity env r cid of - Nothing -> error $ "the compilation env is missing info about how " - ++ "to compile this constructor: " ++ show (r, cid) ++ "\n" ++ show (constructorArity' env) - Just 0 -> con 0 r cid [] - -- Just 0 -> Leaf . Val $ Data "Optional" 0 - Just arity -> Leaf . Val $ Lam arity (FormClosure (ABT.hash s) s []) ir - where - s = src r cid - -- if `arity` is 1, then `Slot 0` is the sole argument. - -- if `arity` is 2, then `Slot 1` is the first arg, and `Slot 0` - -- get the second arg, etc. - -- Note: [1..10] is inclusive of both `1` and `10` - ir = con arity r cid (reverse $ map Slot [0 .. 
(arity - 1)]) - - unknown v = error $ "free variable during compilation: " ++ show v - toZ _msg t (Term.Var' v) = compileVar 0 v (ABT.annotation t) - toZ msg _t e = case go e of - Leaf v -> v - e -> error $ msg ++ ": ANF should have eliminated any non-Z arguments from: " ++ show e - compileCase (Term.MatchCase pat guard rhs@(ABT.unabs -> (vs,_))) = - (compilePattern pat, underlyingSymbol <$> vs, go <$> guard, go rhs) - - getSeqLength :: Pattern.Pattern -> Maybe Int - getSeqLength p = case p of - Pattern.SequenceLiteral ps -> Just (length ps) - Pattern.SequenceOp l op r -> case op of - Pattern.Snoc -> (+ 1) <$> getSeqLength l - Pattern.Cons -> (+ 1) <$> getSeqLength r - Pattern.Concat -> (+) <$> getSeqLength l <*> getSeqLength r - Pattern.As p -> getSeqLength p - _ -> Nothing - - compilePattern :: Pattern.Pattern -> Pattern - compilePattern pat = case pat of - Pattern.Unbound -> PatternIgnore - Pattern.Var -> PatternVar - Pattern.Boolean b -> PatternB b - Pattern.Int n -> PatternI n - Pattern.Nat n -> PatternN n - Pattern.Float n -> PatternF n - Pattern.Text t -> PatternT t - Pattern.Char c -> PatternC c - Pattern.Constructor r cid args -> PatternData r cid (compilePattern <$> args) - Pattern.As pat -> PatternAs (compilePattern pat) - Pattern.EffectPure p -> PatternPure (compilePattern p) - Pattern.EffectBind r cid args k -> PatternBind r cid (compilePattern <$> args) (compilePattern k) - Pattern.SequenceLiteral ps -> PatternSequenceLiteral (compilePattern <$> ps) - Pattern.SequenceOp l op r -> case op of - Pattern.Snoc -> PatternSequenceSnoc (compilePattern l) (compilePattern r) - Pattern.Cons -> PatternSequenceCons (compilePattern l) (compilePattern r) - Pattern.Concat -> fromMaybe concatErr ((concat Left <$> getSeqLength l) <|> (concat Right <$> getSeqLength r)) - where - concat :: (Int -> Either Int Int) -> Int -> Pattern - concat f i = PatternSequenceConcat (f i) (compilePattern l) (compilePattern r) - concatErr = error $ "At least one side of a concat must 
have a constant length. " <> - "This code should never be reached as this constraint is " <> - "applied in the typechecker." - - _ -> error $ "todo - compilePattern " ++ show pat - -type DS = StateT (Map Symbol (Term Symbol), Set RefID) IO - -runDS :: DS (Term Symbol) -> IO (Term Symbol) -runDS ds = do - (body, (letRecBindings, _)) <- runStateT ds mempty - pure $ if null letRecBindings then body - else Term.letRec' False (Map.toList letRecBindings) body - -decompile :: (External e, External cont) => Value e cont -> IO (Term Symbol) -decompile v = runDS (decompileImpl v) - -decompileImpl :: - (External e, External cont) => Value e cont -> DS (Term Symbol) -decompileImpl v = case v of - I n -> pure $ Term.int () n - N n -> pure $ Term.nat () n - F n -> pure $ Term.float () n - B b -> pure $ Term.boolean () b - T t -> pure $ Term.text () t - C c -> pure $ Term.char () c - Bs bs -> pure $ Term.builtin() "Bytes.fromList" `Term.apps'` [bsv] where - bsv = Term.seq'() . Sequence.fromList $ - [ Term.nat() (fromIntegral w8) | w8 <- Bytes.toWord8s bs ] - Lam _ f _ -> decompileUnderapplied f - Data r cid args -> - Term.apps' <$> pure (Term.constructor() r cid) - <*> traverse decompileImpl (toList args) - Sequence vs -> Term.seq' () <$> traverse decompileImpl vs - Ref id symbol ioref -> do - seen <- gets snd - symbol <- pure $ Var.freshenId (fromIntegral id) symbol - if Set.member id seen then - pure $ Term.var () symbol - else do - modify (second $ Set.insert id) - t <- decompileImpl =<< lift (readIORef ioref) - modify (first $ Map.insert symbol t) - pure (Term.etaNormalForm t) - Cont k -> liftIO $ decompileExternal k - Pure a -> do - -- `{a}` doesn't have a term syntax, so it's decompiled as - -- `handle (x -> x) in a`, which has the type `Request ambient e a` - a <- decompileImpl a - pure $ Term.handle() id a - Requested (Req r cid vs k) -> do - -- `{req a b -> k}` doesn't have a term syntax, so it's decompiled as - -- `handle (x -> x) in k (req a b)` - vs <- traverse 
decompileImpl vs - kt <- liftIO $ decompileExternal k - pure . Term.handle() id $ - Term.apps' kt [Term.apps' (Term.request() r cid) vs] - UninitializedLetRecSlot _b _bs _body -> - error "unpossible - decompile UninitializedLetRecSlot" - TermLink r -> pure $ Term.termLink() r - TypeLink r -> pure $ Term.typeLink() r - where - idv = Var.named "x" - id = Term.lam () idv (Term.var() idv) - - -boundVarsIR :: IR e cont -> Set Symbol -boundVarsIR = \case - Let v b body _ -> Set.singleton v <> boundVarsIR b <> boundVarsIR body - LetRec bs body -> Set.fromList (fst <$> bs) <> foldMap (boundVarsIR . snd) bs <> boundVarsIR body - Apply lam _ -> boundVarsIR lam - Handle _ body -> boundVarsIR body - If _ t f -> foldMap boundVarsIR [t,f] - And _ b -> boundVarsIR b - Or _ b -> boundVarsIR b - Match _ cases -> foldMap doCase cases - where doCase (_, _, b, body) = maybe mempty boundVarsIR b <> boundVarsIR body - -- I added all these cases for exhaustiveness checking in the future, - -- and also because I needed the patterns for decompileIR anyway. - -- Sure is ugly though. This ghc doesn't support Language MultiCase. - -- I want to be able to say `_ -> mempty` where _ refers to exactly the other - -- cases that existed at the time I wrote it! 
- Leaf _ -> mempty - AddI _ _ -> mempty - SubI _ _ -> mempty - MultI _ _ -> mempty - DivI _ _ -> mempty - GtI _ _ -> mempty - LtI _ _ -> mempty - GtEqI _ _ -> mempty - LtEqI _ _ -> mempty - EqI _ _ -> mempty - SignumI _ -> mempty - NegateI _ -> mempty - Truncate0I _ -> mempty - ModI _ _ -> mempty - PowI _ _ -> mempty - ShiftRI _ _ -> mempty - ShiftLI _ _ -> mempty - BitAndI _ _ -> mempty - BitOrI _ _ -> mempty - BitXorI _ _ -> mempty - ComplementI _ -> mempty - TrailZeroI _ -> mempty - LeadZeroI _ -> mempty - AddN _ _ -> mempty - DropN _ _ -> mempty - SubN _ _ -> mempty - MultN _ _ -> mempty - DivN _ _ -> mempty - GtN _ _ -> mempty - LtN _ _ -> mempty - GtEqN _ _ -> mempty - LtEqN _ _ -> mempty - EqN _ _ -> mempty - ModN _ _ -> mempty - PowN _ _ -> mempty - ShiftLN _ _ -> mempty - ShiftRN _ _ -> mempty - ToIntN _ -> mempty - BitAndN _ _ -> mempty - BitOrN _ _ -> mempty - BitXorN _ _ -> mempty - ComplementN _ -> mempty - LeadZeroN _ -> mempty - TrailZeroN _ -> mempty - AddF _ _ -> mempty - SubF _ _ -> mempty - MultF _ _ -> mempty - DivF _ _ -> mempty - GtF _ _ -> mempty - LtF _ _ -> mempty - GtEqF _ _ -> mempty - LtEqF _ _ -> mempty - EqF _ _ -> mempty - EqU _ _ -> mempty - CompareU _ _ -> mempty - Bug _ -> mempty - Todo _ -> mempty - MakeSequence _ -> mempty - Construct{} -> mempty - Request{} -> mempty - Not{} -> mempty - -class External e where - decompileExternal :: e -> IO (Term Symbol) - -decompileIR - :: (External e, External cont) => [Symbol] -> IR e cont -> DS (Term Symbol) -decompileIR stack = \case - -- added all these cases for exhaustiveness checking in the future, - -- and also because I needed the patterns for decompileIR anyway. 
- Leaf z -> decompileZ z - AddI x y -> builtin "Int.+" [x,y] - SubI x y -> builtin "Int.-" [x,y] - MultI x y -> builtin "Int.*" [x,y] - DivI x y -> builtin "Int./" [x,y] - GtI x y -> builtin "Int.>" [x,y] - LtI x y -> builtin "Int.<" [x,y] - GtEqI x y -> builtin "Int.>=" [x,y] - LtEqI x y -> builtin "Int.<=" [x,y] - EqI x y -> builtin "Int.==" [x,y] - SignumI x -> builtin "Int.signum" [x] - NegateI x -> builtin "Int.negate" [x] - Truncate0I x -> builtin "Int.truncate0" [x] - ModI x y -> builtin "Int.mod" [x,y] - PowI x y -> builtin "Int.pow" [x,y] - ShiftRI x y -> builtin "Int.shiftRight" [x,y] - ShiftLI x y -> builtin "Int.shiftLeft" [x,y] - BitAndI x y -> builtin "Int.and" [x,y] - BitOrI x y -> builtin "Int.or" [x,y] - BitXorI x y -> builtin "Int.xor" [x,y] - ComplementI x -> builtin "Int.complement" [x] - LeadZeroI x -> builtin "Int.leadingZeros" [x] - TrailZeroI x -> builtin "Int.trailingZeros" [x] - AddN x y -> builtin "Nat.+" [x,y] - DropN x y -> builtin "Nat.drop" [x,y] - SubN x y -> builtin "Nat.sub" [x,y] - MultN x y -> builtin "Nat.*" [x,y] - DivN x y -> builtin "Nat./" [x,y] - GtN x y -> builtin "Nat.>" [x,y] - LtN x y -> builtin "Nat.<" [x,y] - GtEqN x y -> builtin "Nat.>=" [x,y] - LtEqN x y -> builtin "Nat.<=" [x,y] - EqN x y -> builtin "Nat.==" [x,y] - ModN x y -> builtin "Nat.mod" [x,y] - ToIntN x -> builtin "Nat.toInt" [x] - PowN x y -> builtin "Nat.pow" [x,y] - ShiftRN x y -> builtin "Nat.shiftRight" [x,y] - ShiftLN x y -> builtin "Nat.shiftLeft" [x,y] - BitAndN x y -> builtin "Nat.and" [x,y] - BitOrN x y -> builtin "Nat.or" [x,y] - BitXorN x y -> builtin "Nat.xor" [x,y] - ComplementN x -> builtin "Nat.complement" [x] - LeadZeroN x -> builtin "Nat.leadingZeros" [x] - TrailZeroN x -> builtin "Nat.trailingZeros" [x] - AddF x y -> builtin "Float.+" [x,y] - SubF x y -> builtin "Float.-" [x,y] - MultF x y -> builtin "Float.*" [x,y] - DivF x y -> builtin "Float./" [x,y] - GtF x y -> builtin "Float.>" [x,y] - LtF x y -> builtin "Float.<" [x,y] - GtEqF x y 
-> builtin "Float.>=" [x,y] - LtEqF x y -> builtin "Float.<=" [x,y] - EqF x y -> builtin "Float.==" [x,y] - EqU x y -> builtin "Universal.==" [x,y] - CompareU x y -> builtin "Universal.compare" [x,y] - Bug x -> builtin "bug" [x] - Todo x -> builtin "todo" [x] - Let v b body _ -> do - b' <- decompileIR stack b - body' <- decompileIR (v:stack) body - pure $ Term.let1_ False [(v, b')] body' - LetRec bs body -> do - let stack' = reverse (fmap fst bs) ++ stack - secondM f (x,y) = (x,) <$> f y - bs' <- traverse (secondM $ decompileIR stack') bs - body' <- decompileIR stack' body - pure $ Term.letRec' False bs' body' - MakeSequence args -> - Term.seq() <$> traverse decompileZ args - Apply lam args -> - Term.apps' <$> decompileIR stack lam <*> traverse decompileZ args - Construct r cid args -> - Term.apps' (Term.constructor() r cid) <$> traverse decompileZ args - Request r cid args -> - Term.apps' (Term.request() r cid) <$> traverse decompileZ args - Handle h body -> - Term.handle() <$> decompileZ h <*> decompileIR stack body - If c t f -> - Term.iff() <$> decompileZ c <*> decompileIR stack t <*> decompileIR stack f - And x y -> - Term.and() <$> decompileZ x <*> decompileIR stack y - Or x y -> - Term.or() <$> decompileZ x <*> decompileIR stack y - Not x -> builtin "Boolean.not" [x] - Match scrutinee cases -> - Term.match () <$> decompileZ scrutinee <*> traverse decompileMatchCase cases - where - builtin :: (External e, External cont) => Text -> [Z e cont] -> DS (Term Symbol) - builtin t args = - Term.apps' (Term.ref() (R.Builtin t)) <$> traverse decompileZ args - at :: Pos -> Term Symbol - at i = Term.var() (stack !! 
i) - decompileZ :: (External e, External cont) => Z e cont -> DS (Term Symbol) - decompileZ = \case - Slot p -> pure $ at p - LazySlot p -> pure $ at p - Val v -> decompileImpl v - External e -> liftIO $ decompileExternal e - decompilePattern :: Pattern -> Pattern.Pattern - decompilePattern = \case - PatternI i -> Pattern.Int i - PatternN n -> Pattern.Nat n - PatternF f -> Pattern.Float f - PatternB b -> Pattern.Boolean b - PatternT t -> Pattern.Text t - PatternC c -> Pattern.Char c - PatternData r cid pats -> - Pattern.Constructor r cid (d <$> pats) - PatternSequenceLiteral ps -> Pattern.SequenceLiteral $ decompilePattern <$> ps - PatternSequenceCons l r -> Pattern.SequenceOp (decompilePattern l) Pattern.Cons (decompilePattern r) - PatternSequenceSnoc l r -> Pattern.SequenceOp (decompilePattern l) Pattern.Snoc (decompilePattern r) - PatternSequenceConcat _ l r -> Pattern.SequenceOp (decompilePattern l) Pattern.Concat (decompilePattern r) - PatternPure pat -> Pattern.EffectPure (d pat) - PatternBind r cid pats k -> - Pattern.EffectBind r cid (d <$> pats) (d k) - PatternAs pat -> Pattern.As (d pat) - PatternIgnore -> Pattern.Unbound - PatternVar -> Pattern.Var - d = decompilePattern - decompileMatchCase (pat, vars, guard, body) = do - let stack' = reverse vars ++ stack - guard' <- traverse (decompileIR stack') guard - body' <- decompileIR stack' body - pure $ Term.MatchCase (d pat) guard' body' - -instance (Show e, Show cont) => Show (Z e cont) where - show (LazySlot i) = "'#" ++ show i - show (Slot i) = "#" ++ show i - show (Val v) = show v - show (External e) = "External:" <> show e - -freeSlots :: IR e cont -> Set Int -freeSlots ir = case ir of - Let _ _ _ free -> decrementFrees free - LetRec bs body -> let - n = length bs - in foldMap (decrementFreesBy n . freeSlots . 
snd) bs <> - decrementFreesBy n (freeSlots body) - Apply lam args -> freeSlots lam <> foldMap free args - Handle h body -> free h <> freeSlots body - If c t f -> free c <> freeSlots t <> freeSlots f - And x y -> free x <> freeSlots y - Or x y -> free x <> freeSlots y - Match scrutinee cases -> free scrutinee <> foldMap freeInCase cases where - freeInCase (_pat, bound, guard, rhs) = let - n = length bound - in decrementFreesBy n (freeSlots rhs) <> - maybe mempty (decrementFreesBy n . freeSlots) guard - _ -> foldMap free (toList ir) - where - free z = case z of - Slot i -> Set.singleton i - LazySlot i -> Set.singleton i - _ -> Set.empty - --- todo: could make this more efficient -decrementFreesBy :: Int -> Set Int -> Set Int -decrementFreesBy 0 s = s -decrementFreesBy n s = decrementFreesBy (n-1) (decrementFrees s) - -decrementFrees :: Set Int -> Set Int -decrementFrees frees = - Set.map (\x -> x - 1) (Set.delete 0 frees) - -let' :: Symbol -> IR e cont -> IR e cont -> IR e cont -let' name binding body = - Let name binding body (decrementFrees $ freeSlots body) - -builtins :: Map R.Reference (IR e cont) -builtins = Map.fromList $ arity0 <> arityN - where - -- slot = Leaf . Slot - val = Leaf . Val - underapply name = - let r = Term.ref() $ R.Builtin name :: Term SymbolC - in FormClosure (ABT.hash r) r [] - var = Var.named "x" - arity0 = [ (R.Builtin name, val value) | (name, value) <- - [ ("Text.empty", T "") - , ("Sequence.empty", Sequence mempty) - , ("Bytes.empty", Bs mempty) - ] ] - arityN = [ (R.Builtin name, Leaf . 
Val $ Lam arity (underapply name) ir) | - (name, arity, ir) <- - [ ("Int.+", 2, AddI (Slot 1) (Slot 0)) - , ("Int.-", 2, SubI (Slot 1) (Slot 0)) - , ("Int.*", 2, MultI (Slot 1) (Slot 0)) - , ("Int./", 2, DivI (Slot 1) (Slot 0)) - , ("Int.<", 2, LtI (Slot 1) (Slot 0)) - , ("Int.>", 2, GtI (Slot 1) (Slot 0)) - , ("Int.<=", 2, LtEqI (Slot 1) (Slot 0)) - , ("Int.>=", 2, GtEqI (Slot 1) (Slot 0)) - , ("Int.==", 2, EqI (Slot 1) (Slot 0)) - , ("Int.and", 2, BitAndI (Slot 1) (Slot 0)) - , ("Int.or", 2, BitOrI (Slot 1) (Slot 0)) - , ("Int.xor", 2, BitXorI (Slot 1) (Slot 0)) - , ("Int.complement", 1, ComplementI (Slot 0)) - , ("Int.increment", 1, AddI (Val (I 1)) (Slot 0)) - , ("Int.signum", 1, SignumI (Slot 0)) - , ("Int.negate", 1, NegateI (Slot 0)) - , ("Int.truncate0", 1, Truncate0I (Slot 0)) - , ("Int.mod", 2, ModI (Slot 1) (Slot 0)) - , ("Int.pow", 2, PowI (Slot 1) (Slot 0)) - , ("Int.shiftLeft", 2, ShiftLI (Slot 1) (Slot 0)) - , ("Int.shiftRight", 2, ShiftRI (Slot 1) (Slot 0)) - , ("Int.leadingZeros", 1, LeadZeroI (Slot 0)) - , ("Int.trailingZeros", 1, TrailZeroI (Slot 0)) - , ("Int.isEven", 1, let' var (ModI (Slot 0) (Val (I 2))) - (EqI (Val (I 0)) (Slot 0))) - , ("Int.isOdd", 1, let' var (ModI (Slot 0) (Val (I 2))) - (let' var (EqI (Val (I 0)) (Slot 0)) - (Not (Slot 0)))) - - , ("Nat.+", 2, AddN (Slot 1) (Slot 0)) - , ("Nat.drop", 2, DropN (Slot 1) (Slot 0)) - , ("Nat.sub", 2, SubN (Slot 1) (Slot 0)) - , ("Nat.*", 2, MultN (Slot 1) (Slot 0)) - , ("Nat./", 2, DivN (Slot 1) (Slot 0)) - , ("Nat.<", 2, LtN (Slot 1) (Slot 0)) - , ("Nat.>", 2, GtN (Slot 1) (Slot 0)) - , ("Nat.<=", 2, LtEqN (Slot 1) (Slot 0)) - , ("Nat.>=", 2, GtEqN (Slot 1) (Slot 0)) - , ("Nat.==", 2, EqN (Slot 1) (Slot 0)) - , ("Nat.and", 2, BitAndN (Slot 1) (Slot 0)) - , ("Nat.or", 2, BitOrN (Slot 1) (Slot 0)) - , ("Nat.xor", 2, BitXorN (Slot 1) (Slot 0)) - , ("Nat.complement", 1, ComplementN (Slot 0)) - , ("Nat.increment", 1, AddN (Val (N 1)) (Slot 0)) - , ("Nat.mod", 2, ModN (Slot 1) (Slot 0)) - , 
("Nat.pow", 2, PowN (Slot 1) (Slot 0)) - , ("Nat.shiftLeft", 2, ShiftLN (Slot 1) (Slot 0)) - , ("Nat.shiftRight", 2, ShiftRN (Slot 1) (Slot 0)) - , ("Nat.leadingZeros", 1, LeadZeroN (Slot 0)) - , ("Nat.trailingZeros", 1, TrailZeroN (Slot 0)) - , ("Nat.isEven", 1, let' var (ModN (Slot 0) (Val (N 2))) - (EqN (Val (N 0)) (Slot 0))) - , ("Nat.isOdd", 1, let' var (ModN (Slot 0) (Val (N 2))) - (let' var (EqN (Val (N 0)) (Slot 0)) - (Not (Slot 0)))) - , ("Nat.toInt", 1, ToIntN (Slot 0)) - - , ("Float.+", 2, AddF (Slot 1) (Slot 0)) - , ("Float.-", 2, SubF (Slot 1) (Slot 0)) - , ("Float.*", 2, MultF (Slot 1) (Slot 0)) - , ("Float./", 2, DivF (Slot 1) (Slot 0)) - , ("Float.<", 2, LtF (Slot 1) (Slot 0)) - , ("Float.>", 2, GtF (Slot 1) (Slot 0)) - , ("Float.<=", 2, LtEqF (Slot 1) (Slot 0)) - , ("Float.>=", 2, GtEqF (Slot 1) (Slot 0)) - , ("Float.==", 2, EqF (Slot 1) (Slot 0)) - - , ("Universal.==", 2, EqU (Slot 1) (Slot 0)) - , ("Universal.compare", 2, CompareU (Slot 1) (Slot 0)) - , ("Universal.<", 2, let' var (CompareU (Slot 1) (Slot 0)) - (LtI (Slot 0) (Val (I 0)))) - , ("Universal.>", 2, let' var (CompareU (Slot 1) (Slot 0)) - (GtI (Slot 0) (Val (I 0)))) - , ("Universal.>=", 2, let' var (CompareU (Slot 1) (Slot 0)) - (GtEqI (Slot 0) (Val (I 0)))) - , ("Universal.<=", 2, let' var (CompareU (Slot 1) (Slot 0)) - (LtEqI (Slot 0) (Val (I 0)))) - , ("Boolean.not", 1, Not (Slot 0)) - , ("bug", 1, Bug (Slot 0)) - , ("todo", 1, Todo (Slot 0)) - ]] - --- boring instances - -instance Eq SymbolC where - SymbolC _ s == SymbolC _ s2 = s == s2 - -instance Ord SymbolC where - SymbolC _ s `compare` SymbolC _ s2 = s `compare` s2 - -instance ABT.Var SymbolC where - freshIn vs (SymbolC i s) = - SymbolC i (ABT.freshIn (Set.map underlyingSymbol vs) s) - -instance Var SymbolC where - typed s = SymbolC False (Var.typed s) - typeOf (SymbolC _ s) = Var.typeOf s - freshId (SymbolC _ s) = Var.freshId s - freshenId n (SymbolC i s) = SymbolC i (Var.freshenId n s) - -instance (Show e, Show cont) => Show 
(Value e cont) where - show (I n) = show n - show (F n) = show n - show (N n) = show n - show (B b) = show b - show (T t) = show t - show (C c) = show c - show (Bs bs) = show bs - show (Lam n e ir) = "(Lam " <> show n <> " " <> show e <> " (" <> show ir <> "))" - show (Data r cid vs) = "(Data " <> show r <> " " <> show cid <> " " <> show vs <> ")" - show (Sequence vs) = "[" <> intercalateMap ", " show vs <> "]" - show (Ref n s _) = "(Ref " <> show n <> " " <> show s <> ")" - show (TermLink r) = "(TermLink " <> show r <> ")" - show (TypeLink r) = "(TypeLink " <> show r <> ")" - show (Pure v) = "(Pure " <> show v <> ")" - show (Requested r) = "(Requested " <> show r <> ")" - show (Cont ir) = "(Cont " <> show ir <> ")" - show (UninitializedLetRecSlot b bs _body) = - "(UninitializedLetRecSlot " <> show b <> " in " <> show (fst <$> bs)<> ")" - -compilationEnv0 :: CompilationEnv e cont -compilationEnv0 = CompilationEnv builtins mempty - -instance Semigroup (CompilationEnv e cont) where (<>) = mappend - -instance Monoid (CompilationEnv e cont) where - mempty = CompilationEnv mempty mempty - mappend c1 c2 = CompilationEnv ir ctor where - ir = toIR' c1 <> toIR' c2 - ctor = constructorArity' c1 <> constructorArity' c2 - -instance (CyclicEq e, CyclicEq cont) => CyclicEq (UnderapplyStrategy e cont) where - cyclicEq h1 h2 (FormClosure hash1 _ vs1) (FormClosure hash2 _ vs2) = - if hash1 == hash2 then cyclicEq h1 h2 vs1 vs2 - else pure False - cyclicEq h1 h2 (Specialize hash1 _ vs1) (Specialize hash2 _ vs2) = - if hash1 == hash2 then cyclicEq h1 h2 (snd <$> vs1) (snd <$> vs2) - else pure False - cyclicEq _ _ _ _ = pure False - -instance (CyclicEq e, CyclicEq cont) => CyclicEq (Req e cont) where - cyclicEq h1 h2 (Req r1 c1 vs1 k1) (Req r2 c2 vs2 k2) = - if r1 == r2 && c1 == c2 then do - b <- cyclicEq h1 h2 vs1 vs2 - if b then cyclicEq h1 h2 k1 k2 - else pure False - else pure False - -instance (CyclicEq e, CyclicEq cont) => CyclicEq (Value e cont) where - cyclicEq _ _ (I x) (I y) 
= pure (x == y) - cyclicEq _ _ (F x) (F y) = pure (x == y) - cyclicEq _ _ (N x) (N y) = pure (x == y) - cyclicEq _ _ (B x) (B y) = pure (x == y) - cyclicEq _ _ (T x) (T y) = pure (x == y) - cyclicEq _ _ (C x) (C y) = pure (x == y) - cyclicEq _ _ (Bs x) (Bs y) = pure (x == y) - cyclicEq _ _ (TermLink x) (TermLink y) = pure (x == y) - cyclicEq _ _ (TypeLink x) (TypeLink y) = pure (x == y) - cyclicEq h1 h2 (Lam arity1 us _) (Lam arity2 us2 _) = - if arity1 == arity2 then cyclicEq h1 h2 us us2 - else pure False - cyclicEq h1 h2 (Data r1 c1 vs1) (Data r2 c2 vs2) = - if r1 == r2 && c1 == c2 then cyclicEq h1 h2 vs1 vs2 - else pure False - cyclicEq h1 h2 (Sequence v1) (Sequence v2) = cyclicEq h1 h2 v1 v2 - cyclicEq h1 h2 (Ref r1 _ io1) (Ref r2 _ io2) = - if io1 == io2 then pure True - else do - a <- CyT.lookup r1 h1 - b <- CyT.lookup r2 h2 - case (a,b) of - -- We haven't encountered these refs before, descend into them and - -- compare contents. - (Nothing, Nothing) -> do - CyT.insertEnd r1 h1 - CyT.insertEnd r2 h2 - r1 <- readIORef io1 - r2 <- readIORef io2 - cyclicEq h1 h2 r1 r2 - -- We've encountered these refs before, compare the positions where - -- they were first encountered - (Just r1, Just r2) -> pure (r1 == r2) - _ -> pure False - cyclicEq h1 h2 (Pure a) (Pure b) = cyclicEq h1 h2 a b - cyclicEq h1 h2 (Requested r1) (Requested r2) = cyclicEq h1 h2 r1 r2 - cyclicEq h1 h2 (Cont k1) (Cont k2) = cyclicEq h1 h2 k1 k2 - cyclicEq _ _ _ _ = pure False - -constructorId :: Value e cont -> Int -constructorId v = case v of - I _ -> 0 - F _ -> 1 - N _ -> 2 - B _ -> 3 - T _ -> 4 - Bs _ -> 5 - Lam{} -> 6 - Data{} -> 7 - Sequence _ -> 8 - Pure _ -> 9 - Requested _ -> 10 - Ref{} -> 11 - Cont _ -> 12 - C _ -> 13 - UninitializedLetRecSlot{} -> 14 - TermLink _ -> 15 - TypeLink _ -> 16 - -instance (CyclicOrd e, CyclicOrd cont) => CyclicOrd (UnderapplyStrategy e cont) where - cyclicOrd h1 h2 (FormClosure hash1 _ vs1) (FormClosure hash2 _ vs2) = - COrd.bothOrd' h1 h2 hash1 hash2 vs1 vs2 
- cyclicOrd h1 h2 (Specialize hash1 _ vs1) (Specialize hash2 _ vs2) = - COrd.bothOrd' h1 h2 hash1 hash2 (map snd vs1) (map snd vs2) - cyclicOrd _ _ FormClosure{} _ = pure LT - cyclicOrd _ _ Specialize{} _ = pure GT - -instance (CyclicOrd e, CyclicOrd cont) => CyclicOrd (Req e cont) where - cyclicOrd h1 h2 (Req r1 c1 vs1 k1) (Req r2 c2 vs2 k2) = case compare r1 r2 of - EQ -> do - o <- COrd.bothOrd' h1 h2 c1 c2 vs1 vs2 - o <- case o of - EQ -> cyclicOrd h1 h2 k1 k2 - _ -> pure o - case o of - EQ -> pure (r1 `compare` r2) - _ -> pure o - c -> pure c - -instance (CyclicOrd e, CyclicOrd cont) => CyclicOrd (Value e cont) where - cyclicOrd _ _ (I x) (I y) = pure (x `compare` y) - cyclicOrd _ _ (F x) (F y) = pure (x `compare` y) - cyclicOrd _ _ (N x) (N y) = pure (x `compare` y) - cyclicOrd _ _ (B x) (B y) = pure (x `compare` y) - cyclicOrd _ _ (T x) (T y) = pure (x `compare` y) - cyclicOrd _ _ (C x) (C y) = pure (x `compare` y) - cyclicOrd _ _ (Bs x) (Bs y) = pure (x `compare` y) - cyclicOrd _ _ (TermLink x) (TermLink y) = pure (x `compare` y) - cyclicOrd _ _ (TypeLink x) (TypeLink y) = pure (x `compare` y) - cyclicOrd h1 h2 (Lam arity1 us _) (Lam arity2 us2 _) = - COrd.bothOrd' h1 h2 arity1 arity2 us us2 - cyclicOrd h1 h2 (Data r1 c1 vs1) (Data r2 c2 vs2) = - COrd.bothOrd' h1 h2 c1 c2 vs1 vs2 >>= \o -> case o of - EQ -> pure (r1 `compare` r2) - _ -> pure o - cyclicOrd h1 h2 (Sequence v1) (Sequence v2) = cyclicOrd h1 h2 v1 v2 - cyclicOrd h1 h2 (Ref r1 _ io1) (Ref r2 _ io2) = - if io1 == io2 then pure EQ - else do - a <- CyT.lookup r1 h1 - b <- CyT.lookup r2 h2 - case (a,b) of - -- We haven't encountered these refs before, descend into them and - -- compare contents. 
- (Nothing, Nothing) -> do - CyT.insertEnd r1 h1 - CyT.insertEnd r2 h2 - r1 <- readIORef io1 - r2 <- readIORef io2 - cyclicOrd h1 h2 r1 r2 - -- We've encountered these refs before, compare the positions where - -- they were first encountered - (Just r1, Just r2) -> pure (r1 `compare` r2) - _ -> pure $ a `compare` b - cyclicOrd h1 h2 (Pure a) (Pure b) = cyclicOrd h1 h2 a b - cyclicOrd h1 h2 (Requested r1) (Requested r2) = cyclicOrd h1 h2 r1 r2 - cyclicOrd h1 h2 (Cont k1) (Cont k2) = cyclicOrd h1 h2 k1 k2 - cyclicOrd _ _ v1 v2 = pure $ constructorId v1 `compare` constructorId v2 diff --git a/parser-typechecker/src/Unison/Runtime/Interface.hs b/parser-typechecker/src/Unison/Runtime/Interface.hs deleted file mode 100644 index d0b97d6c66..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Interface.hs +++ /dev/null @@ -1,225 +0,0 @@ -{-# language DataKinds #-} -{-# language PatternGuards #-} -{-# language ScopedTypeVariables #-} - -module Unison.Runtime.Interface - ( startRuntime - ) where - -import Control.Exception (try) -import Control.Monad (foldM, (<=<)) - -import Data.Bifunctor (first,second) -import Data.Foldable -import Data.IORef -import Data.Word (Word64) - -import qualified Data.Map.Strict as Map - -import qualified Unison.Term as Tm -import Unison.Var (Var) - -import Unison.DataDeclaration (declFields) -import qualified Unison.LabeledDependency as RF -import Unison.Reference (Reference) -import qualified Unison.Reference as RF - -import Unison.Util.EnumContainers as EC - -import Unison.Codebase.CodeLookup (CodeLookup(..)) -import Unison.Codebase.Runtime (Runtime(..), Error) -import Unison.Codebase.MainTerm (builtinMain) - -import Unison.Parser (Ann(External)) -import Unison.PrettyPrintEnv -import Unison.TermPrinter - -import Unison.Runtime.ANF -import Unison.Runtime.Builtin -import Unison.Runtime.Decompile -import Unison.Runtime.Machine -import Unison.Runtime.MCode -import Unison.Runtime.Pattern -import Unison.Runtime.Stack - -type Term v = Tm.Term v () 
- -data EvalCtx v - = ECtx - { freshTy :: Int - , freshTm :: Word64 - , refTy :: Map.Map RF.Reference RTag - , refTm :: Map.Map RF.Reference Word64 - , combs :: EnumMap Word64 Comb - , dspec :: DataSpec - , backrefTy :: EnumMap RTag RF.Reference - , backrefTm :: EnumMap Word64 (Term v) - , backrefComb :: EnumMap Word64 RF.Reference - } - -uncurryDspec :: DataSpec -> Map.Map (Reference,Int) Int -uncurryDspec = Map.fromList . concatMap f . Map.toList - where - f (r,l) = zipWith (\n c -> ((r,n),c)) [0..] $ either id id l - -numberLetRec :: Word64 -> Term v -> EnumMap Word64 (Term v) -numberLetRec frsh (Tm.LetRecNamed' bs e) - = mapFromList . zip [frsh..] $ e : map snd bs -numberLetRec _ _ = error "impossible" - -baseContext :: forall v. Var v => EvalCtx v -baseContext - = ECtx - { freshTy = fty - , freshTm = ftm - , refTy = builtinTypeNumbering - , refTm = builtinTermNumbering - , combs = emitComb @v mempty <$> numberedTermLookup - , dspec = builtinDataSpec - , backrefTy = builtinTypeBackref - , backrefTm = Tm.ref () <$> builtinTermBackref - , backrefComb = builtinTermBackref - } - where - ftm = 1 + maximum builtinTermNumbering - fty = (1+) . 
fromEnum $ maximum builtinTypeNumbering - --- allocTerm --- :: Var v --- => CodeLookup v m () --- -> EvalCtx v --- -> RF.Reference --- -> IO (EvalCtx v) --- allocTerm _ _ b@(RF.Builtin _) --- = die $ "Unknown builtin term reference: " ++ show b --- allocTerm _ _ (RF.DerivedId _) --- = die $ "TODO: allocTerm: hash reference" - -allocType - :: EvalCtx v - -> RF.Reference - -> Either [Int] [Int] - -> IO (EvalCtx v) -allocType _ b@(RF.Builtin _) _ - = die $ "Unknown builtin type reference: " ++ show b -allocType ctx r cons - = pure $ ctx - { refTy = Map.insert r rt $ refTy ctx - , backrefTy = mapInsert rt r $ backrefTy ctx - , dspec = Map.insert r cons $ dspec ctx - , freshTy = fresh - } - where - (rt, fresh) - | Just rt <- Map.lookup r $ refTy ctx = (rt, freshTy ctx) - | frsh <- freshTy ctx = (toEnum $ frsh, frsh + 1) - -collectDeps - :: Var v - => CodeLookup v IO () - -> Term v - -> IO ([(Reference, Either [Int] [Int])], [Reference]) -collectDeps cl tm - = (,tms) <$> traverse getDecl tys - where - chld = toList $ Tm.labeledDependencies tm - categorize = either (first . (:)) (second . (:)) . RF.toReference - (tys, tms) = foldr categorize ([],[]) chld - getDecl ty@(RF.DerivedId i) = - (ty,) . maybe (Right []) declFields - <$> getTypeDeclaration cl i - getDecl r = pure (r,Right []) - -loadDeps - :: Var v - => CodeLookup v IO () - -> EvalCtx v - -> Term v - -> IO (EvalCtx v) -loadDeps cl ctx tm = do - (tys, _ ) <- collectDeps cl tm - -- TODO: terms - foldM (uncurry . 
allocType) ctx $ filter p tys - where - p (r@RF.DerivedId{},_) - = r `Map.notMember` dspec ctx - || r `Map.notMember` refTy ctx - p _ = False - -addCombs :: EnumMap Word64 Comb -> EvalCtx v -> EvalCtx v -addCombs m ctx = ctx { combs = m <> combs ctx } - -addTermBackrefs :: EnumMap Word64 (Term v) -> EvalCtx v -> EvalCtx v -addTermBackrefs refs ctx = ctx { backrefTm = refs <> backrefTm ctx } - -refresh :: Word64 -> EvalCtx v -> EvalCtx v -refresh w ctx = ctx { freshTm = w } - -ref :: Ord k => Show k => Map.Map k v -> k -> v -ref m k - | Just x <- Map.lookup k m = x - | otherwise = error $ "unknown reference: " ++ show k - -compileTerm - :: Var v => Word64 -> Term v -> EvalCtx v -> EvalCtx v -compileTerm w tm ctx - = finish - . fmap - ( emitCombs frsh - . superNormalize (ref $ refTm ctx) (ref $ refTy ctx)) - . bkrf - . lamLift - . splitPatterns (dspec ctx) - . saturate (uncurryDspec $ dspec ctx) - $ tm - where - frsh = freshTm ctx - bkrf tm = (numberLetRec frsh tm, tm) - finish (recs, (main, aux, frsh')) - = refresh frsh' - . addTermBackrefs recs - . addCombs (mapInsert w main aux) - $ ctx - -watchHook :: IORef Closure -> Stack 'UN -> Stack 'BX -> IO () -watchHook r _ bstk = peek bstk >>= writeIORef r - -combEnv :: EvalCtx v -> Word64 -> Comb -combEnv ctx w - | Just c <- EC.lookup w (combs ctx) = c - | otherwise = error $ "bad combinator reference: " ++ show w - -evalInContext - :: Var v - => PrettyPrintEnv - -> EvalCtx v - -> Word64 - -> IO (Either Error (Term v)) -evalInContext ppe ctx w = do - r <- newIORef BlackHole - let hook = watchHook r - renv = Refs (backrefTy ctx) (backrefComb ctx) - result <- traverse (const $ readIORef r) - . 
first prettyError - <=< try $ apply0 (Just hook) renv (combEnv ctx) w - pure $ decom =<< result - where - decom = decompile (`EC.lookup`backrefTy ctx) (`EC.lookup`backrefTm ctx) - prettyError (PE p) = p - prettyError (BU c) = either id (pretty ppe) $ decom c - -startRuntime :: Var v => IO (Runtime v) -startRuntime = do - ctxVar <- newIORef baseContext - pure $ Runtime - { terminate = pure () - , evaluate = \cl ppe tm -> do - ctx <- readIORef ctxVar - ctx <- loadDeps cl ctx tm - writeIORef ctxVar ctx - let init = freshTm ctx - ctx <- pure $ refresh (init+1) ctx - ctx <- pure $ compileTerm init tm ctx - evalInContext ppe ctx init - , mainType = builtinMain External - } diff --git a/parser-typechecker/src/Unison/Runtime/MCode.hs b/parser-typechecker/src/Unison/Runtime/MCode.hs deleted file mode 100644 index a3dc8001d2..0000000000 --- a/parser-typechecker/src/Unison/Runtime/MCode.hs +++ /dev/null @@ -1,1401 +0,0 @@ -{-# language GADTs #-} -{-# language BangPatterns #-} -{-# language DeriveFunctor #-} -{-# language PatternGuards #-} -{-# language EmptyDataDecls #-} -{-# language PatternSynonyms #-} - -module Unison.Runtime.MCode - ( Args'(..) - , Args(..) - , MLit(..) - , Instr(..) - , Section(..) - , Comb(..) - , Ref(..) - , UPrim1(..) - , UPrim2(..) - , BPrim1(..) - , BPrim2(..) - , Branch(..) 
- , bcount - , ucount - , emitCombs - , emitComb - , prettyCombs - , prettyComb - ) where - -import GHC.Stack (HasCallStack) - -import Control.Applicative (liftA2) - -import Data.Bifunctor (bimap,first) -import Data.Coerce -import Data.List (partition) -import Data.Word (Word64) - -import Data.Primitive.PrimArray - -import qualified Data.Map.Strict as M -import Unison.Util.EnumContainers as EC - -import Data.Text (Text) -import qualified Data.Text as Text - -import Unison.Var (Var) -import Unison.ABT.Normalized (pattern TAbss) -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import qualified Unison.Type as Rf -import qualified Unison.Runtime.IOSource as Rf -import Unison.Runtime.ANF - ( ANormal - , ANormalT - , ANormalTF(..) - , Branched(..) - , Func(..) - , Mem(..) - , SuperNormal(..) - , SuperGroup(..) - , RTag - , CTag - , Tag(..) - , packTags - , pattern TVar - , pattern TLit - , pattern TApp - , pattern TPrm - , pattern THnd - , pattern TFrc - , pattern TShift - , pattern TLets - , pattern TName - , pattern TTm - , pattern TMatch - ) -import qualified Unison.Runtime.ANF as ANF -import Unison.Runtime.Foreign -import Unison.Util.Bytes as Bytes - -import Network.Socket as SYS - ( accept - ) -import Network.Simple.TCP as SYS - ( HostPreference(..) - , bindSock - , connectSock - , listenSock - , closeSock - , send - , recv - ) -import System.IO as SYS - ( BufferMode(..) 
- , Handle - , openFile - , hClose - , hGetBuffering - , hSetBuffering - , hIsEOF - , hIsOpen - , hIsSeekable - , hSeek - , hTell - , stdin, stdout, stderr - ) -import Data.Text.IO as SYS - ( hGetLine - , hPutStr - ) -import Control.Concurrent as SYS - ( threadDelay - , killThread - ) -import Data.Time.Clock.POSIX as SYS - ( getPOSIXTime - , utcTimeToPOSIXSeconds - ) -import System.Directory as SYS - ( getCurrentDirectory - , setCurrentDirectory - , getTemporaryDirectory - , getDirectoryContents - , doesPathExist - -- , doesDirectoryExist - , renameDirectory - , removeFile - , renameFile - , createDirectoryIfMissing - , removeDirectoryRecursive - , getModificationTime - , getFileSize - ) - --- This outlines some of the ideas/features in this core --- language, and how they may be used to implement features of --- the surface language. - ------------------------ --- Delimited control -- ------------------------ - --- There is native support for delimited control operations in --- the core language. This means we can: --- 1. delimit a block of code with an integer tagged prompt, --- which corresponds to pushing a frame onto the --- continuation with said tag --- 2. capture a portion of the continuation up to a particular --- tag frame and turn it into a value, which _removes_ the --- tag frame from the continuation in the process --- 3. push such a captured value back onto the continuation - --- TBD: Since the captured continuations in _delimited_ control --- are (in this case impure) functions, it may make sense to make --- the representation of functions support these captured --- continuations directly. - --- The obvious use case of this feature is effects and handlers. --- Delimiting a block with a prompt is part of installing a --- handler for said block at least naively. The other part is --- establishing the code that should be executed for each --- operation to be handled. 
- --- It's important (I believe) in #2 that the prompt be removed --- from the continuation by a control effect. The captured --- continuation not being automatically delimited corresponds to --- a shallow handler's obligation to re-establish the handling of --- a re-invoked computation if it wishes to do so. The delimiter --- being removed from the capturing code's continuation --- corresponds to a handler being allowed to yield effects from --- the same siganture that it is handling. - --- In special cases, it should be possible to omit use of control --- effects in handlers. At the least, if a handler case resumes --- the computation in tail position, it should be unnecessary to --- capture the continuation at all. If all cases act this way, we --- don't need a delimiter, because we will never capture. - --- TBD: it may make more sense to have prompt pushing be part of --- some other construct, due to A-normal forms of the code. - ------------------------------ --- Unboxed sum-of-products -- ------------------------------ - --- It is not usually stated this way, but one of the core --- features of the STG machine is that functions/closures can --- return unboxed sum-of-products types. This is actually the way --- _all_ data types work in STG. The discriminee of a case --- statement must eventually return by pushing several values --- onto the stack (the product part) and specifying which branch --- to return to (the sum part). - --- The way heap allocated data is produced is that an --- intermediate frame may be in the continuation that grabs this --- information from the local storage and puts it into the heap. --- If this frame were omitted, only the unboxed component would --- be left. Also, in STG, the heap allocated data is just a means --- of reconstructing its unboxed analogue. Evaluating a heap --- allocated data type value just results in pushing its stored --- fields back on the stack, and immediately returning the tag. 
- --- The portion of this with the heap allocation frame omitted --- seems to be a natural match for the case analysis portion of --- handlers. A naive implementation of an effect algebra is as --- the data type of the polynomial functor generated by the --- signature, and handling corresponds to case analysis. However, --- in a real implementation, we don't want a heap allocated --- representation of this algebra, because its purpose is control --- flow. Each operation will be handled once as it occurs, and we --- won't save work by remembering some reified representation of --- which operations were used. - --- Since handlers in unison are written as functions, it seems to --- make sense to define a calling convention for unboxed --- sum-of-products as arguments. Variable numbers of stack --- positions could be pushed for such arguments, with tags --- specifying which case is being provided. - --- TBD: sum arguments to a function correspond to a product of --- functions, so it's possible that the calling convention for --- these functions should be similar to returning to a case, --- where we push arguments and then select which of several --- pieces of code to jump to. This view also seems relevant to --- the optimized implementation of certain forms of handler, --- where we want effects to just directly select some code to --- execute based on state that has been threaded to that point. - --- One thing to note: it probably does not make sense to --- completely divide returns into unboxed returns and allocation --- frames. The reason this works in STG is laziness. Naming a --- computation with `let` does not do any evaluation, but it does --- allocate space for its (boxed) result. The only thing that --- _does_ demand evaluation is case analysis. 
So, if a value with --- sum type is being evaluated, we know it must be about to be --- unpacked, and it makes little sense to pack it on the stack, --- though we can build a closure version of it in the writeback --- location established by `let`. - --- By contrast, in unison a `let` of a sum type evaluates it --- immediately, even if no one is analyzing it. So we might waste --- work rearranging the stack with the unpacked contents when we --- only needed the closure version to begin with. Instead, we --- gain the ability to make the unpacking operation use no stack, --- because we know what we are unpacking must be a value. Turning --- boxed function calls into unboxed versions thus seems like a --- situational optimization, rather than a universal calling --- convention. - -------------------------------- --- Delimited Dynamic Binding -- -------------------------------- - --- There is a final component to the implementation of ability --- handlers in this runtime system, and that is dynamically --- scoped variables associated to each prompt. Each prompt --- corresponds to an ability signature, and `reset` to a handler --- for said signature, but we need storage space for the code --- installed by said handler. It is possible to implement --- dynamically scoped variables entirely with delimited --- continuations, but it is more efficient to keep track of the --- storage directly when manipulating the continuations. - --- The dynamic scoping---and how it interacts with --- continuations---corresponds to the nested structure of --- handlers. Installing a handler establishes a variable scope, --- shadowing outer scopes for the same prompt. Shifting, however, --- can exit these scopes dynamically. So, for instance, if we --- have a structure like: - --- reset 0 $ ... --- reset 1 $ ... --- reset 0 $ ... --- shift 1 - --- We have nested scopes 0>1>0, with the second 0 shadowing the --- first. 
However, when we shift to 1, the inner 0 scope is --- captured into the continuation, and uses of the 0 ability in --- will be handled by the outer handler until it is shadowed --- again (and the captured continuation will re-establish the --- shadowing). - --- Mutation of the variables is possible, but mutation only --- affects the current scope. Essentially, the dynamic scoping is --- of mutable references, and when scope changes, we switch --- between different references, and the mutation of each --- reference does not affect the others. The purpose of the --- mutation is to enable more efficient implementation of --- certain recursive, 'deep' handlers, since those can operate --- more like stateful code than control operators. - -data Args' - = Arg1 !Int - | Arg2 !Int !Int - -- frame index of each argument to the function - | ArgN {-# unpack #-} !(PrimArray Int) - | ArgR !Int !Int - -data Args - = ZArgs - | UArg1 !Int - | UArg2 !Int !Int - | BArg1 !Int - | BArg2 !Int !Int - | DArg2 !Int !Int - | UArgR !Int !Int - | BArgR !Int !Int - | DArgR !Int !Int !Int !Int - | BArgN !(PrimArray Int) - | UArgN !(PrimArray Int) - | DArgN !(PrimArray Int) !(PrimArray Int) - | DArgV !Int !Int - deriving (Show, Eq, Ord) - -ucount, bcount :: Args -> Int - -ucount (UArg1 _) = 1 -ucount (UArg2 _ _) = 2 -ucount (DArg2 _ _) = 1 -ucount (UArgR _ l) = l -ucount (DArgR _ l _ _) = l -ucount _ = 0 -{-# inline ucount #-} - -bcount (BArg1 _) = 1 -bcount (BArg2 _ _) = 2 -bcount (DArg2 _ _) = 1 -bcount (BArgR _ l) = l -bcount (DArgR _ _ _ l) = l -bcount (BArgN a) = sizeofPrimArray a -bcount _ = 0 -{-# inline bcount #-} - -data UPrim1 - -- integral - = DECI | INCI | NEGI | SGNI -- decrement,increment,negate,signum - | LZRO | TZRO | COMN -- leading/trailingZeroes,complement - -- floating - | ABSF | EXPF | LOGF | SQRT -- abs,exp,log,sqrt - | COSF | ACOS | COSH | ACSH -- cos,acos,cosh,acosh - | SINF | ASIN | SINH | ASNH -- sin,asin,sinh,asinh - | TANF | ATAN | TANH | ATNH -- tan,atan,tanh,atanh 
- | ITOF | NTOF | CEIL | FLOR -- intToFloat,natToFloat,ceiling,floor - | TRNF | RNDF -- truncate,round - deriving (Show, Eq, Ord) - -data UPrim2 - -- integral - = ADDI | SUBI | MULI | DIVI | MODI -- +,-,*,/,mod - | SHLI | SHRI | SHRN | POWI -- shiftl,shiftr,shiftr,pow - | EQLI | LEQI | LEQN -- ==,<=,<= - | ANDN | IORN | XORN -- and,or,xor - -- floating - | EQLF | LEQF -- ==,<= - | ADDF | SUBF | MULF | DIVF | ATN2 -- +,-,*,/,atan2 - | POWF | LOGB | MAXF | MINF -- pow,low,max,min - deriving (Show, Eq, Ord) - -data BPrim1 - -- text - = SIZT | USNC | UCNS -- size,unsnoc,uncons - | ITOT | NTOT | FTOT -- intToText,natToText,floatToText - | TTOI | TTON | TTOF -- textToInt,textToNat,textToFloat - | PAKT | UPKT -- pack,unpack - -- sequence - | VWLS | VWRS | SIZS -- viewl,viewr,size - | PAKB | UPKB | SIZB -- pack,unpack,size - | FLTB -- flatten - -- general - | THRO -- throw - deriving (Show, Eq, Ord) - -data BPrim2 - -- universal - = EQLU | CMPU -- ==,compare - -- text - | DRPT | CATT | TAKT -- drop,append,take - | EQLT | LEQT | LEST -- ==,<=,< - -- sequence - | DRPS | CATS | TAKS -- drop,append,take - | CONS | SNOC | IDXS -- cons,snoc,index - | SPLL | SPLR -- splitLeft,splitRight - -- bytes - | TAKB | DRPB | IDXB | CATB -- take,drop,index,append - deriving (Show, Eq, Ord) - -data MLit - = MI !Int - | MD !Double - | MT !Text - | MM !Referent - | MY !Reference - deriving (Show, Eq, Ord) - --- Instructions for manipulating the data stack in the main portion of --- a block -data Instr - -- 1-argument unboxed primitive operations - = UPrim1 !UPrim1 -- primitive instruction - !Int -- index of prim argument - - -- 2-argument unboxed primitive operations - | UPrim2 !UPrim2 -- primitive instruction - !Int -- index of first prim argument - !Int -- index of second prim argument - - -- 1-argument primitive operations that may involve boxed values - | BPrim1 !BPrim1 - !Int - - -- 2-argument primitive operations that may involve boxed values - | BPrim2 !BPrim2 - !Int - !Int - - -- Call 
out to a Haskell function. This is considerably slower - -- for very simple operations, hence the primops. - | ForeignCall !Bool -- catch exceptions - !ForeignFunc -- FFI call - !Args -- arguments - - -- Set the value of a dynamic reference - | SetDyn !Word64 -- the prompt tag of the reference - !Int -- the stack index of the closure to store - - -- Capture the continuation up to a given marker. - | Capture !Word64 -- the prompt tag - - -- This is essentially the opposite of `Call`. Pack a given - -- statically known function into a closure with arguments. - -- No stack is necessary, because no nested evaluation happens, - -- so the instruction directly takes a follow-up. - | Name !Ref !Args - - -- Dump some debugging information about the machine state to - -- the screen. - | Info !String -- prefix for output - - -- Pack a data type value into a closure and place it - -- on the stack. - | Pack !Word64 -- tag - !Args -- arguments to pack - - -- Unpack the contents of a data type onto the stack - | Unpack !Int -- stack index of data to unpack - - -- Push a particular value onto the appropriate stack - | Lit !MLit -- value to push onto the stack - - -- Print a value on the unboxed stack - | Print !Int -- index of the primitive value to print - - -- Put a delimiter on the continuation - | Reset !(EnumSet Word64) -- prompt ids - - | Fork !Section - | Seq !Args - deriving (Show, Eq, Ord) - -data Section - -- Apply a function to arguments. This is the 'slow path', and - -- handles applying functions from arbitrary sources. This - -- requires checks to determine what exactly should happen. - = App - !Bool -- skip argument check for known calling convention - !Ref -- function to call - !Args -- arguments - - -- This is the 'fast path', for when we statically know we're - -- making an exactly saturated call to a statically known - -- function. This allows skipping various checks that can cost - -- time in very tight loops. 
This also allows skipping the - -- stack check if we know that the current stack allowance is - -- sufficient for where we're jumping to. - | Call - !Bool -- skip stack check - !Word64 -- global function reference - !Args -- arguments - - -- Jump to a captured continuation value. - | Jump - !Int -- index of captured continuation - !Args -- arguments to send to continuation - - -- Branch on the value in the unboxed data stack - | Match !Int -- index of unboxed item to match on - !Branch -- branches - - -- Yield control to the current continuation, with arguments - | Yield !Args -- values to yield - - -- Prefix an instruction onto a section - | Ins !Instr !Section - - -- Sequence two sections. The second is pushed as a return - -- point for the results of the first. Stack modifications in - -- the first are lost on return to the second. - | Let !Section !Section - - -- Throw an exception with the given message - | Die String - - -- Immediately stop a thread of interpretation. This is more of - -- a debugging tool than a proper operation to target. - | Exit - deriving (Show, Eq, Ord) - -data Comb - = Lam !Int -- Number of unboxed arguments - !Int -- Number of boxed arguments - !Int -- Maximum needed unboxed frame size - !Int -- Maximum needed boxed frame size - !Section -- Entry - deriving (Show, Eq, Ord) - -data Ref - = Stk !Int -- stack reference to a closure - | Env !Word64 -- global environment reference to a combinator - | Dyn !Word64 -- dynamic scope reference to a closure - deriving (Show, Eq, Ord) - -data Branch - -- if tag == n then t else f - = Test1 !Word64 - !Section - !Section - | Test2 !Word64 !Section -- if tag == m then ... - !Word64 !Section -- else if tag == n then ... - !Section -- else ... - | TestW !Section - !(EnumMap Word64 Section) - | TestT !Section - !(M.Map Text Section) - deriving (Show, Eq, Ord) - --- Convenience patterns for matches used in the algorithms below. 
-pattern MatchW i d cs = Match i (TestW d cs) -pattern MatchT i d cs = Match i (TestT d cs) - --- Representation of the variable context available in the current --- frame. This tracks tags that have been dumped to the stack for --- proper indexing. The `Block` constructor is used to mark when we --- go into the first portion of a `Let`, to track the size of that --- sub-frame. -data Ctx v - = ECtx - | Block (Ctx v) - | Tag (Ctx v) - | Var v Mem (Ctx v) - deriving (Show) - --- Represents the context formed by the top-level let rec around a --- set of definitions. Previous steps have normalized the term to --- only contain a single recursive binding group. The variables in --- this binding group are resolved to numbered combinators rather --- than stack positions. -type RCtx v = M.Map v Word64 - --- Add a sequence of variables and corresponding calling conventions --- to the context. -ctx :: [v] -> [Mem] -> Ctx v -ctx vs cs = pushCtx (zip vs cs) ECtx - --- Look up a variable in the context, getting its position on the --- relevant stack and its calling convention if it is there. -ctxResolve :: Var v => Ctx v -> v -> Maybe (Int,Mem) -ctxResolve ctx v = walk 0 0 ctx - where - walk _ _ ECtx = Nothing - walk ui bi (Block ctx) = walk ui bi ctx - walk ui bi (Tag ctx) = walk (ui+1) bi ctx - walk ui bi (Var x m ctx) - | v == x = case m of BX -> Just (bi,m) ; UN -> Just (ui,m) - | otherwise = walk ui' bi' ctx - where - (ui', bi') = case m of BX -> (ui,bi+1) ; UN -> (ui+1,bi) - --- Add a sequence of variables and calling conventions to the context. 
-pushCtx :: [(v,Mem)] -> Ctx v -> Ctx v -pushCtx new old = foldr (uncurry Var) old new - --- Concatenate two contexts -catCtx :: Ctx v -> Ctx v -> Ctx v -catCtx ECtx r = r -catCtx (Tag l) r = Tag $ catCtx l r -catCtx (Block l) r = Block $ catCtx l r -catCtx (Var v m l) r = Var v m $ catCtx l r - --- Split the context after a particular variable -breakAfter :: Eq v => (v -> Bool) -> Ctx v -> (Ctx v, Ctx v) -breakAfter _ ECtx = (ECtx, ECtx) -breakAfter p (Tag vs) = first Tag $ breakAfter p vs -breakAfter p (Block vs) = first Block $ breakAfter p vs -breakAfter p (Var v m vs) = (Var v m lvs, rvs) - where - (lvs, rvs) - | p v = (ECtx, vs) - | otherwise = breakAfter p vs - --- Modify the context to contain the variables introduced by an --- unboxed sum -sumCtx :: Var v => Ctx v -> v -> [(v,Mem)] -> Ctx v -sumCtx ctx v vcs - | (lctx, rctx) <- breakAfter (== v) ctx - = catCtx lctx $ pushCtx vcs rctx - --- Look up a variable in the top let rec context -rctxResolve :: Var v => RCtx v -> v -> Maybe Word64 -rctxResolve ctx u = M.lookup u ctx - --- Compile a top-level definition group to a collection of combinators. --- The values in the recursive group are numbered according to the --- provided word. -emitCombs - :: Var v => Word64 -> SuperGroup v - -> (Comb, EnumMap Word64 Comb, Word64) -emitCombs frsh (Rec grp ent) - = (emitComb rec ent, EC.mapFromList aux, frsh') - where - frsh' = frsh + fromIntegral (length grp) - (rvs, cmbs) = unzip grp - rec = M.fromList $ zip rvs [frsh..] - aux = zip [frsh..] $ emitComb rec <$> cmbs - --- Type for aggregating the necessary stack frame size. First field is --- unboxed size, second is boxed. The Applicative instance takes the --- point-wise maximum, so that combining values from different branches --- results in finding the maximum value of either size necessary. 
-data Counted a = C !Int !Int a - deriving (Functor) - -instance Applicative Counted where - pure = C 0 0 - C u0 b0 f <*> C u1 b1 x = C (max u0 u1) (max b0 b1) (f x) - --- Counts the stack space used by a context and annotates a value --- with it. -countCtx :: Ctx v -> a -> Counted a -countCtx = go 0 0 - where - go !ui !bi (Var _ UN ctx) = go (ui+1) bi ctx - go ui bi (Var _ BX ctx) = go ui (bi+1) ctx - go ui bi (Tag ctx) = go (ui+1) bi ctx - go ui bi (Block ctx) = go ui bi ctx - go ui bi ECtx = C ui bi - -emitComb :: Var v => RCtx v -> SuperNormal v -> Comb -emitComb rec (Lambda ccs (TAbss vs bd)) - = Lam 0 (length vs) u b s - where C u b s = emitSection rec (ctx vs ccs) bd - -addCount :: Int -> Int -> Counted a -> Counted a -addCount i j (C u b x) = C (u+i) (b+j) x - --- Emit a machine code section from an ANF term -emitSection - :: Var v - => RCtx v -> Ctx v -> ANormal v - -> Counted Section -emitSection rec ctx (TLets us ms bu bo) - = emitLet rec ctx bu $ emitSection rec ectx bo - where - ectx = pushCtx (zip us ms) ctx -emitSection rec ctx (TName u (Left f) args bo) - = emitClosures rec ctx args $ \ctx as - -> Ins (Name (Env f) as) <$> emitSection rec (Var u BX ctx) bo -emitSection rec ctx (TName u (Right v) args bo) - | Just (i,BX) <- ctxResolve ctx v - = emitClosures rec ctx args $ \ctx as - -> Ins (Name (Stk i) as) <$> emitSection rec (Var u BX ctx) bo - | Just n <- rctxResolve rec v - = emitClosures rec ctx args $ \ctx as - -> Ins (Name (Env n) as) <$> emitSection rec (Var u BX ctx) bo - | otherwise = emitSectionVErr v -emitSection rec ctx (TVar v) - | Just (i,BX) <- ctxResolve ctx v = countCtx ctx . Yield $ BArg1 i - | Just (i,UN) <- ctxResolve ctx v = countCtx ctx . Yield $ UArg1 i - | Just j <- rctxResolve rec v = countCtx ctx $ App False (Env j) ZArgs - | otherwise = emitSectionVErr v -emitSection _ ctx (TPrm p args) - -- 3 is a conservative estimate of how many extra stack slots - -- a prim op will need for its results. - = addCount 3 3 . 
countCtx ctx - . Ins (emitPOp p $ emitArgs ctx args) . Yield $ DArgV i j - where - (i, j) = countBlock ctx -emitSection rec ctx (TApp f args) - = emitClosures rec ctx args $ \ctx as - -> countCtx ctx $ emitFunction rec ctx f as -emitSection _ ctx (TLit l) - = c . countCtx ctx . Ins (emitLit l) . Yield $ litArg l - where - c | ANF.T{} <- l = addCount 0 1 - | ANF.LM{} <- l = addCount 0 1 - | ANF.LY{} <- l = addCount 0 1 - | otherwise = addCount 1 0 -emitSection rec ctx (TMatch v bs) - | Just (i,BX) <- ctxResolve ctx v - , MatchData _ cs df <- bs - = Ins (Unpack i) - <$> emitDataMatching rec ctx cs df - | Just (i,BX) <- ctxResolve ctx v - , MatchRequest hs df <- bs - = Ins (Unpack i) - <$> emitRequestMatching rec ctx hs df - | Just (i,UN) <- ctxResolve ctx v - , MatchIntegral cs df <- bs - = emitIntegralMatching rec ctx i cs df - | Just (i,BX) <- ctxResolve ctx v - , MatchText cs df <- bs - = emitTextMatching rec ctx i cs df - | Just (i,UN) <- ctxResolve ctx v - , MatchSum cs <- bs - = emitSumMatching rec ctx v i cs - | Just (_,cc) <- ctxResolve ctx v - = error - $ "emitSection: mismatched calling convention for match: " - ++ matchCallingError cc bs - | otherwise - = error - $ "emitSection: could not resolve match variable: " ++ show (ctx,v) -emitSection rec ctx (THnd rts h b) - | Just (i,BX) <- ctxResolve ctx h - = Ins (Reset (EC.setFromList rs)) - . 
flip (foldr (\r -> Ins (SetDyn r i))) rs - <$> emitSection rec ctx b - | otherwise = emitSectionVErr h - where - rs = rawTag <$> rts - -emitSection rec ctx (TShift i v e) - = Ins (Capture $ rawTag i) - <$> emitSection rec (Var v BX ctx) e -emitSection _ ctx (TFrc v) - | Just (i,BX) <- ctxResolve ctx v - = countCtx ctx $ App False (Stk i) ZArgs - | Just _ <- ctxResolve ctx v = error - $ "emitSection: values to be forced must be boxed: " ++ show v - | otherwise = emitSectionVErr v -emitSection _ _ tm = error $ "emitSection: unhandled code: " ++ show tm - --- Emit the code for a function call -emitFunction :: Var v => RCtx v -> Ctx v -> Func v -> Args -> Section -emitFunction rec ctx (FVar v) as - | Just (i,BX) <- ctxResolve ctx v - = App False (Stk i) as - | Just j <- rctxResolve rec v - = App False (Env j) as - | otherwise = emitSectionVErr v -emitFunction _ _ (FComb n) as - | False -- known saturated call - = Call False n as - | False -- known unsaturated call - = Ins (Name (Env n) as) $ Yield (BArg1 0) - | otherwise -- slow path - = App False (Env n) as -emitFunction _ _ (FCon r t) as - = Ins (Pack (packTags r t) as) - . Yield $ BArg1 0 -emitFunction _ _ (FReq a e) as - -- Currently implementing packed calling convention for abilities - = Ins (Lit (MI . fromIntegral $ rawTag e)) - . Ins (Pack (rawTag a) (reqArgs as)) - . 
App True (Dyn $ rawTag a) $ BArg1 0 -emitFunction _ ctx (FCont k) as - | Just (i, BX) <- ctxResolve ctx k = Jump i as - | Nothing <- ctxResolve ctx k = emitFunctionVErr k - | otherwise = error $ "emitFunction: continuations are boxed" -emitFunction _ _ (FPrim _) _ - = error "emitFunction: impossible" - --- Modify function arguments for packing into a request -reqArgs :: Args -> Args -reqArgs = \case - ZArgs -> UArg1 0 - UArg1 i -> UArg2 0 (i+1) - UArg2 i j - | i == 0 && j == 1 -> UArgR 0 3 - | otherwise -> UArgN (fl [0,i+1,j+1]) - BArg1 i -> DArg2 0 i - BArg2 i j - | j == i+1 -> DArgR 0 1 i 2 - | otherwise -> DArgN (fl [0]) (fl [i,j]) - DArg2 i j - | i == 0 -> DArgR 0 2 j 1 - | otherwise -> DArgN (fl [0,i+1]) (fl [j]) - UArgR i l - | i == 0 -> UArgR 0 (l+1) - | otherwise -> UArgN (fl $ [0] ++ Prelude.take l [i+1..]) - BArgR i l -> DArgR 0 1 i l - DArgR ui ul bi bl - | ui == 0 -> DArgR 0 (ul+1) bi bl - | otherwise -> DArgN (fl $ [0] ++ Prelude.take ul [ui+1..]) - (fl $ Prelude.take bl [bi..]) - UArgN us -> UArgN (fl $ [0] ++ fmap (+1) (tl us)) - BArgN bs -> DArgN (fl [0]) bs - DArgN us bs -> DArgN (fl $ [0] ++ fmap (+1) (tl us)) bs - DArgV i j -> DArgV i j - where - fl = primArrayFromList - tl = primArrayToList - -countBlock :: Ctx v -> (Int, Int) -countBlock = go 0 0 - where - go !ui !bi (Var _ UN ctx) = go (ui+1) bi ctx - go ui bi (Var _ BX ctx) = go ui (bi+1) ctx - go ui bi (Tag ctx) = go (ui+1) bi ctx - go ui bi _ = (ui, bi) - -matchCallingError :: Mem -> Branched v -> String -matchCallingError cc b = "(" ++ show cc ++ "," ++ brs ++ ")" - where - brs | MatchData _ _ _ <- b = "MatchData" - | MatchEmpty <- b = "MatchEmpty" - | MatchIntegral _ _ <- b = "MatchIntegral" - | MatchRequest _ _ <- b = "MatchRequest" - | MatchSum _ <- b = "MatchSum" - | MatchText _ _ <- b = "MatchText" - -emitSectionVErr :: (Var v, HasCallStack) => v -> a -emitSectionVErr v - = error - $ "emitSection: could not resolve function variable: " ++ show v - -emitFunctionVErr :: (Var v, 
HasCallStack) => v -> a -emitFunctionVErr v - = error - $ "emitFunction: could not resolve function variable: " ++ show v - -litArg :: ANF.Lit -> Args -litArg ANF.T{} = BArg1 0 -litArg ANF.LM{} = BArg1 0 -litArg ANF.LY{} = BArg1 0 -litArg _ = UArg1 0 - --- Emit machine code for a let expression. Some expressions do not --- require a machine code Let, which uses more complicated stack --- manipulation. -emitLet - :: Var v - => RCtx v -> Ctx v -> ANormalT v - -> Counted Section - -> Counted Section -emitLet _ _ (ALit l) - = fmap (Ins $ emitLit l) -emitLet _ ctx (AApp (FComb n) args) - -- We should be able to tell if we are making a saturated call - -- or not here. We aren't carrying the information here yet, though. - | False -- not saturated - = fmap (Ins . Name (Env n) $ emitArgs ctx args) -emitLet _ ctx (AApp (FCon r n) args) - = fmap (Ins . Pack (packTags r n) $ emitArgs ctx args) -emitLet _ ctx (AApp (FPrim p) args) - = fmap (Ins . either emitPOp emitIOp p $ emitArgs ctx args) -emitLet rec ctx bnd - = liftA2 Let (emitSection rec (Block ctx) (TTm bnd)) - --- Translate from ANF prim ops to machine code operations. The --- machine code operations are divided with respect to more detailed --- information about expected number and types of arguments. 
-emitPOp :: ANF.POp -> Args -> Instr --- Integral -emitPOp ANF.ADDI = emitP2 ADDI -emitPOp ANF.ADDN = emitP2 ADDI -emitPOp ANF.SUBI = emitP2 SUBI -emitPOp ANF.SUBN = emitP2 SUBI -emitPOp ANF.MULI = emitP2 MULI -emitPOp ANF.MULN = emitP2 MULI -emitPOp ANF.DIVI = emitP2 DIVI -emitPOp ANF.DIVN = emitP2 DIVI -emitPOp ANF.MODI = emitP2 MODI -- TODO: think about how these behave -emitPOp ANF.MODN = emitP2 MODI -- TODO: think about how these behave -emitPOp ANF.POWI = emitP2 POWI -emitPOp ANF.POWN = emitP2 POWI -emitPOp ANF.SHLI = emitP2 SHLI -emitPOp ANF.SHLN = emitP2 SHLI -- Note: left shift behaves uniformly -emitPOp ANF.SHRI = emitP2 SHRI -emitPOp ANF.SHRN = emitP2 SHRN -emitPOp ANF.LEQI = emitP2 LEQI -emitPOp ANF.LEQN = emitP2 LEQN -emitPOp ANF.EQLI = emitP2 EQLI -emitPOp ANF.EQLN = emitP2 EQLI - -emitPOp ANF.SGNI = emitP1 SGNI -emitPOp ANF.NEGI = emitP1 NEGI -emitPOp ANF.INCI = emitP1 INCI -emitPOp ANF.INCN = emitP1 INCI -emitPOp ANF.DECI = emitP1 DECI -emitPOp ANF.DECN = emitP1 DECI -emitPOp ANF.TZRO = emitP1 TZRO -emitPOp ANF.LZRO = emitP1 LZRO -emitPOp ANF.ANDN = emitP2 ANDN -emitPOp ANF.IORN = emitP2 IORN -emitPOp ANF.XORN = emitP2 XORN -emitPOp ANF.COMN = emitP1 COMN - --- Float -emitPOp ANF.ADDF = emitP2 ADDF -emitPOp ANF.SUBF = emitP2 SUBF -emitPOp ANF.MULF = emitP2 MULF -emitPOp ANF.DIVF = emitP2 DIVF -emitPOp ANF.LEQF = emitP2 LEQF -emitPOp ANF.EQLF = emitP2 EQLF - -emitPOp ANF.MINF = emitP2 MINF -emitPOp ANF.MAXF = emitP2 MAXF - -emitPOp ANF.POWF = emitP2 POWF -emitPOp ANF.EXPF = emitP1 EXPF -emitPOp ANF.ABSF = emitP1 ABSF -emitPOp ANF.SQRT = emitP1 SQRT -emitPOp ANF.LOGF = emitP1 LOGF -emitPOp ANF.LOGB = emitP2 LOGB - -emitPOp ANF.CEIL = emitP1 CEIL -emitPOp ANF.FLOR = emitP1 FLOR -emitPOp ANF.TRNF = emitP1 TRNF -emitPOp ANF.RNDF = emitP1 RNDF - -emitPOp ANF.COSF = emitP1 COSF -emitPOp ANF.SINF = emitP1 SINF -emitPOp ANF.TANF = emitP1 TANF -emitPOp ANF.COSH = emitP1 COSH -emitPOp ANF.SINH = emitP1 SINH -emitPOp ANF.TANH = emitP1 TANH -emitPOp ANF.ACOS = 
emitP1 ACOS -emitPOp ANF.ATAN = emitP1 ATAN -emitPOp ANF.ASIN = emitP1 ASIN -emitPOp ANF.ACSH = emitP1 ACSH -emitPOp ANF.ASNH = emitP1 ASNH -emitPOp ANF.ATNH = emitP1 ATNH -emitPOp ANF.ATN2 = emitP2 ATN2 - --- conversions -emitPOp ANF.ITOF = emitP1 ITOF -emitPOp ANF.NTOF = emitP1 NTOF -emitPOp ANF.ITOT = emitBP1 ITOT -emitPOp ANF.NTOT = emitBP1 NTOT -emitPOp ANF.FTOT = emitBP1 FTOT -emitPOp ANF.TTON = emitBP1 TTON -emitPOp ANF.TTOI = emitBP1 TTOI -emitPOp ANF.TTOF = emitBP1 TTOF - --- text -emitPOp ANF.CATT = emitBP2 CATT -emitPOp ANF.TAKT = emitBP2 TAKT -emitPOp ANF.DRPT = emitBP2 DRPT -emitPOp ANF.SIZT = emitBP1 SIZT -emitPOp ANF.UCNS = emitBP1 UCNS -emitPOp ANF.USNC = emitBP1 USNC -emitPOp ANF.EQLT = emitBP2 EQLT -emitPOp ANF.LEQT = emitBP2 LEQT -emitPOp ANF.PAKT = emitBP1 PAKT -emitPOp ANF.UPKT = emitBP1 UPKT - --- sequence -emitPOp ANF.CATS = emitBP2 CATS -emitPOp ANF.TAKS = emitBP2 TAKS -emitPOp ANF.DRPS = emitBP2 DRPS -emitPOp ANF.SIZS = emitBP1 SIZS -emitPOp ANF.CONS = emitBP2 CONS -emitPOp ANF.SNOC = emitBP2 SNOC -emitPOp ANF.IDXS = emitBP2 IDXS -emitPOp ANF.VWLS = emitBP1 VWLS -emitPOp ANF.VWRS = emitBP1 VWRS -emitPOp ANF.SPLL = emitBP2 SPLL -emitPOp ANF.SPLR = emitBP2 SPLR - --- bytes -emitPOp ANF.PAKB = emitBP1 PAKB -emitPOp ANF.UPKB = emitBP1 UPKB -emitPOp ANF.TAKB = emitBP2 TAKB -emitPOp ANF.DRPB = emitBP2 DRPB -emitPOp ANF.IDXB = emitBP2 IDXB -emitPOp ANF.SIZB = emitBP1 SIZB -emitPOp ANF.FLTB = emitBP1 FLTB -emitPOp ANF.CATB = emitBP2 CATB - --- universal comparison -emitPOp ANF.EQLU = emitBP2 EQLU -emitPOp ANF.CMPU = emitBP2 CMPU - --- error call -emitPOp ANF.EROR = emitBP1 THRO - --- non-prim translations -emitPOp ANF.BLDS = Seq -emitPOp ANF.FORK = \case - BArg1 i -> Fork $ App True (Stk i) ZArgs - _ -> error "fork takes exactly one boxed argument" -emitPOp ANF.PRNT = \case - BArg1 i -> Print i - _ -> error "print takes exactly one boxed argument" -emitPOp ANF.INFO = \case - ZArgs -> Info "debug" - _ -> error "info takes no arguments" --- handled 
in emitSection because Die is not an instruction - --- Emit machine code for ANF IO operations. These are all translated --- to 'foreing function' calls, but there is a special case for the --- standard handle access function, because it does not yield an --- explicit error. -emitIOp :: ANF.IOp -> Args -> Instr -emitIOp iop@ANF.STDHND = ForeignCall False (iopToForeign iop) -emitIOp iop = ForeignCall True (iopToForeign iop) - -bufferModeResult :: BufferMode -> ForeignRslt -bufferModeResult NoBuffering = [Left 0] -bufferModeResult LineBuffering = [Left 1] -bufferModeResult (BlockBuffering Nothing) = [Left 3] -bufferModeResult (BlockBuffering (Just n)) = [Left 4, Left n] - -booleanResult :: Bool -> ForeignRslt -booleanResult b = [Left $ fromEnum b] - -intResult :: Int -> ForeignRslt -intResult i = [Left i] - --- TODO: this seems questionable, but the existing IO source is --- saying that these things return Nat, not arbitrary precision --- integers. -intg2natResult :: Integer -> ForeignRslt -intg2natResult i = [Left $ fromInteger i] - -stringResult :: String -> ForeignRslt -stringResult = wrappedResult Rf.textRef . 
Text.pack - -wrappedResult :: Reference -> a -> ForeignRslt -wrappedResult r x = [Right $ Wrap r x] - -handleResult :: Handle -> ForeignRslt -handleResult h = [Right $ Wrap Rf.handleReference h] - -timeResult :: RealFrac r => r -> ForeignRslt -timeResult t = intResult $ round t - -maybeResult' - :: (a -> (Int, ForeignRslt)) -> Maybe a -> ForeignRslt -maybeResult' _ Nothing = [Left 0] -maybeResult' f (Just x) - | (i, r) <- f x = Left (i+1) : r - --- Implementations of ANF IO operations -iopToForeign :: ANF.IOp -> ForeignFunc -iopToForeign ANF.OPENFI - = foreign2 $ \fp mo -> handleResult <$> openFile fp mo -iopToForeign ANF.CLOSFI - = foreign1 $ \h -> [] <$ hClose h -iopToForeign ANF.ISFEOF - = foreign1 $ \h -> booleanResult <$> hIsEOF h -iopToForeign ANF.ISFOPN - = foreign1 $ \h -> booleanResult <$> hIsOpen h -iopToForeign ANF.ISSEEK - = foreign1 $ \h -> booleanResult <$> hIsSeekable h -iopToForeign ANF.SEEKFI - = foreign3 $ \h sm n -> [] <$ hSeek h sm (fromIntegral (n :: Int)) -iopToForeign ANF.POSITN - = foreign1 $ \h -> intg2natResult <$> hTell h -iopToForeign ANF.GBUFFR - = foreign1 $ \h -> bufferModeResult <$> hGetBuffering h -iopToForeign ANF.SBUFFR - = foreign2 $ \h bm -> [] <$ hSetBuffering h bm -iopToForeign ANF.GTLINE - = foreign1 $ \h -> wrappedResult Rf.textRef <$> hGetLine h -iopToForeign ANF.GTTEXT - = error "todo" -- foreign1 $ \h -> pure . Right . 
Wrap <$> hGetText h -iopToForeign ANF.PUTEXT - = foreign2 $ \h t -> [] <$ hPutStr h t -iopToForeign ANF.SYTIME - = foreign0 $ timeResult <$> getPOSIXTime -iopToForeign ANF.GTMPDR - = foreign0 $ stringResult <$> getTemporaryDirectory -iopToForeign ANF.GCURDR - = foreign0 $ stringResult <$> getCurrentDirectory -iopToForeign ANF.SCURDR - = foreign1 $ \fp -> [] <$ setCurrentDirectory (Text.unpack fp) -iopToForeign ANF.DCNTNS - = foreign1 $ \fp -> - error "todo" <$ getDirectoryContents (Text.unpack fp) -iopToForeign ANF.FEXIST - = foreign1 $ \fp -> booleanResult <$> doesPathExist (Text.unpack fp) -iopToForeign ANF.ISFDIR = error "todo" -iopToForeign ANF.CRTDIR - = foreign1 $ \fp -> - [] <$ createDirectoryIfMissing True (Text.unpack fp) -iopToForeign ANF.REMDIR - = foreign1 $ \fp -> [] <$ removeDirectoryRecursive (Text.unpack fp) -iopToForeign ANF.RENDIR - = foreign2 $ \fmp top -> - [] <$ renameDirectory (Text.unpack fmp) (Text.unpack top) -iopToForeign ANF.REMOFI - = foreign1 $ \fp -> [] <$ removeFile (Text.unpack fp) -iopToForeign ANF.RENAFI - = foreign2 $ \fmp top -> - [] <$ renameFile (Text.unpack fmp) (Text.unpack top) -iopToForeign ANF.GFTIME - = foreign1 $ \fp -> - timeResult . 
utcTimeToPOSIXSeconds - <$> getModificationTime (Text.unpack fp) -iopToForeign ANF.GFSIZE - = foreign1 $ \fp -> intg2natResult <$> getFileSize (Text.unpack fp) -iopToForeign ANF.SRVSCK - = foreign2 $ \mhst port -> - wrappedResult Rf.socketReference - <$> SYS.bindSock (hostPreference mhst) (Text.unpack port) -iopToForeign ANF.LISTEN - = foreign1 $ \sk -> - [] <$ SYS.listenSock sk 2048 -iopToForeign ANF.CLISCK - = foreign2 $ \ho po -> - wrappedResult Rf.socketReference - <$> SYS.connectSock (Text.unpack ho) (Text.unpack po) -iopToForeign ANF.CLOSCK - = foreign1 $ \sk -> [] <$ SYS.closeSock sk -iopToForeign ANF.SKACPT - = foreign1 $ \sk -> - wrappedResult Rf.socketReference <$> SYS.accept sk -iopToForeign ANF.SKSEND - = foreign2 $ \sk bs -> - [] <$ SYS.send sk (Bytes.toByteString bs) -iopToForeign ANF.SKRECV - = foreign2 $ \hs n -> - maybeResult' ((0,) . wrappedResult Rf.bytesRef) - . fmap Bytes.fromByteString - <$> SYS.recv hs n -iopToForeign ANF.THKILL - = foreign1 $ \tid -> [] <$ killThread tid -iopToForeign ANF.THDELY - = foreign1 $ \n -> [] <$ threadDelay n -iopToForeign ANF.STDHND - = foreign1 $ \(n :: Int) -> case n of - 0 -> pure [Left 1, Right . Wrap Rf.handleReference $ SYS.stdin] - 1 -> pure [Left 1, Right . Wrap Rf.handleReference $ SYS.stdout] - 2 -> pure [Left 1, Right . 
Wrap Rf.handleReference $ SYS.stderr] - _ -> pure [Left 0] - -hostPreference :: Maybe Text -> SYS.HostPreference -hostPreference Nothing = SYS.HostAny -hostPreference (Just host) = SYS.Host $ Text.unpack host - --- Helper functions for packing the variable argument representation --- into the indexes stored in prim op instructions -emitP1 :: UPrim1 -> Args -> Instr -emitP1 p (UArg1 i) = UPrim1 p i -emitP1 p a - = error $ "wrong number of args for unary unboxed primop: " - ++ show (p, a) - -emitP2 :: UPrim2 -> Args -> Instr -emitP2 p (UArg2 i j) = UPrim2 p i j -emitP2 p a - = error $ "wrong number of args for binary unboxed primop: " - ++ show (p, a) - -emitBP1 :: BPrim1 -> Args -> Instr -emitBP1 p (UArg1 i) = BPrim1 p i -emitBP1 p (BArg1 i) = BPrim1 p i -emitBP1 p a - = error $ "wrong number of args for unary boxed primop: " - ++ show (p,a) - -emitBP2 :: BPrim2 -> Args -> Instr -emitBP2 p (UArg2 i j) = BPrim2 p i j -emitBP2 p (BArg2 i j) = BPrim2 p i j -emitBP2 p (DArg2 i j) = BPrim2 p i j -emitBP2 p a - = error $ "wrong number of args for binary boxed primop: " - ++ show (p,a) - -emitDataMatching - :: Var v - => RCtx v - -> Ctx v - -> EnumMap CTag ([Mem], ANormal v) - -> Maybe (ANormal v) - -> Counted Section -emitDataMatching rec ctx cs df - = MatchW 0 <$> edf <*> traverse (emitCase rec ctx) (coerce cs) - where - -- Note: this is not really accurate. A default data case needs - -- stack space corresponding to the actual data that shows up there. - -- However, we currently don't use default cases for data. - edf | Just co <- df = emitSection rec ctx co - | otherwise = countCtx ctx $ Die "missing data case" - --- Emits code corresponding to an unboxed sum match. --- The match is against a tag on the stack, and cases introduce --- variables to the middle of the context, because the fields were --- already there, but it was unknown how many there were until --- branching on the tag. 
-emitSumMatching - :: Var v - => RCtx v - -> Ctx v - -> v - -> Int - -> EnumMap Word64 ([Mem], ANormal v) - -> Counted Section -emitSumMatching rec ctx v i cs - = MatchW i edf <$> traverse (emitSumCase rec ctx v) cs - where - edf = Die "uncovered unboxed sum case" - -emitRequestMatching - :: Var v - => RCtx v - -> Ctx v - -> EnumMap RTag (EnumMap CTag ([Mem], ANormal v)) - -> ANormal v - -> Counted Section -emitRequestMatching rec ctx hs df = MatchW 0 edf <$> tops - where - tops = mapInsert 0 - <$> emitCase rec ctx ([BX], df) - <*> traverse f (coerce hs) - f cs = MatchW 1 edf <$> traverse (emitCase rec ctx) cs - edf = Die "unhandled ability" - -emitIntegralMatching - :: Var v - => RCtx v - -> Ctx v - -> Int - -> EnumMap Word64 (ANormal v) - -> Maybe (ANormal v) - -> Counted Section -emitIntegralMatching rec ctx i cs df - = MatchW i <$> edf <*> traverse (emitCase rec ctx . ([],)) cs - where - edf | Just co <- df = emitSection rec ctx co - | otherwise = countCtx ctx $ Die "missing integral case" - -emitTextMatching - :: Var v - => RCtx v - -> Ctx v - -> Int - -> M.Map Text (ANormal v) - -> Maybe (ANormal v) - -> Counted Section -emitTextMatching rec ctx i cs df - = MatchT i <$> edf <*> traverse (emitCase rec ctx . 
([],)) cs - where - edf | Just co <- df = emitSection rec ctx co - | otherwise = countCtx ctx $ Die "missing text case" - -emitCase - :: Var v - => RCtx v -> Ctx v -> ([Mem], ANormal v) - -> Counted Section -emitCase rec ctx (ccs, TAbss vs bo) - = emitSection rec (Tag $ pushCtx (zip vs ccs) ctx) bo - -emitSumCase - :: Var v - => RCtx v -> Ctx v -> v -> ([Mem], ANormal v) - -> Counted Section -emitSumCase rec ctx v (ccs, TAbss vs bo) - = emitSection rec (sumCtx ctx v $ zip vs ccs) bo - -emitLit :: ANF.Lit -> Instr -emitLit l = Lit $ case l of - ANF.I i -> MI $ fromIntegral i - ANF.N n -> MI $ fromIntegral n - ANF.C c -> MI $ fromEnum c - ANF.F d -> MD d - ANF.T t -> MT t - ANF.LM r -> MM r - ANF.LY r -> MY r - --- Emits some fix-up code for calling functions. Some of the --- variables in scope come from the top-level let rec, but these --- are definitions, not values on the stack. These definitions cannot --- be passed directly as function arguments, and must have a --- corresponding stack entry allocated first. So, this function inserts --- these allocations and passes the appropriate context into the --- provided continuation. -emitClosures - :: Var v - => RCtx v -> Ctx v -> [v] - -> (Ctx v -> Args -> Counted Section) - -> Counted Section -emitClosures rec ctx args k - = allocate ctx args $ \ctx -> k ctx $ emitArgs ctx args - where - allocate ctx [] k = k ctx - allocate ctx (a:as) k - | Just _ <- ctxResolve ctx a = allocate ctx as k - | Just n <- rctxResolve rec a - = Ins (Name (Env n) ZArgs) <$> allocate (Var a BX ctx) as k - | otherwise - = error $ "emitClosures: unknown reference: " ++ show a - -emitArgs :: Var v => Ctx v -> [v] -> Args -emitArgs ctx args - | Just l <- traverse (ctxResolve ctx) args = demuxArgs l - | otherwise - = error $ "could not resolve argument variables: " ++ show args - --- Turns a list of stack positions and calling conventions into the --- argument format expected in the machine code. 
-demuxArgs :: [(Int,Mem)] -> Args -demuxArgs as0 - = case bimap (fmap fst) (fmap fst) $ partition ((==UN).snd) as0 of - ([],[]) -> ZArgs - ([],[i]) -> BArg1 i - ([],[i,j]) -> BArg2 i j - ([i],[]) -> UArg1 i - ([i,j],[]) -> UArg2 i j - ([i],[j]) -> DArg2 i j - ([],bs) -> BArgN $ primArrayFromList bs - (us,[]) -> UArgN $ primArrayFromList us - -- TODO: handle ranges - (us,bs) -> DArgN (primArrayFromList us) (primArrayFromList bs) - -indent :: Int -> ShowS -indent ind = showString (replicate (ind*2) ' ') - -prettyCombs - :: (Comb, EnumMap Word64 Comb, Word64) - -> ShowS -prettyCombs (c, es, w) - = foldr (\(w,c) r -> prettyComb w c . showString "\n" . r) - id (mapToList es) - . showString "\n" . prettyComb w c - -prettyComb :: Word64 -> Comb -> ShowS -prettyComb w (Lam ua ba _ _ s) - = shows w . shows [ua,ba] - . showString ":\n" . prettySection 2 s - -prettySection :: Int -> Section -> ShowS -prettySection ind sec - = indent ind . case sec of - App _ r as -> - showString "App " - . showsPrec 12 r . showString " " . prettyArgs as - Call _ i as -> - showString "Call " . shows i . showString " " . prettyArgs as - Jump i as -> - showString "Jump " . shows i . showString " " . prettyArgs as - Match i bs -> - showString "Match " . shows i . showString "\n" - . prettyBranches (ind+1) bs - Yield as -> showString "Yield " . prettyArgs as - Ins i nx -> - prettyIns i . showString "\n" . prettySection ind nx - Let s n -> - showString "Let\n" . prettySection (ind+2) s - . showString "\n" . prettySection ind n - Die s -> showString $ "Die " ++ s - Exit -> showString "Exit" - -prettyBranches :: Int -> Branch -> ShowS -prettyBranches ind bs - = case bs of - Test1 i e df -> pdf df . picase i e - Test2 i ei j ej df -> pdf df . picase i ei . picase j ej - TestW df m -> - pdf df . foldr (\(i,e) r -> picase i e . r) id (mapToList m) - TestT df m -> - pdf df . foldr (\(i,e) r -> ptcase i e . r) id (M.toList m) - where - pdf e = indent ind . showString "DFLT ->\n" . 
prettySection (ind+1) e - ptcase t e - = showString "\n" . indent ind . shows t . showString " ->\n" - . prettySection (ind+1) e - picase i e - = showString "\n" . indent ind . shows i . showString " ->\n" - . prettySection (ind+1) e - -un :: ShowS -un = ('U':) - -bx :: ShowS -bx = ('B':) - -prettyIns :: Instr -> ShowS -prettyIns (Pack i as) - = showString "Pack " . shows i . (' ':) . prettyArgs as -prettyIns i = shows i - -prettyArgs :: Args -> ShowS -prettyArgs ZArgs = shows @[Int] [] -prettyArgs (UArg1 i) = un . shows [i] -prettyArgs (BArg1 i) = bx . shows [i] -prettyArgs (UArg2 i j) = un . shows [i,j] -prettyArgs (BArg2 i j) = bx . shows [i,j] -prettyArgs (DArg2 i j) = un . shows [i] . (' ':) . bx . shows [j] -prettyArgs (UArgR i l) = un . shows (Prelude.take l [i..]) -prettyArgs (BArgR i l) = bx . shows (Prelude.take l [i..]) -prettyArgs (DArgR i l j k) - = un . shows (Prelude.take l [i..]) . (' ':) - . bx . shows (Prelude.take k [j..]) -prettyArgs (UArgN v) = un . shows (primArrayToList v) -prettyArgs (BArgN v) = bx . shows (primArrayToList v) -prettyArgs (DArgN u b) - = un . shows (primArrayToList u) . (' ':) - . bx . shows (primArrayToList b) -prettyArgs (DArgV i j) = ('V':) . 
shows [i,j] diff --git a/parser-typechecker/src/Unison/Runtime/Machine.hs b/parser-typechecker/src/Unison/Runtime/Machine.hs deleted file mode 100644 index 2b3246561a..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Machine.hs +++ /dev/null @@ -1,1330 +0,0 @@ -{-# language DataKinds #-} -{-# language RankNTypes #-} -{-# language BangPatterns #-} -{-# language PatternGuards #-} - -module Unison.Runtime.Machine where - -import Data.Maybe (fromMaybe) - -import Data.Bits -import Data.String (fromString) -import Data.Foldable (toList) -import Data.Traversable -import Data.Word (Word64) - -import qualified Data.Text as Tx -import qualified Data.Text.IO as Tx -import qualified Data.Sequence as Sq -import qualified Data.Map.Strict as M - -import Control.Exception -import Control.Lens ((<&>)) -import Control.Concurrent (forkIOWithUnmask, ThreadId) -import Control.Monad ((<=<)) - -import qualified Data.Primitive.PrimArray as PA - -import Text.Read (readMaybe) - -import Unison.Reference (Reference) - -import Unison.Runtime.ANF (Mem(..), RTag) -import Unison.Runtime.Foreign -import Unison.Runtime.Stack -import Unison.Runtime.MCode - -import qualified Unison.Type as Rf -import qualified Unison.Runtime.IOSource as Rf - -import qualified Unison.Util.Bytes as By -import Unison.Util.EnumContainers as EC -import Unison.Util.Pretty as P - -type Tag = Word64 -type Env = Word64 -> Comb -type DEnv = EnumMap Word64 Closure -data REnv - = Refs - { tagRefs :: EnumMap RTag Reference - , combRefs :: EnumMap Word64 Reference - } - -type Unmask = forall a. IO a -> IO a - -data RuntimeExn - = PE (P.Pretty P.ColorText) - | BU Closure - deriving (Show) -instance Exception RuntimeExn - -die :: String -> IO a -die = throwIO . PE . P.lit . 
fromString - -info :: Show a => String -> a -> IO () -info ctx x = infos ctx (show x) -infos :: String -> String -> IO () -infos ctx s = putStrLn $ ctx ++ ": " ++ s - --- Entry point for evaluating a section -eval0 :: REnv -> Env -> Section -> IO () -eval0 renv !env !co = do - ustk <- alloc - bstk <- alloc - mask $ \unmask -> eval unmask renv env mempty ustk bstk KE co - --- Entry point for evaluating a numbered combinator. --- An optional callback for the base of the stack may be supplied. --- --- This is the entry point actually used in the interactive --- environment currently. -apply0 - :: Maybe (Stack 'UN -> Stack 'BX -> IO ()) - -> REnv -> Env -> Word64 -> IO () -apply0 !callback renv !env !i = do - ustk <- alloc - bstk <- alloc - mask $ \unmask -> - apply unmask renv env mempty ustk bstk k0 True ZArgs comb - where - comb = PAp (IC i $ env i) unull bnull - k0 = maybe KE (CB . Hook) callback - -lookupDenv :: Word64 -> DEnv -> Closure -lookupDenv p denv = fromMaybe BlackHole $ EC.lookup p denv - -exec - :: Unmask -> REnv -> Env -> DEnv - -> Stack 'UN -> Stack 'BX -> K - -> Instr - -> IO (DEnv, Stack 'UN, Stack 'BX, K) -exec _ _ !_ !denv !ustk !bstk !k (Info tx) = do - info tx ustk - info tx bstk - info tx k - pure (denv, ustk, bstk, k) -exec _ _ !env !denv !ustk !bstk !k (Name r args) = do - bstk <- name ustk bstk args =<< resolve env denv bstk r - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (SetDyn p i) = do - clo <- peekOff bstk i - pure (EC.mapInsert p clo denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Capture p) = do - (sk,denv,ustk,bstk,useg,bseg,k) <- splitCont denv ustk bstk k p - bstk <- bump bstk - poke bstk $ Captured sk useg bseg - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (UPrim1 op i) = do - ustk <- uprim1 ustk op i - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (UPrim2 op i j) = do - ustk <- uprim2 ustk op i j - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (BPrim1 op i) = 
do - (ustk,bstk) <- bprim1 ustk bstk op i - pure (denv, ustk, bstk, k) -exec _ renv !_ !denv !ustk !bstk !k (BPrim2 EQLU i j) = do - x <- peekOff bstk i - y <- peekOff bstk j - ustk <- bump ustk - poke ustk - $ case universalCompare cmb tag compare x y of - EQ -> 1 - _ -> 0 - pure (denv, ustk, bstk, k) - where - cmb w | Just r <- EC.lookup w (combRefs renv) = r - | otherwise = error $ "exec: unknown combinator: " ++ show w - tag t | Just r <- EC.lookup t (tagRefs renv) = r - | otherwise = error $ "exec: unknown data: " ++ show t -exec _ renv !_ !denv !ustk !bstk !k (BPrim2 CMPU i j) = do - x <- peekOff bstk i - y <- peekOff bstk j - ustk <- bump ustk - poke ustk . fromEnum $ universalCompare cmb tag compare x y - pure (denv, ustk, bstk, k) - where - cmb w | Just r <- EC.lookup w (combRefs renv) = r - | otherwise = error $ "exec: unknown combinator: " ++ show w - tag t | Just r <- EC.lookup t (tagRefs renv) = r - | otherwise = error $ "exec: unknown data: " ++ show t -exec _ _ !_ !denv !ustk !bstk !k (BPrim2 op i j) = do - (ustk,bstk) <- bprim2 ustk bstk op i j - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Pack t args) = do - clo <- buildData ustk bstk t args - bstk <- bump bstk - poke bstk clo - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Unpack i) = do - (ustk, bstk) <- dumpData ustk bstk =<< peekOff bstk i - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Print i) = do - t <- peekOffT bstk i - Tx.putStrLn t - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Lit (MI n)) = do - ustk <- bump ustk - poke ustk n - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Lit (MD d)) = do - ustk <- bump ustk - pokeD ustk d - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Lit (MT t)) = do - bstk <- bump bstk - poke bstk (Foreign (Wrap Rf.textRef t)) - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Lit (MM r)) = do - bstk <- bump bstk - poke bstk (Foreign (Wrap Rf.termLinkRef 
r)) - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Lit (MY r)) = do - bstk <- bump bstk - poke bstk (Foreign (Wrap Rf.typeLinkRef r)) - pure (denv, ustk, bstk, k) -exec _ _ !_ !denv !ustk !bstk !k (Reset ps) = do - pure (denv, ustk, bstk, Mark ps clos k) - where clos = EC.restrictKeys denv ps -exec _ _ !_ !denv !ustk !bstk !k (Seq as) = do - l <- closureArgs bstk as - bstk <- bump bstk - pokeS bstk $ Sq.fromList l - pure (denv, ustk, bstk, k) -exec unmask _ !_ !denv !ustk !bstk !k (ForeignCall catch (FF f) args) - = foreignArgs ustk bstk args - >>= perform - <&> uncurry (denv,,,k) - where - perform - | catch = foreignCatch unmask ustk bstk f - | otherwise = foreignResult ustk bstk <=< f -exec unmask renv !env !denv !ustk !bstk !k (Fork lz) = do - tid <- - unmask $ - forkEval renv env denv k lz <$> duplicate ustk <*> duplicate bstk - bstk <- bump bstk - poke bstk . Foreign . Wrap Rf.threadIdReference $ tid - pure (denv, ustk, bstk, k) -{-# inline exec #-} - -maskTag :: Word64 -> Word64 -maskTag i = i .&. 
0xFFFF - -eval :: Unmask -> REnv -> Env -> DEnv - -> Stack 'UN -> Stack 'BX -> K -> Section -> IO () -eval unmask renv !env !denv !ustk !bstk !k (Match i (TestT df cs)) = do - t <- peekOffT bstk i - eval unmask renv env denv ustk bstk k $ selectTextBranch t df cs -eval unmask renv !env !denv !ustk !bstk !k (Match i br) = do - n <- peekOffN ustk i - eval unmask renv env denv ustk bstk k $ selectBranch n br -eval unmask renv !env !denv !ustk !bstk !k (Yield args) - | asize ustk + asize bstk > 0 , BArg1 i <- args = do - peekOff bstk i >>= apply unmask renv env denv ustk bstk k False ZArgs - | otherwise = do - (ustk, bstk) <- moveArgs ustk bstk args - ustk <- frameArgs ustk - bstk <- frameArgs bstk - yield unmask renv env denv ustk bstk k -eval unmask renv !env !denv !ustk !bstk !k (App ck r args) = - resolve env denv bstk r - >>= apply unmask renv env denv ustk bstk k ck args -eval unmask renv !env !denv !ustk !bstk !k (Call ck n args) = - enter unmask renv env denv ustk bstk k ck args $ env n -eval unmask renv !env !denv !ustk !bstk !k (Jump i args) = - peekOff bstk i >>= jump unmask renv env denv ustk bstk k args -eval unmask renv !env !denv !ustk !bstk !k (Let nw nx) = do - (ustk, ufsz, uasz) <- saveFrame ustk - (bstk, bfsz, basz) <- saveFrame bstk - eval unmask renv env denv ustk bstk (Push ufsz bfsz uasz basz nx k) nw -eval unmask renv !env !denv !ustk !bstk !k (Ins i nx) = do - (denv, ustk, bstk, k) <- exec unmask renv env denv ustk bstk k i - eval unmask renv env denv ustk bstk k nx -eval _ _ !_ !_ !_ !_ !_ Exit = pure () -eval _ _ !_ !_ !_ !_ !_ (Die s) = die s -{-# noinline eval #-} - -forkEval - :: REnv -> Env -> DEnv - -> K -> Section -> Stack 'UN -> Stack 'BX -> IO ThreadId -forkEval renv env denv k nx ustk bstk = forkIOWithUnmask $ \unmask -> do - (denv, ustk, bstk, k) <- discardCont denv ustk bstk k 0 - eval unmask renv env denv ustk bstk k nx -{-# inline forkEval #-} - --- fast path application -enter - :: Unmask -> REnv -> Env -> DEnv -> Stack 'UN -> 
Stack 'BX -> K - -> Bool -> Args -> Comb -> IO () -enter unmask renv !env !denv !ustk !bstk !k !ck !args !comb = do - ustk <- if ck then ensure ustk uf else pure ustk - bstk <- if ck then ensure bstk bf else pure bstk - (ustk, bstk) <- moveArgs ustk bstk args - ustk <- acceptArgs ustk ua - bstk <- acceptArgs bstk ba - eval unmask renv env denv ustk bstk k entry - where - Lam ua ba uf bf entry = comb -{-# inline enter #-} - --- fast path by-name delaying -name :: Stack 'UN -> Stack 'BX -> Args -> Closure -> IO (Stack 'BX) -name !ustk !bstk !args clo = case clo of - PAp comb useg bseg -> do - (useg, bseg) <- closeArgs I ustk bstk useg bseg args - bstk <- bump bstk - poke bstk $ PAp comb useg bseg - pure bstk - _ -> die $ "naming non-function: " ++ show clo -{-# inline name #-} - --- slow path application -apply - :: Unmask -> REnv -> Env -> DEnv -> Stack 'UN -> Stack 'BX -> K - -> Bool -> Args -> Closure -> IO () -apply unmask renv !env !denv !ustk !bstk !k !ck !args clo = case clo of - PAp comb@(Lam_ ua ba uf bf entry) useg bseg - | ck || ua <= uac && ba <= bac -> do - ustk <- ensure ustk uf - bstk <- ensure bstk bf - (ustk, bstk) <- moveArgs ustk bstk args - ustk <- dumpSeg ustk useg A - bstk <- dumpSeg bstk bseg A - ustk <- acceptArgs ustk ua - bstk <- acceptArgs bstk ba - eval unmask renv env denv ustk bstk k entry - | otherwise -> do - (useg, bseg) <- closeArgs C ustk bstk useg bseg args - ustk <- discardFrame =<< frameArgs ustk - bstk <- discardFrame =<< frameArgs bstk - bstk <- bump bstk - poke bstk $ PAp comb useg bseg - yield unmask renv env denv ustk bstk k - where - uac = asize ustk + ucount args + uscount useg - bac = asize bstk + bcount args + bscount bseg - clo | ZArgs <- args, asize ustk == 0, asize bstk == 0 -> do - ustk <- discardFrame ustk - bstk <- discardFrame bstk - bstk <- bump bstk - poke bstk clo - yield unmask renv env denv ustk bstk k - | otherwise -> die $ "applying non-function: " ++ show clo -{-# inline apply #-} - -jump - :: Unmask -> 
REnv -> Env -> DEnv - -> Stack 'UN -> Stack 'BX -> K - -> Args -> Closure -> IO () -jump unmask renv !env !denv !ustk !bstk !k !args clo = case clo of - Captured sk useg bseg -> do - (useg, bseg) <- closeArgs K ustk bstk useg bseg args - ustk <- discardFrame ustk - bstk <- discardFrame bstk - ustk <- dumpSeg ustk useg . F $ ucount args - bstk <- dumpSeg bstk bseg . F $ bcount args - repush unmask renv env ustk bstk denv sk k - _ -> die "jump: non-cont" -{-# inline jump #-} - -repush - :: Unmask -> REnv -> Env - -> Stack 'UN -> Stack 'BX -> DEnv -> K -> K -> IO () -repush unmask renv !env !ustk !bstk = go - where - go !denv KE !k = yield unmask renv env denv ustk bstk k - go !denv (Mark ps cs sk) !k = go denv' sk $ Mark ps cs' k - where - denv' = cs <> EC.withoutKeys denv ps - cs' = EC.restrictKeys denv ps - go !denv (Push un bn ua ba nx sk) !k - = go denv sk $ Push un bn ua ba nx k - go !_ (CB _) !_ = die "repush: impossible" -{-# inline repush #-} - -moveArgs - :: Stack 'UN -> Stack 'BX - -> Args -> IO (Stack 'UN, Stack 'BX) -moveArgs !ustk !bstk ZArgs = do - ustk <- discardFrame ustk - bstk <- discardFrame bstk - pure (ustk, bstk) -moveArgs !ustk !bstk (DArgV i j) = do - ustk <- if ul > 0 - then prepareArgs ustk (ArgR 0 ul) - else discardFrame ustk - bstk <- if bl > 0 - then prepareArgs bstk (ArgR 0 bl) - else discardFrame bstk - pure (ustk, bstk) - where - ul = fsize ustk - i - bl = fsize bstk - j -moveArgs !ustk !bstk (UArg1 i) = do - ustk <- prepareArgs ustk (Arg1 i) - bstk <- discardFrame bstk - pure (ustk, bstk) -moveArgs !ustk !bstk (UArg2 i j) = do - ustk <- prepareArgs ustk (Arg2 i j) - bstk <- discardFrame bstk - pure (ustk, bstk) -moveArgs !ustk !bstk (UArgR i l) = do - ustk <- prepareArgs ustk (ArgR i l) - bstk <- discardFrame bstk - pure (ustk, bstk) -moveArgs !ustk !bstk (BArg1 i) = do - ustk <- discardFrame ustk - bstk <- prepareArgs bstk (Arg1 i) - pure (ustk, bstk) -moveArgs !ustk !bstk (BArg2 i j) = do - ustk <- discardFrame ustk - bstk <- 
prepareArgs bstk (Arg2 i j) - pure (ustk, bstk) -moveArgs !ustk !bstk (BArgR i l) = do - ustk <- discardFrame ustk - bstk <- prepareArgs bstk (ArgR i l) - pure (ustk, bstk) -moveArgs !ustk !bstk (DArg2 i j) = do - ustk <- prepareArgs ustk (Arg1 i) - bstk <- prepareArgs bstk (Arg1 j) - pure (ustk, bstk) -moveArgs !ustk !bstk (DArgR ui ul bi bl) = do - ustk <- prepareArgs ustk (ArgR ui ul) - bstk <- prepareArgs bstk (ArgR bi bl) - pure (ustk, bstk) -moveArgs !ustk !bstk (UArgN as) = do - ustk <- prepareArgs ustk (ArgN as) - bstk <- discardFrame bstk - pure (ustk, bstk) -moveArgs !ustk !bstk (BArgN as) = do - ustk <- discardFrame ustk - bstk <- prepareArgs bstk (ArgN as) - pure (ustk, bstk) -moveArgs !ustk !bstk (DArgN us bs) = do - ustk <- prepareArgs ustk (ArgN us) - bstk <- prepareArgs bstk (ArgN bs) - pure (ustk, bstk) -{-# inline moveArgs #-} - -closureArgs :: Stack 'BX -> Args -> IO [Closure] -closureArgs !_ ZArgs = pure [] -closureArgs !bstk (BArg1 i) = do - x <- peekOff bstk i - pure [x] -closureArgs !bstk (BArg2 i j) = do - x <- peekOff bstk i - y <- peekOff bstk j - pure [x,y] -closureArgs !bstk (BArgR i l) - = for (take l [i..]) (peekOff bstk) -closureArgs !bstk (BArgN bs) - = for (PA.primArrayToList bs) (peekOff bstk) -closureArgs !_ _ - = error "closure arguments can only be boxed." 
-{-# inline closureArgs #-} - -foreignArgs :: Stack 'UN -> Stack 'BX -> Args -> IO ForeignArgs -foreignArgs !_ !_ ZArgs = pure [] -foreignArgs !ustk !_ (UArg1 i) = do - x <- peekOff ustk i - pure [Wrap Rf.intRef x] -foreignArgs !ustk !_ (UArg2 i j) = do - x <- peekOff ustk i - y <- peekOff ustk j - pure [Wrap Rf.intRef x, Wrap Rf.intRef y] -foreignArgs !_ !bstk (BArg1 i) = do - x <- peekOff bstk i - pure [marshalToForeign x] -foreignArgs !_ !bstk (BArg2 i j) = do - x <- peekOff bstk i - y <- peekOff bstk j - pure [marshalToForeign x, marshalToForeign y] -foreignArgs !ustk !bstk (DArg2 i j) = do - x <- peekOff ustk i - y <- peekOff bstk j - pure [Wrap Rf.intRef x, marshalToForeign y] -foreignArgs !ustk !_ (UArgR ui ul) = - for (take ul [ui..]) $ fmap (Wrap Rf.intRef) . peekOff ustk -foreignArgs !_ !bstk (BArgR bi bl) = - for (take bl [bi..]) $ fmap marshalToForeign . peekOff bstk -foreignArgs !ustk !bstk (DArgR ui ul bi bl) = do - uas <- for (take ul [ui..]) $ fmap (Wrap Rf.intRef) . peekOff ustk - bas <- for (take bl [bi..]) $ fmap marshalToForeign . peekOff bstk - pure $ uas ++ bas -foreignArgs !ustk !_ (UArgN us) = - for (PA.primArrayToList us) $ fmap (Wrap Rf.intRef) . peekOff ustk -foreignArgs !_ !bstk (BArgN bs) = do - for (PA.primArrayToList bs) $ fmap (Wrap Rf.intRef) . peekOff bstk -foreignArgs !ustk !bstk (DArgN us bs) = do - uas <- for (PA.primArrayToList us) $ - fmap (Wrap Rf.intRef) . peekOff ustk - bas <- for (PA.primArrayToList bs) $ - fmap (marshalToForeign) . peekOff bstk - pure $ uas ++ bas -foreignArgs !ustk !bstk (DArgV ui bi) = do - uas <- for ([0..ul]) $ fmap (Wrap Rf.intRef) . peekOff ustk - bas <- for ([0..bl]) $ fmap marshalToForeign . 
peekOff bstk - pure $ uas ++ bas - where - ul = fsize ustk - ui - 1 - bl = fsize bstk - bi - 1 - -foreignCatch - :: (IO ForeignRslt -> IO ForeignRslt) - -> Stack 'UN -> Stack 'BX - -> (ForeignArgs -> IO ForeignRslt) - -> ForeignArgs - -> IO (Stack 'UN, Stack 'BX) -foreignCatch unmask ustk bstk f args - = try (unmask $ f args) - >>= foreignResult ustk bstk . encodeExn - where - encodeExn :: Either IOError ForeignRslt -> ForeignRslt - encodeExn (Left e) = [Left 0, Right $ Wrap Rf.errorReference e] - encodeExn (Right r) = Left 1 : r - -foreignResult - :: Stack 'UN -> Stack 'BX -> ForeignRslt -> IO (Stack 'UN, Stack 'BX) -foreignResult !ustk !bstk [] = pure (ustk,bstk) -foreignResult !ustk !bstk (Left i : rs) = do - ustk <- bump ustk - poke ustk i - foreignResult ustk bstk rs -foreignResult !ustk !bstk (Right x : rs) = do - bstk <- bump bstk - poke bstk $ Foreign x - foreignResult ustk bstk rs - -buildData - :: Stack 'UN -> Stack 'BX -> Tag -> Args -> IO Closure -buildData !_ !_ !t ZArgs = pure $ Enum t -buildData !ustk !_ !t (UArg1 i) = do - x <- peekOff ustk i - pure $ DataU1 t x -buildData !ustk !_ !t (UArg2 i j) = do - x <- peekOff ustk i - y <- peekOff ustk j - pure $ DataU2 t x y -buildData !_ !bstk !t (BArg1 i) = do - x <- peekOff bstk i - pure $ DataB1 t x -buildData !_ !bstk !t (BArg2 i j) = do - x <- peekOff bstk i - y <- peekOff bstk j - pure $ DataB2 t x y -buildData !ustk !bstk !t (DArg2 i j) = do - x <- peekOff ustk i - y <- peekOff bstk j - pure $ DataUB t x y -buildData !ustk !_ !t (UArgR i l) = do - useg <- augSeg I ustk unull (Just $ ArgR i l) - pure $ DataG t useg bnull -buildData !_ !bstk !t (BArgR i l) = do - bseg <- augSeg I bstk bnull (Just $ ArgR i l) - pure $ DataG t unull bseg -buildData !ustk !bstk !t (DArgR ui ul bi bl) = do - useg <- augSeg I ustk unull (Just $ ArgR ui ul) - bseg <- augSeg I bstk bnull (Just $ ArgR bi bl) - pure $ DataG t useg bseg -buildData !ustk !_ !t (UArgN as) = do - useg <- augSeg I ustk unull (Just $ ArgN as) - pure 
$ DataG t useg bnull -buildData !_ !bstk !t (BArgN as) = do - bseg <- augSeg I bstk bnull (Just $ ArgN as) - pure $ DataG t unull bseg -buildData !ustk !bstk !t (DArgN us bs) = do - useg <- augSeg I ustk unull (Just $ ArgN us) - bseg <- augSeg I bstk bnull (Just $ ArgN bs) - pure $ DataG t useg bseg -buildData !ustk !bstk !t (DArgV ui bi) = do - useg <- if ul > 0 - then augSeg I ustk unull (Just $ ArgR 0 ul) - else pure unull - bseg <- if bl > 0 - then augSeg I bstk bnull (Just $ ArgR 0 bl) - else pure bnull - pure $ DataG t useg bseg - where - ul = fsize ustk - ui - bl = fsize bstk - bi -{-# inline buildData #-} - -dumpData - :: Stack 'UN -> Stack 'BX -> Closure -> IO (Stack 'UN, Stack 'BX) -dumpData !ustk !bstk (Enum t) = do - ustk <- bump ustk - pokeN ustk $ maskTag t - pure (ustk, bstk) -dumpData !ustk !bstk (DataU1 t x) = do - ustk <- bumpn ustk 2 - pokeOff ustk 1 x - pokeN ustk $ maskTag t - pure (ustk, bstk) -dumpData !ustk !bstk (DataU2 t x y) = do - ustk <- bumpn ustk 3 - pokeOff ustk 2 y - pokeOff ustk 1 x - pokeN ustk $ maskTag t - pure (ustk, bstk) -dumpData !ustk !bstk (DataB1 t x) = do - ustk <- bump ustk - bstk <- bump bstk - poke bstk x - pokeN ustk $ maskTag t - pure (ustk, bstk) -dumpData !ustk !bstk (DataB2 t x y) = do - ustk <- bump ustk - bstk <- bumpn bstk 2 - pokeOff bstk 1 y - poke bstk x - pokeN ustk $ maskTag t - pure (ustk, bstk) -dumpData !ustk !bstk (DataUB t x y) = do - ustk <- bumpn ustk 2 - bstk <- bump bstk - pokeOff ustk 1 x - poke bstk y - pokeN ustk $ maskTag t - pure (ustk, bstk) -dumpData !ustk !bstk (DataG t us bs) = do - ustk <- dumpSeg ustk us S - bstk <- dumpSeg bstk bs S - ustk <- bump ustk - pokeN ustk $ maskTag t - pure (ustk, bstk) -dumpData !_ !_ clo = die $ "dumpData: bad closure: " ++ show clo -{-# inline dumpData #-} - --- Note: although the representation allows it, it is impossible --- to under-apply one sort of argument while over-applying the --- other. 
Thus, it is unnecessary to worry about doing tricks to --- only grab a certain number of arguments. -closeArgs - :: Augment - -> Stack 'UN -> Stack 'BX - -> Seg 'UN -> Seg 'BX - -> Args -> IO (Seg 'UN, Seg 'BX) -closeArgs mode !ustk !bstk !useg !bseg args = - (,) <$> augSeg mode ustk useg uargs - <*> augSeg mode bstk bseg bargs - where - (uargs, bargs) = case args of - ZArgs -> (Nothing, Nothing) - UArg1 i -> (Just $ Arg1 i, Nothing) - BArg1 i -> (Nothing, Just $ Arg1 i) - UArg2 i j -> (Just $ Arg2 i j, Nothing) - BArg2 i j -> (Nothing, Just $ Arg2 i j) - UArgR i l -> (Just $ ArgR i l, Nothing) - BArgR i l -> (Nothing, Just $ ArgR i l) - DArg2 i j -> (Just $ Arg1 i, Just $ Arg1 j) - DArgR ui ul bi bl -> (Just $ ArgR ui ul, Just $ ArgR bi bl) - UArgN as -> (Just $ ArgN as, Nothing) - BArgN as -> (Nothing, Just $ ArgN as) - DArgN us bs -> (Just $ ArgN us, Just $ ArgN bs) - DArgV ui bi -> (ua, ba) - where - ua | ul > 0 = Just $ ArgR 0 ul - | otherwise = Nothing - ba | bl > 0 = Just $ ArgR 0 bl - | otherwise = Nothing - ul = fsize ustk - ui - bl = fsize bstk - bi - -peekForeign :: Stack 'BX -> Int -> IO a -peekForeign bstk i - = peekOff bstk i >>= \case - Foreign x -> pure $ unwrapForeign x - _ -> die "bad foreign argument" -{-# inline peekForeign #-} - -uprim1 :: Stack 'UN -> UPrim1 -> Int -> IO (Stack 'UN) -uprim1 !ustk DECI !i = do - m <- peekOff ustk i - ustk <- bump ustk - poke ustk (m-1) - pure ustk -uprim1 !ustk INCI !i = do - m <- peekOff ustk i - ustk <- bump ustk - poke ustk (m+1) - pure ustk -uprim1 !ustk NEGI !i = do - m <- peekOff ustk i - ustk <- bump ustk - poke ustk (-m) - pure ustk -uprim1 !ustk SGNI !i = do - m <- peekOff ustk i - ustk <- bump ustk - poke ustk (signum m) - pure ustk -uprim1 !ustk ABSF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (abs d) - pure ustk -uprim1 !ustk CEIL !i = do - d <- peekOffD ustk i - ustk <- bump ustk - poke ustk (ceiling d) - pure ustk -uprim1 !ustk FLOR !i = do - d <- peekOffD ustk i - ustk <- bump 
ustk - poke ustk (floor d) - pure ustk -uprim1 !ustk TRNF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - poke ustk (truncate d) - pure ustk -uprim1 !ustk RNDF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - poke ustk (round d) - pure ustk -uprim1 !ustk EXPF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (exp d) - pure ustk -uprim1 !ustk LOGF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (log d) - pure ustk -uprim1 !ustk SQRT !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (sqrt d) - pure ustk -uprim1 !ustk COSF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (cos d) - pure ustk -uprim1 !ustk SINF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (sin d) - pure ustk -uprim1 !ustk TANF !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (tan d) - pure ustk -uprim1 !ustk COSH !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (cosh d) - pure ustk -uprim1 !ustk SINH !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (sinh d) - pure ustk -uprim1 !ustk TANH !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (tanh d) - pure ustk -uprim1 !ustk ACOS !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (acos d) - pure ustk -uprim1 !ustk ASIN !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (asin d) - pure ustk -uprim1 !ustk ATAN !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (atan d) - pure ustk -uprim1 !ustk ASNH !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (asinh d) - pure ustk -uprim1 !ustk ACSH !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (acosh d) - pure ustk -uprim1 !ustk ATNH !i = do - d <- peekOffD ustk i - ustk <- bump ustk - pokeD ustk (atanh d) - pure ustk -uprim1 !ustk ITOF !i = do - n <- peekOff ustk i - ustk <- bump ustk - pokeD ustk (fromIntegral n) - pure ustk -uprim1 !ustk NTOF !i = do - n <- peekOffN ustk i - ustk <- bump ustk - pokeD 
ustk (fromIntegral n) - pure ustk -uprim1 !ustk LZRO !i = do - n <- peekOffN ustk i - ustk <- bump ustk - poke ustk (countLeadingZeros n) - pure ustk -uprim1 !ustk TZRO !i = do - n <- peekOffN ustk i - ustk <- bump ustk - poke ustk (countTrailingZeros n) - pure ustk -uprim1 !ustk COMN !i = do - n <- peekOffN ustk i - ustk <- bump ustk - pokeN ustk (complement n) - pure ustk -{-# inline uprim1 #-} - -uprim2 :: Stack 'UN -> UPrim2 -> Int -> Int -> IO (Stack 'UN) -uprim2 !ustk ADDI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk (m+n) - pure ustk -uprim2 !ustk SUBI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk (m-n) - pure ustk -uprim2 !ustk MULI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk (m*n) - pure ustk -uprim2 !ustk DIVI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk (m`div`n) - pure ustk -uprim2 !ustk MODI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk (m`mod`n) - pure ustk -uprim2 !ustk SHLI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk (m`shiftL`n) - pure ustk -uprim2 !ustk SHRI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk (m`shiftR`n) - pure ustk -uprim2 !ustk SHRN !i !j = do - m <- peekOffN ustk i - n <- peekOff ustk j - ustk <- bump ustk - pokeN ustk (m`shiftR`n) - pure ustk -uprim2 !ustk POWI !i !j = do - m <- peekOff ustk i - n <- peekOffN ustk j - ustk <- bump ustk - poke ustk (m^n) - pure ustk -uprim2 !ustk EQLI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk $ if m == n then 1 else 0 - pure ustk -uprim2 !ustk LEQI !i !j = do - m <- peekOff ustk i - n <- peekOff ustk j - ustk <- bump ustk - poke ustk $ if m <= n then 1 else 0 - pure ustk -uprim2 !ustk LEQN !i !j = do - m <- peekOffN ustk i - n <- peekOffN ustk j - ustk <- bump ustk - 
poke ustk $ if m <= n then 1 else 0 - pure ustk -uprim2 !ustk ADDF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (x + y) - pure ustk -uprim2 !ustk SUBF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (x - y) - pure ustk -uprim2 !ustk MULF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (x * y) - pure ustk -uprim2 !ustk DIVF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (x / y) - pure ustk -uprim2 !ustk LOGB !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (logBase x y) - pure ustk -uprim2 !ustk POWF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (x ** y) - pure ustk -uprim2 !ustk MAXF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (max x y) - pure ustk -uprim2 !ustk MINF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (min x y) - pure ustk -uprim2 !ustk EQLF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (if x == y then 1 else 0) - pure ustk -uprim2 !ustk LEQF !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (if x <= y then 1 else 0) - pure ustk -uprim2 !ustk ATN2 !i !j = do - x <- peekOffD ustk i - y <- peekOffD ustk j - ustk <- bump ustk - pokeD ustk (atan2 x y) - pure ustk -uprim2 !ustk ANDN !i !j = do - x <- peekOffN ustk i - y <- peekOffN ustk j - ustk <- bump ustk - pokeN ustk (x .&. y) - pure ustk -uprim2 !ustk IORN !i !j = do - x <- peekOffN ustk i - y <- peekOffN ustk j - ustk <- bump ustk - pokeN ustk (x .|. 
y) - pure ustk -uprim2 !ustk XORN !i !j = do - x <- peekOffN ustk i - y <- peekOffN ustk j - ustk <- bump ustk - pokeN ustk (xor x y) - pure ustk -{-# inline uprim2 #-} - -bprim1 - :: Stack 'UN -> Stack 'BX -> BPrim1 -> Int - -> IO (Stack 'UN, Stack 'BX) -bprim1 !ustk !bstk SIZT i = do - t <- peekOffT bstk i - ustk <- bump ustk - poke ustk $ Tx.length t - pure (ustk, bstk) -bprim1 !ustk !bstk SIZS i = do - s <- peekOffS bstk i - ustk <- bump ustk - poke ustk $ Sq.length s - pure (ustk, bstk) -bprim1 !ustk !bstk ITOT i = do - n <- peekOff ustk i - bstk <- bump bstk - pokeT bstk . Tx.pack $ show n - pure (ustk, bstk) -bprim1 !ustk !bstk NTOT i = do - n <- peekOffN ustk i - bstk <- bump bstk - pokeT bstk . Tx.pack $ show n - pure (ustk, bstk) -bprim1 !ustk !bstk FTOT i = do - f <- peekOffD ustk i - bstk <- bump bstk - pokeT bstk . Tx.pack $ show f - pure (ustk, bstk) -bprim1 !ustk !bstk USNC i - = peekOffT bstk i >>= \t -> case Tx.unsnoc t of - Nothing -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - Just (t, c) -> do - ustk <- bumpn ustk 2 - bstk <- bump bstk - pokeOff ustk 1 $ fromEnum c - poke ustk 1 - pokeT bstk t - pure (ustk, bstk) -bprim1 !ustk !bstk UCNS i - = peekOffT bstk i >>= \t -> case Tx.uncons t of - Nothing -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - Just (c, t) -> do - ustk <- bumpn ustk 2 - bstk <- bump bstk - pokeOff ustk 1 $ fromEnum c - poke ustk 1 - pokeT bstk t - pure (ustk, bstk) -bprim1 !ustk !bstk TTOI i - = peekOffT bstk i >>= \t -> case readm $ Tx.unpack t of - Nothing -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - Just n -> do - ustk <- bumpn ustk 2 - poke ustk 1 - pokeOff ustk 1 n - pure (ustk, bstk) - where - readm ('+':s) = readMaybe s - readm s = readMaybe s -bprim1 !ustk !bstk TTON i - = peekOffT bstk i >>= \t -> case readMaybe $ Tx.unpack t of - Nothing -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - Just n -> do - ustk <- bumpn ustk 2 - poke ustk 1 - pokeOffN ustk 1 n - 
pure (ustk, bstk) -bprim1 !ustk !bstk TTOF i - = peekOffT bstk i >>= \t -> case readMaybe $ Tx.unpack t of - Nothing -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - Just f -> do - ustk <- bumpn ustk 2 - poke ustk 1 - pokeOffD ustk 1 f - pure (ustk, bstk) -bprim1 !ustk !bstk VWLS i - = peekOffS bstk i >>= \case - Sq.Empty -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - x Sq.:<| xs -> do - ustk <- bump ustk - poke ustk 1 - bstk <- bumpn bstk 2 - pokeOffS bstk 1 xs - poke bstk x - pure (ustk, bstk) -bprim1 !ustk !bstk VWRS i - = peekOffS bstk i >>= \case - Sq.Empty -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - xs Sq.:|> x -> do - ustk <- bump ustk - poke ustk 1 - bstk <- bumpn bstk 2 - pokeOff bstk 1 x - pokeS bstk xs - pure (ustk, bstk) -bprim1 !ustk !bstk PAKT i = do - s <- peekOffS bstk i - bstk <- bump bstk - pokeT bstk . Tx.pack . toList $ clo2char <$> s - pure (ustk, bstk) - where - clo2char (DataU1 655360 i) = toEnum i - clo2char c = error $ "pack text: non-character closure: " ++ show c -bprim1 !ustk !bstk UPKT i = do - t <- peekOffT bstk i - bstk <- bump bstk - pokeS bstk . Sq.fromList - . fmap (DataU1 655360 . fromEnum) . Tx.unpack $ t - pure (ustk, bstk) -bprim1 !ustk !bstk PAKB i = do - s <- peekOffS bstk i - bstk <- bump bstk - pokeB bstk . By.fromWord8s . fmap clo2w8 $ toList s - pure (ustk, bstk) - where - clo2w8 (DataU1 65536 n) = toEnum n - clo2w8 c = error $ "pack bytes: non-natural closure: " ++ show c -bprim1 !ustk !bstk UPKB i = do - b <- peekOffB bstk i - bstk <- bump bstk - pokeS bstk . Sq.fromList . fmap (DataU1 65536 . fromEnum) - $ By.toWord8s b - pure (ustk, bstk) -bprim1 !ustk !bstk SIZB i = do - b <- peekOffB bstk i - ustk <- bump ustk - poke ustk $ By.size b - pure (ustk, bstk) -bprim1 !ustk !bstk FLTB i = do - b <- peekOffB bstk i - bstk <- bump bstk - pokeB bstk $ By.flatten b - pure (ustk, bstk) -bprim1 !_ !bstk THRO i - = throwIO . 
BU =<< peekOff bstk i -{-# inline bprim1 #-} - -bprim2 - :: Stack 'UN -> Stack 'BX -> BPrim2 -> Int -> Int - -> IO (Stack 'UN, Stack 'BX) -bprim2 !ustk !bstk EQLU i j = do - x <- peekOff bstk i - y <- peekOff bstk j - ustk <- bump ustk - poke ustk $ if x == y then 1 else 0 - pure (ustk, bstk) -bprim2 !ustk !bstk DRPT i j = do - n <- peekOff ustk i - t <- peekOffT bstk j - bstk <- bump bstk - pokeT bstk $ Tx.drop n t - pure (ustk, bstk) -bprim2 !ustk !bstk CATT i j = do - x <- peekOffT bstk i - y <- peekOffT bstk j - bstk <- bump bstk - pokeT bstk $ Tx.append x y - pure (ustk, bstk) -bprim2 !ustk !bstk TAKT i j = do - n <- peekOff ustk i - t <- peekOffT bstk j - bstk <- bump bstk - pokeT bstk $ Tx.take n t - pure (ustk, bstk) -bprim2 !ustk !bstk EQLT i j = do - x <- peekOffT bstk i - y <- peekOffT bstk j - ustk <- bump ustk - poke ustk $ if x == y then 1 else 0 - pure (ustk, bstk) -bprim2 !ustk !bstk LEQT i j = do - x <- peekOffT bstk i - y <- peekOffT bstk j - ustk <- bump ustk - poke ustk $ if x <= y then 1 else 0 - pure (ustk, bstk) -bprim2 !ustk !bstk LEST i j = do - x <- peekOffT bstk i - y <- peekOffT bstk j - ustk <- bump ustk - poke ustk $ if x < y then 1 else 0 - pure (ustk, bstk) -bprim2 !ustk !bstk DRPS i j = do - n <- peekOff ustk i - s <- peekOffS bstk j - bstk <- bump bstk - pokeS bstk $ Sq.drop n s - pure (ustk, bstk) -bprim2 !ustk !bstk TAKS i j = do - n <- peekOff ustk i - s <- peekOffS bstk j - bstk <- bump bstk - pokeS bstk $ Sq.take n s - pure (ustk, bstk) -bprim2 !ustk !bstk CONS i j = do - x <- peekOff bstk i - s <- peekOffS bstk j - bstk <- bump bstk - pokeS bstk $ x Sq.<| s - pure (ustk, bstk) -bprim2 !ustk !bstk SNOC i j = do - s <- peekOffS bstk i - x <- peekOff bstk j - bstk <- bump bstk - pokeS bstk $ s Sq.|> x - pure (ustk, bstk) -bprim2 !ustk !bstk CATS i j = do - x <- peekOffS bstk i - y <- peekOffS bstk j - bstk <- bump bstk - pokeS bstk $ x Sq.>< y - pure (ustk, bstk) -bprim2 !ustk !bstk IDXS i j = do - n <- peekOff ustk i - s <- 
peekOffS bstk j - case Sq.lookup n s of - Nothing -> do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - Just x -> do - ustk <- bump ustk - poke ustk 1 - bstk <- bump bstk - poke bstk x - pure (ustk, bstk) -bprim2 !ustk !bstk SPLL i j = do - n <- peekOff ustk i - s <- peekOffS bstk j - if Sq.length s < n then do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - else do - ustk <- bump ustk - poke ustk 1 - bstk <- bumpn bstk 2 - let (l,r) = Sq.splitAt n s - pokeOffS bstk 1 r - pokeS bstk l - pure (ustk, bstk) -bprim2 !ustk !bstk SPLR i j = do - n <- peekOff ustk i - s <- peekOffS bstk j - if Sq.length s < n then do - ustk <- bump ustk - poke ustk 0 - pure (ustk, bstk) - else do - ustk <- bump ustk - poke ustk 1 - bstk <- bumpn bstk 2 - let (l,r) = Sq.splitAt (Sq.length s - n) s - pokeOffS bstk 1 r - pokeS bstk l - pure (ustk, bstk) -bprim2 !ustk !bstk TAKB i j = do - n <- peekOff ustk i - b <- peekOffB bstk j - bstk <- bump bstk - pokeB bstk $ By.take n b - pure (ustk, bstk) -bprim2 !ustk !bstk DRPB i j = do - n <- peekOff ustk i - b <- peekOffB bstk j - bstk <- bump bstk - pokeB bstk $ By.drop n b - pure (ustk, bstk) -bprim2 !ustk !bstk IDXB i j = do - n <- peekOff ustk i - b <- peekOffB bstk j - ustk <- bump ustk - ustk <- case By.at n b of - Nothing -> ustk <$ poke ustk 0 - Just x -> do - poke ustk $ fromIntegral x - ustk <- bump ustk - ustk <$ poke ustk 0 - pure (ustk, bstk) -bprim2 !ustk !bstk CATB i j = do - l <- peekOffB bstk i - r <- peekOffB bstk j - bstk <- bump bstk - pokeB bstk $ l <> r - pure (ustk, bstk) -bprim2 !ustk !bstk CMPU _ _ = pure (ustk, bstk) -- impossible -{-# inline bprim2 #-} - -yield - :: Unmask -> REnv -> Env -> DEnv - -> Stack 'UN -> Stack 'BX -> K -> IO () -yield unmask renv !env !denv !ustk !bstk !k = leap denv k - where - leap !denv0 (Mark ps cs k) = do - let denv = cs <> EC.withoutKeys denv0 ps - clo = denv0 EC.! EC.findMin ps - poke bstk . 
DataB1 0 =<< peek bstk - apply unmask renv env denv ustk bstk k False (BArg1 0) clo - leap !denv (Push ufsz bfsz uasz basz nx k) = do - ustk <- restoreFrame ustk ufsz uasz - bstk <- restoreFrame bstk bfsz basz - eval unmask renv env denv ustk bstk k nx - leap _ (CB (Hook f)) = f ustk bstk - leap _ KE = pure () -{-# inline yield #-} - -selectTextBranch - :: Tx.Text -> Section -> M.Map Tx.Text Section -> Section -selectTextBranch t df cs = M.findWithDefault df t cs -{-# inline selectTextBranch #-} - -selectBranch :: Tag -> Branch -> Section -selectBranch t (Test1 u y n) - | t == u = y - | otherwise = n -selectBranch t (Test2 u cu v cv e) - | t == u = cu - | t == v = cv - | otherwise = e -selectBranch t (TestW df cs) = lookupWithDefault df t cs -selectBranch _ (TestT {}) = error "impossible" -{-# inline selectBranch #-} - -splitCont - :: DEnv -> Stack 'UN -> Stack 'BX -> K - -> Word64 -> IO (K, DEnv, Stack 'UN, Stack 'BX, Seg 'UN, Seg 'BX, K) -splitCont !denv !ustk !bstk !k !p - = walk denv (asize ustk) (asize bstk) KE k - where - walk !denv !usz !bsz !ck KE - = die "fell off stack" >> finish denv usz bsz ck KE - walk !denv !usz !bsz !ck (CB _) - = die "fell off stack" >> finish denv usz bsz ck KE - walk !denv !usz !bsz !ck (Mark ps cs k) - | EC.member p ps = finish denv' usz bsz ck k - | otherwise = walk denv' usz bsz (Mark ps cs' ck) k - where - denv' = cs <> EC.withoutKeys denv ps - cs' = EC.restrictKeys denv ps - walk !denv !usz !bsz !ck (Push un bn ua ba br k) - = walk denv (usz+un+ua) (bsz+bn+ba) (Push un bn ua ba br ck) k - - finish !denv !usz !bsz !ck !k = do - (useg, ustk) <- grab ustk usz - (bseg, bstk) <- grab bstk bsz - return (ck, denv, ustk, bstk, useg, bseg, k) -{-# inline splitCont #-} - -discardCont - :: DEnv -> Stack 'UN -> Stack 'BX -> K - -> Word64 -> IO (DEnv, Stack 'UN, Stack 'BX, K) -discardCont denv ustk bstk k p - = splitCont denv ustk bstk k p - <&> \(_, denv, ustk, bstk, _, _, k) -> (denv, ustk, bstk, k) -{-# inline discardCont #-} - 
-resolve :: Env -> DEnv -> Stack 'BX -> Ref -> IO Closure -resolve env _ _ (Env i) = return $ PAp (IC i $ env i) unull bnull -resolve _ _ bstk (Stk i) = peekOff bstk i -resolve _ denv _ (Dyn i) = case EC.lookup i denv of - Just clo -> pure clo - _ -> die $ "resolve: looked up bad dynamic: " ++ show i diff --git a/parser-typechecker/src/Unison/Runtime/Pattern.hs b/parser-typechecker/src/Unison/Runtime/Pattern.hs deleted file mode 100644 index 9c7f9f20e6..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Pattern.hs +++ /dev/null @@ -1,664 +0,0 @@ -{-# language BangPatterns #-} -{-# language ViewPatterns #-} -{-# language PatternGuards #-} -{-# language TupleSections #-} -{-# language PatternSynonyms #-} -{-# language OverloadedStrings #-} - -module Unison.Runtime.Pattern - ( DataSpec - , splitPatterns - , builtinDataSpec - ) where - -import Control.Applicative ((<|>)) -import Control.Lens ((<&>)) -import Control.Monad.State (State, state, evalState, runState, modify) - -import Data.List (transpose) -import Data.Maybe (catMaybes, fromMaybe, listToMaybe) -import Data.Word (Word64) - -import Data.Set as Set (Set, fromList, member) - -import Unison.ABT - (absChain', visitPure, pattern AbsN', changeVars) -import Unison.Builtin.Decls (builtinDataDecls, builtinEffectDecls) -import Unison.DataDeclaration (declFields) -import Unison.Pattern -import qualified Unison.Pattern as P -import Unison.Reference (Reference(..)) -import Unison.Symbol (Symbol) -import Unison.Term hiding (Term) -import qualified Unison.Term as Tm -import Unison.Var (Var, typed, freshIn, freshenId, Type(Pattern)) - -import qualified Unison.Type as Rf - -import Data.Map.Strict - (Map, toList, fromListWith, insertWith) -import qualified Data.Map.Strict as Map - -type Term v = Tm.Term v () - --- Represents the number of fields of constructors of a data type/ --- ability in order of constructors -type Cons = [Int] - --- Maps references to the constructor information for abilities (left) --- and data types 
(right) -type DataSpec = Map Reference (Either Cons Cons) - -type Ctx v = Map v Reference - --- Representation of a row in a pattern compilation matrix. --- There is a list of patterns annotated with the variables they --- are matching against, an optional guard, and the body of the --- 'match' clause associated with this row. -data PatternRow v - = PR - { matches :: [Pattern v] - , guard :: Maybe (Term v) - , body :: Term v - } deriving (Show) - --- This is the data and ability 'constructor' information for all --- the things defined in Haskell source code. -builtinDataSpec :: DataSpec -builtinDataSpec = Map.fromList decls - where - decls = [ (DerivedId x, declFields $ Right y) - | (_,x,y) <- builtinDataDecls @Symbol ] - ++ [ (DerivedId x, declFields $ Left y) - | (_,x,y) <- builtinEffectDecls @Symbol ] - --- A pattern compilation matrix is just a list of rows. There is --- no need for the rows to have uniform length; the variable --- annotations on the patterns in the rows keep track of what --- should be matched against when decomposing a matrix. -data PatternMatrix v - = PM { _rows :: [PatternRow v] } - deriving Show - --- Heuristics guide the pattern compilation. They inspect the --- pattern matrix and (may) choose a variable to split on next. --- The full strategy will consist of multiple heuristics composed --- in series. 
-type Heuristic v = PatternMatrix v -> Maybe v - -choose :: [Heuristic v] -> PatternMatrix v -> v -choose [] (PM (PR (p:_) _ _ : _)) = loc p -choose [] _ = error "pattern matching: failed to choose a splitting" -choose (h:hs) m - | Just i <- h m = i - | otherwise = choose hs m - -refutable :: P.Pattern a -> Bool -refutable (P.Unbound _) = False -refutable (P.Var _) = False -refutable _ = True - -rowIrrefutable :: PatternRow v -> Bool -rowIrrefutable (PR ps _ _) = null ps - -firstRow :: ([P.Pattern v] -> Maybe v) -> Heuristic v -firstRow f (PM (r:_)) = f $ matches r -firstRow _ _ = Nothing - -heuristics :: [Heuristic v] -heuristics = [firstRow $ fmap loc . listToMaybe] - -extractVar :: Var v => P.Pattern v -> Maybe v -extractVar p - | P.Unbound{} <- p = Nothing - | otherwise = Just (loc p) - -extractVars :: Var v => [P.Pattern v] -> [v] -extractVars = catMaybes . fmap extractVar - --- Splits a data type pattern, yielding its subpatterns. The provided --- integers are the tag and number of fields for the constructor being --- matched against. A constructor pattern thus only yields results if --- it matches the tag (and number of subpatterns as a consistency --- check), while variables can also match and know how many subpatterns --- to yield. --- --- The outer list indicates success of the match. It could be Maybe, --- but elsewhere these results are added to a list, so it is more --- convenient to yield a list here. 
-decomposePattern - :: Var v - => Int -> Int -> P.Pattern v - -> [[P.Pattern v]] -decomposePattern t nfields p@(P.Constructor _ _ u ps) - | t == u - = if length ps == nfields - then [ps] - else error err - where - err = "decomposePattern: wrong number of constructor fields: " - ++ show (nfields, p) -decomposePattern t nfields p@(P.EffectBind _ _ u ps pk) - | t == u - = if length ps == nfields - then [ps ++ [pk]] - else error err - where - err = "decomposePattern: wrong number of ability fields: " - ++ show (nfields, p) -decomposePattern t _ (P.EffectPure _ p) - | t == -1 = [[p]] -decomposePattern _ nfields (P.Var _) - = [replicate nfields (P.Unbound (typed Pattern))] -decomposePattern _ nfields (P.Unbound _) - = [replicate nfields (P.Unbound (typed Pattern))] -decomposePattern _ _ (P.SequenceLiteral _ _) - = error "decomposePattern: sequence literal" -decomposePattern _ _ _ = [] - -matchBuiltin :: P.Pattern a -> Maybe (P.Pattern ()) -matchBuiltin (P.Var _) = Just $ P.Unbound () -matchBuiltin (P.Unbound _) = Just $ P.Unbound () -matchBuiltin (P.Nat _ n) = Just $ P.Nat () n -matchBuiltin (P.Int _ n) = Just $ P.Int () n -matchBuiltin (P.Text _ t) = Just $ P.Text () t -matchBuiltin (P.Char _ c) = Just $ P.Char () c -matchBuiltin (P.Float _ d) = Just $ P.Float () d -matchBuiltin _ = Nothing - --- Represents the various cases that that may occur when performing --- a sequence match. These fall into two groups: --- --- E, C, S: empty, cons, snoc --- L, R, DL, DR: split left/right, insufficient elements --- --- These groups correspond to different sorts of matches we can compile --- to. We can view the left/right end of a sequence, or attempt to --- split a sequence at a specified offset from the left/right side. 
-data SeqMatch = E | C | S | L Int | R Int | DL Int | DR Int - deriving (Eq,Ord,Show) - -seqPSize :: P.Pattern v -> Maybe Int -seqPSize (P.SequenceLiteral _ l) = Just $ length l -seqPSize (P.SequenceOp _ _ Cons r) = (1+) <$> seqPSize r -seqPSize (P.SequenceOp _ l Snoc _) = (1+) <$> seqPSize l -seqPSize (P.SequenceOp _ l Concat r) = (+) <$> seqPSize l <*> seqPSize r -seqPSize _ = Nothing - --- Decides a sequence matching operation to perform based on a column --- of patterns that match against it. Literals do not really force a --- bias, so the decision of which side to view is postponed to --- subsequent patterns if we encounter a literal first. A literal with --- priority does block a splitting pattern, though. -decideSeqPat :: [P.Pattern v] -> [SeqMatch] -decideSeqPat = go False - where - go _ [] = [E,C] - go _ (P.SequenceLiteral{} : ps) = go True ps - go _ (P.SequenceOp _ _ Snoc _ : _) = [E,S] - go _ (P.SequenceOp _ _ Cons _ : _) = [E,C] - - go guard (P.SequenceOp _ l Concat r : _) - | guard = [E,C] -- prefer prior literals - | Just n <- seqPSize l = [L n, DL n] - | Just n <- seqPSize r = [R n, DR n] - go b (P.Unbound _ : ps) = go b ps - go b (P.Var _ : ps) = go b ps - go _ (p:_) - = error $ "Cannot process sequence pattern: " ++ show p - --- Represents the possible correspondences between a sequence pattern --- and a sequence matching compilation target. Unlike data matching, --- where a pattern either matches or is disjoint from a tag, sequence --- patterns can overlap in non-trivial ways where it would be difficult --- to avoid re-testing the original list. -data SeqCover v - = Cover [P.Pattern v] - | Disjoint - | Overlap - --- Determines how a pattern corresponds to a sequence matching --- compilation target. 
-decomposeSeqP :: Var v => Set v -> SeqMatch -> P.Pattern v -> SeqCover v -decomposeSeqP _ E (P.SequenceLiteral _ []) = Cover [] -decomposeSeqP _ E _ = Disjoint - -decomposeSeqP _ C (P.SequenceOp _ l Cons r) = Cover [l,r] -decomposeSeqP _ C (P.SequenceOp _ _ Concat _) = Overlap -decomposeSeqP _ C (P.SequenceLiteral _ []) = Disjoint -decomposeSeqP avoid C (P.SequenceLiteral _ (p:ps)) - = Cover [p, P.SequenceLiteral u ps] - where u = freshIn avoid $ typed Pattern - -decomposeSeqP _ S (P.SequenceOp _ l Snoc r) = Cover [l,r] -decomposeSeqP _ S (P.SequenceOp _ _ Concat _) = Overlap -decomposeSeqP _ S (P.SequenceLiteral _ []) = Disjoint -decomposeSeqP avoid S (P.SequenceLiteral _ ps) - = Cover [P.SequenceLiteral u (init ps), last ps] - where u = freshIn avoid $ typed Pattern - -decomposeSeqP _ (L n) (P.SequenceOp _ l Concat r) - | Just m <- seqPSize l - , n == m - = Cover [l,r] - | otherwise = Overlap -decomposeSeqP avoid (L n) (P.SequenceLiteral _ ps) - | length ps >= n - , (pl, pr) <- splitAt n ps - = Cover $ P.SequenceLiteral u <$> [pl,pr] - | otherwise = Disjoint - where u = freshIn avoid $ typed Pattern - -decomposeSeqP _ (R n) (P.SequenceOp _ l Concat r) - | Just m <- seqPSize r - , n == m - = Cover [l,r] -decomposeSeqP avoid (R n) (P.SequenceLiteral _ ps) - | length ps >= n - , (pl, pr) <- splitAt (length ps - n) ps - = Cover $ P.SequenceLiteral u <$> [pl,pr] - | otherwise = Disjoint - where u = freshIn avoid $ typed Pattern - -decomposeSeqP _ (DL n) (P.SequenceOp _ l Concat _) - | Just m <- seqPSize l , n == m = Disjoint -decomposeSeqP _ (DL n) (P.SequenceLiteral _ ps) - | length ps >= n = Disjoint - -decomposeSeqP _ (DR n) (P.SequenceOp _ _ Concat r) - | Just m <- seqPSize r , n == m = Disjoint -decomposeSeqP _ (DR n) (P.SequenceLiteral _ ps) - | length ps >= n = Disjoint - -decomposeSeqP _ _ _ = Overlap - --- Splits a pattern row with respect to matching a variable against a --- data type constructor. 
If the row would match the specified --- constructor, the subpatterns and resulting row are yielded. A list --- is used as the result value to indicate success or failure to match, --- because these results are accumulated into a larger list elsewhere. -splitRow - :: Var v - => v - -> Int - -> Int - -> PatternRow v - -> [([P.Pattern v], PatternRow v)] -splitRow v t nfields (PR (break ((==v).loc) -> (pl, sp : pr)) g b) - = decomposePattern t nfields sp - <&> \subs -> (subs, PR (pl ++ filter refutable subs ++ pr) g b) -splitRow _ _ _ row = [([],row)] - --- Splits a row with respect to a variable, expecting that the --- variable will be matched against a builtin pattern (non-data type, --- non-request, non-sequence). In addition to returning the --- subpatterns and new row, returns a version of the pattern that was --- matched against the variable that may be collected to determine the --- cases the built-in value is matched against. -splitRowBuiltin - :: Var v - => v - -> PatternRow v - -> [(P.Pattern (), [([P.Pattern v], PatternRow v)])] -splitRowBuiltin v (PR (break ((==v).loc) -> (pl, sp : pr)) g b) - | Just p <- matchBuiltin sp = [(p, [([], PR (pl ++ pr) g b)])] - | otherwise = [] -splitRowBuiltin _ r = [(P.Unbound (), [([], r)])] - --- Splits a row with respect to a variable, expecting that the --- variable will be matched against a sequence matching operation. --- Yields the subpatterns and a new row to be used in subsequent --- compilation. The outer list result is used to indicate success or --- failure. 
-splitRowSeq - :: Var v - => v - -> SeqMatch - -> PatternRow v - -> [([P.Pattern v], PatternRow v)] -splitRowSeq v m r@(PR (break ((==v).loc) -> (pl, sp : pr)) g b) - = case decomposeSeqP avoid m sp of - Cover sps -> - [(sps, PR (pl ++ filter refutable sps ++ pr) g b)] - Disjoint -> [] - Overlap -> [([], r)] - where avoid = maybe mempty freeVars g <> freeVars b -splitRowSeq _ _ r = [([], r)] - --- Renames the variables annotating the patterns in a row, for once a --- canonical choice has been made. -renameRow :: Var v => Map v v -> PatternRow v -> PatternRow v -renameRow m (PR p0 g0 b0) = PR p g b - where - access k - | Just v <- Map.lookup k m = v - | otherwise = k - p = (fmap.fmap) access p0 - g = changeVars m <$> g0 - b = changeVars m b0 - --- Chooses a common set of variables for use when decomposing --- patterns into multiple sub-patterns. It is too naive to simply use --- the variables in the first row, because it may have been generated --- by decomposing a variable or unbound pattern, which will make up --- variables for subpatterns. -chooseVars :: Var v => [[P.Pattern v]] -> [v] -chooseVars [] = [] -chooseVars ([]:rs) = chooseVars rs -chooseVars ((P.Unbound{} : _) : rs) = chooseVars rs -chooseVars (r : _) = extractVars r - --- Creates a pattern matrix from many rows with the subpatterns --- introduced during the splitting that generated those rows. Also --- yields an indication of the type of the variables that the --- subpatterns match against, if possible. -buildMatrix - :: Var v - => [([P.Pattern v], PatternRow v)] - -> ([(v,Reference)], PatternMatrix v) -buildMatrix [] = ([], PM []) -buildMatrix vrs = (zip cvs rs, PM $ fixRow <$> vrs) - where - rs = fmap determineType . transpose . fmap fst $ vrs - cvs = chooseVars $ fst <$> vrs - fixRow (extractVars -> rvs, pr) - = renameRow (fromListWith const . zip rvs $ cvs) pr - --- Splits a pattern matrix on a given variable, expected to be matched --- against builtin type patterns. 
Yields the cases covered and --- corresponding matrices for those cases, with types for any new --- variables (although currently builtin patterns do not introduce --- variables). -splitMatrixBuiltin - :: Var v - => v - -> PatternMatrix v - -> [(P.Pattern (), [(v,Reference)], PatternMatrix v)] -splitMatrixBuiltin v (PM rs) - = fmap (\(a,(b,c)) -> (a,b,c)) - . toList - . fmap buildMatrix - . fromListWith (++) - $ splitRowBuiltin v =<< rs - -matchPattern :: [(v,Reference)] -> SeqMatch -> P.Pattern () -matchPattern vrs = \case - E -> sz 0 - C -> P.SequenceOp () vr Cons vr - S -> P.SequenceOp () vr Snoc vr - L n -> P.SequenceOp () (sz n) Concat (P.Var ()) - R n -> P.SequenceOp () (P.Var ()) Concat (sz n) - DL _ -> P.Unbound () - DR _ -> P.Unbound () - where - vr | [] <- vrs = P.Unbound () | otherwise = P.Var () - sz n = P.SequenceLiteral () . replicate n $ P.Unbound () - --- Splits a matrix at a given variable with respect to sequence --- patterns. Yields the appropriate patterns for the covered cases, --- variables introduced for each case with their types, and new --- matricies for subsequent compilation. -splitMatrixSeq - :: Var v - => v - -> PatternMatrix v - -> [(P.Pattern (), [(v,Reference)], PatternMatrix v)] -splitMatrixSeq v (PM rs) - = cases - where - ms = decideSeqPat $ take 1 . dropWhile ((/=v).loc) . matches =<< rs - hint m vrs - | m `elem` [E,C,S] = vrs - | otherwise = (fmap.fmap) (const Rf.vectorRef) vrs - cases = ms <&> \m -> - let frs = rs >>= splitRowSeq v m - (vrs, pm) = buildMatrix frs - in (matchPattern vrs m, hint m vrs, pm) - --- Splits a matrix at a given variable with respect to a data type or --- ability match. Yields a new matrix for each constructor, with --- variables introduced and their types for each case. -splitMatrix - :: Var v - => v - -> Either Cons Cons - -> PatternMatrix v - -> [(Int, [(v,Reference)], PatternMatrix v)] -splitMatrix v econs (PM rs) - = fmap (\(a, (b, c)) -> (a,b,c)) . 
(fmap.fmap) buildMatrix $ mmap - where - cons = either (((-1,1):) . zip [0..]) (zip [0..]) econs - mmap = fmap (\(t,fs) -> (t, splitRow v t fs =<< rs)) cons - --- Monad for pattern preparation. It is a state monad carrying a fresh --- variable source, the list of variables bound the the pattern being --- prepared, and a variable renaming mapping. -type PPM v a = State (Word64, [v], Map v v) a - -freshVar :: Var v => PPM v v -freshVar = state $ \(fw, vs, rn) -> - let v = freshenId fw $ typed Pattern - in (v, (fw+1, vs, rn)) - -useVar :: PPM v v -useVar = state $ \(avoid, v:vs, rn) -> (v, (avoid, vs, rn)) - -renameTo :: Var v => v -> v -> PPM v () -renameTo to from - = modify $ \(avoid, vs, rn) -> - ( avoid, vs - , insertWith (error "renameTo: duplicate rename") from to rn - ) - --- Tries to rewrite sequence patterns into a format that can be --- matched most flexibly. -normalizeSeqP :: P.Pattern a -> P.Pattern a -normalizeSeqP (P.As a p) = P.As a (normalizeSeqP p) -normalizeSeqP (P.EffectPure a p) = P.EffectPure a $ normalizeSeqP p -normalizeSeqP (P.EffectBind a r i ps k) - = P.EffectBind a r i (normalizeSeqP <$> ps) (normalizeSeqP k) -normalizeSeqP (P.Constructor a r i ps) - = P.Constructor a r i $ normalizeSeqP <$> ps -normalizeSeqP (P.SequenceLiteral a ps) - = P.SequenceLiteral a $ normalizeSeqP <$> ps -normalizeSeqP (P.SequenceOp a p0 op q0) - = case (op, normalizeSeqP p0, normalizeSeqP q0) of - (Cons, p, P.SequenceLiteral _ ps) - -> P.SequenceLiteral a (p:ps) - (Snoc, P.SequenceLiteral _ ps, p) - -> P.SequenceLiteral a (ps ++ [p]) - (Concat, P.SequenceLiteral _ ps, P.SequenceLiteral _ qs) - -> P.SequenceLiteral a (ps ++ qs) - (Concat, P.SequenceLiteral _ ps, q) - -> foldr (\p r -> P.SequenceOp a p Cons r) q ps - (Concat, p, P.SequenceLiteral _ qs) - -> foldl (\r q -> P.SequenceOp a r Snoc q) p qs - (op, p, q) -> P.SequenceOp a p op q -normalizeSeqP p = p - --- Prepares a pattern for compilation, like `preparePattern`. 
This --- function, however, is used when a candidate variable for a pattern --- has already been chosen, as with an As pattern. This allows turning --- redundant names (like with the pattern u@v) into renamings. -prepareAs :: Var v => P.Pattern a -> v -> PPM v (P.Pattern v) -prepareAs (P.Unbound _) u = pure $ P.Var u -prepareAs (P.As _ p) u = prepareAs p u <* (renameTo u =<< useVar) -prepareAs (P.Var _) u = P.Var u <$ (renameTo u =<< useVar) -prepareAs (P.Constructor _ r i ps) u = do - P.Constructor u r i <$> traverse preparePattern ps -prepareAs (P.EffectPure _ p) u = do - P.EffectPure u <$> preparePattern p -prepareAs (P.EffectBind _ r i ps k) u = do - P.EffectBind u r i - <$> traverse preparePattern ps - <*> preparePattern k -prepareAs (P.SequenceLiteral _ ps) u = do - P.SequenceLiteral u <$> traverse preparePattern ps -prepareAs (P.SequenceOp _ p op q) u = do - flip (P.SequenceOp u) op - <$> preparePattern p - <*> preparePattern q -prepareAs p u = pure $ u <$ p - --- Prepares a pattern for compilation. This removes the existing --- annotations and replaces them with a choice of variable that the --- pattern is matching against. As patterns are eliminated and the --- variables they bind are used as candidates for what that level of --- the pattern matches against. 
-preparePattern :: Var v => P.Pattern a -> PPM v (P.Pattern v) -preparePattern (P.Unbound _) = P.Var <$> freshVar -preparePattern (P.Var _) = P.Var <$> useVar -preparePattern (P.As _ p) = prepareAs p =<< useVar -preparePattern p = prepareAs p =<< freshVar - -buildPattern :: Bool -> Reference -> Int -> [v] -> Int -> P.Pattern () -buildPattern ef r t vs nfields = - case ef of - False -> P.Constructor () r t vps - True | t == -1 -> P.EffectPure () (P.Var ()) - | otherwise -> P.EffectBind () r t aps kp - where - (aps,kp) | [] <- vps = error "too few patterns for effect bind" - | otherwise = (init vps, last vps) - where - vps | length vs < nfields - = replicate nfields $ P.Unbound () - | otherwise - = P.Var () <$ vs - -compile :: Var v => DataSpec -> Ctx v -> PatternMatrix v -> Term v -compile _ _ (PM []) = blank () -compile spec ctx m@(PM (r:rs)) - | rowIrrefutable r - = case guard r of - Nothing -> body r - Just g -> iff mempty g (body r) $ compile spec ctx (PM rs) - | rf == Rf.vectorRef - = match () (var () v) - $ buildCaseBuiltin spec ctx - <$> splitMatrixSeq v m - | rf `member` builtinCase - = match () (var () v) - $ buildCaseBuiltin spec ctx - <$> splitMatrixBuiltin v m - | otherwise - = case Map.lookup rf spec of - Just cons -> - match () (var () v) - $ buildCase spec rf cons ctx - <$> splitMatrix v cons m - Nothing -> error $ "unknown data reference: " ++ show r - where - v = choose heuristics m - rf = Map.findWithDefault defaultRef v ctx - -buildCaseBuiltin - :: Var v - => DataSpec - -> Ctx v - -> (P.Pattern (), [(v,Reference)], PatternMatrix v) - -> MatchCase () (Term v) -buildCaseBuiltin spec ctx0 (p, vrs, m) - = MatchCase p Nothing . absChain' vs $ compile spec ctx m - where - vs = ((),) . fst <$> vrs - ctx = Map.fromList vrs <> ctx0 - -buildCase - :: Var v - => DataSpec - -> Reference - -> Either Cons Cons - -> Ctx v - -> (Int, [(v,Reference)], PatternMatrix v) - -> MatchCase () (Term v) -buildCase spec r econs ctx0 (t, vrs, m) - = MatchCase pat Nothing . 
absChain' vs $ compile spec ctx m - where - (eff, cons) = either (True,) (False,) econs - pat = buildPattern eff r t vs $ cons !! t - vs = ((),) . fst <$> vrs - ctx = Map.fromList vrs <> ctx0 - -mkRow - :: Var v - => v - -> MatchCase a (Term v) - -> State Word64 (PatternRow v) -mkRow sv (MatchCase (normalizeSeqP -> p0) g0 (AbsN' vs b)) - = state $ \w -> case runState (prepareAs p0 sv) (w, vs, mempty) of - (p, (w, [], rn)) -> (,w) - $ PR (filter refutable [p]) - (changeVars rn <$> g) - (changeVars rn b) - _ -> error "mkRow: not all variables used" - where - g = case g0 of - Just (AbsN' us g) - | us == vs -> Just g - | otherwise -> error "mkRow: guard variables do not match body" - Nothing -> Nothing - _ -> error "mkRow: impossible" -mkRow _ _ = error "mkRow: impossible" - -initialize - :: Var v - => Reference - -> Term v - -> [MatchCase () (Term v)] - -> (Maybe v, (v, Reference), PatternMatrix v) -initialize r sc cs - = ( lv - , (sv, r) - , PM $ evalState (traverse (mkRow sv) cs) 1 - ) - where - (lv, sv) | Var' v <- sc = (Nothing, v) - | pv <- freshenId 0 $ typed Pattern - = (Just pv, pv) - -splitPatterns :: Var v => DataSpec -> Term v -> Term v -splitPatterns spec0 = visitPure $ \case - Match' sc0 cs0 - | r <- determineType $ p <$> cs0 - , (lv, scrut, pm) <- initialize r sc cs - , body <- compile spec (uncurry Map.singleton scrut) pm - -> Just $ case lv of - Just v -> let1 False [(((),v), sc)] body - _ -> body - where - sc = splitPatterns spec sc0 - cs = fmap (splitPatterns spec) <$> cs0 - _ -> Nothing - where - p (MatchCase pp _ _) = pp - spec = Map.insert Rf.booleanRef (Right [0,0]) spec0 - -builtinCase :: Set Reference -builtinCase - = fromList - [ Rf.intRef - , Rf.natRef - , Rf.floatRef - , Rf.textRef - , Rf.charRef - ] - -defaultRef :: Reference -defaultRef = Builtin "PatternMatchUnknown" - -determineType :: Show a => [P.Pattern a] -> Reference -determineType ps = fromMaybe defaultRef . foldr ((<|>) . 
f) Nothing $ ps - where - f (P.As _ p) = f p - f P.Int{} = Just Rf.intRef - f P.Nat{} = Just Rf.natRef - f P.Float{} = Just Rf.floatRef - f P.Boolean{} = Just Rf.booleanRef - f P.Text{} = Just Rf.textRef - f P.Char{} = Just Rf.charRef - f P.SequenceLiteral{} = Just Rf.vectorRef - f P.SequenceOp{} = Just Rf.vectorRef - f (P.Constructor _ r _ _) = Just r - f (P.EffectBind _ r _ _ _) = Just r - f _ = Nothing diff --git a/parser-typechecker/src/Unison/Runtime/Rt1.hs b/parser-typechecker/src/Unison/Runtime/Rt1.hs deleted file mode 100644 index 21bdf0e816..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Rt1.hs +++ /dev/null @@ -1,868 +0,0 @@ -{-# Language BangPatterns #-} -{-# Language OverloadedStrings #-} -{-# Language Strict #-} -{-# Language StrictData #-} -{-# Language RankNTypes #-} -{-# Language PatternSynonyms #-} -{-# Language ViewPatterns #-} - - -module Unison.Runtime.Rt1 where - -import Unison.Prelude - -import Data.Bifunctor (second) -import Data.Bits ((.&.), (.|.), complement, countLeadingZeros, countTrailingZeros, shiftR, shiftL, xor) -import Data.IORef -import Unison.Runtime.IR (pattern CompilationEnv, pattern Req) -import Unison.Runtime.IR hiding (CompilationEnv, IR, Req, Value, Z) -import Unison.Symbol (Symbol) -import Unison.Util.CyclicEq (CyclicEq, cyclicEq) -import Unison.Util.CyclicOrd (CyclicOrd, cyclicOrd) -import Unison.Util.Monoid (intercalateMap) -import qualified System.Mem.StableName as S -import qualified Data.ByteString as BS -import qualified Data.List as List -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Data.Sequence as Sequence -import qualified Data.Vector.Mutable as MV -import qualified Unison.ABT as ABT -import qualified Unison.Codebase.CodeLookup as CL -import qualified Unison.DataDeclaration as DD -import qualified Unison.Reference as R -import qualified Unison.Runtime.IR as IR -import qualified Unison.Term as Term -import qualified 
Unison.Util.CycleTable as CT -import qualified Unison.Util.Bytes as Bytes -import qualified Unison.Var as Var - -type CompilationEnv = IR.CompilationEnv ExternalFunction Continuation -type IR = IR.IR ExternalFunction Continuation -type Req = IR.Req ExternalFunction Continuation -type Value = IR.Value ExternalFunction Continuation -type Z = IR.Z ExternalFunction Continuation -type Size = Int -type Stack = MV.IOVector Value - --- The number of stack elements referenced by an IR -type NeededStack = Int - -data Continuation - = WrapHandler Value Continuation - | One NeededStack Size Stack IR - | Chain Symbol Continuation Continuation - --- just returns its input -idContinuation :: IO Continuation -idContinuation = do - m0 <- MV.new 1 - pure $ One 0 1 m0 (IR.Leaf (IR.Slot 0)) - -instance Show Continuation where - show _c = "" - -instance External Continuation where - decompileExternal k = runDS $ Term.lam() paramName <$> go [paramName] k - where - paramName = Var.freshIn (used k) (Var.named "result") - used c = case c of - One _ _ _ ir -> boundVarsIR ir - WrapHandler _ k -> used k - Chain s k1 k2 -> Set.insert s (used k1 <> used k2) - go :: [Symbol] -> Continuation -> DS (Term Symbol) - go env k = case k of - WrapHandler h k -> Term.handle() <$> decompileImpl h <*> go env k - One _needed size m ir -> do - captured <- fmap Map.fromList . for (toList (freeSlots ir)) $ \i -> - (i,) <$> liftIO (at size (LazySlot i) m) - decompileIR env (specializeIR captured ir) - Chain s k1 k2 -> do - k1 <- go env k1 - Term.let1' False [(s, k1)] <$> go (s:env) k2 - --- Wrap a `handle h` around the continuation inside the `Req`. --- Ex: `k = x -> x + 1` becomes `x -> handle h in x + 1`. -wrapHandler :: Value -> Req -> Req -wrapHandler h (Req r cid args k) = Req r cid args (WrapHandler h k) - --- Appends `k2` to the end of the `k` continuation --- Ex: if `k` is `x -> x + 1` and `k2` is `y -> y + 4`, --- this produces a continuation `x -> let r1 = x + 1; r1 + 4`. 
-appendCont :: Symbol -> Req -> Continuation -> Req -appendCont v (Req r cid args k) k2 = Req r cid args (Chain v k k2) - -data ExternalFunction = - ExternalFunction R.Reference (Size -> Stack -> IO Value) -instance Eq ExternalFunction where - ExternalFunction r _ == ExternalFunction r2 _ = r == r2 -instance External ExternalFunction where - decompileExternal (ExternalFunction r _) = pure $ Term.ref () r - --- This function converts `Z` to a `Value`. --- A bunch of variants follow. -at :: Size -> Z -> Stack -> IO Value -at size i m = case i of - Val v -> force v - Slot i -> - -- the top of the stack is slot 0, at index size - 1 - force =<< MV.read m (size - i - 1) - LazySlot i -> - MV.read m (size - i - 1) - External (ExternalFunction _ e) -> e size m - -atc :: Size -> Z -> Stack -> IO Char -atc size i m = at size i m >>= \case - C c -> pure c - v -> fail $ "type error, expecting C, got " <> show v - -ati :: Size -> Z -> Stack -> IO Int64 -ati size i m = at size i m >>= \case - I i -> pure i - v -> fail $ "type error, expecting I, got " <> show v - -atn :: Size -> Z -> Stack -> IO Word64 -atn size i m = at size i m >>= \case - N i -> pure i - v -> fail $ "type error, expecting N, got " <> show v - -atf :: Size -> Z -> Stack -> IO Double -atf size i m = at size i m >>= \case - F i -> pure i - v -> fail $ "type error, expecting F, got " <> show v - -atb :: Size -> Z -> Stack -> IO Bool -atb size i m = at size i m >>= \case - B b -> pure b - v -> fail $ "type error, expecting B, got " <> show v - -att :: Size -> Z -> Stack -> IO Text -att size i m = at size i m >>= \case - T t -> pure t - v -> do - stackStuff <- fmap (take 200 . show) <$> traverse (MV.read m) [0 .. 
size - 1] - traceM $ "nstack:\n" <> intercalateMap "\n" (take 200) stackStuff - fail $ "type error, expecting T at " <> show i <> ", got " <> show v - -atbs :: Size -> Z -> Stack -> IO Bytes.Bytes -atbs size i m = at size i m >>= \case - Bs v -> pure v - v -> fail $ "type error, expecting Bytes, got: " <> show v - -ats :: Size -> Z -> Stack -> IO (Seq Value) -ats size i m = at size i m >>= \case - Sequence v -> pure v - v -> fail $ "type error, expecting List, got: " <> show v - -atd :: Size -> Z -> Stack -> IO (R.Reference, ConstructorId, [Value]) -atd size i m = at size i m >>= \case - Data r id vs -> pure (r, id, vs) - v -> fail $ "type error, expecting Data, got " <> show v - --- | `push` doesn't return the new stack size (is it for efficiency?), --- so make sure that you add +1 to it yourself, after this call. -push :: Size -> Value -> Stack -> IO Stack -push size v m = do - m <- ensureSize (size + 1) m - MV.write m size v - pure m - --- Values passed to pushMany* are already in stack order: --- the first Value is deeper on the resulting stack than the final Value -pushMany :: Foldable f - => Size -> f Value -> Stack -> IO (Size, Stack) -pushMany size values m = do - m <- ensureSize (size + length values) m - let pushArg :: Size -> Value -> IO Size - pushArg size' val = do - MV.write m size' val - pure (size' + 1) - newSize <- foldM pushArg size values - pure (newSize, m) - -pushManyZ :: Foldable f => Size -> f Z -> Stack -> IO (Size, Stack) -pushManyZ size zs m = do - m <- ensureSize (size + length zs) m - let pushArg size' z = do - val <- at size z m -- variable lookup uses current size - MV.write m size' val - pure (size' + 1) - size2 <- foldM pushArg size zs - pure (size2, m) - --- | Grow the physical stack to at least `size` slots -ensureSize :: Size -> Stack -> IO Stack -ensureSize size m = - if (size > MV.length m) then - MV.grow m size - else pure m - -force :: Value -> IO Value -force (Ref _ _ r) = readIORef r >>= force -force v = pure v - -data 
ErrorType = ErrorTypeTodo | ErrorTypeBug deriving Show - -data Result - = RRequest Req - | RMatchFail Size [Value] Value - | RDone Value - | RError ErrorType Value - deriving Show - -done :: Value -> IO Result -done v = pure (RDone v) - -arity :: Value -> Int -arity (Lam n _ _) = n -arity _ = 0 - --- Creates a `CompilationEnv` by pulling out all the constructor arities for --- types that are referenced by the given term, `t`. -compilationEnv :: Monad m - => CL.CodeLookup Symbol m a - -> Term.Term Symbol a - -> m CompilationEnv -compilationEnv env t = do - let typeDeps = Term.typeDependencies t - arityMap <- fmap (Map.fromList . join) . for (toList typeDeps) $ \case - r@(R.DerivedId id) -> do - decl <- CL.getTypeDeclaration env id - case decl of - Nothing -> error $ "no type declaration for " <> show id -- pure [] - Just (Left ad) -> pure $ - let arities = DD.constructorArities $ DD.toDataDecl ad - in [ ((r, i), arity) | (arity, i) <- arities `zip` [0..] ] - Just (Right dd) -> pure $ - let arities = DD.constructorArities dd - in [ ((r, i), arity) | (arity, i) <- arities `zip` [0..] ] - R.Builtin{} -> pure [] - let cenv = CompilationEnv mempty arityMap - - -- deps = Term.dependencies t - -- this would rely on haskell laziness for compilation, needs more thought - --compiledTerms <- fmap (Map.fromList . join) . for (toList deps) $ \case - -- r@(R.DerivedId id) -> do - -- o <- CL.getTerm env id - -- case o of - -- Nothing -> pure [] - -- Just e -> pure [(r, compile cenv (Term.amap (const ()) e))] - -- _ -> pure [] - pure $ builtinCompilationEnv <> cenv - -builtinCompilationEnv :: CompilationEnv -builtinCompilationEnv = CompilationEnv (builtinsMap <> IR.builtins) mempty - where - builtins :: [(Text, Int, Size -> Stack -> IO Value)] - builtins = - [ mk2 "Text.++" att att (pure . T) (<>) - , mk2 "Text.take" atn att (pure . T) (Text.take . fromIntegral) - , mk2 "Text.drop" atn att (pure . T) (Text.drop . fromIntegral) - , mk2 "Text.==" att att (pure . 
B) (==) - , mk2 "Text.!=" att att (pure . B) (/=) - , mk2 "Text.<=" att att (pure . B) (<=) - , mk2 "Text.>=" att att (pure . B) (>=) - , mk2 "Text.>" att att (pure . B) (>) - , mk2 "Text.<" att att (pure . B) (<) - , mk1 "Text.size" att (pure . N) (fromIntegral . Text.length) - , mk1 "Text.uncons" att - ( pure - . IR.maybeToOptional - . fmap (\(h, t) -> IR.tuple [C h, T t]) - ) - $ Text.uncons - , mk1 "Text.unsnoc" att - ( pure - . IR.maybeToOptional - . fmap (\(i, l) -> IR.tuple [T i, C l]) - ) - $ Text.unsnoc - - , mk1 "Text.toCharList" att (pure . Sequence) - (Sequence.fromList . map C . Text.unpack) - - , mk1 "Text.fromCharList" ats (pure . T) - (\s -> Text.pack [ c | C c <- toList s ]) - - , mk1 "Char.toNat" atc (pure . N) (fromIntegral . fromEnum) - , mk1 "Char.fromNat" atn (pure . C) (toEnum . fromIntegral) - - , mk2 "List.at" atn ats (pure . IR.maybeToOptional) - $ Sequence.lookup - . fromIntegral - , mk2 "List.cons" at ats (pure . Sequence) (Sequence.<|) - , mk2 "List.snoc" ats at (pure . Sequence) (Sequence.|>) - , mk2 "List.take" atn ats (pure . Sequence) (Sequence.take . fromIntegral) - , mk2 "List.drop" atn ats (pure . Sequence) (Sequence.drop . fromIntegral) - , mk2 "List.++" ats ats (pure . Sequence) (<>) - , mk1 "List.size" ats (pure . N) (fromIntegral . Sequence.length) - - , mk1 "Bytes.fromList" ats (pure . Bs) (\s -> - Bytes.fromByteString (BS.pack [ fromIntegral n | N n <- toList s])) - , mk2 "Bytes.++" atbs atbs (pure . Bs) (<>) - , mk2 "Bytes.take" atn atbs (pure . Bs) (\n b -> Bytes.take (fromIntegral n) b) - , mk2 "Bytes.drop" atn atbs (pure . Bs) (\n b -> Bytes.drop (fromIntegral n) b) - , mk1 "Bytes.toList" atbs (pure . Sequence) - (\bs -> Sequence.fromList [ N (fromIntegral n) | n <- Bytes.toWord8s bs ]) - , mk1 "Bytes.size" atbs (pure . N . fromIntegral) Bytes.size - , mk2 "Bytes.at" atn atbs pure $ \i bs -> - IR.maybeToOptional (N . fromIntegral <$> Bytes.at (fromIntegral i) bs) - , mk1 "Bytes.flatten" atbs (pure . 
Bs) Bytes.flatten - - -- Trigonometric functions - , mk1 "Float.acos" atf (pure . F) acos - , mk1 "Float.asin" atf (pure . F) asin - , mk1 "Float.atan" atf (pure . F) atan - , mk2 "Float.atan2" atf atf (pure . F) atan2 - , mk1 "Float.cos" atf (pure . F) cos - , mk1 "Float.sin" atf (pure . F) sin - , mk1 "Float.tan" atf (pure . F) tan - - -- Hyperbolic functions - , mk1 "Float.acosh" atf (pure . F) acosh - , mk1 "Float.asinh" atf (pure . F) asinh - , mk1 "Float.atanh" atf (pure . F) atanh - , mk1 "Float.cosh" atf (pure . F) cosh - , mk1 "Float.sinh" atf (pure . F) sinh - , mk1 "Float.tanh" atf (pure . F) tanh - - -- Exponential functions - , mk1 "Float.exp" atf (pure . F) exp - , mk1 "Float.log" atf (pure . F) log - , mk2 "Float.logBase" atf atf (pure . F) logBase - - -- Power Functions - , mk2 "Float.pow" atf atf (pure . F) (**) - , mk1 "Float.sqrt" atf (pure . F) sqrt - - -- Rounding and Remainder Functions - , mk1 "Float.ceiling" atf (pure . I) ceiling - , mk1 "Float.floor" atf (pure . I) floor - , mk1 "Float.round" atf (pure . I) round - , mk1 "Float.truncate" atf (pure . I) truncate - - , mk1 "Nat.toText" atn (pure . T) (Text.pack . show) - , mk1 "Nat.fromText" att (pure . IR.maybeToOptional . fmap N) ( - (\x -> readMaybe x :: Maybe Word64) . Text.unpack) - , mk1 "Nat.toFloat" atn (pure . F) fromIntegral - - , mk1 "Int.toText" ati (pure . T) - (Text.pack . (\x -> if x >= 0 then ("+" <> show x) else show x)) - , mk1 "Int.fromText" att (pure . IR.maybeToOptional . fmap I) $ - (\x -> readMaybe (if "+" `List.isPrefixOf` x then drop 1 x else x)) - . Text.unpack - , mk1 "Int.toFloat" ati (pure . F) fromIntegral - - -- Float Utils - , mk1 "Float.abs" atf (pure . F) abs - , mk2 "Float.max" atf atf (pure . F) max - , mk2 "Float.min" atf atf (pure . F) min - , mk1 "Float.toText" atf (pure . T) (Text.pack . show) - , mk1 "Float.fromText" att (pure . IR.maybeToOptional . fmap F) ( - (\x -> readMaybe x :: Maybe Double) . 
Text.unpack) - - , mk2 "Debug.watch" att at id (\t v -> putStrLn (Text.unpack t) *> pure v) - ] - - builtinsMap :: Map R.Reference IR - builtinsMap = Map.fromList - [ (R.Builtin name, makeIR arity name ir) | (name, arity, ir) <- builtins ] - makeIR arity name = - Leaf - . Val - . Lam arity (underapply name) - . Leaf - . External - . ExternalFunction (R.Builtin name) - underapply name = - let r = Term.ref () $ R.Builtin name :: Term SymbolC - in FormClosure (ABT.hash r) r [] - mk1 - :: Text - -> (Size -> Z -> Stack -> IO a) - -> (b -> IO Value) - -> (a -> b) - -> (Text, Int, Size -> Stack -> IO Value) - mk1 name getA mkB f = - ( name - , 1 - , \size stack -> do - a <- getA size (Slot 0) stack - mkB $ f a - ) - mk2 - :: Text - -> (Size -> Z -> Stack -> IO a) - -> (Size -> Z -> Stack -> IO b) - -> (c -> IO Value) - -> (a -> b -> c) - -> (Text, Int, Size -> Stack -> IO Value) - mk2 name getA getB mkC f = - ( name - , 2 - , \size stack -> do - a <- getA size (Slot 1) stack - b <- getB size (Slot 0) stack - mkC $ f a b - ) - -run :: (R.Reference -> ConstructorId -> [Value] -> IO Result) - -> CompilationEnv - -> IR - -> IO Result -run ioHandler env ir = do - let -- pir = prettyIR mempty pexternal pcont - -- pvalue = prettyValue mempty pexternal pcont - -- pcont _k = "" -- TP.pretty mempty <$> decompileExternal k - -- if we had a PrettyPrintEnv, we could use that here - -- pexternal (ExternalFunction r _) = P.shown r - -- traceM $ "Running this program" - -- traceM $ P.render 80 (pir ir) - supply <- newIORef 0 - m0 <- MV.new 256 - let - fresh :: IO Int - fresh = atomicModifyIORef' supply (\n -> (n + 1, n)) - - -- TODO: - -- go :: (MonadReader Size m, MonadState Stack m, MonadIO m) => IR -> m Result - go :: Size -> Stack -> IR -> IO Result - go size m ir = do - -- stackStuff <- traverse (MV.read m) [0..size-1] - -- traceM $ "stack: " <> show stackStuff - -- traceM $ "ir: " <> show ir - -- traceM "" - case ir of - Leaf (Val v) -> done v - Leaf slot -> done =<< at size slot m 
- If c t f -> atb size c m >>= \case - True -> go size m t - False -> go size m f - And i j -> atb size i m >>= \case - True -> go size m j - False -> done (B False) - Or i j -> atb size i m >>= \case - True -> done (B True) - False -> go size m j - Not i -> atb size i m >>= (done . B . not) - Let var b body freeInBody -> go size m b >>= \case - RRequest req -> - let needed = if Set.null freeInBody then 0 else Set.findMax freeInBody - in pure $ RRequest (appendCont var req $ One needed size m body) - RDone v -> do - -- Garbage collect the stack occasionally - (size, m) <- - if size >= MV.length m - -- freeInBody just the set of de bruijn indices referenced in `body` - -- Examples: - -- a) let x = 1 in x, freeInBody = {0} - -- b) let x = 1 in 42, freeInBody = {} - -- We don't need anything from old stack in either of the above - -- - -- c) let x = 1 in (let y = 2 in x + y), freeInBody = {0,1} - -- We need the top element of the old stack to be preserved - then let - maxSlot = - if Set.null freeInBody then -1 - else Set.findMax freeInBody - 1 - in gc size m maxSlot - else pure (size, m) - -- traceM . P.render 80 $ P.shown var <> " =" `P.hang` pvalue v - push size v m >>= \m -> go (size + 1) m body - e@(RMatchFail _ _ _) -> pure e - e@(RError _ _) -> pure e - LetRec bs body -> letrec size m bs body - MakeSequence vs -> - done . Sequence . Sequence.fromList =<< traverse (\i -> at size i m) vs - Construct r cid args -> - done . 
Data r cid =<< traverse (\i -> at size i m) args - Request r cid args -> - req <$> traverse (\i -> at size i m) args - where - -- The continuation of the request is initially the identity function - -- and we append to it in `Let` as we unwind the stack - req vs = RRequest (Req r cid vs (One 0 size m (Leaf $ Slot 0))) - Handle handler body -> do - h <- at size handler m - runHandler size m h body - Apply fn args -> do - RDone fn <- go size m fn -- ANF should ensure this match is OK - fn <- force fn - call size m fn args - Match scrutinee cases -> do - -- scrutinee : Z -- already evaluated :amazing: - -- cases : [(Pattern, Maybe IR, IR)] - scrute <- at size scrutinee m -- "I am scrute" / "Dwight K. Scrute" - tryCases size scrute m cases - - -- Builtins - AddI i j -> do x <- ati size i m; y <- ati size j m; done (I (x + y)) - SubI i j -> do x <- ati size i m; y <- ati size j m; done (I (x - y)) - MultI i j -> do x <- ati size i m; y <- ati size j m; done (I (x * y)) - DivI i j -> do x <- ati size i m; y <- ati size j m; done (I (x `div` y)) - GtI i j -> do x <- ati size i m; y <- ati size j m; done (B (x > y)) - GtEqI i j -> do x <- ati size i m; y <- ati size j m; done (B (x >= y)) - LtI i j -> do x <- ati size i m; y <- ati size j m; done (B (x < y)) - LtEqI i j -> do x <- ati size i m; y <- ati size j m; done (B (x <= y)) - EqI i j -> do x <- ati size i m; y <- ati size j m; done (B (x == y)) - SignumI i -> do x <- ati size i m; done (I (signum x)) - NegateI i -> do x <- ati size i m; done (I (negate x)) - Truncate0I i -> do x <- ati size i m; done (N (fromIntegral (truncate0 x))) - ModI i j -> do x <- ati size i m; y <- ati size j m; done (I (x `mod` y)) - PowI i j -> do x <- ati size i m; y <- atn size j m; done (I (x ^ y)) - ShiftRI i j -> do x <- ati size i m; y <- atn size j m; done (I (x `shiftR` (fromIntegral y))) - ShiftLI i j -> do x <- ati size i m; y <- atn size j m; done (I (x `shiftL` (fromIntegral y))) - BitAndI i j -> do x <- ati size i m; y <- ati 
size j m; done (I ((.&.) (fromIntegral x) (fromIntegral y))) - BitOrI i j -> do x <- ati size i m; y <- ati size j m; done (I ((.|.) (fromIntegral x) (fromIntegral y))) - BitXorI i j -> do x <- ati size i m; y <- ati size j m; done (I (xor (fromIntegral x) (fromIntegral y))) - ComplementI i -> do x <- ati size i m; done (I (fromIntegral (complement x))) - LeadZeroI i -> do x <- ati size i m; done (I (fromIntegral (countLeadingZeros x))) - TrailZeroI i -> do x <- ati size i m; done (I (fromIntegral (countTrailingZeros x))) - - AddN i j -> do x <- atn size i m; y <- atn size j m; done (N (x + y)) - -- cast to `Int` and subtract - SubN i j -> do x <- atn size i m; y <- atn size j m - done (I (fromIntegral x - fromIntegral y)) - -- subtraction truncated at 0 (don't wrap around) - DropN i j -> do x <- atn size i m; y <- atn size j m - done (N (x - (y `min` x))) - MultN i j -> do x <- atn size i m; y <- atn size j m; done (N (x * y)) - DivN i j -> do x <- atn size i m; y <- atn size j m; done (N (x `div` y)) - ModN i j -> do x <- atn size i m; y <- atn size j m; done (N (x `mod` y)) - PowN i j -> do x <- atn size i m; y <- atn size j m; done (N (fromIntegral (x ^ y))) - ShiftRN i j -> do x <- atn size i m; y <- atn size j m; done (N (fromIntegral (x `shiftR` (fromIntegral y)))) - ShiftLN i j -> do x <- atn size i m; y <- atn size j m; done (N (fromIntegral (x `shiftL` (fromIntegral y)))) - ToIntN i -> do x <- atn size i m; done (I (fromIntegral x)) - GtN i j -> do x <- atn size i m; y <- atn size j m; done (B (x > y)) - GtEqN i j -> do x <- atn size i m; y <- atn size j m; done (B (x >= y)) - LtN i j -> do x <- atn size i m; y <- atn size j m; done (B (x < y)) - LtEqN i j -> do x <- atn size i m; y <- atn size j m; done (B (x <= y)) - EqN i j -> do x <- atn size i m; y <- atn size j m; done (B (x == y)) - BitAndN i j -> do x <- atn size i m; y <- atn size j m; done (N ((.&.) x y)) - BitOrN i j -> do x <- atn size i m; y <- atn size j m; done (N ((.|.) 
x y)) - BitXorN i j -> do x <- atn size i m; y <- atn size j m; done (N (xor x y)) - ComplementN i -> do x <- atn size i m; done (N (fromIntegral (complement x))) - LeadZeroN i -> do x <- atn size i m; done (N (fromIntegral (countLeadingZeros x))) - TrailZeroN i -> do x <- atn size i m; done (N (fromIntegral (countTrailingZeros x))) - - AddF i j -> do x <- atf size i m; y <- atf size j m; done (F (x + y)) - SubF i j -> do x <- atf size i m; y <- atf size j m; done (F (x - y)) - MultF i j -> do x <- atf size i m; y <- atf size j m; done (F (x * y)) - DivF i j -> do x <- atf size i m; y <- atf size j m; done (F (x / y)) - GtF i j -> do x <- atf size i m; y <- atf size j m; done (B (x > y)) - GtEqF i j -> do x <- atf size i m; y <- atf size j m; done (B (x >= y)) - LtF i j -> do x <- atf size i m; y <- atf size j m; done (B (x < y)) - LtEqF i j -> do x <- atf size i m; y <- atf size j m; done (B (x <= y)) - EqF i j -> do x <- atf size i m; y <- atf size j m; done (B (x == y)) - EqU i j -> do - -- todo: these can be reused - t1 <- CT.new 8 - t2 <- CT.new 8 - x <- at size i m - y <- at size j m - RDone . B <$> cyclicEq t1 t2 x y - CompareU i j -> do - -- todo: these can be reused - t1 <- CT.new 8 - t2 <- CT.new 8 - x <- at size i m - y <- at size j m - o <- cyclicOrd t1 t2 x y - pure . RDone . I $ case o of - EQ -> 0 - LT -> -1 - GT -> 1 - Bug i -> RError ErrorTypeBug <$> at size i m - Todo i -> RError ErrorTypeTodo <$> at size i m - - runHandler :: Size -> Stack -> Value -> IR -> IO Result - runHandler size m handler body = - go size m body >>= runHandler' size m handler - - -- Certain handlers are of a form where we can can skip the step of - -- copying the continuation inside the request. We aren't totally - -- sure what the conditions are, but speculate: - -- - -- * The Request can't escape the invocation of the handler; that is, the - -- handler can't stash the request for later, it has to inspect and run - -- the continuation immediately. 
- -- * The handler can't invoke the continuation multiple times, since - -- evaluation of the continuation will alter the stack. - -- * Is that sufficient? Does it matter if continuation is called in - -- tail position or not? - -- - -- Leijn's "Implementing Algebraic Effects in C" paper mentions there's - -- a speedup in the case where the handler uses its continuation just once - -- in tail position: - -- https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/algeff-in-c-tr-v2.pdf - handlerNeedsCopy :: Value -> Bool - handlerNeedsCopy _ = True -- overly conservative choice, but never wrong! - - runHandler' :: Size -> Stack -> Value -> Result -> IO Result - runHandler' size m handler r = case r of - RRequest req -> do - req <- if handlerNeedsCopy handler then copyRequest req else pure req - m <- push size (Requested req) m - result <- call (size + 1) m handler [Slot 0] - case result of - RMatchFail _ _ _ -> pure $ RRequest (wrapHandler handler req) - r -> pure r - RDone v -> do - m <- push size (Pure v) m - call (size + 1) m handler [Slot 0] - r -> pure r - - call :: Size -> Stack -> Value -> [Z] -> IO Result - -- call _ _ fn@(Lam _ _ _) args | trace ("call "<> show fn <> " " <>show args) False = undefined - call size m fn@(Lam arity underapply body) args = let nargs = length args in - -- fully applied call, `(x y -> ..) 9 10` - if nargs == arity then case underapply of - -- when calling a closure, we supply all the closure arguments, before - -- `args`. See fix528.u for an example. - FormClosure _hash _tm pushedArgs -> do - (size, m) <- pushManyZ size (fmap Val (reverse pushedArgs) ++ args) m - go size m body - _ -> do - (size, m) <- pushManyZ size args m - go size m body - -- overapplied call, e.g. `id id 42` - else if nargs > arity then do - let (usedArgs, extraArgs) = splitAt arity args - result <- call size m fn usedArgs - case result of - RDone fn' -> call size m fn' extraArgs - -- foo : Int ->{IO} (Int -> Int) - -- ... 
- -- (foo 12 12) - RRequest req -> do - let overApplyName = Var.named "oa" - extraArgvs <- for extraArgs $ \arg -> at size arg m - pure . RRequest . appendCont overApplyName req $ - One 0 size m (Apply (Leaf (Slot 0)) (Val <$> extraArgvs)) - e -> error $ "type error, tried to apply: " <> show e - -- underapplied call, e.g. `(x y -> ..) 9` - else do - argvs <- for args $ \arg -> at size arg m - case underapply of - -- Example 1: - -- f = x y z p -> x - y - z - p - -- f' = f 1 2 -- Specialize f [2, 1] -- each arg is pushed onto top - -- f'' = f' 3 -- Specialize f [3, 2, 1] - -- f'' 4 -- should be the same thing as `f 1 2 3 4` - -- - -- pushedArgs = [mostRecentlyApplied, ..., firstApplied] - Specialize hash lam@(Term.LamsNamed' vs body) pushedArgs -> let - pushedArgs' :: [ (SymbolC, Value)] -- head is the latest argument - pushedArgs' = reverse (drop (length pushedArgs) vs `zip` argvs) ++ pushedArgs - vsRemaining = drop (length pushedArgs') vs - compiled = compile0 env - (reverse (fmap (,Nothing) vsRemaining) ++ - fmap (second Just) pushedArgs') - body - in done $ Lam (arity - nargs) (Specialize hash lam pushedArgs') compiled - Specialize _ e pushedArgs -> error $ "can't underapply a non-lambda: " <> show e <> " " <> show pushedArgs - FormClosure hash tm pushedArgs -> - let pushedArgs' = reverse argvs ++ pushedArgs - in done $ Lam (arity - nargs) (FormClosure hash tm pushedArgs') body - call size m (Cont k) [arg] = do - v <- at size arg m - callContinuation size m k v - call size m fn args = do - s0 <- traverse (MV.read m) [0..size-1] - let s = [(0::Int)..] 
`zip` reverse s0 - error $ "type error - tried to apply a non-function: " <> - show fn <> " " <> show args <> "\n" <> - "[\n " <> - intercalateMap "\n " (\(i,v) -> "Slot " <> show i <> ": " <> take 50 (show v)) s - <> "\n]" - - callContinuation :: Size -> Stack -> Continuation -> Value -> IO Result - callContinuation size m k v = case k of - One _ size m ir -> do - m <- push size v m - go (size + 1) m ir - WrapHandler h k -> runHandler' size m h =<< callContinuation size m k v - -- reassociate to the right during execution, is this needed and why? - Chain v1 (Chain v2 k1 k2) k3 -> - callContinuation size m (Chain v1 k1 (Chain v2 k2 k3)) v - Chain var k1 k2 -> do - r <- callContinuation size m k1 v - case r of - RDone v -> callContinuation size m k2 v - RRequest req -> pure $ RRequest (appendCont var req k2) - _ -> pure r - - copyContinuation :: Continuation -> IO Continuation - copyContinuation k = case k of - -- reassociate to the right during copying, is this needed and why? - Chain v1 (Chain v2 k1 k2) k3 -> - copyContinuation (Chain v1 k1 (Chain v2 k2 k3)) - Chain v k1 k2 -> Chain v <$> copyContinuation k1 <*> copyContinuation k2 - One needed size stack ir -> do - -- (@0 + @3) -- 3 needed from old stack - -- (@0) -- 0 needed from old stack - -- (1 + 1) -- 0 needed from old stack - let slice = MV.slice (size - needed) needed stack - copied <- MV.clone slice - pure $ One needed (MV.length copied) copied ir - WrapHandler h k -> WrapHandler h <$> copyContinuation k - - copyRequest :: Req -> IO Req - copyRequest (Req r cid args k) = Req r cid args <$> copyContinuation k - - -- Just = match success, Nothing = match fail - -- Returns Values to be put on the stack when evaluating case guard/body - tryCase :: (Value, Pattern) -> Maybe [Value] - -- tryCase x | trace ("tryCase " ++ show x ++ " =") False = undefined - -- tryCase x = traceShowId $ case x of - tryCase = \case - (I x, PatternI x2) -> when' (x == x2) $ Just [] - (F x, PatternF x2) -> when' (x == x2) $ Just [] - 
(N x, PatternN x2) -> when' (x == x2) $ Just [] - (B x, PatternB x2) -> when' (x == x2) $ Just [] - (T x, PatternT x2) -> when' (x == x2) $ Just [] - (C x, PatternC x2) -> when' (x == x2) $ Just [] - (Data r cid args, PatternData r2 cid2 pats) - -> if r == r2 && cid == cid2 - then join <$> traverse tryCase (zip args pats) - else Nothing - (Sequence args, PatternSequenceLiteral pats) -> - if length args == length pats then join <$> traverse tryCase (zip (toList args) pats) else Nothing - (Sequence args, PatternSequenceCons l r) -> - case args of - h Sequence.:<| t -> (++) <$> tryCase (h, l) <*> tryCase (IR.Sequence t, r) - _ -> Nothing - (Sequence args, PatternSequenceSnoc l r) -> - case args of - t Sequence.:|> h -> (++) <$> tryCase (IR.Sequence t, l) <*> tryCase (h, r) - _ -> Nothing - (Sequence args, PatternSequenceConcat litLen l r) -> - (++) <$> tryCase (IR.Sequence a1, l) <*> tryCase (IR.Sequence a2, r) - where - (a1, a2) = Sequence.splitAt i args - i = either id (\j -> length args - j) litLen - (Pure v, PatternPure p) -> tryCase (v, p) - (Pure _, PatternBind _ _ _ _) -> Nothing - (Requested (Req r cid args k), PatternBind r2 cid2 pats kpat) -> - if r == r2 && cid == cid2 - then join <$> traverse tryCase (zip (args ++ [Cont k]) (pats ++ [kpat])) - else Nothing - (Requested _, PatternPure _) -> Nothing - (v, PatternAs p) -> (v:) <$> tryCase (v, p) - (_, PatternIgnore) -> Just [] - (v, PatternVar) -> Just [v] - (v, p) -> error $ - "bug: type error in pattern match: " <> - "tryCase (" <> show v <> ", " <> show p <> ")" - where when' b m = if b then m else Nothing - - tryCases size scrute m ((pat, _vars, cond, body) : remainingCases) = - case tryCase (scrute, pat) of - Nothing -> tryCases size scrute m remainingCases -- this pattern didn't match - Just vars -> do - (size', m) <- pushMany size vars m - case cond of - Just cond -> do - RDone (B cond) <- go size' m cond - if cond then go size' m body - else tryCases size scrute m remainingCases - Nothing -> go size' 
m body - tryCases sz scrute _ _ = - pure $ RMatchFail sz [] scrute - - -- To evaluate a `let rec`, we push an empty `Ref` onto the stack for each - -- binding, then evaluate each binding and set that `Ref` to its result. - -- As long as the variable references occur within a function body, - -- there's no problem. - letrec :: Size -> Stack -> [(Symbol, IR)] -> IR -> IO Result - letrec size m bs body = do - refs <- for bs $ \(v,b) -> do - r <- newIORef (UninitializedLetRecSlot v bs body) - i <- fresh - pure (Ref i v r, b) - -- push the empty references onto the stack - (size', m) <- pushMany size (fst <$> refs) m - for_ refs $ \(Ref _ _ r, ir) -> do - let toVal (RDone a) = a - toVal e = error ("bindings in a let rec must not have effects " ++ show e) - result <- toVal <$> go size' m ir - writeIORef r result - go size' m body - - -- Garbage collect the elements of the stack that are more than `maxSlot` - -- from the top - this is done just by copying to a fresh stack. - gc :: Size -> Stack -> Int -> IO (Size, Stack) - -- when maxSlot = -1, nothing from the old stack is needed. 
- gc _ _ _maxSlot@(-1) = do m <- MV.new 256; pure (0, m) - gc size m maxSlot = do - let start = size - maxSlot - 1 - len = maxSlot + 1 - m <- MV.clone $ MV.slice start len m - pure (len, m) - - loop (RRequest (Req ref cid vs k)) = do - ioResult <- ioHandler ref cid vs - case ioResult of - RDone ioResult -> do - x <- callContinuation 0 m0 k ioResult - loop x - r -> pure r - loop a = pure a - - r <- go 0 m0 ir - loop r - -instance Show ExternalFunction where - show _ = "ExternalFunction" - -instance CyclicEq ExternalFunction where - cyclicEq _ _ (ExternalFunction r _) (ExternalFunction r2 _) = pure (r == r2) - -instance CyclicOrd ExternalFunction where - cyclicOrd _ _ (ExternalFunction r _) (ExternalFunction r2 _) = pure (r `compare` r2) - -instance CyclicEq Continuation where - cyclicEq h1 h2 k1 k2 = do - n1 <- S.makeStableName k1 - n2 <- S.makeStableName k2 - if n1 == n2 then pure True - else case (k1, k2) of - (WrapHandler v1 k1, WrapHandler v2 k2) -> do - b <- cyclicEq h1 h2 v1 v2 - if b then cyclicEq h1 h2 k1 k2 - else pure False - (Chain _ k1 k2, Chain _ k1a k2a) -> do - b <- cyclicEq h1 h2 k1 k1a - if b then cyclicEq h1 h2 k2 k2a - else pure False - (One _needed1 _size1 _s1 _ir1, One _needed2 _size2 _s2 _ir2) -> - error "todo - fill CyclicEq Continuation" - _ -> pure False - -instance CyclicOrd Continuation where - cyclicOrd h1 h2 k1 k2 = do - n1 <- S.makeStableName k1 - n2 <- S.makeStableName k2 - if n1 == n2 then pure EQ - else case (k1, k2) of - (WrapHandler v1 k1, WrapHandler v2 k2) -> do - b <- cyclicOrd h1 h2 v1 v2 - if b == EQ then cyclicOrd h1 h2 k1 k2 - else pure b - (Chain _ k1 k2, Chain _ k1a k2a) -> do - b <- cyclicOrd h1 h2 k1 k1a - if b == EQ then cyclicOrd h1 h2 k2 k2a - else pure b - (One _needed1 _size1 _s1 _ir1, One _needed2 _size2 _s2 _ir2) -> - error "todo - fill CyclicOrd Continuation" - _ -> pure $ continuationConstructorId k1 `compare` continuationConstructorId k2 - -continuationConstructorId :: Continuation -> Int 
-continuationConstructorId k = case k of - One _ _ _ _ -> 0 - Chain _ _ _ -> 1 - WrapHandler _ _ -> 2 - -truncate0 :: (Num a, Ord a) => a -> a -truncate0 x = if x >= 0 then x else 0 diff --git a/parser-typechecker/src/Unison/Runtime/Rt1IO.hs b/parser-typechecker/src/Unison/Runtime/Rt1IO.hs deleted file mode 100644 index d09db6fd2e..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Rt1IO.hs +++ /dev/null @@ -1,529 +0,0 @@ -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE Rank2Types #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Runtime.Rt1IO where - -import Unison.Prelude - -import Control.Exception ( throwIO - , AsyncException(UserInterrupt) - , finally - , bracket - , asyncExceptionFromException - ) -import Control.Concurrent ( ThreadId - , forkIO - , killThread - , threadDelay - ) -import Control.Concurrent.MVar ( MVar - , modifyMVar_ - , readMVar - , newMVar - , newEmptyMVar - , takeMVar - , putMVar - ) -import Control.Lens -import Control.Monad.Morph ( hoist ) -import Control.Monad.Reader ( ReaderT - , runReaderT - , ask - ) -import Control.Monad.Except ( ExceptT(..) - , runExceptT - , throwError - ) -import Data.GUID ( genText ) -import qualified Data.Map as Map -import qualified Data.Sequence as Seq -import Data.Text as Text -import qualified Data.Text.IO as TextIO -import Data.Time.Clock.POSIX as Time -import qualified Network.Simple.TCP as Net -import qualified Network.Socket as Sock ---import qualified Network.Socket as Sock -import System.IO ( Handle - , IOMode(..) - , SeekMode(..) - , BufferMode(..) 
- , openFile - , hClose - , stdin - , stdout - , stderr - , hIsEOF - , hIsSeekable - , hSeek - , hTell - , hGetBuffering - , hSetBuffering - ) -import System.Directory ( getCurrentDirectory - , setCurrentDirectory - , getTemporaryDirectory - , getDirectoryContents - , doesPathExist - , doesDirectoryExist - , createDirectoryIfMissing - , removeDirectoryRecursive - , renameDirectory - , removeFile - , renameFile - , getModificationTime - , getFileSize - ) -import qualified System.IO.Error as SysError -import Type.Reflection ( Typeable ) -import Unison.Builtin.Decls as DD -import Unison.Symbol -import Unison.Parser ( Ann(External) ) -import qualified Unison.Reference as R -import qualified Unison.Runtime.Rt1 as RT -import qualified Unison.Runtime.IR as IR -import qualified Unison.Term as Term --- import Debug.Trace --- import qualified Unison.Util.Pretty as Pretty --- import Unison.TermPrinter ( pretty ) -import Unison.Codebase.Runtime ( Runtime(Runtime) ) -import Unison.Codebase.MainTerm ( nullaryMain ) -import qualified Unison.Runtime.IOSource as IOSrc -import qualified Unison.Util.Bytes as Bytes -import qualified Unison.Var as Var -import qualified Unison.Util.Pretty as P -import qualified Unison.TermPrinter as TermPrinter -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Typechecker.Components as Components - --- TODO: Make this exception more structured? 
-newtype UnisonRuntimeException = UnisonRuntimeException Text - deriving (Typeable, Show) - -instance Exception UnisonRuntimeException - -type GUID = Text - -data IOState = IOState - { _handleMap :: HandleMap - , _socketMap :: SocketMap - , _threadMap :: ThreadMap - } - -type UIO a = ExceptT IOError (ReaderT S IO) a -type HandleMap = Map GUID Handle -type SocketMap = Map GUID Net.Socket -type ThreadMap = Map GUID ThreadId - -newtype S = S {_ioState :: MVar IOState } - -makeLenses 'S -makeLenses 'IOState - -haskellMode :: Text -> IOMode -haskellMode mode = case mode of - "io.Mode.Read" -> ReadMode - "io.Mode.Write" -> WriteMode - "io.Mode.Append" -> AppendMode - "io.Mode.ReadWrite" -> ReadWriteMode - _ -> error . Text.unpack $ "Unknown IO mode " <> mode - -newUnisonHandle :: Handle -> UIO RT.Value -newUnisonHandle h = do - t <- liftIO genText - m <- view ioState - liftIO . modifyMVar_ m $ pure . over handleMap (Map.insert t h) - pure $ IR.Data IOSrc.handleReference IOSrc.handleId [IR.T t] - -newUnisonSocket :: Net.Socket -> UIO RT.Value -newUnisonSocket s = do - t <- liftIO genText - m <- view ioState - liftIO . modifyMVar_ m $ pure . over socketMap (Map.insert t s) - pure $ IR.Data IOSrc.socketReference IOSrc.socketId [IR.T t] - -deleteUnisonHandle :: Text -> UIO () -deleteUnisonHandle h = do - m <- view ioState - liftIO . modifyMVar_ m $ pure . over handleMap (Map.delete h) - -getHaskellHandle :: Text -> UIO (Maybe Handle) -getHaskellHandle h = do - m <- view ioState - v <- liftIO $ readMVar m - pure . Map.lookup h $ view handleMap v - -getHaskellHandleOrThrow :: Text -> UIO Handle -getHaskellHandleOrThrow h = getHaskellHandle h >>= maybe throwHandleClosed pure - -getHaskellSocket :: Text -> UIO (Maybe Net.Socket) -getHaskellSocket s = do - m <- view ioState - v <- liftIO $ readMVar m - pure . 
Map.lookup s $ view socketMap v - -getHaskellSocketOrThrow :: Text -> UIO Net.Socket -getHaskellSocketOrThrow s = getHaskellSocket s >>= maybe throwSocketClosed pure - -constructLeft :: RT.Value -> RT.Value -constructLeft v = IR.Data IOSrc.eitherReference IOSrc.eitherLeftId [v] - -constructRight :: RT.Value -> RT.Value -constructRight v = IR.Data IOSrc.eitherReference IOSrc.eitherRightId [v] - -constructSome :: RT.Value -> RT.Value -constructSome v = IR.Data IOSrc.optionReference IOSrc.someId [v] - -constructNone :: RT.Value -constructNone = IR.Data IOSrc.optionReference IOSrc.noneId [] - -convertMaybe :: Maybe RT.Value -> RT.Value -convertMaybe Nothing = constructNone -convertMaybe (Just v) = constructSome v - -convertOptional :: RT.Value -> Maybe RT.Value -convertOptional (IR.Data _ _ [] ) = Nothing -convertOptional (IR.Data _ _ [x]) = Just x -convertOptional v = - error - $ "Compiler bug! This value showed up at runtime where " - <> "an Optional was expected: " - <> show v - -constructPair :: RT.Value -> RT.Value -> RT.Value -constructPair a b = IR.Data DD.pairRef 0 [a, b] - -convertErrorType :: IOError -> IR.ConstructorId -convertErrorType (SysError.ioeGetErrorType -> e) - | SysError.isAlreadyExistsErrorType e = IOSrc.alreadyExistsId - | SysError.isDoesNotExistErrorType e = IOSrc.noSuchThingId - | SysError.isAlreadyInUseErrorType e = IOSrc.resourceBusyId - | SysError.isFullErrorType e = IOSrc.resourceExhaustedId - | SysError.isEOFErrorType e = IOSrc.eofId - | SysError.isIllegalOperationErrorType e = IOSrc.illegalOperationId - | SysError.isPermissionErrorType e = IOSrc.permissionDeniedId - | otherwise = IOSrc.userErrorId - -haskellSeekMode :: Text -> SeekMode -haskellSeekMode mode = case mode of - "io.SeekMode.Absolute" -> AbsoluteSeek - "io.SeekMode.Relative" -> RelativeSeek - "io.SeekMode.FromEnd" -> SeekFromEnd - _ -> error . 
Text.unpack $ "Unknown seek mode " <> mode - -haskellBufferMode :: RT.Value -> BufferMode -haskellBufferMode mode = case mode of - IR.Data _ _ [] -> NoBuffering - IR.Data _ _ [IR.Data _ _ [] ] -> LineBuffering - IR.Data _ _ [IR.Data _ _ [IR.Data _ _ []]] -> BlockBuffering Nothing - IR.Data _ _ [IR.Data _ _ [IR.Data _ _ [IR.N n]]] -> - BlockBuffering (Just $ fromIntegral n) - _ -> error $ "Unknown buffer mode " <> show mode - -unisonBufferMode :: BufferMode -> RT.Value -unisonBufferMode mode = case mode of - NoBuffering -> constructNone - LineBuffering -> - constructSome (IR.Data IOSrc.bufferModeReference IOSrc.bufferModeLineId []) - BlockBuffering Nothing -> constructSome - (IR.Data IOSrc.bufferModeReference IOSrc.bufferModeBlockId [constructNone]) - BlockBuffering (Just size) -> constructSome - (IR.Data IOSrc.bufferModeReference - IOSrc.bufferModeBlockId - [constructSome . IR.N $ fromIntegral size] - ) - -unisonFilePath :: FilePath -> RT.Value -unisonFilePath fp = - IR.Data IOSrc.filePathReference IOSrc.filePathId [IR.T $ Text.pack fp] - -hostPreference :: [RT.Value] -> Net.HostPreference -hostPreference [] = Net.HostAny -hostPreference [IR.Data _ _ [IR.T host]] = Net.Host $ Text.unpack host -hostPreference x = - error $ "Runtime bug! Not a valid host preference: " <> show x - -constructIoError :: IOError -> RT.Value -constructIoError e = IR.Data - IOSrc.errorReference - IOSrc.ioErrorId - [ IR.Data IOSrc.errorTypeReference (convertErrorType e) [] - , IR.T . Text.pack $ show e - ] - -handleIO' - :: RT.CompilationEnv - -> S - -> R.Reference - -> IR.ConstructorId - -> [RT.Value] - -> IO RT.Result -handleIO' cenv s rid cid vs = case rid of - R.DerivedId x | x == IOSrc.ioHash -> flip runReaderT s $ do - ev <- runExceptT $ handleIO cenv cid vs - case ev of - Left e -> pure . RT.RDone . constructLeft $ constructIoError e - Right v -> pure . RT.RDone $ constructRight v - _ -> RT.RRequest . 
IR.Req rid cid vs <$> RT.idContinuation - -reraiseIO :: IO a -> UIO a -reraiseIO a = ExceptT . lift $ try @IOError $ liftIO a - -throwHandleClosed :: UIO a -throwHandleClosed = throwError $ illegalOperation "handle is closed" - -throwSocketClosed :: UIO a -throwSocketClosed = throwError $ illegalOperation "socket is closed" - -illegalOperation :: String -> IOError -illegalOperation msg = - SysError.mkIOError SysError.illegalOperationErrorType msg Nothing Nothing - -handleIO :: RT.CompilationEnv -> IR.ConstructorId -> [RT.Value] -> UIO RT.Value -handleIO cenv cid = go (IOSrc.constructorName IOSrc.ioReference cid) - where - go "io.IO.openFile_" [IR.Data _ 0 [IR.T filePath], IR.Data _ mode _] = do - let n = IOSrc.constructorName IOSrc.ioModeReference mode - h <- reraiseIO . openFile (Text.unpack filePath) $ haskellMode n - newUnisonHandle h - go "io.IO.closeFile_" [IR.Data _ 0 [IR.T handle]] = do - hh <- getHaskellHandle handle - reraiseIO $ maybe (pure ()) hClose hh - deleteUnisonHandle handle - pure IR.unit - go "io.IO.isFileEOF_" [IR.Data _ 0 [IR.T handle]] = do - hh <- getHaskellHandleOrThrow handle - isEOF <- reraiseIO $ hIsEOF hh - pure $ IR.B isEOF - go "io.IO.isFileOpen_" [IR.Data _ 0 [IR.T handle]] = - IR.B . isJust <$> getHaskellHandle handle - go "io.IO.getLine_" [IR.Data _ 0 [IR.T handle]] = do - hh <- getHaskellHandleOrThrow handle - line <- reraiseIO $ TextIO.hGetLine hh - pure . IR.T $ line - go "io.IO.getText_" [IR.Data _ 0 [IR.T handle]] = do - hh <- getHaskellHandleOrThrow handle - text <- reraiseIO $ TextIO.hGetContents hh - pure . IR.T $ text - go "io.IO.putText_" [IR.Data _ 0 [IR.T handle], IR.T string] = do - hh <- getHaskellHandleOrThrow handle - reraiseIO . TextIO.hPutStr hh $ string - pure IR.unit - go "io.IO.throw" [IR.Data _ _ [IR.Data _ _ [], IR.T message]] = - liftIO . 
throwIO $ UnisonRuntimeException message - go "io.IO.isSeekable_" [IR.Data _ 0 [IR.T handle]] = do - hh <- getHaskellHandleOrThrow handle - seekable <- reraiseIO $ hIsSeekable hh - pure $ IR.B seekable - go "io.IO.seek_" [IR.Data _ 0 [IR.T handle], IR.Data _ seekMode [], IR.I int] - = do - hh <- getHaskellHandleOrThrow handle - let mode = IOSrc.constructorName IOSrc.seekModeReference seekMode - reraiseIO . hSeek hh (haskellSeekMode mode) $ fromIntegral int - pure IR.unit - go "io.IO.position_" [IR.Data _ 0 [IR.T handle]] = do - hh <- getHaskellHandleOrThrow handle - pos <- reraiseIO $ hTell hh - pure . IR.I $ fromIntegral pos - go "io.IO.getBuffering_" [IR.Data _ 0 [IR.T handle]] = do - hh <- getHaskellHandleOrThrow handle - bufMode <- reraiseIO $ hGetBuffering hh - pure $ unisonBufferMode bufMode - go "io.IO.setBuffering_" [IR.Data _ 0 [IR.T handle], o] = do - hh <- getHaskellHandleOrThrow handle - reraiseIO . hSetBuffering hh $ haskellBufferMode o - pure IR.unit - go "io.IO.systemTime_" [] = do - t <- reraiseIO $ fmap round Time.getPOSIXTime - pure $ IR.Data IOSrc.epochTimeReference IOSrc.epochTimeId [IR.N t] - go "io.IO.getTemporaryDirectory_" [] = - reraiseIO $ unisonFilePath <$> getTemporaryDirectory - go "io.IO.getCurrentDirectory_" [] = - reraiseIO $ unisonFilePath <$> getCurrentDirectory - go "io.IO.setCurrentDirectory_" [IR.Data _ _ [IR.T dir]] = do - reraiseIO . setCurrentDirectory $ Text.unpack dir - pure IR.unit - go "io.IO.directoryContents_" [IR.Data _ _ [IR.T dir]] = - reraiseIO - $ IR.Sequence - . Seq.fromList - . 
fmap unisonFilePath - <$> getDirectoryContents (Text.unpack dir) - go "io.IO.fileExists_" [IR.Data _ _ [IR.T dir]] = - reraiseIO $ IR.B <$> doesPathExist (Text.unpack dir) - go "io.IO.isDirectory_" [IR.Data _ _ [IR.T dir]] = - reraiseIO $ IR.B <$> doesDirectoryExist (Text.unpack dir) - go "io.IO.createDirectory_" [IR.Data _ _ [IR.T dir]] = do - reraiseIO $ createDirectoryIfMissing True (Text.unpack dir) - pure IR.unit - go "io.IO.removeDirectory_" [IR.Data _ _ [IR.T dir]] = do - reraiseIO . removeDirectoryRecursive $ Text.unpack dir - pure IR.unit - go "io.IO.renameDirectory_" [IR.Data _ _ [IR.T from], IR.Data _ _ [IR.T to]] - = do - reraiseIO $ renameDirectory (Text.unpack from) (Text.unpack to) - pure IR.unit - go "io.IO.removeFile_" [IR.Data _ _ [IR.T file]] = do - reraiseIO . removeFile $ Text.unpack file - pure IR.unit - go "io.IO.renameFile_" [IR.Data _ _ [IR.T from], IR.Data _ _ [IR.T to]] = do - reraiseIO $ renameFile (Text.unpack from) (Text.unpack to) - pure IR.unit - go "io.IO.getFileTimestamp_" [IR.Data _ _ [IR.T file]] = do - t <- reraiseIO $ getModificationTime (Text.unpack file) - pure $ IR.Data IOSrc.epochTimeReference - IOSrc.epochTimeId - [IR.N . round $ Time.utcTimeToPOSIXSeconds t] - go "io.IO.getFileSize_" [IR.Data _ _ [IR.T file]] = - reraiseIO $ IR.N . fromIntegral <$> getFileSize (Text.unpack file) - go "io.IO.serverSocket_" [IR.Data _ _ mayHost, IR.Data _ _ [IR.T port]] = do - (s, _) <- reraiseIO - $ Net.bindSock (hostPreference mayHost) (Text.unpack port) - newUnisonSocket s - go "io.IO.listen_" [IR.Data _ _ [IR.T socket]] = do - hs <- getHaskellSocketOrThrow socket - reraiseIO $ Net.listenSock hs 2048 - pure IR.unit - go "io.IO.clientSocket_" [IR.Data _ _ [IR.T host], IR.Data _ _ [IR.T port]] = - do - (s, _) <- reraiseIO . 
Net.connectSock (Text.unpack host) $ Text.unpack - port - newUnisonSocket s - go "io.IO.closeSocket_" [IR.Data _ _ [IR.T socket]] = do - hs <- getHaskellSocket socket - reraiseIO $ traverse_ Net.closeSock hs - pure IR.unit - go "io.IO.accept_" [IR.Data _ _ [IR.T socket]] = do - hs <- getHaskellSocketOrThrow socket - conn <- reraiseIO $ Sock.accept hs - newUnisonSocket $ fst conn - go "io.IO.send_" [IR.Data _ _ [IR.T socket], IR.Bs bs] = do - hs <- getHaskellSocketOrThrow socket - reraiseIO . Net.send hs $ Bytes.toByteString bs - pure IR.unit - go "io.IO.receive_" [IR.Data _ _ [IR.T socket], IR.N n] = do - hs <- getHaskellSocketOrThrow socket - bs <- reraiseIO . Net.recv hs $ fromIntegral n - pure . convertMaybe $ IR.Bs . Bytes.fromByteString <$> bs - go "io.IO.fork_" [IR.Lam _ _ ir] = do - s <- ask - t <- liftIO genText - lock <- liftIO newEmptyMVar - m <- view ioState - id <- reraiseIO . forkIO . void $ do - void $ takeMVar lock - forceThunk cenv s ir - `finally` modifyMVar_ m (pure . over threadMap (Map.delete t)) - liftIO . modifyMVar_ m $ pure . over threadMap (Map.insert t id) - liftIO $ putMVar lock () - pure $ IR.Data IOSrc.threadIdReference IOSrc.threadIdId [IR.T t] - go "io.IO.kill_" [IR.Data _ _ [IR.T thread]] = do - m <- view ioState - map <- liftIO $ view threadMap <$> readMVar m - liftIO $ case Map.lookup thread map of - Nothing -> pure IR.unit - Just ht -> do - killThread ht - pure IR.unit - go "io.IO.delay_" [IR.N n] = do - reraiseIO . threadDelay $ fromIntegral n - pure IR.unit - go "io.IO.bracket_" [IR.Lam _ _ acquire, IR.Lam _ _ release, IR.Lam _ _ use] - = do - s <- ask - let resultToVal (RT.RDone v) = pure v - resultToVal v = - fail $ "IO bracket expected a value but got " <> show v - reraiseIO $ resultToVal =<< bracket - (resultToVal =<< forceThunk cenv s acquire) - (lamToHask cenv s release) - (lamToHask cenv s use) - go a _b = error $ show a <> " is not implemented yet." 
- -- error - -- $ "IO handler called with unimplemented cid " - -- <> show cid - -- <> " and " - -- <> show a - -- <> " args " - -- <> show b - -forceThunk :: RT.CompilationEnv -> S -> RT.IR -> IO RT.Result -forceThunk cenv s ir = lamToHask cenv s ir IR.unit - -lamToHask :: RT.CompilationEnv -> S -> RT.IR -> RT.Value -> IO RT.Result -lamToHask cenv s ir val = RT.run (handleIO' cenv s) cenv $ task val - where task x = IR.Let (Var.named "_") (IR.Leaf (IR.Val x)) ir mempty - -runtime :: Runtime Symbol -runtime = Runtime terminate eval (nullaryMain External) - where - terminate :: IO () - terminate = pure () - eval cl' ppe term = do - let cl = void (hoist (pure . runIdentity) IOSrc.codeLookup) <> cl' - -- traceM $ Pretty.render 80 (pretty mempty term) - cenv <- RT.compilationEnv cl term -- in `m` - mmap <- newMVar $ IOState - (Map.fromList [("stdin", stdin), ("stdout", stdout), ("stderr", stderr)]) - Map.empty - Map.empty - term <- case Components.minimize' term of - Left es -> fail . reportBug "B23784210" $ - "Term contains duplicate definitions: " <> show (fst <$> es) - Right term -> pure term - r <- try $ RT.run (handleIO' cenv $ S mmap) - cenv - (IR.compile cenv $ Term.amap (const ()) term) - toTermOrError ppe r - -toTermOrError :: PPE.PrettyPrintEnv -> Either SomeException RT.Result - -> IO (Either (P.Pretty P.ColorText) (IR.Term Symbol)) -toTermOrError ppe r = case r of - Right (RT.RDone result) -> Right <$> IR.decompile result - Right (RT.RMatchFail _ _ scrute) -> do - scrute <- IR.decompile scrute - pure . Left . P.callout icon . P.lines $ [ - P.wrap ("I've encountered a" <> P.red "pattern match failure" - <> "while scrutinizing:"), "", - P.indentN 2 $ TermPrinter.pretty ppe scrute, - "", - P.wrap "This happens when calling a function that doesn't handle all possible inputs.", - "", sorryMsg - ] - Right (RT.RError t val) -> do - msg <- IR.decompile val - let errorType = case t of - RT.ErrorTypeTodo -> "builtin.todo" - RT.ErrorTypeBug -> "builtin.bug" - pure . 
Left . P.callout icon . P.lines $ [ - P.wrap ("I've encountered a call to" <> P.red errorType - <> "with the following value:"), "", - P.indentN 2 $ TermPrinter.pretty ppe msg, - "", sorryMsg - ] - Right (RT.RRequest (IR.Req r cid vs _)) -> do - vs <- traverse IR.decompile vs - let tm = Term.apps' (Term.request() r cid) vs - pure . Left . P.callout icon . P.lines $ [ - P.wrap ("I stopped evaluation after encountering an " <> P.red "unhandled request:"), "", - P.indentN 2 $ TermPrinter.pretty ppe tm, - "", - P.wrap "This happens when using a handler that doesn't handle all possible requests.", - "", sorryMsg - ] - Left (asyncExceptionFromException -> Just e) -> pure . Left . P.callout "⏹" $ - case e of - UserInterrupt -> P.wrap $ "I've" <> P.purple "cancelled evaluation." - e -> P.wrap $ "I've stopped evaluation after receiving a " - <> P.purple (P.shown e) <> "signal." - Left e -> pure . Left . P.callout icon . P.lines $ [ - P.wrap ("I stopped evaluation after encountering " <> P.red "an error:"), "", - P.indentN 2 $ P.string (show (e :: SomeException)), - "", sorryMsg - ] - where - icon = "💔💥" - sorryMsg = P.wrap $ "I'm sorry this message doesn't have more detail about" - <> "the location of the failure." - <> "My makers plan to fix this in a future release. 😢" diff --git a/parser-typechecker/src/Unison/Runtime/SparseVector.hs b/parser-typechecker/src/Unison/Runtime/SparseVector.hs deleted file mode 100644 index 3ae57b9102..0000000000 --- a/parser-typechecker/src/Unison/Runtime/SparseVector.hs +++ /dev/null @@ -1,128 +0,0 @@ -{-# Language BangPatterns #-} -{-# Language MagicHash #-} -- used for unsafe pointer equality - -module Unison.Runtime.SparseVector where - -import Prelude hiding (unzip) -import qualified Data.Vector.Unboxed.Mutable as MUV -import Data.Bits ((.|.), (.&.)) -import qualified Data.Bits as B -import qualified GHC.Exts as Exts -import qualified Data.Vector.Unboxed as UV - --- Denotes a `Nat -> Maybe a`. 
--- Representation is a `Vector a` along with a bitset --- that encodes the index of each element. --- Ex: `[(1,a), (5,b)]` is encoded as (100010, [a,b]) -data SparseVector bits a - = SparseVector { indices :: !bits - , elements :: !(UV.Vector a) } - --- todo: instance (UV.Unbox a, B.FiniteBits bits, Num n) --- => Num (SparseVector bits n) - --- Denotationally: `map f v n = f <$> v n` -map :: (UV.Unbox a, UV.Unbox b) => (a -> b) -> SparseVector bits a -> SparseVector bits b -map f v = v { elements = UV.map f (elements v) } - --- Denotationally, a mask is a `Nat -> Bool`, so this implementation --- means: `mask ok v n = if ok n then v n else Nothing` -mask :: (UV.Unbox a, B.FiniteBits bits) - => bits -> SparseVector bits a -> SparseVector bits a -mask bits a = - if indices' == bits then a -- check if mask is a superset - else SparseVector indices' $ UV.create $ do - vec <- MUV.new (B.popCount indices') - go vec (indices a) bits 0 0 - where - indices' = indices a .&. bits - eas = elements a - go !out !indAs !indBs !i !k = - if indAs == B.zeroBits || indBs == B.zeroBits then pure out - else let - (!a1, !b1) = (B.countTrailingZeros indAs, B.countTrailingZeros indBs) - in if a1 == b1 then do - MUV.write out k (eas UV.! (i + a1)) - go out (indAs `B.shiftR` (a1 + 1)) (indBs `B.shiftR` (b1 + 1)) - (i + 1) (k + 1) - else if a1 < b1 then - go out (indAs `B.shiftR` (a1 + 1)) indBs - (i + 1) k - else - go out indAs (indBs `B.shiftR` (b1 + 1)) i k - --- Denotationally: `zipWith f a b n = f <$> a n <*> b n`, in other words, --- this takes the intersection of the two shapes. -zipWith - :: (UV.Unbox a, UV.Unbox b, UV.Unbox c, B.FiniteBits bits) - => (a -> b -> c) - -> SparseVector bits a - -> SparseVector bits b - -> SparseVector bits c -zipWith f a b = - if indices a `eq` indices b || indices a == indices b then - SparseVector (indices a) (UV.zipWith f (elements a) (elements b)) - else let - indices' = indices a .&. 
indices b - a' = mask indices' a - b' = mask indices' b - in SparseVector indices' (UV.zipWith f (elements a') (elements b')) - -_1 :: (UV.Unbox a, UV.Unbox b) => SparseVector bits (a,b) -> SparseVector bits a -_1 = fst . unzip - -_2 :: (UV.Unbox a, UV.Unbox b) => SparseVector bits (a,b) -> SparseVector bits b -_2 = snd . unzip - --- Denotationally: `unzip p = (\n -> fst <$> p n, \n -> snd <$> p n)` -unzip :: (UV.Unbox a, UV.Unbox b) - => SparseVector bits (a,b) - -> (SparseVector bits a, SparseVector bits b) -unzip (SparseVector inds ps) = - let (as,bs) = UV.unzip ps - in (SparseVector inds as, SparseVector inds bs) - --- Denotationally: `choose bs a b n = if bs n then a n else b n` -choose :: (B.FiniteBits bits, UV.Unbox a) - => bits - -> SparseVector bits a - -> SparseVector bits a - -> SparseVector bits a -choose bits t f - | B.zeroBits == bits = f - | B.complement bits == B.zeroBits = t - | otherwise = -- it's a mix of true and false - merge (mask bits t) (mask (B.complement bits) f) - --- Denotationally: `merge a b n = a n <|> b n` -merge :: (B.FiniteBits bits, UV.Unbox a) - => SparseVector bits a - -> SparseVector bits a - -> SparseVector bits a -merge a b = SparseVector indices' tricky - where - indices' = indices a .|. indices b - tricky = UV.create $ do - vec <- MUV.new (B.popCount indices') - go vec (indices a) (indices b) 0 0 0 - (!eas, !ebs) = (elements a, elements b) - go !out !indAs !indBs !i !j !k = - if indAs == B.zeroBits || indBs == B.zeroBits then pure out - else let - (!a1, !b1) = (B.countTrailingZeros indAs, B.countTrailingZeros indBs) - in if a1 == b1 then do - MUV.write out k (eas UV.! (i + a1)) - go out (indAs `B.shiftR` (a1 + 1)) (indBs `B.shiftR` (b1 + 1)) - (i + 1) (j + 1) (k + 1) - else if a1 < b1 then do - MUV.write out k (eas UV.! (i + a1)) - go out (indAs `B.shiftR` (a1 + 1)) indBs - (i + 1) j (k + 1) - else do - MUV.write out k (ebs UV.! 
(j + a1)) - go out indAs (indBs `B.shiftR` (b1 + 1)) i (j + 1) (k + 1) - --- Pointer equality a la Scala. -eq :: a -> a -> Bool -eq x y = Exts.isTrue# (Exts.reallyUnsafePtrEquality# x y Exts.==# 1#) -{-# INLINE eq #-} diff --git a/parser-typechecker/src/Unison/Runtime/Stack.hs b/parser-typechecker/src/Unison/Runtime/Stack.hs deleted file mode 100644 index 5092387e08..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Stack.hs +++ /dev/null @@ -1,642 +0,0 @@ -{-# language GADTs #-} -{-# language DataKinds #-} -{-# language BangPatterns #-} -{-# language TypeFamilies #-} -{-# language ViewPatterns #-} -{-# language PatternGuards #-} -{-# language PatternSynonyms #-} - -module Unison.Runtime.Stack - ( K(..) - , IComb(.., Lam_) - , Closure(.., DataC, PApV, CapV) - , Callback(..) - , Augment(..) - , Dump(..) - , MEM(..) - , Stack(..) - , Off - , SZ - , FP - , universalCompare - , marshalToForeign - , unull - , bnull - , peekD - , peekOffD - , pokeD - , pokeOffD - , peekN - , peekOffN - , pokeN - , pokeOffN - , peekOffS - , pokeS - , pokeOffS - , peekOffT - , pokeT - , peekOffB - , pokeB - , uscount - , bscount - ) where - -import Prelude hiding (words) - -import Control.Monad (when) -import Control.Monad.Primitive - -import Data.Ord (comparing) -import Data.Foldable (fold) - -import Data.Foldable (toList, for_) -import Data.Primitive.ByteArray -import Data.Primitive.PrimArray -import Data.Primitive.Array -import Data.Sequence (Seq) -import qualified Data.Sequence as Sq -import Data.Text (Text) -import Data.Word - -import Unison.Reference (Reference) - -import Unison.Runtime.ANF (Mem(..), unpackTags, RTag) -import Unison.Runtime.Foreign -import Unison.Runtime.MCode - -import qualified Unison.Type as Ty - -import Unison.Util.EnumContainers as EC -import Unison.Util.Bytes (Bytes) - -import GHC.Stack (HasCallStack) - -newtype Callback = Hook (Stack 'UN -> Stack 'BX -> IO ()) - -instance Eq Callback where _ == _ = True -instance Ord Callback where compare _ _ = EQ - --- 
Evaluation stack -data K - = KE - -- callback hook - | CB Callback - -- mark continuation with a prompt - | Mark !(EnumSet Word64) - !(EnumMap Word64 Closure) - !K - -- save information about a frame for later resumption - | Push !Int -- unboxed frame size - !Int -- boxed frame size - !Int -- pending unboxed args - !Int -- pending boxed args - !Section -- code - !K - deriving (Eq, Ord) - --- Comb with an identifier -data IComb - = IC !Word64 !Comb - deriving (Show) - -instance Eq IComb where - IC i _ == IC j _ = i == j - -pattern Lam_ ua ba uf bf entry <- IC _ (Lam ua ba uf bf entry) - --- TODO: more reliable ordering for combinators -instance Ord IComb where - compare (IC i _) (IC j _) = compare i j - -data Closure - = PAp {-# unpack #-} !IComb -- code - {-# unpack #-} !(Seg 'UN) -- unboxed args - {- unpack -} !(Seg 'BX) -- boxed args - | Enum !Word64 - | DataU1 !Word64 !Int - | DataU2 !Word64 !Int !Int - | DataB1 !Word64 !Closure - | DataB2 !Word64 !Closure !Closure - | DataUB !Word64 !Int !Closure - | DataG !Word64 !(Seg 'UN) !(Seg 'BX) - | Captured !K {-# unpack #-} !(Seg 'UN) !(Seg 'BX) - | Foreign !Foreign - | BlackHole - deriving (Show, Eq, Ord) - -splitData :: Closure -> Maybe (Word64, [Int], [Closure]) -splitData (Enum t) = Just (t, [], []) -splitData (DataU1 t i) = Just (t, [i], []) -splitData (DataU2 t i j) = Just (t, [i,j], []) -splitData (DataB1 t x) = Just (t, [], [x]) -splitData (DataB2 t x y) = Just (t, [], [x,y]) -splitData (DataUB t i y) = Just (t, [i], [y]) -splitData (DataG t us bs) = Just (t, ints us, toList bs) -splitData _ = Nothing - -ints :: ByteArray -> [Int] -ints ba = fmap (indexByteArray ba) [0..n-1] - where - n = sizeofByteArray ba `div` 8 - -pattern DataC rt ct us bs <- - (splitData -> Just (unpackTags -> (rt, ct), us, bs)) - -pattern PApV ic us bs <- PAp ic (ints -> us) (toList -> bs) -pattern CapV k us bs <- Captured k (ints -> us) (toList -> bs) - -{-# complete DataC, PAp, Captured, Foreign, BlackHole #-} -{-# complete DataC, PApV, 
Captured, Foreign, BlackHole #-} -{-# complete DataC, PApV, CapV, Foreign, BlackHole #-} - -closureNum :: Closure -> Int -closureNum PAp{} = 0 -closureNum DataC{} = 1 -closureNum Captured{} = 2 -closureNum Foreign{} = 3 -closureNum BlackHole{} = error "BlackHole" - -universalCompare - :: (Word64 -> Reference) - -> (RTag -> Reference) - -> (Foreign -> Foreign -> Ordering) - -> Closure - -> Closure - -> Ordering -universalCompare comb tag frn = cmpc - where - cmpl cm l r - = compare (length l) (length r) <> fold (zipWith cm l r) - cmpc (DataC rt1 ct1 us1 bs1) (DataC rt2 ct2 us2 bs2) - = compare (tag rt1) (tag rt2) - <> compare ct1 ct2 - <> cmpl compare us1 us2 - <> cmpl cmpc bs1 bs2 - cmpc (PApV (IC i1 _) us1 bs1) (PApV (IC i2 _) us2 bs2) - = compare (comb i1) (comb i2) - <> cmpl compare us1 us2 - <> cmpl cmpc bs1 bs2 - cmpc (CapV k1 us1 bs1) (CapV k2 us2 bs2) - = compare k1 k2 - <> cmpl compare us1 us2 - <> cmpl cmpc bs1 bs2 - cmpc (Foreign fl) (Foreign fr) - | Just sl <- maybeUnwrapForeign Ty.vectorRef fl - , Just sr <- maybeUnwrapForeign Ty.vectorRef fr - = comparing Sq.length sl sr <> fold (Sq.zipWith cmpc sl sr) - | otherwise = frn fl fr - cmpc c d = comparing closureNum c d - -marshalToForeign :: HasCallStack => Closure -> Foreign -marshalToForeign (Foreign x) = x -marshalToForeign c - = error $ "marshalToForeign: unhandled closure: " ++ show c - -type Off = Int -type SZ = Int -type FP = Int - -type UA = MutableByteArray (PrimState IO) -type BA = MutableArray (PrimState IO) Closure - -words :: Int -> Int -words n = n `div` 8 - -bytes :: Int -> Int -bytes n = n * 8 - -uargOnto :: UA -> Off -> UA -> Off -> Args' -> IO Int -uargOnto stk sp cop cp0 (Arg1 i) = do - (x :: Int) <- readByteArray stk (sp-i) - writeByteArray cop cp x - pure cp - where cp = cp0+1 -uargOnto stk sp cop cp0 (Arg2 i j) = do - (x :: Int) <- readByteArray stk (sp-i) - (y :: Int) <- readByteArray stk (sp-j) - writeByteArray cop cp x - writeByteArray cop (cp-1) y - pure cp - where cp = cp0+2 
-uargOnto stk sp cop cp0 (ArgN v) = do - buf <- if overwrite - then newByteArray $ bytes sz - else pure cop - let loop i - | i < 0 = return () - | otherwise = do - (x :: Int) <- readByteArray stk (sp-indexPrimArray v i) - writeByteArray buf (sz-1-i) x - loop $ i-1 - loop $ sz-1 - when overwrite $ - copyMutableByteArray cop (bytes $ cp+1) buf 0 (bytes sz) - pure cp - where - cp = cp0+sz - sz = sizeofPrimArray v - overwrite = sameMutableByteArray stk cop -uargOnto stk sp cop cp0 (ArgR i l) = do - moveByteArray cop cbp stk sbp (bytes l) - pure $ cp0+l - where - cbp = bytes $ cp0+1 - sbp = bytes $ sp-i-l+1 - -bargOnto :: BA -> Off -> BA -> Off -> Args' -> IO Int -bargOnto stk sp cop cp0 (Arg1 i) = do - x <- readArray stk (sp-i) - writeArray cop cp x - pure cp - where cp = cp0+1 -bargOnto stk sp cop cp0 (Arg2 i j) = do - x <- readArray stk (sp-i) - y <- readArray stk (sp-j) - writeArray cop cp x - writeArray cop (cp-1) y - pure cp - where cp = cp0+2 -bargOnto stk sp cop cp0 (ArgN v) = do - buf <- if overwrite - then newArray sz BlackHole - else pure cop - let loop i - | i < 0 = return () - | otherwise = do - x <- readArray stk $ sp-indexPrimArray v i - writeArray buf (sz-1-i) x - loop $ i-1 - loop $ sz-1 - when overwrite $ - copyMutableArray cop (cp0+1) buf 0 sz - pure cp - where - cp = cp0+sz - sz = sizeofPrimArray v - overwrite = stk == cop -bargOnto stk sp cop cp0 (ArgR i l) = do - copyMutableArray cop (cp0+1) stk (sp-i-l+1) l - pure $ cp0+l - -data Dump = A | F Int | S - -dumpAP :: Int -> Int -> Int -> Dump -> Int -dumpAP _ fp sz d@(F _) = dumpFP fp sz d -dumpAP ap _ _ _ = ap - -dumpFP :: Int -> Int -> Dump -> Int -dumpFP fp _ S = fp -dumpFP fp sz A = fp+sz -dumpFP fp sz (F n) = fp+sz-n - --- closure augmentation mode --- instruction, kontinuation, call -data Augment = I | K | C - -class MEM (b :: Mem) where - data Stack b :: * - type Elem b :: * - type Seg b :: * - alloc :: IO (Stack b) - peek :: Stack b -> IO (Elem b) - peekOff :: Stack b -> Off -> IO (Elem b) - 
poke :: Stack b -> Elem b -> IO () - pokeOff :: Stack b -> Off -> Elem b -> IO () - grab :: Stack b -> SZ -> IO (Seg b, Stack b) - ensure :: Stack b -> SZ -> IO (Stack b) - bump :: Stack b -> IO (Stack b) - bumpn :: Stack b -> SZ -> IO (Stack b) - duplicate :: Stack b -> IO (Stack b) - discardFrame :: Stack b -> IO (Stack b) - saveFrame :: Stack b -> IO (Stack b, SZ, SZ) - restoreFrame :: Stack b -> SZ -> SZ -> IO (Stack b) - prepareArgs :: Stack b -> Args' -> IO (Stack b) - acceptArgs :: Stack b -> Int -> IO (Stack b) - frameArgs :: Stack b -> IO (Stack b) - augSeg :: Augment -> Stack b -> Seg b -> Maybe Args' -> IO (Seg b) - dumpSeg :: Stack b -> Seg b -> Dump -> IO (Stack b) - fsize :: Stack b -> SZ - asize :: Stack b -> SZ - -instance MEM 'UN where - data Stack 'UN - -- Note: uap <= ufp <= usp - = US { uap :: !Int -- arg pointer - , ufp :: !Int -- frame pointer - , usp :: !Int -- stack pointer - , ustk :: {-# unpack #-} !(MutableByteArray (PrimState IO)) - } - type Elem 'UN = Int - type Seg 'UN = ByteArray - alloc = US (-1) (-1) (-1) <$> newByteArray 4096 - {-# inline alloc #-} - peek (US _ _ sp stk) = readByteArray stk sp - {-# inline peek #-} - peekOff (US _ _ sp stk) i = readByteArray stk (sp-i) - {-# inline peekOff #-} - poke (US _ _ sp stk) n = writeByteArray stk sp n - {-# inline poke #-} - pokeOff (US _ _ sp stk) i n = writeByteArray stk (sp-i) n - {-# inline pokeOff #-} - - -- Eats up arguments - grab (US _ fp sp stk) sze = do - mut <- newByteArray sz - copyMutableByteArray mut 0 stk (bfp-sz) sz - seg <- unsafeFreezeByteArray mut - moveByteArray stk (bfp-sz) stk bfp fsz - pure (seg, US (fp-sze) (fp-sze) (sp-sze) stk) - where - sz = bytes sze - bfp = bytes $ fp+1 - fsz = bytes $ sp-fp - {-# inline grab #-} - - ensure stki@(US ap fp sp stk) sze - | sze <= 0 - || bytes (sp+sze+1) < ssz = pure stki - | otherwise = do - stk' <- resizeMutableByteArray stk (ssz+10240) - pure $ US ap fp sp stk' - where - ssz = sizeofMutableByteArray stk - {-# inline ensure #-} 
- - bump (US ap fp sp stk) = pure $ US ap fp (sp+1) stk - {-# inline bump #-} - - bumpn (US ap fp sp stk) n = pure $ US ap fp (sp+n) stk - {-# inline bumpn #-} - - duplicate (US ap fp sp stk) - = US ap fp sp <$> do - b <- newByteArray sz - copyMutableByteArray b 0 stk 0 sz - pure b - where - sz = sizeofMutableByteArray stk - {-# inline duplicate #-} - - discardFrame (US ap fp _ stk) = pure $ US ap fp fp stk - {-# inline discardFrame #-} - - saveFrame (US ap fp sp stk) = pure (US sp sp sp stk, sp-fp, fp-ap) - {-# inline saveFrame #-} - - restoreFrame (US _ fp0 sp stk) fsz asz = pure $ US ap fp sp stk - where fp = fp0-fsz - ap = fp-asz - {-# inline restoreFrame #-} - - prepareArgs (US ap fp sp stk) (ArgR i l) - | fp+l+i == sp = pure $ US ap (sp-i) (sp-i) stk - prepareArgs (US ap fp sp stk) args = do - sp <- uargOnto stk sp stk fp args - pure $ US ap sp sp stk - {-# inline prepareArgs #-} - - acceptArgs (US ap fp sp stk) n = pure $ US ap (fp-n) sp stk - {-# inline acceptArgs #-} - - frameArgs (US ap _ sp stk) = pure $ US ap ap sp stk - {-# inline frameArgs #-} - - augSeg mode (US ap fp sp stk) seg margs = do - cop <- newByteArray $ ssz+psz+asz - copyByteArray cop soff seg 0 ssz - copyMutableByteArray cop 0 stk ap psz - for_ margs $ uargOnto stk sp cop (words poff + pix - 1) - unsafeFreezeByteArray cop - where - ssz = sizeofByteArray seg - pix | I <- mode = 0 | otherwise = fp-ap - (poff,soff) - | K <- mode = (ssz,0) - | otherwise = (0,psz+asz) - psz = bytes pix - asz = case margs of - Nothing -> 0 - Just (Arg1 _) -> 8 - Just (Arg2 _ _) -> 16 - Just (ArgN v) -> bytes $ sizeofPrimArray v - Just (ArgR _ l) -> bytes l - {-# inline augSeg #-} - - dumpSeg (US ap fp sp stk) seg mode = do - copyByteArray stk bsp seg 0 ssz - pure $ US ap' fp' sp' stk - where - bsp = bytes $ sp+1 - ssz = sizeofByteArray seg - sz = words ssz - sp' = sp+sz - fp' = dumpFP fp sz mode - ap' = dumpAP ap fp sz mode - {-# inline dumpSeg #-} - - fsize (US _ fp sp _) = sp-fp - {-# inline fsize #-} - - 
asize (US ap fp _ _) = fp-ap - {-# inline asize #-} - -peekN :: Stack 'UN -> IO Word64 -peekN (US _ _ sp stk) = readByteArray stk sp -{-# inline peekN #-} - -peekD :: Stack 'UN -> IO Double -peekD (US _ _ sp stk) = readByteArray stk sp -{-# inline peekD #-} - -peekOffN :: Stack 'UN -> Int -> IO Word64 -peekOffN (US _ _ sp stk) i = readByteArray stk (sp-i) -{-# inline peekOffN #-} - -peekOffD :: Stack 'UN -> Int -> IO Double -peekOffD (US _ _ sp stk) i = readByteArray stk (sp-i) -{-# inline peekOffD #-} - -pokeN :: Stack 'UN -> Word64 -> IO () -pokeN (US _ _ sp stk) n = writeByteArray stk sp n -{-# inline pokeN #-} - -pokeD :: Stack 'UN -> Double -> IO () -pokeD (US _ _ sp stk) d = writeByteArray stk sp d -{-# inline pokeD #-} - -pokeOffN :: Stack 'UN -> Int -> Word64 -> IO () -pokeOffN (US _ _ sp stk) i n = writeByteArray stk (sp-i) n -{-# inline pokeOffN #-} - -pokeOffD :: Stack 'UN -> Int -> Double -> IO () -pokeOffD (US _ _ sp stk) i d = writeByteArray stk (sp-i) d -{-# inline pokeOffD #-} - -peekOffT :: Stack 'BX -> Int -> IO Text -peekOffT bstk i = - unwrapForeign . marshalToForeign <$> peekOff bstk i -{-# inline peekOffT #-} - -pokeT :: Stack 'BX -> Text -> IO () -pokeT bstk t = poke bstk (Foreign $ wrapText t) -{-# inline pokeT #-} - -peekOffB :: Stack 'BX -> Int -> IO Bytes -peekOffB bstk i = unwrapForeign . marshalToForeign <$> peekOff bstk i -{-# inline peekOffB #-} - -pokeB :: Stack 'BX -> Bytes -> IO () -pokeB bstk b = poke bstk (Foreign $ wrapBytes b) - -peekOffS :: Stack 'BX -> Int -> IO (Seq Closure) -peekOffS bstk i = - unwrapForeign . 
marshalToForeign <$> peekOff bstk i -{-# inline peekOffS #-} - -pokeS :: Stack 'BX -> Seq Closure -> IO () -pokeS bstk s = poke bstk (Foreign $ Wrap Ty.vectorRef s) -{-# inline pokeS #-} - -pokeOffS :: Stack 'BX -> Int -> Seq Closure -> IO () -pokeOffS bstk i s = pokeOff bstk i (Foreign $ Wrap Ty.vectorRef s) -{-# inline pokeOffS #-} - -unull :: Seg 'UN -unull = byteArrayFromListN 0 ([] :: [Int]) - -bnull :: Seg 'BX -bnull = fromListN 0 [] - -instance Show (Stack 'BX) where - show (BS ap fp sp _) - = "BS " ++ show ap ++ " " ++ show fp ++ " " ++ show sp -instance Show (Stack 'UN) where - show (US ap fp sp _) - = "US " ++ show ap ++ " " ++ show fp ++ " " ++ show sp -instance Show K where - show k = "[" ++ go "" k - where - go _ KE = "]" - go _ (CB _) = "]" - go com (Push uf bf ua ba _ k) - = com ++ show (uf,bf,ua,ba) ++ go "," k - go com (Mark ps _ k) = com ++ "M" ++ show ps ++ go "," k - -instance MEM 'BX where - data Stack 'BX - = BS { bap :: !Int - , bfp :: !Int - , bsp :: !Int - , bstk :: {-# unpack #-} !(MutableArray (PrimState IO) Closure) - } - type Elem 'BX = Closure - type Seg 'BX = Array Closure - - alloc = BS (-1) (-1) (-1) <$> newArray 512 BlackHole - {-# inline alloc #-} - - peek (BS _ _ sp stk) = readArray stk sp - {-# inline peek #-} - - peekOff (BS _ _ sp stk) i = readArray stk (sp-i) - {-# inline peekOff #-} - - poke (BS _ _ sp stk) x = writeArray stk sp x - {-# inline poke #-} - - pokeOff (BS _ _ sp stk) i x = writeArray stk (sp-i) x - {-# inline pokeOff #-} - - grab (BS _ fp sp stk) sz = do - seg <- unsafeFreezeArray =<< cloneMutableArray stk (fp+1-sz) sz - copyMutableArray stk (fp+1-sz) stk (fp+1) fsz - pure (seg, BS (fp-sz) (fp-sz) (sp-sz) stk) - where fsz = sp-fp - {-# inline grab #-} - - ensure stki@(BS ap fp sp stk) sz - | sz <= 0 = pure stki - | sp+sz+1 < ssz = pure stki - | otherwise = do - stk' <- newArray (ssz+1280) BlackHole - copyMutableArray stk' 0 stk 0 (sp+1) - pure $ BS ap fp sp stk' - where ssz = sizeofMutableArray stk - {-# inline 
ensure #-} - - bump (BS ap fp sp stk) = pure $ BS ap fp (sp+1) stk - {-# inline bump #-} - - bumpn (BS ap fp sp stk) n = pure $ BS ap fp (sp+n) stk - {-# inline bumpn #-} - - duplicate (BS ap fp sp stk) - = BS ap fp sp <$> cloneMutableArray stk 0 (sizeofMutableArray stk) - {-# inline duplicate #-} - - discardFrame (BS ap fp _ stk) = pure $ BS ap fp fp stk - {-# inline discardFrame #-} - - saveFrame (BS ap fp sp stk) = pure (BS sp sp sp stk, sp-fp, fp-ap) - {-# inline saveFrame #-} - - restoreFrame (BS _ fp0 sp stk) fsz asz = pure $ BS ap fp sp stk - where - fp = fp0-fsz - ap = fp-asz - {-# inline restoreFrame #-} - - prepareArgs (BS ap fp sp stk) (ArgR i l) - | fp+i+l == sp = pure $ BS ap (sp-i) (sp-i) stk - prepareArgs (BS ap fp sp stk) args = do - sp <- bargOnto stk sp stk fp args - pure $ BS ap sp sp stk - {-# inline prepareArgs #-} - - acceptArgs (BS ap fp sp stk) n = pure $ BS ap (fp-n) sp stk - {-# inline acceptArgs #-} - - frameArgs (BS ap _ sp stk) = pure $ BS ap ap sp stk - {-# inline frameArgs #-} - - augSeg mode (BS ap fp sp stk) seg margs = do - cop <- newArray (ssz+psz+asz) BlackHole - copyArray cop soff seg 0 ssz - copyMutableArray cop poff stk ap psz - for_ margs $ bargOnto stk sp cop (poff+psz-1) - unsafeFreezeArray cop - where - ssz = sizeofArray seg - psz | I <- mode = 0 | otherwise = fp-ap - (poff,soff) - | K <- mode = (ssz,0) - | otherwise = (0,psz+asz) - asz = case margs of - Nothing -> 0 - Just (Arg1 _) -> 1 - Just (Arg2 _ _) -> 2 - Just (ArgN v) -> sizeofPrimArray v - Just (ArgR _ l) -> l - {-# inline augSeg #-} - - dumpSeg (BS ap fp sp stk) seg mode = do - copyArray stk (sp+1) seg 0 sz - pure $ BS ap' fp' sp' stk - where - sz = sizeofArray seg - sp' = sp+sz - fp' = dumpFP fp sz mode - ap' = dumpAP ap fp sz mode - {-# inline dumpSeg #-} - - fsize (BS _ fp sp _) = sp-fp - {-# inline fsize #-} - - asize (BS ap fp _ _) = fp-ap - -uscount :: Seg 'UN -> Int -uscount seg = words $ sizeofByteArray seg - -bscount :: Seg 'BX -> Int -bscount seg = 
sizeofArray seg - diff --git a/parser-typechecker/src/Unison/Runtime/Vector.hs b/parser-typechecker/src/Unison/Runtime/Vector.hs deleted file mode 100644 index 59fed5041e..0000000000 --- a/parser-typechecker/src/Unison/Runtime/Vector.hs +++ /dev/null @@ -1,54 +0,0 @@ -{-# Language GADTs #-} - -module Unison.Runtime.Vector where - -import Unison.Prelude - -import qualified Data.MemoCombinators as Memo -import qualified Data.Vector.Unboxed as UV - --- A `Vec a` denotes a `Nat -> Maybe a` -data Vec a where - Scalar :: a -> Vec a - Vec :: UV.Unbox a => UV.Vector a -> Vec a - Pair :: Vec a -> Vec b -> Vec (a, b) - Choose :: Vec Bool -> Vec a -> Vec a -> Vec a - Mux :: Vec Nat -> Vec (Vec a) -> Vec a - --- todo: maybe make representation `(UV.Vector Nat -> UnboxedMap Nat a, Bound)` --- `UnboxedMap Nat a = (UV.Vector Nat, UV.Vector a)` --- UnboxedMap Nat could be implemented as an `UArray` --- `Bound` is Nat, max possible index --- then easy to implement `+`, `-`, etc - -type Nat = Word64 - -mu :: Vec a -> Nat -> Maybe a -mu v = case v of - Scalar a -> const (Just a) - Vec vs -> \i -> vs UV.!? fromIntegral i - Choose cond t f -> let - (condr, tr, tf) = (mu cond, mu t, mu f) - in \i -> condr i >>= \b -> if b then tr i else tf i - Mux mux branches -> let - muxr = mu mux - branchesr = Memo.integral $ let f = mu branches in \i -> mu <$> f i - in \i -> do j <- muxr i; b <- branchesr j; b i - Pair v1 v2 -> let - (v1r, v2r) = (mu v1, mu v2) - in \i -> liftA2 (,) (v1r i) (v2r i) - --- Returns the maximum `Nat` for which `mu v` may return `Just`. 
-bound :: Nat -> Vec a -> Nat -bound width v = case v of - Scalar _ -> width - Vec vs -> fromIntegral $ UV.length vs - Pair v1 v2 -> bound width v1 `min` bound width v2 - Choose cond _ _ -> bound width cond - Mux mux _ -> bound width mux - -toList :: Vec a -> [a] -toList v = let - n = bound maxBound v - muv = mu v - in catMaybes $ muv <$> [0..n] diff --git a/parser-typechecker/src/Unison/Runtime/docs.markdown b/parser-typechecker/src/Unison/Runtime/docs.markdown deleted file mode 100644 index d08f3aed4d..0000000000 --- a/parser-typechecker/src/Unison/Runtime/docs.markdown +++ /dev/null @@ -1,240 +0,0 @@ - -# Documentation of the Unison runtime - -This directory has the code for the Unison runtime. - -The Unison runtime is responsible for evaluating Unison code which has been parsed and typechecked. Evaluation converts _computations_, which contain reducible expressions (an expression like `1 + 1` or `case (a,b) of ..`) to _values_ (like `42`), which don't have redexes and which are said to be in _normal form_. The runtime has some design constraints: - -##### It should be possible at runtime to hash, serialize, deserialize, and compute the dependencies of any value in the language, including functions. - -These capabilities are needed for the implementation of Unison's distributed programming API which ships arbitrary values over the network (and these functions are also just super convenient for other reasons too). That is, it needs to be possible to have functions like: - - * `encode : forall a . a -> Bytes` - * `decode : forall a . Bytes -> Either Err a` - * `dependencies : forall a . a -> Set Reference` - * `hash : forall a . a -> Hash` - * Note: the types of these might be more constrained than this so you can't break parametricity and use them in parametric code, but the idea is that they could in principle have these types and they really do work _for all_ `a`. 
- -Importantly, values may contain cycles (a recursive function, for instance), and these to be serializable and hashable as well, so there needs to be a way of detecting and encoding these cycles reliably in all cases (having the serialization code blow up when it hits a cycle is not allowed). - -##### The runtime should make it possible to decompile any value back to a Unison term. - -When you evaluate a watch expression, you should see a term in normal form. This is nicer than the usual approach of having runtime values be their own universe and requiring the programmer to write a bunch of boilerplate to extract useful information from these runtime values. - -##### The runtime should support algebraic effects, which requires being able to manipulate continuations of a running program. - - -##### This first version of the Haskell runtime isn't aiming for extreme speed. It should be correct, simple, and easy for us to understand and maintain. - -Within these parameters, if there's easy speed improvements to be had, great. And perhaps later, we can have a more complicated but insanely fast runtime which is also correct because of intense engineering effort. But now is not the time for that. - -##### The runtime should be modular, so that pieces of it can be reused even if we move from, say, directly interpreting some instruction set to JIT-ing via LLVM. - -The old Scala runtime was monolithic, going directly from a term to a compiled form. 
- -## Overview - -To evaluate a Unison term, `p : AnnotatedTerm v a` which has been successfully typechecked, the runtime goes through several phases: - - p0 : AnnotatedTerm v a - || - let rec minimization - || - p1 : AnnotatedTerm v a - || - lambda lifting - || - p2 : AnnotatedTerm v a - || - A-normal form (ANF) conversion - || - p3 : AnnotatedTerm v a - || - compilation - || - p4 : IR (see `IR.hs`) - || - evaluation - || - p5 : IR.V (see `IR.hs`) - || - decompilation - || - p6 : AnnotatedTerm v () - -Here's a brief overview of these phases: - -* let rec minimization eliminates needless cycles and reduces cycle sizes to the minimum, to prepare for just having `let` be the only place in the runtime that must deal with ability requests. -* lambda lifting eliminates lambdas with free variables by adding extra parameters for each free variable, simplifying the later compilation phase. -* ANF moves any function calls or ability requests to the body of a `let` or `let rec`, which is the last thing needed to ensure that `let` is the only place we need to deal with ability requests. -* Compilation converts the ANF code to an intermediate representation, which can be interpreted directly to produce a value, `V`. -* After evaluation, the `V` can be decompiled back to a term, which can be displayed to the user in the codebase editor tool. - -#### Phase 1: let rec minimization - -_let rec minimization_ breaks up recursive blocks into a mix of `let` and minimally-sized `let rec` blocks. The implementation is in [`Unison.Typechecker.Components.minimize`](../Typechecker/Components.hs#L17). - -__Why do we do this?__ - -* We decided for sanity and simplicity that the bindings of a cycle (like `ping` and `pong`) can't use any abilities, since it's unclear what order things happen in (if `ping` uses abilities and has a forward reference to `pong`, and `pong` has a reference to `ping` and uses abilities, which effect should happen first??). 
To clarify, mutually recursive functions in a let rec may use abilities in their body, since those abilities aren't required until the function is called. -* But when the source of a program reveals a clear dependency order to the bindings, we want to be able to use abilities. -* This transformation is also useful in conjunction with ANF conversion - it means that interpretation of `let` is the _one place in the runtime_ where we need to expect an ability request. It makes it very easy to construct the continuations which are passed to the ability handlers. - -_Note:_ The typechecker also does this same let rec minimization transform before typechecking, and when typechecking any `let rec` blocks that remain, it sets the ambient ability to be empty. (TODO: check that typechecker does in fact do this, and fix if not) - -#### Phase 2: lambda lifting - -This transform is currently in the [`ANF.hs`](ANF.hs#L26) file, see the `lambdaLift` function there. This transform eliminates free variables from any lambdas in the term, by turning them into ordinary function parameters. This leaves only closed lambdas, which are easy to compile. - -A lambda with free variables is really a program that will generate a function at runtime when values for those free variables are known. Turning these free variables into function parameters just means less cases to deal with later on during compilation. - -#### Phase 3: ANF conversion - -See [Wikipedia page for ANF](https://en.wikipedia.org/wiki/A-normal_form). __Why do we do this?__ It's much simpler to compile and optimize, and importantly, it leaves us with __just one place__, in `let`, where the continuations of ability requests are processed by the runtime. 
- -Example: - -``` -handle (state 0) in - x = State.get + 3 - y = x + 1 - State.set 42 - 99 -``` - -This isn't in ANF, and if we tried to compile this directly, our code for doing function application (the `State.get + 3`) would need to be prepared to deal with ability requests and would need to be able to construct the appropriate continuation: - -``` -r -> let - x = r + 3 - y = x + 1 - State.set 42 - 99 -``` - -In contrast, if the code is in ANF, then function application doesn't need to deal with ability requests, as functions are always being applied to values: - -``` -handle (state 0) in - r = State.get - x = r + 3 - y = x + 1 - State.set 42 - 99 -``` - -#### Phase 4: compilation to IR - -The `IR` type, defined in `IR.hs`, denotes a `[Value] -> Result Value`, a function that takes a stack of values and returns a result, where: - -``` -type Result v = - MatchFail | Done v | - Request Reference CtorId [v] IR --- ^ ^ ^ ^ --- ability ctor args continuation -``` - -`Value` (defined in [`IR.hs`](Value.hs)) has no redexes and is in normal form. - -An example of `Request`, for the expression `State.set 42` - the `Reference` is `State`, the `CtorId` (an `Int`) is the constructor number for `State.set`, the args is `[42]`, and - -`IR` is represented as an ANF tree, where variable references are represented by their [De Bruijn index](https://en.wikipedia.org/wiki/De_Bruijn_index): the nearest / innermost bound variable has an index of 0, the next nearest has an index of 1, etc. Some more interesting examples: - -* In `x y -> x + y`, in the body of the lambda, `x` has an index of 1 and `y` has an index of 0. -* In `let x = 1; y = x + 1`, in the body of `y`, `x` has an index of 0. -* In `let rec ping x = pong x; pong x = ping x`, in the body of `ping`, `x` has an index of `0`, `pong` has an index of `1`, and `ping` has an index of `2`. 
-* In `case x of (y, (z, q)) -> y + z`, in the `y + z` expression, `y` has an index of `2` and `z` has an index of `1` and `q` has an index of 0. - -Put another way, variable bindings are pushed onto a stack in "source order", and their position on the stack at each usage site is their De Bruijn index. - -In order to convert from the ANF form of the term, which has named variable references, and the De Bruijn indexed `IR` type, we need to convert from named references to De Bruijn indices. For that, we use the function [`ABT.annotateBound'`](../ABT.hs#L120-L126), which is defined for any abstract binding tree: - -```haskell -annotateBound' - :: (Ord v, Functor f, Foldable f) - => Term f v a0 - -> Term f v [v] -``` - -This annotates every node of an ABT with the list of bound variables, in order, such that the de bruijn index of variable, `v`, can be computed via [`elemIndex`](http://hackage.haskell.org/package/base-4.12.0.0/docs/Data-List.html#v:elemIndex) of `v` in this list. Easy peasy. - -Once we have that, the conversion to `IR`, defined in `IR.compile0`, is straightforward. A couple wrinkles: - -* When a lambda is partially applied, for instance `(x y -> foo x + y) 42`, we recompile the lambda, subsituting the arguments already applied, in this case substituting `x` with `42`, leaving the lambda `(y -> foo 42 + y)`. To support this, the `compile0` function actually takes this environment of already evaluated values, as a `[(SymbolC, Maybe V)]`. This is added onto the end of the list of bound variables. -* Variables which are lambda-lifted out of a function are compiled as "lazy stack lookups" (the `LazySlot` constructor, defined in [`IR.hs`](IR.hs)), which doesn't look inside references on the stack. Why is this done? 
- -Well, consider: - -```Haskell -let rec loop = n -> - if n == 100 then n - else loop (n + 1) -``` - -After lambda lifting, this looks like: - -```Haskell -let rec loop = - (self n -> if n == 100 then n - else self (n + 1)) loop - ^^^^ -``` - -But this isn't quite right - the `loop` which is passed to itself needs to be passed lazily, otherwise this would not terminate. Really, we want something more like: - -```Haskell -let rec loop = - (self n -> if n == 100 then n - else !self (n + 1)) 'loop - ^^^^ -``` - -Notice that `loop` is passed as the thunk, `'loop`, which is forced inside the body (the `!self`). But we don't literally need to use thunks like this, we can just avoid forcing the reference which appears at that stack slot. - -The `SymbolC` variable type used by the `compile0` function just tracks which variables need to get this treatment - these variables are compiled as a `LazySlot` rather than a `Slot`. - -#### Phase 5: evaluation - -There are interpreters in [`Rt0.hs`](Rt0.hs) and (in progress) [`Rt1.hs`](Rt1.hs). Recall our denotation for `IR` is an `[Value] -> Result Value`, a function that takes a stack of values and produces a `Result`, where: - -``` -type Result v = - MatchFail | Done v | - Request Reference CtorId [v] IR --- ^ ^ ^ ^ --- ability ctor args continuation -``` - -If you go through [`Rt0.hs`](Rt0.hs), many of the cases are straightforward: there is basically only one or two reasonable things to do. In general, instructions that introduce variables (like `let`, `let rec`, function calls, and pattern matching) will push onto the stack and invoke the interpreter on subexpressions with this updated stack. - -As a result of the ANF representation, only `let` needs to be prepared to deal with a `Request` result. - -A couple wrinkles: - -* Functions, when fully applied, are simple to interpret: we push the arguments onto the stack and then run the body of the function. 
But functions can also be under-applied (given fewer arguments than their arity) or over-applied (given more arguments than their arity). These cases all need to be handled: - * Under-applied functions are recompiled, with the given arguments substituted into the body of the function. So `(x y -> x + y) 42` becomes the lambda `y -> 42 + y`. - * Over-applied functions are treated as a fully-applied function, and the result of that call is then applied to the remaining arguments. So `(x -> x) (y -> y) 42` first evaluates the `(x -> x) (y -> y)`, producing `(y -> y)`, and then applies `(y -> y) 42`, producing `42`. -* __Tail calls:__ When a function call is the last expression in a function, that call can discard the stack elements for the current call. - * That is, suppose we are inside the function `x y -> let z = x + y; foo z 19`. At the point of the `foo z 19` call, `x` and `y` are on the stack, but aren't needed anymore. The call to `foo` could therefore drop `x` and `y` from the stack, push `z` and `19` onto the stack and then call `foo`. `foo` only examines at most two stack elements, one for each arg it receives, so it doesn't care whether `x` and `y` are below it in the stack or not. - * There are a few approaches for allowing the stack to be reclaimed: - * One is to have a separate tail call instruction in the `IR` (this makes the `compile` function a bit more complicated, and the evaluator, since it needs to different cases, one for regular calls and one for tail calls). - * Another is to track in the evaluator when a function call is in tail position, and interpret function calls accordingly (this means more state being tracked by the evaluator, and more cases. - * The last approach is to just garbage collect the stack occasionally, this is Baker's idea, [Cheney on the MTA](http://home.pipeline.com/~hbaker1/CheneyMTA.html). 
To GC the stack, you simply look at the current `IR` and compute its max De Bruijn index, say that's 4, which means that only the top `5` elements of the stack are still referenced by the rest of your computation. You copy these 5 elements to a fresh stack, preserving their order, reset the top of the stack to `5`, and continue on. - * And these approaches need not be mutually exclusive - you can garbage collect the stack and stil have a separate tail call instruction. - -Currently, the `IR` doesn't have a separate tail call instruction and nothing is implemented for tail calls. I think the Cheney on the MTA is very simple, so will probably just do that for now. - -One interesting aspect of the Cheney on the MTA approach is that it's more accurate about garbage collecting references that remain in lexical scope, but which aren't used any longer. For instance, consider: - -```Haskell -let - x = computeHugeList 99 - n = Sequence.size x - y = loop n - y + 100 -``` - -Assume `x` is some huge list and `loop` is some long running loop. At the point where this `loop` function is invoked, `x` is no longer used by the rest of the computation, but because `loop` isn't a tail call, `x` is kept around on the stack and not GC'd. With the Cheny on the MTA approach, this doesn't matter--`x` can be garbage collected as soon as the continuation of the computation no longer references it, independent of any tail calls. - -It seems nice to do this sort of GC (possibly in addition to having a separate tail call instruction). Having some dangling reference in lexical scope is one of those things that causes occasional hard-to-debug memory leaks. I've heard that the JVM will even null out stack slots which aren't used anymore, which is a bit like this. 
diff --git a/parser-typechecker/src/Unison/TermParser.hs b/parser-typechecker/src/Unison/TermParser.hs deleted file mode 100644 index e3a74ae5fb..0000000000 --- a/parser-typechecker/src/Unison/TermParser.hs +++ /dev/null @@ -1,901 +0,0 @@ -{-# OPTIONS_GHC -Wno-partial-type-signatures #-} - -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE PartialTypeSignatures #-} - -module Unison.TermParser where - -import Unison.Prelude - -import Control.Monad.Reader (asks, local) -import Prelude hiding (and, or, seq) -import Unison.Name (Name) -import Unison.Names3 (Names) -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import Unison.Parser hiding (seq) -import Unison.Pattern (Pattern) -import Unison.Term (Term, IsTop) -import Unison.Type (Type) -import Unison.Util.List (intercalateMapWith, quenchRuns) -import Unison.Var (Var) -import qualified Data.List.Extra as List.Extra -import qualified Data.Char as Char -import qualified Data.List as List -import qualified Data.Maybe as Maybe -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Data.Tuple.Extra as TupleE -import qualified Data.Sequence as Sequence -import qualified Text.Megaparsec as P -import qualified Unison.ABT as ABT -import qualified Unison.Builtin.Decls as DD -import qualified Unison.HashQualified as HQ -import qualified Unison.Lexer as L -import qualified Unison.Name as Name -import qualified Unison.Names3 as Names -import qualified Unison.Parser as Parser (seq, uniqueName) -import qualified Unison.Pattern as Pattern -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.Typechecker.Components as Components -import qualified Unison.TypeParser as TypeParser -import qualified Unison.Var as Var - -watch :: Show a => String -> a -> a -watch msg a = let !_ = trace (msg ++ ": " ++ show a) () in a - -{- -Precedence of language 
constructs is identical to Haskell, except that all -operators (like +, <*>, or any sequence of non-alphanumeric characters) are -left-associative and equal precedence, and operators must have surrounding -whitespace (a + b, not a+b) to distinguish from identifiers that may contain -operator characters (like empty? or fold-left). - -Sections / partial application of infix operators is not implemented. --} - -type TermP v = P v (Term v Ann) - -term :: Var v => TermP v -term = term2 - -term2 :: Var v => TermP v -term2 = lam term2 <|> term3 - -term3 :: Var v => TermP v -term3 = do - t <- infixAppOrBooleanOp - ot <- optional (reserved ":" *> TypeParser.computationType) - pure $ case ot of - Nothing -> t - Just y -> Term.ann (mkAnn t y) t y - -keywordBlock :: Var v => TermP v -keywordBlock = letBlock <|> handle <|> ifthen <|> match <|> lamCase - -typeLink' :: Var v => P v (L.Token Reference) -typeLink' = do - id <- hqPrefixId - ns <- asks names - case Names.lookupHQType (L.payload id) ns of - s | Set.size s == 1 -> pure $ const (Set.findMin s) <$> id - | otherwise -> customFailure $ UnknownType id s - -termLink' :: Var v => P v (L.Token Referent) -termLink' = do - id <- hqPrefixId - ns <- asks names - case Names.lookupHQTerm (L.payload id) ns of - s | Set.size s == 1 -> pure $ const (Set.findMin s) <$> id - | otherwise -> customFailure $ UnknownTerm id s - -link' :: Var v => P v (Either (L.Token Reference) (L.Token Referent)) -link' = do - id <- hqPrefixId - ns <- asks names - case (Names.lookupHQTerm (L.payload id) ns, Names.lookupHQType (L.payload id) ns) of - (s, s2) | Set.size s == 1 && Set.null s2 -> pure . Right $ const (Set.findMin s) <$> id - (s, s2) | Set.size s2 == 1 && Set.null s -> pure . 
Left $ const (Set.findMin s2) <$> id - (s, s2) -> customFailure $ UnknownId id s s2 - -link :: Var v => TermP v -link = termLink <|> typeLink - where - typeLink = do - P.try (reserved "typeLink") -- type opens a block, gotta use something else - tok <- typeLink' - pure $ Term.typeLink (ann tok) (L.payload tok) - termLink = do - P.try (reserved "termLink") - tok <- termLink' - pure $ Term.termLink (ann tok) (L.payload tok) - --- We disallow type annotations and lambdas, --- just function application and operators -blockTerm :: Var v => TermP v -blockTerm = lam term <|> infixAppOrBooleanOp - -match :: Var v => TermP v -match = do - start <- openBlockWith "match" - scrutinee <- term - _ <- closeBlock - _ <- P.try (openBlockWith "with") <|> do - t <- anyToken - P.customFailure (ExpectedBlockOpen "with" t) - cases <- sepBy1 semi matchCase - -- TODO: Add error for empty match list - _ <- closeBlock - pure $ Term.match (ann start <> ann (last cases)) scrutinee cases - -matchCase :: Var v => P v (Term.MatchCase Ann (Term v Ann)) -matchCase = do - (p, boundVars) <- parsePattern - let boundVars' = snd <$> boundVars - guard <- optional $ reserved "|" *> infixAppOrBooleanOp - t <- block "->" - let absChain vs t = foldr (\v t -> ABT.abs' (ann t) v t) t vs - pure . Term.MatchCase p (fmap (absChain boundVars') guard) $ absChain boundVars' t - -parsePattern :: forall v. Var v => P v (Pattern Ann, [(Ann, v)]) -parsePattern = root - where - root = chainl1 patternCandidates patternInfixApp - patternCandidates = constructor <|> leaf - patternInfixApp :: P v ((Pattern Ann, [(Ann, v)]) - -> (Pattern Ann, [(Ann, v)]) - -> (Pattern Ann, [(Ann, v)])) - patternInfixApp = f <$> seqOp - where - f op (l, lvs) (r, rvs) = - (Pattern.SequenceOp (ann l <> ann r) l op r, lvs ++ rvs) - - -- note: nullaryCtor comes before var patterns, since (for better or worse) - -- they can overlap (a variable could be called 'Foo' in the current grammar). 
- -- This order treats ambiguous patterns as nullary constructors if there's - -- a constructor with a matching name. - leaf = literal <|> nullaryCtor <|> varOrAs <|> unbound <|> seqLiteral <|> - parenthesizedOrTuplePattern <|> effect - literal = (,[]) <$> asum [true, false, number, text, char] - true = (\t -> Pattern.Boolean (ann t) True) <$> reserved "true" - false = (\t -> Pattern.Boolean (ann t) False) <$> reserved "false" - number = number' (tok Pattern.Int) (tok Pattern.Nat) (tok Pattern.Float) - text = (\t -> Pattern.Text (ann t) (L.payload t)) <$> string - char = (\c -> Pattern.Char (ann c) (L.payload c)) <$> character - parenthesizedOrTuplePattern :: P v (Pattern Ann, [(Ann, v)]) - parenthesizedOrTuplePattern = tupleOrParenthesized parsePattern unit pair - unit ann = (Pattern.Constructor ann DD.unitRef 0 [], []) - pair (p1, v1) (p2, v2) = - (Pattern.Constructor (ann p1 <> ann p2) DD.pairRef 0 [p1, p2], - v1 ++ v2) - -- Foo x@(Blah 10) - varOrAs :: P v (Pattern Ann, [(Ann, v)]) - varOrAs = do - v <- wordyPatternName - o <- optional (reserved "@") - if isJust o then - (\(p, vs) -> (Pattern.As (ann v) p, tokenToPair v : vs)) <$> leaf - else pure (Pattern.Var (ann v), [tokenToPair v]) - unbound :: P v (Pattern Ann, [(Ann, v)]) - unbound = (\tok -> (Pattern.Unbound (ann tok), [])) <$> blank - ctor :: _ -> P v (L.Token (Reference, Int)) - ctor err = do - -- this might be a var, so we avoid consuming it at first - tok <- P.try (P.lookAhead hqPrefixId) - names <- asks names - case Names.lookupHQPattern (L.payload tok) names of - s | Set.null s -> die tok s - | Set.size s > 1 -> die tok s - | otherwise -> -- matched ctor name, consume the token - do anyToken; pure (Set.findMin s <$ tok) - where - isLower = Text.all Char.isLower . Text.take 1 . 
Name.toText - die hq s = case L.payload hq of - -- if token not hash qualified or uppercase, - -- fail w/out consuming it to allow backtracking - HQ.NameOnly n | Set.null s && - isLower n -> fail $ "not a constructor name: " <> show n - -- it was hash qualified, and wasn't found in the env, that's a failure! - _ -> failCommitted $ err hq s - - unzipPatterns f elems = case unzip elems of (patterns, vs) -> f patterns (join vs) - - effectBind0 = do - tok <- ctor UnknownAbilityConstructor - leaves <- many leaf - _ <- reserved "->" - pure (tok, leaves) - - effectBind = do - (tok, leaves) <- P.try effectBind0 - let (ref,cid) = L.payload tok - (cont, vsp) <- parsePattern - pure $ - let f patterns vs = (Pattern.EffectBind (ann tok <> ann cont) ref cid patterns cont, vs ++ vsp) - in unzipPatterns f leaves - - effectPure = go <$> parsePattern where - go (p, vs) = (Pattern.EffectPure (ann p) p, vs) - - effect = do - start <- openBlockWith "{" - (inner, vs) <- effectBind <|> effectPure - end <- closeBlock - pure (Pattern.setLoc inner (ann start <> ann end), vs) - - -- ex: unique type Day = Mon | Tue | ... - nullaryCtor = P.try $ do - tok <- ctor UnknownAbilityConstructor - let (ref, cid) = L.payload tok - pure (Pattern.Constructor (ann tok) ref cid [], []) - - constructor = do - tok <- ctor UnknownDataConstructor - let (ref,cid) = L.payload tok - f patterns vs = - let loc = foldl (<>) (ann tok) $ map ann patterns - in (Pattern.Constructor loc ref cid patterns, vs) - unzipPatterns f <$> many leaf - - seqLiteral = Parser.seq f root - where f loc = unzipPatterns ((,) . 
Pattern.SequenceLiteral loc) - -lam :: Var v => TermP v -> TermP v -lam p = label "lambda" $ mkLam <$> P.try (some prefixDefinitionName <* reserved "->") <*> p - where - mkLam vs b = Term.lam' (ann (head vs) <> ann b) (map L.payload vs) b - -letBlock, handle, lamCase, ifthen :: Var v => TermP v -letBlock = label "let" $ block "let" - -handle = label "handle" $ do - b <- block "handle" - handler <- block "with" - pure $ Term.handle (ann b) handler b - -lamCase = do - start <- openBlockWith "cases" - cases <- sepBy1 semi matchCase - -- TODO: Add error for empty match list - _ <- closeBlock - lamvar <- Parser.uniqueName 10 - let lamvarTerm = Term.var (ann start) (Var.named lamvar) - matchTerm = Term.match (ann start <> ann (last cases)) lamvarTerm cases - pure $ Term.lam (ann start <> ann (last cases)) (Var.named lamvar) matchTerm - - -ifthen = label "if" $ do - start <- peekAny - c <- block "if" - t <- block "then" - f <- block "else" - pure $ Term.iff (ann start <> ann f) c t f - -text :: Var v => TermP v -text = tok Term.text <$> string - -char :: Var v => TermP v -char = tok Term.char <$> character - -boolean :: Var v => TermP v -boolean = ((\t -> Term.boolean (ann t) True) <$> reserved "true") <|> - ((\t -> Term.boolean (ann t) False) <$> reserved "false") - -seq :: Var v => TermP v -> TermP v -seq = Parser.seq Term.seq - -hashQualifiedPrefixTerm :: Var v => TermP v -hashQualifiedPrefixTerm = resolveHashQualified =<< hqPrefixId - -hashQualifiedInfixTerm :: Var v => TermP v -hashQualifiedInfixTerm = resolveHashQualified =<< hqInfixId - --- If the hash qualified is name only, it is treated as a var, if it --- has a short hash, we resolve that short hash immediately and fail --- committed if that short hash can't be found in the current environment -resolveHashQualified :: Var v => L.Token HQ.HashQualified -> TermP v -resolveHashQualified tok = do - names <- asks names - case L.payload tok of - HQ.NameOnly n -> pure $ Term.var (ann tok) (Name.toVar n) - _ -> case 
Names.lookupHQTerm (L.payload tok) names of - s | Set.null s -> failCommitted $ UnknownTerm tok s - | Set.size s > 1 -> failCommitted $ UnknownTerm tok s - | otherwise -> pure $ Term.fromReferent (ann tok) (Set.findMin s) - -termLeaf :: forall v . Var v => TermP v -termLeaf = - asum - [ hashQualifiedPrefixTerm - , text - , char - , number - , boolean - , link - , tupleOrParenthesizedTerm - , keywordBlock - , seq term - , delayQuote - , bang - , docBlock - ] - -docBlock :: Var v => TermP v -docBlock = do - openTok <- openBlockWith "[:" - segs <- many segment - closeTok <- closeBlock - let a = ann openTok <> ann closeTok - pure . docNormalize $ Term.app a (Term.constructor a DD.docRef DD.docJoinId) (Term.seq a segs) - where - segment = blob <|> linky - blob = do - s <- string - pure $ Term.app (ann s) (Term.constructor (ann s) DD.docRef DD.docBlobId) - (Term.text (ann s) (L.payload s)) - linky = asum [include, signature, evaluate, source, link] - include = do - _ <- P.try (reserved "include") - hashQualifiedPrefixTerm - signature = do - _ <- P.try (reserved "signature") - tok <- termLink' - pure $ Term.app (ann tok) - (Term.constructor (ann tok) DD.docRef DD.docSignatureId) - (Term.termLink (ann tok) (L.payload tok)) - evaluate = do - _ <- P.try (reserved "evaluate") - tok <- termLink' - pure $ Term.app (ann tok) - (Term.constructor (ann tok) DD.docRef DD.docEvaluateId) - (Term.termLink (ann tok) (L.payload tok)) - source = do - _ <- P.try (reserved "source") - l <- link'' - pure $ Term.app (ann l) - (Term.constructor (ann l) DD.docRef DD.docSourceId) - l - link'' = either ty t <$> link' where - t tok = Term.app (ann tok) - (Term.constructor (ann tok) DD.linkRef DD.linkTermId) - (Term.termLink (ann tok) (L.payload tok)) - ty tok = Term.app (ann tok) - (Term.constructor (ann tok) DD.linkRef DD.linkTypeId) - (Term.typeLink (ann tok) (L.payload tok)) - link = d <$> link'' where - d tm = Term.app (ann tm) (Term.constructor (ann tm) DD.docRef DD.docLinkId) tm - --- Used 
by unbreakParas within docNormalize. Doc literals are a joined sequence --- segments. This type describes a property of a segment. -data UnbreakCase = - -- Finishes with a newline and hence does not determine whether the next - -- line starts with whitespace. - LineEnds - -- Ends with "\n something", i.e. introduces an indented line. - | StartsIndented - -- Ends with "\nsomething", i.e. introduces an unindented line. - | StartsUnindented deriving (Eq, Show) - --- Doc literal normalization --- --- This normalization allows the pretty-printer and doc display code to do --- indenting, and to do line-wrap of paragraphs, but without the inserted --- newlines being then frozen into the text for ever more over subsequent --- edit/update cycles. --- --- The alternative would be to stop line-wrapping docs on view/display by adding --- newlines in the pretty-printer, and instead leave wrapping to the --- terminal/editor. Might be worth considering if this code ends up being --- too buggy and fragile to maintain. Maybe display could add newlines, --- and view could refrain from doing so. --- --- Operates on the text of the Blobs within a doc (as parsed by docBlock): --- - reduces the whitespace after all newlines so that at least one of the --- non-initial lines has zero indent (important because the pretty-printer adds --- indenting when displaying doc literals) --- - removes trailing whitespace from each line --- - removes newlines between any sequence of non-empty zero-indent lines --- (i.e. undo line-breaking within paragraphs). --- --- Should be understood in tandem with Util.Pretty.paragraphyText, which --- outputs doc text for display/edit/view. --- See also unison-src/transcripts/doc-formatting.md. --- --- There is some heuristic/approximate logic in here - see the comment flagged --- with ** below. 
--- --- This function is a bit painful - it's trying to act on a sequence of lines, --- but that sequence is split up between the various blobs in the doc, which --- are separated by the elements tracking things like @[source] etc. It --- would be simplified if the doc representation was something like --- [Either Char EnrichedElement]. --- --- This function has some tracing which you can enable by deleting some calls to --- 'const id' below. -docNormalize :: (Ord v, Show v) => Term v a -> Term v a -docNormalize tm = case tm of - -- This pattern is just `DD.DocJoin seqs`, but exploded in order to grab - -- the annotations. The aim is just to map `normalize` over it. - a@(Term.App' c@(Term.Constructor' DD.DocRef DD.DocJoinId) s@(Term.Sequence' seqs)) - -> join (ABT.annotation a) - (ABT.annotation c) - (ABT.annotation s) - (normalize seqs) where - _ -> error $ "unexpected doc structure: " ++ show tm - where - normalize = - Sequence.fromList . (map TupleE.fst3) - . (tracing "after unbreakParas") . unbreakParas - . (tracing "after full preprocess") . preProcess - . (tracing "after unindent") . unIndent - . (tracing "initial parse") . miniPreProcess - preProcess xs = zip3 seqs - (lineStarteds $ Sequence.fromList seqs) - (followingLines $ Sequence.fromList seqs) - where seqs = map fst xs - miniPreProcess seqs = zip (toList seqs) (lineStarteds seqs) - unIndent - :: Ord v - => [(Term v a, UnbreakCase)] - -> [(Term v a, UnbreakCase)] - unIndent tms = map go tms where - go (b, previous) = - ((mapBlob $ (reduceIndent includeFirst minIndent)) b, previous) - where - -- Since previous was calculated before unindenting, it will often be wrongly - -- StartsIndented instead of StartsUnindented - but that's OK just for the test - -- below. And we'll recalculate it later in preProcess. - includeFirst = previous == LineEnds - concatenatedBlobs :: Text - concatenatedBlobs = mconcat (toList (fmap (getBlob . fst) tms)) - getBlob (DD.DocBlob txt) = txt - getBlob _ = "." 
- -- Note we exclude the first line when calculating the minimum indent - the lexer - -- already stripped leading spaces from it, and anyway it would have been sharing - -- its line with the [: and maybe other stuff. - nonInitialNonEmptyLines = - filter (not . Text.null) $ map Text.stripEnd $ drop 1 $ Text.lines - concatenatedBlobs - minIndent = minimumOrZero $ map (Text.length . (Text.takeWhile Char.isSpace)) - nonInitialNonEmptyLines - minimumOrZero xs = if length xs == 0 then 0 else minimum xs - reduceIndent :: Bool -> Int -> Text -> Text - reduceIndent includeFirst n t = - fixup - $ Text.unlines - $ mapExceptFirst reduceLineIndent onFirst - $ Text.lines t where - onFirst = if includeFirst then reduceLineIndent else id - reduceLineIndent l = result where - currentIndent = Text.length $ (Text.takeWhile Char.isSpace) l - remainder = (Text.dropWhile Char.isSpace) l - newIndent = maximum [0, currentIndent - n] - result = Text.replicate newIndent " " `mappend` remainder - -- unlines . lines adds a trailing newline if one was not present: undo that. - fixup = if Text.takeEnd 1 t == "\n" then id else Text.dropEnd 1 - -- Remove newlines between any sequence of non-empty zero-indent lines. - -- This is made more complicated by Doc elements (e.g. links) which break up a - -- blob but don't break a line of output text**. We sometimes need to refer back to the - -- previous blob to see whether a newline is between two zero-indented lines. - -- For example... - -- "This link to @foo makes it harder to see\n - -- that the newline should be removed." - -- ** Whether an element does this (breaks a blob but not a line of output text) really - -- depends on some things we don't know here: does an @[include] target doc occupy - -- just one line or several; whether this doc is going to be viewed or displayed. - -- So we'll get it wrong sometimes. 
The impact of this is that we may sometimes - -- misjudge whether a newline is separating two non-indented lines, and should therefore - -- be removed. - unbreakParas - :: (Show v, Ord v) - => [(Term v a, UnbreakCase, Bool)] - -> [(Term v a, UnbreakCase, Bool)] - unbreakParas = map go where - -- 'candidate' means 'candidate to be joined with an adjacent line as part of a - -- paragraph'. - go (b, previous, nextIsCandidate) = - (mapBlob go b, previous, nextIsCandidate) where - go txt = if Text.null txt then txt else tr result' where - tr = const id $ trace $ - "\nprocessElement on blob " ++ (show txt) ++ ", result' = " - ++ (show result') ++ ", lines: " ++ (show ls) ++ ", candidates = " - ++ (show candidates) ++ ", previous = " ++ (show previous) - ++ ", firstIsCandidate = " ++ (show firstIsCandidate) ++ "\n\n" - -- remove trailing whitespace - -- ls is non-empty thanks to the Text.null check above - -- Don't cut the last line's trailing whitespace - there's an assumption here - -- that it's followed by something which will put more text on the same line. - ls = mapExceptLast Text.stripEnd id $ Text.lines txt - -- Work out which lines are candidates to be joined as part of a paragraph, i.e. - -- are not indented. - candidate l = case Text.uncons l of - Just (initial, _) -> not . Char.isSpace $ initial - Nothing -> False -- empty line - -- The segment of this blob that runs up to the first newline may not itself - -- be the start of a line of the doc - for example if it's preceded by a link. - -- So work out whether the line of which it is a part is a candidate. - firstIsCandidate = case previous of - LineEnds -> candidate (head ls) - StartsIndented -> False - StartsUnindented -> True - candidates = firstIsCandidate : (tail (map candidate ls)) - result = mconcat $ intercalateMapWith sep fst (zip ls candidates) - sep (_, candidate1) (_, candidate2) = - if candidate1 && candidate2 then " " else "\n" - -- Text.lines forgets whether there was a trailing newline. 
- -- If there was one, then either add it back or convert it to a space. - result' = if (Text.takeEnd 1 txt) == "\n" - then if (last candidates) && nextIsCandidate - then result `Text.append` " " - else result `Text.append` "\n" - else result - -- A list whose entries match those of tms. `Nothing` is used for elements - -- which just continue a line, and so need to be ignored when looking back - -- for how the last line started. Otherwise describes whether the last - -- line of this entry is indented (or maybe terminated by a newline.) - -- A value of `Nothing` protects ensuing text from having its leading - -- whitespace removed by `unindent`. - -- Note that some elements render over multiple lines when displayed. - -- See test2 in transcript doc-formatting.md for an example of how - -- this looks when there is whitespace immediately following @[source] - -- or @[evaluate]. - lastLines :: Show v => Sequence.Seq (Term v a) -> [Maybe UnbreakCase] - lastLines tms = (flip fmap) (toList tms) $ \case - DD.DocBlob txt -> unbreakCase txt - DD.DocLink _ -> Nothing - DD.DocSource _ -> Nothing - DD.DocSignature _ -> Nothing - DD.DocEvaluate _ -> Nothing - Term.Var' _ -> Nothing -- @[include] - e@_ -> error ("unexpected doc element: " ++ show e) - -- Work out whether the last line of this blob is indented (or maybe - -- terminated by a newline.) - unbreakCase :: Text -> Maybe UnbreakCase - unbreakCase txt = - let (startAndNewline, afterNewline) = Text.breakOnEnd "\n" txt - in if Text.null startAndNewline - then Nothing - else if Text.null afterNewline - then Just LineEnds - else if Char.isSpace (Text.head afterNewline) - then Just StartsIndented - else Just StartsUnindented - -- A list whose entries match those of tms. Describes how the current - -- line started (the line including the start of this entry) - or LineEnds - -- if this entry is starting a line itself. - -- Calculated as the UnbreakCase of the previous entry that included a newline. 
- -- Really there's a function of type (a -> Bool) -> a -> [a] -> [a] in here - -- fighting to break free - overwriting elements that are 'shadowed' by - -- a preceding element for which the predicate is true, with a copy of - -- that element. - lineStarteds :: Show v => Sequence.Seq (Term v a) -> [UnbreakCase] - lineStarteds tms = tr $ quenchRuns LineEnds StartsUnindented $ xs'' where - tr = const id $ - trace $ "lineStarteds: xs = " ++ (show xs) ++ ", xss = " - ++ (show xss) ++ ", xs' = " ++ (show xs') ++ ", xs'' = " - ++ (show xs'') ++ "\n\n" - -- Make sure there's a Just at the start of the list so we always find - -- one when searching back. - -- Example: xs = [J1,N2,J3] - xs :: [Maybe UnbreakCase] - xs = Just LineEnds : (lastLines tms) - -- Example: xss = [[J1],[J1,N2],[J1,N2,J3]] - xss :: [[Maybe UnbreakCase]] - xss = drop 1 $ List.inits xs - -- Example: after each step of the map... - -- [[J1],[N2,J1],[J3,N2,J1]] -- after reverse - -- [Just J1, Just J1, Just J3] -- after find - -- ... - -- result = [1,1,3] - xs' = - map (Maybe.fromJust . Maybe.fromJust . (List.find isJust) . reverse) xss - xs'' = List.Extra.dropEnd 1 xs' - -- For each element, can it be a line-continuation of a preceding blob? - continuesLine :: Sequence.Seq (Term v a) -> [Bool] - continuesLine tms = (flip fmap) (toList tms) $ \case - DD.DocBlob _ -> False -- value doesn't matter - you don't get adjacent blobs - DD.DocLink _ -> True - DD.DocSource _ -> False - DD.DocSignature _ -> False - DD.DocEvaluate _ -> False - Term.Var' _ -> False -- @[include] - _ -> error ("unexpected doc element" ++ show tm) - -- A list whose entries match those of tms. Can the subsequent entry by a - -- line continuation of this one? - followingLines tms = drop 1 ((continuesLine tms) ++ [False]) - mapExceptFirst :: (a -> b) -> (a -> b) -> [a] -> [b] - mapExceptFirst fRest fFirst = \case - [] -> [] - x : rest -> (fFirst x) : (map fRest rest) - mapExceptLast fRest fLast = reverse . (mapExceptFirst fRest fLast) . 
reverse - tracing :: Show a => [Char] -> a -> a - tracing when x = - (const id $ trace ("at " ++ when ++ ": " ++ (show x) ++ "\n")) x - blob aa ac at txt = - Term.app aa (Term.constructor ac DD.docRef DD.docBlobId) (Term.text at txt) - join aa ac as segs = - Term.app aa (Term.constructor ac DD.docRef DD.docJoinId) (Term.seq' as segs) - mapBlob :: Ord v => (Text -> Text) -> Term v a -> Term v a - -- this pattern is just `DD.DocBlob txt` but exploded to capture the annotations as well - mapBlob f (aa@(Term.App' ac@(Term.Constructor' DD.DocRef DD.DocBlobId) at@(Term.Text' txt))) - = blob (ABT.annotation aa) (ABT.annotation ac) (ABT.annotation at) (f txt) - mapBlob _ t = t - -delayQuote :: Var v => TermP v -delayQuote = P.label "quote" $ do - start <- reserved "'" - e <- termLeaf - pure $ DD.delayTerm (ann start <> ann e) e - -bang :: Var v => TermP v -bang = P.label "bang" $ do - start <- reserved "!" - e <- termLeaf - pure $ DD.forceTerm (ann start <> ann e) (ann start) e - -var :: Var v => L.Token v -> Term v Ann -var t = Term.var (ann t) (L.payload t) - -seqOp :: Ord v => P v Pattern.SeqOp -seqOp = - (Pattern.Snoc <$ matchToken (L.SymbolyId ":+" Nothing)) - <|> (Pattern.Cons <$ matchToken (L.SymbolyId "+:" Nothing)) - <|> (Pattern.Concat <$ matchToken (L.SymbolyId "++" Nothing)) - -term4 :: Var v => TermP v -term4 = f <$> some termLeaf - where - f (func:args) = Term.apps func ((\a -> (ann func <> ann a, a)) <$> args) - f [] = error "'some' shouldn't produce an empty list" - --- e.g. 
term4 + term4 - term4 --- or term4 || term4 && term4 -infixAppOrBooleanOp :: Var v => TermP v -infixAppOrBooleanOp = chainl1 term4 (or <|> and <|> infixApp) - where or = orf <$> label "or" (reserved "||") - orf op lhs rhs = Term.or (ann op <> ann rhs) lhs rhs - and = andf <$> label "and" (reserved "&&") - andf op lhs rhs = Term.and (ann op <> ann rhs) lhs rhs - infixApp = infixAppf <$> label "infixApp" (hashQualifiedInfixTerm <* optional semi) - infixAppf op lhs rhs = Term.apps op [(ann lhs, lhs), (ann rhs, rhs)] - -typedecl :: Var v => P v (L.Token v, Type v Ann) -typedecl = - (,) <$> P.try (prefixDefinitionName <* reserved ":") - <*> TypeParser.valueType - <* semi - -verifyRelativeVarName :: Var v => P v (L.Token v) -> P v (L.Token v) -verifyRelativeVarName p = do - v <- p - verifyRelativeName' (Name.fromVar <$> v) - pure v - -verifyRelativeName :: Ord v => P v (L.Token Name) -> P v (L.Token Name) -verifyRelativeName name = do - name <- name - verifyRelativeName' name - pure name - -verifyRelativeName' :: Ord v => L.Token Name -> P v () -verifyRelativeName' name = do - let txt = Name.toText . L.payload $ name - when (Text.isPrefixOf "." txt && txt /= ".") $ - failCommitted (DisallowedAbsoluteName name) - -binding :: forall v. Var v => P v ((Ann, v), Term v Ann) -binding = label "binding" $ do - typ <- optional typedecl - -- a ++ b = ... OR - -- foo `mappend` bar = ... 
- let infixLhs = do - (arg1, op) <- P.try $ - (,) <$> prefixDefinitionName <*> infixDefinitionName - arg2 <- prefixDefinitionName - pure (ann arg1, op, [arg1, arg2]) - let prefixLhs = do - v <- prefixDefinitionName - vs <- many prefixDefinitionName - pure (ann v, v, vs) - let - lhs :: P v (Ann, L.Token v, [L.Token v]) - lhs = infixLhs <|> prefixLhs - case typ of - Nothing -> do - -- we haven't seen a type annotation, so lookahead to '=' before commit - (loc, name, args) <- P.try (lhs <* P.lookAhead (openBlockWith "=")) - body <- block "=" - verifyRelativeName' (fmap Name.fromVar name) - pure $ mkBinding loc (L.payload name) args body - Just (nameT, typ) -> do - (_, name, args) <- lhs - verifyRelativeName' (fmap Name.fromVar name) - when (L.payload name /= L.payload nameT) $ - customFailure $ SignatureNeedsAccompanyingBody nameT - body <- block "=" - pure $ fmap (\e -> Term.ann (ann nameT <> ann e) e typ) - (mkBinding (ann nameT) (L.payload name) args body) - where - mkBinding loc f [] body = ((loc, f), body) - mkBinding loc f args body = - ((loc, f), Term.lam' (loc <> ann body) (L.payload <$> args) body) - - -customFailure :: P.MonadParsec e s m => e -> m a -customFailure = P.customFailure - -block :: forall v. Var v => String -> TermP v -block s = block' False s (openBlockWith s) closeBlock - --- example: use Foo.bar.Baz + ++ x --- + ++ and x are called the "suffixes" of the `use` statement, and --- `Foo.bar.Baz` is called the prefix. A `use` statement has the effect --- of allowing you to reference identifiers of the form . --- using just . 
--- --- `use foo` by itself is equivalent to `use foo bar baz ...` for all --- names in the environment prefixed by `foo` --- --- todo: doesn't support use Foo.bar ++#abc, which lets you use `++` unqualified to refer to `Foo.bar.++#abc` -importp :: Ord v => P v [(Name, Name)] -importp = do - kw <- reserved "use" - -- we allow symbolyId here and parse the suffix optionaly, so we can generate - -- a nicer error message if the suffixes are empty - prefix <- optional - $ fmap Right (importWordyId <|> importDotId) -- use . Nat - <|> fmap Left importSymbolyId - suffixes <- optional (some (importWordyId <|> importSymbolyId)) - case (prefix, suffixes) of - (Nothing, _) -> P.customFailure $ UseEmpty kw - (Just prefix@(Left _), _) -> P.customFailure $ UseInvalidPrefixSuffix prefix suffixes - (Just (Right prefix), Nothing) -> do -- `wildcard import` - names <- asks names - pure $ Names.expandWildcardImport (L.payload prefix) (Names.currentNames names) - (Just (Right prefix), Just suffixes) -> pure $ do - suffix <- L.payload <$> suffixes - pure (suffix, Name.joinDot (L.payload prefix) suffix) - ---module Monoid where --- -- we replace all the binding names with Monoid.op, and --- -- if `op` is free in the body of any binding, we replace it with `Monoid.op` --- op : Monoid a -> (a -> a -> a) --- op m = case m of Monoid - -data BlockElement v - = Binding ((Ann, v), Term v Ann) - | Action (Term v Ann) - | Namespace String [BlockElement v] - -namespaceBlock :: Var v => P v (BlockElement v) -namespaceBlock = do - _ <- reserved "namespace" - -- need a version of verifyRelativeName that takes a `Token Name` - name <- verifyRelativeName importWordyId - let statement = (Binding <$> binding) <|> namespaceBlock - _ <- openBlockWith "where" - elems <- sepBy semi statement - _ <- closeBlock - pure $ Namespace (Name.toString $ L.payload name) elems - -toBindings :: forall v . 
Var v => [BlockElement v] -> [((Ann,v), Term v Ann)] -toBindings b = let - expand (Binding ((a, v), e)) = [((a, Just v), e)] - expand (Action e) = [((ann e, Nothing), e)] - expand (Namespace name bs) = scope name $ expand =<< bs - v `orBlank` i = fromMaybe (Var.nameds $ "_" ++ show i) v - finishBindings bs = - [((a, v `orBlank` i), e) | (((a,v), e), i) <- bs `zip` [(1::Int)..]] - - scope :: String -> [((Ann, Maybe v), Term v Ann)] - -> [((Ann, Maybe v), Term v Ann)] - scope name bs = let - vs :: [Maybe v] - vs = snd . fst <$> bs - prefix :: v -> v - prefix v = Var.named (Text.pack name `mappend` "." `mappend` Var.name v) - vs' :: [Maybe v] - vs' = fmap prefix <$> vs - substs = [ (v, Term.var () v') | (Just v, Just v') <- vs `zip` vs' ] - sub = ABT.substsInheritAnnotation substs - in [ ((a, v'), sub e) | (((a,_),e), v') <- bs `zip` vs' ] - in finishBindings (expand =<< b) - --- subst --- use Foo.Bar + blah --- use Bar.Baz zonk zazzle -imports :: Var v => P v (Names, [(v,v)]) -imports = do - let sem = P.try (semi <* P.lookAhead (reserved "use")) - imported <- mconcat . reverse <$> sepBy sem importp - ns' <- Names.importing imported <$> asks names - pure (ns', [(Name.toVar suffix, Name.toVar full) | (suffix,full) <- imported ]) - --- A key feature of imports is we want to be able to say: --- `use foo.bar Baz qux` without having to specify whether `Baz` or `qux` are --- terms or types. -substImports :: Var v => Names -> [(v,v)] -> Term v Ann -> Term v Ann -substImports ns imports = - ABT.substsInheritAnnotation [ (suffix, Term.var () full) - | (suffix,full) <- imports ] . -- no guard here, as `full` could be bound - -- not in Names, but in a later term binding - Term.substTypeVars [ (suffix, Type.var () full) - | (suffix, full) <- imports, Names.hasTypeNamed (Name.fromVar full) ns ] - -block' - :: forall v b - . 
Var v - => IsTop - -> String - -> P v (L.Token ()) - -> P v b - -> TermP v -block' isTop s openBlock closeBlock = do - open <- openBlock - (names, imports) <- imports - _ <- optional semi - statements <- local (\e -> e { names = names } ) $ sepBy semi statement - _ <- closeBlock - substImports names imports <$> go open statements - where - statement = namespaceBlock <|> - asum [ Binding <$> binding, Action <$> blockTerm ] - go :: L.Token () -> [BlockElement v] -> P v (Term v Ann) - go open bs - = let - startAnnotation = (fst . fst . head $ toBindings bs) - endAnnotation = (fst . fst . last $ toBindings bs) - finish tm = case Components.minimize' tm of - Left dups -> customFailure $ DuplicateTermNames (toList dups) - Right tm -> pure tm - in - case reverse bs of - Namespace _v _ : _ -> finish $ Term.letRec - isTop - (startAnnotation <> endAnnotation) - (toBindings bs) - (Term.var endAnnotation - (positionalVar endAnnotation Var.missingResult) - ) - Binding ((a, _v), _) : _ -> finish $ Term.letRec - isTop - (startAnnotation <> endAnnotation) - (toBindings bs) - (Term.var a (positionalVar endAnnotation Var.missingResult)) - Action e : bs -> finish $ Term.letRec - isTop - (startAnnotation <> ann e) - (toBindings $ reverse bs) - e - [] -> customFailure $ EmptyBlock (const s <$> open) - -number :: Var v => TermP v -number = number' (tok Term.int) (tok Term.nat) (tok Term.float) - -number' - :: Ord v - => (L.Token Int64 -> a) - -> (L.Token Word64 -> a) - -> (L.Token Double -> a) - -> P v a -number' i u f = fmap go numeric - where - go num@(L.payload -> p) | any (\c -> c == '.' || c == 'e') p && take 1 p == "+" = f (read . drop 1 <$> num) - | any (\c -> c == '.' || c == 'e') p = f (read <$> num) - | take 1 p == "+" = i (read . 
drop 1 <$> num) - | take 1 p == "-" = i (read <$> num) - | otherwise = u (read <$> num) - -tupleOrParenthesizedTerm :: Var v => TermP v -tupleOrParenthesizedTerm = label "tuple" $ tupleOrParenthesized term DD.unitTerm pair - where - pair t1 t2 = - Term.app (ann t1 <> ann t2) - (Term.app (ann t1) - (Term.constructor (ann t1 <> ann t2) DD.pairRef 0) - t1) - t2 diff --git a/parser-typechecker/src/Unison/TermPrinter.hs b/parser-typechecker/src/Unison/TermPrinter.hs deleted file mode 100644 index 35ed53ea8c..0000000000 --- a/parser-typechecker/src/Unison/TermPrinter.hs +++ /dev/null @@ -1,1029 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.TermPrinter where - -import Unison.Prelude - -import Data.List -import qualified Data.Map as Map -import qualified Data.Set as Set -import Data.Text ( unpack ) -import qualified Data.Text as Text -import qualified Text.Show.Unicode as U -import Data.Vector ( ) -import Unison.ABT ( pattern AbsN', reannotateUp, annotation ) -import qualified Unison.ABT as ABT -import qualified Unison.Blank as Blank -import qualified Unison.HashQualified as HQ -import Unison.Lexer ( symbolyId, showEscapeChar ) -import Unison.Name ( Name ) -import qualified Unison.Name as Name -import qualified Unison.NameSegment as NameSegment -import Unison.NamePrinter ( styleHashQualified'' ) -import qualified Unison.Pattern as Pattern -import Unison.Pattern ( Pattern ) -import Unison.Reference ( Reference ) -import qualified Unison.Referent as Referent -import qualified Unison.Util.SyntaxText as S -import Unison.Util.SyntaxText ( SyntaxText ) -import Unison.Term -import Unison.Type ( Type ) -import qualified Unison.Type as Type -import qualified Unison.TypePrinter as TypePrinter -import Unison.Var ( Var ) -import qualified Unison.Var as Var -import Unison.Util.Monoid ( intercalateMap ) -import qualified Unison.Util.Pretty as PP -import Unison.Util.Pretty ( Pretty, ColorText ) -import 
Unison.PrettyPrintEnv ( PrettyPrintEnv, Suffix, Prefix, Imports, elideFQN ) -import qualified Unison.PrettyPrintEnv as PrettyPrintEnv -import qualified Unison.Builtin.Decls as DD -import Unison.Builtin.Decls (pattern TuplePattern, pattern TupleTerm') -import qualified Unison.ConstructorType as CT - -pretty :: Var v => PrettyPrintEnv -> Term v a -> Pretty ColorText -pretty env tm = PP.syntaxToColor $ pretty0 env (ac (-1) Normal Map.empty MaybeDoc) (printAnnotate env tm) - -pretty' :: Var v => Maybe Int -> PrettyPrintEnv -> Term v a -> ColorText -pretty' (Just width) n t = PP.render width $ PP.syntaxToColor $ pretty0 n (ac (-1) Normal Map.empty MaybeDoc) (printAnnotate n t) -pretty' Nothing n t = PP.renderUnbroken $ PP.syntaxToColor $ pretty0 n (ac (-1) Normal Map.empty MaybeDoc) (printAnnotate n t) - --- Information about the context in which a term appears, which affects how the --- term should be rendered. -data AmbientContext = AmbientContext - { - -- The operator precedence of the enclosing context (a number from 0 to 11, - -- or -1 to render without outer parentheses unconditionally). - -- Function application has precedence 10. - precedence :: Int - , blockContext :: BlockContext - , infixContext :: InfixContext - , imports :: Imports - , docContext :: DocLiteralContext - } - --- Description of the position of this ABT node, when viewed in the --- surface syntax. -data BlockContext - -- This ABT node is at the top level of a TermParser.block. - = Block - | Normal - deriving (Eq) - -data InfixContext - -- This ABT node is an infix operator being used in infix position. - = Infix - | NonInfix - deriving (Eq) - -data DocLiteralContext - -- We won't try and render this ABT node or anything under it as a [: @Doc literal :] - = NoDoc - -- We'll keep checking as we recurse down - | MaybeDoc - deriving (Eq) - -{- Explanation of precedence handling - - We illustrate precedence rules as follows. 
- - >=10 - 10f 10x - - This example shows that a function application f x is enclosed in - parentheses whenever the ambient precedence around it is >= 10, and that - when printing its two components, an ambient precedence of 10 is used in - both places. - - The pretty-printer uses the following rules for printing terms. - - >=12 - let x = (-1)y - 1z - - >=11 - ! 11x - ' 11x - 11x ? - - >=10 - 10f 10x 10y ... - - >=3 - x -> 2y - 3x + 3y + ... 3z - - >=2 - if 0a then 0b else 0c - handle 0b with 0h - case 2x of - a | 2g -> 0b - - >=0 - 10a : 0Int - - - And the following for patterns. - - >=11 - x@11p - - >=10 - Con 10p 10q ... - - -- never any external parens added around the following - { p } - { Eff 10p 10q ... -> 0k } - --} - -pretty0 - :: Var v - => PrettyPrintEnv - -> AmbientContext - -> Term3 v PrintAnnotation - -> Pretty SyntaxText -pretty0 - n - a@AmbientContext - { precedence = p - , blockContext = bc - , infixContext = ic - , imports = im - , docContext = doc - } - term - -- Note: the set of places in this function that call calcImports has to be kept in sync - -- with the definition of immediateChildBlockTerms, otherwise `use` statements get - -- inserted at the wrong scope. 
- = specialCases term $ \case - Var' v -> parenIfInfix name ic $ styleHashQualified'' (fmt S.Var) name - -- OK since all term vars are user specified, any freshening was just added during typechecking - where name = elideFQN im $ HQ.unsafeFromVar (Var.reset v) - Ref' r -> parenIfInfix name ic $ styleHashQualified'' (fmt $ S.Reference r) name - where name = elideFQN im $ PrettyPrintEnv.termName n (Referent.Ref r) - TermLink' r -> parenIfInfix name ic $ - fmt S.LinkKeyword "termLink " <> styleHashQualified'' (fmt $ S.Referent r) name - where name = elideFQN im $ PrettyPrintEnv.termName n r - TypeLink' r -> parenIfInfix name ic $ - fmt S.LinkKeyword "typeLink " <> styleHashQualified'' (fmt $ S.Reference r) name - where name = elideFQN im $ PrettyPrintEnv.typeName n r - Ann' tm t -> - paren (p >= 0) - $ pretty0 n (ac 10 Normal im doc) tm - <> PP.hang (fmt S.TypeAscriptionColon " :" ) (TypePrinter.pretty0 n im 0 t) - Int' i -> fmt S.NumericLiteral $ (if i >= 0 then l "+" else mempty) <> l (show i) - Nat' u -> fmt S.NumericLiteral $ l $ show u - Float' f -> fmt S.NumericLiteral $ l $ show f - -- TODO How to handle Infinity, -Infinity and NaN? Parser cannot parse - -- them. Haskell doesn't have literals for them either. Is this - -- function only required to operate on terms produced by the parser? - -- In which case the code is fine as it stands. If it can somehow run - -- on values produced by execution (or, one day, on terms produced by - -- metaprograms), then it needs to be able to print them (and then the - -- parser ought to be able to parse them, to maintain symmetry.) 
- Boolean' b -> fmt S.BooleanLiteral $ if b then l "true" else l "false" - Text' s -> fmt S.TextLiteral $ l $ U.ushow s - Char' c -> fmt S.CharLiteral $ l $ case showEscapeChar c of - Just c -> "?\\" ++ [c] - Nothing -> '?': [c] - Blank' id -> fmt S.Blank $ l "_" <> l (fromMaybe "" (Blank.nameb id)) - Constructor' ref i -> styleHashQualified'' (fmt S.Constructor) $ - elideFQN im $ PrettyPrintEnv.termName n (Referent.Con ref i CT.Data) - Request' ref i -> styleHashQualified'' (fmt S.Request) $ - elideFQN im $ PrettyPrintEnv.termName n (Referent.Con ref i CT.Effect) - Handle' h body -> paren (p >= 2) $ - if PP.isMultiLine pb || PP.isMultiLine ph then PP.lines [ - (fmt S.ControlKeyword "handle") `PP.hang` pb, - (fmt S.ControlKeyword "with") `PP.hang` ph - ] - else PP.spaced [ - (fmt S.ControlKeyword "handle") `PP.hang` pb - <> PP.softbreak - <> (fmt S.ControlKeyword "with") `PP.hang` ph - ] - where - pb = pblock body - ph = pblock h - pblock tm = let (im', uses) = calcImports im tm - in uses $ [pretty0 n (ac 0 Block im' doc) tm] - App' x (Constructor' DD.UnitRef 0) -> - paren (p >= 11) $ (fmt S.DelayForceChar $ l "!") <> pretty0 n (ac 11 Normal im doc) x - LamNamed' v x | (Var.name v) == "()" -> - paren (p >= 11) $ (fmt S.DelayForceChar $ l "'") <> pretty0 n (ac 11 Normal im doc) x - Sequence' xs -> PP.group $ - (fmt S.DelimiterChar $ l "[") <> optSpace - <> intercalateMap ((fmt S.DelimiterChar $ l ",") <> PP.softbreak <> optSpace <> optSpace) - (pretty0 n (ac 0 Normal im doc)) - xs - <> optSpace <> (fmt S.DelimiterChar $ l "]") - where optSpace = PP.orElse "" " " - If' cond t f -> paren (p >= 2) $ - if PP.isMultiLine pt || PP.isMultiLine pf then PP.lines [ - (fmt S.ControlKeyword "if ") <> pcond <> (fmt S.ControlKeyword " then") `PP.hang` pt, - (fmt S.ControlKeyword "else") `PP.hang` pf - ] - else PP.spaced [ - ((fmt S.ControlKeyword "if") `PP.hang` pcond) <> ((fmt S.ControlKeyword " then") `PP.hang` pt), - (fmt S.ControlKeyword "else") `PP.hang` pf - ] - where - 
pcond = pretty0 n (ac 2 Block im doc) cond - pt = branch t - pf = branch f - branch tm = let (im', uses) = calcImports im tm - in uses $ [pretty0 n (ac 0 Block im' doc) tm] - And' x y -> - paren (p >= 10) $ PP.spaced [ - pretty0 n (ac 10 Normal im doc) x, - fmt S.ControlKeyword "&&", - pretty0 n (ac 10 Normal im doc) y - ] - Or' x y -> - paren (p >= 10) $ PP.spaced [ - pretty0 n (ac 10 Normal im doc) x, - fmt S.ControlKeyword "||", - pretty0 n (ac 10 Normal im doc) y - ] - LetBlock bs e -> printLet bc bs e im' uses - Match' scrutinee branches -> paren (p >= 2) $ - if PP.isMultiLine ps then PP.lines [ - (fmt S.ControlKeyword "match ") `PP.hang` ps, - (fmt S.ControlKeyword " with") `PP.hang` pbs - ] - else ((fmt S.ControlKeyword "match ") <> ps <> (fmt S.ControlKeyword " with")) `PP.hang` pbs - where ps = pretty0 n (ac 2 Normal im doc) scrutinee - pbs = printCase n im doc branches - - t -> l "error: " <> l (show t) - where - specialCases term go = case (term, binaryOpsPred) of - (DD.Doc, _) | doc == MaybeDoc -> - if isDocLiteral term - then prettyDoc n im term - else pretty0 n (a {docContext = NoDoc}) term - (TupleTerm' [x], _) -> let - pair = parenIfInfix name ic $ styleHashQualified'' (fmt S.Constructor) name - where name = elideFQN im $ PrettyPrintEnv.termName n (DD.pairCtorRef) in - paren (p >= 10) $ pair `PP.hang` - PP.spaced [pretty0 n (ac 10 Normal im doc) x, fmt S.Constructor "()" ] - (TupleTerm' xs, _) -> paren True $ commaList xs - BinaryAppsPred' apps lastArg -> paren (p >= 3) $ - binaryApps apps (pretty0 n (ac 3 Normal im doc) lastArg) - _ -> case (term, nonForcePred) of - AppsPred' f args -> - paren (p >= 10) $ pretty0 n (ac 10 Normal im doc) f `PP.hang` - PP.spacedMap (pretty0 n (ac 10 Normal im doc)) args - _ -> case (term, nonUnitArgPred) of - (LamsNamedMatch' [] branches, _) -> - paren (p >= 3) $ - PP.group (fmt S.ControlKeyword "cases") `PP.hang` printCase n im doc branches - LamsNamedPred' vs body -> - paren (p >= 3) $ - PP.group (varList vs <> fmt 
S.ControlKeyword " ->") `PP.hang` pretty0 n (ac 2 Block im doc) body - _ -> go term - - sepList = sepList' (pretty0 n (ac 0 Normal im doc)) - sepList' f sep xs = fold $ intersperse sep (map f xs) - varList = sepList' (PP.text . Var.name) PP.softbreak - commaList = sepList (fmt S.DelimiterChar (l ",") <> PP.softbreak) - - printLet :: Var v - => BlockContext - -> [(v, Term3 v PrintAnnotation)] - -> Term3 v PrintAnnotation - -> Imports - -> ([Pretty SyntaxText] -> Pretty SyntaxText) - -> Pretty SyntaxText - printLet sc bs e im' uses = - paren ((sc /= Block) && p >= 12) - $ letIntro - $ (uses [(PP.lines (map printBinding bs ++ - [PP.group $ pretty0 n (ac 0 Normal im' doc) e]))]) - where - printBinding (v, binding) = if isBlank $ Var.nameStr v - then pretty0 n (ac (-1) Normal im' doc) binding - else prettyBinding0 n (ac (-1) Normal im' doc) (HQ.unsafeFromVar v) binding - letIntro = case sc of - Block -> id - Normal -> \x -> (fmt S.ControlKeyword "let") `PP.hang` x - - -- This predicate controls which binary functions we render as infix - -- operators. At the moment the policy is just to render symbolic - -- operators as infix - not 'wordy' function names. So we produce - -- "x + y" and "foo x y" but not "x `foo` y". - binaryOpsPred :: Var v => Term3 v PrintAnnotation -> Bool - binaryOpsPred = \case - Ref' r | isSymbolic (PrettyPrintEnv.termName n (Referent.Ref r)) -> True - Var' v | isSymbolic (HQ.unsafeFromVar v) -> True - _ -> False - - nonForcePred :: Term3 v PrintAnnotation -> Bool - nonForcePred = \case - Constructor' DD.UnitRef 0 -> False - Constructor' DD.DocRef _ -> False - _ -> True - - nonUnitArgPred :: Var v => v -> Bool - nonUnitArgPred v = (Var.name v) /= "()" - - -- Render a binary infix operator sequence, like [(a2, f2), (a1, f1)], - -- meaning (a1 `f1` a2) `f2` (a3 rendered by the caller), producing - -- "a1 `f1` a2 `f2`". Except the operators are all symbolic, so we won't - -- produce any backticks. 
We build the result out from the right, - -- starting at `f2`. - binaryApps - :: Var v => [(Term3 v PrintAnnotation, Term3 v PrintAnnotation)] - -> Pretty SyntaxText - -> Pretty SyntaxText - binaryApps xs last = unbroken `PP.orElse` broken - -- todo: use `PP.column2` in the case where we need to break - where - unbroken = PP.spaced (ps ++ [last]) - broken = PP.column2 (psCols $ [""] ++ ps ++ [last]) - psCols ps = case take 2 ps of - [x,y] -> (x,y) : psCols (drop 2 ps) - [] -> [] - _ -> error "??" - ps = join $ [r a f | (a, f) <- reverse xs ] - r a f = [pretty0 n (ac 3 Normal im doc) a, - pretty0 n (AmbientContext 10 Normal Infix im doc) f] - - (im', uses) = calcImports im term - -prettyPattern - :: forall v loc . Var v - => PrettyPrintEnv - -> AmbientContext - -> Int - -> [v] - -> Pattern loc - -> (Pretty SyntaxText, [v]) --- vs is the list of pattern variables used by the pattern, plus possibly a --- tail of variables it doesn't use. This tail is the second component of --- the return value. 
-prettyPattern n c@(AmbientContext { imports = im }) p vs patt = case patt of - Pattern.Char _ c -> (fmt S.CharLiteral $ l $ case showEscapeChar c of - Just c -> "?\\" ++ [c] - Nothing -> '?': [c], vs) - Pattern.Unbound _ -> (fmt S.DelimiterChar $ l "_", vs) - Pattern.Var _ -> let (v : tail_vs) = vs in (fmt S.Var $ l $ Var.nameStr v, tail_vs) - Pattern.Boolean _ b -> (fmt S.BooleanLiteral $ if b then l "true" else l "false", vs) - Pattern.Int _ i -> (fmt S.NumericLiteral $ (if i >= 0 then l "+" else mempty) <> (l $ show i), vs) - Pattern.Nat _ u -> (fmt S.NumericLiteral $ l $ show u, vs) - Pattern.Float _ f -> (fmt S.NumericLiteral $ l $ show f, vs) - Pattern.Text _ t -> (fmt S.TextLiteral $ l $ show t, vs) - TuplePattern pats | length pats /= 1 -> - let (pats_printed, tail_vs) = patterns (-1) vs pats - in (PP.parenthesizeCommas pats_printed, tail_vs) - Pattern.Constructor _ ref i [] -> - (styleHashQualified'' (fmt S.Constructor) $ elideFQN im (PrettyPrintEnv.patternName n ref i), vs) - Pattern.Constructor _ ref i pats -> - let (pats_printed, tail_vs) = patternsSep 10 PP.softbreak vs pats - in ( paren (p >= 10) - $ styleHashQualified'' (fmt S.Constructor) (elideFQN im (PrettyPrintEnv.patternName n ref i)) - `PP.hang` pats_printed - , tail_vs) - Pattern.As _ pat -> - let (v : tail_vs) = vs - (printed, eventual_tail) = prettyPattern n c 11 tail_vs pat - in (paren (p >= 11) $ ((fmt S.Var $ l $ Var.nameStr v) <> (fmt S.DelimiterChar $ l "@") <> printed), eventual_tail) - Pattern.EffectPure _ pat -> - let (printed, eventual_tail) = prettyPattern n c (-1) vs pat - in (PP.sep " " [fmt S.DelimiterChar "{", printed, fmt S.DelimiterChar "}"], eventual_tail) - Pattern.EffectBind _ ref i pats k_pat -> - let (pats_printed , tail_vs ) = patternsSep 10 PP.softbreak vs pats - (k_pat_printed, eventual_tail) = prettyPattern n c 0 tail_vs k_pat - in ((fmt S.DelimiterChar "{" ) <> - (PP.sep " " . 
PP.nonEmpty $ [ - styleHashQualified'' (fmt S.Request) $ elideFQN im (PrettyPrintEnv.patternName n ref i), - pats_printed, - fmt S.ControlKeyword "->", - k_pat_printed]) <> - (fmt S.DelimiterChar "}") - , eventual_tail) - Pattern.SequenceLiteral _ pats -> - let (pats_printed, tail_vs) = patternsSep (-1) (fmt S.DelimiterChar ", ") vs pats - in ((fmt S.DelimiterChar "[") <> pats_printed <> (fmt S.DelimiterChar "]"), tail_vs) - Pattern.SequenceOp _ l op r -> - let (pl, lvs) = prettyPattern n c p vs l - (pr, rvs) = prettyPattern n c (p + 1) lvs r - f i s = (paren (p >= i) (pl <> " " <> (fmt (S.Op op) s) <> " " <> pr), rvs) - in case op of - Pattern.Cons -> f 9 "+:" - Pattern.Snoc -> f 9 ":+" - Pattern.Concat -> f 9 "++" - where - l :: IsString s => String -> s - l = fromString - patterns p vs (pat : pats) = - let (printed , tail_vs ) = - prettyPattern n c p vs pat - (rest_printed, eventual_tail) = patterns p tail_vs pats - in (printed : rest_printed, eventual_tail) - patterns _ vs [] = ([], vs) - patternsSep p sep vs pats = case patterns p vs pats of - (printed, tail_vs) -> (PP.sep sep printed, tail_vs) - -printCase - :: Var v - => PrettyPrintEnv - -> Imports - -> DocLiteralContext - -> [MatchCase () (Term3 v PrintAnnotation)] - -> Pretty SyntaxText -printCase env im doc ms = PP.lines $ map each gridArrowsAligned where - each (lhs, arrow, body) = PP.group $ (lhs <> arrow) `PP.hang` body - grid = go <$> ms - gridArrowsAligned = tidy <$> zip (PP.align' (f <$> grid)) grid where - f (a, b, _) = (a, Just b) - tidy ((a', b'), (_, _, c)) = (a', b', c) - go (MatchCase pat guard (AbsN' vs body)) = - (lhs, arrow, (uses [pretty0 env (ac 0 Block im' doc) body])) - where - lhs = PP.group (fst (prettyPattern env (ac 0 Block im doc) (-1) vs pat)) - <> printGuard guard - arrow = fmt S.ControlKeyword "->" - printGuard (Just g0) = let - -- strip off any Abs-chain around the guard, guard variables are rendered - -- like any other variable, ex: case Foo x y | x < y -> ... 
- g = case g0 of - AbsN' _ g' -> g' - _ -> g0 - in PP.group $ PP.spaced [(fmt S.DelimiterChar " |"), pretty0 env (ac 2 Normal im doc) g] - printGuard Nothing = mempty - (im', uses) = calcImports im body - go _ = (l "error", mempty, mempty) - -{- Render a binding, producing output of the form - -foo : t -> u -foo a = ... - -The first line is only output if the term has a type annotation as the -outermost constructor. - -Binary functions with symbolic names are output infix, as follows: - -(+) : t -> t -> t -a + b = ... - --} -prettyBinding - :: Var v - => PrettyPrintEnv - -> HQ.HashQualified - -> Term2 v at ap v a - -> Pretty SyntaxText -prettyBinding n = prettyBinding0 n $ ac (-1) Block Map.empty MaybeDoc - -prettyBinding' :: - Var v => Int -> PrettyPrintEnv -> HQ.HashQualified -> Term v a -> ColorText -prettyBinding' width n v t = PP.render width $ PP.syntaxToColor $ prettyBinding n v t - -prettyBinding0 - :: Var v - => PrettyPrintEnv - -> AmbientContext - -> HQ.HashQualified - -> Term2 v at ap v a - -> Pretty SyntaxText -prettyBinding0 env a@AmbientContext { imports = im, docContext = doc } v term = go - (symbolic && isBinary term) - term - where - go infix' = \case - Ann' tm tp -> PP.lines - [ PP.group - (renderName v <> PP.hang (fmt S.TypeAscriptionColon " :") - (TypePrinter.pretty0 env im (-1) tp) - ) - , PP.group (prettyBinding0 env a v tm) - ] - (printAnnotate env -> LamsNamedMatch' vs branches) -> - PP.group - $ PP.group (defnLhs v vs <> fmt S.BindingEquals " =" <> " " <> fmt S.ControlKeyword "cases") - `PP.hang` printCase env im doc branches - LamsNamedOrDelay' vs body -> - let (im', uses) = calcImports im body' - -- In the case where we're being called from inside `pretty0`, this - -- call to printAnnotate is unfortunately repeating work we've already - -- done. 
- body' = printAnnotate env body - in PP.group - $ PP.group (defnLhs v vs <> fmt S.BindingEquals " =") - `PP.hang` uses [pretty0 env (ac (-1) Block im' doc) body'] - t -> l "error: " <> l (show t) - where - defnLhs v vs - | infix' = case vs of - x : y : _ -> PP.sep - " " - [ fmt S.Var $ PP.text (Var.name x) - , styleHashQualified'' (fmt $ S.HashQualifier v) $ elideFQN im v - , fmt S.Var $ PP.text (Var.name y) - ] - _ -> l "error" - | null vs = renderName v - | otherwise = renderName v `PP.hang` args vs - args = PP.spacedMap $ fmt S.Var . PP.text . Var.name - renderName n = - let n' = elideFQN im n - in parenIfInfix n' NonInfix $ styleHashQualified'' (fmt $ S.HashQualifier n') n' - symbolic = isSymbolic v - isBinary = \case - Ann' tm _ -> isBinary tm - LamsNamedMatch' vs _ -> length vs == 1 - LamsNamedOrDelay' vs _ -> length vs == 2 - _ -> False -- unhittable - -isDocLiteral :: Term3 v PrintAnnotation -> Bool -isDocLiteral term = case term of - DD.DocJoin segs -> all isDocLiteral segs - DD.DocBlob _ -> True - DD.DocLink (DD.LinkTerm (TermLink' _)) -> True - DD.DocLink (DD.LinkType (TypeLink' _)) -> True - DD.DocSource (DD.LinkTerm (TermLink' _)) -> True - DD.DocSource (DD.LinkType (TypeLink' _)) -> True - DD.DocSignature (TermLink' _) -> True - DD.DocEvaluate (TermLink' _) -> True - Ref' _ -> True -- @[include] - _ -> False - --- Similar to DisplayValues.displayDoc, but does not follow and expand references. 
-prettyDoc :: Var v => PrettyPrintEnv -> Imports -> Term3 v a -> Pretty SyntaxText -prettyDoc n im term = mconcat [ fmt S.DocDelimiter $ l "[: " - , go term - , spaceUnlessBroken - , fmt S.DocDelimiter $ l ":]"] - where - go (DD.DocJoin segs) = foldMap go segs - go (DD.DocBlob txt) = PP.paragraphyText (escaped txt) - go (DD.DocLink (DD.LinkTerm (TermLink' r))) = - (fmt S.DocDelimiter $ l "@") <> ((fmt $ S.Referent r) $ fmtTerm r) - go (DD.DocLink (DD.LinkType (TypeLink' r))) = - (fmt S.DocDelimiter $ l "@") <> ((fmt $ S.Reference r) $ fmtType r) - go (DD.DocSource (DD.LinkTerm (TermLink' r))) = - atKeyword "source" <> fmtTerm r - go (DD.DocSource (DD.LinkType (TypeLink' r))) = - atKeyword "source" <> fmtType r - go (DD.DocSignature (TermLink' r)) = - atKeyword "signature" <> fmtTerm r - go (DD.DocEvaluate (TermLink' r)) = - atKeyword "evaluate" <> fmtTerm r - go (Ref' r) = atKeyword "include" <> fmtTerm (Referent.Ref r) - go _ = l $ "(invalid doc literal: " ++ show term ++ ")" - fmtName s = styleHashQualified'' (fmt $ S.HashQualifier s) $ elideFQN im s - fmtTerm r = fmtName $ PrettyPrintEnv.termName n r - fmtType r = fmtName $ PrettyPrintEnv.typeName n r - atKeyword w = - (fmt S.DocDelimiter $ l "@[") <> - (fmt S.DocKeyword $ l w) <> - (fmt S.DocDelimiter $ l "] ") - escaped = Text.replace "@" "\\@" . 
Text.replace ":]" "\\:]" - spaceUnlessBroken = PP.orElse " " "" - -paren :: Bool -> Pretty SyntaxText -> Pretty SyntaxText -paren True s = PP.group $ fmt S.Parenthesis "(" <> s <> fmt S.Parenthesis ")" -paren False s = PP.group s - -parenIfInfix - :: HQ.HashQualified -> InfixContext -> (Pretty SyntaxText -> Pretty SyntaxText) -parenIfInfix name ic = - if isSymbolic name && ic == NonInfix then paren True else id - -l :: IsString s => String -> Pretty s -l = fromString - -isSymbolic :: HQ.HashQualified -> Bool -isSymbolic (HQ.NameOnly name) = isSymbolic' name -isSymbolic (HQ.HashQualified name _) = isSymbolic' name -isSymbolic (HQ.HashOnly _) = False - -isSymbolic' :: Name -> Bool -isSymbolic' name = case symbolyId . Name.toString $ name of - Right _ -> True - _ -> False - -isBlank :: String -> Bool -isBlank ('_' : rest) | (isJust ((readMaybe rest) :: Maybe Int)) = True -isBlank _ = False - -ac :: Int -> BlockContext -> Imports -> DocLiteralContext -> AmbientContext -ac prec bc im doc = AmbientContext prec bc NonInfix im doc - -fmt :: S.Element -> Pretty S.SyntaxText -> Pretty S.SyntaxText -fmt = PP.withSyntax - -{- - # FQN elision - - The term pretty-printer inserts `use` statements in some circumstances, to - avoid the need for using fully-qualified names (FQNs) everywhere. The - following is an explanation and specification, as developed in issue #285. - - As an example, instead of - - foo p q r = - if p then Util.bar q else Util.bar r - - we actually output the following. - - foo p q r = - use Util bar - if p then bar q else bar r - - Here, the `use` statement `use Util bar` has been inserted at the start of - the block statement containing the `if`. Within that scope, `Util.bar` can - be referred to just with `bar`. We say `Util` is the prefix, and `bar` is - the suffix. 
- - When choosing where to place `use` statements, the pretty-printer tries to - - float them down, deeper into the syntax tree, to keep them visually close - to the use sites ('usages') of the names involved, but also tries to - - minimize the number of repetitions of `use` statements for the same names - by floating them up, towards the top of the syntax tree, so that one - `use` statement takes effect over more name usages. - - It avoids producing output like the following. - - foo p q r = - use My bar - if p then bar q else Your.bar r - - Here `My.bar` is imported with a `use` statement, but `Your.bar` is not. - We avoid this because it would be easy to misread `bar` as meaning - `Your.bar`. Instead both names are output fully qualified. - - This means that a `use` statement is only emitted for a name - when the suffix is unique, across all the names referenced in the scope of - the `use` statement. - - We don't emit a `use` statement for a name if it only occurs once within - the scope (unless it's an infix operator, since they look nicer without - a namespace qualifier.) - - The emitted code does not depend on Type-Driven Name Resolution (TDNR). - For example, we emit - foo = - use Nat + - 1 + 2 - even though TDNR means that `foo = 1 + 2` would have had the same - meaning. That avoids the reader having to run typechecker logic in their - head in order to know what functions are being called. - - Multi-level name qualification is allowed - like `Foo.Bar.baz`. The - pretty-printer tries to strip off as many sections of the prefix as - possible, without causing a clash with other names. If more sections - can be stripped off, further down the tree, then it does this too. 
- - ## Specification - - We output a `use` statement for prefix P and suffix S at a given scope if - - the scope is a block statement (so the `use` is syntactically valid) - - the number of usages of the thing referred to by P.S within the scope - - is > 1, or - - is 1, and S is an infix operator - - [uniqueness] there is no other Q with Q.S used in that scope - - there is no longer prefix PP (and suffix s, with PP.s == P.S) which - satisfies uniqueness - - [narrowness] there is no block statement further down inside this one - which contains all of the usages. - - Use statements in a block statement are sorted alphabetically by prefix. - Suffixes covered by a single use statement are sorted alphabetically. - Note that each `use` line cannot be line-broken. Ideally they would - fit the available space by splitting into multiple separate `use` lines. - - ## Algorithm - - Bubbling up from the leaves of the syntax tree, we calculate for each - node, a `Map Suffix (Map Prefix Int)` (the 'usages map'), where the `Int` - is the number of usages of Prefix.Suffix at/under that node. (Note that - a usage of `A.B.c` corresponds to two entries in the outer map.) See - `printAnnotate`. - - Once we have this decoration on all the terms, we start pretty-printing. - As we recurse back down through the tree, we keep a `Map Name Suffix` (the - 'imports map'), to record the effect of all the `use` statements we've added - in the nodes above. When outputting names, we check this map to work out - how to render them, using any suffix we find, or else falling back to the - FQN. At each block statement, each suffix in that term's usages map is a - candidate to be imported with a use statement, subject to the various - rules in the specification. - - # Debugging - - Start by enabling the tracing in elideFQN in PrettyPrintEnv.hs. - - There's also tracing in allInSubBlock to help when the narrowness check - is playing up. 
- - # Semantics of imports - - Here is some background on how imports work. - - `use XYZ blah` brings `XYZ.blah` into scope, bound to the name `blah`. More - generally, `use` is followed by a FQN prefix, then the local suffix. - Concatenate the FQN prefix with the local suffix, with a dot between them, - and you get the FQN, which is bound to the name equal to the local suffix. - - `use XYZ blah qux` is equivalent to the two statements (and this - generalizes for any N symbols): - use XYZ blah - use XYZ qux - - This syntax works the same even if XYZ or blah have dots in them, so: - `use Util.External My.Foo` brings `Util.External.My.Foo` into scope, bound - to the name `My.Foo`. - - That's it. No wildcard imports, imports that do renaming, etc. We can - consider adding some features like this later. --} - -data PrintAnnotation = PrintAnnotation - { - -- For each suffix that appears in/under this term, the set of prefixes - -- used with that suffix, and how many times each occurs. - usages :: Map Suffix (Map Prefix Int) - } deriving (Show) - -instance Semigroup PrintAnnotation where - (PrintAnnotation { usages = a } ) <> (PrintAnnotation { usages = b } ) = - PrintAnnotation { usages = Map.unionWith f a b } where - f a' b' = Map.unionWith (+) a' b' - -instance Monoid PrintAnnotation where - mempty = PrintAnnotation { usages = Map.empty } - -suffixCounterTerm :: Var v => PrettyPrintEnv -> Term2 v at ap v a -> PrintAnnotation -suffixCounterTerm n = \case - Var' v -> countHQ $ HQ.unsafeFromVar v - Ref' r -> countHQ $ PrettyPrintEnv.termName n (Referent.Ref r) - Ref' r -> countHQ $ PrettyPrintEnv.termName n (Referent.Ref r) - Constructor' r _ | noImportRefs r -> mempty - Constructor' r i -> countHQ $ PrettyPrintEnv.termName n (Referent.Con r i CT.Data) - Request' r i -> countHQ $ PrettyPrintEnv.termName n (Referent.Con r i CT.Effect) - Ann' _ t -> countTypeUsages n t - Match' _ bs -> let pat (MatchCase p _ _) = p - in foldMap ((countPatternUsages n) . 
pat) bs - _ -> mempty - -suffixCounterType :: Var v => PrettyPrintEnv -> Type v a -> PrintAnnotation -suffixCounterType n = \case - Type.Var' v -> countHQ $ HQ.unsafeFromVar v - Type.Ref' r | noImportRefs r || r == Type.vectorRef -> mempty - Type.Ref' r -> countHQ $ PrettyPrintEnv.typeName n r - _ -> mempty - -printAnnotate :: (Var v, Ord v) => PrettyPrintEnv -> Term2 v at ap v a -> Term3 v PrintAnnotation -printAnnotate n tm = fmap snd (go (reannotateUp (suffixCounterTerm n) tm)) where - go :: Ord v => Term2 v at ap v b -> Term2 v () () v b - go = extraMap' id (const ()) (const ()) - -countTypeUsages :: (Var v, Ord v) => PrettyPrintEnv -> Type v a -> PrintAnnotation -countTypeUsages n t = snd $ annotation $ reannotateUp (suffixCounterType n) t - -countPatternUsages :: PrettyPrintEnv -> Pattern loc -> PrintAnnotation -countPatternUsages n p = Pattern.foldMap' f p where - f = \case - Pattern.Unbound _ -> mempty - Pattern.Var _ -> mempty - Pattern.Boolean _ _ -> mempty - Pattern.Int _ _ -> mempty - Pattern.Nat _ _ -> mempty - Pattern.Float _ _ -> mempty - Pattern.Text _ _ -> mempty - Pattern.Char _ _ -> mempty - Pattern.As _ _ -> mempty - Pattern.SequenceLiteral _ _ -> mempty - Pattern.SequenceOp _ _ _ _ -> mempty - Pattern.EffectPure _ _ -> mempty - Pattern.EffectBind _ r i _ _ -> countHQ $ PrettyPrintEnv.patternName n r i - Pattern.Constructor _ r i _ -> - if noImportRefs r then mempty - else countHQ $ PrettyPrintEnv.patternName n r i - -countHQ :: HQ.HashQualified -> PrintAnnotation -countHQ hq = fold $ fmap countName (HQ.toName $ hq) - -countName :: Name -> PrintAnnotation -countName n = let f = \(p, s) -> (s, Map.singleton p 1) - in PrintAnnotation { usages = Map.fromList $ map f $ splitName n} - --- Generates all valid splits of a name into a prefix and suffix. --- See examples in Unison.Test.TermPrinter -splitName :: Name -> [(Prefix, Suffix)] -splitName n = - let ns = NameSegment.toText <$> Name.segments n - in filter (not . Text.null . 
snd) $ inits ns `zip` map dotConcat (tails ns) - -joinName :: Prefix -> Suffix -> Name -joinName p s = Name.unsafeFromText $ dotConcat $ p ++ [s] - -dotConcat :: [Text] -> Text -dotConcat = Text.concat . (intersperse ".") - --- This predicate is used to keep certain refs out of the FQN elision annotations, --- so that we don't get `use` statements for them. --- --- Don't do `use () ()` or `use Pair Pair`. Tuple syntax generates ().() and Pair.Pair --- under the covers anyway. This does mean that if someone is using Pair.Pair directly, --- then they'll miss out on FQN elision for that. --- --- Don't do `use builtin.Doc Blob`, `use builtin.Link Term`, or similar. That avoids --- unnecessary use statements above Doc literals and termLink/typeLink. -noImportRefs :: Reference -> Bool -noImportRefs r = - elem r - [ DD.pairRef - , DD.unitRef - , DD.docRef - , DD.linkRef - ] - -infixl 0 |> -(|>) :: a -> (a -> b) -> b -x |> f = f x - --- This function gets used each time we start printing a new block statement. --- It decides what extra imports to introduce (returning the full new set), and --- determines some pretty-printed lines that looks like --- use A x --- use B y --- providing a `[Pretty SyntaxText] -> Pretty SyntaxText` that prepends those --- lines to the list of lines provided, and then concatenates them. -calcImports - :: (Var v, Ord v) - => Imports - -> Term3 v PrintAnnotation - -> (Imports, [Pretty SyntaxText] -> Pretty SyntaxText) -calcImports im tm = (im', render $ getUses result) - where - -- The guts of this function is a pipeline of transformations and filters, starting from the - -- PrintAnnotation we built up in printAnnotate. - -- In `result`, the Name matches Prefix ++ Suffix; and the Int is the number of usages in this scope. - -- `result` lists all the names we're going to import, and what Prefix we'll use for each. 
- result :: Map Name (Prefix, Suffix, Int) - result = usages' - |> uniqueness - |> enoughUsages - |> groupAndCountLength - |> longestPrefix - |> avoidRepeatsAndClashes - |> narrowestPossible - usages' :: Map Suffix (Map Prefix Int) - usages' = usages $ annotation tm - -- Keep only names P.S where there is no other Q with Q.S also used in this scope. - uniqueness :: Map Suffix (Map Prefix Int) -> Map Suffix (Prefix, Int) - uniqueness m = m |> Map.filter (\ps -> (Map.size ps) == 1) - |> Map.map (\ps -> head $ Map.toList ps) - -- Keep only names where the number of usages in this scope - -- - is > 1, or - -- - is 1, and S is an infix operator. - -- Also drop names with an empty prefix. - lookupOrDie s m = fromMaybe msg (Map.lookup s m) where - msg = error $ "TermPrinter.enoughUsages " <> show (s, m) - - enoughUsages :: Map Suffix (Prefix, Int) -> Map Suffix (Prefix, Int) - enoughUsages m = (Map.keys m) |> filter (\s -> let (p, i) = lookupOrDie s m - in (i > 1 || isRight (symbolyId (unpack s))) && - (length p > 0)) - |> map (\s -> (s, lookupOrDie s m)) - |> Map.fromList - -- Group by `Prefix ++ Suffix`, and then by `length Prefix` - groupAndCountLength :: Map Suffix (Prefix, Int) -> Map (Name, Int) (Prefix, Suffix, Int) - groupAndCountLength m = Map.toList m |> map (\(s, (p, i)) -> let n = joinName p s - l = length p - in ((n, l), (p, s, i))) - |> Map.fromList - -- For each k1, choose the v with the largest k2. 
- longestPrefix :: (Show k1, Show k2, Ord k1, Ord k2) => Map (k1, k2) v -> Map k1 v - longestPrefix m = let k1s = Set.map fst $ Map.keysSet m - k2s = k1s |> Map.fromSet (\k1' -> Map.keysSet m - |> Set.filter (\(k1, _) -> k1 == k1') - |> Set.map snd) - maxk2s = Map.map maximum k2s - err k1 k2 = error $ - "TermPrinter.longestPrefix not found " - <> show (k1,k2) - <> " in " <> show maxk2s - in Map.mapWithKey (\k1 k2 -> fromMaybe (err k1 k2) $ Map.lookup (k1, k2) m) maxk2s - -- Don't do another `use` for a name for which we've already done one, unless the - -- new suffix is shorter. - avoidRepeatsAndClashes :: Map Name (Prefix, Suffix, Int) -> Map Name (Prefix, Suffix, Int) - avoidRepeatsAndClashes = Map.filterWithKey $ - \n (_, s', _) -> case Map.lookup n im of - Just s -> (Text.length s') < (Text.length s) - Nothing -> True - -- Is there a strictly smaller block term underneath this one, containing all the usages - -- of some of the names? Skip emitting `use` statements for those, so we can do it - -- further down, closer to the use sites. - narrowestPossible :: Map Name (Prefix, Suffix, Int) -> Map Name (Prefix, Suffix, Int) - narrowestPossible m = m |> Map.filter (\(p, s, i) -> not $ allInSubBlock tm p s i) - -- `union` is left-biased, so this can replace existing imports. - im' = getImportMapAdditions result `Map.union` im - getImportMapAdditions :: Map Name (Prefix, Suffix, Int) -> Map Name Suffix - getImportMapAdditions = Map.map (\(_, s, _) -> s) - getUses :: Map Name (Prefix, Suffix, Int) -> Map Prefix (Set Suffix) - getUses m = Map.elems m |> map (\(p, s, _) -> (p, Set.singleton s)) - |> Map.fromListWith Set.union - render :: Map Prefix (Set Suffix) -> [Pretty SyntaxText] -> Pretty SyntaxText - render m rest = - let uses = Map.mapWithKey (\p ss -> (fmt S.UseKeyword $ l"use ") <> - (fmt S.UsePrefix (intercalateMap (l".") (l . unpack) p)) <> l" " <> - (fmt S.UseSuffix (intercalateMap (l" ") (l . 
unpack) (Set.toList ss)))) m - |> Map.toList - |> map snd - in PP.lines (uses ++ rest) - --- Given a block term and a name (Prefix, Suffix) of interest, is there a strictly smaller --- blockterm within it, containing all usages of that name? A blockterm is a place --- where the syntax lets us put a use statement, like the branches of an if/then/else. --- We traverse the block terms by traversing the whole subtree with ABT.find, and paying --- attention to those subterms that look like a blockterm. This is complicated --- by the fact that you can't always tell if a term is a blockterm just --- by looking at it: in some cases you can only tell when you can see it in the context of --- the wider term that contains it. So actually we traverse the tree, at each term --- looking for child terms that are block terms, and see if any of those contain --- all the usages of the name. --- Cut out the occurrences of "const id $" to get tracing. -allInSubBlock :: (Var v, Ord v) => Term3 v PrintAnnotation -> Prefix -> Suffix -> Int -> Bool -allInSubBlock tm p s i = let found = concat $ ABT.find finder tm - result = any (/= tm) $ found - tr = const id $ trace ("\nallInSubBlock(" ++ show p ++ ", " ++ - show s ++ ", " ++ show i ++ "): returns " ++ - show result ++ "\nInput:\n" ++ show tm ++ - "\nFound: \n" ++ show found ++ "\n\n") - in tr result where - getUsages t = annotation t - |> usages - |> Map.lookup s - |> fmap (Map.lookup p) - |> join - |> fromMaybe 0 - finder t = let result = let i' = getUsages t - in if i' < i - then ABT.Prune - else - let found = filter hit $ immediateChildBlockTerms t - in if (i' == i) && (not $ null found) - then ABT.Found found - else ABT.Continue - children = concat (map (\t -> "child: " ++ show t ++ "\n") $ immediateChildBlockTerms t) - tr = const id $ trace ("\nfinder: returns " ++ show result ++ - "\n children:" ++ children ++ - "\n input: \n" ++ show t ++ "\n\n") - in tr $ result - hit t = (getUsages t) == i - --- Return any blockterms at or 
immediately under this term. Has to match the places in the --- syntax that get a call to `calcImports` in `pretty0`. AST nodes that do a calcImports in --- pretty0, in order to try and emit a `use` statement, need to be emitted also by this --- function, otherwise the `use` statement may come out at an enclosing scope instead. -immediateChildBlockTerms :: (Var vt, Var v) => Term2 vt at ap v a -> [Term2 vt at ap v a] -immediateChildBlockTerms = \case - Handle' handler body -> [handler, body] - If' _ t f -> [t, f] - LetBlock bs _ -> concat $ map doLet bs - Match' _ branches -> concat $ map doCase branches - _ -> [] - where - doCase (MatchCase _ _ (AbsN' _ body)) = [body] - doCase _ = error "bad match" [] - doLet (v, Ann' tm _) = doLet (v, tm) - doLet (v, LamsNamedOpt' _ body) = if isBlank $ Var.nameStr v - then [] - else [body] - doLet t = error (show t) [] - -pattern LetBlock bindings body <- (unLetBlock -> Just (bindings, body)) - --- Collects nested let/let rec blocks into one minimally nested block. --- Handy because `let` and `let rec` blocks get rendered the same way. --- We preserve nesting when the inner block shadows definitions in the --- outer block. 
-unLetBlock - :: Ord v - => Term2 vt at ap v a - -> Maybe ([(v, Term2 vt at ap v a)], Term2 vt at ap v a) -unLetBlock t = rec t where - dontIntersect v1s v2s = - all (`Set.notMember` v2set) (fst <$> v1s) where - v2set = Set.fromList (fst <$> v2s) - rec t = case unLetRecNamed t of - Nothing -> nonrec t - Just (_isTop, bindings, body) -> case rec body of - Just (innerBindings, innerBody) | dontIntersect bindings innerBindings -> - Just (bindings ++ innerBindings, innerBody) - _ -> Just (bindings, body) - nonrec t = case unLet t of - Nothing -> Nothing - Just (bindings0, body) -> - let bindings = [ (v,b) | (_,v,b) <- bindings0 ] in - case rec body of - Just (innerBindings, innerBody) | dontIntersect bindings innerBindings -> - Just (bindings ++ innerBindings, innerBody) - _ -> Just (bindings, body) diff --git a/parser-typechecker/src/Unison/TypeParser.hs b/parser-typechecker/src/Unison/TypeParser.hs deleted file mode 100644 index 86c5f350e9..0000000000 --- a/parser-typechecker/src/Unison/TypeParser.hs +++ /dev/null @@ -1,115 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.TypeParser where - -import Unison.Prelude - -import qualified Text.Megaparsec as P -import qualified Unison.Lexer as L -import Unison.Parser -import Unison.Type (Type) -import qualified Unison.Type as Type -import Unison.Var (Var) -import qualified Unison.Builtin.Decls as DD -import qualified Unison.HashQualified as HQ -import qualified Unison.Name as Name -import qualified Unison.Names3 as Names -import qualified Data.Set as Set -import Control.Monad.Reader (asks) - --- A parsed type is annotated with its starting and ending position in the --- source text. 
-type TypeP v = P v (Type v Ann) - --- Value types cannot have effects, unless those effects appear to --- the right of a function arrow: --- valueType ::= Int | Text | App valueType valueType | Arrow valueType computationType -valueType :: Var v => TypeP v -valueType = forall type1 <|> type1 - --- Computation --- computationType ::= [{effect*}] valueType -computationType :: Var v => TypeP v -computationType = effect <|> valueType - -valueTypeLeaf :: Var v => TypeP v -valueTypeLeaf = - tupleOrParenthesizedType valueType <|> typeAtom <|> sequenceTyp - --- Examples: Optional, Optional#abc, woot, #abc -typeAtom :: Var v => TypeP v -typeAtom = hqPrefixId >>= \tok -> case L.payload tok of - HQ.NameOnly n -> pure $ Type.var (ann tok) (Name.toVar n) - hq -> do - names <- asks names - let matches = Names.lookupHQType hq names - if Set.size matches /= 1 - then P.customFailure (UnknownType tok matches) - else pure $ Type.ref (ann tok) (Set.findMin matches) - -type1 :: Var v => TypeP v -type1 = arrow type2a - -type2a :: Var v => TypeP v -type2a = delayed <|> type2 - -delayed :: Var v => TypeP v -delayed = do - q <- reserved "'" - t <- effect <|> type2a - pure $ Type.arrow (Ann (L.start q) (end $ ann t)) - (DD.unitType (ann q)) - t - -type2 :: Var v => TypeP v -type2 = do - hd <- valueTypeLeaf - tl <- many (effectList <|> valueTypeLeaf) - pure $ foldl' (\a b -> Type.app (ann a <> ann b) a b) hd tl - --- ex : {State Text, IO} (Sequence Int) -effect :: Var v => TypeP v -effect = do - es <- effectList - t <- valueTypeLeaf - pure (Type.effect1 (ann es <> ann t) es t) - -effectList :: Var v => TypeP v -effectList = do - open <- openBlockWith "{" - es <- sepBy (reserved ",") valueType - close <- closeBlock - pure $ Type.effects (ann open <> ann close) es - -sequenceTyp :: Var v => TypeP v -sequenceTyp = do - open <- reserved "[" - t <- valueType - close <- reserved "]" - let a = ann open <> ann close - pure $ Type.app a (Type.vector a) t - -tupleOrParenthesizedType :: Var v => TypeP 
v -> TypeP v -tupleOrParenthesizedType rec = tupleOrParenthesized rec DD.unitType pair - where - pair t1 t2 = - let a = ann t1 <> ann t2 - in Type.app a (Type.app (ann t1) (DD.pairType a) t1) t2 - --- valueType ::= ... | Arrow valueType computationType -arrow :: Var v => TypeP v -> TypeP v -arrow rec = - let eff = mkArr <$> optional effectList - mkArr Nothing a b = Type.arrow (ann a <> ann b) a b - mkArr (Just es) a b = Type.arrow (ann a <> ann b) a (Type.effect1 (ann es <> ann b) es b) - in chainr1 (effect <|> rec) (reserved "->" *> eff) - --- "forall a b . List a -> List b -> Maybe Text" -forall :: Var v => TypeP v -> TypeP v -forall rec = do - kw <- reserved "forall" <|> reserved "∀" - vars <- fmap (fmap L.payload) . some $ prefixDefinitionName - _ <- matchToken $ L.SymbolyId "." Nothing - t <- rec - pure $ Type.foralls (ann kw <> ann t) vars t - diff --git a/parser-typechecker/src/Unison/TypePrinter.hs b/parser-typechecker/src/Unison/TypePrinter.hs deleted file mode 100644 index 750fe6ccb2..0000000000 --- a/parser-typechecker/src/Unison/TypePrinter.hs +++ /dev/null @@ -1,185 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} - -module Unison.TypePrinter where - -import Unison.Prelude - -import qualified Data.Map as Map -import Unison.HashQualified (HashQualified) -import Unison.NamePrinter (styleHashQualified'') -import Unison.PrettyPrintEnv (PrettyPrintEnv, Imports, elideFQN) -import qualified Unison.PrettyPrintEnv as PrettyPrintEnv -import Unison.Reference (pattern Builtin) -import Unison.Type -import Unison.Util.Pretty (ColorText, Pretty) -import Unison.Util.ColorText (toPlain) -import qualified Unison.Util.SyntaxText as S -import Unison.Util.SyntaxText (SyntaxText) -import qualified Unison.Util.Pretty as PP -import Unison.Var (Var) -import qualified Unison.Var as Var -import qualified Unison.Builtin.Decls as DD - -pretty :: forall v a . (Var v) => PrettyPrintEnv -> Type v a -> Pretty ColorText -pretty ppe = PP.syntaxToColor . 
pretty0 ppe mempty (-1) - -pretty' :: Var v => Maybe Int -> PrettyPrintEnv -> Type v a -> String -pretty' (Just width) n t = toPlain $ PP.render width $ PP.syntaxToColor $ pretty0 n Map.empty (-1) t -pretty' Nothing n t = toPlain $ PP.render maxBound $ PP.syntaxToColor $ pretty0 n Map.empty (-1) t - -{- Explanation of precedence handling - - We illustrate precedence rules as follows. - - >=10 - 10f 10x - - This example shows that a type application f x is enclosed in parentheses - whenever the ambient precedence around it is >= 10, and that when printing - its two components, an ambient precedence of 10 is used in both places. - - The pretty-printer uses the following rules for printing types. - - >=10 - 10f 10x - { 0e } 10t - - >=0 - 0a -> 0b - --} - -pretty0 - :: forall v a . (Var v) - => PrettyPrintEnv - -> Imports - -> Int - -> Type v a - -> Pretty SyntaxText -pretty0 n im p tp = prettyRaw n im p (cleanup (removePureEffects tp)) - -prettyRaw - :: forall v a . (Var v) - => PrettyPrintEnv - -> Imports - -> Int - -> Type v a - -> Pretty SyntaxText --- p is the operator precedence of the enclosing context (a number from 0 to --- 11, or -1 to avoid outer parentheses unconditionally). Function --- application has precedence 10. -prettyRaw n im p tp = go n im p tp - where - go :: PrettyPrintEnv -> Imports -> Int -> Type v a -> Pretty SyntaxText - go n im p tp = case stripIntroOuters tp of - Var' v -> fmt S.Var $ PP.text (Var.name v) - DD.TupleType' xs | length xs /= 1 -> PP.parenthesizeCommas $ map (go n im 0) xs - -- Would be nice to use a different SyntaxHighlights color if the reference is an ability. 
- Ref' r -> styleHashQualified'' (fmt $ S.Reference r) $ elideFQN im (PrettyPrintEnv.typeName n r) - Cycle' _ _ -> fromString "error: TypeParser does not currently emit Cycle" - Abs' _ -> fromString "error: TypeParser does not currently emit Abs" - Ann' _ _ -> fromString "error: TypeParser does not currently emit Ann" - App' (Ref' (Builtin "Sequence")) x -> - PP.group $ (fmt S.DelimiterChar "[") <> go n im 0 x <> (fmt S.DelimiterChar "]") - Apps' f xs -> PP.parenthesizeIf (p >= 10) $ go n im 9 f `PP.hang` PP.spaced - (go n im 10 <$> xs) - Effect1' e t -> - PP.parenthesizeIf (p >= 10) $ go n im 9 e <> " " <> go n im 10 t - Effects' es -> effects (Just es) - ForallsNamed' vs' body -> - let vs = filter (\v -> Var.name v /= "()") vs' - in if p < 0 && all Var.universallyQuantifyIfFree vs - then go n im p body - else paren (p >= 0) $ - let vformatted = PP.sep " " (fmt S.Var . PP.text . Var.name <$> vs) - in (fmt S.TypeOperator "∀ " <> vformatted <> fmt S.TypeOperator ".") - `PP.hang` go n im (-1) body - t@(Arrow' _ _) -> case t of - EffectfulArrows' (Ref' DD.UnitRef) rest -> arrows True True rest - EffectfulArrows' fst rest -> - case fst of - Var' v | Var.name v == "()" - -> fmt S.DelayForceChar "'" <> arrows False True rest - _ -> PP.parenthesizeIf (p >= 0) $ - go n im 0 fst <> arrows False False rest - _ -> "error" - _ -> "error" - effects Nothing = mempty - effects (Just es) = PP.group $ (fmt S.AbilityBraces "{") <> PP.commas (go n im 0 <$> es) <> (fmt S.AbilityBraces "}") - arrow delay first mes = - (if first then mempty else PP.softbreak <> (fmt S.TypeOperator "->")) - <> (if delay then (if first then (fmt S.DelayForceChar "'") else (fmt S.DelayForceChar " '")) else mempty) - <> effects mes - <> if (isJust mes) || (not delay) && (not first) then " " else mempty - - arrows delay first [(mes, Ref' DD.UnitRef)] = arrow delay first mes <> (fmt S.Unit "()") - arrows delay first ((mes, Ref' DD.UnitRef) : rest) = - arrow delay first mes <> (parenNoGroup delay $ arrows True 
True rest) - arrows delay first ((mes, arg) : rest) = - arrow delay first mes - <> ( parenNoGroup (delay && (not $ null rest)) - $ go n im 0 arg - <> arrows False False rest - ) - arrows False False [] = mempty - arrows False True [] = mempty -- not reachable - arrows True _ [] = mempty -- not reachable - - paren True s = PP.group $ ( fmt S.Parenthesis "(" ) <> s <> ( fmt S.Parenthesis ")" ) - paren False s = PP.group s - - parenNoGroup True s = ( fmt S.Parenthesis "(" ) <> s <> ( fmt S.Parenthesis ")" ) - parenNoGroup False s = s - -fmt :: S.Element -> Pretty S.SyntaxText -> Pretty S.SyntaxText -fmt = PP.withSyntax - --- todo: provide sample output in comment -prettySignatures' - :: Var v => PrettyPrintEnv - -> [(HashQualified, Type v a)] - -> [Pretty ColorText] -prettySignatures' env ts = map PP.syntaxToColor $ PP.align - [ ( styleHashQualified'' (fmt $ S.HashQualifier name) name - , (fmt S.TypeAscriptionColon ": " <> pretty0 env Map.empty (-1) typ) - `PP.orElse` ( fmt S.TypeAscriptionColon ": " - <> PP.indentNAfterNewline 2 (pretty0 env Map.empty (-1) typ) - ) - ) - | (name, typ) <- ts - ] - --- todo: provide sample output in comment; different from prettySignatures' -prettySignaturesAlt' - :: Var v => PrettyPrintEnv - -> [([HashQualified], Type v a)] - -> [Pretty ColorText] -prettySignaturesAlt' env ts = map PP.syntaxToColor $ PP.align - [ ( PP.commas . 
fmap (\name -> styleHashQualified'' (fmt $ S.HashQualifier name) name) $ names - , (fmt S.TypeAscriptionColon ": " <> pretty0 env Map.empty (-1) typ) - `PP.orElse` ( fmt S.TypeAscriptionColon ": " - <> PP.indentNAfterNewline 2 (pretty0 env Map.empty (-1) typ) - ) - ) - | (names, typ) <- ts - ] - --- prettySignatures'' :: Var v => PrettyPrintEnv -> [(Name, Type v a)] -> [Pretty ColorText] --- prettySignatures'' env ts = prettySignatures' env (first HQ.fromName <$> ts) - -prettySignatures - :: Var v - => PrettyPrintEnv - -> [(HashQualified, Type v a)] - -> Pretty ColorText -prettySignatures env ts = PP.lines $ - PP.group <$> prettySignatures' env ts - -prettySignaturesAlt - :: Var v - => PrettyPrintEnv - -> [([HashQualified], Type v a)] - -> Pretty ColorText -prettySignaturesAlt env ts = PP.lines $ - PP.group <$> prettySignaturesAlt' env ts diff --git a/parser-typechecker/src/Unison/Typechecker.hs b/parser-typechecker/src/Unison/Typechecker.hs deleted file mode 100644 index b0a131640e..0000000000 --- a/parser-typechecker/src/Unison/Typechecker.hs +++ /dev/null @@ -1,351 +0,0 @@ -{-# LANGUAGE OverloadedLists #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE ViewPatterns #-} - --- | This module is the primary interface to the Unison typechecker --- module Unison.Typechecker (admissibleTypeAt, check, check', checkAdmissible', equals, locals, subtype, isSubtype, synthesize, synthesize', typeAt, wellTyped) where - -module Unison.Typechecker where - -import Unison.Prelude - -import Control.Lens -import Control.Monad.Fail (fail) -import Control.Monad.State (State, StateT, execState, get, - modify) -import Control.Monad.Writer -import qualified Data.Map as Map -import qualified Data.Sequence.NonEmpty as NESeq (toSeq) -import qualified Data.Text as Text -import qualified Unison.ABT as ABT -import qualified Unison.Blank as B -import Unison.Referent (Referent) -import Unison.Result (pattern Result, Result, - ResultT, runResultT) -import 
qualified Unison.Result as Result -import Unison.Term (Term) -import qualified Unison.Term as Term -import Unison.Type (Type) -import qualified Unison.Typechecker.Context as Context -import qualified Unison.Typechecker.TypeVar as TypeVar -import Unison.Var (Var) -import qualified Unison.Var as Var -import qualified Unison.Typechecker.TypeLookup as TL -import Unison.Util.List ( uniqueBy ) - -type Name = Text - -data Notes v loc = Notes { - bugs :: Seq (Context.CompilerBug v loc), - errors :: Seq (Context.ErrorNote v loc), - infos :: Seq (Context.InfoNote v loc) -} - -instance Semigroup (Notes v loc) where - Notes bs es is <> Notes bs' es' is' = Notes (bs <> bs') (es <> es') (is <> is') - -instance Monoid (Notes v loc) where - mempty = Notes mempty mempty mempty - -convertResult :: Context.Result v loc a -> Result (Notes v loc) a -convertResult = \case - Context.Success is a -> Result (Notes mempty mempty is) (Just a) - Context.TypeError es is -> Result (Notes mempty (NESeq.toSeq es) is) Nothing - Context.CompilerBug bug es is -> Result (Notes [bug] es is) Nothing - -data NamedReference v loc = - NamedReference { fqn :: Name, fqnType :: Type v loc - , replacement :: Either v Referent } - deriving Show - -data Env v loc = Env - { _ambientAbilities :: [Type v loc] - , _typeLookup :: TL.TypeLookup v loc - , _unqualifiedTerms :: Map Name [NamedReference v loc] - } - -makeLenses ''Env - --- -- | Compute the allowed type of a replacement for a given subterm. --- -- Example, in @\g -> map g [1,2,3]@, @g@ has an admissible type of --- -- @Int -> r@, where @r@ is an unbound universal type variable, which --- -- means that an @Int -> Bool@, an @Int -> String@, etc could all be --- -- substituted for @g@. --- -- --- -- Algorithm works by replacing the subterm, @e@ with --- -- @(f e)@, where @f@ is a fresh function parameter. We then --- -- read off the type of @e@ from the inferred result type of @f@. 
--- admissibleTypeAt :: (Monad f, Var v) --- => (Env v loc) --- -> Path --- -> Term v loc --- -> f (Result v loc (Type v loc)) --- admissibleTypeAt env path t = --- let --- f = ABT.v' "f" --- shake (Type.Arrow' (Type.Arrow' _ tsub) _) = Type.generalize tsub --- shake (Type.ForallNamed' _ t) = shake t --- shake _ = error "impossible, f had better be a function" --- in case Term.lam() f <$> Paths.modifyTerm (\t -> Term.app() (Term.var() (ABT.Free f)) (Term.wrapV t)) path t of --- Nothing -> pure . failNote $ InvalidPath path t --- Just t -> fmap shake <$> synthesize env t - --- -- | Compute the type of the given subterm. --- typeAt :: (Monad f, Var v) => Env v loc -> Path -> Term v loc -> f (Type v loc) --- typeAt env [] t = synthesize env t --- typeAt env path t = --- let --- f = ABT.v' "f" --- remember e = Term.var() (ABT.Free f) `Term.app_` Term.wrapV e --- shake (Type.Arrow' (Type.Arrow' tsub _) _) = Type.generalize tsub --- shake (Type.ForallNamed' _ t) = shake t --- shake _ = error "impossible, f had better be a function" --- in case Term.lam() f <$> Paths.modifyTerm remember path t of --- Nothing -> failNote $ InvalidPath path t --- Just t -> pure . shake <$> synthesize env t --- --- -- | Return the type of all local variables in scope at the given location --- locals :: (Monad f, Var v) => Env v loc -> Path -> Term v loc --- -> f [(v, Type v loc)] --- locals env path ctx | ABT.isClosed ctx = --- zip (map ABT.unvar vars) <$> types --- where --- -- replace focus, x, with `let saved = f v1 v2 v3 ... vn in x`, --- -- where `f` is fresh variable, then infer type of `f`, read off the --- -- types of `v1`, `v2`, ... --- vars = map ABT.Bound (Paths.inScopeAtTerm path ctx) --- f = ABT.v' "f" --- saved = ABT.v' "saved" --- remember e = Term.let1_ [(saved, Term.var() (ABT.Free f) `Term.apps` map (((),) . 
Term.var()) vars)] (Term.wrapV e) --- usingAllLocals = Term.lam() f (Paths.modifyTerm' remember path ctx) --- types = if null vars then pure [] --- else extract <$> typeAt env [] usingAllLocals --- extract (Type.Arrow' i _) = extract1 i --- extract (Type.ForallNamed' _ t) = extract t --- extract t = error $ "expected function type, got: " ++ show t --- extract1 (Type.Arrow' i o) = i : extract1 o --- extract1 _ = [] --- locals _ _ _ _ ctx = --- -- need to call failNote multiple times --- failNote <$> (uncurry UnknownSymbol <$> ABT.freeVarAnnotations ctx) - - --- | Infer the type of a 'Unison.Term', using --- a function to resolve the type of @Ref@ constructors --- contained in that term. -synthesize - :: (Monad f, Var v, Ord loc) - => Env v loc - -> Term v loc - -> ResultT (Notes v loc) f (Type v loc) -synthesize env t = let - result = convertResult $ Context.synthesizeClosed - (TypeVar.liftType <$> view ambientAbilities env) - (view typeLookup env) - (TypeVar.liftTerm t) - in Result.hoist (pure . runIdentity) $ fmap TypeVar.lowerType result - -isSubtype :: Var v => Type v loc -> Type v loc -> Bool -isSubtype t1 t2 = - case Context.isSubtype (tvar $ void t1) (tvar $ void t2) of - Left bug -> error $ "compiler bug encountered: " ++ show bug - Right b -> b - where tvar = TypeVar.liftType - -isEqual :: Var v => Type v loc -> Type v loc -> Bool -isEqual t1 t2 = isSubtype t1 t2 && isSubtype t2 t1 - -type TDNR f v loc a = - StateT (Term v loc) (ResultT (Notes v loc) f) a - -data Resolution v loc = - Resolution { resolvedName :: Text - , inferredType :: Context.Type v loc - , resolvedLoc :: loc - , suggestions :: [Context.Suggestion v loc] - } - --- | Infer the type of a 'Unison.Term', using type-directed name resolution --- to attempt to resolve unknown symbols. -synthesizeAndResolve - :: (Monad f, Var v, Ord loc) => Env v loc -> TDNR f v loc (Type v loc) -synthesizeAndResolve env = do - tm <- get - (tp, notes) <- listen . 
lift $ synthesize env tm - typeDirectedNameResolution notes tp env - -compilerBug :: Context.CompilerBug v loc -> Result (Notes v loc) () -compilerBug bug = do - tell $ Notes [bug] mempty mempty - Control.Monad.Fail.fail "" - -typeError :: Context.ErrorNote v loc -> Result (Notes v loc) () -typeError note = do - tell $ Notes mempty [note] mempty - Control.Monad.Fail.fail "" - -btw :: Monad f => Context.InfoNote v loc -> ResultT (Notes v loc) f () -btw note = tell $ Notes mempty mempty [note] - -liftResult :: Monad f => Result (Notes v loc) a -> TDNR f v loc a -liftResult = lift . MaybeT . WriterT . pure . runIdentity . runResultT - --- Resolve "solved blanks". If a solved blank's type and name matches the type --- and unqualified name of a symbol that isn't imported, provide a note --- suggesting the import. If the blank is ambiguous and only one typechecks, use --- that one. Otherwise, provide an unknown symbol error to the user. --- The cases we consider are: --- 1. There exist names that match and their types match too. Tell the user --- the fully qualified names of these terms, and their types. --- 2. There's more than one name that matches, --- but only one that typechecks. Substitute that one into the code. --- 3. No match at all. Throw an unresolved symbol at the user. -typeDirectedNameResolution - :: forall v loc f - . (Monad f, Var v, Ord loc) - => Notes v loc - -> Type v loc - -> Env v loc - -> TDNR f v loc (Type v loc) -typeDirectedNameResolution oldNotes oldType env = do - -- Add typed components (local definitions) to the TDNR environment. - let tdnrEnv = execState (traverse_ addTypedComponent $ infos oldNotes) env - -- Resolve blanks in the notes and generate some resolutions - resolutions <- liftResult . traverse (resolveNote tdnrEnv) . toList $ infos - oldNotes - case catMaybes resolutions of - [] -> pure oldType - rs -> - let - goAgain = - any ((== 1) . length . dedupe . filter Context.isExact . 
suggestions) rs - in if goAgain - then do - traverse_ substSuggestion rs - synthesizeAndResolve tdnrEnv - else do - -- The type hasn't changed - liftResult $ suggest rs - pure oldType - where - addTypedComponent :: Context.InfoNote v loc -> State (Env v loc) () - addTypedComponent (Context.TopLevelComponent vtts) - = for_ vtts $ \(v, typ, _) -> - unqualifiedTerms %= Map.insertWith (<>) - (Var.unqualifiedName v) - [NamedReference (Var.name v) typ (Left v)] - addTypedComponent _ = pure () - - suggest :: [Resolution v loc] -> Result (Notes v loc) () - suggest = traverse_ - (\(Resolution name inferredType loc suggestions) -> - typeError $ Context.ErrorNote - (Context.UnknownTerm loc (Var.named name) (dedupe suggestions) inferredType) - [] - ) - guard x a = if x then Just a else Nothing - - substSuggestion :: Resolution v loc -> TDNR f v loc () - substSuggestion (Resolution name _ loc (filter Context.isExact -> - [Context.Suggestion _ _ replacement Context.Exact])) - = do - modify (substBlank (Text.unpack name) loc solved) - lift . btw $ Context.Decision (Var.named name) loc solved - where - solved = either (Term.var loc) (Term.fromReferent loc) replacement - substSuggestion _ = pure () - - -- Resolve a `Blank` to a term - substBlank :: String -> loc -> Term v loc -> Term v loc -> Term v loc - substBlank s a r = ABT.visitPure go - where - go t = guard (ABT.annotation t == a) $ ABT.visitPure resolve t - resolve (Term.Blank' (B.Recorded (B.Resolve loc name))) | name == s = - Just (const loc <$> r) - resolve _ = Nothing - - -- Returns Nothing for irrelevant notes - resolveNote - :: Env v loc - -> Context.InfoNote v loc - -> Result (Notes v loc) (Maybe (Resolution v loc)) - resolveNote env (Context.SolvedBlank (B.Resolve loc n) _ it) - = fmap (Just . Resolution (Text.pack n) it loc . dedupe . join) - . traverse (resolve it) - . join - . maybeToList - . 
Map.lookup (Text.pack n) - $ view unqualifiedTerms env - resolveNote _ n = btw n >> pure Nothing - dedupe :: [Context.Suggestion v loc] -> [Context.Suggestion v loc] - dedupe = uniqueBy Context.suggestionReplacement - resolve - :: Context.Type v loc - -> NamedReference v loc - -> Result (Notes v loc) [Context.Suggestion v loc] - resolve inferredType (NamedReference fqn foundType replace) = - -- We found a name that matches. See if the type matches too. - case Context.isSubtype (TypeVar.liftType foundType) inferredType of - Left bug -> const [] <$> compilerBug bug - -- Suggest the import if the type matches. - Right b -> pure - [ Context.Suggestion - fqn - (TypeVar.liftType foundType) - replace - (if b then Context.Exact else Context.WrongType) - ] - --- | Check whether a term matches a type, using a --- function to resolve the type of @Ref@ constructors --- contained in the term. Returns @typ@ if successful, --- and a note about typechecking failure otherwise. -check - :: (Monad f, Var v, Ord loc) - => Env v loc - -> Term v loc - -> Type v loc - -> ResultT (Notes v loc) f (Type v loc) -check env term typ = synthesize env (Term.ann (ABT.annotation term) term typ) --- | `checkAdmissible' e t` tests that `(f : t -> r) e` is well-typed. --- If `t` has quantifiers, these are moved outside, so if `t : forall a . a`, --- this will check that `(f : forall a . a -> a) e` is well typed. 
--- checkAdmissible' :: Var v => Term v -> Type v -> Either Note (Type v) --- checkAdmissible' term typ = --- synthesize' (Term.blank() `Term.ann_` tweak typ `Term.app_` term) --- where --- tweak (Type.ForallNamed' v body) = Type.forall() v (tweak body) --- tweak t = Type.arrow() t t --- | Returns `True` if the expression is well-typed, `False` otherwise -wellTyped :: (Monad f, Var v, Ord loc) => Env v loc -> Term v loc -> f Bool -wellTyped env term = go <$> runResultT (synthesize env term) - where go (may, _) = isJust may - --- | @subtype a b@ is @Right b@ iff @f x@ is well-typed given --- @x : a@ and @f : b -> t@. That is, if a value of type `a` --- can be passed to a function expecting a `b`, then `subtype a b` --- returns `Right b`. This function returns @Left note@ with information --- about the reason for subtyping failure otherwise. --- --- Example: @subtype (forall a. a -> a) (Int -> Int)@ returns @Right (Int -> Int)@. --- subtype :: Var v => Type v -> Type v -> Either Note (Type v) --- subtype t1 t2 = error "todo" - -- let (t1', t2') = (ABT.vmap TypeVar.Universal t1, ABT.vmap TypeVar.Universal t2) - -- in case Context.runM (Context.subtype t1' t2') - -- (Context.MEnv Context.env0 [] Map.empty True) of - -- Left e -> Left e - -- Right _ -> Right t2 - --- | Returns true if @subtype t1 t2@ returns @Right@, false otherwise --- isSubtype :: Var v => Type v -> Type v -> Bool --- isSubtype t1 t2 = case subtype t1 t2 of --- Left _ -> False --- Right _ -> True - --- | Returns true if the two type are equal, up to alpha equivalence and --- order of quantifier introduction. Note that alpha equivalence considers: --- `forall b a . a -> b -> a` and --- `forall a b . 
a -> b -> a` to be different types --- equals :: Var v => Type v -> Type v -> Bool --- equals t1 t2 = isSubtype t1 t2 && isSubtype t2 t1 diff --git a/parser-typechecker/src/Unison/Typechecker/Components.hs b/parser-typechecker/src/Unison/Typechecker/Components.hs deleted file mode 100644 index 9cf283ba7a..0000000000 --- a/parser-typechecker/src/Unison/Typechecker/Components.hs +++ /dev/null @@ -1,88 +0,0 @@ -module Unison.Typechecker.Components (minimize, minimize') where - -import Unison.Prelude - -import Control.Arrow ((&&&)) -import Data.Bifunctor (first) -import Data.Function (on) -import Data.List (groupBy, sortBy) -import Data.List.NonEmpty (NonEmpty) -import qualified Data.List.NonEmpty as Nel -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import Unison.Term (Term') -import qualified Unison.Term as Term -import Unison.Var (Var) - -unordered :: Var v => [(v,Term' vt v a)] -> [[(v,Term' vt v a)]] -unordered = ABT.components - -ordered :: Var v => [(v,Term' vt v a)] -> [[(v,Term' vt v a)]] -ordered = ABT.orderedComponents - --- | Algorithm for minimizing cycles of a `let rec`. This can --- improve generalization during typechecking and may also be more --- efficient for execution. --- --- For instance: --- --- minimize (let rec id x = x; g = id 42; y = id "hi" in g) --- ==> --- Just (let id x = x; g = id 42; y = id "hi" in g) --- --- Gets rid of the let rec and replaces it with an ordinary `let`, such --- that `id` is suitably generalized. --- --- Fails on the left if there are duplicate definitions. -minimize - :: Var v - => Term' vt v a - -> Either (NonEmpty (v, [a])) (Maybe (Term' vt v a)) -minimize (Term.LetRecNamedAnnotatedTop' isTop ann bs e) = - let bindings = first snd <$> bs - group = map (fst . head &&& map (ABT.annotation . snd)) . groupBy ((==) `on` fst) . sortBy - (compare `on` fst) - grouped = group bindings - dupes = filter ((> 1) . length . 
snd) grouped - in if not $ null dupes - then Left $ Nel.fromList dupes - else - let cs0 = if isTop then unordered bindings else ordered bindings - -- within a cycle, we put the lambdas first, so - -- unguarded definitions can refer to these lambdas, example: - -- - -- foo x = blah + 1 + x - -- blah = foo 10 - -- - -- Here `foo` and `blah` are part of a cycle, but putting `foo` - -- first at least lets the program run (though it has an infinite - -- loop). - cs = sortOn (\(_,e) -> Term.arity e == 0) <$> cs0 - varAnnotations = Map.fromList ((\((a, v), _) -> (v, a)) <$> bs) - msg v = error $ "Components.minimize " <> show (v, Map.keys varAnnotations) - annotationFor v = fromMaybe (msg v) $ Map.lookup v varAnnotations - annotatedVar v = (annotationFor v, v) - -- When introducing a nested let/let rec, we use the annotation - -- of the variable that starts off that let/let rec - mklet [(hdv, hdb)] e - | Set.member hdv (ABT.freeVars hdb) = Term.letRec isTop - (annotationFor hdv) - [(annotatedVar hdv, hdb)] - e - | otherwise = Term.let1 isTop [(annotatedVar hdv, hdb)] e - mklet cycle@((hdv, _) : _) e = Term.letRec isTop - (annotationFor hdv) - (first annotatedVar <$> cycle) - e - mklet [] e = e - in - -- The outer annotation is going to be meaningful, so we make - -- sure to preserve it, whereas the annotations at intermediate Abs - -- nodes aren't necessarily meaningful - Right . Just . ABT.annotate ann . 
foldr mklet e $ cs -minimize _ = Right Nothing - -minimize' - :: Var v => Term' vt v a -> Either (NonEmpty (v,[a])) (Term' vt v a) -minimize' term = fromMaybe term <$> minimize term diff --git a/parser-typechecker/src/Unison/Typechecker/Context.hs b/parser-typechecker/src/Unison/Typechecker/Context.hs deleted file mode 100644 index 521e19ff4c..0000000000 --- a/parser-typechecker/src/Unison/Typechecker/Context.hs +++ /dev/null @@ -1,1801 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Typechecker.Context - ( synthesizeClosed - , ErrorNote(..) - , CompilerBug (..) - , InfoNote(..) - , Cause(..) - , Context(..) - , ActualArgCount - , ExpectedArgCount - , ConstructorId - , Element(..) - , PathElement(..) - , Term - , Type - , TypeVar - , Result(..) - , errorTerms - , innermostErrorTerm - , lookupAnn - , lookupSolved - , apply - , isEqual - , isSubtype - , isRedundant - , Suggestion(..) - , SuggestionMatch(..) 
- , isExact - , typeErrors - , infoNotes - ) -where - -import Unison.Prelude - -import qualified Control.Monad.Fail as MonadFail -import Control.Monad.Reader.Class -import Control.Monad.State ( get - , put - , StateT - , runStateT - ) -import Data.Bifunctor ( first - , second - ) -import qualified Data.Foldable as Foldable -import Data.List -import Data.List.NonEmpty ( NonEmpty ) -import qualified Data.Map as Map -import qualified Data.Sequence as Seq -import Data.Sequence.NonEmpty ( NESeq ) -import qualified Data.Sequence.NonEmpty as NESeq -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Unison.ABT as ABT -import qualified Unison.Blank as B -import Unison.DataDeclaration ( DataDeclaration - , EffectDeclaration - ) -import qualified Unison.DataDeclaration as DD -import Unison.Pattern ( Pattern ) -import qualified Unison.Pattern as Pattern -import Unison.Reference ( Reference ) -import Unison.Referent ( Referent ) -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import Unison.Typechecker.Components ( minimize' ) -import qualified Unison.Typechecker.TypeLookup as TL -import qualified Unison.Typechecker.TypeVar as TypeVar -import Unison.Var ( Var ) -import qualified Unison.Var as Var -import qualified Unison.TypePrinter as TP - -type TypeVar v loc = TypeVar.TypeVar (B.Blank loc) v -type Type v loc = Type.Type (TypeVar v loc) loc -type Term v loc = Term.Term' (TypeVar v loc) v loc -type Monotype v loc = Type.Monotype (TypeVar v loc) loc -type RedundantTypeAnnotation = Bool - -pattern Universal v = Var (TypeVar.Universal v) -pattern Existential b v = Var (TypeVar.Existential b v) - -existential :: v -> Element v loc -existential = Existential B.Blank - -existential' :: Ord v => a -> B.Blank loc -> v -> Type.Type (TypeVar v loc) a -existential' a blank v = ABT.annotatedVar a (TypeVar.Existential blank v) - -existentialp :: Ord v => a -> v -> Type v a -existentialp a = existential' a B.Blank - -universal' 
:: Ord v => a -> v -> Type.Type (TypeVar v loc) a -universal' a v = ABT.annotatedVar a (TypeVar.Universal v) - --- | Elements of an ordered algorithmic context -data Element v loc - = Var (TypeVar v loc) -- A variable declaration - | Solved (B.Blank loc) v (Monotype v loc) -- `v` is solved to some monotype - | Ann v (Type v loc) -- `v` has type `a`, maybe quantified - | Marker v -- used for scoping - -instance (Ord loc, Var v) => Eq (Element v loc) where - Var v == Var v2 = v == v2 - Solved _ v t == Solved _ v2 t2 = v == v2 && t == t2 - Ann v t == Ann v2 t2 = v == v2 && t == t2 - Marker v == Marker v2 = v == v2 - _ == _ = False - -data Env v loc = Env { freshId :: Word64, ctx :: Context v loc } - -type DataDeclarations v loc = Map Reference (DataDeclaration v loc) -type EffectDeclarations v loc = Map Reference (EffectDeclaration v loc) - -data Result v loc a = Success (Seq (InfoNote v loc)) a - | TypeError (NESeq (ErrorNote v loc)) (Seq (InfoNote v loc)) - | CompilerBug (CompilerBug v loc) - (Seq (ErrorNote v loc)) -- type errors before hitting the bug - (Seq (InfoNote v loc)) -- info notes before hitting the bug - deriving (Functor) - -instance Applicative (Result v loc) where - pure = Success mempty - CompilerBug bug es is <*> _ = CompilerBug bug es is - r <*> CompilerBug bug es' is' = CompilerBug bug (typeErrors r <> es') (infoNotes r <> is') - TypeError es is <*> r' = TypeError (es NESeq.|>< (typeErrors r')) (is <> infoNotes r') - Success is _ <*> TypeError es' is' = TypeError es' (is <> is') - Success is f <*> Success is' a = Success (is <> is') (f a) - -instance Monad (Result v loc) where - s@(Success _ a) >>= f = s *> f a - TypeError es is >>= _ = TypeError es is - CompilerBug bug es is >>= _ = CompilerBug bug es is - -btw' :: InfoNote v loc -> Result v loc () -btw' note = Success (Seq.singleton note) () - -typeError :: Cause v loc -> Result v loc a -typeError cause = TypeError (pure $ ErrorNote cause mempty) mempty - -compilerBug :: CompilerBug v loc -> 
Result v loc a -compilerBug bug = CompilerBug bug mempty mempty - -typeErrors :: Result v loc a -> Seq (ErrorNote v loc) -typeErrors = \case - TypeError es _ -> NESeq.toSeq es - CompilerBug _ es _ -> es - Success _ _ -> mempty - -infoNotes :: Result v loc a -> Seq (InfoNote v loc) -infoNotes = \case - TypeError _ is -> is - CompilerBug _ _ is -> is - Success is _ -> is - -mapErrors :: (ErrorNote v loc -> ErrorNote v loc) -> Result v loc a -> Result v loc a -mapErrors f r = case r of - TypeError es is -> TypeError (f <$> es) is - CompilerBug bug es is -> CompilerBug bug (f <$> es) is - s@(Success _ _) -> s - -newtype MT v loc f a = MT { - runM :: MEnv v loc -> f (a, Env v loc) -} - --- | Typechecking monad -type M v loc = MT v loc (Result v loc) - --- | Typechecking computation that, unless it crashes --- with a compiler bug, always produces a value. -type TotalM v loc = MT v loc (Either (CompilerBug v loc)) - -liftResult :: Result v loc a -> M v loc a -liftResult r = MT (\m -> (, env m) <$> r) - -liftTotalM :: TotalM v loc a -> M v loc a -liftTotalM (MT m) = MT $ \menv -> case m menv of - Left bug -> CompilerBug bug mempty mempty - Right a -> Success mempty a - --- errorNote :: Cause v loc -> M v loc () --- errorNote = liftResult . errorNote - -btw :: InfoNote v loc -> M v loc () -btw = liftResult . btw' - -modEnv :: (Env v loc -> Env v loc) -> M v loc () -modEnv f = modEnv' $ ((), ) . f - -modEnv' :: (Env v loc -> (a, Env v loc)) -> M v loc a -modEnv' f = MT (\menv -> pure . 
f $ env menv) - -data Unknown = Data | Effect deriving Show - -data CompilerBug v loc - = UnknownDecl Unknown Reference (Map Reference (DataDeclaration v loc)) - | UnknownConstructor Unknown Reference Int (DataDeclaration v loc) - | UndeclaredTermVariable v (Context v loc) - | RetractFailure (Element v loc) (Context v loc) - | EmptyLetRec (Term v loc) -- the body of the empty let rec - | PatternMatchFailure - | EffectConstructorHadMultipleEffects (Type v loc) - | FreeVarsInTypeAnnotation (Set (TypeVar v loc)) - | UnannotatedReference Reference - | MalformedPattern (Pattern loc) - | UnknownTermReference Reference - | UnknownExistentialVariable v (Context v loc) - -- `IllegalContextExtension ctx elem msg` - -- extending `ctx` with `elem` would make `ctx` ill-formed, as explained by `msg` - | IllegalContextExtension (Context v loc) (Element v loc) String - | OtherBug String - deriving Show - -data PathElement v loc - = InSynthesize (Term v loc) - | InSubtype (Type v loc) (Type v loc) - | InCheck (Term v loc) (Type v loc) - | InInstantiateL v (Type v loc) - | InInstantiateR (Type v loc) v - | InSynthesizeApp (Type v loc) (Term v loc) Int - | InFunctionCall [v] (Term v loc) (Type v loc) [Term v loc] - | InAndApp - | InOrApp - | InIfCond - | InIfBody loc -- location of `then` expression - | InVectorApp loc -- location of 1st vector element - | InMatch loc -- location of 1st case body - | InMatchGuard - | InMatchBody - deriving Show - -type ExpectedArgCount = Int -type ActualArgCount = Int -type ConstructorId = Int - -data SuggestionMatch = Exact | WrongType | WrongName - deriving (Ord, Eq, Show) - -data Suggestion v loc = - Suggestion { suggestionName :: Text - , suggestionType :: Type v loc - , suggestionReplacement :: Either v Referent - , suggestionMatch :: SuggestionMatch - } - deriving (Eq, Show) - -isExact :: Suggestion v loc -> Bool -isExact Suggestion {..} = suggestionMatch == Exact - -data ErrorNote v loc = ErrorNote { - cause :: Cause v loc, - path :: Seq 
(PathElement v loc) -} deriving Show - --- `Decision v loc fqn` is a decision to replace the name v at location loc --- with the fully qualified name fqn. -data InfoNote v loc - = SolvedBlank (B.Recorded loc) v (Type v loc) - | Decision v loc (Term.Term v loc) - | TopLevelComponent [(v, Type.Type v loc, RedundantTypeAnnotation)] - deriving (Show) - -data Cause v loc - = TypeMismatch (Context v loc) - | IllFormedType (Context v loc) - | UnknownSymbol loc v - | UnknownTerm loc v [Suggestion v loc] (Type v loc) - | AbilityCheckFailure [Type v loc] [Type v loc] (Context v loc) -- ambient, requested - | EffectConstructorWrongArgCount ExpectedArgCount ActualArgCount Reference ConstructorId - | MalformedEffectBind (Type v loc) (Type v loc) [Type v loc] -- type of ctor, type of ctor result - -- Type of ctor, number of arguments we got - | PatternArityMismatch loc (Type v loc) Int - -- A variable is defined twice in the same block - | DuplicateDefinitions (NonEmpty (v, [loc])) - -- A let rec where things that aren't guarded cyclicly depend on each other - | UnguardedLetRecCycle [v] [(v, Term v loc)] - | ConcatPatternWithoutConstantLength loc (Type v loc) - | HandlerOfUnexpectedType loc (Type v loc) - deriving Show - -errorTerms :: ErrorNote v loc -> [Term v loc] -errorTerms n = Foldable.toList (path n) >>= \e -> case e of - InCheck e _ -> [e] - InSynthesizeApp _ e _ -> [e] - InSynthesize e -> [e] - _ -> [ ] - -innermostErrorTerm :: ErrorNote v loc -> Maybe (Term v loc) -innermostErrorTerm n = listToMaybe $ errorTerms n - -solveBlank :: B.Recorded loc -> v -> Type v loc -> M v loc () -solveBlank blank v typ = btw $ SolvedBlank blank v typ - --- Add `p` onto the end of the `path` of this `ErrorNote` -scope' :: PathElement v loc -> ErrorNote v loc -> ErrorNote v loc -scope' p (ErrorNote cause path) = ErrorNote cause (path `mappend` pure p) - --- Add `p` onto the end of the `path` of any `ErrorNote`s emitted by the action -scope :: PathElement v loc -> M v loc a -> M v loc a 
-scope p (MT m) = MT (mapErrors (scope' p) . m) - --- | The typechecking environment -data MEnv v loc = MEnv { - env :: Env v loc, -- The typechecking state - abilities :: [Type v loc], -- Allowed ambient abilities - dataDecls :: DataDeclarations v loc, -- Data declarations in scope - effectDecls :: EffectDeclarations v loc, -- Effect declarations in scope - -- Types for which ability check should be skipped. - -- See abilityCheck function for how this is used. - skipAbilityCheck :: [Type v loc] -} - -newtype Context v loc = Context [(Element v loc, Info v loc)] - -data Info v loc = - Info { existentialVars :: Set v -- set of existentials seen so far - , solvedExistentials :: Map v (Monotype v loc) -- `v` is solved to some monotype - , universalVars :: Set v -- set of universals seen so far - , termVarAnnotations :: Map v (Type v loc) - , allVars :: Set v -- all variables seen so far - , previouslyTypecheckedVars :: Set v -- term vars already typechecked - } - --- | The empty context -context0 :: Context v loc -context0 = Context [] - --- | Focuses on the first element in the list that satisfies the predicate. --- Returns `(prefix, focusedElem, suffix)`, where `prefix` is in reverse order. -focusAt :: (a -> Bool) -> [a] -> Maybe ([a], a, [a]) -focusAt p xs = go [] xs where - go _ [] = Nothing - go l (h:t) = if p h then Just (l, h, t) else go (h:l) t - --- | Delete from the end of this context up to and including --- the given `Element`. Returns `Nothing` if the element is not found. 
-retract0 :: (Var v, Ord loc) => Element v loc -> Context v loc -> Maybe (Context v loc, [Element v loc]) -retract0 e (Context ctx) = case focusAt (\(e',_) -> e' == e) ctx of - Just (discarded, _, remaining) -> - -- note: no need to recompute used variables; any suffix of the - -- context snoc list is also a valid context - Just (Context remaining, map fst discarded) - Nothing -> Nothing - --- | Adds a marker to the end of the context, runs the `body` and then discards --- from the end of the context up to and including the marker. Returns the result --- of `body` and the discarded context (not including the marker), respectively. --- Freshened `markerHint` is used to create the marker. -markThenRetract :: (Var v, Ord loc) => v -> M v loc a -> M v loc (a, [Element v loc]) -markThenRetract markerHint body = do - v <- freshenVar markerHint - extendContext (Marker v) - a <- body - (a,) <$> doRetract (Marker v) - where - doRetract :: (Var v, Ord loc) => Element v loc -> M v loc [Element v loc] - doRetract e = do - ctx <- getContext - case retract0 e ctx of - Nothing -> compilerCrash (RetractFailure e ctx) - Just (t, discarded) -> do - let solved = - [ (b, v, inst $ Type.getPolytype sa) - | Solved (B.Recorded b) v sa <- discarded - ] - unsolved = - [ (b, v, inst $ existential' (B.loc b) b' v) - | Existential b'@(B.Recorded b) v <- discarded - ] - go (b, v, sa) = solveBlank b v sa - inst = apply ctx - Foldable.traverse_ go (solved ++ unsolved) - setContext t - pure discarded - -markThenRetract0 :: (Var v, Ord loc) => v -> M v loc a -> M v loc () -markThenRetract0 markerHint body = () <$ markThenRetract markerHint body - --- unsolved' :: Context v loc -> [(B.Blank loc, v)] --- unsolved' (Context ctx) = [(b,v) | (Existential b v, _) <- ctx] - -replace :: (Var v, Ord loc) => Element v loc -> [Element v loc] -> Context v loc -> M v loc (Context v loc) -replace e focus ctx = - case breakAt e ctx of - Just (l, _, r) -> l `extendN` (focus <> r) - Nothing -> pure ctx - -breakAt 
:: (Var v, Ord loc) - => Element v loc - -> Context v loc - -> Maybe (Context v loc, Element v loc, [Element v loc]) -breakAt m (Context xs) = - case focusAt (\(e,_) -> e === m) xs of - Just (r, m, l) -> - -- l is a suffix of xs and is already a valid context - Just (Context l, fst m, map fst r) - Nothing -> Nothing - where - Existential _ v === Existential _ v2 | v == v2 = True - Universal v === Universal v2 | v == v2 = True - Marker v === Marker v2 | v == v2 = True - _ === _ = False - - --- | ordered Γ α β = True <=> Γ[α^][β^] -ordered :: (Var v, Ord loc) => Context v loc -> v -> v -> Bool -ordered ctx v v2 = Set.member v (existentials (retract' (existential v2) ctx)) - where - -- Like `retract`, but returns the empty context if retracting would remove - -- all elements. - retract' - :: (Var v, Ord loc) => Element v loc -> Context v loc -> Context v loc - retract' e ctx = maybe context0 fst $ retract0 e ctx - --- env0 :: Env v loc --- env0 = Env 0 context0 - -debugEnabled :: Bool -debugEnabled = False - -debugPatternsEnabled :: Bool -debugPatternsEnabled = False - -_logContext :: (Ord loc, Var v) => String -> M v loc () -_logContext msg = when debugEnabled $ do - ctx <- getContext - let !_ = trace ("\n"++msg ++ ": " ++ show ctx) () - setContext ctx - -usedVars :: Ord v => Context v loc -> Set v -usedVars = allVars . info - -fromMEnv :: (MEnv v loc -> a) -> M v loc a -fromMEnv f = f <$> ask - -getContext :: M v loc (Context v loc) -getContext = fromMEnv $ ctx . 
env - -setContext :: Context v loc -> M v loc () -setContext ctx = modEnv (\e -> e { ctx = ctx }) - -modifyContext :: (Context v loc -> M v loc (Context v loc)) -> M v loc () -modifyContext f = do - c <- getContext - c <- f c - setContext c - -appendContext :: (Var v, Ord loc) => [Element v loc] -> M v loc () -appendContext = traverse_ extendContext - -extendContext :: Var v => Element v loc -> M v loc () -extendContext e = isReserved (varOf e) >>= \case - True -> modifyContext (extend e) - False -> getContext >>= \ctx -> compilerCrash $ - IllegalContextExtension ctx e $ - "Extending context with a variable that is not reserved by the typechecking environment." <> - " That means `freshenVar` is allowed to return it as a fresh variable, which would be wrong." - -replaceContext :: (Var v, Ord loc) => Element v loc -> [Element v loc] -> M v loc () -replaceContext elem replacement = - fromMEnv (\menv -> find (not . (`isReservedIn` env menv) . varOf) replacement) >>= \case - Nothing -> modifyContext (replace elem replacement) - Just e -> getContext >>= \ctx -> compilerCrash $ - IllegalContextExtension ctx e $ - "Extending context with a variable that is not reserved by the typechecking environment." <> - " That means `freshenVar` is allowed to return it as a fresh variable, which would be wrong." - -varOf :: Element v loc -> v -varOf (Var tv) = TypeVar.underlying tv -varOf (Solved _ v _) = v -varOf (Ann v _) = v -varOf (Marker v) = v - -isReserved :: Var v => v -> M v loc Bool -isReserved v = fromMEnv $ (v `isReservedIn`) . env - -isReservedIn :: Var v => v -> Env v loc -> Bool -isReservedIn v e = freshId e > Var.freshId v - -universals :: Ord v => Context v loc -> Set v -universals = universalVars . info - -existentials :: Ord v => Context v loc -> Set v -existentials = existentialVars . info - --- | "Reserves" the given variables in this typechecking environment, --- i.e. ensures that they won't be returned from `freshenVar` as fresh. 
-reserveAll :: (Var v, Foldable t) => t v -> M v loc () -reserveAll vs = - let maxId = foldr (max . Var.freshId) 0 vs - in modEnv (\e -> e { freshId = freshId e `max` maxId + 1}) - -freshenVar :: Var v => v -> M v0 loc v -freshenVar v = modEnv' - (\e -> - let id = freshId e in (Var.freshenId id v, e { freshId = freshId e + 1 }) - ) - -freshenTypeVar :: Var v => TypeVar v loc -> M v loc v -freshenTypeVar v = modEnv' - (\e -> - let id = freshId e - in (Var.freshenId id (TypeVar.underlying v), e { freshId = id + 1 }) - ) - -isClosed :: Var v => Term v loc -> M v loc Bool -isClosed e = Set.null <$> freeVars e - -freeVars :: Var v => Term v loc -> M v loc (Set v) -freeVars e = do - ctx <- getContext - pure $ ABT.freeVars e `Set.difference` previouslyTypecheckedVars (info ctx) - --- todo: do we want this to return a location for the aspect of the type that was not well formed --- todo: or maybe a note / list of notes, or an M --- | Check that the type is well formed wrt the given `Context`, see Figure 7 of paper -wellformedType :: Var v => Context v loc -> Type v loc -> Bool -wellformedType c t = case t of - Type.Var' (TypeVar.Existential _ v) -> Set.member v (existentials c) - Type.Var' (TypeVar.Universal v) -> Set.member v (universals c) - Type.Ref' _ -> True - Type.Arrow' i o -> wellformedType c i && wellformedType c o - Type.Ann' t' _ -> wellformedType c t' - Type.App' x y -> wellformedType c x && wellformedType c y - Type.Effect1' e a -> wellformedType c e && wellformedType c a - Type.Effects' es -> all (wellformedType c) es - Type.IntroOuterNamed' _ t -> wellformedType c t - Type.Forall' t' -> - let (v,ctx2) = extendUniversal c - in wellformedType ctx2 (ABT.bind t' (universal' (ABT.annotation t) v)) - _ -> error $ "Match failure in wellformedType: " ++ show t - where - -- | Extend this `Context` with a single variable, guaranteed fresh - extendUniversal ctx = - let v = Var.freshIn (usedVars ctx) (Var.named "var") - Right ctx' = extend' (Universal v) ctx - in (v, 
ctx') - --- | Return the `Info` associated with the last element of the context, or the zero `Info`. -info :: Ord v => Context v loc -> Info v loc -info (Context []) = Info mempty mempty mempty mempty mempty mempty -info (Context ((_,i):_)) = i - --- | Add an element onto the end of this `Context`. Takes `O(log N)` time, --- including updates to the accumulated `Info` value. --- Fail if the new context is not well formed (see Figure 7 of paper). -extend' :: Var v => Element v loc -> Context v loc -> Either (CompilerBug v loc) (Context v loc) -extend' e c@(Context ctx) = Context . (:ctx) . (e,) <$> i' where - Info es ses us uas vs pvs = info c - -- see figure 7 - i' = case e of - Var v -> case v of - -- UvarCtx - ensure no duplicates - TypeVar.Universal v -> if Set.notMember v vs - then pure $ Info es ses (Set.insert v us) uas (Set.insert v vs) pvs - else crash $ "variable " <> show v <> " already defined in the context" - -- EvarCtx - ensure no duplicates, and that this existential is not solved earlier in context - TypeVar.Existential _ v -> if Set.notMember v vs - then pure $ Info (Set.insert v es) ses us uas (Set.insert v vs) pvs - else crash $ "variable " <> show v <> " already defined in the context" - -- SolvedEvarCtx - ensure `v` is fresh, and the solution is well-formed wrt the context - Solved _ v sa@(Type.getPolytype -> t) - | Set.member v vs -> crash $ "variable " <> show v <> " already defined in the context" - | not (wellformedType c t) -> crash $ "type " <> show t <> " is not well-formed wrt the context" - | otherwise -> pure $ - Info (Set.insert v es) (Map.insert v sa ses) us uas (Set.insert v vs) pvs - -- VarCtx - ensure `v` is fresh, and annotation is well-formed wrt the context - Ann v t - | Set.member v vs -> crash $ "variable " <> show v <> " already defined in the context" - | not (wellformedType c t) -> crash $ "type " <> show t <> " is not well-formed wrt the context" - | otherwise -> pure $ - Info es ses us (Map.insert v t uas) (Set.insert v 
vs) - ((if Set.null (Type.freeVars t) then Set.insert v else id) pvs) - -- MarkerCtx - note that since a Marker is always the first mention of a variable, suffices to - -- just check that `v` is not previously mentioned - Marker v -> if Set.notMember v vs - then pure $ Info es ses us uas (Set.insert v vs) pvs - else crash $ "marker variable " <> show v <> " already defined in the context" - crash reason = Left $ IllegalContextExtension c e reason - -extend :: Var v => Element v loc -> Context v loc -> M v loc (Context v loc) -extend e c = either compilerCrash pure $ extend' e c - --- | Add the given elements onto the end of the given `Context`. --- Fail if the new context is not well-formed. -extendN :: Var v => Context v loc -> [Element v loc] -> M v loc (Context v loc) -extendN ctx es = foldM (flip extend) ctx es - --- | doesn't combine notes -orElse :: M v loc a -> M v loc a -> M v loc a -orElse m1 m2 = MT go where - go menv = runM m1 menv <|> runM m2 menv - s@(Success _ _) <|> _ = s - TypeError _ _ <|> r = r - CompilerBug _ _ _ <|> r = r -- swallowing bugs for now: when checking whether a type annotation - -- is redundant, typechecking without that annotation might result in - -- a CompilerBug that we want `orElse` to recover from - --- getMaybe :: Result v loc a -> Result v loc (Maybe a) --- getMaybe = hoistMaybe Just - --- hoistMaybe :: (Maybe a -> Maybe b) -> Result v loc a -> Result v loc b --- hoistMaybe f (Result es is a) = Result es is (f a) - -getDataDeclarations :: M v loc (DataDeclarations v loc) -getDataDeclarations = fromMEnv dataDecls - -getEffectDeclarations :: M v loc (EffectDeclarations v loc) -getEffectDeclarations = fromMEnv effectDecls - -getAbilities :: M v loc [Type v loc] -getAbilities = fromMEnv abilities - -shouldPerformAbilityCheck :: (Ord loc, Var v) => Type v loc -> M v loc Bool -shouldPerformAbilityCheck t = do - skip <- fromMEnv skipAbilityCheck - skip <- traverse applyM skip - t <- applyM t - pure $ all (/= t) skip - -compilerCrash 
:: CompilerBug v loc -> M v loc a -compilerCrash bug = liftResult $ compilerBug bug - -failWith :: Cause v loc -> M v loc a -failWith cause = liftResult $ typeError cause - -compilerCrashResult :: CompilerBug v loc -> Result v loc a -compilerCrashResult bug = CompilerBug bug mempty mempty - -getDataDeclaration :: Reference -> M v loc (DataDeclaration v loc) -getDataDeclaration r = do - decls <- getDataDeclarations - case Map.lookup r decls of - Nothing -> compilerCrash (UnknownDecl Data r decls) - Just decl -> pure decl - -getEffectDeclaration :: Reference -> M v loc (EffectDeclaration v loc) -getEffectDeclaration r = do - decls <- getEffectDeclarations - case Map.lookup r decls of - Nothing -> compilerCrash (UnknownDecl Effect r (DD.toDataDecl <$> decls)) - Just decl -> pure decl - -getDataConstructorType :: (Var v, Ord loc) => Reference -> Int -> M v loc (Type v loc) -getDataConstructorType = getConstructorType' Data getDataDeclaration - -getEffectConstructorType :: (Var v, Ord loc) => Reference -> Int -> M v loc (Type v loc) -getEffectConstructorType = getConstructorType' Effect go where - go r = DD.toDataDecl <$> getEffectDeclaration r - --- Encountered an unknown constructor in the typechecker; unknown constructors --- should have been detected earlier though. 
-getConstructorType' :: Var v - => Unknown - -> (Reference -> M v loc (DataDeclaration v loc)) - -> Reference - -> Int - -> M v loc (Type v loc) -getConstructorType' kind get r cid = do - decl <- get r - case drop cid (DD.constructors decl) of - [] -> compilerCrash $ UnknownConstructor kind r cid decl - (_v, typ) : _ -> pure $ ABT.vmap TypeVar.Universal typ - -extendUniversal :: (Var v) => v -> M v loc v -extendUniversal v = do - v' <- freshenVar v - extendContext (Universal v') - pure v' - -extendExistential :: (Var v) => v -> M v loc v -extendExistential v = do - v' <- freshenVar v - extendContext (Existential B.Blank v') - pure v' - -extendExistentialTV :: Var v => v -> M v loc (TypeVar v loc) -extendExistentialTV v = - TypeVar.Existential B.Blank <$> extendExistential v - -notMember :: (Var v, Ord loc) => v -> Set (TypeVar v loc) -> Bool -notMember v s = - Set.notMember (TypeVar.Universal v) s && - Set.notMember (TypeVar.Existential B.Blank v) s - --- | Replace any existentials with their solution in the context -apply :: (Var v, Ord loc) => Context v loc -> Type v loc -> Type v loc -apply ctx = apply' (solvedExistentials . 
info $ ctx) - --- | Replace any existentials with their solution in the context (given as a list of elements) -applyCtx :: (Var v, Ord loc) => [Element v loc] -> Type v loc -> Type v loc -applyCtx elems = apply' $ Map.fromList [ (v, sa) | Solved _ v sa <- elems ] - -apply' :: (Var v, Ord loc) => Map v (Monotype v loc) -> Type v loc -> Type v loc -apply' _ t | Set.null (Type.freeVars t) = t -apply' solvedExistentials t = go t where - go t = case t of - Type.Var' (TypeVar.Universal _) -> t - Type.Ref' _ -> t - Type.Var' (TypeVar.Existential _ v) -> - maybe t (\(Type.Monotype t') -> go t') (Map.lookup v solvedExistentials) - Type.Arrow' i o -> Type.arrow a (go i) (go o) - Type.App' x y -> Type.app a (go x) (go y) - Type.Ann' v k -> Type.ann a (go v) k - Type.Effect1' e t -> Type.effect1 a (go e) (go t) - Type.Effects' es -> Type.effects a (map go es) - Type.ForallNamed' v t' -> Type.forall a v (go t') - Type.IntroOuterNamed' v t' -> Type.introOuter a v (go t') - _ -> error $ "Match error in Context.apply': " ++ show t - where a = ABT.annotation t - -loc :: ABT.Term f v loc -> loc -loc = ABT.annotation - --- Prepends the provided abilities onto the existing ambient for duration of `m` -withEffects :: [Type v loc] -> M v loc a -> M v loc a -withEffects abilities' m = - MT (\menv -> runM m (menv { abilities = abilities' ++ abilities menv })) - --- Replaces the ambient abilities with the provided for duration of `m` -withEffects0 :: [Type v loc] -> M v loc a -> M v loc a -withEffects0 abilities' m = - MT (\menv -> runM m (menv { abilities = abilities' })) - - -synthesizeApps :: (Foldable f, Var v, Ord loc) => Type v loc -> f (Term v loc) -> M v loc (Type v loc) -synthesizeApps ft args = - foldM go ft $ Foldable.toList args `zip` [1..] - where go ft arg = do - ctx <- getContext - synthesizeApp (apply ctx ft) arg - --- | Synthesize the type of the given term, `arg` given that a function of --- the given type `ft` is being applied to `arg`. 
Update the context in --- the process. --- e.g. in `(f:t) x` -- finds the type of (f x) given t and x. -synthesizeApp :: (Var v, Ord loc) => Type v loc -> (Term v loc, Int) -> M v loc (Type v loc) -synthesizeApp ft arg | debugEnabled && traceShow ("synthesizeApp"::String, ft, arg) False = undefined -synthesizeApp (Type.stripIntroOuters -> Type.Effect'' es ft) argp@(arg, argNum) = - scope (InSynthesizeApp ft arg argNum) $ abilityCheck es >> go ft - where - go (Type.Forall' body) = do -- Forall1App - v <- ABT.freshen body freshenTypeVar - appendContext [existential v] - let ft2 = ABT.bindInheritAnnotation body (existential' () B.Blank v) - synthesizeApp ft2 argp - go (Type.Arrow' i o) = do -- ->App - let (es, _) = Type.stripEffect o - abilityCheck es - o <$ check arg i - go (Type.Var' (TypeVar.Existential b a)) = do -- a^App - [i,e,o] <- traverse freshenVar [Var.named "i", Var.named "synthsizeApp-refined-effect", Var.named "o"] - let it = existential' (loc ft) B.Blank i - ot = existential' (loc ft) B.Blank o - et = existential' (loc ft) B.Blank e - soln = Type.Monotype (Type.arrow (loc ft) - it - (Type.effect (loc ft) [et] ot)) - ctxMid = [existential o, existential e, - existential i, Solved b a soln] - replaceContext (existential a) ctxMid - synthesizeApp (Type.getPolytype soln) argp - go _ = getContext >>= \ctx -> failWith $ TypeMismatch ctx -synthesizeApp _ _ = error "unpossible - Type.Effect'' pattern always succeeds" - --- For arity 3, creates the type `∀ a . a -> a -> a -> Sequence a` --- For arity 2, creates the type `∀ a . 
a -> a -> Sequence a` -vectorConstructorOfArity :: (Var v, Ord loc) => loc -> Int -> M v loc (Type v loc) -vectorConstructorOfArity loc arity = do - let elementVar = Var.named "elem" - args = replicate arity (loc, Type.var loc elementVar) - resultType = Type.app loc (Type.vector loc) (Type.var loc elementVar) - vt = Type.forall loc elementVar (Type.arrows args resultType) - pure vt - -generalizeAndUnTypeVar :: Var v => Type v a -> Type.Type v a -generalizeAndUnTypeVar t = - Type.cleanup . ABT.vmap TypeVar.underlying . Type.generalize (Set.toList $ ABT.freeVars t) $ t - -generalizeExistentials' - :: Var v => Type v a -> Type v a -generalizeExistentials' t = - Type.generalize (filter isExistential . Set.toList $ ABT.freeVars t) t - where - isExistential (TypeVar.Existential _ _) = True - isExistential _ = False - -noteTopLevelType - :: (Ord loc, Var v) - => ABT.Subst f v a - -> Term v loc - -> Type v loc - -> M v loc () -noteTopLevelType e binding typ = case binding of - Term.Ann' strippedBinding _ -> do - inferred <- (Just <$> synthesize strippedBinding) `orElse` pure Nothing - case inferred of - Nothing -> btw $ TopLevelComponent - [(Var.reset (ABT.variable e), generalizeAndUnTypeVar typ, False)] - Just inferred -> do - redundant <- isRedundant typ inferred - btw $ TopLevelComponent - [(Var.reset (ABT.variable e), generalizeAndUnTypeVar typ, redundant)] - -- The signature didn't exist, so was definitely redundant - _ -> btw $ TopLevelComponent - [(Var.reset (ABT.variable e), generalizeAndUnTypeVar typ, True)] - --- | Synthesize the type of the given term, updating the context in the process. --- | Figure 11 from the paper -synthesize :: forall v loc . 
(Var v, Ord loc) => Term v loc -> M v loc (Type v loc) -synthesize e | debugEnabled && traceShow ("synthesize"::String, e) False = undefined -synthesize e = scope (InSynthesize e) $ - case minimize' e of - Left es -> failWith (DuplicateDefinitions es) - Right e -> do - Type.Effect'' es t <- go e - abilityCheck es - pure t - where - l = loc e - go :: (Var v, Ord loc) => Term v loc -> M v loc (Type v loc) - go (Term.Var' v) = getContext >>= \ctx -> case lookupAnn ctx v of -- Var - Nothing -> compilerCrash $ UndeclaredTermVariable v ctx - Just t -> pure t - go (Term.Blank' blank) = do - v <- freshenVar Var.blank - appendContext [Existential blank v] - pure $ existential' l blank v -- forall (TypeVar.Universal v) (Type.universal v) - go (Term.Ann' (Term.Ref' _) t) = case ABT.freeVars t of - s | Set.null s -> - -- innermost Ref annotation assumed to be correctly provided by `synthesizeClosed` - existentializeArrows t - s -> compilerCrash $ FreeVarsInTypeAnnotation s - go (Term.Ref' h) = compilerCrash $ UnannotatedReference h - go (Term.Constructor' r cid) = do - t <- getDataConstructorType r cid - existentializeArrows t - go (Term.Request' r cid) = do - t <- ungeneralize =<< getEffectConstructorType r cid - existentializeArrows t - go (Term.Ann' e t) = do - t <- existentializeArrows t - t <$ checkScoped e t - go (Term.Float' _) = pure $ Type.float l -- 1I=> - go (Term.Int' _) = pure $ Type.int l -- 1I=> - go (Term.Nat' _) = pure $ Type.nat l -- 1I=> - go (Term.Boolean' _) = pure $ Type.boolean l - go (Term.Text' _) = pure $ Type.text l - go (Term.Char' _) = pure $ Type.char l - go (Term.TermLink' _) = pure $ Type.termLink l - go (Term.TypeLink' _) = pure $ Type.typeLink l - go (Term.Apps' f args) = do -- ->EEEEE - ft <- synthesize f - ctx <- getContext - (vs, ft) <- ungeneralize' ft - scope (InFunctionCall vs f ft args) $ synthesizeApps (apply ctx ft) args - go (Term.Sequence' v) = do - ft <- vectorConstructorOfArity (loc e) (Foldable.length v) - case Foldable.toList v 
of - [] -> pure ft - v1 : _ -> - scope (InVectorApp (ABT.annotation v1)) $ synthesizeApps ft v - go (Term.Let1Top' top binding e) = do - isClosed <- isClosed binding - -- note: no need to freshen binding, it can't refer to v - (t, ctx2) <- markThenRetract Var.inferOther $ do - _ <- extendExistential Var.inferOther - synthesize binding - -- If the binding has no free variables, we generalize over its existentials - tbinding <- - if isClosed then pure $ generalizeExistentials ctx2 t - else applyM . applyCtx ctx2 $ t - v' <- ABT.freshen e freshenVar - appendContext [Ann v' tbinding] - t <- applyM =<< synthesize (ABT.bindInheritAnnotation e (Term.var() v')) - when top $ noteTopLevelType e binding tbinding - -- doRetract $ Ann v' tbinding - pure t - go (Term.Lam' body) = do -- ->I=> (Full Damas Milner rule) - -- arya: are there more meaningful locations we could put into and pull out of the abschain?) - [arg, i, e, o] <- sequence [ ABT.freshen body freshenVar - , freshenVar (ABT.variable body) - , freshenVar Var.inferAbility - , freshenVar Var.inferOutput ] - let it = existential' l B.Blank i - ot = existential' l B.Blank o - et = existential' l B.Blank e - appendContext $ - [existential i, existential e, existential o, Ann arg it] - body' <- pure $ ABT.bindInheritAnnotation body (Term.var() arg) - if Term.isLam body' then withEffects0 [] $ check body' ot - else withEffects0 [et] $ check body' ot - ctx <- getContext - let t = Type.arrow l it (Type.effect l (apply ctx <$> [et]) ot) - pure t - go (Term.LetRecNamed' [] body) = synthesize body - go (Term.LetRecTop' isTop letrec) = do - (t, ctx2) <- markThenRetract (Var.named "let-rec-marker") $ do - e <- annotateLetRecBindings isTop letrec - synthesize e - pure $ generalizeExistentials ctx2 t - go (Term.If' cond t f) = do - scope InIfCond $ check cond (Type.boolean l) - scope (InIfBody $ ABT.annotation t) $ synthesizeApps (Type.iff2 l) [t, f] - go (Term.And' a b) = - scope InAndApp $ synthesizeApps (Type.andor' l) [a, b] - 
go (Term.Or' a b) = - scope InOrApp $ synthesizeApps (Type.andor' l) [a, b] - go (Term.Match' scrutinee cases) = do - scrutineeType <- synthesize scrutinee - outputTypev <- freshenVar (Var.named "match-output") - let outputType = existential' l B.Blank outputTypev - appendContext [existential outputTypev] - case cases of -- only relevant with 2 or more cases, but 1 is safe too. - [] -> pure () - Term.MatchCase _ _ t : _ -> scope (InMatch (ABT.annotation t)) $ - Foldable.traverse_ (checkCase scrutineeType outputType) cases - ctx <- getContext - pure $ apply ctx outputType - go (Term.Handle' h body) = do - -- To synthesize a handle block, we first synthesize the handler h, - -- then push its allowed abilities onto the current ambient set when - -- checking the body. Assuming that works, we also verify that the - -- handler only uses abilities in the current ambient set. - ht <- synthesize h >>= applyM >>= ungeneralize - ctx <- getContext - case ht of - -- common case, like `h : Request {Remote} a -> b`, brings - -- `Remote` into ambient when checking `body` - Type.Arrow' (Type.Apps' (Type.Ref' ref) [et,i]) o | ref == Type.effectRef -> do - let es = Type.flattenEffects et - withEffects es $ check body i - o <- applyM o - let (oes, o') = Type.stripEffect o - abilityCheck oes - pure o' - -- degenerate case, like `handle x -> 10 in ...` - Type.Arrow' (i@(Type.Var' (TypeVar.Existential _ v@(lookupSolved ctx -> Nothing)))) o -> do - e <- extendExistential v - withEffects [existentialp (loc i) e] $ check body i - o <- applyM o - let (oes, o') = Type.stripEffect o - abilityCheck oes - pure o' - _ -> failWith $ HandlerOfUnexpectedType (loc h) ht - go _e = compilerCrash PatternMatchFailure - -checkCase :: forall v loc . 
(Var v, Ord loc) - => Type v loc - -> Type v loc - -> Term.MatchCase loc (Term v loc) - -> M v loc () -checkCase scrutineeType outputType (Term.MatchCase pat guard rhs) = do - scrutineeType <- applyM scrutineeType - outputType <- applyM outputType - markThenRetract0 Var.inferOther $ do - let peel t = case t of - ABT.AbsN' vars bod -> (vars, bod) - _ -> ([], t) - (rhsvs, rhsbod) = peel rhs - mayGuard = snd . peel <$> guard - (substs, remains) <- runStateT (checkPattern scrutineeType pat) rhsvs - unless (null remains) $ compilerCrash (MalformedPattern pat) - let subst = ABT.substsInheritAnnotation (second (Term.var ()) <$> substs) - rhs' = subst rhsbod - guard' = subst <$> mayGuard - for_ guard' $ \g -> scope InMatchGuard $ check g (Type.boolean (loc g)) - outputType <- applyM outputType - scope InMatchBody $ check rhs' outputType - -checkPattern - :: (Var v, Ord loc) - => Type v loc - -> Pattern loc - -> StateT [v] (M v loc) [(v, v)] -checkPattern tx ty | (debugEnabled || debugPatternsEnabled) && traceShow ("checkPattern"::String, tx, ty) False = undefined -checkPattern scrutineeType0 p = - lift (ungeneralize scrutineeType0) >>= \scrutineeType -> case p of - Pattern.Unbound _ -> pure [] - Pattern.Var _loc -> do - v <- getAdvance p - v' <- lift $ freshenVar v - lift . appendContext $ [Ann v' scrutineeType] - pure [(v, v')] - Pattern.SequenceLiteral loc ps -> do - vt <- lift $ do - v <- freshenVar Var.inferOther - let vt = existentialp loc v - appendContext [existential v] - subtype (Type.app loc (Type.vector loc) vt) scrutineeType - applyM vt - join <$> traverse (checkPattern vt) ps - Pattern.SequenceOp loc l op r -> do - let (locL, locR) = (Pattern.loc l, Pattern.loc r) - vt <- lift $ do - v <- freshenVar Var.inferOther - let vt = existentialp loc v - appendContext [existential v] - -- todo: `Type.vector loc` is super-probably wrong; - -- I'm thinking it should be Ann.Intrinsic, but we don't - -- have access to that here. 
- subtype (Type.app loc (Type.vector loc) vt) scrutineeType - applyM vt - case op of - Pattern.Cons -> do - lvs <- checkPattern vt l - -- todo: same `Type.vector loc` thing - rvs <- checkPattern (Type.app locR (Type.vector locR) vt) r - pure $ lvs ++ rvs - Pattern.Snoc -> do - -- todo: same `Type.vector loc` thing - lvs <- checkPattern (Type.app locL (Type.vector locL) vt) l - rvs <- checkPattern vt r - pure $ lvs ++ rvs - Pattern.Concat -> - case (l, r) of - (p, _) | isConstLen p -> f - (_, p) | isConstLen p -> f - (_, _) -> lift . failWith $ - ConcatPatternWithoutConstantLength loc (Type.app loc (Type.vector loc) vt) - where - f = liftA2 (++) (g locL l) (g locR r) - -- todo: same `Type.vector loc` thing - g l p = checkPattern (Type.app l (Type.vector l) vt) p - - -- Only pertains to sequences, returns False if not a sequence - isConstLen :: Pattern loc -> Bool - isConstLen p = case p of - Pattern.SequenceLiteral _ _ -> True - Pattern.SequenceOp _ l op r -> case op of - Pattern.Snoc -> isConstLen l - Pattern.Cons -> isConstLen r - Pattern.Concat -> isConstLen l && isConstLen r - Pattern.As _ p -> isConstLen p - _ -> False - -- TODO: provide a scope here for giving a good error message - Pattern.Boolean loc _ -> - lift $ subtype (Type.boolean loc) scrutineeType $> mempty - Pattern.Int loc _ -> - lift $ subtype (Type.int loc) scrutineeType $> mempty - Pattern.Nat loc _ -> - lift $ subtype (Type.nat loc) scrutineeType $> mempty - Pattern.Float loc _ -> - lift $ subtype (Type.float loc) scrutineeType $> mempty - Pattern.Text loc _ -> - lift $ subtype (Type.text loc) scrutineeType $> mempty - Pattern.Char loc _ -> - lift $ subtype (Type.char loc) scrutineeType $> mempty - Pattern.Constructor loc ref cid args -> do - dct <- lift $ getDataConstructorType ref cid - udct <- lift $ ungeneralize dct - unless (Type.arity udct == length args) - . lift - . 
failWith - $ PatternArityMismatch loc dct (length args) - let step (Type.Arrow' i o, vso) pat = - (\vso' -> (o, vso ++ vso')) <$> checkPattern i pat - step _ _ = - lift . failWith $ PatternArityMismatch loc dct (length args) - (overall, vs) <- foldM step (udct, []) args - st <- lift $ applyM scrutineeType - lift $ subtype overall st - pure vs - Pattern.As _loc p' -> do - v <- getAdvance p - v' <- lift $ freshenVar v - lift . appendContext $ [Ann v' scrutineeType] - ((v, v') :) <$> checkPattern scrutineeType p' - Pattern.EffectPure loc p -> do - vt <- lift $ do - v <- freshenVar Var.inferPatternPureV - e <- freshenVar Var.inferPatternPureE - let vt = existentialp loc v - let et = existentialp loc e - appendContext [existential v, existential e] - subtype (Type.effectV loc (loc, et) (loc, vt)) scrutineeType - applyM vt - checkPattern vt p - Pattern.EffectBind loc ref cid args k -> do - -- scrutineeType should be a supertype of `Effect e vt` - -- for fresh existentials `e` and `vt` - e <- lift $ extendExistential Var.inferPatternBindE - v <- lift $ extendExistential Var.inferPatternBindV - let evt = Type.effectV loc (loc, existentialp loc e) - (loc, existentialp loc v) - lift $ subtype evt scrutineeType - ect <- lift $ getEffectConstructorType ref cid - uect <- lift $ ungeneralize ect - unless (Type.arity uect == length args) - . lift - . failWith - . PatternArityMismatch loc ect - $ length args - let step (Type.Arrow' i o, vso) pat = - (\vso' -> (o, vso ++ vso')) <$> checkPattern i pat - step _ _ = - lift . failWith $ PatternArityMismatch loc ect (length args) - (ctorOutputType, vs) <- foldM step (uect, []) args - case ctorOutputType of - -- an effect ctor should have exactly 1 effect! - Type.Effect'' [et] it -> do - -- expecting scrutineeType to be `Effect et vt` - st <- lift $ applyM scrutineeType - case st of - Type.App' _ vt -> - let kt = Type.arrow (Pattern.loc k) - it - (Type.effect (Pattern.loc k) [et] vt) - in (vs ++) <$> checkPattern kt k - _ -> lift . 
compilerCrash $ PatternMatchFailure - _ -> lift . compilerCrash $ EffectConstructorHadMultipleEffects - ctorOutputType - where - - getAdvance :: Pattern loc -> StateT [v] (M v loc) v - getAdvance p = do - vs <- get - case vs of - [] -> lift $ compilerCrash (MalformedPattern p) - (v : vs) -> do - put vs - pure v - -applyM :: (Var v, Ord loc) => Type v loc -> M v loc (Type v loc) -applyM t = (`apply` t) <$> getContext - -lookupAnn :: Ord v => Context v loc -> v -> Maybe (Type v loc) -lookupAnn ctx v = Map.lookup v (termVarAnnotations . info $ ctx) - -lookupSolved :: Ord v => Context v loc -> v -> Maybe (Monotype v loc) -lookupSolved ctx v = Map.lookup v (solvedExistentials . info $ ctx) - -resetContextAfter :: a -> M v loc a -> M v loc a -resetContextAfter x a = do - ctx <- getContext - a <- a `orElse` pure x - setContext ctx - pure a - --- | Synthesize and generalize the type of each binding in a let rec. --- Updates the context so that all bindings are annotated with --- their type. Also returns the freshened version of `body`. --- See usage in `synthesize` and `check` for `LetRec'` case. -annotateLetRecBindings - :: (Var v, Ord loc) - => Term.IsTop - -> ((v -> M v loc v) -> M v loc ([(v, Term v loc)], Term v loc)) - -> M v loc (Term v loc) -annotateLetRecBindings isTop letrec = - -- If this is a top-level letrec, then emit a TopLevelComponent note, - -- which asks if the user-provided type annotations were needed. - if isTop - then do - -- First, typecheck (using annotateLetRecBindings') the bindings with any - -- user-provided annotations. - (body, vts) <- annotateLetRecBindings' True - -- Then, try typechecking again, but ignoring any user-provided annotations. - -- This will infer whatever type. 
If it altogether fails to typecheck here - -- then, ...(1) - withoutAnnotations <- - resetContextAfter Nothing $ Just <$> annotateLetRecBindings' False - -- convert from typechecker TypeVar back to regular `v` vars - let unTypeVar (v, t) = (v, generalizeAndUnTypeVar t) - case withoutAnnotations of - Just (_, vts') -> do - r <- and <$> zipWithM isRedundant (fmap snd vts) (fmap snd vts') - btw $ TopLevelComponent ((\(v,b) -> (Var.reset v, b,r)) . unTypeVar <$> vts) - -- ...(1) we'll assume all the user-provided annotations were needed - Nothing -> btw - $ TopLevelComponent ((\(v, b) -> (Var.reset v, b, False)) . unTypeVar <$> vts) - pure body - -- If this isn't a top-level letrec, then we don't have to do anything special - else fst <$> annotateLetRecBindings' True - where - annotateLetRecBindings' useUserAnnotations = do - (bindings, body) <- letrec freshenVar - let vs = map fst bindings - ((bindings, bindingTypes), ctx2) <- markThenRetract Var.inferOther $ do - let f (v, binding) = case binding of - -- If user has provided an annotation, we use that - Term.Ann' e t | useUserAnnotations -> do - -- Arrows in `t` with no ability lists get an attached fresh - -- existential to allow inference of required abilities - t2 <- existentializeArrows =<< applyM t - pure (Term.ann (loc binding) e t2, t2) - -- If we're not using an annotation, we make one up. There's 2 cases: - - lam@(Term.Lam' _) -> - -- If `e` is a lambda of arity K, we immediately refine the - -- existential to `a1 ->{e1} a2 ... ->{eK} r`. This gives better - -- inference of the lambda's ability variables in conjunction with - -- handling of lambdas in `check` judgement. 
- (lam,) <$> existentialFunctionTypeFor lam - e -> do - -- Anything else, just make up a fresh existential - -- which will be refined during typechecking of the binding - vt <- extendExistential v - pure $ (e, existential' (loc binding) B.Blank vt) - (bindings, bindingTypes) <- unzip <$> traverse f bindings - appendContext (zipWith Ann vs bindingTypes) - -- check each `bi` against its type - Foldable.for_ (zip bindings bindingTypes) $ \(b, t) -> - -- note: elements of a cycle have to be pure, otherwise order of effects - -- is unclear and chaos ensues - withEffects0 [] (checkScoped b t) - ensureGuardedCycle (vs `zip` bindings) - pure (bindings, bindingTypes) - -- compute generalized types `gt1, gt2 ...` for each binding `b1, b2...`; - -- add annotations `v1 : gt1, v2 : gt2 ...` to the context - let bindingArities = Term.arity <$> bindings - gen bindingType _arity = generalizeExistentials ctx2 bindingType - bindingTypesGeneralized = zipWith gen bindingTypes bindingArities - annotations = zipWith Ann vs bindingTypesGeneralized - appendContext annotations - pure (body, vs `zip` bindingTypesGeneralized) - -ensureGuardedCycle :: Var v => [(v, Term v loc)] -> M v loc () -ensureGuardedCycle bindings = let - -- We make sure that nonLambdas can depend only on lambdas, not on each other - nonLambdas = Set.fromList [ v | (v, b) <- bindings, Term.arity b == 0 ] - (notok, ok) = partition f bindings - f (v, b) = - if Set.member v nonLambdas then - not $ Set.null (ABT.freeVars b `Set.intersection` nonLambdas) - else False - in if length ok == length bindings then pure () - else failWith $ UnguardedLetRecCycle (fst <$> notok) bindings - -existentialFunctionTypeFor :: Var v => Term v loc -> M v loc (Type v loc) -existentialFunctionTypeFor lam@(Term.LamNamed' v body) = do - v <- extendExistential v - e <- extendExistential Var.inferAbility - o <- existentialFunctionTypeFor body - pure $ Type.arrow (loc lam) - (existentialp (loc lam) v) - (Type.effect (loc lam) [existentialp (loc 
lam) e] o) -existentialFunctionTypeFor e = do - v <- extendExistential Var.inferOutput - pure $ existentialp (loc e) v - -existentializeArrows :: Var v => Type v loc -> M v loc (Type v loc) -existentializeArrows t = do - t <- Type.existentializeArrows (extendExistentialTV Var.inferAbility) t - pure t - -ungeneralize :: (Var v, Ord loc) => Type v loc -> M v loc (Type v loc) -ungeneralize t = snd <$> ungeneralize' t - -ungeneralize' :: (Var v, Ord loc) => Type v loc -> M v loc ([v], Type v loc) -ungeneralize' (Type.Forall' t) = do - v <- ABT.freshen t freshenTypeVar - appendContext [existential v] - t <- pure $ ABT.bindInheritAnnotation t (existential' () B.Blank v) - first (v:) <$> ungeneralize' t -ungeneralize' t = pure ([], t) - --- | Apply the context to the input type, then convert any unsolved existentials --- to universals. -generalizeExistentials :: (Var v, Ord loc) => [Element v loc] -> Type v loc -> Type v loc -generalizeExistentials ctx t = - foldr gen (applyCtx ctx t) unsolvedExistentials - where - unsolvedExistentials = [ v | Var (TypeVar.Existential _ v) <- ctx ] - gen e t = - if TypeVar.Existential B.Blank e `ABT.isFreeIn` t - -- location of the forall is just the location of the input type - -- and the location of each quantified variable is just inherited from - -- its source location - then Type.forall (loc t) - (TypeVar.Universal e) - (ABT.substInheritAnnotation - (TypeVar.Existential B.Blank e) - (universal' () e) - t) - else t -- don't bother introducing a forall if type variable is unused - --- This checks `e` against the type `t`, but if `t` is a `∀`, any ∀-quantified --- variables are freshened and substituted into `e`. This should be called whenever --- a term is being checked against a type due to a user-provided signature on `e`. --- See its usage in `synthesize` and `annotateLetRecBindings`. -checkScoped :: forall v loc . 
(Var v, Ord loc) => Term v loc -> Type v loc -> M v loc () -checkScoped e t = case t of - Type.Forall' body -> do -- ForallI - v <- ABT.freshen body freshenTypeVar - markThenRetract0 v $ do - x <- extendUniversal v - let e' = Term.substTypeVar (ABT.variable body) (universal' () x) e - checkScoped e' (ABT.bindInheritAnnotation body (universal' () x)) - _ -> check e t - --- | Check that under the given context, `e` has type `t`, --- updating the context in the process. -check :: forall v loc . (Var v, Ord loc) => Term v loc -> Type v loc -> M v loc () -check e t | debugEnabled && traceShow ("check" :: String, e, t) False = undefined -check e0 t0 = scope (InCheck e0 t0) $ do - ctx <- getContext - let Type.Effect'' es t = t0 - let e = minimize' e0 - case e of - Left e -> failWith $ DuplicateDefinitions e - Right e -> - if wellformedType ctx t0 - then case t of - -- expand existentials before checking - t@(Type.Var' (TypeVar.Existential _ _)) -> abilityCheck es >> go e (apply ctx t) - t -> go e (Type.stripIntroOuters t) - else failWith $ IllFormedType ctx - where - go :: Term v loc -> Type v loc -> M v loc () - go e (Type.Forall' body) = do -- ForallI - v <- ABT.freshen body freshenTypeVar - markThenRetract0 v $ do - x <- extendUniversal v - check e (ABT.bindInheritAnnotation body (universal' () x)) - go (Term.Lam' body) (Type.Arrow' i o) = do -- =>I - x <- ABT.freshen body freshenVar - markThenRetract0 x $ do - extendContext (Ann x i) - let Type.Effect'' es ot = o - body' <- pure $ ABT.bindInheritAnnotation body (Term.var() x) - withEffects0 es $ check body' ot - go (Term.Let1' binding e) t = do - v <- ABT.freshen e freshenVar - tbinding <- synthesize binding - markThenRetract0 v $ do - extendContext (Ann v tbinding) - check (ABT.bindInheritAnnotation e (Term.var () v)) t - go (Term.LetRecNamed' [] e) t = check e t - go (Term.LetRecTop' isTop letrec) t = - markThenRetract0 (Var.named "let-rec-marker") $ do - e <- annotateLetRecBindings isTop letrec - check e t - go e t 
= do -- Sub - a <- synthesize e - ctx <- getContext - subtype (apply ctx a) (apply ctx t) - --- | `subtype ctx t1 t2` returns successfully if `t1` is a subtype of `t2`. --- This may have the effect of altering the context. -subtype :: forall v loc . (Var v, Ord loc) => Type v loc -> Type v loc -> M v loc () -subtype tx ty | debugEnabled && traceShow ("subtype"::String, tx, ty) False = undefined -subtype tx ty = scope (InSubtype tx ty) $ do - ctx <- getContext - go (ctx :: Context v loc) (Type.stripIntroOuters tx) (Type.stripIntroOuters ty) - where -- Rules from figure 9 - go :: Context v loc -> Type v loc -> Type v loc -> M v loc () - go _ (Type.Ref' r) (Type.Ref' r2) | r == r2 = pure () -- `Unit` - go ctx t1@(Type.Var' (TypeVar.Universal v1)) t2@(Type.Var' (TypeVar.Universal v2)) -- `Var` - | v1 == v2 && wellformedType ctx t1 && wellformedType ctx t2 - = pure () - go ctx t1@(Type.Var' (TypeVar.Existential _ v1)) t2@(Type.Var' (TypeVar.Existential _ v2)) -- `Exvar` - | v1 == v2 && wellformedType ctx t1 && wellformedType ctx t2 - = pure () - go _ (Type.Arrow' i1 o1) (Type.Arrow' i2 o2) = do -- `-->` - subtype i2 i1; ctx' <- getContext - subtype (apply ctx' o1) (apply ctx' o2) - go _ (Type.App' x1 y1) (Type.App' x2 y2) = do -- analogue of `-->` - subtype x1 x2 - -- We don't know the variance of the type argument, so we assume - -- (conservatively) that it's invariant, see - -- discussion https://github.com/unisonweb/unison/issues/512 - y1 <- applyM y1; y2 <- applyM y2 - subtype y1 y2 - y1 <- applyM y1; y2 <- applyM y2 - -- performing the subtype check in both directions means the types must be equal - subtype y2 y1 - go _ t (Type.Forall' t2) = do - v <- ABT.freshen t2 freshenTypeVar - markThenRetract0 v $ do - v' <- extendUniversal v - t2 <- pure $ ABT.bindInheritAnnotation t2 (universal' () v') - subtype t t2 - go _ (Type.Forall' t) t2 = do - v0 <- ABT.freshen t freshenTypeVar - markThenRetract0 v0 $ do - v <- extendExistential v0 - t <- pure $ 
ABT.bindInheritAnnotation t (existential' () B.Blank v) - t1 <- applyM t - subtype t1 t2 - go _ (Type.Effect1' e1 a1) (Type.Effect1' e2 a2) = do - subtype e1 e2 - ctx <- getContext - subtype (apply ctx a1) (apply ctx a2) - go _ a (Type.Effect1' _e2 a2) = subtype a a2 - go _ (Type.Effect1' es a) a2 = do - subtype es (Type.effects (loc es) []) - subtype a a2 - go ctx (Type.Var' (TypeVar.Existential b v)) t -- `InstantiateL` - | Set.member v (existentials ctx) && notMember v (Type.freeVars t) = - instantiateL b v t - go ctx t (Type.Var' (TypeVar.Existential b v)) -- `InstantiateR` - | Set.member v (existentials ctx) && notMember v (Type.freeVars t) = - instantiateR t b v - go _ (Type.Effects' es1) (Type.Effects' es2) = do - ctx <- getContext - let es1' = map (apply ctx) es1 - es2' = map (apply ctx) es2 - if all (`elem` es2') es1' then pure () else abilityCheck' es2' es1' - go _ t t2@(Type.Effects' _) | expand t = subtype (Type.effects (loc t) [t]) t2 - go _ t@(Type.Effects' _) t2 | expand t2 = subtype t (Type.effects (loc t2) [t2]) - go ctx _ _ = failWith $ TypeMismatch ctx - - expand :: Type v loc -> Bool - expand t = case t of - Type.Var' (TypeVar.Existential _ _) -> True - Type.App' _ _ -> True - Type.Ref' _ -> True - _ -> False - - --- | Instantiate the given existential such that it is --- a subtype of the given type, updating the context --- in the process. 
-instantiateL :: (Var v, Ord loc) => B.Blank loc -> v -> Type v loc -> M v loc () -instantiateL _ v t | debugEnabled && traceShow ("instantiateL"::String, v, t) False = undefined -instantiateL blank v (Type.stripIntroOuters -> t) = scope (InInstantiateL v t) $ - getContext >>= \ctx -> case Type.monotype t of - Just t -> solve ctx v t >>= \case - Just ctx -> setContext ctx -- InstLSolve - Nothing -> go ctx - Nothing -> go ctx - where - go ctx = case t of - Type.Var' (TypeVar.Existential _ v2) | ordered ctx v v2 -> -- InstLReach (both are existential, set v2 = v) - solve ctx v2 (Type.Monotype (existentialp (loc t) v)) >>= - maybe (failWith $ TypeMismatch ctx) setContext - Type.Arrow' i o -> do -- InstLArr - [i',o'] <- traverse freshenVar [nameFrom Var.inferInput i, nameFrom Var.inferOutput o] - let s = Solved blank v (Type.Monotype (Type.arrow (loc t) - (existentialp (loc i) i') - (existentialp (loc o) o'))) - replaceContext (existential v) - [existential o', existential i', s] - instantiateR i B.Blank i' -- todo: not sure about this, could also be `blank` - applyM o >>= instantiateL B.Blank o' - Type.App' x y -> do -- analogue of InstLArr - [x', y'] <- traverse freshenVar [nameFrom Var.inferTypeConstructor x, nameFrom Var.inferTypeConstructorArg y] - let s = Solved blank v (Type.Monotype (Type.app (loc t) - (existentialp (loc x) x') - (existentialp (loc y) y'))) - replaceContext (existential v) - [existential y', existential x', s] - applyM x >>= instantiateL B.Blank x' - applyM y >>= instantiateL B.Blank y' - Type.Effect1' es vt -> do - es' <- freshenVar Var.inferAbility - vt' <- freshenVar Var.inferOther - let t' = Type.effect1 (loc t) (existentialp (loc es) es') - (existentialp (loc vt) vt') - s = Solved blank v (Type.Monotype t') - replaceContext (existential v) - [existential es', existential vt', s] - applyM es >>= instantiateL B.Blank es' - applyM vt >>= instantiateL B.Blank vt' - Type.Effects' es -> do - es' <- traverse (\e -> freshenVar (nameFrom 
Var.inferAbility e)) es - let locs = loc <$> es - t' = Type.effects (loc t) (uncurry existentialp <$> locs `zip` es') - s = Solved blank v $ Type.Monotype t' - replaceContext (existential v) - ((existential <$> es') ++ [s]) - Foldable.for_ (es' `zip` es) $ \(e',e) -> - applyM e >>= instantiateL B.Blank e' - Type.Forall' body -> do -- InstLIIL - v0 <- ABT.freshen body freshenTypeVar - markThenRetract0 v0 $ do - v <- extendUniversal v0 - instantiateL B.Blank v (ABT.bindInheritAnnotation body (universal' () v)) - _ -> failWith $ TypeMismatch ctx - -nameFrom :: Var v => v -> Type v loc -> v -nameFrom _ (Type.Var' v) = TypeVar.underlying (Var.reset v) -nameFrom ifNotVar _ = ifNotVar - --- | Instantiate the given existential such that it is --- a supertype of the given type, updating the context --- in the process. -instantiateR :: (Var v, Ord loc) => Type v loc -> B.Blank loc -> v -> M v loc () -instantiateR t _ v | debugEnabled && traceShow ("instantiateR"::String, t, v) False = undefined -instantiateR (Type.stripIntroOuters -> t) blank v = scope (InInstantiateR t v) $ - getContext >>= \ctx -> case Type.monotype t of - Just t -> solve ctx v t >>= \case - Just ctx -> setContext ctx -- InstRSolve - Nothing -> go ctx - Nothing -> go ctx - where - go ctx = case t of - Type.Var' (TypeVar.Existential _ v2) | ordered ctx v v2 -> -- InstRReach (both are existential, set v2 = v) - solve ctx v2 (Type.Monotype (existentialp (loc t) v)) >>= - maybe (failWith $ TypeMismatch ctx) setContext - Type.Arrow' i o -> do -- InstRArrow - [i', o'] <- traverse freshenVar [nameFrom Var.inferInput i, nameFrom Var.inferOutput o] - let s = Solved blank v (Type.Monotype - (Type.arrow (loc t) - (existentialp (loc i) i') - (existentialp (loc o) o'))) - replaceContext (existential v) - [existential o', existential i', s] - ctx <- instantiateL B.Blank i' i >> getContext - instantiateR (apply ctx o) B.Blank o' - Type.App' x y -> do -- analogue of InstRArr - -- example foo a <: v' will - -- 1. 
create foo', a', add these to the context - -- 2. add v' = foo' a' to the context - -- 3. recurse to refine the types of foo' and a' - [x', y'] <- traverse freshenVar [nameFrom Var.inferTypeConstructor x, nameFrom Var.inferTypeConstructorArg y] - let s = Solved blank v (Type.Monotype (Type.app (loc t) (existentialp (loc x) x') (existentialp (loc y) y'))) - replaceContext (existential v) [existential y', existential x', s] - applyM x >>= \x -> instantiateR x B.Blank x' - applyM y >>= \y -> instantiateR y B.Blank y' - Type.Effect1' es vt -> do - es' <- freshenVar (nameFrom Var.inferAbility es) - vt' <- freshenVar (nameFrom Var.inferTypeConstructorArg vt) - let t' = Type.effect1 (loc t) (existentialp (loc es) es') - (existentialp (loc vt) vt') - s = Solved blank v (Type.Monotype t') - replaceContext (existential v) - [existential es', existential vt', s] - applyM es >>= \es -> instantiateR es B.Blank es' - applyM vt >>= \vt -> instantiateR vt B.Blank vt' - Type.Effects' es -> do - es' <- traverse (\e -> freshenVar (nameFrom Var.inferAbility e)) es - let locs = loc <$> es - t' = Type.effects (loc t) (uncurry existentialp <$> locs `zip` es') - s = Solved blank v $ Type.Monotype t' - replaceContext (existential v) - ((existential <$> es') ++ [s]) - Foldable.for_ (es `zip` es') $ \(e, e') -> do - ctx <- getContext - instantiateR (apply ctx e) B.Blank e' - Type.Forall' body -> do -- InstRAIIL - x' <- ABT.freshen body freshenTypeVar - markThenRetract0 x' $ do - appendContext [existential x'] - instantiateR (ABT.bindInheritAnnotation body (existential' () B.Blank x')) B.Blank v - _ -> failWith $ TypeMismatch ctx - --- | solve (ΓL,α^,ΓR) α τ = (ΓL,α^ = τ,ΓR) --- Solve the given existential variable to the given monotype. --- If the given monotype is not well-formed at the context location --- where the existential variable is introduced, return `Nothing`. --- Fail with type mismatch if the existential is already solved to something else. 
--- Fail with a compiler bug if the existential does not appear in the context at all. -solve :: (Var v, Ord loc) => Context v loc -> v -> Monotype v loc -> M v loc (Maybe (Context v loc)) -solve ctx v t = case lookupSolved ctx v of - Just t2 -> - -- okay to solve something again if it's to an identical type - if same t t2 then pure (Just ctx) - else failWith $ TypeMismatch ctx - where same t1 t2 = apply ctx (Type.getPolytype t1) == apply ctx (Type.getPolytype t2) - Nothing -> case breakAt (existential v) ctx of - Just (ctxL, Existential blank v, ctxR) -> - if wellformedType ctxL (Type.getPolytype t) - then Just <$> ctxL `extendN` ((Solved blank v t) : ctxR) - else pure Nothing - _ -> compilerCrash $ UnknownExistentialVariable v ctx - -abilityCheck' :: forall v loc . (Var v, Ord loc) => [Type v loc] -> [Type v loc] -> M v loc () -abilityCheck' [] [] = pure () -abilityCheck' ambient0 requested0 = go ambient0 requested0 where - go _ambient [] = pure () - go ambient0 (r:rs) = do - -- Note: if applyM returns an existential, it's unsolved - ambient <- traverse applyM ambient0 - r <- applyM r - -- 1. Look in ambient for exact match of head of `r` - case find (headMatch r) ambient of - -- 2a. If yes for `a` in ambient, do `subtype amb r` and done. - Just amb -> do - subtype amb r `orElse` die r - go ambient rs - -- 2b. 
If no: - Nothing -> case r of - -- It's an unsolved existential, instantiate it to all of ambient - Type.Var' (TypeVar.Existential b v) -> do - let et2 = Type.effects (loc r) ambient - -- instantiate it to `{}` if can't cover all of ambient - instantiateR et2 b v - `orElse` instantiateR (Type.effects (loc r) []) b v - `orElse` die1 - go ambient rs - _ -> -- find unsolved existential, 'e, that appears in ambient - let unsolveds = (ambient >>= Type.flattenEffects >>= vars) - vars (Type.Var' (TypeVar.Existential b v)) = [(b,v)] - vars _ = [] - in case listToMaybe unsolveds of - Just (b, e') -> do - -- introduce fresh existential 'e2 to context - e2' <- extendExistential e' - let et2 = Type.effects (loc r) [r, existentialp (loc r) e2'] - instantiateR et2 b e' `orElse` die r - go ambient rs - _ -> die r - - headMatch :: Type v loc -> Type v loc -> Bool - headMatch (Type.App' f _) (Type.App' f2 _) = headMatch f f2 - headMatch r r2 = r == r2 - - -- as a last ditch effort, if the request is an existential and there are - -- no remaining unbound existentials left in ambient, we try to instantiate - -- the request to the ambient effect list - die r = case r of - Type.Var' (TypeVar.Existential b v) -> - instantiateL b v (Type.effects (loc r) ambient0) `orElse` die1 - -- instantiateL b v (Type.effects (loc r) []) `orElse` die1 - _ -> die1 -- and if that doesn't work, then we're really toast - - die1 = do - ctx <- getContext - failWith $ AbilityCheckFailure (apply ctx <$> ambient0) - (apply ctx <$> requested0) - ctx - -abilityCheck :: (Var v, Ord loc) => [Type v loc] -> M v loc () -abilityCheck requested = do - ambient <- getAbilities - requested' <- filterM shouldPerformAbilityCheck requested - ctx <- getContext - abilityCheck' (apply ctx <$> ambient >>= Type.flattenEffects) - (apply ctx <$> requested' >>= Type.flattenEffects) - -verifyDataDeclarations :: (Var v, Ord loc) => DataDeclarations v loc -> Result v loc () -verifyDataDeclarations decls = forM_ (Map.toList decls) $ 
\(_ref, decl) -> do - let ctors = DD.constructors decl - forM_ ctors $ \(_ctorName,typ) -> verifyClosed typ id - --- | public interface to the typechecker -synthesizeClosed - :: (Var v, Ord loc) - => [Type v loc] - -> TL.TypeLookup v loc - -> Term v loc - -> Result v loc (Type v loc) -synthesizeClosed abilities lookupType term0 = let - datas = TL.dataDecls lookupType - effects = TL.effectDecls lookupType - term = annotateRefs (TL.typeOfTerm' lookupType) term0 - in case term of - Left missingRef -> - compilerCrashResult (UnknownTermReference missingRef) - Right term -> run [] datas effects $ do - liftResult $ verifyDataDeclarations datas - *> verifyDataDeclarations (DD.toDataDecl <$> effects) - *> verifyClosedTerm term - synthesizeClosed' abilities term - -verifyClosedTerm :: forall v loc . Ord v => Term v loc -> Result v loc () -verifyClosedTerm t = do - ok1 <- verifyClosed t id - let freeTypeVars = Map.toList $ Term.freeTypeVarAnnotations t - reportError (v, locs) = for_ locs $ \loc -> - typeError (UnknownSymbol loc (TypeVar.underlying v)) - for_ freeTypeVars reportError - when (not ok1 || (not . 
null) freeTypeVars) $ compilerBug (OtherBug "impossible") - -verifyClosed :: (Traversable f, Ord v) => ABT.Term f v a -> (v -> v2) -> Result v2 a Bool -verifyClosed t toV2 = - let isBoundIn v t = Set.member v (snd (ABT.annotation t)) - loc t = fst (ABT.annotation t) - go t@(ABT.Var' v) | not (isBoundIn v t) = typeError (UnknownSymbol (loc t) $ toV2 v) - go _ = pure True - in all id <$> ABT.foreachSubterm go (ABT.annotateBound t) - -annotateRefs :: (Applicative f, Var v) - => (Reference -> f (Type.Type v loc)) - -> Term v loc - -> f (Term v loc) -annotateRefs synth = ABT.visit f where - f r@(Term.Ref' h) = Just (Term.ann ra (Term.ref ra h) <$> (ge <$> synth h)) - where ra = ABT.annotation r - ge t = ABT.vmap TypeVar.Universal $ t - f _ = Nothing - -run - :: (Var v, Ord loc, Functor f) - => [Type v loc] - -> DataDeclarations v loc - -> EffectDeclarations v loc - -> MT v loc f a - -> f a -run ambient datas effects m = - fmap fst - . runM m - $ MEnv (Env 1 context0) ambient datas effects [] - -synthesizeClosed' :: (Var v, Ord loc) - => [Type v loc] - -> Term v loc - -> M v loc (Type v loc) -synthesizeClosed' abilities term = do - -- save current context, for restoration when done - ctx0 <- getContext - setContext context0 - (t, ctx) <- markThenRetract (Var.named "start") $ do - -- retract will cause notes to be written out for - -- any `Blank`-tagged existentials passing out of scope - withEffects0 abilities (synthesize term) - setContext ctx0 -- restore the initial context - pure $ generalizeExistentials ctx t - --- Check if the given typechecking action succeeds. -succeeds :: M v loc a -> TotalM v loc Bool -succeeds m = do - e <- ask - case runM m e of - Success _ _ -> pure True - TypeError _ _ -> pure False - CompilerBug bug _ _ -> MT (\_ -> Left bug) - --- Check if `t1` is a subtype of `t2`. Doesn't update the typechecking context. 
-isSubtype' :: (Var v, Ord loc) => Type v loc -> Type v loc -> TotalM v loc Bool -isSubtype' type1 type2 = succeeds $ do - let vars = Set.toList $ Set.union (ABT.freeVars type1) (ABT.freeVars type2) - reserveAll (TypeVar.underlying <$> vars) - appendContext (Var <$> vars) - subtype type1 type2 - --- `isRedundant userType inferredType` returns `True` if the `userType` --- is equal "up to inferred abilities" to `inferredType`. --- --- Example: `userType` is `Nat -> Nat`, `inferredType` is `∀ a . a ->{IO} a`. --- In this case, the signature isn't redundant, and we return --- `False`. --- Example: `userType` is (`∀ a . a -> a`) and inferred is `∀ z e . z ->{e} z`. --- In this case, the signature IS redundant, and we return `True`. -isRedundant - :: (Var v, Ord loc) - => Type v loc - -> Type v loc - -> M v loc Bool -isRedundant userType0 inferredType0 = do - ctx0 <- getContext - -- the inferred type may have some unsolved existentials, which we generalize over - -- before doing the comparison, otherwise it will just test equal to any - -- concrete instantiation of those existentials. For instance, the - -- inferred type `a -> a` for a existential `a` should get generalized - -- to `∀ a . a -> a` before comparison to `Nat -> Nat`, otherwise the - -- typechecker will solve `a = Nat` and call the types equal! - userType <- existentializeArrows userType0 - inferredType <- generalizeExistentials' <$> applyM inferredType0 - -- We already know `inferred <: userType`, otherwise the user's given - -- type would have caused the program not to typecheck! Ex: if user writes - -- `: Nat -> Nat` when it has an inferred type of `a -> a`. So we only - -- need to check the other direction to determine redundancy. 
- (liftTotalM $ isSubtype' userType inferredType) <* setContext ctx0 - --- Public interface to `isSubtype` -isSubtype - :: (Var v, Ord loc) => Type v loc -> Type v loc -> Either (CompilerBug v loc) Bool -isSubtype t1 t2 = - run [] Map.empty Map.empty (isSubtype' t1 t2) - -isEqual - :: (Var v, Ord loc) => Type v loc -> Type v loc -> Either (CompilerBug v loc) Bool -isEqual t1 t2 = - (&&) <$> isSubtype t1 t2 <*> isSubtype t2 t1 - -instance (Var v) => Show (Element v loc) where - show (Var v) = case v of - TypeVar.Universal x -> "@" <> show x - TypeVar.Existential _ x -> "'" ++ show x - show (Solved _ v t) = "'"++Text.unpack (Var.name v)++" = "++TP.pretty' Nothing mempty (Type.getPolytype t) - show (Ann v t) = Text.unpack (Var.name v) ++ " : " ++ - TP.pretty' Nothing mempty t - show (Marker v) = "|"++Text.unpack (Var.name v)++"|" - -instance (Ord loc, Var v) => Show (Context v loc) where - show ctx@(Context es) = "Γ\n " ++ (intercalate "\n " . map (showElem ctx . fst)) (reverse es) - where - showElem _ctx (Var v) = case v of - TypeVar.Universal x -> "@" <> show x - TypeVar.Existential _ x -> "'" ++ show x - showElem ctx (Solved _ v (Type.Monotype t)) = "'"++Text.unpack (Var.name v)++" = "++ TP.pretty' Nothing mempty (apply ctx t) - showElem ctx (Ann v t) = Text.unpack (Var.name v) ++ " : " ++ TP.pretty' Nothing mempty (apply ctx t) - showElem _ (Marker v) = "|"++Text.unpack (Var.name v)++"|" - --- MEnv v loc -> (Seq (ErrorNote v loc), (a, Env v loc)) -instance Monad f => Monad (MT v loc f) where - return a = MT (\menv -> pure (a, env menv)) - m >>= f = MT go where - go menv = do - (a, env1) <- runM m menv - runM (f a) (menv { env = env1 }) - -instance Monad f => MonadFail.MonadFail (MT v loc f) where - fail = error - -instance Monad f => Applicative (MT v loc f) where - pure a = MT (\menv -> pure (a, env menv)) - (<*>) = ap - -instance Functor f => Functor (MT v loc f) where - fmap f (MT m) = MT (\menv -> fmap (first f) (m menv)) - -instance Monad f => MonadReader 
(MEnv v loc) (MT v loc f) where - ask = MT (\e -> pure (e, env e)) - local f m = MT $ runM m . f diff --git a/parser-typechecker/src/Unison/Typechecker/Extractor.hs b/parser-typechecker/src/Unison/Typechecker/Extractor.hs deleted file mode 100644 index edd3db69b4..0000000000 --- a/parser-typechecker/src/Unison/Typechecker/Extractor.hs +++ /dev/null @@ -1,343 +0,0 @@ -module Unison.Typechecker.Extractor where - -import Unison.Prelude hiding (whenM) - -import Control.Monad.Reader -import qualified Data.List as List -import Data.List.NonEmpty ( NonEmpty ) -import qualified Data.Set as Set -import Unison.Reference ( Reference ) -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.Typechecker.Context as C -import Unison.Util.Monoid ( whenM ) -import qualified Unison.Blank as B -import Unison.Var (Var) -import qualified Unison.Var as Var -import Unison.Type (Type) - -type RedundantTypeAnnotation = Bool - -type Extractor e a = MaybeT (Reader e) a - -type ErrorExtractor v loc a = Extractor (C.ErrorNote v loc) a - -type InfoExtractor v loc a = Extractor (C.InfoNote v loc) a - -type PathExtractor v loc a = Extractor (C.PathElement v loc) a - -type SubseqExtractor v loc a = SubseqExtractor' (C.ErrorNote v loc) a - -extractor :: (e -> Maybe a) -> Extractor e a -extractor = MaybeT . reader - -extract :: Extractor e a -> e -> Maybe a -extract = runReader . 
runMaybeT - -subseqExtractor :: (C.ErrorNote v loc -> [Ranged a]) -> SubseqExtractor v loc a -subseqExtractor f = SubseqExtractor' f - -traceSubseq :: Show a => String -> SubseqExtractor' n a -> SubseqExtractor' n a -traceSubseq s ex = SubseqExtractor' $ \n -> - let rs = runSubseq ex n - in trace (if null s then show rs else s ++ ": " ++ show rs) rs - -traceNote - :: Show a => String -> ErrorExtractor v loc a -> ErrorExtractor v loc a -traceNote s ex = extractor $ \n -> - let result = extract ex n - in trace (if null s then show result else s ++ ": " ++ show result) result - -unique :: SubseqExtractor v loc a -> ErrorExtractor v loc a -unique ex = extractor $ \note -> case runSubseq ex note of - [Pure a ] -> Just a - [Ranged a _ _] -> Just a - _ -> Nothing - -data SubseqExtractor' n a = - SubseqExtractor' { runSubseq :: n -> [Ranged a] } - -data Ranged a - = Pure a - | Ranged { get :: a, start :: Int, end :: Int } - deriving (Functor, Show) - --- | collects the regions where `xa` doesn't match / aka invert a set of intervals --- unused, but don't want to delete it yet - Aug 30, 2018 -_no :: SubseqExtractor' n a -> SubseqExtractor' n () -_no xa = SubseqExtractor' $ \note -> - let as = runSubseq xa note - in if null [ a | Pure a <- as ] - then -- results are not full - if null as - then [Pure ()] -- results are empty, make them full - -- not full and not empty, find the negation - else reverse . fst $ foldl' go - ([], Nothing) - (List.sort $ fmap toPairs as) - else [] -- results were full, make them empty - where - toPairs :: Ranged a -> (Int, Int) - toPairs (Pure _ ) = error "this case should be avoided by the if!" 
- toPairs (Ranged _ start end) = (start, end) - - go :: ([Ranged ()], Maybe Int) -> (Int, Int) -> ([Ranged ()], Maybe Int) - go ([] , Nothing) (0, r) = ([], Just (r + 1)) - go ([] , Nothing) (l, r) = ([Ranged () 0 (l - 1)], Just r) - go (_ : _, Nothing) _ = error "state machine bug in Extractor2.no" - go (rs, Just r0) (l, r) = - (if r0 + 1 <= l - 1 then Ranged () (r0 + 1) (l - 1) : rs else rs, Just r) - --- unused / untested -_any :: SubseqExtractor v loc () -_any = _any' (\n -> pathLength n - 1) - where - pathLength :: C.ErrorNote v loc -> Int - pathLength = length . toList . C.path - -_any' :: (n -> Int) -> SubseqExtractor' n () -_any' getLast = SubseqExtractor' $ \note -> Pure () : do - let last = getLast note - start <- [0 .. last] - end <- [0 .. last] - pure $ Ranged () start end - --- Kind of a newtype for Ranged.Ranged. --- The Eq instance ignores the embedded value -data DistinctRanged a = DistinctRanged a Int Int -instance Eq (DistinctRanged a) where - DistinctRanged _ l r == DistinctRanged _ l' r' = l == l' && r == r' -instance Ord (DistinctRanged a) where - DistinctRanged _ l r <= DistinctRanged _ l' r' = - l < l' || (l == l' && r <= r') - --- todo: this could return NonEmpty -some :: forall n a . SubseqExtractor' n a -> SubseqExtractor' n [a] -some xa = SubseqExtractor' $ \note -> - let as :: [Ranged a] - as = runSubseq xa note - -- Given a list of subseqs [Ranged a], find the adjacent groups [Ranged [a]]. - -- `Pure`s arguably can't be adjacent; not sure what to do with them. Currently ignored. - in fmap reverse <$> go Set.empty as - where - fromDistinct (DistinctRanged a l r) = Ranged a l r - go :: Set (DistinctRanged [a]) -> [Ranged a] -> [Ranged [a]] - go seen [] = fmap fromDistinct . toList $ seen - go seen (rh@(Ranged h start end) : t) = - let seen' :: Set (DistinctRanged [a]) - seen' = - Set.fromList . join . fmap (toList . consRange rh) . 
toList $ seen - in go (Set.insert (DistinctRanged [h] start end) seen `Set.union` seen') t - go seen (Pure _ : t) = go seen t - - consRange :: Ranged a -> DistinctRanged [a] -> Maybe (DistinctRanged [a]) - consRange new group@(DistinctRanged as start' _) = if isAdjacent group new - then Just (DistinctRanged (get new : as) start' (end new)) - else Nothing - - -- Returns true if inputs are adjacent Ranged regions - -- Question: Should a Pure be considered adjacent? - isAdjacent :: forall a b . DistinctRanged a -> Ranged b -> Bool - isAdjacent (DistinctRanged _ _ endA) (Ranged _ startB _) = endA + 1 == startB - isAdjacent _ _ = False - -pathStart :: SubseqExtractor' n () -pathStart = SubseqExtractor' $ \_ -> [Ranged () (-1) (-1)] - --- Scopes -- -asPathExtractor :: (C.PathElement v loc -> Maybe a) -> SubseqExtractor v loc a -asPathExtractor = fromPathExtractor . extractor - where - fromPathExtractor :: PathExtractor v loc a -> SubseqExtractor v loc a - fromPathExtractor ex = - subseqExtractor $ join . fmap go . (`zip` [0 ..]) . toList . 
C.path - where - go (e, i) = case extract ex e of - Just a -> [Ranged a i i] - Nothing -> [] - -inSynthesize :: SubseqExtractor v loc (C.Term v loc) -inSynthesize = asPathExtractor $ \case - C.InSynthesize t -> Just t - _ -> Nothing - -inSubtype :: SubseqExtractor v loc (C.Type v loc, C.Type v loc) -inSubtype = asPathExtractor $ \case - C.InSubtype found expected -> Just (found, expected) - _ -> Nothing - -inCheck :: SubseqExtractor v loc (C.Term v loc, C.Type v loc) -inCheck = asPathExtractor $ \case - C.InCheck e t -> Just (e, t) - _ -> Nothing - --- inInstantiateL --- inInstantiateR - -inSynthesizeApp :: SubseqExtractor v loc (C.Type v loc, C.Term v loc, Int) -inSynthesizeApp = asPathExtractor $ \case - C.InSynthesizeApp t e n -> Just (t, e, n) - _ -> Nothing - -inFunctionCall - :: SubseqExtractor v loc ([v], C.Term v loc, C.Type v loc, [C.Term v loc]) -inFunctionCall = asPathExtractor $ \case - C.InFunctionCall vs f ft e -> case f of - Term.Ann' f _ -> Just (vs, f, ft, e) - f -> Just (vs, f, ft, e) - _ -> Nothing - -inAndApp, inOrApp, inIfCond, inMatchGuard, inMatchBody - :: SubseqExtractor v loc () -inAndApp = asPathExtractor $ \case - C.InAndApp -> Just () - _ -> Nothing -inOrApp = asPathExtractor $ \case - C.InOrApp -> Just () - _ -> Nothing -inIfCond = asPathExtractor $ \case - C.InIfCond -> Just () - _ -> Nothing -inMatchGuard = asPathExtractor $ \case - C.InMatchGuard -> Just () - _ -> Nothing -inMatchBody = asPathExtractor $ \case - C.InMatchBody -> Just () - _ -> Nothing - -inMatch, inVector, inIfBody :: SubseqExtractor v loc loc -inMatch = asPathExtractor $ \case - C.InMatch loc -> Just loc - _ -> Nothing -inVector = asPathExtractor $ \case - C.InVectorApp loc -> Just loc - _ -> Nothing -inIfBody = asPathExtractor $ \case - C.InIfBody loc -> Just loc - _ -> Nothing - --- Causes -- -cause :: ErrorExtractor v loc (C.Cause v loc) -cause = extractor $ pure . 
C.cause - -duplicateDefinitions :: ErrorExtractor v loc (NonEmpty (v, [loc])) -duplicateDefinitions = cause >>= \case - C.DuplicateDefinitions vs -> pure vs - _ -> mzero - -typeMismatch :: ErrorExtractor v loc (C.Context v loc) -typeMismatch = cause >>= \case - C.TypeMismatch c -> pure c - _ -> mzero - -illFormedType :: ErrorExtractor v loc (C.Context v loc) -illFormedType = cause >>= \case - C.IllFormedType c -> pure c - _ -> mzero - -unknownSymbol :: ErrorExtractor v loc (loc, v) -unknownSymbol = cause >>= \case - C.UnknownSymbol loc v -> pure (loc, v) - _ -> mzero - -unknownTerm :: Var v => ErrorExtractor v loc (loc, v, [C.Suggestion v loc], C.Type v loc) -unknownTerm = cause >>= \case - C.UnknownTerm loc v suggestions expectedType -> do - let k = Var.Inference Var.Ability - cleanup = Type.cleanup . Type.removePureEffects . Type.generalize' k - pure (loc, v, suggestions, cleanup expectedType) - _ -> mzero - -abilityCheckFailure - :: ErrorExtractor v loc ([C.Type v loc], [C.Type v loc], C.Context v loc) -abilityCheckFailure = cause >>= \case - C.AbilityCheckFailure ambient requested ctx -> pure (ambient, requested, ctx) - _ -> mzero - -effectConstructorWrongArgCount - :: ErrorExtractor - v - loc - (C.ExpectedArgCount, C.ActualArgCount, Reference, C.ConstructorId) -effectConstructorWrongArgCount = cause >>= \case - C.EffectConstructorWrongArgCount expected actual r cid -> - pure (expected, actual, r, cid) - _ -> mzero - -malformedEffectBind - :: ErrorExtractor v loc (C.Type v loc, C.Type v loc, [C.Type v loc]) -malformedEffectBind = cause >>= \case - C.MalformedEffectBind ctor ctorResult es -> pure (ctor, ctorResult, es) - _ -> mzero - -solvedBlank :: InfoExtractor v loc (B.Recorded loc, v, C.Type v loc) -solvedBlank = extractor $ \n -> case n of - C.SolvedBlank b v t -> pure (b, v, t) - _ -> mzero - --- Misc -- -errorNote :: ErrorExtractor v loc (C.ErrorNote v loc) -errorNote = extractor $ Just . 
id - -infoNote :: InfoExtractor v loc (C.InfoNote v loc) -infoNote = extractor $ Just . id - -innermostTerm :: ErrorExtractor v loc (C.Term v loc) -innermostTerm = extractor $ \n -> case C.innermostErrorTerm n of - Just e -> pure e - Nothing -> mzero - -path :: ErrorExtractor v loc [C.PathElement v loc] -path = extractor $ pure . toList . C.path - --- Informational notes -- -topLevelComponent - :: InfoExtractor - v - loc - [(v, Type v loc, RedundantTypeAnnotation)] -topLevelComponent = extractor go - where - go (C.TopLevelComponent c) = Just c - go _ = Nothing - -instance Functor (SubseqExtractor' n) where - fmap = liftM - -instance Applicative (SubseqExtractor' n) where - pure = return - (<*>) = ap - -instance MonadFail (SubseqExtractor' n) where - fail _ = mzero - -instance Monad (SubseqExtractor' n) where - return a = SubseqExtractor' $ \_ -> [Pure a] - xa >>= f = SubseqExtractor' $ \note -> - let as = runSubseq xa note in do - ra <- as - case ra of - Pure a -> runSubseq (f a) note - Ranged a startA endA -> - let rbs = runSubseq (f a) note in do - rb <- rbs - case rb of - Pure b -> pure (Ranged b startA endA) - Ranged b startB endB -> - whenM (startB == endA + 1) (pure (Ranged b startA endB)) - -instance Alternative (SubseqExtractor' n) where - empty = mzero - (<|>) = mplus - -instance MonadPlus (SubseqExtractor' n) where - mzero = SubseqExtractor' $ \_ -> [] - mplus (SubseqExtractor' f1) (SubseqExtractor' f2) = - SubseqExtractor' (\n -> f1 n `mplus` f2 n) - -instance Monoid (SubseqExtractor' n a) where - mempty = mzero - mappend = mplus - -instance Semigroup (SubseqExtractor' n a) where - (<>) = mappend diff --git a/parser-typechecker/src/Unison/Typechecker/TypeError.hs b/parser-typechecker/src/Unison/Typechecker/TypeError.hs deleted file mode 100644 index 8a6def305f..0000000000 --- a/parser-typechecker/src/Unison/Typechecker/TypeError.hs +++ /dev/null @@ -1,298 +0,0 @@ -{-# LANGUAGE BangPatterns #-} - -module Unison.Typechecker.TypeError where - -import 
Unison.Prelude hiding (whenM) - -import Data.Bifunctor (second) -import Data.List.NonEmpty (NonEmpty) -import Prelude hiding (all, and, or) -import qualified Unison.ABT as ABT -import qualified Unison.Type as Type -import qualified Unison.Typechecker.Context as C -import qualified Unison.Typechecker.Extractor as Ex -import Unison.Util.Monoid (whenM) -import Unison.Var (Var) -import Unison.Type (Type) - -data BooleanMismatch = CondMismatch | AndMismatch | OrMismatch | GuardMismatch - deriving Show - -data ExistentialMismatch = IfBody | VectorBody | CaseBody - deriving Show - -data TypeError v loc - = Mismatch { foundType :: C.Type v loc -- overallType1 - , expectedType :: C.Type v loc -- overallType2 - , foundLeaf :: C.Type v loc -- leaf1 - , expectedLeaf :: C.Type v loc -- leaf2 - , mismatchSite :: C.Term v loc - , note :: C.ErrorNote v loc - } - | BooleanMismatch { getBooleanMismatch :: BooleanMismatch - , mismatchSite :: C.Term v loc - , foundType :: C.Type v loc - , note :: C.ErrorNote v loc - } - | ExistentialMismatch { getExistentialMismatch :: ExistentialMismatch - , expectedType :: C.Type v loc - , expectedLoc :: loc - , foundType :: C.Type v loc - , mismatchSite :: C.Term v loc - , note :: C.ErrorNote v loc - } - | FunctionApplication { f :: C.Term v loc - , ft :: C.Type v loc - , arg :: C.Term v loc - , argNum :: Int - , foundType :: C.Type v loc - , expectedType :: C.Type v loc - , leafs :: Maybe (C.Type v loc, C.Type v loc) -- found, expected - , solvedVars :: [(v, C.Type v loc)] - , note :: C.ErrorNote v loc - } - | NotFunctionApplication { f :: C.Term v loc - , ft :: C.Type v loc - , note :: C.ErrorNote v loc - } - | AbilityCheckFailure { ambient :: [C.Type v loc] - , requested :: [C.Type v loc] - , abilityCheckFailureSite :: loc - , note :: C.ErrorNote v loc - } - | UnguardedLetRecCycle { cycle :: [v] - , cycleLocs :: [loc] - , note :: C.ErrorNote v loc } - | UnknownType { unknownTypeV :: v - , typeSite :: loc - , note :: C.ErrorNote v loc - } - | 
UnknownTerm { unknownTermV :: v - , termSite :: loc - , suggestions :: [C.Suggestion v loc] - , expectedType :: C.Type v loc - , note :: C.ErrorNote v loc - } - | DuplicateDefinitions { defns :: NonEmpty (v, [loc]) - , note :: C.ErrorNote v loc - } - | Other (C.ErrorNote v loc) - deriving (Show) - -type RedundantTypeAnnotation = Bool - -data TypeInfo v loc = - TopLevelComponent - { definitions :: [(v, Type v loc, RedundantTypeAnnotation)] } - deriving (Show) - -type TypeNote v loc = Either (TypeError v loc) (TypeInfo v loc) - -typeErrorFromNote - :: (Ord loc, Show loc, Var v) => C.ErrorNote v loc -> TypeError v loc -typeErrorFromNote n = case Ex.extract allErrors n of - Just msg -> msg - Nothing -> Other n - -typeInfoFromNote - :: (Ord loc, Show loc, Var v) => C.InfoNote v loc -> Maybe (TypeInfo v loc) -typeInfoFromNote n = case n of - C.TopLevelComponent defs -> Just $ TopLevelComponent defs - _ -> Nothing - -allErrors - :: (Var v, Ord loc) => Ex.ErrorExtractor v loc (TypeError v loc) -allErrors = asum - [ and - , or - , cond - , matchGuard - , ifBody - , vectorBody - , matchBody - , applyingFunction - , applyingNonFunction - , generalMismatch - , abilityCheckFailure - , unguardedCycle - , unknownType - , unknownTerm - , duplicateDefinitions - ] - -topLevelComponent :: Ex.InfoExtractor v a (TypeInfo v a) -topLevelComponent = do - defs <- Ex.topLevelComponent - pure $ TopLevelComponent defs - -abilityCheckFailure :: Ex.ErrorExtractor v a (TypeError v a) -abilityCheckFailure = do - (ambient, requested, _ctx) <- Ex.abilityCheckFailure - e <- Ex.innermostTerm - n <- Ex.errorNote - pure $ AbilityCheckFailure ambient requested (ABT.annotation e) n - -duplicateDefinitions :: Ex.ErrorExtractor v a (TypeError v a) -duplicateDefinitions = do - vs <- Ex.duplicateDefinitions - n <- Ex.errorNote - pure $ DuplicateDefinitions vs n - -unknownType :: Ex.ErrorExtractor v loc (TypeError v loc) -unknownType = do - (loc, v) <- Ex.unknownSymbol - n <- Ex.errorNote - pure $ UnknownType 
v loc n - -unknownTerm :: Var v => Ex.ErrorExtractor v loc (TypeError v loc) -unknownTerm = do - (loc, v, suggs, typ) <- Ex.unknownTerm - n <- Ex.errorNote - pure $ UnknownTerm v loc suggs (Type.cleanup typ) n - -generalMismatch :: (Var v, Ord loc) => Ex.ErrorExtractor v loc (TypeError v loc) -generalMismatch = do - ctx <- Ex.typeMismatch - let sub t = C.apply ctx t - - subtypes :: Ex.ErrorExtractor v loc [(C.Type v loc, C.Type v loc)] - subtypes = do - path <- Ex.path - pure [ (t1, t2) | C.InSubtype t1 t2 <- path ] - - firstLastSubtype :: Ex.ErrorExtractor v loc ( (C.Type v loc, C.Type v loc) - , (C.Type v loc, C.Type v loc) ) - firstLastSubtype = subtypes >>= \case - [] -> empty - l -> pure (head l, last l) - n <- Ex.errorNote - mismatchSite <- Ex.innermostTerm - ((foundLeaf, expectedLeaf), (foundType, expectedType)) <- firstLastSubtype - let [ft, et, fl, el] = Type.cleanups [sub foundType, sub expectedType, - sub foundLeaf, sub expectedLeaf] - pure $ Mismatch ft et fl el mismatchSite n - - -and,or,cond,matchGuard - :: (Var v, Ord loc) - => Ex.ErrorExtractor v loc (TypeError v loc) -and = booleanMismatch0 AndMismatch (Ex.inSynthesizeApp >> Ex.inAndApp) -or = booleanMismatch0 OrMismatch (Ex.inSynthesizeApp >> Ex.inOrApp) -cond = booleanMismatch0 CondMismatch Ex.inIfCond -matchGuard = booleanMismatch0 GuardMismatch Ex.inMatchGuard - -unguardedCycle :: Ex.ErrorExtractor v loc (TypeError v loc) -unguardedCycle = do - n <- Ex.errorNote - C.UnguardedLetRecCycle vs es <- Ex.cause - let loc = ABT.annotation . 
snd <$> es - pure $ UnguardedLetRecCycle vs loc n - --- | helper function to support `and` / `or` / `cond` -booleanMismatch0 :: (Var v, Ord loc) - => BooleanMismatch - -> Ex.SubseqExtractor v loc () - -> Ex.ErrorExtractor v loc (TypeError v loc) -booleanMismatch0 b ex = do - n <- Ex.errorNote - ctx <- Ex.typeMismatch - let sub t = C.apply ctx t - mismatchSite <- Ex.innermostTerm - foundType <- Ex.unique $ do - Ex.pathStart - (foundType, _, _) <- inSubtypes - void $ Ex.some Ex.inCheck - ex - pure $ Type.cleanup foundType - pure (BooleanMismatch b mismatchSite (sub foundType) n) - -existentialMismatch0 - :: (Var v, Ord loc) - => ExistentialMismatch - -> Ex.SubseqExtractor v loc loc - -> Ex.ErrorExtractor v loc (TypeError v loc) -existentialMismatch0 em getExpectedLoc = do - n <- Ex.errorNote - ctx <- Ex.typeMismatch - let sub t = C.apply ctx t - mismatchSite <- Ex.innermostTerm - ([foundType, expectedType], expectedLoc) <- Ex.unique $ do - Ex.pathStart - subtypes@(_:_) <- Ex.some Ex.inSubtype - let (foundType, expectedType) = last subtypes - void $ Ex.some Ex.inCheck - expectedLoc <- getExpectedLoc - pure (Type.cleanups [foundType, expectedType], expectedLoc) - pure $ ExistentialMismatch em (sub expectedType) expectedLoc - (sub foundType) mismatchSite - -- todo : save type leaves too - n - -ifBody, vectorBody, matchBody - :: (Var v, Ord loc) => Ex.ErrorExtractor v loc (TypeError v loc) -ifBody = existentialMismatch0 IfBody (Ex.inSynthesizeApp >> Ex.inIfBody) -vectorBody = existentialMismatch0 VectorBody (Ex.inSynthesizeApp >> Ex.inVector) -matchBody = existentialMismatch0 CaseBody (Ex.inMatchBody >> Ex.inMatch) - -applyingNonFunction :: Var v => Ex.ErrorExtractor v loc (TypeError v loc) -applyingNonFunction = do - _ <- Ex.typeMismatch - n <- Ex.errorNote - (f, ft) <- Ex.unique $ do - Ex.pathStart - (arity0Type, _arg, _argNum) <- Ex.inSynthesizeApp - (_, f, ft, args) <- Ex.inFunctionCall - let expectedArgCount = Type.arity ft - foundArgCount = length args - -- 
unexpectedArgLoc = ABT.annotation arg - whenM (expectedArgCount < foundArgCount) $ pure (f, arity0Type) - pure $ NotFunctionApplication f (Type.cleanup ft) n - --- | Want to collect this info: - -- The `n`th argument to `f` is `foundType`, but I was expecting `expectedType`. - -- - -- 30 | asdf asdf asdf - -- - -- If you're curious - -- `f` has type `blah`, where - -- `a` was chosen as `A` - -- `b` was chosen as `B` - -- `c` was chosen as `C` - -- (many colors / groups) -applyingFunction :: forall v loc. (Var v) => Ex.ErrorExtractor v loc (TypeError v loc) -applyingFunction = do - n <- Ex.errorNote - ctx <- Ex.typeMismatch - Ex.unique $ do - Ex.pathStart - -- todo: make a new extrator for (some inSubtype) that pulls out the head and tail and nothing in between? - (found, expected, leafs) <- inSubtypes - arg <- fst . head <$> Ex.some Ex.inCheck - (_, _, argIndex) <- Ex.inSynthesizeApp - (typeVars, f, ft, _args) <- Ex.inFunctionCall - let go :: v -> Maybe (v, C.Type v loc) - go v = (v,) . Type.getPolytype <$> C.lookupSolved ctx v - solvedVars = catMaybes (go <$> typeVars) - let vm = Type.cleanupVarsMap $ [ft, found, expected] - <> (fst <$> toList leafs) - <> (snd <$> toList leafs) - <> (snd <$> solvedVars) - cleanup = Type.cleanupVars1' vm . 
Type.cleanupAbilityLists - pure $ FunctionApplication f (cleanup ft) - arg argIndex - (cleanup found) - (cleanup expected) - ((\(a,b) -> (cleanup a, cleanup b)) <$> leafs) - (second cleanup <$> solvedVars) - n - -inSubtypes :: Ex.SubseqExtractor v loc (C.Type v loc, - C.Type v loc, - Maybe (C.Type v loc, C.Type v loc)) -inSubtypes = do - subtypes <- Ex.some Ex.inSubtype - let ((found, expected), leaves) = case subtypes of - [] -> error "unpossible: Ex.some should only succeed on nonnull output" - [(found, expected)] -> ((found, expected), Nothing) - _ -> (last subtypes, Just $ head subtypes) - pure (found, expected, leaves) diff --git a/parser-typechecker/src/Unison/Typechecker/TypeLookup.hs b/parser-typechecker/src/Unison/Typechecker/TypeLookup.hs deleted file mode 100644 index 2925e7c005..0000000000 --- a/parser-typechecker/src/Unison/Typechecker/TypeLookup.hs +++ /dev/null @@ -1,66 +0,0 @@ -module Unison.Typechecker.TypeLookup where - -import Unison.Prelude - -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import Unison.Type (Type) -import qualified Data.Map as Map -import qualified Unison.ConstructorType as CT -import qualified Unison.DataDeclaration as DD -import Unison.DataDeclaration (EffectDeclaration, DataDeclaration) -import qualified Unison.Referent as Referent - --- Used for typechecking. 
-data TypeLookup v a = - TypeLookup { typeOfTerms :: Map Reference (Type v a) - , dataDecls :: Map Reference (DataDeclaration v a) - , effectDecls :: Map Reference (EffectDeclaration v a) } - deriving Show - -typeOfReferent :: TypeLookup v a -> Referent -> Maybe (Type v a) -typeOfReferent tl r = case r of - Referent.Ref r -> typeOfTerm tl r - Referent.Con r cid CT.Data -> typeOfDataConstructor tl r cid - Referent.Con r cid CT.Effect -> typeOfEffectConstructor tl r cid - --- bombs if not found -unsafeConstructorType :: TypeLookup v a -> Reference -> CT.ConstructorType -unsafeConstructorType tl r = fromMaybe - (error $ "no constructor type for " <> show r) - (constructorType tl r) - -constructorType :: TypeLookup v a -> Reference -> Maybe CT.ConstructorType -constructorType tl r = - (const CT.Data <$> Map.lookup r (dataDecls tl)) <|> - (const CT.Effect <$> Map.lookup r (effectDecls tl)) - -typeOfDataConstructor :: TypeLookup v a -> Reference -> Int -> Maybe (Type v a) -typeOfDataConstructor tl r cid = go =<< Map.lookup r (dataDecls tl) - where go dd = DD.typeOfConstructor dd cid - -typeOfEffectConstructor :: TypeLookup v a -> Reference -> Int -> Maybe (Type v a) -typeOfEffectConstructor tl r cid = go =<< Map.lookup r (effectDecls tl) - where go dd = DD.typeOfConstructor (DD.toDataDecl dd) cid - -typeOfTerm :: TypeLookup v a -> Reference -> Maybe (Type v a) -typeOfTerm tl r = Map.lookup r (typeOfTerms tl) - -typeOfTerm' :: TypeLookup v a -> Reference -> Either Reference (Type v a) -typeOfTerm' tl r = case Map.lookup r (typeOfTerms tl) of - Nothing -> Left r - Just a -> Right a - -instance Semigroup (TypeLookup v a) where (<>) = mappend - -instance Monoid (TypeLookup v a) where - mempty = TypeLookup mempty mempty mempty - mappend (TypeLookup a b c) (TypeLookup a2 b2 c2) = - TypeLookup (a <> a2) (b <> b2) (c <> c2) - -instance Functor (TypeLookup v) where - fmap f tl = - TypeLookup - (fmap f <$> typeOfTerms tl) - (fmap f <$> dataDecls tl) - (fmap f <$> effectDecls tl) 
diff --git a/parser-typechecker/src/Unison/Typechecker/TypeVar.hs b/parser-typechecker/src/Unison/Typechecker/TypeVar.hs deleted file mode 100644 index b24a0cacbc..0000000000 --- a/parser-typechecker/src/Unison/Typechecker/TypeVar.hs +++ /dev/null @@ -1,53 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Typechecker.TypeVar where - -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import qualified Unison.Term as Term -import Unison.Term (Term, Term') -import Unison.Type (Type) -import Unison.Var (Var) -import qualified Unison.Var as Var - -data TypeVar b v = Universal v | Existential b v deriving (Functor) - -instance Eq v => Eq (TypeVar b v) where - Universal v == Universal v2 = v == v2 - Existential _ v == Existential _ v2 = v == v2 - _ == _ = False - -instance Ord v => Ord (TypeVar b v) where - Universal v `compare` Universal v2 = compare v v2 - Existential _ v `compare` Existential _ v2 = compare v v2 - Universal _ `compare` Existential _ _ = LT - _ `compare` _ = GT - -underlying :: TypeVar b v -> v -underlying (Universal v) = v -underlying (Existential _ v) = v - -instance Show v => Show (TypeVar b v) where - show (Universal v) = show v - show (Existential _ v) = "'" ++ show v - -instance ABT.Var v => ABT.Var (TypeVar b v) where - freshIn s v = ABT.freshIn (Set.map underlying s) <$> v - -instance Var v => Var (TypeVar b v) where - typed t = Universal (Var.typed t) - typeOf v = Var.typeOf (underlying v) - freshId v = Var.freshId (underlying v) - freshenId id v = Var.freshenId id <$> v - -liftType :: Ord v => Type v a -> Type (TypeVar b v) a -liftType = ABT.vmap Universal - -lowerType :: Ord v => Type (TypeVar b v) a -> Type v a -lowerType = ABT.vmap underlying - -liftTerm :: Ord v => Term v a -> Term' (TypeVar b v) v a -liftTerm = Term.vtmap Universal - -lowerTerm :: Ord v => Term' (TypeVar b v) v a -> Term v a -lowerTerm = Term.vtmap underlying diff --git a/parser-typechecker/src/Unison/UnisonFile.hs 
b/parser-typechecker/src/Unison/UnisonFile.hs deleted file mode 100644 index 60db70c47b..0000000000 --- a/parser-typechecker/src/Unison/UnisonFile.hs +++ /dev/null @@ -1,369 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE PatternSynonyms #-} - -module Unison.UnisonFile where - -import Unison.Prelude - -import Control.Lens -import Data.Bifunctor (second, first) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import qualified Unison.ConstructorType as CT -import Unison.DataDeclaration (DataDeclaration) -import Unison.DataDeclaration (EffectDeclaration(..)) -import Unison.DataDeclaration (hashDecls) -import qualified Unison.DataDeclaration as DD -import qualified Unison.Builtin.Decls as DD -import qualified Unison.Name as Name -import qualified Unison.Names3 as Names -import Unison.Reference (Reference) -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import Unison.Term (Term) -import qualified Unison.Term as Term -import Unison.Type (Type) -import qualified Unison.Type as Type -import qualified Unison.Util.List as List -import Unison.Util.Relation (Relation) -import qualified Unison.Util.Relation as Relation -import Unison.Var (Var) -import qualified Unison.Var as Var -import qualified Unison.Typechecker.TypeLookup as TL -import Unison.Names3 (Names0) -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency (LabeledDependency) --- import qualified Unison.Typechecker.Components as Components - -data UnisonFile v a = UnisonFileId { - dataDeclarationsId :: Map v (Reference.Id, DataDeclaration v a), - effectDeclarationsId :: Map v (Reference.Id, EffectDeclaration v a), - terms :: [(v, Term v a)], - watches :: Map WatchKind [(v, Term v a)] -} deriving Show - -pattern UnisonFile ds es tms ws <- - UnisonFileId (fmap (first Reference.DerivedId) -> ds) - (fmap (first 
Reference.DerivedId) -> es) - tms - ws -{-# COMPLETE UnisonFile #-} - -dataDeclarations :: UnisonFile v a -> Map v (Reference, DataDeclaration v a) -dataDeclarations = fmap (first Reference.DerivedId) . dataDeclarationsId - -effectDeclarations :: UnisonFile v a -> Map v (Reference, EffectDeclaration v a) -effectDeclarations = fmap (first Reference.DerivedId) . effectDeclarationsId - -watchesOfKind :: WatchKind -> UnisonFile v a -> [(v, Term v a)] -watchesOfKind kind uf = Map.findWithDefault [] kind (watches uf) - -watchesOfOtherKinds :: WatchKind -> UnisonFile v a -> [(v, Term v a)] -watchesOfOtherKinds kind uf = - join [ ws | (k, ws) <- Map.toList (watches uf), k /= kind ] - -allWatches :: UnisonFile v a -> [(v, Term v a)] -allWatches = join . Map.elems . watches - -type WatchKind = Var.WatchKind -pattern RegularWatch = Var.RegularWatch -pattern TestWatch = Var.TestWatch - --- Converts a file to a single let rec with a body of `()`, for --- purposes of typechecking. -typecheckingTerm :: (Var v, Monoid a) => UnisonFile v a -> Term v a -typecheckingTerm uf = - Term.letRec' True (terms uf <> testWatches <> watchesOfOtherKinds TestWatch uf) $ - DD.unitTerm mempty - where - -- we make sure each test has type Test.Result - f w = let wa = ABT.annotation w in Term.ann wa w (DD.testResultType wa) - testWatches = map (second f) $ watchesOfKind TestWatch uf - --- Converts a file and a body to a single let rec with the given body. -uberTerm' :: (Var v, Monoid a) => UnisonFile v a -> Term v a -> Term v a -uberTerm' uf body = - Term.letRec' True (terms uf <> allWatches uf) $ body - --- A UnisonFile after typechecking. Terms are split into groups by --- cycle and the type of each term is known. 
-data TypecheckedUnisonFile v a = - TypecheckedUnisonFileId { - dataDeclarationsId' :: Map v (Reference.Id, DataDeclaration v a), - effectDeclarationsId' :: Map v (Reference.Id, EffectDeclaration v a), - topLevelComponents' :: [[(v, Term v a, Type v a)]], - watchComponents :: [(WatchKind, [(v, Term v a, Type v a)])], - hashTermsId :: Map v (Reference.Id, Term v a, Type v a) - } deriving Show - --- backwards compatibility with the old data type -dataDeclarations' :: TypecheckedUnisonFile v a -> Map v (Reference, DataDeclaration v a) -dataDeclarations' = fmap (first Reference.DerivedId) . dataDeclarationsId' -effectDeclarations' :: TypecheckedUnisonFile v a -> Map v (Reference, EffectDeclaration v a) -effectDeclarations' = fmap (first Reference.DerivedId) . effectDeclarationsId' -hashTerms :: TypecheckedUnisonFile v a -> Map v (Reference, Term v a, Type v a) -hashTerms = fmap (over _1 Reference.DerivedId) . hashTermsId - -{-# COMPLETE TypecheckedUnisonFile #-} -pattern TypecheckedUnisonFile ds es tlcs wcs hts <- - TypecheckedUnisonFileId (fmap (first Reference.DerivedId) -> ds) - (fmap (first Reference.DerivedId) -> es) - tlcs - wcs - (fmap (over _1 Reference.DerivedId) -> hts) - --- todo: this is confusing, right? 
--- currently: create a degenerate TypecheckedUnisonFile --- multiple definitions of "top-level components" non-watch vs w/ watch -typecheckedUnisonFile :: Var v - => Map v (Reference.Id, DataDeclaration v a) - -> Map v (Reference.Id, EffectDeclaration v a) - -> [[(v, Term v a, Type v a)]] - -> [(WatchKind, [(v, Term v a, Type v a)])] - -> TypecheckedUnisonFile v a -typecheckedUnisonFile datas effects tlcs watches = - file0 { hashTermsId = hashImpl file0 } - where - file0 = TypecheckedUnisonFileId datas effects tlcs watches mempty - hashImpl file = let - -- test watches are added to the codebase also - -- todo: maybe other kinds of watches too - components = topLevelComponents file - types = Map.fromList [(v,t) | (v,_,t) <- join components ] - terms0 = Map.fromList [(v,e) | (v,e,_) <- join components ] - hcs = Term.hashComponents terms0 - in Map.fromList [ (v, (r, e, t)) | (v, (r, e)) <- Map.toList hcs, - Just t <- [Map.lookup v types] ] - -lookupDecl :: Ord v => v -> TypecheckedUnisonFile v a - -> Maybe (Reference.Id, DD.Decl v a) -lookupDecl v uf = - over _2 Right <$> (Map.lookup v (dataDeclarationsId' uf)) <|> - over _2 Left <$> (Map.lookup v (effectDeclarationsId' uf)) - -allTerms :: Ord v => TypecheckedUnisonFile v a -> Map v (Term v a) -allTerms uf = - Map.fromList [ (v, t) | (v, t, _) <- join $ topLevelComponents' uf ] - -topLevelComponents :: TypecheckedUnisonFile v a - -> [[(v, Term v a, Type v a)]] -topLevelComponents file = - topLevelComponents' file ++ [ comp | (TestWatch, comp) <- watchComponents file ] - -getDecl' :: Ord v => TypecheckedUnisonFile v a -> v -> Maybe (DD.Decl v a) -getDecl' uf v = - (Right . snd <$> Map.lookup v (dataDeclarations' uf)) <|> - (Left . 
snd <$> Map.lookup v (effectDeclarations' uf)) - --- External type references that appear in the types of the file's terms -termSignatureExternalLabeledDependencies - :: Ord v => TypecheckedUnisonFile v a -> Set LabeledDependency -termSignatureExternalLabeledDependencies - (TypecheckedUnisonFile dataDeclarations' effectDeclarations' _ _ hashTerms) = - Set.difference - (Set.map LD.typeRef - . foldMap Type.dependencies - . fmap (\(_r, _e, t) -> t) - . toList - $ hashTerms) - -- exclude any references that are defined in this file - (Set.fromList $ - (map (LD.typeRef . fst) . toList) dataDeclarations' <> - (map (LD.typeRef . fst) . toList) effectDeclarations') - --- Returns a relation for the dependencies of this file. The domain is --- the dependent, and the range is its dependencies, thus: --- `R.lookupDom r (dependencies file)` returns the set of dependencies --- of the reference `r`. -dependencies' :: - forall v a. Var v => TypecheckedUnisonFile v a -> Relation Reference.Id Reference -dependencies' file = let - terms :: Map v (Reference.Id, Term v a, Type v a) - terms = hashTermsId file - decls :: Map v (Reference.Id, DataDeclaration v a) - decls = dataDeclarationsId' file <> - fmap (second toDataDecl) (effectDeclarationsId' file ) - termDeps = foldl' f Relation.empty $ toList terms - allDeps = foldl' g termDeps $ toList decls - f acc (r, tm, tp) = acc <> termDeps <> typeDeps - where termDeps = - Relation.fromList [ (r, dep) | dep <- toList (Term.dependencies tm)] - typeDeps = - Relation.fromList [ (r, dep) | dep <- toList (Type.dependencies tp)] - g acc (r, decl) = acc <> ctorDeps - where ctorDeps = - Relation.fromList [ (r, dep) | (_, _, tp) <- DD.constructors' decl - , dep <- toList (Type.dependencies tp) - ] - in allDeps - --- Returns the dependencies of the `UnisonFile` input. Needed so we can --- load information about these dependencies before starting typechecking. 
-dependencies :: (Monoid a, Var v) => UnisonFile v a -> Set Reference -dependencies (UnisonFile ds es ts ws) = - foldMap (DD.dependencies . snd) ds - <> foldMap (DD.dependencies . DD.toDataDecl . snd) es - <> foldMap (Term.dependencies . snd) ts - <> foldMap (foldMap (Term.dependencies . snd)) ws - -discardTypes :: TypecheckedUnisonFile v a -> UnisonFile v a -discardTypes (TypecheckedUnisonFileId datas effects terms watches _) = let - watches' = g . mconcat <$> List.multimap watches - g tup3s = [(v,e) | (v,e,_t) <- tup3s ] - in UnisonFileId datas effects [ (a,b) | (a,b,_) <- join terms ] watches' - -declsToTypeLookup :: Var v => UnisonFile v a -> TL.TypeLookup v a -declsToTypeLookup uf = TL.TypeLookup mempty - (wrangle (dataDeclarations uf)) - (wrangle (effectDeclarations uf)) - where wrangle = Map.fromList . Map.elems - -toNames :: Var v => UnisonFile v a -> Names0 -toNames uf = datas <> effects - where - datas = foldMap DD.dataDeclToNames' (Map.toList (dataDeclarationsId uf)) - effects = foldMap DD.effectDeclToNames' (Map.toList (effectDeclarationsId uf)) - -typecheckedToNames0 :: Var v => TypecheckedUnisonFile v a -> Names0 -typecheckedToNames0 uf = Names.names0 (terms <> ctors) types where - terms = Relation.fromList - [ (Name.fromVar v, Referent.Ref r) - | (v, (r, _, _)) <- Map.toList $ hashTerms uf ] - types = Relation.fromList - [ (Name.fromVar v, r) - | (v, r) <- Map.toList $ fmap fst (dataDeclarations' uf) - <> fmap fst (effectDeclarations' uf) ] - ctors = Relation.fromMap - . Map.mapKeys Name.fromVar - . fmap (fmap Reference.DerivedId) - . hashConstructors - $ uf - -typecheckedUnisonFile0 :: Ord v => TypecheckedUnisonFile v a -typecheckedUnisonFile0 = TypecheckedUnisonFileId Map.empty Map.empty mempty mempty mempty - --- Returns true if the file has any definitions or watches -nonEmpty :: TypecheckedUnisonFile v a -> Bool -nonEmpty uf = - not (Map.null (dataDeclarations' uf)) || - not (Map.null (effectDeclarations' uf)) || - any (not . 
null) (topLevelComponents' uf) || - any (not . null) (watchComponents uf) - -hashConstructors - :: forall v a. Ord v => TypecheckedUnisonFile v a -> Map v Referent.Id -hashConstructors file = - let ctors1 = Map.elems (dataDeclarationsId' file) >>= \(ref, dd) -> - [ (v, Referent.Con' ref i CT.Data) | (v,i) <- DD.constructorVars dd `zip` [0 ..] ] - ctors2 = Map.elems (effectDeclarationsId' file) >>= \(ref, dd) -> - [ (v, Referent.Con' ref i CT.Effect) | (v,i) <- DD.constructorVars (DD.toDataDecl dd) `zip` [0 ..] ] - in Map.fromList (ctors1 ++ ctors2) - -type CtorLookup = Map String (Reference, Int) - --- Substitutes free type and term variables occurring in the terms of this --- `UnisonFile` using `externalNames`. --- --- Hash-qualified names are substituted during parsing, but non-HQ names are --- substituted at the end of parsing, since they can be locally bound. Example, in --- `x -> x + math.sqrt 2`, we don't know if `math.sqrt` is locally bound until --- we are done parsing, whereas `math.sqrt#abc` can be resolved immediately --- as it can't refer to a local definition. -bindNames :: Var v - => Names0 - -> UnisonFile v a - -> Names.ResolutionResult v a (UnisonFile v a) -bindNames names (UnisonFileId d e ts ws) = do - -- todo: consider having some kind of binding structure for terms & watches - -- so that you don't weirdly have free vars to tiptoe around. - -- The free vars should just be the things that need to be bound externally. - let termVars = (fst <$> ts) ++ (Map.elems ws >>= map fst) - termVarsSet = Set.fromList termVars - -- todo: can we clean up this lambda using something like `second` - ts' <- traverse (\(v,t) -> (v,) <$> Term.bindNames termVarsSet names t) ts - ws' <- traverse (traverse (\(v,t) -> (v,) <$> Term.bindNames termVarsSet names t)) ws - pure $ UnisonFileId d e ts' ws' - -constructorType :: - Var v => UnisonFile v a -> Reference -> Maybe CT.ConstructorType -constructorType = TL.constructorType . 
declsToTypeLookup - -data Env v a = Env - -- Data declaration name to hash and its fully resolved form - { datasId :: Map v (Reference.Id, DataDeclaration v a) - -- Effect declaration name to hash and its fully resolved form - , effectsId :: Map v (Reference.Id, EffectDeclaration v a) - -- Naming environment - , names :: Names0 -} - -datas :: Env v a -> Map v (Reference, DataDeclaration v a) -datas = fmap (first Reference.DerivedId) . datasId - -effects :: Env v a -> Map v (Reference, EffectDeclaration v a) -effects = fmap (first Reference.DerivedId) . effectsId - -data Error v a - -- A free type variable that couldn't be resolved - = UnknownType v a - -- A variable which is both a data and an ability declaration - | DupDataAndAbility v a a - deriving (Eq,Ord,Show) - --- This function computes hashes for data and effect declarations, and --- also returns a function for resolving strings to (Reference, ConstructorId) --- for parsing of pattern matching --- --- If there are duplicate declarations, the duplicated names are returned on the --- left. -environmentFor - :: forall v a . 
Var v - => Names0 - -> Map v (DataDeclaration v a) - -> Map v (EffectDeclaration v a) - -> Names.ResolutionResult v a (Either [Error v a] (Env v a)) -environmentFor names dataDecls0 effectDecls0 = do - let locallyBoundTypes = Map.keysSet dataDecls0 <> Map.keysSet effectDecls0 - -- data decls and hash decls may reference each other, and thus must be hashed together - dataDecls :: Map v (DataDeclaration v a) <- - traverse (DD.bindNames locallyBoundTypes names) dataDecls0 - effectDecls :: Map v (EffectDeclaration v a) <- - traverse (DD.withEffectDeclM (DD.bindNames locallyBoundTypes names)) effectDecls0 - let allDecls0 :: Map v (DataDeclaration v a) - allDecls0 = Map.union dataDecls (toDataDecl <$> effectDecls) - hashDecls' :: [(v, Reference.Id, DataDeclaration v a)] <- - hashDecls allDecls0 - -- then we have to pick out the dataDecls from the effectDecls - let - allDecls = Map.fromList [ (v, (r, de)) | (v, r, de) <- hashDecls' ] - dataDecls' = Map.difference allDecls effectDecls - effectDecls' = second EffectDeclaration <$> Map.difference allDecls dataDecls - -- ctor and effect terms - ctors = foldMap DD.dataDeclToNames' (Map.toList dataDecls') - effects = foldMap DD.effectDeclToNames' (Map.toList effectDecls') - names' = ctors <> effects - overlaps = let - w v dd (toDataDecl -> ed) = DupDataAndAbility v (DD.annotation dd) (DD.annotation ed) - in Map.elems $ Map.intersectionWithKey w dataDecls effectDecls where - okVars = Map.keysSet allDecls0 - unknownTypeRefs = Map.elems allDecls0 >>= \dd -> - let cts = DD.constructorTypes dd - in cts >>= \ct -> [ UnknownType v a | (v,a) <- ABT.freeVarOccurrences mempty ct - , not (Set.member v okVars) ] - pure $ - if null overlaps && null unknownTypeRefs - then pure $ Env dataDecls' effectDecls' names' - else Left (unknownTypeRefs ++ overlaps) - -allVars :: Ord v => UnisonFile v a -> Set v -allVars (UnisonFile ds es ts ws) = Set.unions - [ Map.keysSet ds - , foldMap (DD.allVars . 
snd) ds - , Map.keysSet es - , foldMap (DD.allVars . toDataDecl . snd) es - , Set.unions [ Set.insert v (Term.allVars t) | (v, t) <- ts ] - , Set.unions [ Set.insert v (Term.allVars t) | (v, t) <- join . Map.elems $ ws ] - ] diff --git a/parser-typechecker/src/Unison/Util/AnnotatedText.hs b/parser-typechecker/src/Unison/Util/AnnotatedText.hs deleted file mode 100644 index 3b537516bf..0000000000 --- a/parser-typechecker/src/Unison/Util/AnnotatedText.hs +++ /dev/null @@ -1,199 +0,0 @@ -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE GeneralizedNewtypeDeriving #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE TypeFamilies #-} - -module Unison.Util.AnnotatedText where - -import Unison.Prelude - -import qualified Data.List as L -import qualified Data.Foldable as Foldable -import qualified Data.Map as Map -import Data.Sequence (Seq ((:|>), (:<|))) -import qualified Data.Sequence as Seq -import Data.Tuple.Extra (second) -import Unison.Lexer (Line, Pos (..)) -import Unison.Util.Monoid (intercalateMap) -import Unison.Util.Range (Range (..), inRange) -import qualified Data.ListLike as LL - --- type AnnotatedText a = AnnotatedText (Maybe a) - -newtype AnnotatedText a = AnnotatedText (Seq (String, Maybe a)) - deriving (Eq, Functor, Foldable, Show) - -instance Semigroup (AnnotatedText a) where - AnnotatedText (as :|> ("", _)) <> bs = AnnotatedText as <> bs - as <> AnnotatedText (("", _) :<| bs) = as <> AnnotatedText bs - AnnotatedText as <> AnnotatedText bs = AnnotatedText (as <> bs) - -instance Monoid (AnnotatedText a) where - mempty = AnnotatedText Seq.empty - -instance LL.FoldableLL (AnnotatedText a) Char where - foldl' f z (AnnotatedText at) = Foldable.foldl' f' z at where - f' z (str, _) = L.foldl' f z str - foldl = LL.foldl - foldr f z (AnnotatedText at) = Foldable.foldr f' z at where - f' (str, _) z = L.foldr f z str - -instance LL.ListLike (AnnotatedText a) Char where - singleton ch = fromString [ch] - uncons (AnnotatedText at) = case at 
of - (s,a) :<| tl -> case L.uncons s of - Nothing -> LL.uncons (AnnotatedText tl) - Just (hd,s) -> Just (hd, AnnotatedText $ (s,a) :<| tl) - Seq.Empty -> Nothing - break f at = (LL.takeWhile (not . f) at, LL.dropWhile (not . f) at) - takeWhile f (AnnotatedText at) = case at of - Seq.Empty -> AnnotatedText Seq.Empty - (s,a) :<| tl -> let s' = L.takeWhile f s in - if length s' == length s then - AnnotatedText (pure (s,a)) <> LL.takeWhile f (AnnotatedText tl) - else - AnnotatedText (pure (s',a)) - dropWhile f (AnnotatedText at) = case at of - Seq.Empty -> AnnotatedText Seq.Empty - (s,a) :<| tl -> case L.dropWhile f s of - [] -> LL.dropWhile f (AnnotatedText tl) - s -> AnnotatedText $ (s,a) :<| tl - take n (AnnotatedText at) = case at of - Seq.Empty -> AnnotatedText Seq.Empty - (s,a) :<| tl -> - if n <= length s then AnnotatedText $ pure (take n s, a) - else AnnotatedText (pure (s,a)) <> - LL.take (n - length s) (AnnotatedText tl) - drop n (AnnotatedText at) = case at of - Seq.Empty -> AnnotatedText Seq.Empty - (s,a) :<| tl -> - if n <= length s then AnnotatedText $ (drop n s, a) :<| tl - else LL.drop (n - length s) (AnnotatedText tl) - null (AnnotatedText at) = all (null . fst) at - - -- Quoted text (indented, with source line numbers) with annotated portions. 
-data AnnotatedExcerpt a = AnnotatedExcerpt - { lineOffset :: Line - , text :: String - , annotations :: Map Range a - } deriving (Functor) - -annotate' :: Maybe b -> AnnotatedText a -> AnnotatedText b -annotate' a (AnnotatedText at) = - AnnotatedText $ (\(s,_) -> (s, a)) <$> at - -deannotate :: AnnotatedText a -> AnnotatedText b -deannotate = annotate' Nothing - --- Replace the annotation (whether existing or no) with the given annotation -annotate :: a -> AnnotatedText a -> AnnotatedText a -annotate a (AnnotatedText at) = - AnnotatedText $ (\(s,_) -> (s,Just a)) <$> at - -annotateMaybe :: AnnotatedText (Maybe a) -> AnnotatedText a -annotateMaybe (AnnotatedText s) = AnnotatedText (fmap (second join) s) - -trailingNewLine :: AnnotatedText a -> Bool -trailingNewLine (AnnotatedText (init :|> (s,_))) = - case lastMay s of - Just '\n' -> True - Just _ -> False - _ -> trailingNewLine (AnnotatedText init) -trailingNewLine _ = False - -markup :: AnnotatedExcerpt a -> Map Range a -> AnnotatedExcerpt a -markup a r = a { annotations = r `Map.union` annotations a } - --- renderTextUnstyled :: AnnotatedText a -> Rendered Void --- renderTextUnstyled (AnnotatedText chunks) = foldl' go mempty chunks --- where go r (text, _) = r <> fromString text - -textLength :: AnnotatedText a -> Int -textLength (AnnotatedText chunks) = foldl' go 0 chunks - where go len (text, _a) = len + length text - -textEmpty :: AnnotatedText a -> Bool -textEmpty = (==0) . textLength - -condensedExcerptToText :: Int -> AnnotatedExcerpt a -> AnnotatedText a -condensedExcerptToText margin e = - intercalateMap " .\n" excerptToText $ snipWithContext margin e - -excerptToText :: forall a. 
AnnotatedExcerpt a -> AnnotatedText a -excerptToText e = - track (Pos line1 1) [] (Map.toList $ annotations e) (renderLineNumber line1) (text e) - where - line1 :: Int - line1 = lineOffset e - renderLineNumber :: Int -> AnnotatedText a - renderLineNumber n = fromString $ " " ++ spaces ++ sn ++ " | " - where sn = show n - spaces = replicate (lineNumberWidth - length sn) ' ' - lineNumberWidth = 4 - - -- step through the source characters and annotations - track _ _ _ rendered "" = rendered - track _ _ _ rendered "\n" = rendered <> "\n" - track pos@(Pos line col) stack annotations rendered _input@(c:rest) = - let - (poppedAnnotations, remainingAnnotations) = span (inRange pos . fst) annotations - -- drop any stack entries that will be closed after this char - -- and add new stack entries - stack' = foldl' pushColor stack0 poppedAnnotations - where pushColor s (Range _ end, style) = (style, end) : s - stack0 = dropWhile ((<=pos) . snd) stack - maybeColor = fst <$> headMay stack' - -- on new line, advance pos' vertically and set up line header - -- additions :: AnnotatedText (Maybe a) - pos' :: Pos - (additions, pos') = - if c == '\n' - then ("\n" <> renderLineNumber (line + 1), Pos (line + 1) 1) - else (annotate' maybeColor (fromString [c]), Pos line (col + 1)) - in track pos' stack' remainingAnnotations (rendered <> additions) rest - -snipWithContext :: Int -> AnnotatedExcerpt a -> [AnnotatedExcerpt a] -snipWithContext margin source = - case foldl' whileWithinMargin - (Nothing, mempty, mempty) - (Map.toList $ annotations source) of - (Nothing, _, _) -> [] - (Just (Range (Pos startLine' _) (Pos endLine' _)), group', rest') -> - let dropLineCount = startLine' - lineOffset source - takeLineCount = endLine' - startLine' + 1 - text', text2' :: [String] - (text', text2') = - splitAt takeLineCount (drop dropLineCount (lines (text source))) - in AnnotatedExcerpt startLine' (unlines text') group' - : snipWithContext - margin (AnnotatedExcerpt (endLine' + 1) (unlines text2') 
rest') - where - withinMargin :: Range -> Range -> Bool - withinMargin (Range _start1 (Pos end1 _)) (Range (Pos start2 _) _end2) = - end1 + margin >= start2 - - whileWithinMargin :: (Maybe Range, Map Range a, Map Range a) - -> (Range, a) - -> (Maybe Range, Map Range a, Map Range a) - whileWithinMargin (r0, taken, rest) (r1,a1) = - case r0 of - Nothing -> -- haven't processed any annotations yet - (Just r1, Map.singleton r1 a1, mempty) - Just r0 -> - -- if all annotations so far can be joined without .. separations - if null rest - -- if this one can be joined to the new region without .. separation - then if withinMargin r0 r1 - -- add it to the first set and grow the compare region - then (Just $ r0 <> r1, Map.insert r1 a1 taken, mempty) - -- otherwise add it to the second set - else (Just r0, taken, Map.singleton r1 a1) - -- once we've added to the second set, anything more goes there too - else (Just r0, taken, Map.insert r1 a1 rest) - -instance IsString (AnnotatedText a) where - fromString s = AnnotatedText . pure $ (s, Nothing) - -instance IsString (AnnotatedExcerpt a) where - fromString s = AnnotatedExcerpt 1 s mempty diff --git a/parser-typechecker/src/Unison/Util/Bytes.hs b/parser-typechecker/src/Unison/Util/Bytes.hs deleted file mode 100644 index 4c9dffeecb..0000000000 --- a/parser-typechecker/src/Unison/Util/Bytes.hs +++ /dev/null @@ -1,96 +0,0 @@ -{-# Language ViewPatterns #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} - -module Unison.Util.Bytes where - -import Unison.Prelude hiding (empty) - -import Data.Monoid (Sum(..)) -import Prelude hiding (drop) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.FingerTree as T - --- Bytes type represented as a finger tree of ByteStrings. --- Can be efficiently sliced and indexed, using the byte count --- annotation at each subtree. 
-newtype Bytes = Bytes (T.FingerTree (Sum Int) B.ByteString) - -null :: Bytes -> Bool -null (Bytes bs) = T.null bs - -empty :: Bytes -empty = Bytes mempty - -fromByteString :: B.ByteString -> Bytes -fromByteString = snoc empty - -toByteString :: Bytes -> B.ByteString -toByteString b = B.concat (chunks b) - -size :: Bytes -> Int -size (Bytes bs) = getSum (T.measure bs) - -chunks :: Bytes -> [B.ByteString] -chunks (Bytes b) = toList b - -cons :: B.ByteString -> Bytes -> Bytes -cons b bs | B.null b = bs -cons b (Bytes bs) = Bytes (b T.<| bs) - -snoc :: Bytes -> B.ByteString -> Bytes -snoc bs b | B.null b = bs -snoc (Bytes bs) b = Bytes (bs T.|> b) - -flatten :: Bytes -> Bytes -flatten b = snoc mempty (B.concat (chunks b)) - -take :: Int -> Bytes -> Bytes -take n (Bytes bs) = go (T.split (> Sum n) bs) where - go (ok, s) = Bytes $ case T.viewl s of - last T.:< _ -> - if T.measure ok == Sum n then ok - else ok T.|> B.take (n - getSum (T.measure ok)) last - _ -> ok - -drop :: Int -> Bytes -> Bytes -drop n b0@(Bytes bs) = go (T.dropUntil (> Sum n) bs) where - go s = Bytes $ case T.viewl s of - head T.:< tail -> - if (size b0 - getSum (T.measure s)) == n then s - else B.drop (n - (size b0 - getSum (T.measure s))) head T.<| tail - _ -> s - -at :: Int -> Bytes -> Maybe Word8 -at i bs = case drop i bs of - Bytes (T.viewl -> hd T.:< _) -> Just (B.head hd) - _ -> Nothing - -toWord8s :: Bytes -> [Word8] -toWord8s bs = catMaybes [ at i bs | i <- [0..(size bs - 1)] ] - -fromWord8s :: [Word8] -> Bytes -fromWord8s bs = fromByteString (B.pack bs) - -instance Monoid Bytes where - mempty = Bytes mempty - mappend (Bytes b1) (Bytes b2) = Bytes (b1 `mappend` b2) - -instance Semigroup Bytes where (<>) = mappend - -instance T.Measured (Sum Int) B.ByteString where - measure b = Sum (B.length b) - -instance Show Bytes where - show bs = show (toWord8s bs) - -instance Eq Bytes where - b1 == b2 | size b1 == size b2 = go b1 b2 - where - go b1 b2 = BL.fromChunks (chunks b1) == BL.fromChunks (chunks 
b2) - _ == _ = False - --- Lexicographical ordering -instance Ord Bytes where - b1 `compare` b2 = - BL.fromChunks (chunks b1) `compare` BL.fromChunks (chunks b2) diff --git a/parser-typechecker/src/Unison/Util/Cache.hs b/parser-typechecker/src/Unison/Util/Cache.hs deleted file mode 100644 index 499d75f806..0000000000 --- a/parser-typechecker/src/Unison/Util/Cache.hs +++ /dev/null @@ -1,90 +0,0 @@ -module Unison.Util.Cache where - -import Prelude hiding (lookup) -import Unison.Prelude -import UnliftIO (newTVarIO, modifyTVar', writeTVar, atomically, readTVar, readTVarIO) -import qualified Data.Map as Map - -data Cache m k v = - Cache { lookup :: k -> m (Maybe v) - , insert :: k -> v -> m () - } - --- Create a cache of unbounded size. -cache :: (MonadIO m, Ord k) => m (Cache m k v) -cache = do - t <- newTVarIO Map.empty - let - lookup k = Map.lookup k <$> readTVarIO t - insert k v = do - m <- readTVarIO t - case Map.lookup k m of - Nothing -> atomically $ modifyTVar' t (Map.insert k v) - _ -> pure () - - pure $ Cache lookup insert - -nullCache :: (MonadIO m, Ord k) => m (Cache m k v) -nullCache = pure $ Cache (const (pure Nothing)) (\_ _ -> pure ()) - --- Create a cache of bounded size. Once the cache --- reaches a size of `maxSize`, older unused entries --- are evicted from the cache. Unlike LRU caching, --- where cache hits require updating LRU info, --- cache hits here are read-only and contention free. 
-semispaceCache :: (MonadIO m, Ord k) => Word -> m (Cache m k v) -semispaceCache 0 = nullCache -semispaceCache maxSize = do - -- Analogous to semispace GC, keep 2 maps: gen0 and gen1 - -- `insert k v` is done in gen0 - -- if full, gen1 = gen0; gen0 = Map.empty - -- `lookup k` is done in gen0; then gen1 - -- if found in gen0, return immediately - -- if found in gen1, `insert k v`, then return - -- Thus, older keys not recently looked up are forgotten - gen0 <- newTVarIO Map.empty - gen1 <- newTVarIO Map.empty - let - lookup k = readTVarIO gen0 >>= \m0 -> - case Map.lookup k m0 of - Nothing -> readTVarIO gen1 >>= \m1 -> - case Map.lookup k m1 of - Nothing -> pure Nothing - Just v -> insert k v $> Just v - just -> pure just - insert k v = atomically $ do - modifyTVar' gen0 (Map.insert k v) - m0 <- readTVar gen0 - when (fromIntegral (Map.size m0) >= maxSize) $ do - writeTVar gen1 m0 - writeTVar gen0 Map.empty - pure $ Cache lookup insert - --- Cached function application: if a key `k` is not in the cache, --- calls `f` and inserts `f k` results in the cache. -apply :: Monad m => Cache m k v -> (k -> m v) -> k -> m v -apply c f k = lookup c k >>= \case - Just v -> pure v - Nothing -> do - v <- f k - insert c k v - pure v - --- Cached function application which only caches values for --- which `f k` is non-empty. For instance, if `g` is `Maybe`, --- and `f x` returns `Nothing`, this won't be cached. --- --- Useful when we think that missing results for `f` may be --- later filled in so we don't want to cache missing results. 
-applyDefined :: (Monad m, Applicative g, Traversable g) - => Cache m k v - -> (k -> m (g v)) - -> k - -> m (g v) -applyDefined c f k = lookup c k >>= \case - Just v -> pure (pure v) - Nothing -> do - v <- f k - -- only populate the cache if f returns a non-empty result - for_ v $ \v -> insert c k v - pure v diff --git a/parser-typechecker/src/Unison/Util/ColorText.hs b/parser-typechecker/src/Unison/Util/ColorText.hs deleted file mode 100644 index 5cfd21b23c..0000000000 --- a/parser-typechecker/src/Unison/Util/ColorText.hs +++ /dev/null @@ -1,129 +0,0 @@ -module Unison.Util.ColorText ( - ColorText, Color(..), style, toANSI, toPlain, toHTML, defaultColors, - black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, underline, - module Unison.Util.AnnotatedText) -where - -import Unison.Prelude - -import qualified System.Console.ANSI as ANSI -import Unison.Util.AnnotatedText (AnnotatedText(..), annotate) -import qualified Unison.Util.SyntaxText as ST hiding (toPlain) - -type ColorText = AnnotatedText Color - -data Color - = Black | Red | Green | Yellow | Blue | Purple | Cyan | White - | HiBlack| HiRed | HiGreen | HiYellow | HiBlue | HiPurple | HiCyan | HiWhite - | Bold | Underline - deriving (Eq, Ord, Bounded, Enum, Show, Read) - -black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, underline :: ColorText -> ColorText -black = style Black -red = style Red -green = style Green -yellow = style Yellow -blue = style Blue -purple = style Purple -cyan = style Cyan -white = style White -hiBlack = style HiBlack -hiRed = style HiRed -hiGreen = style HiGreen -hiYellow = style HiYellow -hiBlue = style HiBlue -hiPurple = style HiPurple -hiCyan = style HiCyan -hiWhite = style HiWhite -bold = style Bold -underline = style Underline - -style :: Color -> ColorText -> ColorText -style = annotate - -toHTML :: String -> ColorText -> String 
-toHTML cssPrefix (AnnotatedText at) = toList at >>= \case - (s, color) -> wrap color (s >>= newlineToBreak) - where - newlineToBreak '\n' = "
\n" - newlineToBreak ch = [ch] - wrap Nothing s = "" <> s <> "" - wrap (Just c) s = - "" <> s <> "" - colorName c = "\"" <> cssPrefix <> "-" <> show c <> "\"" - --- Convert a `ColorText` to a `String`, ignoring colors -toPlain :: ColorText -> String -toPlain (AnnotatedText at) = join (toList $ fst <$> at) - --- Convert a `ColorText` to a `String`, using ANSI codes to produce colors -toANSI :: ColorText -> String -toANSI (AnnotatedText chunks) = - join . toList $ snd (foldl' go (Nothing, mempty) chunks) <> resetANSI - where - go - :: (Maybe Color, Seq String) - -> (String, Maybe Color) - -> (Maybe Color, Seq String) - go (prev, r) (text, new) = if prev == new - then (prev, r <> pure text) - else - ( new - , case new of - Nothing -> r <> resetANSI <> pure text - Just style -> r <> resetANSI <> toANSI style <> pure text - ) - resetANSI = pure . ANSI.setSGRCode $ [ANSI.Reset] - toANSI :: Color -> Seq String - toANSI c = pure . ANSI.setSGRCode $ case c of - Black -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Black] - Red -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Red] - Green -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Green] - Yellow -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Yellow] - Blue -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Blue] - Purple -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Magenta] - Cyan -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Cyan] - White -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.White] - HiBlack -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Black] - HiRed -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Red] - HiGreen -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Green] - HiYellow -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Yellow] - HiBlue -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Blue] - HiPurple -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Magenta] - HiCyan -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Cyan] - HiWhite -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid 
ANSI.White] - Bold -> [ANSI.SetConsoleIntensity ANSI.BoldIntensity] - Underline -> [ANSI.SetUnderlining ANSI.SingleUnderline] - -defaultColors :: ST.Element -> Maybe Color -defaultColors = \case - ST.NumericLiteral -> Nothing - ST.TextLiteral -> Nothing - ST.CharLiteral -> Nothing - ST.BooleanLiteral -> Nothing - ST.Blank -> Nothing - ST.Var -> Nothing - ST.Reference _ -> Nothing - ST.Referent _ -> Nothing - ST.Op _ -> Nothing - ST.Unit -> Nothing - ST.Constructor -> Nothing - ST.Request -> Nothing - ST.AbilityBraces -> Just HiBlack - ST.ControlKeyword -> Just Bold - ST.LinkKeyword -> Just HiBlack - ST.TypeOperator -> Just HiBlack - ST.BindingEquals -> Nothing - ST.TypeAscriptionColon -> Just Blue - ST.DataTypeKeyword -> Nothing - ST.DataTypeParams -> Nothing - ST.DataTypeModifier -> Nothing - ST.UseKeyword -> Just HiBlack - ST.UsePrefix -> Just HiBlack - ST.UseSuffix -> Just HiBlack - ST.HashQualifier _ -> Just HiBlack - ST.DelayForceChar -> Just Yellow - ST.DelimiterChar -> Nothing - ST.Parenthesis -> Nothing - ST.DocDelimiter -> Just Green - ST.DocKeyword -> Just Bold diff --git a/parser-typechecker/src/Unison/Util/CycleTable.hs b/parser-typechecker/src/Unison/Util/CycleTable.hs deleted file mode 100644 index 9792636555..0000000000 --- a/parser-typechecker/src/Unison/Util/CycleTable.hs +++ /dev/null @@ -1,39 +0,0 @@ -module Unison.Util.CycleTable where - -import Data.HashTable.IO (BasicHashTable) -import Data.Hashable (Hashable) -import qualified Data.HashTable.IO as HT -import qualified Data.Mutable as M - --- A hash table along with a unique number which gets incremented on --- each insert. This is used as an implementation detail by `CyclicEq`, --- `CyclicOrd`, etc to be able to compare, hash, or serialize cyclic structures. 
- -data CycleTable k v = - CycleTable { - table :: BasicHashTable k v, - sizeRef :: M.IOPRef Int - } - -new :: Int -> IO (CycleTable k v) -new size = do - t <- HT.newSized size - r <- M.newRef 0 - pure (CycleTable t r) - -lookup :: (Hashable k, Eq k) => k -> CycleTable k v -> IO (Maybe v) -lookup k t = HT.lookup (table t) k - -insert :: (Hashable k, Eq k) => k -> v -> CycleTable k v -> IO () -insert k v t = do - HT.insert (table t) k v - M.modifyRef (sizeRef t) (1 +) - -size :: CycleTable k v -> IO Int -size h = M.readRef (sizeRef h) - -insertEnd :: (Hashable k, Eq k) => k -> CycleTable k Int -> IO () -insertEnd k t = do - n <- size t - insert k n t - diff --git a/parser-typechecker/src/Unison/Util/CyclicEq.hs b/parser-typechecker/src/Unison/Util/CyclicEq.hs deleted file mode 100644 index 46cb72c6bd..0000000000 --- a/parser-typechecker/src/Unison/Util/CyclicEq.hs +++ /dev/null @@ -1,60 +0,0 @@ -{-# Language BangPatterns #-} -{-# Language Strict #-} -{-# Language StrictData #-} - -module Unison.Util.CyclicEq where - -import Unison.Prelude - -import Data.Vector (Vector) -import qualified Data.Vector as V -import qualified Data.Sequence as S -import qualified Unison.Util.CycleTable as CT - -{- - Typeclass used for comparing potentially cyclic types for equality. - Cyclic types may refer to themselves indirectly, so something is needed to - prevent an infinite loop in these cases. The basic idea: when a subexpression - is first examined, its "id" (represented as some `Int`) may be added to the - mutable hash table along with its position. The next time that same id is - encountered, it will be compared based on this position. 
- -} -class CyclicEq a where - -- Map from `Ref` ID to position in the stream - -- If a ref is encountered again, we use its mapped ID - cyclicEq :: CT.CycleTable Int Int -> CT.CycleTable Int Int -> a -> a -> IO Bool - -bothEq' :: (Eq a, CyclicEq b) => CT.CycleTable Int Int -> CT.CycleTable Int Int - -> a -> a -> b -> b -> IO Bool -bothEq' h1 h2 a1 a2 b1 b2 = - if a1 == a2 then cyclicEq h1 h2 b1 b2 - else pure False - -bothEq :: - (CyclicEq a, CyclicEq b) => CT.CycleTable Int Int -> CT.CycleTable Int Int - -> a -> a -> b -> b -> IO Bool -bothEq h1 h2 a1 a2 b1 b2 = cyclicEq h1 h2 a1 a2 >>= \b -> - if b then cyclicEq h1 h2 b1 b2 - else pure False - -instance CyclicEq a => CyclicEq [a] where - cyclicEq h1 h2 (x:xs) (y:ys) = bothEq h1 h2 x y xs ys - cyclicEq _ _ [] [] = pure True - cyclicEq _ _ _ _ = pure False - -instance CyclicEq a => CyclicEq (S.Seq a) where - cyclicEq h1 h2 xs ys = - if S.length xs == S.length ys then cyclicEq h1 h2 (toList xs) (toList ys) - else pure False - -instance CyclicEq a => CyclicEq (Vector a) where - cyclicEq h1 h2 xs ys = - if V.length xs /= V.length ys then pure False - else go 0 h1 h2 xs ys - where - go !i !h1 !h2 !xs !ys = - if i >= V.length xs then pure True - else do - b <- cyclicEq h1 h2 (xs V.! i) (ys V.! i) - if b then go (i + 1) h1 h2 xs ys - else pure False diff --git a/parser-typechecker/src/Unison/Util/CyclicOrd.hs b/parser-typechecker/src/Unison/Util/CyclicOrd.hs deleted file mode 100644 index 6896110ed7..0000000000 --- a/parser-typechecker/src/Unison/Util/CyclicOrd.hs +++ /dev/null @@ -1,54 +0,0 @@ -{-# Language BangPatterns #-} -{-# Language Strict #-} -{-# Language StrictData #-} - -module Unison.Util.CyclicOrd where - -import Unison.Prelude - -import Data.Vector (Vector) -import Unison.Util.CycleTable (CycleTable) -import qualified Data.Vector as V -import qualified Data.Sequence as S -import qualified Unison.Util.CycleTable as CT - --- Same idea as `CyclicEq`, but for ordering. 
-class CyclicOrd a where - -- Map from `Ref` ID to position in the stream - -- If a ref is encountered again, we use its mapped ID - cyclicOrd :: CycleTable Int Int -> CycleTable Int Int -> a -> a -> IO Ordering - -bothOrd' :: - (Ord a, CyclicOrd b) => CT.CycleTable Int Int -> CT.CycleTable Int Int - -> a -> a -> b -> b -> IO Ordering -bothOrd' h1 h2 a1 a2 b1 b2 = case compare a1 a2 of - EQ -> cyclicOrd h1 h2 b1 b2 - c -> pure c - -bothOrd :: - (CyclicOrd a, CyclicOrd b) => CT.CycleTable Int Int -> CT.CycleTable Int Int - -> a -> a -> b -> b -> IO Ordering -bothOrd h1 h2 a1 a2 b1 b2 = cyclicOrd h1 h2 a1 a2 >>= \b -> - if b == EQ then cyclicOrd h1 h2 b1 b2 - else pure b - -instance CyclicOrd a => CyclicOrd [a] where - cyclicOrd h1 h2 (x:xs) (y:ys) = bothOrd h1 h2 x y xs ys - cyclicOrd _ _ [] [] = pure EQ - cyclicOrd _ _ [] _ = pure LT - cyclicOrd _ _ _ [] = pure GT - -instance CyclicOrd a => CyclicOrd (S.Seq a) where - cyclicOrd h1 h2 xs ys = cyclicOrd h1 h2 (toList xs) (toList ys) - -instance CyclicOrd a => CyclicOrd (Vector a) where - cyclicOrd h1 h2 xs ys = go 0 h1 h2 xs ys - where - go !i !h1 !h2 !xs !ys = - if i >= V.length xs && i >= V.length ys then pure EQ - else if i >= V.length xs then pure LT - else if i >= V.length ys then pure GT - else do - b <- cyclicOrd h1 h2 (xs V.! i) (ys V.! i) - if b == EQ then go (i + 1) h1 h2 xs ys - else pure b diff --git a/parser-typechecker/src/Unison/Util/EnumContainers.hs b/parser-typechecker/src/Unison/Util/EnumContainers.hs deleted file mode 100644 index a00134a21c..0000000000 --- a/parser-typechecker/src/Unison/Util/EnumContainers.hs +++ /dev/null @@ -1,115 +0,0 @@ -{-# language DeriveTraversable #-} -{-# language GeneralizedNewtypeDeriving #-} - -module Unison.Util.EnumContainers - ( EnumMap - , EnumSet - , EnumKey(..) 
- , mapFromList - , setFromList - , mapSingleton - , setSingleton - , mapInsert - , unionWith - , keys - , restrictKeys - , withoutKeys - , member - , lookup - , lookupWithDefault - , foldMapWithKey - , mapToList - , (!) - , findMin - ) where - -import Prelude hiding (lookup) - -import Data.Bifunctor -import Data.Word (Word64,Word16) - -import qualified Data.IntSet as IS -import qualified Data.IntMap.Strict as IM - -class EnumKey k where - keyToInt :: k -> Int - intToKey :: Int -> k - -instance EnumKey Word64 where - keyToInt e = fromIntegral e - intToKey i = fromIntegral i - -instance EnumKey Word16 where - keyToInt e = fromIntegral e - intToKey i = fromIntegral i - -newtype EnumMap k a = EM (IM.IntMap a) - deriving - ( Monoid - , Semigroup - , Functor - , Foldable - , Traversable - , Show - , Eq - , Ord - ) - -newtype EnumSet k = ES IS.IntSet - deriving - ( Monoid - , Semigroup - , Show - , Eq - , Ord - ) - -mapFromList :: EnumKey k => [(k, a)] -> EnumMap k a -mapFromList = EM . IM.fromList . fmap (first keyToInt) - -setFromList :: EnumKey k => [k] -> EnumSet k -setFromList = ES . IS.fromList . fmap keyToInt - -mapSingleton :: EnumKey k => k -> a -> EnumMap k a -mapSingleton e a = EM $ IM.singleton (keyToInt e) a - -setSingleton :: EnumKey k => k -> EnumSet k -setSingleton e = ES . IS.singleton $ keyToInt e - -mapInsert :: EnumKey k => k -> a -> EnumMap k a -> EnumMap k a -mapInsert e x (EM m) = EM $ IM.insert (keyToInt e) x m - -unionWith - :: EnumKey k => EnumKey k - => (a -> a -> a) -> EnumMap k a -> EnumMap k a -> EnumMap k a -unionWith f (EM l) (EM r) = EM $ IM.unionWith f l r - -keys :: EnumKey k => EnumMap k a -> [k] -keys (EM m) = fmap intToKey . 
IM.keys $ m - -restrictKeys :: EnumKey k => EnumMap k a -> EnumSet k -> EnumMap k a -restrictKeys (EM m) (ES s) = EM $ IM.restrictKeys m s - -withoutKeys :: EnumKey k => EnumMap k a -> EnumSet k -> EnumMap k a -withoutKeys (EM m) (ES s) = EM $ IM.withoutKeys m s - -member :: EnumKey k => k -> EnumSet k -> Bool -member e (ES s) = IS.member (keyToInt e) s - -lookup :: EnumKey k => k -> EnumMap k a -> Maybe a -lookup e (EM m) = IM.lookup (keyToInt e) m - -lookupWithDefault :: EnumKey k => a -> k -> EnumMap k a -> a -lookupWithDefault d e (EM m) = IM.findWithDefault d (keyToInt e) m - -foldMapWithKey :: EnumKey k => Monoid m => (k -> a -> m) -> EnumMap k a -> m -foldMapWithKey f (EM m) = IM.foldMapWithKey (f . intToKey) m - -mapToList :: EnumKey k => EnumMap k a -> [(k, a)] -mapToList (EM m) = first intToKey <$> IM.toList m - -(!) :: EnumKey k => EnumMap k a -> k -> a -EM m ! e = m IM.! keyToInt e - -findMin :: EnumKey k => EnumSet k -> k -findMin (ES s) = intToKey $ IS.findMin s diff --git a/parser-typechecker/src/Unison/Util/Exception.hs b/parser-typechecker/src/Unison/Util/Exception.hs deleted file mode 100644 index c4db7c2f7a..0000000000 --- a/parser-typechecker/src/Unison/Util/Exception.hs +++ /dev/null @@ -1,16 +0,0 @@ -module Unison.Util.Exception where - -import Unison.Prelude - -import Control.Concurrent.Async (withAsync, waitCatch) - --- These are adapted from: https://github.com/snoyberg/classy-prelude/blob/ccd19f2c62882c69d5dcdd3da5c0df1031334c5a/classy-prelude/ClassyPrelude.hs#L320 --- License is MIT: https://github.com/snoyberg/classy-prelude/blob/ccd19f2c62882c69d5dcdd3da5c0df1031334c5a/classy-prelude/LICENSE - --- Catch all exceptions except asynchronous exceptions. -tryAny :: MonadIO m => IO a -> m (Either SomeException a) -tryAny action = liftIO $ withAsync action waitCatch - --- Catch all exceptions except asynchronous exceptions. 
-catchAny :: IO a -> (SomeException -> IO a) -> IO a -catchAny action onE = tryAny action >>= either onE return diff --git a/parser-typechecker/src/Unison/Util/Find.hs b/parser-typechecker/src/Unison/Util/Find.hs deleted file mode 100644 index 089e44f850..0000000000 --- a/parser-typechecker/src/Unison/Util/Find.hs +++ /dev/null @@ -1,188 +0,0 @@ -{-# LANGUAGE ViewPatterns #-} - -module Unison.Util.Find ( - fuzzyFinder, simpleFuzzyFinder, simpleFuzzyScore, fuzzyFindInBranch, fuzzyFindMatchArray, prefixFindInBranch - ) where - -import Unison.Prelude - -import qualified Data.Char as Char -import qualified Data.List as List -import qualified Data.Text as Text --- http://www.serpentine.com/blog/2007/02/27/a-haskell-regular-expression-tutorial/ --- https://www.stackage.org/haddock/lts-13.9/regex-base-0.93.2/Text-Regex-Base-Context.html -- re-exported by TDFA --- https://www.stackage.org/haddock/lts-13.9/regex-tdfa-1.2.3.1/Text-Regex-TDFA.html -import qualified Text.Regex.TDFA as RE -import Unison.Codebase.SearchResult (SearchResult) -import qualified Unison.Codebase.SearchResult as SR -import Unison.HashQualified' (HashQualified) -import qualified Unison.HashQualified' as HQ -import qualified Unison.Name as Name -import qualified Unison.Names2 as Names -import Unison.Names2 ( Names0 ) -import Unison.NamePrinter (prettyHashQualified') -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.ShortHash as SH -import Unison.Util.Monoid (intercalateMap) -import qualified Unison.Util.Pretty as P -import qualified Unison.Util.Relation as R - - -fuzzyFinder :: forall a. - String -> [a] -> (a -> String) -> [(a, P.Pretty P.ColorText)] -fuzzyFinder query items render = - sortAndCleanup $ fuzzyFindMatchArray query items render - where - sortAndCleanup = List.map snd . List.sortOn fst - -simpleFuzzyFinder :: forall a. 
- String -> [a] -> (a -> String) -> [(a, P.Pretty P.ColorText)] -simpleFuzzyFinder query items render = - sortAndCleanup $ do - a <- items - let s = render a - score <- toList (simpleFuzzyScore query s) - pure ((a, hi s), score) - where - hi = highlightSimple query - sortAndCleanup = List.map fst . List.sortOn snd - --- highlights `query` if it is a prefix of `s`, or if it --- appears in the final segement of s (after the final `.`) -highlightSimple :: String -> String -> P.Pretty P.ColorText -highlightSimple "" = P.string -highlightSimple query = go where - go [] = mempty - go s@(h:t) | query `List.isPrefixOf` s = hiQuery <> go (drop len s) - | otherwise = P.string [h] <> go t - len = length query - hiQuery = P.hiBlack (P.string query) - -simpleFuzzyScore :: String -> String -> Maybe Int -simpleFuzzyScore query s - | query `List.isPrefixOf` s = Just (bonus s 2) - | query `List.isSuffixOf` s = Just (bonus s 1) - | query `List.isInfixOf` s = Just (bonus s 3) - | lowerquery `List.isInfixOf` lowers = Just (bonus s 4) - | otherwise = Nothing - where - -- prefer relative names - bonus ('.':_) n = n*10 - bonus _ n = n - lowerquery = Char.toLower <$> query - lowers = Char.toLower <$> s - --- This logic was split out of fuzzyFinder because the `RE.MatchArray` has an --- `Ord` instance that helps us sort the fuzzy matches in a nice way. (see --- comment below.) `Editor.fuzzyNameDistance` uses this `Ord` instance. -fuzzyFindMatchArray :: forall a. - String -> [a] -> (a -> String) - -> [(RE.MatchArray, (a, P.Pretty P.ColorText))] -fuzzyFindMatchArray query items render = - scoreAndHighlight $ items - where - scoreAndHighlight = catMaybes . List.map go - go :: a -> Maybe (RE.MatchArray, (a, P.Pretty P.ColorText)) - go a = - let string = render a - text = Text.pack string - matches = RE.matchOnce regex string - addContext matches = - let highlighted = highlight P.bold text . tail . 
toList $ matches - in (matches, (a, highlighted)) - in addContext <$> matches - -- regex "Foo" = "(\\F).*(\\o).*(\\o)" - regex :: RE.Regex - regex = let - s = if null query then ".*" - else intercalateMap ".*" esc query where esc c = "(\\" <> [c] <> ")" - in RE.makeRegexOpts - RE.defaultCompOpt { RE.caseSensitive = False - -- newSyntax = False, otherwise "\<" and "\>" - -- matches word boundaries instead of literal < and > - , RE.newSyntax = False - } - RE.defaultExecOpt - s - -- Sort on: - -- a. length of match group to find the most compact match - -- b. start position of the match group to find the earliest match - -- c. the item itself for alphabetical ranking - -- Ord MatchArray already provides a. and b. todo: c. - -prefixFindInBranch :: - Names0 -> HashQualified -> [(SearchResult, P.Pretty P.ColorText)] -prefixFindInBranch b hq = fmap getName $ - case HQ.toName hq of - -- query string includes a name component, so do a prefix find on that - (Name.toString -> n) -> - filter (filterName n) (candidates b hq) - where - filterName n sr = - fromString n `Name.isPrefixOf` (HQ.toName . SR.name) sr - --- only search before the # before the # and after the # after the # -fuzzyFindInBranch :: Names0 - -> HashQualified - -> [(SearchResult, P.Pretty P.ColorText)] -fuzzyFindInBranch b hq = - case HQ.toName hq of - (Name.toString -> n) -> - simpleFuzzyFinder n (candidates b hq) - (Name.toString . HQ.toName . SR.name) - -getName :: SearchResult -> (SearchResult, P.Pretty P.ColorText) -getName sr = (sr, P.syntaxToColor $ prettyHashQualified' (SR.name sr)) - -candidates :: Names.Names' Name.Name -> HashQualified -> [SearchResult] -candidates b hq = typeCandidates <> termCandidates - where - -- filter branch by hash - typeCandidates = - fmap typeResult . filterTypes . R.toList . Names.types $ b - termCandidates = - fmap termResult . filterTerms . R.toList . Names.terms $ b - filterTerms = case HQ.toHash hq of - Just sh -> List.filter $ SH.isPrefixOf sh . Referent.toShortHash . 
snd - Nothing -> id - filterTypes = case HQ.toHash hq of - Just sh -> List.filter $ SH.isPrefixOf sh . Reference.toShortHash. snd - Nothing -> id - typeResult (n, r) = SR.typeResult (Names._hqTypeName b n r) r - (Names._hqTypeAliases b n r) - termResult (n, r) = SR.termResult (Names._hqTermName b n r) r - (Names._hqTermAliases b n r) - -type Pos = Int -type Len = Int --- This [(Pos, Len)] type is the same as `tail . toList` of a regex MatchArray -highlight :: (P.Pretty P.ColorText -> P.Pretty P.ColorText) - -> Text - -> [(Pos, Len)] - -> P.Pretty P.ColorText -highlight on = highlight' on id - -highlight' :: (P.Pretty P.ColorText -> P.Pretty P.ColorText) - -> (P.Pretty P.ColorText -> P.Pretty P.ColorText) - -> Text - -> [(Pos, Len)] - -> P.Pretty P.ColorText -highlight' on off t groups = case groups of - [] -> (off . P.text) t - (0,_) : _ -> go groups - (start,_) : _ -> (off . P.text . Text.take start) t <> go groups - where - go = \case - [] -> error "unpossible I think" - (start, len) : (start2, len2) : groups - | start + len == start2 -> - -- avoid an on/off since there's no gap between groups - go ((start, len + len2) : groups) - (start, len) : groups -> - let (selected, remaining) = Text.splitAt len . Text.drop start $ t - in (on . P.text) selected <> case groups of - [] -> (off . P.text) remaining - (start2, _) : _ -> - (off . P.text . Text.drop (start + len) . Text.take start2 $ t) - <> go groups diff --git a/parser-typechecker/src/Unison/Util/Free.hs b/parser-typechecker/src/Unison/Util/Free.hs deleted file mode 100644 index f10e8c3cd4..0000000000 --- a/parser-typechecker/src/Unison/Util/Free.hs +++ /dev/null @@ -1,68 +0,0 @@ -{-# Language ExistentialQuantification, Rank2Types #-} - -module Unison.Util.Free where - -import Unison.Prelude hiding (fold) - --- We would use another package for this if we knew of one. 
--- Neither http://hackage.haskell.org/package/free --- nor http://hackage.haskell.org/package/free-functors --- nor http://hackage.haskell.org/package/freer --- appear to have this. - -data Free f a = Pure a | forall x . Bind (f x) (x -> Free f a) - -eval :: f a -> Free f a -eval fa = Bind fa Pure - --- unfold :: (v -> f (Either a v)) -> v -> Free f a - -fold :: Monad m => (forall x. f x -> m x) -> Free f a -> m a -fold f m = case m of - Pure a -> pure a - Bind x k -> f x >>= fold f . k - -unfold :: (v -> Either a (f v)) -> v -> Free f a -unfold f seed = case f seed of - Left a -> Pure a - Right fv -> Bind fv (unfold f) - -unfold' :: (v -> Free f (Either a v)) -> v -> Free f a -unfold' f seed = f seed >>= either Pure (unfold' f) - -unfoldM :: (Traversable f, Applicative m, Monad m) - => (b -> m (Either a (f b))) -> b -> m (Free f a) -unfoldM f seed = do - e <- f seed - case e of - Left a -> pure (Pure a) - Right fb -> free <$> traverse (unfoldM f) fb - -free :: Traversable f => f (Free f a) -> Free f a -free = go . sequence - where go (Pure fa) = Bind fa Pure - go (Bind fi f) = Bind fi (go . f) - - -foldWithIndex :: forall f m a . Monad m => (forall x. Int -> f x -> m x) -> Free f a -> m a -foldWithIndex f m = go 0 f m - where go :: Int -> (forall x. Int -> f x -> m x) -> Free f a -> m a - go starting f m = case m of - Pure a -> pure a - Bind x k -> (f starting x) >>= (go $ starting + 1) f . 
k - - -instance Functor (Free f) where - fmap = liftM - -instance Monad (Free f) where - return = Pure - Pure a >>= f = f a - Bind fx f >>= g = Bind fx (f >=> g) - - -instance Applicative (Free f) where - pure = Pure - (<*>) = ap - -instance MonadTrans Free where lift = eval diff --git a/parser-typechecker/src/Unison/Util/Less.hs b/parser-typechecker/src/Unison/Util/Less.hs deleted file mode 100644 index 145a20cfb7..0000000000 --- a/parser-typechecker/src/Unison/Util/Less.hs +++ /dev/null @@ -1,25 +0,0 @@ -module Unison.Util.Less where - -import System.Process -import System.IO (hPutStr, hClose) -import Control.Exception.Extra (ignore) -import Unison.Prelude (void) - -less :: String -> IO () -less str = do - let args = ["--no-init" -- don't clear the screen on exit - ,"--raw-control-chars" -- pass through colors and stuff - ,"--prompt=[less] Use space/arrow keys to navigate, or 'q' to return to ucm:" - ,"--quit-if-one-screen" -- self-explanatory - ] - (Just stdin, _stdout, _stderr, pid) - <- createProcess (proc "less" args) { std_in = CreatePipe } - - -- If `less` exits before consuming all of stdin, `hPutStr` will crash. - ignore $ hPutStr stdin str - - -- If `less` has already exited, hClose throws an exception. - ignore $ hClose stdin - - -- Wait for `less` to exit. - void $ waitForProcess pid diff --git a/parser-typechecker/src/Unison/Util/Logger.hs b/parser-typechecker/src/Unison/Util/Logger.hs deleted file mode 100644 index 762f16234a..0000000000 --- a/parser-typechecker/src/Unison/Util/Logger.hs +++ /dev/null @@ -1,109 +0,0 @@ --- | Small logging library. Typical usage, import qualified: --- --- import qualified Unison.Util.Logger as L --- --- do --- logger <- L.atomic . L.atInfo . L.scope "worker" . L.toHandle $ stderr --- L.warn logger "WARNING!!!" 
--- L.debug logger "Debug message, will be ignored" --- let logger2 = L.atDebug logger --- L.debug logger2 "Debug message, will be printed" --- logger' <- L.at L.warnLevel --- -module Unison.Util.Logger where - -import Unison.Prelude - -import Control.Concurrent (forkIO) -import Control.Concurrent.MVar -import Control.Exception (bracket) -import Data.List -import System.IO (Handle, hPutStrLn, hGetLine, stdout, stderr) -import System.IO.Error (isEOFError) - -type Level = Int -type Scope = [String] - -data Logger = - Logger { getScope :: !Scope - , prefix :: String -> String - , getLevel :: !Level - , raw :: String -> IO () } - --- | Ensure at most one message is logged at the same time -atomic :: Logger -> IO Logger -atomic logger = do - lock <- newMVar () - pure $ - let raw' msg = bracket (takeMVar lock) (\_ -> putMVar lock ()) (\_ -> raw logger msg) - in logger { raw = raw' } - -toHandle :: Handle -> Logger -toHandle h = logger (hPutStrLn h) - -toStandardError :: Logger -toStandardError = toHandle stderr - -toStandardOut :: Logger -toStandardOut = toHandle stdout - -logHandleAt :: Logger -> Level -> Handle -> IO () -logHandleAt logger lvl h - | lvl > getLevel logger = pure () - | otherwise = void . 
forkIO $ loop where - loop = do - line <- try (hGetLine h) - case line of - Left ioe | isEOFError ioe -> logAt (scope "logHandleAt" logger) 3 "EOF" - | otherwise -> logAt (scope "logHandleAt" logger) 2 (show ioe) - Right line -> logAt logger lvl line >> loop - -logAt' :: Logger -> Level -> IO String -> IO () -logAt' logger lvl msg | lvl <= getLevel logger = msg >>= \msg -> raw logger (prefix logger msg) - | otherwise = pure () - -logAt :: Logger -> Level -> String -> IO () -logAt logger lvl msg | lvl <= getLevel logger = raw logger (prefix logger msg) - | otherwise = pure () - -scope :: String -> Logger -> Logger -scope s (Logger s0 _ lvl raw) = Logger s' prefix' lvl raw where - prefix' msg = prefix ++ msg - prefix = "[" ++ intercalate " " s' ++ "] " - s' = s:s0 - -scope' :: [String] -> Logger -> Logger -scope' s l = foldr scope l s - -logger :: (String -> IO ()) -> Logger -logger log = Logger [] id 0 log - -error, warn, info, debug, trace :: Logger -> String -> IO () -error l = logAt l errorLevel -warn l = logAt l warnLevel -info l = logAt l infoLevel -debug l = logAt l debugLevel -trace l = logAt l traceLevel - -error', warn', info', debug', trace' :: Logger -> IO String -> IO () -error' l = logAt' l errorLevel -warn' l = logAt' l warnLevel -info' l = logAt' l infoLevel -debug' l = logAt' l debugLevel -trace' l = logAt' l traceLevel - -errorLevel, warnLevel, infoLevel, debugLevel, traceLevel :: Level -(errorLevel, warnLevel, infoLevel, debugLevel, traceLevel) = (1,2,3,4,5) - -at :: Level -> Logger -> Logger -at lvl logger = logger { getLevel = lvl } - -atError, atWarn, atInfo, atDebug, atTrace :: Logger -> Logger -(atError, atWarn, atInfo, atDebug, atTrace) = - (at errorLevel, at warnLevel, at infoLevel, at debugLevel, at traceLevel) - -increment :: Logger -> Logger -increment (Logger s p n l) = Logger s p (n+1) l - -decrement :: Logger -> Logger -decrement (Logger s p n l) = Logger s p (n-1) l diff --git a/parser-typechecker/src/Unison/Util/Map.hs 
b/parser-typechecker/src/Unison/Util/Map.hs deleted file mode 100644 index 4df2bc54e6..0000000000 --- a/parser-typechecker/src/Unison/Util/Map.hs +++ /dev/null @@ -1,16 +0,0 @@ -module Unison.Util.Map - ( unionWithM - ) where - -import qualified Control.Monad as Monad -import qualified Data.Map as Map - -import Unison.Prelude - -unionWithM :: forall m k a. - (Monad m, Ord k) => (a -> a -> m a) -> Map k a -> Map k a -> m (Map k a) -unionWithM f m1 m2 = Monad.foldM go m1 $ Map.toList m2 where - go :: Map k a -> (k, a) -> m (Map k a) - go m1 (k, a2) = case Map.lookup k m1 of - Just a1 -> do a <- f a1 a2; pure $ Map.insert k a m1 - Nothing -> pure $ Map.insert k a2 m1 diff --git a/parser-typechecker/src/Unison/Util/Menu.hs b/parser-typechecker/src/Unison/Util/Menu.hs deleted file mode 100644 index 90a49a907d..0000000000 --- a/parser-typechecker/src/Unison/Util/Menu.hs +++ /dev/null @@ -1,286 +0,0 @@ -{-# LANGUAGE OverloadedLists #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Util.Menu (menu1, menuN, groupMenuN) where - -import Unison.Prelude - -import Data.List (find, isPrefixOf) -import qualified Data.Set as Set -import Data.Strings (strPadLeft) -import qualified Text.Read as Read -import Unison.Util.AnnotatedText (textEmpty) -import Unison.Util.ColorText (ColorText, toANSI) -import Unison.Util.Monoid (intercalateMap) --- utility - command line menus - -type Caption = ColorText -type Stylized = ColorText -type Keyword = String -type Console = IO String - -renderChoices :: forall a mc - . (a -> Stylized) - -> (mc -> Stylized) - -> [([Keyword], [a])] - -> [([Keyword], mc)] - -> (Keyword -> Bool) - -> Stylized -renderChoices render renderMeta groups metas isSelected = - showGroups <> showMetas - where - showGroups = intercalateMap "\n" format numberedGroups <> - if (not.null) groups && (not.null) metas then "\n\n" else "" - showMetas = intercalateMap "\n" (("["<>) . (<>"]") . renderMeta . 
snd) metas - numberedGroups :: [(([Keyword], [a]), Int)] - numberedGroups = zip groups [1..] - numberWidth = (1+) . floor @Double . logBase 10 . fromIntegral $ length groups - format :: (([Keyword], [a]), Int) -> Stylized - format ((keywords, as), number) = - intercalateMap - "\n" - (format1 number (length as) (any isSelected keywords)) - (zip as [0..]) - format1 :: Int -> Int -> Bool -> (a, Int) -> Stylized - format1 groupNumber groupSize isSelected (a, index) = - header <> bracket <> render a - where - header :: (Semigroup s, IsString s) => s - header = - (if representativeRow - then (if isSelected then "*" else " ") - <> fromString (strPadLeft ' ' numberWidth (show groupNumber)) - <> ". " - else fromString $ replicate (numberWidth + 3) ' ') - representativeRow :: Bool - representativeRow = index == (groupSize - 1) `div` 2 - bracket :: IsString s => s - bracket = - if maxGroupSize > 1 then - if groupSize == 1 then "╶" - else if index == 0 then "┌" - else if index < groupSize - 1 then "│" - else "└" - else "" - maxGroupSize = maximum (length . snd <$> groups) - - -{- - - - 1 ping - pong - 2 foo - 3 bar - - [cancel] - [help] - - >> ping - - -} - -menu1 :: forall a mc - . Console - -> Caption - -> (a -> Stylized) - -> (mc -> Stylized) - -> [(Keyword, a)] - -> [(Keyword, mc)] - -> Maybe Keyword - -> IO (Maybe (Either mc a)) -menu1 console caption render renderMeta groups metas initial = do - let groups' = [ ([k], [a]) | (k, a) <- groups ] - metas' = [ ([k], mc) | (k, mc) <- metas ] - groupMenu1 console caption render renderMeta groups' metas' initial >>= \case - Just (Right [a]) -> pure (Just (Right a)) - Just (Left mc) -> pure (Just (Left mc)) - Nothing -> pure Nothing - _ -> error "unpossible; by construction we should only get singleton lists back" - -_repeatMenu1 :: forall a mc - . 
Console - -> Caption - -> (a -> Stylized) - -> (mc -> Stylized) - -> [([Keyword], [a])] - -> [([Keyword], mc)] - -> Maybe Keyword - -> IO (Either mc [a]) -_repeatMenu1 console caption render renderMeta groups metas initial = - groupMenu1 console caption render renderMeta groups metas initial >>= \case - Just x -> pure x - Nothing -> _repeatMenu1 console caption render renderMeta groups metas initial - -groupMenu1 :: forall a mc - . Console - -> Caption - -> (a -> Stylized) - -> (mc -> Stylized) - -> [([Keyword], [a])] - -> [([Keyword], mc)] - -> Maybe Keyword - -> IO (Maybe (Either mc [a])) -groupMenu1 console caption render renderMeta groups metas initial = do - when ((not . textEmpty) caption) $ do - print . toANSI $ caption - putStrLn "" - print . toANSI $ renderChoices render renderMeta groups metas (`elem` initial) - resume - where - restart = groupMenu1 console caption render renderMeta groups metas initial - -- restart with an updated caption - restart' caption groups metas initial = - groupMenu1 console caption render renderMeta groups metas initial - resume = do - putStr "\n>> " - input <- console - case words input of - [] -> useExistingSelections groups initial - input : _ -> case Read.readMaybe input of - Just i -> pickGroupByNumber i - Nothing -> pickGroupByPrefix input - where - pickGroupByNumber :: Int -> IO (Maybe (Either mc [a])) - pickGroupByNumber i = case atMay groups (i-1) of - Nothing -> do - putStrLn $ "Please pick a number from 1 to " ++ - show (length groups) ++ "." - restart - Just (_keywords, as) -> pure (Just (Right as)) - pickGroupByPrefix :: String -> IO (Maybe (Either mc [a])) - pickGroupByPrefix s = case matchingItems groups metas s of - ([],[]) -> do - putStrLn $ "Sorry, '" ++ s ++ "' didn't match anything." 
- resume - ([(_, as)],[]) -> pure (Just (Right as)) - ([], [(_, mc)]) -> pure (Just (Left mc)) - (groups, metas) -> - restart' - "Please clarify your selection, or press Enter to back up:" - groups metas Nothing >>= \case - Nothing -> restart - x -> pure x - matchingItems :: - forall a mc. [([Keyword], [a])] -> [([Keyword], mc)] -> String - -> ([([Keyword], [a])], [([Keyword], mc)]) - matchingItems groups metas s = - (filter (any (s `isPrefixOf`) . fst) groups - ,filter (any (s `isPrefixOf`) . fst) metas) - useExistingSelections :: - [([Keyword], [a])] -> Maybe Keyword -> IO (Maybe (Either mc [a])) - useExistingSelections groups initial = case initial of - Nothing -> pure Nothing - Just initial -> - case findMatchingGroup [initial] groups of - Just group -> pure (Just (Right group)) - Nothing -> error $ - "Default selection \"" ++ show initial ++ "\"" ++ - " not found in choice groups:\n" ++ show (fst <$> groups) - findMatchingGroup :: forall a. [Keyword] -> [([Keyword], [a])] -> Maybe [a] - findMatchingGroup initials groups = - snd <$> find (\(keywords, _as) -> any (`elem` keywords) initials) groups - - -{- - - - 1 ping - pong - 2 foo - 3 bar - - [all] - [cancel] - [help] - - >> 1 3 - >> * - - -} -menuN :: Console - -> Caption - -> (a -> Stylized) - -> (mc -> Stylized) - -> [([Keyword], [a])] - -> [([Keyword], mc)] - -> [Keyword] - -> IO (Either mc [[a]]) -menuN _console _caption _render _renderMeta _groups _metas _initials = pure (Right []) - -groupMenuN :: forall a mc. Ord a - => Console - -> Caption - -> (a -> Stylized) - -> (mc -> Stylized) - -> [([Keyword], [a])] - -> [([Keyword], mc)] - -> [[Keyword]] - -> IO (Either mc [[a]]) -groupMenuN console caption render renderMeta groups metas initials = - groupMenuN' console caption render renderMeta groups metas (Set.fromList initials) - -groupMenuN' :: forall a mc. 
Ord a - => Console - -> Caption - -> (a -> Stylized) - -> (mc -> Stylized) - -> [([Keyword], [a])] - -> [([Keyword], mc)] - -> Set [Keyword] - -> IO (Either mc [[a]]) -groupMenuN' console caption render renderMeta groups metas initials = do - when ((not . textEmpty) caption) $ do - print . toANSI $ caption - putStrLn "" - print . toANSI $ renderChoices render renderMeta groups metas ((`any` initials) . elem) - resume initials - where - restart initials = groupMenuN' console caption render renderMeta groups metas initials - -- restart with an updated caption - restart' caption groups metas initials = - groupMenuN' console caption render renderMeta groups metas initials - resume :: Set [Keyword] -> IO (Either mc [[a]]) - resume initials = do - putStr "\n>> " - input <- console - case words input of - [] -> useExistingSelections groups initials - input : _ -> case Read.readMaybe input of - Just i -> pickGroupByNumber i - Nothing -> pickGroupByPrefix input - where - pickGroupByNumber :: Int -> IO (Either mc [[a]]) - pickGroupByNumber i = case atMay groups (i-1) of - Nothing -> do - putStrLn $ "Please pick a number from 1 to " ++ - show (length groups) ++ "." - restart initials - Just (kw, _) -> restart (Set.insert kw initials) - pickGroupByPrefix :: String -> IO (Either mc [[a]]) - pickGroupByPrefix s = case matchingItems groups metas s of - ([],[]) -> do - putStrLn $ "Sorry, '" ++ s ++ "' didn't match anything." - resume initials - ([], [(_, mc)]) -> pure (Left mc) - ([(kw, _)],[]) -> restart (Set.insert kw initials) - (_, _) -> - restart' - "Your prefix matched both groups and commands; please choose by number or use a longer prefix:" - groups metas initials - matchingItems :: - forall a mc. [([Keyword], [a])] -> [([Keyword], mc)] -> String - -> ([([Keyword], [a])], [([Keyword], mc)]) - matchingItems groups metas s = - (filter (any (s `isPrefixOf`) . fst) groups - ,filter (any (s `isPrefixOf`) . 
fst) metas) - useExistingSelections :: - [([Keyword], [a])] -> Set [Keyword] -> IO (Either mc [[a]]) - useExistingSelections groups initials = pure . pure $ - foldr go [] initials where - go kws selections = case findMatchingGroup kws groups of - Just as -> as : selections - Nothing -> selections - findMatchingGroup :: forall a. [Keyword] -> [([Keyword], [a])] -> Maybe [a] - findMatchingGroup initials groups = - snd <$> find (\(keywords, _as) -> any (`elem` keywords) initials) groups diff --git a/parser-typechecker/src/Unison/Util/PinBoard.hs b/parser-typechecker/src/Unison/Util/PinBoard.hs deleted file mode 100644 index f7482f94a4..0000000000 --- a/parser-typechecker/src/Unison/Util/PinBoard.hs +++ /dev/null @@ -1,143 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - --- | A utility type for saving memory in the presence of many duplicate ByteStrings, etc. If you have data that may be --- a redundant duplicate, try pinning it to a pin board, and use the result of that operation instead. --- --- Without a pin board: --- --- x ───── "38dce848c8c829c62" --- y ───── "38dce848c8c829c62" --- z ───── "d2518f260535b927b" --- --- With a pin board: --- --- x ───── "38dce848c8c829c62" ┄┄┄┄┄┐ --- y ────────┘ board --- z ───── "d2518f260535b927b" ┄┄┄┄┄┘ --- --- ... and after x is garbage collected: --- --- "38dce848c8c829c62" ┄┄┄┄┄┐ --- y ────────┘ board --- z ───── "d2518f260535b927b" ┄┄┄┄┄┘ --- --- ... 
and after y is garbage collected: --- --- board --- z ───── "d2518f260535b927b" ┄┄┄┄┄┘ -module Unison.Util.PinBoard - ( PinBoard, - new, - pin, - - -- * For debugging - debugDump, - debugSize, - ) -where - -import Control.Concurrent.MVar -import Data.Foldable (find, foldlM) -import Data.Functor.Compose -import Data.Hashable (Hashable, hash) -import qualified Data.IntMap as IntMap -import Data.IntMap.Strict (IntMap) -import qualified Data.Text as Text -import qualified Data.Text.IO as Text -import Data.Tuple (swap) -import System.Mem.Weak (Weak, deRefWeak, mkWeakPtr) -import Unison.Prelude - --- | A "pin board" is a place to pin values; semantically, it's a set, but differs in a few ways: --- --- * Pinned values aren't kept alive by the pin board, they might be garbage collected at any time. --- * If you try to pin a value that's already pinned (per its Eq instance), the pinned one will be returned --- instead. --- * It has a small API: just 'new' and 'pin'. -newtype PinBoard a - = PinBoard (MVar (IntMap (Bucket a))) - -new :: MonadIO m => m (PinBoard a) -new = - liftIO (PinBoard <$> newMVar IntMap.empty) - -pin :: forall a m. (Eq a, Hashable a, MonadIO m) => PinBoard a -> a -> m a -pin (PinBoard boardVar) x = liftIO do - modifyMVar boardVar \board -> - swap <$> getCompose (IntMap.alterF alter n board) - where - -- Pin to pin board at a hash key: either there's nothing there (ifMiss), or there's a nonempty bucket (ifHit). - alter :: Maybe (Bucket a) -> Compose IO ((,) a) (Maybe (Bucket a)) - alter = - Compose . maybe ifMiss ifHit - -- Pin a new value: create a new singleton bucket. - ifMiss :: IO (a, Maybe (Bucket a)) - ifMiss = - (x,) . Just <$> newBucket x finalizer - -- Possibly pin a new value: if it already exists in the bucket, return that one instead. Otherwise, insert it. - ifHit :: Bucket a -> IO (a, Maybe (Bucket a)) - ifHit bucket = - bucketFind bucket x >>= \case - -- Hash collision: the bucket has things in it, but none are the given value. Insert. 
- Nothing -> (x,) . Just <$> bucketAdd bucket x finalizer - -- The thing being inserted already exists; return it. - Just y -> pure (y, Just bucket) - -- When each thing pinned here is garbage collected, compact its bucket. - finalizer :: IO () - finalizer = - modifyMVar_ boardVar (IntMap.alterF (maybe (pure Nothing) bucketCompact) n) - n :: Int - n = - hash x - -debugDump :: MonadIO m => (a -> Text) -> PinBoard a -> m () -debugDump f (PinBoard boardVar) = liftIO do - board <- readMVar boardVar - contents <- (traverse . traverse) bucketToList (IntMap.toList board) - Text.putStrLn (Text.unlines ("PinBoard" : map row contents)) - where - row (n, xs) = - Text.pack (show n) <> " => " <> Text.pack (show (map f xs)) - -debugSize :: PinBoard a -> IO Int -debugSize (PinBoard boardVar) = do - board <- readMVar boardVar - foldlM step 0 board - where - step :: Int -> Bucket a -> IO Int - step acc = - bucketToList >=> \xs -> pure (acc + length xs) - --- | A bucket of weak pointers to different values that all share a hash. -newtype Bucket a - = Bucket [Weak a] -- Invariant: non-empty list - --- | A singleton bucket. -newBucket :: a -> IO () -> IO (Bucket a) -newBucket = - bucketAdd (Bucket []) - --- | Add a value to a bucket. -bucketAdd :: Bucket a -> a -> IO () -> IO (Bucket a) -bucketAdd (Bucket weaks) x finalizer = do - weak <- mkWeakPtr x (Just finalizer) - pure (Bucket (weak : weaks)) - --- | Drop all garbage-collected values from a bucket. If none remain, returns Nothing. -bucketCompact :: Bucket a -> IO (Maybe (Bucket a)) -bucketCompact (Bucket weaks) = - bucketFromList <$> mapMaybeM (\w -> (w <$) <$> deRefWeak w) weaks - --- | Look up a value in a bucket per its Eq instance. 
-bucketFind :: Eq a => Bucket a -> a -> IO (Maybe a) -bucketFind bucket x = - find (== x) <$> bucketToList bucket - -bucketFromList :: [Weak a] -> Maybe (Bucket a) -bucketFromList = \case - [] -> Nothing - weaks -> Just (Bucket weaks) - -bucketToList :: Bucket a -> IO [a] -bucketToList (Bucket weaks) = - mapMaybeM deRefWeak weaks diff --git a/parser-typechecker/src/Unison/Util/Pretty.hs b/parser-typechecker/src/Unison/Util/Pretty.hs deleted file mode 100644 index e9621257ee..0000000000 --- a/parser-typechecker/src/Unison/Util/Pretty.hs +++ /dev/null @@ -1,903 +0,0 @@ -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Util.Pretty ( - Pretty, - ColorText, - align, - align', - alternations, - backticked, - backticked', - boxForkLeft, - boxLeft, - boxLeftM, - boxRight, - boxRightM, - bulleted, - bracket, - -- breakable - callout, - excerptSep, - excerptSep', - excerptColumn2, - excerptColumn2Headed, - warnCallout, blockedCallout, fatalCallout, okCallout, - column2, - column2sep, - column2Header, - column2M, - column2UnzippedM, - column3, - column3M, - column3UnzippedM, - column3sep, - commas, - commented, - oxfordCommas, - oxfordCommasWith, - plural, - dashed, - flatMap, - group, - hang', - hang, - hangUngrouped', - hangUngrouped, - indent, - indentAfterNewline, - indentN, - indentNonEmptyN, - indentNAfterNewline, - isMultiLine, - leftPad, - lines, - linesNonEmpty, - linesSpaced, - lit, - map, - mayColumn2, - nest, - num, - newline, - lineSkip, - nonEmpty, - numbered, - numberedColumn2, - numberedColumn2Header, - numberedList, - orElse, - orElses, - paragraphyText, - parenthesize, - parenthesizeCommas, - parenthesizeIf, - render, - renderUnbroken, - rightPad, - sep, - sepNonEmpty, - sepSpaced, - shown, - softbreak, - spaceIfBreak, - spaced, - spacedMap, - spacesIfBreak, - string, - surroundCommas, - syntaxToColor, - text, - toANSI, - toAnsiUnbroken, - toHTML, - toPlain, - toPlainUnbroken, - underline, 
- withSyntax, - wrap, - wrapColumn2, - wrapString, - black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, - border, - Width, - -- * Exported for testing - delta, - Delta, - ) where - -import Unison.Prelude - -import Data.Bifunctor ( second ) -import Data.Char ( isSpace ) -import Data.List ( intersperse ) -import Prelude hiding ( lines , map ) -import Unison.Util.AnnotatedText ( annotateMaybe ) -import qualified Unison.Util.ColorText as CT -import qualified Unison.Util.SyntaxText as ST -import Unison.Util.Monoid ( intercalateMap ) -import qualified Data.ListLike as LL -import qualified Data.Sequence as Seq -import qualified Data.Text as Text -import Control.Monad.Identity (runIdentity, Identity(..)) - -type Width = Int -type ColorText = CT.ColorText - -data Pretty s = Pretty { delta :: Delta, out :: F s (Pretty s) } deriving Eq - -instance Functor Pretty where - fmap f (Pretty d o) = Pretty d (mapLit f $ fmap (fmap f) o) - -data F s r - = Empty - -- | A group adds a level of breaking. Layout tries not to break a group - -- unless needed to fit in available width. Breaking is done "outside in". 
- -- - -- (a | b) <> (c | d) will try (a <> c), then (b <> d) - -- - -- (a | b) <> group (c | d) will try (a <> c), then (b <> c), then (b <> d) - | Group r - | Lit s - | Wrap (Seq r) - | OrElse r r - | Append (Seq r) - deriving (Eq, Show, Foldable, Traversable, Functor) - -mapLit :: (s -> t) -> F s r -> F t r -mapLit f (Lit s) = Lit (f s) -mapLit _ Empty = Empty -mapLit _ (Group r) = Group r -mapLit _ (Wrap s) = Wrap s -mapLit _ (OrElse r s) = OrElse r s -mapLit _ (Append s) = Append s - -lit :: (IsString s, LL.ListLike s Char) => s -> Pretty s -lit s = lit' (foldMap chDelta $ LL.toList s) s - -lit' :: Delta -> s -> Pretty s -lit' d s = Pretty d (Lit s) - -orElse :: Pretty s -> Pretty s -> Pretty s -orElse p1 p2 = Pretty (delta p1) (OrElse p1 p2) - -orElses :: [Pretty s] -> Pretty s -orElses [] = mempty -orElses ps = foldr1 orElse ps - -wrapImpl :: IsString s => [Pretty s] -> Pretty s -wrapImpl [] = mempty -wrapImpl (p:ps) = wrap_ . Seq.fromList $ - p : fmap (\p -> (" " <> p) `orElse` (newline <> p)) ps - -wrapImplPreserveSpaces :: (LL.ListLike s Char, IsString s) => [Pretty s] -> Pretty s -wrapImplPreserveSpaces = \case - [] -> mempty - (p:ps) -> wrap_ . Seq.fromList $ p : fmap f ps - where - startsWithSpace p = case out p of - (Lit s) -> fromMaybe False (fmap (isSpaceNotNewline . fst) $ LL.uncons s) - _ -> False - f p | startsWithSpace p = p `orElse` newline - f p = p - -isSpaceNotNewline :: Char -> Bool -isSpaceNotNewline c = isSpace c && not (c == '\n') - -wrapString :: (LL.ListLike s Char, IsString s) => String -> Pretty s -wrapString s = wrap (lit $ fromString s) - --- Wrap text, preserving whitespace (apart from at the wrap points.) --- Used in particular for viewing/displaying doc literals. --- Should be understood in tandem with TermParser.docNormalize. --- See also unison-src/transcripts/doc-formatting.md. -paragraphyText :: (LL.ListLike s Char, IsString s) => Text -> Pretty s -paragraphyText = sep "\n" . fmap (wrapPreserveSpaces . text) . 
Text.splitOn "\n" - -wrap :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -wrap p = wrapImpl (toLeaves [p]) where - toLeaves [] = [] - toLeaves (hd:tl) = case out hd of - Empty -> toLeaves tl - Lit s -> wordify s ++ toLeaves tl - Group _ -> hd : toLeaves tl - OrElse a _ -> toLeaves (a:tl) - Wrap _ -> hd : toLeaves tl - Append hds -> toLeaves (toList hds ++ tl) - wordify s0 = let s = LL.dropWhile isSpace s0 in - if LL.null s then [] - else case LL.break isSpace s of (word1, s) -> lit word1 : wordify s - --- Does not insert spaces where none were present, and does not collapse --- sequences of spaces into one. --- It'd be a bit painful to just replace wrap with the following version, because --- lots of OutputMessages code depends on wrap's behaviour of sometimes adding --- extra spaces. -wrapPreserveSpaces :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -wrapPreserveSpaces p = wrapImplPreserveSpaces (toLeaves [p]) where - toLeaves [] = [] - toLeaves (hd:tl) = case out hd of - Empty -> toLeaves tl - Lit s -> (fmap lit $ alternations isSpaceNotNewline s) ++ toLeaves tl - Group _ -> hd : toLeaves tl - OrElse a _ -> toLeaves (a:tl) - Wrap _ -> hd : toLeaves tl - Append hds -> toLeaves (toList hds ++ tl) - --- Cut a list every time a predicate changes. Produces a list of --- non-empty lists. 
-alternations :: (LL.ListLike s c) => (c -> Bool) -> s -> [s] -alternations p s = reverse $ go True s [] where - go _ s acc | LL.null s = acc - go w s acc = go (not w) rest acc' where - (t, rest) = LL.span p' s - p' = if w then p else (\x -> not (p x)) - acc' = if (LL.null t) then acc else t : acc - -wrap_ :: Seq (Pretty s) -> Pretty s -wrap_ ps = Pretty (foldMap delta ps) (Wrap ps) - -group :: Pretty s -> Pretty s -group p = Pretty (delta p) (Group p) - -toANSI :: Width -> Pretty CT.ColorText -> String -toANSI avail p = CT.toANSI (render avail p) - -toAnsiUnbroken :: Pretty ColorText -> String -toAnsiUnbroken p = CT.toANSI (renderUnbroken p) - -toPlain :: Width -> Pretty CT.ColorText -> String -toPlain avail p = CT.toPlain (render avail p) - -toHTML :: String -> Width -> Pretty CT.ColorText -> String -toHTML cssPrefix avail p = CT.toHTML cssPrefix (render avail p) - -toPlainUnbroken :: Pretty ColorText -> String -toPlainUnbroken p = CT.toPlain (renderUnbroken p) - -syntaxToColor :: Pretty ST.SyntaxText -> Pretty ColorText -syntaxToColor = fmap $ annotateMaybe . fmap CT.defaultColors - --- set the syntax, overriding any present syntax -withSyntax :: ST.Element -> Pretty ST.SyntaxText -> Pretty ST.SyntaxText -withSyntax e = fmap $ ST.syntax e - -renderUnbroken :: (Monoid s, IsString s) => Pretty s -> s -renderUnbroken = render maxBound - -render :: (Monoid s, IsString s) => Width -> Pretty s -> s -render availableWidth p = go mempty [Right p] where - go _ [] = mempty - go cur (p:rest) = case p of - Right p -> -- `p` might fit, let's try it! 
- if p `fits` cur then flow p <> go (cur <> delta p) rest - else go cur (Left p : rest) -- nope, switch to breaking mode - Left p -> case out p of -- `p` requires breaking - Append ps -> go cur ((Left <$> toList ps) <> rest) - Empty -> go cur rest - Group p -> go cur (Right p : rest) - -- Note: literals can't be broken further so they're - -- added to output unconditionally - Lit l -> l <> go (cur <> delta p) rest - OrElse _ p -> go cur (Right p : rest) - Wrap ps -> go cur ((Right <$> toList ps) <> rest) - - flow p = case out p of - Append ps -> foldMap flow ps - Empty -> mempty - Group p -> flow p - Lit s -> s - OrElse p _ -> flow p - Wrap ps -> foldMap flow ps - - fits p cur = - maxCol (surgery cur <> delta p) < availableWidth - where - -- Surgically modify 'cur' to pretend it has not exceeded availableWidth. - -- This is necessary because sometimes things cannot be split and *must* - -- exceed availableWidth; in this case, we do not want to entirely "blame" - -- the new proposed (cur <> delta p) for this overflow. - -- - -- For example, when appending - -- - -- availableWidth - -- | - -- xxx | - -- yyyyyy - -- zz | - -- - -- with - -- - -- aa | - -- bb | - -- - -- we want to end up with - -- - -- xxx | - -- yyyyyy - -- zzaa| - -- bb | - -- - surgery = \case - SingleLine c -> SingleLine (min c (availableWidth-1)) - MultiLine fc lc mc -> MultiLine fc lc (min mc (availableWidth-1)) - -newline :: IsString s => Pretty s -newline = "\n" - -lineSkip :: IsString s => Pretty s -lineSkip = newline <> newline - -spaceIfBreak :: IsString s => Pretty s -spaceIfBreak = "" `orElse` " " - -spacesIfBreak :: IsString s => Int -> Pretty s -spacesIfBreak n = "" `orElse` fromString (replicate n ' ') - -softbreak :: IsString s => Pretty s -softbreak = " " `orElse` newline - -spaced :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s -spaced = intercalateMap softbreak id - -spacedMap :: (Foldable f, IsString s) => (a -> Pretty s) -> f a -> Pretty s -spacedMap f as = spaced . 
fmap f $ toList as - -commas :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s -commas = intercalateMap ("," <> softbreak) id - -oxfordCommas :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s -oxfordCommas = oxfordCommasWith "" - --- Like `oxfordCommas`, but attaches `end` at the end (without a space). --- For example, `oxfordCommasWith "."` will attach a period. -oxfordCommasWith - :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s -oxfordCommasWith end xs = case toList xs of - [] -> "" - [x] -> group (x <> end) - [x, y] -> x <> " and " <> group (y <> end) - xs -> - intercalateMap ("," <> softbreak) id (init xs) - <> "," - <> softbreak - <> "and" - <> softbreak - <> group (last xs <> end) - -parenthesizeCommas :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s -parenthesizeCommas = surroundCommas "(" ")" - -surroundCommas - :: (Foldable f, IsString s) - => Pretty s - -> Pretty s - -> f (Pretty s) - -> Pretty s -surroundCommas start stop fs = - group - $ start - <> spaceIfBreak - <> intercalateMap ("," <> softbreak <> align) id fs - <> stop - where align = spacesIfBreak (preferredWidth start + 1) - -sepSpaced :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s -sepSpaced between = sep (between <> softbreak) - -sep :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s -sep between = intercalateMap between id - -sepNonEmpty :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s -sepNonEmpty between ps = sep between (nonEmpty ps) - --- if list is too long, adds `... 22 more` to the end -excerptSep :: IsString s => Maybe Int -> Pretty s -> [Pretty s] -> Pretty s -excerptSep maxCount = - excerptSep' maxCount (\i -> group ("... 
" <> shown i <> " more")) - -excerptSep' - :: IsString s - => Maybe Int - -> (Int -> Pretty s) - -> Pretty s - -> [Pretty s] - -> Pretty s -excerptSep' maxCount summarize s ps = case maxCount of - Just max | length ps > max -> - sep s (take max ps) <> summarize (length ps - max) - _ -> sep s ps - -nonEmpty :: (Foldable f, IsString s) => f (Pretty s) -> [Pretty s] -nonEmpty (toList -> l) = case l of - (out -> Empty) : t -> nonEmpty t - h : t -> h : nonEmpty t - [] -> [] - -parenthesize :: IsString s => Pretty s -> Pretty s -parenthesize p = group $ "(" <> p <> ")" - -parenthesizeIf :: IsString s => Bool -> Pretty s -> Pretty s -parenthesizeIf False s = s -parenthesizeIf True s = parenthesize s - -lines :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s -lines = intercalateMap (append newline) id where - append p = Pretty (delta p) (Append $ Seq.singleton p) - -linesNonEmpty :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s -linesNonEmpty = lines . nonEmpty - -linesSpaced :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s -linesSpaced ps = lines (intersperse "" $ toList ps) - -prefixed :: (Foldable f, LL.ListLike s Char, IsString s) - => Pretty s -> Pretty s -> f (Pretty s) -> Pretty s -prefixed first rest = - intercalateMap newline (\b -> first <> indentAfterNewline rest b) - -bulleted - :: (Foldable f, LL.ListLike s Char, IsString s) => f (Pretty s) -> Pretty s -bulleted = prefixed "* " " " - -dashed - :: (Foldable f, LL.ListLike s Char, IsString s) => f (Pretty s) -> Pretty s -dashed = prefixed "- " " " - -commented - :: (Foldable f, LL.ListLike s Char, IsString s) => f (Pretty s) -> Pretty s -commented = prefixed "-- " "-- " - -numbered - :: (Foldable f, LL.ListLike s Char, IsString s) - => (Int -> Pretty s) - -> f (Pretty s) - -> Pretty s -numbered num ps = column2 (fmap num [1 ..] 
`zip` toList ps) - -numberedHeader - :: (Foldable f, LL.ListLike s Char, IsString s) - => (Maybe Int -> Pretty s) - -> f (Pretty s) - -> Pretty s -numberedHeader num ps = column2 (fmap num (Nothing : fmap Just [1 ..]) `zip` toList ps) - --- Like `column2` but with the lines numbered. For instance: --- --- 1. one thing : this is a thing --- 2. another thing : this is another thing --- 3. and another : yet one more thing -numberedColumn2 - :: (Foldable f, LL.ListLike s Char, IsString s) - => (Int -> Pretty s) - -> f (Pretty s, Pretty s) - -> Pretty s -numberedColumn2 num ps = numbered num (align $ toList ps) - -numberedColumn2Header - :: (Foldable f, LL.ListLike s Char, IsString s) - => (Int -> Pretty s) - -> f (Pretty s, Pretty s) - -> Pretty s -numberedColumn2Header num ps = numberedHeader (maybe mempty num) (align $ toList ps) - --- Opinionated `numbered` that uses bold numbers in front -numberedList :: Foldable f => f (Pretty ColorText) -> Pretty ColorText -numberedList = numbered (\i -> hiBlack . fromString $ show i <> ".") - -leftPad, rightPad :: IsString s => Int -> Pretty s -> Pretty s -leftPad n p = - let rem = n - preferredWidth p - in if rem > 0 then fromString (replicate rem ' ') <> p else p -rightPad n p = - let rem = n - preferredWidth p - in if rem > 0 then p <> fromString (replicate rem ' ') else p - -excerptColumn2Headed - :: (LL.ListLike s Char, IsString s) - => Maybe Int - -> (Pretty s, Pretty s) - -> [(Pretty s, Pretty s)] - -> Pretty s -excerptColumn2Headed max hd cols = case max of - Just max | len > max -> - lines [column2 (hd : take max cols), "... " <> shown (len - max) <> " more"] - _ -> column2 (hd : cols) - where len = length cols - -excerptColumn2 - :: (LL.ListLike s Char, IsString s) - => Maybe Int - -> [(Pretty s, Pretty s)] - -> Pretty s -excerptColumn2 max cols = case max of - Just max | len > max -> lines [column2 cols, "... 
" <> shown (len - max)] - _ -> column2 cols - where len = length cols - -column2 - :: (LL.ListLike s Char, IsString s) => [(Pretty s, Pretty s)] -> Pretty s -column2 = column2sep "" - -column2Header - :: Pretty ColorText -> Pretty ColorText -> [(Pretty ColorText, Pretty ColorText)] -> Pretty ColorText -column2Header left right = column2sep " " . ((fmap CT.hiBlack left, fmap CT.hiBlack right):) - -column2sep - :: (LL.ListLike s Char, IsString s) => Pretty s -> [(Pretty s, Pretty s)] -> Pretty s -column2sep sep rows = lines . (group <$>) . align $ [(a, sep <> b) | (a, b) <- rows] - -column2M - :: (Applicative m, LL.ListLike s Char, IsString s) - => [m (Pretty s, Pretty s)] - -> m (Pretty s) -column2M = fmap column2 . sequenceA - -mayColumn2 - :: (LL.ListLike s Char, IsString s) - => [(Pretty s, Maybe (Pretty s))] - -> Pretty s -mayColumn2 = lines . (group <$>) . ((uncurry (<>)) <$>) . align' - -column3 - :: (LL.ListLike s Char, IsString s) - => [(Pretty s, Pretty s, Pretty s)] - -> Pretty s -column3 = column3sep "" - -column3M - :: (LL.ListLike s Char, IsString s, Monad m) - => [m (Pretty s, Pretty s, Pretty s)] - -> m (Pretty s) -column3M = fmap column3 . sequence - -column3UnzippedM - :: forall m s . (LL.ListLike s Char, IsString s, Monad m) - => Pretty s - -> [m (Pretty s)] - -> [m (Pretty s)] - -> [m (Pretty s)] - -> m (Pretty s) -column3UnzippedM bottomPadding left mid right = let - rowCount = maximum (fmap length [left, mid, right]) - pad :: [m (Pretty s)] -> [m (Pretty s)] - pad a = a ++ replicate (rowCount - length a) (pure bottomPadding) - (pleft, pmid, pright) = (pad left, pad mid, pad right) - in column3M $ zipWith3 (liftA3 (,,)) pleft pmid pright - -column2UnzippedM - :: forall m s . 
(LL.ListLike s Char, IsString s, Monad m) - => Pretty s - -> [m (Pretty s)] - -> [m (Pretty s)] - -> m (Pretty s) -column2UnzippedM bottomPadding left right = let - rowCount = length left `max` length right - pad :: [m (Pretty s)] -> [m (Pretty s)] - pad a = a ++ replicate (rowCount - length a) (pure bottomPadding) - sep :: [m (Pretty s)] -> [m (Pretty s)] - sep = fmap (fmap (" " <>)) - (pleft, pright) = (pad left, sep $ pad right) - in column2M $ zipWith (liftA2 (,)) pleft pright - -column3sep - :: (LL.ListLike s Char, IsString s) => Pretty s -> [(Pretty s, Pretty s, Pretty s)] -> Pretty s -column3sep sep rows = let - bc = align [(b,sep <> c) | (_,b,c) <- rows ] - abc = group <$> align [(a,sep <> bc) | ((a,_,_),bc) <- rows `zip` bc ] - in lines abc - -wrapColumn2 :: - (LL.ListLike s Char, IsString s) => [(Pretty s, Pretty s)] -> Pretty s -wrapColumn2 rows = lines (align rows) where - align rows = let lwidth = foldl' max 0 (preferredWidth . fst <$> rows) + 2 - in [ group (rightPad lwidth l <> indentNAfterNewline lwidth (wrap r)) - | (l, r) <- rows] - -align - :: (LL.ListLike s Char, IsString s) => [(Pretty s, Pretty s)] -> [Pretty s] -align rows = (((uncurry (<>)) <$>) . align') (second Just <$> rows) - --- [("foo", Just "bar") --- ,("barabaz", Nothing) --- ,("qux","quux")] --- --- results in: --- --- [("foo ", "bar"), --- [("barabaz", ""), --- [("qux ", "quuxbill")] --- --- The first component has padding added, sufficient to align the second --- component. The second component has whitespace added after its --- newlines, again sufficient to line it up in a second column. 
-align' - :: (LL.ListLike s Char, IsString s) - => [(Pretty s, Maybe (Pretty s))] - -> [(Pretty s, Pretty s)] -align' rows = alignedRows - where - col0Width = foldl' max 0 [ preferredWidth col1 | (col1, Just _) <- rows ] + 1 - alignedRows = - [ case col1 of - Just s -> - (rightPad col0Width col0, indentNAfterNewline col0Width s) - Nothing -> (col0, mempty) - | (col0, col1) <- rows - ] - -text :: IsString s => Text -> Pretty s -text t = fromString (Text.unpack t) - -num :: (Show n, Num n, IsString s) => n -> Pretty s -num n = fromString (show n) - -string :: IsString s => String -> Pretty s -string = fromString - -shown :: (Show a, IsString s) => a -> Pretty s -shown = fromString . show - -hang' - :: (LL.ListLike s Char, IsString s) - => Pretty s - -> Pretty s - -> Pretty s - -> Pretty s -hang' from by p = group $ if isMultiLine p - then from <> "\n" <> group (indent by p) - else (from <> " " <> group p) `orElse` (from <> "\n" <> group (indent by p)) - -hangUngrouped' - :: (LL.ListLike s Char, IsString s) - => Pretty s - -> Pretty s - -> Pretty s - -> Pretty s -hangUngrouped' from by p = if isMultiLine p - then from <> "\n" <> indent by p - else (from <> " " <> p) `orElse` (from <> "\n" <> indent by p) - -hangUngrouped - :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s -hangUngrouped from = hangUngrouped' from " " - -hang :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s -hang from = hang' from " " - -nest :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s -nest = hang' "" - -indent :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s -indent by p = by <> indentAfterNewline by p - -indentN :: (LL.ListLike s Char, IsString s) => Width -> Pretty s -> Pretty s -indentN by = indent (fromString $ replicate by ' ') - -indentNonEmptyN :: (LL.ListLike s Char, IsString s) => Width -> Pretty s -> Pretty s -indentNonEmptyN _ (out -> Empty) = mempty -indentNonEmptyN by p = indentN by p - 
-indentNAfterNewline - :: (LL.ListLike s Char, IsString s) => Width -> Pretty s -> Pretty s -indentNAfterNewline by = indentAfterNewline (fromString $ replicate by ' ') - -indentAfterNewline - :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s -indentAfterNewline by = flatMap f - where - f s0 = case LL.break (== '\n') s0 of - (hd, s) -> if LL.null s - then lit s0 - -- use `take` and `drop` to preserve annotations or - -- or other extra info attached to the original `s` - else lit (LL.take (LL.length hd) s0) <> "\n" <> by <> f (LL.drop 1 s) - -instance IsString s => IsString (Pretty s) where - fromString s = lit' (foldMap chDelta s) (fromString s) - -instance Semigroup (Pretty s) where (<>) = mappend -instance Monoid (Pretty s) where - mempty = Pretty mempty Empty - mappend p1 p2 = Pretty (delta p1 <> delta p2) . - Append $ case (out p1, out p2) of - (Append ps1, Append ps2) -> ps1 <> ps2 - (Append ps1, _) -> ps1 <> pure p2 - (_, Append ps2) -> pure p1 <> ps2 - (_,_) -> pure p1 <> pure p2 - -data Delta = - -- | The number of columns. - SingleLine !Width - -- | The number of columns in the first, last, and longest lines. 
- | MultiLine !Width !Width !Width - deriving stock (Eq, Ord, Show) - -instance Semigroup Delta where - SingleLine c <> SingleLine c2 = SingleLine (c + c2) - SingleLine c <> MultiLine fc lc mc = - let fc' = c + fc - in MultiLine fc' lc (max fc' mc) - MultiLine fc lc mc <> SingleLine c = - let lc' = lc + c - in MultiLine fc lc' (max lc' mc) - MultiLine fc lc mc <> MultiLine fc2 lc2 mc2 = - MultiLine fc lc2 (max mc (max mc2 (lc + fc2))) - -instance Monoid Delta where - mempty = SingleLine 0 - mappend = (<>) - -maxCol :: Delta -> Width -maxCol = \case - SingleLine c -> c - MultiLine _ _ c -> c - -lastCol :: Delta -> Width -lastCol = \case - SingleLine c -> c - MultiLine _ c _ -> c - -chDelta :: Char -> Delta -chDelta '\n' = MultiLine 0 0 0 -chDelta _ = SingleLine 1 - -preferredWidth :: Pretty s -> Width -preferredWidth p = lastCol (delta p) - -isMultiLine :: Pretty s -> Bool -isMultiLine p = - case delta p of - SingleLine{} -> False - MultiLine{} -> True - -black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, underline - :: Pretty CT.ColorText -> Pretty CT.ColorText -black = map CT.black -red = map CT.red -green = map CT.green -yellow = map CT.yellow -blue = map CT.blue -purple = map CT.purple -cyan = map CT.cyan -white = map CT.white -hiBlack = map CT.hiBlack -hiRed = map CT.hiRed -hiGreen = map CT.hiGreen -hiYellow = map CT.hiYellow -hiBlue = map CT.hiBlue -hiPurple = map CT.hiPurple -hiCyan = map CT.hiCyan -hiWhite = map CT.hiWhite -bold = map CT.bold -underline = map CT.underline - -plural :: Foldable f - => f a -> Pretty ColorText -> Pretty ColorText -plural f p = case length f of - 0 -> mempty - 1 -> p - -- todo: consider use of plural package - _ -> p <> case reverse (toPlainUnbroken p) of - 's' : _ -> "es" - _ -> "s" - -border :: (LL.ListLike s Char, IsString s) => Int -> Pretty s -> Pretty s -border n p = "\n" <> indentN n p <> "\n" - -callout :: (LL.ListLike s Char, IsString s) => 
Pretty s -> Pretty s -> Pretty s -callout header p = header <> "\n\n" <> p - -bracket :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -bracket = indent " " - -boxForkLeft, boxLeft, boxRight :: - forall s . (LL.ListLike s Char, IsString s) => [Pretty s] -> [Pretty s] -boxForkLeft = boxLeft' lBoxStyle1 -boxLeft = boxLeft' lBoxStyle2 -boxRight = boxRight' rBoxStyle2 - -boxLeft', boxRight' :: (LL.ListLike s Char, IsString s) - => BoxStyle s -> [Pretty s] -> [Pretty s] -boxLeft' style = fmap runIdentity . boxLeftM' style . fmap Identity -boxRight' style = fmap runIdentity . boxRightM' style . fmap Identity - -type BoxStyle s = - ( (Pretty s, Pretty s) -- first (start, continue) - , (Pretty s, Pretty s) -- middle - , (Pretty s, Pretty s) -- last - , (Pretty s, Pretty s) -- singleton - ) -lBoxStyle1, lBoxStyle2, rBoxStyle2 :: IsString s => BoxStyle s -lBoxStyle1 = (("┌ ", "│ ") -- first - ,("├ ", "│ ") -- middle - ,("└ ", " ") -- last - ,("" , "" )) -- singleton -lBoxStyle2 = (("┌ "," ") - ,("│ "," ") - ,("└ "," ") - ,("" ,"" )) -rBoxStyle2 = ((" ┐", " │") - ,(" │", " │") - ,(" ┘", " ") - ,(" ", " ")) - -boxLeftM, boxRightM :: forall m s . (Monad m, LL.ListLike s Char, IsString s) - => [m (Pretty s)] -> [m (Pretty s)] -boxLeftM = boxLeftM' lBoxStyle2 -boxRightM = boxRightM' rBoxStyle2 - -boxLeftM' :: forall m s . (Monad m, LL.ListLike s Char, IsString s) - => BoxStyle s -> [m (Pretty s)] -> [m (Pretty s)] -boxLeftM' (first, middle, last, singleton) ps = go (Seq.fromList ps) where - go Seq.Empty = [] - go (p Seq.:<| Seq.Empty) = [decorate singleton <$> p] - go (a Seq.:<| (mid Seq.:|> b)) = - [decorate first <$> a] - ++ toList (fmap (decorate middle) <$> mid) - ++ [decorate last <$> b] - decorate (first, mid) p = first <> indentAfterNewline mid p - --- this implementation doesn't work for multi-line inputs, --- because i dunno how to inspect multi-line inputs - - -boxRightM' :: forall m s. 
(Monad m, LL.ListLike s Char, IsString s) - => BoxStyle s -> [m (Pretty s)] -> [m (Pretty s)] -boxRightM' (first, middle, last, singleton) ps = go (Seq.fromList ps) where - go :: Seq.Seq (m (Pretty s)) -> [m (Pretty s)] - go Seq.Empty = [] - go (p Seq.:<| Seq.Empty) = [decorate singleton <$> p] - go (a Seq.:<| (mid Seq.:|> b)) = - [decorate first <$> a] - ++ toList (fmap (decorate middle) <$> mid) - ++ [decorate last <$> b] - decorate (first, _mid) p = p <> first - -warnCallout, blockedCallout, fatalCallout, okCallout - :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -warnCallout = callout "⚠️" -fatalCallout = callout "❗️" -okCallout = callout "✅" -blockedCallout = callout "🚫" - -backticked :: IsString s => Pretty s -> Pretty s -backticked p = group ("`" <> p <> "`") - --- |Attach some punctuation after the closing backtick. -backticked' :: IsString s => Pretty s -> Pretty s -> Pretty s -backticked' p end = group ("`" <> p <> "`" <> end) - -instance Show s => Show (Pretty s) where - show p = render 80 (metaPretty p) - -metaPretty :: Show s => Pretty s -> Pretty String -metaPretty = go (0::Int) where - go prec p = case out p of - Lit s -> parenthesizeIf (prec > 0) $ "Lit" `hang` lit (show s) - Empty -> "Empty" - Group g -> parenthesizeIf (prec > 0) $ "Group" `hang` go 1 g - Wrap s -> parenthesizeIf (prec > 0) $ "Wrap" `hang` - surroundCommas "[" "]" (go 1 <$> s) - OrElse a b -> parenthesizeIf (prec > 0) $ - "OrElse" `hang` spaced [go 1 a, go 1 b] - Append s -> surroundCommas "[" "]" (go 1 <$> s) - -map :: LL.ListLike s2 Char => (s -> s2) -> Pretty s -> Pretty s2 -map f p = case out p of - Append ps -> foldMap (map f) ps - Empty -> mempty - Group p -> group (map f p) - Lit s -> lit' (foldMap chDelta $ LL.toList s2) s2 where s2 = f s - OrElse p1 p2 -> orElse (map f p1) (map f p2) - Wrap p -> wrap_ (map f <$> p) - -flatMap :: (s -> Pretty s2) -> Pretty s -> Pretty s2 -flatMap f p = case out p of - Append ps -> foldMap (flatMap f) ps - Empty -> mempty - 
Group p -> group (flatMap f p) - Lit s -> f s - OrElse p1 p2 -> orElse (flatMap f p1) (flatMap f p2) - Wrap p -> wrap_ (flatMap f <$> p) diff --git a/parser-typechecker/src/Unison/Util/Range.hs b/parser-typechecker/src/Unison/Util/Range.hs deleted file mode 100644 index e2377bc027..0000000000 --- a/parser-typechecker/src/Unison/Util/Range.hs +++ /dev/null @@ -1,27 +0,0 @@ -module Unison.Util.Range where - -import Unison.Lexer (Pos(..)) - --- | True if `_x` contains `_y` -contains :: Range -> Range -> Bool -_x@(Range a b) `contains` _y@(Range c d) = a <= c && d <= b - -overlaps :: Range -> Range -> Bool -overlaps (Range a b) (Range c d) = a < d && c < b - -inRange :: Pos -> Range -> Bool -inRange p (Range a b) = p >= a && p < b - -isMultiLine :: Range -> Bool -isMultiLine (Range (Pos startLine _) (Pos endLine _)) = startLine < endLine - -data Range = Range { start :: Pos, end :: Pos } deriving (Eq, Ord, Show) - -startingLine :: Range -> Range -startingLine r@(Range start@(Pos startLine _) (Pos stopLine _)) = - if stopLine == startLine then r - else Range start (Pos (startLine+1) 0) - -instance Semigroup Range where - (Range start end) <> (Range start2 end2) = - Range (min start start2) (max end end2) diff --git a/parser-typechecker/src/Unison/Util/SyntaxText.hs b/parser-typechecker/src/Unison/Util/SyntaxText.hs deleted file mode 100644 index e2fcfb6c36..0000000000 --- a/parser-typechecker/src/Unison/Util/SyntaxText.hs +++ /dev/null @@ -1,62 +0,0 @@ -module Unison.Util.SyntaxText where - -import Unison.Prelude -import Unison.Reference (Reference) -import Unison.Referent (Referent) -import Unison.HashQualified (HashQualified) -import Unison.Pattern (SeqOp) - -import Unison.Util.AnnotatedText ( AnnotatedText(..), annotate ) - -type SyntaxText = AnnotatedText Element - --- The elements of the Unison grammar, for syntax highlighting purposes -data Element = NumericLiteral - | TextLiteral - | CharLiteral - | BooleanLiteral - | Blank - | Var - | Reference Reference - | 
Referent Referent - | Op SeqOp - | Constructor - | Request - | AbilityBraces - -- let|handle|in|where|match|with|cases|->|if|then|else|and|or - | ControlKeyword - -- forall|-> - | TypeOperator - | BindingEquals - | TypeAscriptionColon - -- type|ability - | DataTypeKeyword - | DataTypeParams - | Unit - -- unique - | DataTypeModifier - -- `use Foo bar` is keyword, prefix, suffix - | UseKeyword - | UsePrefix - | UseSuffix - | HashQualifier HashQualified - | DelayForceChar - -- ? , ` [ ] @ | - -- Currently not all commas in the pretty-print output are marked up as DelimiterChar - we miss - -- out characters emitted by Pretty.hs helpers like Pretty.commas. - | DelimiterChar - -- ! ' - | Parenthesis - | LinkKeyword -- `typeLink` and `termLink` - -- [: :] @[] - | DocDelimiter - -- the 'include' in @[include], etc - | DocKeyword - deriving (Eq, Ord, Show) - -syntax :: Element -> SyntaxText -> SyntaxText -syntax = annotate - --- Convert a `SyntaxText` to a `String`, ignoring syntax markup -toPlain :: SyntaxText -> String -toPlain (AnnotatedText at) = join (toList $ fst <$> at) diff --git a/parser-typechecker/src/Unison/Util/TQueue.hs b/parser-typechecker/src/Unison/Util/TQueue.hs deleted file mode 100644 index e088b13e85..0000000000 --- a/parser-typechecker/src/Unison/Util/TQueue.hs +++ /dev/null @@ -1,89 +0,0 @@ -module Unison.Util.TQueue where - -import Unison.Prelude - -import UnliftIO (MonadUnliftIO) -import UnliftIO.STM hiding (TQueue) -import qualified UnliftIO.Async as Async - -import qualified Data.Sequence as S -import Data.Sequence (Seq((:<|)), (|>)) - -data TQueue a = TQueue (TVar (Seq a)) (TVar Word64) - -newIO :: MonadIO m => m (TQueue a) -newIO = TQueue <$> newTVarIO mempty <*> newTVarIO 0 - -size :: TQueue a -> STM Int -size (TQueue q _) = S.length <$> readTVar q - --- Waits for this queue to reach a size <= target. --- Consumes no elements; it's expected there is some --- other thread which is consuming elements from the queue. 
-awaitSize :: Int -> TQueue a -> STM () -awaitSize target q = size q >>= \n -> - if n <= target then pure () - else retrySTM - -peek :: TQueue a -> STM a -peek (TQueue v _) = readTVar v >>= \case - a :<| _ -> pure a - _ -> retrySTM - -dequeue :: TQueue a -> STM a -dequeue (TQueue v _) = readTVar v >>= \case - a :<| as -> writeTVar v as *> pure a - _ -> retrySTM - -undequeue :: TQueue a -> a -> STM () -undequeue (TQueue v _) a = readTVar v >>= \ - as -> writeTVar v (a :<| as) - -tryDequeue :: TQueue a -> STM (Maybe a) -tryDequeue (TQueue v _) = readTVar v >>= \case - a :<| as -> writeTVar v as *> pure (Just a) - _ -> pure Nothing - -dequeueN :: TQueue a -> Int -> STM [a] -dequeueN (TQueue v _) n = readTVar v >>= \s -> - if length s >= n then writeTVar v (S.drop n s) $> toList (S.take n s) - else retrySTM - --- return the number of enqueues over the life of the queue -enqueueCount :: TQueue a -> STM Word64 -enqueueCount (TQueue _ count) = readTVar count - -flush :: TQueue a -> STM [a] -flush (TQueue v _) = do - s <- readTVar v - writeTVar v mempty - pure . toList $ s - -enqueue :: TQueue a -> a -> STM () -enqueue (TQueue v count) a = do - modifyTVar' v (|> a) - modifyTVar' count (+1) - -raceIO :: MonadUnliftIO m => STM a -> STM b -> m (Either a b) -raceIO a b = do - aa <- Async.async $ atomically a - ab <- Async.async $ atomically b - Async.waitEitherCancel aa ab - --- take all elements up to but not including the first not satisfying cond -tryPeekWhile :: (a -> Bool) -> TQueue a -> STM [a] -tryPeekWhile cond (TQueue v _) = toList . 
S.takeWhileL cond <$> readTVar v - --- block until at least one element is enqueued not satisfying cond, --- then return the prefix before that -takeWhile :: (a -> Bool) -> TQueue a -> STM [a] -takeWhile cond (TQueue v _) = readTVar v >>= \s -> let - (left, right) = S.spanl cond s in - if null right then retrySTM - else writeTVar v right $> toList left - -peekWhile :: (a -> Bool) -> TQueue a -> STM [a] -peekWhile cond (TQueue v _) = readTVar v >>= \s -> let - (left, right) = S.spanl cond s in - if null right then retrySTM - else pure $ toList left diff --git a/parser-typechecker/src/Unison/Util/Timing.hs b/parser-typechecker/src/Unison/Util/Timing.hs deleted file mode 100644 index 40faed90b1..0000000000 --- a/parser-typechecker/src/Unison/Util/Timing.hs +++ /dev/null @@ -1,41 +0,0 @@ -{-# LANGUAGE BangPatterns #-} - -module Unison.Util.Timing where - -import System.CPUTime (getCPUTime) -import System.IO.Unsafe (unsafePerformIO) -import UnliftIO (MonadIO, liftIO) -import Data.Time.Clock.System (getSystemTime, systemToTAITime) -import Data.Time.Clock.TAI (diffAbsoluteTime) -import Data.Time.Clock (picosecondsToDiffTime) - -enabled :: Bool -enabled = False - -time :: MonadIO m => String -> m a -> m a -time _ ma | not enabled = ma -time label ma = do - systemStart <- liftIO getSystemTime - cpuPicoStart <- liftIO getCPUTime - liftIO $ putStrLn $ "Timing " ++ label ++ "..." 
- a <- ma - cpuPicoEnd <- liftIO getCPUTime - systemEnd <- liftIO getSystemTime - let systemDiff = diffAbsoluteTime (systemToTAITime systemEnd) (systemToTAITime systemStart) - let cpuDiff = picosecondsToDiffTime (cpuPicoEnd - cpuPicoStart) - liftIO $ putStrLn $ "Finished " ++ label ++ " in " ++ show cpuDiff ++ " (cpu), " ++ show systemDiff ++ " (system)" - pure a - -unsafeTime :: Monad m => String -> m a -> m a -unsafeTime _ ma | not enabled = ma -unsafeTime label ma = do - let !systemStart = unsafePerformIO getSystemTime - !cpuPicoStart = unsafePerformIO getCPUTime - !_ = unsafePerformIO $ putStrLn $ "Timing " ++ label ++ "..." - a <- ma - let !cpuPicoEnd = unsafePerformIO getCPUTime - !systemEnd = unsafePerformIO getSystemTime - let systemDiff = diffAbsoluteTime (systemToTAITime systemEnd) (systemToTAITime systemStart) - let cpuDiff = picosecondsToDiffTime (cpuPicoEnd - cpuPicoStart) - let !_ = unsafePerformIO $ putStrLn $ "Finished " ++ label ++ " in " ++ show cpuDiff ++ " (cpu), " ++ show systemDiff ++ " (system)" - pure a diff --git a/parser-typechecker/src/Unison/Util/TransitiveClosure.hs b/parser-typechecker/src/Unison/Util/TransitiveClosure.hs deleted file mode 100644 index 1c865f2ebf..0000000000 --- a/parser-typechecker/src/Unison/Util/TransitiveClosure.hs +++ /dev/null @@ -1,31 +0,0 @@ -module Unison.Util.TransitiveClosure where - -import Unison.Prelude - -import Data.Functor.Identity (runIdentity) -import qualified Data.Set as Set - -transitiveClosure :: forall m a. (Monad m, Ord a) - => (a -> m (Set a)) - -> Set a - -> m (Set a) -transitiveClosure getDependencies open = - let go :: Set a -> [a] -> m (Set a) - go closed [] = pure closed - go closed (h:t) = - if Set.member h closed - then go closed t - else do - deps <- getDependencies h - go (Set.insert h closed) (toList deps ++ t) - in go Set.empty (toList open) - -transitiveClosure' :: Ord a => (a -> Set a) -> Set a -> Set a -transitiveClosure' f as = runIdentity $ transitiveClosure (pure . 
f) as - -transitiveClosure1 :: forall m a. (Monad m, Ord a) - => (a -> m (Set a)) -> a -> m (Set a) -transitiveClosure1 f a = transitiveClosure f (Set.singleton a) - -transitiveClosure1' :: Ord a => (a -> Set a) -> a -> Set a -transitiveClosure1' f a = runIdentity $ transitiveClosure1 (pure . f) a diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs deleted file mode 100644 index df763cd654..0000000000 --- a/parser-typechecker/tests/Suite.hs +++ /dev/null @@ -1,87 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-} - -module Main where - -import EasyTest -import System.Environment (getArgs) -import System.IO -import qualified Unison.Core.Test.Name as Name -import qualified Unison.Test.ABT as ABT -import qualified Unison.Test.Cache as Cache -import qualified Unison.Test.Codebase as Codebase -import qualified Unison.Test.Codebase.Causal as Causal -import qualified Unison.Test.Codebase.FileCodebase as FileCodebase -import qualified Unison.Test.Codebase.Path as Path -import qualified Unison.Test.ColorText as ColorText -import qualified Unison.Test.DataDeclaration as DataDeclaration -import qualified Unison.Test.FileParser as FileParser -import qualified Unison.Test.Git as Git -import qualified Unison.Test.Lexer as Lexer -import qualified Unison.Test.IO as TestIO -import qualified Unison.Test.Range as Range -import qualified Unison.Test.Referent as Referent -import qualified Unison.Test.Term as Term -import qualified Unison.Test.TermParser as TermParser -import qualified Unison.Test.TermPrinter as TermPrinter -import qualified Unison.Test.Type as Type -import qualified Unison.Test.TypePrinter as TypePrinter -import qualified Unison.Test.Typechecker as Typechecker -import qualified Unison.Test.Typechecker.Context as Context -import qualified Unison.Test.Typechecker.TypeError as TypeError -import qualified Unison.Test.UnisonSources as UnisonSources -import qualified Unison.Test.UriParser as UriParser -import qualified 
Unison.Test.Util.Bytes as Bytes -import qualified Unison.Test.Util.PinBoard as PinBoard -import qualified Unison.Test.Util.Pretty as Pretty -import qualified Unison.Test.Var as Var -import qualified Unison.Test.ANF as ANF -import qualified Unison.Test.MCode as MCode -import qualified Unison.Test.VersionParser as VersionParser - -test :: Bool -> Test () -test rt = tests - [ Cache.test - , Lexer.test - , Term.test - , TermParser.test - , TermPrinter.test - , Type.test - , TypeError.test - , TypePrinter.test - , UnisonSources.test rt - , FileParser.test - , DataDeclaration.test - , Range.test - , ColorText.test - , Bytes.test - , Path.test - , Causal.test - , Referent.test - , FileCodebase.test - , ABT.test - , ANF.test - , MCode.test - , Var.test - , Codebase.test - , Typechecker.test - , UriParser.test - , Context.test - , Git.test - , TestIO.test rt - , Name.test - , VersionParser.test - , Pretty.test - , PinBoard.test - ] - -main :: IO () -main = do - args0 <- getArgs - let (rt, args) - | "--new-runtime":rest <- args0 = (True, rest) - | otherwise = (False, args0) - mapM_ (`hSetEncoding` utf8) [stdout, stdin, stderr] - case args of - [] -> runOnly "" (test rt) - [prefix] -> runOnly prefix (test rt) - [seed, prefix] -> rerunOnly (read seed) prefix (test rt) diff --git a/parser-typechecker/tests/Unison/Core/Test/Name.hs b/parser-typechecker/tests/Unison/Core/Test/Name.hs deleted file mode 100644 index e16c07ee7f..0000000000 --- a/parser-typechecker/tests/Unison/Core/Test/Name.hs +++ /dev/null @@ -1,29 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.Core.Test.Name where - -import EasyTest -import Unison.Name as Name -import Unison.NameSegment as NameSegment -import Data.List ( intercalate ) -import Data.Text ( pack ) - -test :: Test () -test = scope "name" $ tests - [ scope "suffixes" $ tests - [ scope "empty" $ expectEqual (suffixes "") [] - , scope "one namespace" $ expectEqual (suffixes "bar") ["bar"] - , scope "two namespaces" - $ expectEqual 
(suffixes "foo.bar") ["foo.bar", "bar"] - , scope "multiple namespaces" - $ expectEqual (suffixes "foo.bar.baz") ["foo.bar.baz", "bar.baz", "baz"] - , scope "terms named `.`" $ expectEqual (suffixes "base..") ["base..", "."] - ] - , scope "segments" $ do - numDots <- int' 0 10 - numSegs <- int' 0 10 - n <- int' 0 10 - segs <- listOf n . pick $ replicate numDots "." ++ replicate numSegs "foo" - expectEqual (segments $ Name . pack $ intercalate "." segs) - (NameSegment . pack <$> segs) - ] diff --git a/parser-typechecker/tests/Unison/Test/ABT.hs b/parser-typechecker/tests/Unison/Test/ABT.hs deleted file mode 100644 index 2f36c15450..0000000000 --- a/parser-typechecker/tests/Unison/Test/ABT.hs +++ /dev/null @@ -1,44 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.Test.ABT where - -import Data.Set as Set -import EasyTest -import Unison.ABT as ABT -import Unison.Symbol (Symbol(..)) -import Unison.Var as Var -import Unison.Codebase.Serialization ( getFromBytes, putBytes ) -import qualified Unison.Codebase.Serialization.V1 as V1 - -test :: Test () -test = scope "abt" $ tests [ - scope "freshInBoth" $ - let - t1 = var 1 "a" - t2 = var 0 "a" - fresh = ABT.freshInBoth t1 t2 $ symbol 0 "a" - in tests - [ scope "first" $ expect (not $ Set.member fresh (ABT.freeVars t1)) - , scope "second" $ expect (not $ Set.member fresh (ABT.freeVars t2)) - ], - scope "rename" $ do - -- rename x to a in \a -> [a, x] should yield - -- \a1 -> [a1, a] - let t1 = ABT.abs (symbol 0 "a") (ABT.tm [var 0 "a", var 0 "x"]) - t2 = ABT.rename (symbol 0 "x") (symbol 0 "a") t1 - fvs = toList . 
ABT.freeVars $ t2 - -- make sure the variable wasn't captured - expectEqual fvs [symbol 0 "a"] - -- make sure the resulting term is alpha equiv to \a1 -> [a1, a] - expectEqual t2 (ABT.abs (symbol 0 "b") (ABT.tm [var 0 "b", var 0 "a"])), - - -- confirmation of fix for https://github.com/unisonweb/unison/issues/1388 - -- where symbols with nonzero freshIds did not round trip - scope "putSymbol" $ let - v = Symbol 10 (User "hi") - v' = getFromBytes V1.getSymbol (putBytes V1.putSymbol v) - in expectEqual (Just v) v' - ] - where - symbol i n = Symbol i (Var.User n) - var i n = ABT.var $ symbol i n diff --git a/parser-typechecker/tests/Unison/Test/ANF.hs b/parser-typechecker/tests/Unison/Test/ANF.hs deleted file mode 100644 index 3bca0b4812..0000000000 --- a/parser-typechecker/tests/Unison/Test/ANF.hs +++ /dev/null @@ -1,199 +0,0 @@ -{-# language BangPatterns #-} -{-# language PatternGuards #-} - -module Unison.Test.ANF where - -import EasyTest - -import Unison.ABT.Normalized (Term(TAbs)) -import qualified Unison.Pattern as P -import Unison.Reference (Reference) -import Unison.Runtime.ANF as ANF -import Unison.Runtime.MCode (emitCombs) -import Unison.Type as Ty -import Unison.Var as Var - -import Unison.Util.EnumContainers as EC - -import qualified Data.Set as Set -import qualified Data.Map as Map - -import qualified Unison.Term as Term -import qualified Unison.ABT as ABT -import Unison.Test.Common (tm) - -import Control.Monad.Reader (ReaderT(..)) -import Control.Monad.State (evalState) - --- testSNF s = ok --- where --- t0 = tm s --- snf = toSuperNormal (const 0) t0 - -simpleRefs :: Reference -> RTag -simpleRefs r - | r == Ty.natRef = 0 - | r == Ty.intRef = 1 - | r == Ty.floatRef = 2 - | r == Ty.booleanRef = 3 - | r == Ty.textRef = 4 - | r == Ty.charRef = 5 - | otherwise = 100 - -runANF :: Var v => ANFM v a -> a -runANF m = evalState (runReaderT m env) (0, []) - where - env = (Set.empty, const 0, simpleRefs) - -testANF :: String -> Test () -testANF s - | t0 == 
denormalize anf = ok - | otherwise = crash $ show $ denormalize anf - where - t0 = const () `Term.amap` tm s - anf = runANF $ anfTerm t0 - -testLift :: String -> Test () -testLift s = case cs of (!_, !_, _) -> ok - where - cs = emitCombs 0 . superNormalize (const 0) (const 0) . lamLift $ tm s - -denormalize :: Var v => ANormal v -> Term.Term0 v -denormalize (TVar v) = Term.var () v -denormalize (TLit l) = case l of - I i -> Term.int () i - N n -> Term.nat () n - F f -> Term.float () f - T t -> Term.text () t - C c -> Term.char () c - LM r -> Term.termLink () r - LY r -> Term.typeLink () r -denormalize (THnd _ _ _) - = error "denormalize handler" - -- = Term.match () (denormalize b) $ denormalizeHandler h -denormalize (TShift _ _ _) - = error "denormalize shift" -denormalize (TLet v _ bn bo) - | typeOf v == ANFBlank = ABT.subst v dbn dbo - | otherwise = Term.let1_ False [(v, dbn)] dbo - where - dbn = denormalize $ TTm bn - dbo = denormalize bo -denormalize (TName _ _ _ _) - = error "can't denormalize by-name bindings" -denormalize (TMatch v cs) - = Term.match () (ABT.var v) $ denormalizeMatch cs -denormalize (TApp f args) - | FCon rt 0 <- f - , r <- denormalizeRef rt - , r `elem` [Ty.natRef, Ty.intRef] - , [v] <- args - = Term.var () v -denormalize (TApp f args) = Term.apps' df (Term.var () <$> args) - where - df = case f of - FVar v -> Term.var () v - FComb _ -> error "FComb" - FCon r n -> - Term.constructor () (denormalizeRef r) (fromIntegral $ rawTag n) - FReq r n -> - Term.request () (denormalizeRef r) (fromIntegral $ rawTag n) - FPrim _ -> error "FPrim" - FCont _ -> error "denormalize FCont" -denormalize (TFrc _) = error "denormalize TFrc" - -denormalizeRef :: RTag -> Reference -denormalizeRef r - | 0 <- rawTag r = Ty.natRef - | 1 <- rawTag r = Ty.intRef - | 2 <- rawTag r = Ty.floatRef - | 3 <- rawTag r = Ty.booleanRef - | 4 <- rawTag r = Ty.textRef - | 5 <- rawTag r = Ty.charRef - | otherwise = error "denormalizeRef" - -backReference :: RTag -> Reference 
-backReference _ = error "backReference" - -denormalizeMatch - :: Var v => Branched (ANormal v) -> [Term.MatchCase () (Term.Term0 v)] -denormalizeMatch b - | MatchEmpty <- b = [] - | MatchIntegral m df <- b - = (dcase (ipat Ty.intRef) <$> mapToList m) ++ dfcase df - | MatchText m df <- b - = (dcase (const $ P.Text ()) <$> Map.toList m) ++ dfcase df - | MatchData r cs Nothing <- b - , [(0, ([UN], zb))] <- mapToList cs - , TAbs i (TMatch j (MatchIntegral m df)) <- zb - , i == j - = (dcase (ipat r) <$> mapToList m) ++ dfcase df - | MatchData r m df <- b - = (dcase (dpat r) . fmap snd <$> mapToList m) ++ dfcase df - | MatchRequest hs df <- b = denormalizeHandler hs df - | MatchSum _ <- b = error "MatchSum not a compilation target" - where - dfcase (Just d) - = [Term.MatchCase (P.Unbound ()) Nothing $ denormalize d] - dfcase Nothing = [] - - dcase p (t, br) = Term.MatchCase (p n t) Nothing dbr - where (n, dbr) = denormalizeBranch br - - ipat r _ i - | r == Ty.natRef = P.Nat () $ fromIntegral i - | otherwise = P.Int () $ fromIntegral i - dpat r n t = P.Constructor () r (fromEnum t) (replicate n $ P.Var ()) - -denormalizeBranch (TAbs v br) = (n+1, ABT.abs v dbr) - where (n, dbr) = denormalizeBranch br -denormalizeBranch tm = (0, denormalize tm) - -denormalizeHandler - :: Var v - => EnumMap RTag (EnumMap CTag ([Mem], ANormal v)) - -> ANormal v - -> [Term.MatchCase () (Term.Term0 v)] -denormalizeHandler cs df = dcs - where - dcs = foldMapWithKey rf cs <> dfc - dfc = [ Term.MatchCase - (P.EffectPure () (P.Var ())) - Nothing - db - ] - where (_, db) = denormalizeBranch df - rf r rcs = foldMapWithKey (cf $ backReference r) rcs - cf r t b = [ Term.MatchCase - (P.EffectBind () r (fromEnum t) - (replicate n $ P.Var ()) (P.Var ())) - Nothing - db - ] - where (n, db) = denormalizeBranch (snd b) - -test :: Test () -test = scope "anf" . tests $ - [ scope "lift" . 
tests $ - [ testLift "let\n\ - \ g = m x -> ##Nat.+ x m\n\ - \ m -> g m m" - , testLift "m n -> let\n\ - \ f acc i = match i with\n\ - \ 0 -> acc\n\ - \ _ -> f (##Nat.+ acc n) (##Nat.sub i 1)\n\ - \ f 0 m" - ] - , scope "denormalize" . tests $ - [ testANF "1" - , testANF "1 + 2" - , testANF "match x with\n\ - \ +1 -> foo\n\ - \ +2 -> bar\n\ - \ +3 -> baz" - , testANF "1 + match x with\n\ - \ +1 -> foo\n\ - \ +2 -> bar" - , testANF "(match x with +3 -> foo) + (match x with +2 -> foo)" - ] - ] diff --git a/parser-typechecker/tests/Unison/Test/Cache.hs b/parser-typechecker/tests/Unison/Test/Cache.hs deleted file mode 100644 index fafd6459f8..0000000000 --- a/parser-typechecker/tests/Unison/Test/Cache.hs +++ /dev/null @@ -1,80 +0,0 @@ -module Unison.Test.Cache where - -import EasyTest -import Control.Monad -import Control.Concurrent.STM -import Control.Concurrent.Async -import qualified Unison.Util.Cache as Cache - -test :: Test () -test = scope "util.cache" $ tests [ - scope "ex1" $ fits Cache.cache - , scope "ex2" $ fits (Cache.semispaceCache n) - , scope "ex3" $ doesn'tFit (Cache.semispaceCache n) - , scope "ex4" $ do - replicateM_ 10 $ concurrent (Cache.semispaceCache n) - ok - ] - where - n :: Word - n = 1000 - - -- This checks that items properly expire from the cache - doesn'tFit mkCache = do - cache <- io $ mkCache - misses <- io $ newTVarIO 0 - let f x = do - atomically $ modifyTVar misses (+1) - pure x - -- populate the cache, all misses (n*2), but first 1-n will have expired by the end - results1 <- io $ traverse (Cache.apply cache f) [1..n*2] - -- should be half hits, so an additional `n` misses - results2 <- io $ traverse (Cache.apply cache f) (reverse [1..n*2]) - misses <- io $ readTVarIO misses - expect' (results1 == [1..n*2]) - expect' (results2 == reverse [1..n*2]) - expect (misses == n * 3) - - -- This checks the simple case that everything fits in the cache - fits mkCache = do - cache <- io $ mkCache - misses <- io $ newTVarIO 0 - let f x = do - 
atomically $ modifyTVar misses (+1) - pure x - -- populate the cache - results1 <- io $ traverse (Cache.apply cache f) [1..n] - -- should be all hits - results2 <- io $ traverse (Cache.apply cache f) [1..n] - misses <- io $ readTVarIO misses - expect' (results1 == [1..n]) - expect' (results2 == [1..n]) - expect (misses == n) - - -- A simple smoke test of concurrent access. The cache doesn't - -- try to linearize all reads / writes so the number of misses - -- during concurrent access is unpredictable, but once the cache is - -- fully populated, concurrent reads should generate no further misses - concurrent mkCache = do - cache <- io $ mkCache - misses <- io $ newTVarIO 0 - let f x = do - atomically $ modifyTVar misses (+1) - pure x - -- we're populating the cache in parallel - results1 <- io $ async $ traverse (Cache.apply cache f) [1 .. (n `div` 2)] - results2 <- io $ async $ traverse (Cache.apply cache f) [(n `div` 2 + 1) .. n] - (results1, results2) <- io $ waitBoth results1 results2 - -- now the cache should be fully populated, so no further misses - misses1 <- io $ readTVarIO misses - - -- these should be all hits - results3 <- io $ async $ traverse (Cache.apply cache f) [1 .. (n `div` 2)] - results4 <- io $ async $ traverse (Cache.apply cache f) [(n `div` 2 + 1) .. n] - (results3, results4) <- io $ waitBoth results3 results4 - - misses2 <- io $ readTVarIO misses - expect' (results1 ++ results2 == [1..n]) - expect' (results3 ++ results4 == [1..n]) - expect' (misses1 == misses2) - diff --git a/parser-typechecker/tests/Unison/Test/Codebase.hs b/parser-typechecker/tests/Unison/Test/Codebase.hs deleted file mode 100644 index ad46c853b6..0000000000 --- a/parser-typechecker/tests/Unison/Test/Codebase.hs +++ /dev/null @@ -1,40 +0,0 @@ -{-# Language OverloadedStrings #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Test.Codebase where - -import Data.Functor.Identity -import qualified Data.Map as Map -import Data.Map ( (!) 
) -import EasyTest -import qualified Unison.Codebase as Codebase -import Unison.Codebase.CodeLookup ( CodeLookup(..) ) -import qualified Unison.Hash as Hash -import qualified Unison.Reference as R -import Unison.Symbol ( Symbol ) -import qualified Unison.Term as Term -import qualified Unison.UnisonFile as UF -import qualified Unison.Var as Var - -test :: Test () -test = scope "codebase" $ tests - [ scope "makeSelfContained" $ - let h = Hash.unsafeFromBase32Hex "abcd" - ref = R.Derived h 0 1 - v1 = Var.refNamed @Symbol ref - foo = Var.named "foo" - -- original binding: `foo = \v1 -> ref` - binding = (foo, Term.lam () v1 (Term.ref () ref)) - uf = UF.UnisonFileId mempty mempty [binding] mempty - code :: CodeLookup Symbol Identity () - code = CodeLookup - { getTerm = \rid -> pure $ - if R.DerivedId rid == ref then Just (Term.int () 42) - else Nothing - , getTypeDeclaration = \_ -> pure Nothing - } - -- expected binding after makeSelfContained: `foo = \v1 -> v2`, where `v2 /= v1` - UF.UnisonFile _ _ (Map.fromList -> bindings) _ = runIdentity $ Codebase.makeSelfContained' code uf - Term.LamNamed' _ (Term.Var' v2) = bindings ! foo - in expect $ v2 /= v1 - ] diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Causal.hs b/parser-typechecker/tests/Unison/Test/Codebase/Causal.hs deleted file mode 100644 index 2aa192a949..0000000000 --- a/parser-typechecker/tests/Unison/Test/Codebase/Causal.hs +++ /dev/null @@ -1,318 +0,0 @@ -{-# LANGUAGE PartialTypeSignatures #-} - -module Unison.Test.Codebase.Causal where - -import EasyTest -import Unison.Codebase.Causal ( Causal(Cons, Merge) - , RawHash(..) 
- , one - , currentHash - , before - ) -import qualified Unison.Codebase.Causal as Causal -import Control.Monad.Trans.State (State, state, put) -import Data.Int (Int64) -import qualified Data.Map as Map -import qualified Data.Set as Set -import Control.Monad (replicateM_) -import Control.Monad.Extra (ifM) -import Control.Applicative (liftA2) -import Data.List (foldl1') -import Data.Functor ((<&>)) -import Unison.Hashable (Hashable) -import Data.Set (Set) -import Data.Functor.Identity -import Unison.Hash (Hash) -import Unison.CommandLine (beforeHash) - -c :: M (Causal M Int64 [Int64]) -c = merge (foldr cons (one [1]) t1) - (foldr cons (foldr cons (one [1]) t2) t3) - where - t1, t2, t3 :: [[Int64]] - t1 = fmap pure [5,4..2] - t2 = fmap pure [100..105] - t3 = fmap pure [999,998] - -c2 :: M (Causal M Int64 [Int64]) -c2 = merge (foldr cons (one [1]) t1) - (foldr cons (foldr cons (one [1]) t2) t3) - where - t1, t2, t3 :: [[Int64]] - t1 = fmap pure [5,4..2] - t2 = fmap pure [10,9..2] - t3 = fmap pure [999,998] - -{- -λ> show Unison.Test.Codebase.Causal.c -"Identity Merge 4gP [999,5] [\"3rG\",\"58U\"]" -λ> runIdentity Unison.Test.Codebase.Causal.result -step a=fromList [1,10] seen=[] rest=fromList [Merge 4gP [999,5] ["3rG","58U"]] -step a=fromList [1,10] seen=["4gP"] rest=fromList [Cons 3rG [999] 4LX,Cons 58U [5] 4vC] -step a=fromList [1,10] seen=["3rG","4gP"] rest=fromList [Cons 58U [5] 4vC,Cons 4LX [998] 26J] -step a=fromList [1,10] seen=["3rG","4gP","58U"] rest=fromList [Cons 4LX [998] 26J,Cons 4vC [4] yFt] -step a=fromList [1,10] seen=["3rG","4LX","4gP","58U"] rest=fromList [Cons 4vC [4] yFt,Cons 26J [100] 4FR] -step a=fromList [1,10] seen=["3rG","4LX","4gP","4vC","58U"] rest=fromList [Cons 26J [100] 4FR,Cons yFt [3] 3So] -step a=fromList [1,10] seen=["26J","3rG","4LX","4gP","4vC","58U"] rest=fromList [Cons yFt [3] 3So,Cons 4FR [101] 4az] -step a=fromList [1,10] seen=["yFt","26J","3rG","4LX","4gP","4vC","58U"] rest=fromList [Cons 4FR [101] 4az,Cons 3So [2] 5Lu] -step 
a=fromList [1,10] seen=["yFt","26J","3rG","4FR","4LX","4gP","4vC","58U"] rest=fromList [Cons 3So [2] 5Lu,Cons 4az [102] 2V3] -step a=fromList [1,10] seen=["yFt","26J","3So","3rG","4FR","4LX","4gP","4vC","58U"] rest=fromList [Cons 4az [102] 2V3,One 5Lu [1]] -step a=fromList [1,10] seen=["yFt","26J","3So","3rG","4FR","4LX","4az","4gP","4vC","58U"] rest=fromList [One 5Lu [1],Cons 2V3 [103] 5pS] -step a=fromList [10] seen=["yFt","26J","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu"] rest=fromList [Cons 2V3 [103] 5pS] -step a=fromList [10] seen=["yFt","26J","2V3","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu"] rest=fromList [Cons 5pS [104] 2tq] -step a=fromList [10] seen=["yFt","26J","2V3","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu","5pS"] rest=fromList [Cons 2tq [105] 5Lu] -step a=fromList [10] seen=["yFt","26J","2V3","2tq","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu","5pS"] rest=fromList [One 5Lu [1]] -step a=fromList [10] seen=["yFt","26J","2V3","2tq","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu","5pS"] rest=fromList [] -Unsatisfied (fromList [10]) - -λ> runIdentity Unison.Test.Codebase.Causal.result (with c2) -step a=fromList [1,10] seen=[] rest=fromList [Cons 2tg [999] 3AW] -step a=fromList [1,10] seen=["2tg"] rest=fromList [Cons 3AW [998] 33b] -step a=fromList [1,10] seen=["2tg","3AW"] rest=fromList [Cons 33b [10] 2NF] -step a=fromList [1] seen=["2tg","33b","3AW"] rest=fromList [Cons 2NF [9] 57i] -step a=fromList [1] seen=["2NF","2tg","33b","3AW"] rest=fromList [Cons 57i [8] ipV] -step a=fromList [1] seen=["2NF","2tg","33b","3AW","57i"] rest=fromList [Cons ipV [7] 3BZ] -step a=fromList [1] seen=["ipV","2NF","2tg","33b","3AW","57i"] rest=fromList [Cons 3BZ [6] 58U] -step a=fromList [1] seen=["ipV","2NF","2tg","33b","3AW","3BZ","57i"] rest=fromList [Cons 58U [5] 4vC] -step a=fromList [1] seen=["ipV","2NF","2tg","33b","3AW","3BZ","57i","58U"] rest=fromList [Cons 4vC [4] yFt] -step a=fromList [1] 
seen=["ipV","2NF","2tg","33b","3AW","3BZ","4vC","57i","58U"] rest=fromList [Cons yFt [3] 3So] -step a=fromList [1] seen=["ipV","yFt","2NF","2tg","33b","3AW","3BZ","4vC","57i","58U"] rest=fromList [Cons 3So [2] 5Lu] -step a=fromList [1] seen=["ipV","yFt","2NF","2tg","33b","3AW","3BZ","3So","4vC","57i","58U"] rest=fromList [One 5Lu [1]] -Satisfied (fromList []) -λ> - --} - -test :: Test () -test = - scope "causal" - . tests - $ [ scope "threeWayMerge.ex1" - . expect - $ Causal.head testThreeWay - == Set.fromList [3, 4] - , scope "threeWayMerge.idempotent" - . expect - $ testIdempotent oneCausal -- == oneCausal - -- $ prop_mergeIdempotent - - , scope "threeWayMerge.identity" - . expect - $ testIdentity oneCausal emptyCausal - -- $ prop_mergeIdentity - , scope "threeWayMerge.commutative" - . expect - $ testCommutative (Set.fromList [3,4]) oneRemoved - -- $ prop_mergeCommutative - {- , scope "threeWayMerge.commonAncestor" - . expect - $ testCommonAncestor - -- $ prop_mergeCommonAncestor --} - , scope "lca.hasLca" lcaPairTest - , scope "lca.noLca" noLcaPairTest - , scope "beforeHash" $ beforeHashTests - ] - -beforeHashTests :: Test () -beforeHashTests = do - -- c1 and c2 have unrelated histories - c1 <- pure $ Causal.one (0 :: Int64) - c2 <- pure $ Causal.one (1 :: Int64) - -- c1' and c2' are extension of c1 and c2, respectively - c1' <- pure $ Causal.cons 2 c1 - c2' <- pure $ Causal.cons 3 c2 - c12 <- Causal.threeWayMerge sillyMerge c1' c2' - - -- verifying basic properties of `before` for these examples - expect' =<< before c1 c1 - expect' =<< before c1 c12 - expect' =<< before c2 c2 - expect' =<< before c2 c12 - expect' =<< before c2 c2' - expect' =<< before c1 c1' - expect' . not =<< before c1 c2 - expect' . not =<< before c2 c1 - - -- make sure the search cutoff works - - -- even though both start with `Causal.one 0`, that's - -- more than 10 steps back from `longCausal 1000`, so we - -- want this to be false - expect' . 
not =<< before c1 (longCausal (1000 :: Int64)) - ok - where - before h c = beforeHash 10 (Causal.currentHash h) c - sillyMerge _lca l _r = pure l - longCausal 0 = Causal.one 0 - longCausal n = Causal.cons n (longCausal (n - 1)) - -int64 :: Test Int64 -int64 = random - -extend - :: Int - -> Causal Identity Hash Int64 - -> Test (Causal Identity Hash Int64) -extend 0 ca = pure ca -extend n ca = do - i <- int64 - extend (n-1) (Causal.cons i ca) - -lcaPair :: Test (Causal Identity Hash Int64, Causal Identity Hash Int64) -lcaPair = do - base <- one <$> int64 - ll <- int' 0 20 - lr <- int' 0 20 - (,) <$> extend ll base <*> extend lr base - -lcaPairTest :: Test () -lcaPairTest = replicateM_ 50 test >> ok - where - test = runIdentity . uncurry Causal.lca <$> lcaPair >>= \case - Just _ -> pure () - Nothing -> crash "expected lca" - -noLcaPair - :: Test (Causal Identity Hash Int64, Causal Identity Hash Int64) -noLcaPair = do - basel <- one <$> int64 - baser <- one <$> int64 - ll <- int' 0 20 - lr <- int' 0 20 - (,) <$> extend ll basel <*> extend lr baser - -noLcaPairTest :: Test () -noLcaPairTest = replicateM_ 50 test >> ok - where - test = runIdentity . 
uncurry Causal.lca <$> noLcaPair >>= \case - Nothing -> pure () - Just _ -> crash "expected no lca" - -oneRemoved :: Causal Identity Hash (Set Int64) -oneRemoved = foldr Causal.cons - (one (Set.singleton 1)) - (Set.fromList <$> [[2, 3, 4], [1, 2, 3, 4], [1, 2]]) - -twoRemoved :: Causal Identity Hash (Set Int64) -twoRemoved = foldr Causal.cons - (one (Set.singleton 1)) - (Set.fromList <$> [[1, 3, 4], [1, 2, 3], [1, 2]]) - -testThreeWay :: Causal Identity Hash (Set Int64) -testThreeWay = runIdentity - $ threeWayMerge' oneRemoved twoRemoved - -setCombine :: Applicative m => Ord a => Set a -> Set a -> m (Set a) -setCombine a b = pure $ a <> b - -setDiff :: Applicative m => Ord a => Set a -> Set a -> m (Set a, Set a) -setDiff old new = pure (Set.difference new old, Set.difference old new) - -setPatch :: Applicative m => Ord a => Set a -> (Set a, Set a) -> m (Set a) -setPatch s (added, removed) = pure (added <> Set.difference s removed) - --- merge x x == x, should not add a new head, and also the value at the head should be the same of course -testIdempotent :: Causal Identity Hash (Set Int64) -> Bool -- Causal Identity Hash (Set Int64) -testIdempotent causal = - runIdentity (threeWayMerge' causal causal) - == causal - --- prop_mergeIdempotent :: Bool --- prop_mergeIdempotent = and (map testIdempotent (take 1000 generateRandomCausals)) - -oneCausal :: Causal Identity Hash (Set Int64) -oneCausal = Causal.one (Set.fromList [1]) - --- generateRandomCausals :: Causal Identity Hash (Set Int64) --- generateRandomCausals = undefined - -easyCombine - :: (Monad m, Semigroup d) - => (e -> e -> m e) - -> (e -> e -> m d) - -> (e -> d -> m e) - -> (Maybe e -> e -> e -> m e) -easyCombine comb _ _ Nothing l r = comb l r -easyCombine _ diff appl (Just ca) l r = do - dl <- diff ca l - dr <- diff ca r - appl ca (dl <> dr) - -threeWayMerge' - :: Causal Identity Hash (Set Int64) - -> Causal Identity Hash (Set Int64) - -> Identity (Causal Identity Hash (Set Int64)) -threeWayMerge' = 
Causal.threeWayMerge (easyCombine setCombine setDiff setPatch) - --- merge x mempty == x, merge mempty x == x -testIdentity :: Causal Identity Hash (Set Int64) -> Causal Identity Hash (Set Int64) -> Bool -testIdentity causal mempty = - (threeWayMerge' causal mempty) - == (threeWayMerge' mempty causal) - -emptyCausal :: Causal Identity Hash (Set Int64) -emptyCausal = one (Set.empty) - --- merge (cons hd tl) tl == cons hd tl, merge tl (cons hd tl) == cons hd tl -testCommutative :: Set Int64 -> Causal Identity Hash (Set Int64) -> Bool -testCommutative hd tl = (threeWayMerge' (Causal.cons hd tl) tl) - == (threeWayMerge' tl (Causal.cons hd tl)) - - -{- -testCommonAncestor :: -testCommonAncestor = --} - - - --- [ scope "foldHistoryUntil" . expect $ execState c mempty == Set.fromList [3,2,1]] - ---result :: M (Causal.FoldHistoryResult (Set Int64)) ---result = Causal.foldHistoryUntil f (Set.fromList [10, 1]) =<< c2 where --- f s e = let s' = Set.difference s (Set.fromList e) in (s', Set.null s') - -result, result2 :: M (Causal.FoldHistoryResult (Set Int64)) -(result, result2) = - (Causal.foldHistoryUntil f (Set.fromList [10, 1]) =<< (do c' <- c; put mempty ; pure c') - ,Causal.foldHistoryUntil f (Set.fromList [10, 1]) =<< (do c' <- c2; put mempty ; pure c')) - where f s e = let s' = Set.difference s (Set.fromList e) in (s', Set.null s') - ----- special cons and merge that mess with state monad for logging -type M = State [[Int64]] -cons :: [Int64] - -> Causal M h [Int64] - -> Causal M h [Int64] - -merge :: Causal M h [Int64] - -> Causal M h [Int64] - -> M (Causal M h [Int64]) - -(cons, merge) = (cons'' pure, merge'' pure) - where - pure :: Causal m h [Int64] -> M (Causal m h [Int64]) - pure c = state (\s -> (c, Causal.head c : s)) - -cons'' :: Hashable e1 - => (Causal m1 h e2 -> m2 (Causal m2 h e1)) - -> e1 -> Causal m1 h e2 -> Causal m2 h e1 -cons'' pure e tl = - Cons (RawHash $ Causal.hash [Causal.hash e, unRawHash . 
currentHash $ tl]) e (currentHash tl, pure tl) - -merge'' :: (Monad m, Semigroup e) - => (Causal m h e -> m (Causal m h e)) - -> Causal m h e -> Causal m h e -> m (Causal m h e) -merge'' pure a b = - ifM (before a b) (pure b) . ifM (before b a) (pure a) $ case (a, b) of - (Merge _ _ tls, Merge _ _ tls2) -> merge0 $ Map.union tls tls2 - (Merge _ _ tls, b) -> merge0 $ Map.insert (currentHash b) (pure b) tls - (b, Merge _ _ tls) -> merge0 $ Map.insert (currentHash b) (pure b) tls - (a, b) -> - merge0 $ Map.fromList [(currentHash a, pure a), (currentHash b, pure b)] - where - merge0 m = - let e = if Map.null m - then error "Causal.merge0 empty map" - else foldl1' (liftA2 (<>)) (fmap Causal.head <$> Map.elems m) - h = Causal.hash (Map.keys m) -- sorted order - in e <&> \e -> Merge (RawHash h) e m - diff --git a/parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs b/parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs deleted file mode 100644 index 147477b48c..0000000000 --- a/parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs +++ /dev/null @@ -1,48 +0,0 @@ -module Unison.Test.Codebase.FileCodebase where - -import EasyTest -import Unison.Codebase.FileCodebase.Common (encodeFileName, decodeFileName) -import qualified Data.Set as Set -import Data.Char as Char -import Data.Foldable (toList) - -test :: Test () -test = scope "FileCodebase" . tests $ - [ scope "encode/decodeFileName" . tests $ - [ encodeDecode "abc" - , encodeDecode "👍" - , encodeDecode "\xfff" - , tests $ encodeDecode . (:[]) <$> ['!'..'~'] - , encodeDecode ("Universal." ++ ['!'..'~']) - , specialEncode "." - , specialEncode ".." - , tests $ map specialEncodeChar (toList specificallyBadChars) - , specialEncodeChar '👍' - , specialEncodeChar '\xfff' - ] - ] - -specialEncode :: String -> Test () -specialEncode s = - scope (" " <> s <> " gets special encoding") $ expect (encodeFileName s /= s) - -specialEncodeChar :: Char -> Test () -specialEncodeChar = specialEncode . 
pure - -encodeDecode :: String -> Test () -encodeDecode s = - let e = encodeFileName s - d = decodeFileName e - in scope s $ expect $ d == s && all isSafeChar e - --- In the past we had considered a much smaller set of safe chars: --- [0-9,a-z,A-Z,-._] from https://superuser.com/a/748264 --- Currently we are going by https://superuser.com/a/358861 -isSafeChar :: Char -> Bool -isSafeChar c = Set.notMember c specificallyBadChars - && Char.isPrint c - && Char.isAscii c - -specificallyBadChars :: Set.Set Char -specificallyBadChars = Set.fromList "\\/:*?\"<>|" - diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Path.hs b/parser-typechecker/tests/Unison/Test/Codebase/Path.hs deleted file mode 100644 index e775fb489f..0000000000 --- a/parser-typechecker/tests/Unison/Test/Codebase/Path.hs +++ /dev/null @@ -1,66 +0,0 @@ -{-# LANGUAGE OverloadedStrings, OverloadedLists #-} - -module Unison.Test.Codebase.Path where - -import EasyTest -import Unison.Codebase.Path -import Data.Sequence -import Data.Text -import Unison.NameSegment -import Data.Either -import qualified Unison.HashQualified' as HQ' -import qualified Unison.ShortHash as SH - -test :: Test () -test = scope "path" . tests $ - [ scope "parsePathImpl'" . tests $ - [ let s = "foo.bar.baz.34" in scope s . expect $ parsePathImpl' s == Right (relative ["foo","bar","baz"], "34") - , let s = "foo.bar.baz" in scope s . expect $ parsePathImpl' s == Right (relative ["foo", "bar"], "baz") - , let s = "baz" in scope s . expect $ parsePathImpl' s == Right (relative [], "baz") - , let s = "-" in scope s . expect $ parsePathImpl' s == Right (relative [], "-") - , let s = "34" in scope s . pending . expect $ parsePathImpl' s == Right (relative [], "34") - , let s = "foo.bar.baz#a8fj" in scope s . expect $ isLeft $ parsePathImpl' s - ] - , scope "parseSplit'" . tests $ - [ scope "wordyNameSegment" . tests $ - [ let s = "foo.bar.baz" in scope s . 
expect $ - parseSplit' wordyNameSegment s == Right (relative ["foo", "bar"], NameSegment "baz") - - , let s = "foo.bar.baz#abc" in scope s . expect $ isLeft $ parseSplit' wordyNameSegment s - - , let s = "foo.bar.+" in scope s . expect $ - isLeft $ parseSplit' wordyNameSegment s - ] - - , scope "definitionNameSegment" . tests $ - [ let s = "foo.bar.+" in scope s . expect $ - parseSplit' definitionNameSegment s == Right (relative ["foo", "bar"], NameSegment "+") - ] - ] - , scope "parseShortHashOrHQSplit'" . tests $ - [ let s = "foo.bar#34" in scope s . expect $ - parseShortHashOrHQSplit' s == - (Right . Right) - (relative ["foo"], HQ'.HashQualified (NameSegment "bar") (SH.unsafeFromText "#34")) - - , let s = "foo.bar.+" in scope s . expect $ - parseShortHashOrHQSplit' s == - (Right . Right) - (relative ["foo", "bar"], HQ'.NameOnly (NameSegment "+")) - - , let s = "#123" in scope s . expect $ - parseShortHashOrHQSplit' s == - (Right . Left) (SH.unsafeFromText "#123") - ] - , scope "parseHQ'Split'" . tests $ - [ let s = "foo.bar#34" in scope s . expect $ - parseHQSplit' s == Right (relative ["foo"], HQ'.HashQualified (NameSegment "bar") (SH.unsafeFromText "#34")) - , let s = "foo.bar.+" in scope s . expect $ - parseHQSplit' s == Right (relative ["foo", "bar"], HQ'.NameOnly (NameSegment "+")) - , let s = "#123" in scope s . expect $ isLeft $ parseHQSplit' s - ] - ] - - -relative :: Seq Text -> Path' -relative = Path' . Right . Relative . Path . 
fmap NameSegment diff --git a/parser-typechecker/tests/Unison/Test/ColorText.hs b/parser-typechecker/tests/Unison/Test/ColorText.hs deleted file mode 100644 index 55375a9728..0000000000 --- a/parser-typechecker/tests/Unison/Test/ColorText.hs +++ /dev/null @@ -1,84 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE QuasiQuotes #-} -module Unison.Test.ColorText where - --- import EasyTest -import qualified Data.Map as Map -import EasyTest -import Text.RawString.QQ -import Unison.Lexer (Pos (..)) -import Unison.Util.AnnotatedText (AnnotatedExcerpt (..), - condensedExcerptToText, markup) -import Unison.Util.ColorText (Color (..), toANSI) -import qualified Unison.Util.ColorText as ColorText -import Unison.Util.Range (Range (..)) - -test :: Test () -test = scope "colortext" . tests $ [ - -- commented out because they don't render exactly the same escape sequences, but they're equivalent4 as of this writing - -- scope "inclusive-exclusive range" . expect . trace ("ex4e: " ++ show (rawRender ex4e) ++ "\n" ++ "ex4t: " ++ show (rawRender ex4t) ++ "\n")$ ex4e == ex4t - ] - -ex4e :: String -ex4e = toANSI . condensedExcerptToText 1 $ markup "abc" m - where m = Map.singleton (Range (Pos 1 2) (Pos 1 3)) Red - -ex4t :: String -ex4t = toANSI $ " 1 | " <> "a" <> ColorText.style Red "b" <> "c" <> "\n" - - -ex2 :: AnnotatedExcerpt Color -ex2 = markup ex (Map.fromList - [ (Range (Pos 3 1) (Pos 3 5), Red) -- SCENE - , (Range (Pos 5 9) (Pos 5 14), Blue) -- Master - , (Range (Pos 5 22) (Pos 5 30), Blue) -- Boatswain - , (Range (Pos 25 1) (Pos 25 6), Red) -- ALONSO - , (Range (Pos 12 30) (Pos 13 27), Green) -- fall ... aground. - ]) - -renderEx2 :: String -renderEx2 = toANSI . condensedExcerptToText 3 $ ex2 - -ex3 :: AnnotatedExcerpt Color -ex3 = markup "Hello, world!" $ Map.fromList - [ (Range (Pos 1 8) (Pos 1 12), Blue) - , (Range (Pos 1 1) (Pos 1 5), Green) ] - -ex4 :: AnnotatedExcerpt Color -ex4 = markup "Hello,\nworld!" 
$ Map.fromList - [ (Range (Pos 2 1) (Pos 2 5), Blue) - , (Range (Pos 1 1) (Pos 1 5), Green) ] - -ex :: Ord a => AnnotatedExcerpt a -ex = [r|The Tempest | Act 1, Scene 1 - -SCENE I. On a ship at sea: a tempestuous noise -of thunder and lightning heard. -Enter a Master and a Boatswain - -Master -Boatswain! -Boatswain -Here, master: what cheer? -Master -Good, speak to the mariners: fall to't, yarely, -or we run ourselves aground: bestir, bestir. -Exit - -Enter Mariners - -Boatswain -Heigh, my hearts! cheerly, cheerly, my hearts! -yare, yare! Take in the topsail. Tend to the -master's whistle. Blow, till thou burst thy wind, -if room enough! -Enter ALONSO, SEBASTIAN, ANTONIO, FERDINAND, GONZALO, and others - -ALONSO -Good boatswain, have care. Where's the master? -Play the men. -Boatswain -I pray now, keep below. -|] - --- test = scope "colortext.snipWithContext" . expect $ diff --git a/parser-typechecker/tests/Unison/Test/Common.hs b/parser-typechecker/tests/Unison/Test/Common.hs deleted file mode 100644 index 2c078fdb3b..0000000000 --- a/parser-typechecker/tests/Unison/Test/Common.hs +++ /dev/null @@ -1,75 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Test.Common - ( hqLength - , t - , tm - , parseAndSynthesizeAsFile - , parsingEnv - ) where - -import Data.Sequence (Seq) -import qualified Data.Text as Text -import qualified Unison.Builtin as B -import qualified Unison.FileParsers as FP -import Unison.Parser (Ann(..)) -import Unison.PrintError ( prettyParseError ) -import Unison.Result (Result, Note) -import Unison.Symbol (Symbol) -import Unison.Var (Var) -import Unison.UnisonFile (TypecheckedUnisonFile) -import qualified Unison.ABT as ABT -import qualified Unison.Lexer as L -import qualified Unison.Parser as Parser -import qualified Unison.Term as Term -import qualified Unison.TermParser as TermParser -import qualified Unison.Type as Type -import qualified Unison.TypeParser as TypeParser -import qualified Unison.Util.Pretty as Pr -import qualified 
Text.Megaparsec.Error as MPE -import qualified Unison.Names3 - - -type Term v = Term.Term v Ann -type Type v = Type.Type v Ann - -hqLength :: Int -hqLength = 10 - -t :: String -> Type Symbol -t s = ABT.amap (const Intrinsic) - -- . either (error . show ) id - -- . Type.bindSomeNames B.names0 - . either (error . showParseError s) tweak - $ Parser.run (Parser.root TypeParser.valueType) s parsingEnv - where tweak = Type.generalizeLowercase mempty - -tm :: String -> Term Symbol -tm s = either (error . show) id - -- . Term.bindSomeNames mempty B.names0 - -- . either (error . showParseError s) id - $ Parser.run (Parser.root TermParser.term) s parsingEnv - -showParseError :: Var v - => String - -> MPE.ParseError (L.Token L.Lexeme) (Parser.Error v) - -> String -showParseError s = Pr.toANSI 60 . prettyParseError s - -parseAndSynthesizeAsFile - :: Var v - => [Type v] - -> FilePath - -> String - -> Result - (Seq (Note v Ann)) - (Either Unison.Names3.Names0 (TypecheckedUnisonFile v Ann)) -parseAndSynthesizeAsFile ambient filename s = FP.parseAndSynthesizeFile - ambient - (\_deps -> pure B.typeLookup) - parsingEnv - filename - (Text.pack s) - -parsingEnv :: Parser.ParsingEnv -parsingEnv = Parser.ParsingEnv mempty B.names diff --git a/parser-typechecker/tests/Unison/Test/DataDeclaration.hs b/parser-typechecker/tests/Unison/Test/DataDeclaration.hs deleted file mode 100644 index 40824ecb50..0000000000 --- a/parser-typechecker/tests/Unison/Test/DataDeclaration.hs +++ /dev/null @@ -1,121 +0,0 @@ -{-# LANGUAGE OverloadedStrings, QuasiQuotes #-} - -module Unison.Test.DataDeclaration where - -import qualified Data.Map as Map -import Data.Map ( Map, (!) 
) -import EasyTest -import Text.RawString.QQ -import qualified Unison.DataDeclaration as DD -import Unison.DataDeclaration ( DataDeclaration(..), Decl, hashDecls ) -import qualified Unison.Hash as Hash -import Unison.Parser ( Ann ) -import Unison.Parsers ( unsafeParseFile ) -import qualified Unison.Reference as R -import Unison.Symbol ( Symbol ) -import qualified Unison.Test.Common as Common -import qualified Unison.Type as Type -import Unison.UnisonFile ( UnisonFile(..) ) -import qualified Unison.Var as Var - -test :: Test () -test = scope "datadeclaration" $ - let Right hashes = hashDecls . (snd <$>) . dataDeclarationsId $ file - hashMap = Map.fromList $ fmap (\(a,b,_) -> (a,b)) hashes - hashOf k = Map.lookup (Var.named k) hashMap - in tests [ - scope "Bool == Bool'" . expect $ hashOf "Bool" == hashOf "Bool'", - scope "Bool != Option'" . expect $ hashOf "Bool" /= hashOf "Option'", - scope "Option == Option'" . expect $ hashOf "Option" == hashOf "Option'", - scope "List == List'" . expect $ hashOf "List" == hashOf "List'", - scope "List != SnocList" . expect $ hashOf "List" /= hashOf "SnocList", - scope "Ping != Pong" . expect $ hashOf "Ping" /= hashOf "Pong", - scope "Ping == Ling'" . expect $ hashOf "Ping" == hashOf "Ling'", - scope "Pong == Long'" . expect $ hashOf "Pong" == hashOf "Long'", - scope "unhashComponent" unhashComponentTest - ] - -file :: UnisonFile Symbol Ann -file = flip unsafeParseFile Common.parsingEnv $ [r| - -type Bool = True | False -type Bool' = False | True - -type Option a = Some a | None -type Option' b = Nothing | Just b - -type List a = Nil | Cons a (List a) -type List' b = Prepend b (List' b) | Empty -type SnocList a = Snil | Snoc (List a) a - -type ATree a = Tree a (List (ATree a)) | Leaf (Option a) - -type Ping a = Ping a (Pong a) -type Pong a = Pnong | Pong (Ping a) - -type Long' a = Long' (Ling' a) | Lnong -type Ling' a = Ling' a (Long' a) -|] - - --- faketest = scope "termparser" . tests . 
map parses $ --- ["x" --- , "match x with\n" ++ --- " {Pair x y} -> 1\n" ++ --- " {State.set 42 -> k} -> k 42\n" --- ] --- --- builtins = Map.fromList --- [("Pair", (R.Builtin "Pair", 0)), --- ("State.set", (R.Builtin "State", 0))] --- --- parses s = scope s $ do --- let p = unsafeParseTerm s builtins :: Term Symbol --- noteScoped $ "parsing: " ++ s ++ "\n " ++ show p --- ok - -unhashComponentTest :: Test () -unhashComponentTest = tests - [ scope "invented-vars-are-fresh" inventedVarsFreshnessTest - ] - where - inventedVarsFreshnessTest = - let - var = Type.var () - app = Type.app () - forall = Type.forall () - (-->) = Type.arrow () - h = Hash.unsafeFromBase32Hex "abcd" - ref = R.Derived h 0 1 - a = Var.refNamed ref - b = Var.named "b" - nil = Var.named "Nil" - cons = Var.refNamed ref - listRef = ref - listType = Type.ref () listRef - listDecl = DataDeclaration { - modifier = DD.Structural, - annotation = (), - bound = [], - constructors' = - [ ((), nil, forall a (listType `app` var a)) - , ((), cons, forall b (var b --> listType `app` var b --> listType `app` var b)) - ] - } - component :: Map R.Reference (Decl Symbol ()) - component = Map.singleton listRef (Right listDecl) - component' :: Map R.Reference (Symbol, Decl Symbol ()) - component' = DD.unhashComponent component - (listVar, Right listDecl') = component' ! listRef - listType' = var listVar - constructors = Map.fromList $ DD.constructors listDecl' - nilType' = constructors ! nil - z = Var.named "z" - in tests - [ -- check that `nil` constructor's type did not collapse to `forall a. 
a a`, - -- which would happen if the var invented for `listRef` was simply `Var.refNamed listRef` - expectEqual (forall z (listType' `app` var z)) nilType' - , -- check that the variable assigned to `listRef` is different from `cons`, - -- which would happen if the var invented for `listRef` was simply `Var.refNamed listRef` - expectNotEqual cons listVar - ] diff --git a/parser-typechecker/tests/Unison/Test/FileParser.hs b/parser-typechecker/tests/Unison/Test/FileParser.hs deleted file mode 100644 index f45a6298a6..0000000000 --- a/parser-typechecker/tests/Unison/Test/FileParser.hs +++ /dev/null @@ -1,136 +0,0 @@ -{-# Language BangPatterns, OverloadedStrings #-} - -module Unison.Test.FileParser where - - import EasyTest - import Data.List (uncons) - import Data.Set (elems) - import qualified Text.Megaparsec.Error as MPE - import Unison.FileParser (file) - import qualified Unison.Parser as P - import Unison.Parsers (unsafeGetRightFrom, unsafeParseFileBuiltinsOnly) - import Unison.Symbol (Symbol) - import Unison.UnisonFile (UnisonFile) - import Unison.Var (Var) - import qualified Unison.Test.Common as Common - - test1 :: Test () - test1 = scope "test1" . tests . 
map parses $ - [ - -- , "type () = ()\n()" - "type Pair a b = Pair a b\n" - , "type Optional a = Just a | Nothing\n" - , unlines - ["type Optional2 a" - ," = Just a" - ," | Nothing\n"] - ------ -- ,unlines - ------ -- ["type Optional a b c where" - ------ -- ," Just : a -> Optional a" - ------ -- ," Nothing : Optional Int"] - ------ -- , unlines - ------ -- ["type Optional" - ------ -- ," a" - ------ -- ," b" - ------ -- ," c where" - ------ -- ," Just : a -> Optional a" - ------ -- ," Nothing : Optional Int"] - , unlines -- NB: this currently fails because we don't have type AST or parser for effect types yet - ["ability State s where" - ," get : {State s} s" - ," set : s -> {State s} ()" - ] - , unlines - ["ping x = pong (x + 1)" - ,"pong x = ping (x - 1)" - ] - ] - - test2 :: Test () - test2 = scope "test2" $ - (io $ unsafeParseFileBuiltinsOnly "unison-src/test1.u") *> ok - - test :: Test () - test = scope "fileparser" . tests $ - [test1 - , emptyWatchTest - , signatureNeedsAccompanyingBodyTest - , emptyBlockTest - , expectedBlockOpenTest - , unknownDataConstructorTest - , unknownAbilityConstructorTest - ] - - expectFileParseFailure :: String -> (P.Error Symbol -> Test ()) -> Test () - expectFileParseFailure s expectation = scope s $ do - let result = P.run (P.rootFile file) s Common.parsingEnv - case result of - Right _ -> crash "Parser succeeded" - Left (MPE.FancyError _ sets) -> - case (fmap (fst) . uncons . 
elems) sets of - Just (MPE.ErrorCustom e) -> expectation e - Just _ -> crash "Error encountered was not custom" - Nothing -> crash "No error found" - Left e -> crash ("Parser failed with an error which was a trivial parser error: " ++ show e) - - emptyWatchTest :: Test () - emptyWatchTest = scope "emptyWatchTest" $ - expectFileParseFailure ">" expectation - where - expectation :: Var e => P.Error e -> Test () - expectation e = case e of - P.EmptyWatch -> ok - _ -> crash "Error wasn't EmptyWatch" - - signatureNeedsAccompanyingBodyTest :: Test () - signatureNeedsAccompanyingBodyTest = scope "signatureNeedsAccompanyingBodyTest" $ - expectFileParseFailure (unlines ["f : Nat -> Nat", "", "g a = a + 1"]) expectation - where - expectation :: Var e => P.Error e -> Test () - expectation e = case e of - P.SignatureNeedsAccompanyingBody _ -> ok - _ -> crash "Error wasn't SignatureNeedsAccompanyingBody" - - emptyBlockTest :: Test () - emptyBlockTest = scope "emptyBlockTest" $ - expectFileParseFailure (unlines ["f a =", "", "> 1 + 1"]) expectation - where - expectation :: Var e => P.Error e -> Test () - expectation e = case e of - P.EmptyBlock _ -> ok - _ -> crash "Error wasn't EmptyBlock" - - expectedBlockOpenTest :: Test () - expectedBlockOpenTest = scope "expectedBlockOpenTest" $ - expectFileParseFailure "f a b = match a b" expectation - where - expectation :: Var e => P.Error e -> Test () - expectation e = case e of - P.ExpectedBlockOpen _ _ -> ok - _ -> crash "Error wasn't ExpectedBlockOpen" - - unknownDataConstructorTest :: Test () - unknownDataConstructorTest = scope "unknownDataConstructorTest" $ - expectFileParseFailure "m a = match a with A -> 1" expectation - where - expectation :: Var e => P.Error e -> Test () - expectation e = case e of - P.UnknownDataConstructor _ _ -> ok - _ -> crash "Error wasn't UnknownDataConstructor" - - unknownAbilityConstructorTest :: Test () - unknownAbilityConstructorTest = scope "unknownAbilityConstructorTest" $ - expectFileParseFailure 
"f e = match e with {E t -> u} -> 1" expectation - where - expectation :: Var e => P.Error e -> Test () - expectation e = case e of - P.UnknownAbilityConstructor _ _ -> ok - _ -> crash "Error wasn't UnknownAbilityConstructor" - - parses :: String -> Test () - parses s = scope s $ do - let - p :: UnisonFile Symbol P.Ann - !p = unsafeGetRightFrom s $ - P.run (P.rootFile file) s Common.parsingEnv - pure p >> ok diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs deleted file mode 100644 index f938ce75b9..0000000000 --- a/parser-typechecker/tests/Unison/Test/Git.hs +++ /dev/null @@ -1,523 +0,0 @@ -{-# Language OverloadedStrings #-} -{-# Language QuasiQuotes #-} - -module Unison.Test.Git where - -import EasyTest -import Data.List (intercalate) -import Data.List.Split (splitOn) -import qualified Data.Sequence as Seq -import Data.String.Here (iTrim) -import Unison.Prelude -import qualified Data.Text as Text -import qualified System.IO.Temp as Temp -import Shellmet () -import System.FilePath (()) -import System.Directory (doesFileExist, removeDirectoryRecursive, removeFile) - -import Unison.Codebase (BuiltinAnnotation, Codebase, CodebasePath) -import qualified Unison.Codebase as Codebase -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.FileCodebase as FC -import qualified Unison.Codebase.Serialization.V1 as V1 -import qualified Unison.Codebase.SyncMode as SyncMode -import qualified Unison.Codebase.TranscriptParser as TR -import Unison.Codebase.Path (Path(..)) -import Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex as SlimCopyRegenerateIndex -import Unison.Codebase.FileCodebase.Common (SyncToDir, formatAnn) -import Unison.Parser (Ann) -import Unison.Symbol (Symbol) -import qualified Unison.Util.Cache as Cache -import Unison.Var (Var) - -test :: Test () -test = scope "git" . 
tests $ - [ testPull - , testPush - , syncComplete - , syncTestResults - ] - -traceTranscriptOutput :: Bool -traceTranscriptOutput = False - --- | make sure that a definition present in the target dir doesn't prevent --- syncing of its dependencies -syncComplete :: Test () -syncComplete = scope "syncComplete" $ do - tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "syncComplete" - - targetDir <- io $ Temp.createTempDirectory tmp "target" - let - delete = io . traverse_ removeFile . fmap (targetDir ) - observe title expectation files = scope title . for_ files $ \path -> - scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expectation - - cache <- io Cache.nullCache - codebase <- io $ snd <$> initCodebase cache tmp "codebase" - - runTranscript_ tmp codebase cache [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ -``` -```unison -pushComplete.a.x = 3 -pushComplete.b.c.y = x + 1 -``` -```ucm -.> add -.> history pushComplete.b -``` -|] - - -- sync pushComplete.b to targetDir - -- observe that pushComplete.b.c and x exist - b <- io (Codebase.getRootBranch codebase) - >>= either (crash.show) - (pure . Branch.getAt' (Path $ Seq.fromList ["pushComplete", "b"] )) - io $ Codebase.syncToDirectory codebase targetDir SyncMode.ShortCircuit b - observe "initial" expect files - - -- delete pushComplete.b.c (#5lk9autjd5) - -- delete x (#msp7bv40rv) - -- observe that pushComplete.b.c and x are now gone - delete files - observe "deleted" (expect . not) files - - -- sync again with ShortCircuit - -- observe that pushComplete.b.c and x are still missing. - -- `c` is short-circuited at `b`, and `x` is short-circuited - -- at both `pushComplete` and `y`. - io $ Codebase.syncToDirectory codebase targetDir SyncMode.ShortCircuit b - observe "short-circuited" (expect . 
not) files - - -- sync again with Complete - -- observe that pushComplete.b.c and x are back - io $ Codebase.syncToDirectory codebase targetDir SyncMode.Complete b - observe "complete" expect files - - -- if we haven't crashed, clean up! - io $ removeDirectoryRecursive tmp - - where - files = - [ ".unison/v1/paths/5lk9autjd5911i8m52vsvf3si8ckino03gqrks1fokd9lf9kvc4id9gmuudjk4q06j3rkhi83o9g47mde5amchc1leqlskjs391m7fg.ub" - , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/type.ub" - , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/compiled.ub" - ] - -syncTestResults :: Test () -syncTestResults = scope "syncTestResults" $ do - -- put all our junk into here - tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "syncTestResults" - - targetDir <- io $ Temp.createTempDirectory tmp "target" - cache <- io Cache.nullCache - codebase <- io $ snd <$> initCodebase cache tmp "codebase" - - runTranscript_ tmp codebase cache [iTrim| -```ucm -.> builtins.merge -``` -```unison -test> tests.x = [Ok "Great!"] -``` -```ucm -.> add -``` -|] - -{- - .> history tests - ⊙ #0bnfrk7cu4 - .> debug.file - tests.x#2c2hpa2jm1 - .> --} - - b <- io (Codebase.getRootBranch codebase) >>= \case - Left e -> crash $ show e - Right b -> pure b - - io $ Codebase.syncToDirectory codebase targetDir SyncMode.ShortCircuit - (Branch.getAt' (Path $ pure "tests") b) - - scope "target-should-have" $ - for targetShouldHave $ \path -> - scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expect - - -- if we haven't crashed, clean up! 
- io $ removeDirectoryRecursive tmp - where - targetShouldHave = - [ ".unison/v1/paths/0bnfrk7cu44q0vvaj7a0osl90huv6nj01nkukplcsbgn3i09h6ggbthhrorm01gpqc088673nom2i491fh9rtbqcc6oud6iqq6oam88.ub" - , ".unison/v1/terms/#2c2hpa2jm1101sq10k4jqhpmv5cvvgtqm8sf9710kl8mlrum5b6i2d0rdtrrpg3k1ned5ljna1rvomjte7rcbpd9ouaqcsit1n1np3o/type.ub" - , ".unison/v1/terms/#2c2hpa2jm1101sq10k4jqhpmv5cvvgtqm8sf9710kl8mlrum5b6i2d0rdtrrpg3k1ned5ljna1rvomjte7rcbpd9ouaqcsit1n1np3o/compiled.ub" - , ".unison/v1/watches/test/#2c2hpa2jm1101sq10k4jqhpmv5cvvgtqm8sf9710kl8mlrum5b6i2d0rdtrrpg3k1ned5ljna1rvomjte7rcbpd9ouaqcsit1n1np3o.ub" - ] - --- goal of this test is to make sure that pull doesn't grab a ton of unneeded --- dependencies -testPull :: Test () -testPull = scope "pull" $ do - branchCache <- io $ Branch.boundedCache 4096 - -- let's push a broader set of stuff, pull a narrower one (to a fresh codebase) - -- and verify that we have the definitions we expected and don't have some of - -- the ones we didn't expect. 
- - -- put all our junk into here - tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" - - -- initialize author and user codebases - authorCodebase <- io $ snd <$> initCodebase branchCache tmp "author" - (userDir, userCodebase) <- io $ initCodebase branchCache tmp "user" - - -- initialize git repo - let repo = tmp "repo.git" - io $ "git" ["init", "--bare", Text.pack repo] - - -- run author/push transcript - runTranscript_ tmp authorCodebase branchCache [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ -``` -```unison -unique type outside.A = A Nat -unique type outside.B = B Nat Nat -outside.c = 3 -outside.d = 4 - -unique type inside.X = X outside.A -inside.y = c + c -``` -```ucm -.myLib> debug.file -.myLib> add -.myLib> push ${repo} -``` -|] - - -- check out the resulting repo so we can inspect it - io $ "git" ["clone", Text.pack repo, Text.pack $ tmp "repo" ] - - scope "git-should-have" $ - for gitShouldHave $ \path -> - scope (makeTitle path) $ io (doesFileExist $ tmp "repo" path) >>= expect - - -- run user/pull transcript - runTranscript_ tmp userCodebase branchCache [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ -``` -```ucm -.yourLib> pull ${repo}:.inside -``` - |] - - -- inspect user codebase - scope "user-should-have" $ - for userShouldHave $ \path -> - scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect - scope "user-should-not-have" $ -- this definitely won't pass with current implementation - for userShouldNotHave $ \path -> - scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect . not - - -- if we haven't crashed, clean up! 
- io $ removeDirectoryRecursive tmp - - where - gitShouldHave = userShouldHave ++ userShouldNotHave ++ - [ ".unison/v1/paths/p8ahoj90hkdjpvlcu60f6ks7q2is1uqbn1e74k5qn4jt1qmrhk0a62e9b2gamm6qmjdii478la2fha5pnnuvhit2b1mp439od7mrqmg.ub" - ] - userShouldHave = - [ ".unison/v1/type-mentions-index/_builtin/Nat/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" - , ".unison/v1/type-mentions-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo" - , ".unison/v1/type-mentions-index/_builtin/Nat/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" - , ".unison/v1/type-mentions-index/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" - , ".unison/v1/type-mentions-index/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" - , ".unison/v1/type-mentions-index/#2po5mnhi28fbs9fecf4ceq4q9htbfcgkl3ljnkhmhq30ec7m5h77fpl1ec96it21690ju6gnhkj8sqr2entn0cu1gfvl8rfddohk6ug/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" - , ".unison/v1/type-mentions-index/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" - , ".unison/v1/type-mentions-index/#k1lik85h1sgcpqura4riuipjq3mtkkuu5slida6q2lkg028fd7jn12kufrk2sqrtbftq3snteeh8l9o984mhnurmo3arr5j4d7hg5oo/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" - , 
".unison/v1/types/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/compiled.ub" - , ".unison/v1/types/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0/compiled.ub" - , ".unison/v1/dependents/_builtin/Nat.+/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" - , ".unison/v1/dependents/_builtin/Nat/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" - , ".unison/v1/dependents/_builtin/Nat/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0" - , ".unison/v1/dependents/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo" - , ".unison/v1/dependents/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0" - , ".unison/v1/dependents/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" - , ".unison/v1/terms/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg/type.ub" - , ".unison/v1/terms/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg/compiled.ub" - , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/type.ub" - , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/compiled.ub" - , 
".unison/v1/type-index/_builtin/Nat/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" - , ".unison/v1/type-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo" - , ".unison/v1/type-index/#2po5mnhi28fbs9fecf4ceq4q9htbfcgkl3ljnkhmhq30ec7m5h77fpl1ec96it21690ju6gnhkj8sqr2entn0cu1gfvl8rfddohk6ug/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" - , ".unison/v1/type-index/#k1lik85h1sgcpqura4riuipjq3mtkkuu5slida6q2lkg028fd7jn12kufrk2sqrtbftq3snteeh8l9o984mhnurmo3arr5j4d7hg5oo/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" - , ".unison/v1/paths/esvotl1kr2aqo4tkq7p6lp2chkepmg7n3im1t6hqgd93slk97kops8idp7fj7i57pakvg6lhk0efsco6s2vvtql0jffomm8tvngogd0.ub" - , ".unison/v1/paths/ucnhqspklepn3ihu1o3ph2or9hsrhcpoav93v4gi1v97ttoc2vuup173mcophp8r90r0j3k5mg2knlqr85gdq1dseh8mt5t94c4am4o.ub" - ] - userShouldNotHave = - [ ".unison/v1/type-mentions-index/_builtin/Nat/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" - , ".unison/v1/type-mentions-index/_builtin/Nat/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg" - , ".unison/v1/type-mentions-index/#ap7kd0rc80kp7vjosb0im9j365kgbqhqhj3fv4ufs7bv5b3ed0d4jleqqulu74lj60fuht1oqr117u17jnp1ql8te67vjit95p7k80o/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" - , ".unison/v1/type-mentions-index/#7krpfrn5gm7m3beiho9jmar3dojnj7mrksnjbmh8i0p9hbmekqv21kqrtsr5lq4rr4n0sako6e7lmt8k2a39senua9efjfo7214s3q8/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" - , 
".unison/v1/type-mentions-index/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" - , ".unison/v1/types/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58/compiled.ub" - , ".unison/v1/dependents/_builtin/Nat/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg" - , ".unison/v1/dependents/_builtin/Nat/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58" - , ".unison/v1/terms/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg/type.ub" - , ".unison/v1/terms/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg/compiled.ub" - , ".unison/v1/type-index/_builtin/Nat/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg" - , ".unison/v1/type-index/#ap7kd0rc80kp7vjosb0im9j365kgbqhqhj3fv4ufs7bv5b3ed0d4jleqqulu74lj60fuht1oqr117u17jnp1ql8te67vjit95p7k80o/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" - , ".unison/v1/paths/000fqlrbs84nui3o3sp04s32vsbq39iv9foqvs4c38ajki3re86v72s0j5deqtcdqqml9r8e50lcmld2j8ncj7a1fqnqb4pvcaphcu0.ub" - , ".unison/v1/paths/d8ercjm1ol1htu82nmr37ejru1lt7lrl03d5j0u0dp0g2a98nl6n8abdjpf2jkvjuoq4u2qrhn99ps6fiqqn60b0tni7nkp7o593sr0.ub" - , ".unison/v1/paths/bih5ebeug86npp1n0mp51vi7a902ma6m1r3s1ehhfhpc0m71le2fdge8nftte5fuambfo2r753bjnguq5e3p6mip7incmghkho643pg.ub" - ] --- path "[inside]." 
esvotl1kr2aqo4tkq7p6lp2chkepmg7n3im1t6hqgd93slk97kops8idp7fj7i57pakvg6lhk0efsco6s2vvtql0jffomm8tvngogd0 --- path "[inside].X" ucnhqspklepn3ihu1o3ph2or9hsrhcpoav93v4gi1v97ttoc2vuup173mcophp8r90r0j3k5mg2knlqr85gdq1dseh8mt5t94c4am4o.ub --- type outside.A #19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0 --- type outside.B #aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58 --- outside.c #msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo --- outside.d #52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg --- type inside.X #p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0 --- inside.y #omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg --- paths: esvot|ucnhq --- want: A, c, X, y: 19lkp|msp7b|p8f8g|omqnf --- no: B, d: aocoe|52add| - --- initialize a fresh codebase -initCodebaseDir :: Branch.Cache IO -> FilePath -> String -> IO CodebasePath -initCodebaseDir branchCache tmpDir name = fst <$> initCodebase branchCache tmpDir name - -initCodebase :: Branch.Cache IO -> FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) -initCodebase branchCache tmpDir name = do - let codebaseDir = tmpDir name - c <- FC.initCodebase branchCache codebaseDir - pure (codebaseDir, c) - --- run a transcript on an existing codebase -runTranscript_ :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> Branch.Cache IO -> String -> m () -runTranscript_ tmpDir c branchCache transcript = do - let configFile = tmpDir ".unisonConfig" - -- transcript runner wants a "current directory" for I guess writing scratch files? 
- let cwd = tmpDir "cwd" - let err err = error $ "Parse error: \n" <> show err - - -- parse and run the transcript - flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> - void . liftIO $ TR.run Nothing cwd configFile stanzas c branchCache >>= - when traceTranscriptOutput . traceM . Text.unpack - --- goal of this test is to make sure that push works correctly: --- the destination should contain the right definitions from the namespace, --- unnamed transitive dependencies (terms and types), --- dependents, type, and type mentions indices. -testPush :: Test () -testPush = scope "push" $ do - branchCache <- io $ Branch.boundedCache 4096 - tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-push" - - -- initialize a fresh codebase named "c" - (codebasePath, c) <- io $ initCodebase branchCache tmp "c" - - -- Run the "setup transcript" to do the adds and updates; everything short of - -- pushing. - runTranscript_ tmp c branchCache setupTranscript - - -- now we'll try pushing multiple ways. - for_ pushImplementations $ \(implName, impl) -> scope implName $ do - -- initialize git repo - let repoGit = tmp (implName ++ ".git") - io $ "git" ["init", "--bare", Text.pack repoGit] - - -- push one way! - codebase <- io $ FC.codebase1' impl branchCache V1.formatSymbol formatAnn codebasePath - runTranscript_ tmp codebase branchCache (pushTranscript repoGit) - - -- check out the resulting repo so we can inspect it - io $ "git" ["clone", Text.pack repoGit, Text.pack $ tmp implName ] - - -- inspect it - for_ groups $ \(group, list) -> scope group $ - for_ list $ \(title, path) -> scope title $ - io (doesFileExist $ tmp implName path) >>= expect - - for_ notGroups $ \(group, list) -> scope group $ - for_ list $ \(title, path) -> scope title $ - io (fmap not . doesFileExist $ tmp implName path) >>= expect - - -- if we haven't crashed, clean up! 
- io $ removeDirectoryRecursive tmp - - where - setupTranscript = [iTrim| - ```ucm - .> builtins.merge - ``` - ```unison:hide - --#0n4pbd0q9u - type outside.A = A Nat outside.B - - --#muulibntaq - type outside.B = B Int - - --#msp7bv40rv - outside.c = 3 - - --#6cdi7g1oi2 - outside.d = c < (p + 1) - - --#4idrjau939 - type inside.M = M outside.A - - --#fiupm7pl7o - inside.p = c - - --#l5pndeifuh - inside.q x = x + p * p - - inside.r = d - ``` - ```ucm - .foo> add - ``` - ```unison:hide - r = false - ``` - ```ucm - .foo.inside> update - ``` - |] - pushTranscript repo = [iTrim| - ```ucm - .foo.inside> push ${repo} - ``` - |] - - pushImplementations :: (MonadIO m, Var v, BuiltinAnnotation a) - => [(String, SyncToDir m v a)] - pushImplementations = - [ ("SlimCopyRegenerateIndex", SlimCopyRegenerateIndex.syncToDirectory) - ] - - groups = - [ ("types", types) - , ("terms", terms) - , ("branches", branches) - , ("patches", patches) - , ("dependentsIndex", dependentsIndex) - , ("typeIndex", typeIndex) - , ("typeMentionsIndex", typeMentionsIndex) ] - - notGroups = - [ ("notBranches", notBranches) ] - - types = - [ ("M", ".unison/v1/types/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg/compiled.ub") - , ("A", ".unison/v1/types/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/compiled.ub") - , ("B", ".unison/v1/types/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/compiled.ub") - ] - - terms = - [ ("p (type)", ".unison/v1/terms/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/type.ub") - , ("p (compiled)", ".unison/v1/terms/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/compiled.ub") - , ("c (type)", 
".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/type.ub") - , ("c (compiled)", ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/compiled.ub") - , ("d (type)", ".unison/v1/terms/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do/type.ub") - , ("d (compiled)", ".unison/v1/terms/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do/compiled.ub") - , ("q (type)", ".unison/v1/terms/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8/type.ub") - , ("q (compiled)", ".unison/v1/terms/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8/compiled.ub") - , ("r (type)", ".unison/v1/terms/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8/type.ub") - , ("r (compiled)", ".unison/v1/terms/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8/compiled.ub") - , ("r' (type)", ".unison/v1/terms/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo/type.ub") - , ("r' (compiled)", ".unison/v1/terms/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo/compiled.ub") - ] - - branches = - [ ("_head", ".unison/v1/paths/_head/pciob2qnondela4h4u1dtk9pvbc9up7qed0j311lkomordjah2lliddis7tdl76h5mdbs5ja10tm8kh2o3sni1bu2kdsqtm4fkv5288") - , (".foo.inside", ".unison/v1/paths/pciob2qnondela4h4u1dtk9pvbc9up7qed0j311lkomordjah2lliddis7tdl76h5mdbs5ja10tm8kh2o3sni1bu2kdsqtm4fkv5288.ub") - , (".foo.inside'", ".unison/v1/paths/0ufjqqmabderbejfhrled8i4lirgpqgimejbkdnk1m9t90ibj25oi7g1h2adougdqhv72sv939eq67ur77n3qciajh0reiuqs68th00.ub") - , 
(".foo.inside.M", ".unison/v1/paths/i2p08iv1l50fc934gh6kea181kvjnt3kdgiid5c4r5016kjuliesji43u4j4mjvsne3qvmq43puk9dkm61nuc542n7pchsvg6t0v55o.ub") - , ("", ".unison/v1/paths/7asfbtqmoj56pq7b053v2jc1spgb8g5j4cg1tj97ausi3scveqa50ktv4b2ofoclnkqmnl18vnt5d83jrh85qd43nnrsh6qetbksb70.ub") - ] - - notBranches = - [ (".", ".unison/v1/paths/9r7l4k8ks1tog088fg96evunq1ednlsskf2lh0nacpe5n00khcrl8f1g5sevm7cqd3s64cj22ukvkh2fflm3rhhkn2hh2rj1n20mnm8.ub") - , (".'", ".unison/v1/paths/llton7oiormlimkdmqjdr8tja12i6tebii7cmfd7545b7mt1sb02f9usjqnjd6iaisnn1ngpsl76hfg024l8dlult3s6stkt28j42sg.ub") - , (".''", ".unison/v1/paths/givahf3f6fu8vv07kglsofdcoem7q5dm4rracr78a5didjc4pq2djh2rfdo5sn7nld2757oi02a4a07cv9rk4peafhh76nllcp8l1n8.ub") - , (".foo", ".unison/v1/paths/a8dt4i16905fql2d4fbmtipmj35tj6qmkq176dlnsn6klh0josr255eobn0d3f0aku360h0em6oit9ftjpq3vhcdap8bgpqr79qne58.ub") - , (".foo'", ".unison/v1/paths/l3r86dvdmbe2lsinh213tp9upm5qjtk17iep3n5mah7qg5bupj1e7ikpv1iqbgegp895r0krlo0u2c4nclvfvch3e6kspu766th6tqo.ub") - , (".foo.outside", ".unison/v1/paths/s6iquav10f69pvrpj6rtm7vcp6fs6hgnnmjb1qs00n594ljugbf2qtls93oc4lvb3kjro8fpakoua05gqido4haj4m520rip2gu2hvo.ub") - , (".foo.outside.A", ".unison/v1/paths/2i1lh7pntl3rqrtn4c10ajdg4m3al1rqm6u6ak5ak6urgsaf6nhqn2olt3rjqj5kcj042h8lqseguk3opp019hc7g8ncukds25t9r40.ub") - , (".foo.outside.B", ".unison/v1/paths/jag86haq235jmifji4n8nff8dg1ithenefs2uk5ms6b4qgj9pfa9g40vs4kdn3uhm066ni0bvfb7ib9tqtdgqcn90eadl7282nqqbc0.ub") - ] - - patches = - [ ("patch", ".unison/v1/patches/96b419pm6l896ncmef9kqkpj29gq205amsl6prsl2num29thpn9fej8v8ndcmubadv5hehege4s43n3ljbifsnna92lpeuacq9fm3qo.up") ] - - dependentsIndex = - [ ("Nat <- A", ".unison/v1/dependents/_builtin/Nat/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8") - , ("B <- A", 
".unison/v1/dependents/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8") - , ("Int <- B", ".unison/v1/dependents/_builtin/Int/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0") - , ("Nat <- c", ".unison/v1/dependents/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo") - , ("Boolean <- d", ".unison/v1/dependents/_builtin/Boolean/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("Nat <- d", ".unison/v1/dependents/_builtin/Nat/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("Nat.+ <- d", ".unison/v1/dependents/_builtin/Nat.+/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("Universal.< <- d",".unison/v1/dependents/_builtin/Universal.$less-than$/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("c <- d", ".unison/v1/dependents/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("p <- d", ".unison/v1/dependents/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("A <- M", ".unison/v1/dependents/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg") - , ("Nat <- p", 
".unison/v1/dependents/_builtin/Nat/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") - , ("c <- p", ".unison/v1/dependents/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") - , ("Nat <- q", ".unison/v1/dependents/_builtin/Nat/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") - , ("Nat.* <- q", ".unison/v1/dependents/_builtin/Nat.$star$/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") - , ("Nat.+ <- q", ".unison/v1/dependents/_builtin/Nat.+/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") - , ("p <- q", ".unison/v1/dependents/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") - , ("Boolean <- r", ".unison/v1/dependents/_builtin/Boolean/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") - , ("d <- r", ".unison/v1/dependents/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") - , ("Boolean <- r'", ".unison/v1/dependents/_builtin/Boolean/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo") - ] - - typeIndex = - [ ("(Nat -> B -> A) <- 
A#0",".unison/v1/type-index/#6n4ih159cqcvr52285qj3899ft380ao9l8is9louoen4ea6thgmq8hu38fmblo3tl6gjp0f6nrifplbh6d7770o96adr3d71i913aco/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") - , ("(Int -> B) <- B#0", ".unison/v1/type-index/#vjftvem4n0os6pnuko48ld67v7av3hq23r2gqvj7o536tfb1ctsci2fcgmmplj9b6slsege96onv4c2q8a0n8iadpe56mm4bc90muh8/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") - , ("Nat <- c", ".unison/v1/type-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo") - , ("Boolean <- d", ".unison/v1/type-index/_builtin/Boolean/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("(A -> M) <- M#0", ".unison/v1/type-index/#735ugfihokh6o8ob9akhe1ei05ocsfncdrj76bdomeue5rb9td82q7m4a72e68bpgl3np562fehe9uio4vfcs07ib0mss1o5m08plk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") - , ("Nat <- p", ".unison/v1/type-index/_builtin/Nat/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") - -- note: typeForIndexing = Type.removeAllEffectVars typ - , ("(Nat -> Nat) <- q", ".unison/v1/type-index/#29pbek54phqkda8dp4erqn9u6etr8dm74h3sbg431kdvrt23l3c2a7eh01qpnc4kqq6i8fu1g0r5dsc08qqofnrlvfhpqs4cb6snls0/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") - , ("Boolean <- r", ".unison/v1/type-index/_builtin/Boolean/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") - , ("Boolean <- r'", ".unison/v1/type-index/_builtin/Boolean/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo") - ] - - typeMentionsIndex = - [ ("(Nat -> B -> A) <- 
A#0",".unison/v1/type-mentions-index/#6n4ih159cqcvr52285qj3899ft380ao9l8is9louoen4ea6thgmq8hu38fmblo3tl6gjp0f6nrifplbh6d7770o96adr3d71i913aco/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") - , ("(B -> A) <- A#0", ".unison/v1/type-mentions-index/#7u2a6hguqo74e3aq141fvopo9snclmfbg149k6e51j96hebi23q0tjq2dqjme76smull2r2lkap58ph0pcvpqn0dv1rk1ssfdt20cvo/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") - , ("Nat <- A#0", ".unison/v1/type-mentions-index/_builtin/Nat/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") - , ("B <- A#0", ".unison/v1/type-mentions-index/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") - , ("A <- A#0", ".unison/v1/type-mentions-index/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") - , ("(Int -> B) <- B#0", ".unison/v1/type-mentions-index/#vjftvem4n0os6pnuko48ld67v7av3hq23r2gqvj7o536tfb1ctsci2fcgmmplj9b6slsege96onv4c2q8a0n8iadpe56mm4bc90muh8/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") - , ("Int <- B#0", ".unison/v1/type-mentions-index/_builtin/Int/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") - , ("B <- B#0", ".unison/v1/type-mentions-index/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") - , ("Nat <- c", 
".unison/v1/type-mentions-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo") - , ("Boolean <- d", ".unison/v1/type-mentions-index/_builtin/Boolean/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") - , ("(A -> M) <- M#0", ".unison/v1/type-mentions-index/#735ugfihokh6o8ob9akhe1ei05ocsfncdrj76bdomeue5rb9td82q7m4a72e68bpgl3np562fehe9uio4vfcs07ib0mss1o5m08plk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") - , ("A <- M#0", ".unison/v1/type-mentions-index/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") - , ("M <- M#0", ".unison/v1/type-mentions-index/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") - , ("Nat <- p", ".unison/v1/type-mentions-index/_builtin/Nat/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") - , ("(Nat -> Nat) <- q", ".unison/v1/type-mentions-index/#29pbek54phqkda8dp4erqn9u6etr8dm74h3sbg431kdvrt23l3c2a7eh01qpnc4kqq6i8fu1g0r5dsc08qqofnrlvfhpqs4cb6snls0/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") - , ("Nat <- q", ".unison/v1/type-mentions-index/_builtin/Nat/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") - , ("Boolean <- r", ".unison/v1/type-mentions-index/_builtin/Boolean/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") - , ("Boolean <- r'", 
".unison/v1/type-mentions-index/_builtin/Boolean/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo") - ] - --- a helper to try turning these repo path names into test titles, by --- limiting each path segment to 20 chars. may produce duplicate names since --- it ends up dropping reference cycles suffixes, constructor ids, etc. -makeTitle :: String -> String -makeTitle = intercalate "/" . map (take 20) . drop 2 . splitOn "/" diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs deleted file mode 100644 index de07354e03..0000000000 --- a/parser-typechecker/tests/Unison/Test/IO.hs +++ /dev/null @@ -1,114 +0,0 @@ -{-# Language OverloadedStrings #-} -{-# Language QuasiQuotes #-} - -module Unison.Test.IO where - -import Unison.Prelude -import EasyTest -import qualified System.IO.Temp as Temp -import qualified Data.Text as Text -import qualified Data.Text.IO as TextIO -import Shellmet () -import Data.String.Here (iTrim) -import System.FilePath (()) -import System.Directory (removeDirectoryRecursive) - -import Unison.Codebase (Codebase, CodebasePath) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.FileCodebase as FC -import qualified Unison.Codebase.TranscriptParser as TR -import Unison.Parser (Ann) -import Unison.Symbol (Symbol) - --- * IO Tests - -test :: Bool -> Test () -test newRt = scope "IO" . tests $ [ testHandleOps newRt ] - --- * Implementation - --- | Test reading from and writing to a handle --- --- The transcript writes expectedText to a file, reads the same file and --- writes the read text to the result file which is then checked by the haskell. -testHandleOps :: Bool -> Test () -testHandleOps newRt = - withScopeAndTempDir "handleOps" $ \workdir codebase cache -> do - let myFile = workdir "handleOps.txt" - resultFile = workdir "handleOps.result" - expectedText = "Good Job!" 
:: Text.Text - runTranscript_ newRt workdir codebase cache [iTrim| -```ucm:hide -.> builtins.mergeio -``` - -```unison -use io IO - -main : '{IO} () -main = 'let - fp = ${Text.pack myFile} - res = ${Text.pack resultFile} - expected = ${expectedText} - - -- Write to myFile - h1 = builtins.io.openFile (FilePath fp) Write - putText h1 expected - builtins.io.closeFile h1 - - -- Read from myFile - h2 = builtins.io.openFile (FilePath fp) Read - myC = getText h2 - builtins.io.closeFile h2 - - -- Write what we read from myFile to resultFile - h3 = builtins.io.openFile (FilePath res) Write - putText h3 myC - builtins.io.closeFile h3 -``` - -```ucm -.> run main -``` -|] - - res <- io $ TextIO.readFile (resultFile) - if res == expectedText - then ok - else crash $ "Failed to read expectedText from file: " ++ show myFile - --- * Utilities - -initCodebase :: Branch.Cache IO -> FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) -initCodebase branchCache tmpDir name = do - let codebaseDir = tmpDir name - c <- FC.initCodebase branchCache codebaseDir - pure (codebaseDir, c) - --- run a transcript on an existing codebase -runTranscript_ - :: MonadIO m - => Bool - -> FilePath - -> Codebase IO Symbol Ann - -> Branch.Cache IO - -> String - -> m () -runTranscript_ newRt tmpDir c branchCache transcript = do - let configFile = tmpDir ".unisonConfig" - let cwd = tmpDir "cwd" - let err err = error $ "Parse error: \n" <> show err - - -- parse and run the transcript - flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> - void . liftIO $ - TR.run (Just newRt) cwd configFile stanzas c branchCache - >>= traceM . 
Text.unpack - -withScopeAndTempDir :: String -> (FilePath -> Codebase IO Symbol Ann -> Branch.Cache IO -> Test ()) -> Test () -withScopeAndTempDir name body = scope name $ do - tmp <- io (Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory name) - cache <- io $ Branch.boundedCache 4096 - (_, codebase) <- io $ initCodebase cache tmp "user" - body tmp codebase cache - io $ removeDirectoryRecursive tmp diff --git a/parser-typechecker/tests/Unison/Test/Lexer.hs b/parser-typechecker/tests/Unison/Test/Lexer.hs deleted file mode 100644 index c9d3c34155..0000000000 --- a/parser-typechecker/tests/Unison/Test/Lexer.hs +++ /dev/null @@ -1,207 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Test.Lexer where - -import EasyTest -import Unison.Lexer -import qualified Unison.ShortHash as ShortHash - -test :: Test () -test = - scope "lexer" - . tests - $ [ t "1" [Numeric "1"] - , t "+1" [Numeric "+1"] - , t "-1" [Numeric "-1"] - , t "-1.0" [Numeric "-1.0"] - , t "+1.0" [Numeric "+1.0"] - - , t "1e3" [Numeric "1e3"] - , t "1e+3" [Numeric "1e+3"] - , t "1e-3" [Numeric "1e-3"] - , t "+1e3" [Numeric "+1e3"] - , t "+1e+3" [Numeric "+1e+3"] - , t "+1e-3" [Numeric "+1e-3"] - , t "-1e3" [Numeric "-1e3"] - , t "-1e+3" [Numeric "-1e+3"] - , t "-1e-3" [Numeric "-1e-3"] - , t "1.2e3" [Numeric "1.2e3"] - , t "1.2e+3" [Numeric "1.2e+3"] - , t "1.2e-3" [Numeric "1.2e-3"] - , t "+1.2e3" [Numeric "+1.2e3"] - , t "+1.2e+3" [Numeric "+1.2e+3"] - , t "+1.2e-3" [Numeric "+1.2e-3"] - , t "-1.2e3" [Numeric "-1.2e3"] - , t "-1.2e+3" [Numeric "-1.2e+3"] - , t "-1.2e-3" [Numeric "-1.2e-3"] - , t "1E3" [Numeric "1e3"] - , t "1E+3" [Numeric "1e+3"] - , t "1E-3" [Numeric "1e-3"] - , t "+1E3" [Numeric "+1e3"] - , t "+1E+3" [Numeric "+1e+3"] - , t "+1E-3" [Numeric "+1e-3"] - , t "-1E3" [Numeric "-1e3"] - , t "-1E+3" [Numeric "-1e+3"] - , t "-1E-3" [Numeric "-1e-3"] - , t "1.2E3" [Numeric "1.2e3"] - , t "1.2E+3" [Numeric "1.2e+3"] - , t "1.2E-3" [Numeric "1.2e-3"] - , t "+1.2E3" 
[Numeric "+1.2e3"] - , t "+1.2E+3" [Numeric "+1.2e+3"] - , t "+1.2E-3" [Numeric "+1.2e-3"] - , t "-1.2E3" [Numeric "-1.2e3"] - , t "-1.2E+3" [Numeric "-1.2e+3"] - , t "-1.2E-3" [Numeric "-1.2e-3"] - - , t "1-1" [Numeric "1", simpleSymbolyId "-", Numeric "1"] - , t "1+1" [Numeric "1", simpleSymbolyId "+", Numeric "1"] - , t "1 +1" [Numeric "1", Numeric "+1"] - , t "1+ 1" [Numeric "1", simpleSymbolyId "+", Numeric "1"] - , t "x+y" [simpleWordyId "x", simpleSymbolyId "+", simpleWordyId "y"] - , t "++;++" [simpleSymbolyId "++", Semi False, simpleSymbolyId "++"] - , t "++; woot" [simpleSymbolyId "++", Semi False, simpleWordyId "woot"] - , t "woot;woot" [simpleWordyId "woot", Semi False, simpleWordyId "woot"] - , t "woot;(woot)" [simpleWordyId "woot", Semi False, Open "(", simpleWordyId "woot", Close] - , t - "[+1,+1]" - [Reserved "[", Numeric "+1", Reserved ",", Numeric "+1", Reserved "]"] - , t - "[ +1 , +1 ]" - [Reserved "[", Numeric "+1", Reserved ",", Numeric "+1", Reserved "]"] - , t "-- a comment 1.0" [] - , t "\"woot\" -- a comment 1.0" [Textual "woot"] - , t "0:Int" [Numeric "0", Reserved ":", simpleWordyId "Int"] - , t "0 : Int" [Numeric "0", Reserved ":", simpleWordyId "Int"] - , t - ".Foo Foo . .foo.bar.baz" - [ simpleWordyId ".Foo" - , simpleWordyId "Foo" - , simpleSymbolyId "." 
- , simpleWordyId ".foo.bar.baz" - ] - , t ".Foo.Bar.+" [simpleSymbolyId ".Foo.Bar.+"] - - -- idents with hashes - , t "foo#bar" [WordyId "foo" (Just (ShortHash.unsafeFromText "#bar"))] - , t "+#bar" [SymbolyId "+" (Just (ShortHash.unsafeFromText "#bar"))] - - -- note - these are all the same, just with different spacing - , let ex1 = "if x then y else z" - ex2 = unlines ["if", " x", "then", " y", "else z"] - ex3 = unlines ["if", " x", " then", " y", "else z"] - ex4 = unlines ["if", " x", " then", " y", "else z"] - expected = - [ Open "if" - , simpleWordyId "x" - , Close - , Open "then" - , simpleWordyId "y" - , Close - , Open "else" - , simpleWordyId "z" - , Close - ] - - -- directly close empty = block - in tests $ map (`t` expected) [ex1, ex2, ex3, ex4] - , let ex = unlines ["test =", "", "x = 1"] - - -- directly close nested empty blocks - in t - ex - [ simpleWordyId "test" - , Open "=" - , Close - , (Semi True) - , simpleWordyId "x" - , Open "=" - , Numeric "1" - , Close - ] - , let ex = unlines ["test =", " test2 =", "", "x = 1"] - in t - ex - [ simpleWordyId "test" - , Open "=" - , simpleWordyId "test2" - , Open "=" - , Close - , Close - , (Semi True) - , simpleWordyId "x" - , Open "=" - , Numeric "1" - , Close - ] - , let - ex = unlines - ["if a then b", "else if c then d", "else if e then f", "else g"] -- close of the three `else` blocks - - -- In an empty `then` clause, the `else` is interpreted as a `Reserved` token - in t - ex - [ Open "if" - , simpleWordyId "a" - , Close - , Open "then" - , simpleWordyId "b" - , Close - , Open "else" - , Open "if" - , simpleWordyId "c" - , Close - , Open "then" - , simpleWordyId "d" - , Close - , Open "else" - , Open "if" - , simpleWordyId "e" - , Close - , Open "then" - , simpleWordyId "f" - , Close - , Open "else" - , simpleWordyId "g" - , Close - , Close - , Close - ] - , t - "if x then else" - [ Open "if" - , simpleWordyId "x" - , Close - , Open "then" - , Reserved "else" - , Close - ] - -- Empty `else` clause - , 
t - "if x then 1 else" - [ Open "if" - , simpleWordyId "x" - , Close - , Open "then" - , Numeric "1" - , Close - , Open "else" - , Close - ] - -- Test string literals - , t "\"simple string without escape characters\"" - [Textual "simple string without escape characters"] - , t "\"test escaped quotes \\\"in quotes\\\"\"" - [Textual "test escaped quotes \"in quotes\""] - , t "\"\\n \\t \\b \\a\"" [Textual "\n \t \b \a"] - ] - -t :: String -> [Lexeme] -> Test () -t s expected = - let actual0 = payload <$> lexer "ignored filename" s - actual = take (length actual0 - 2) . drop 1 $ actual0 - in scope s $ if actual == expected - then ok - else do - note $ "expected: " ++ show expected - note $ "actual : " ++ show actual - crash "actual != expected" diff --git a/parser-typechecker/tests/Unison/Test/MCode.hs b/parser-typechecker/tests/Unison/Test/MCode.hs deleted file mode 100644 index c998b037d1..0000000000 --- a/parser-typechecker/tests/Unison/Test/MCode.hs +++ /dev/null @@ -1,111 +0,0 @@ -{-# language PatternGuards #-} -{-# language TypeApplications #-} -{-# language OverloadedStrings #-} - -module Unison.Test.MCode where - -import EasyTest - -import qualified Data.Map.Strict as Map - -import Data.Bits (bit) -import Data.Maybe (fromMaybe) -import Data.Word (Word64) - -import Unison.Util.EnumContainers as EC - -import Unison.Term (unannotate) -import Unison.Symbol (Symbol) -import Unison.Reference (Reference(Builtin)) -import Unison.Runtime.Pattern -import Unison.Runtime.ANF - ( superNormalize - , lamLift - ) -import Unison.Runtime.MCode - ( Section(..) - , Instr(..) - , Args(..) - , Comb(..) - , Branch(..) 
- , emitComb - , emitCombs - ) -import Unison.Runtime.Builtin -import Unison.Runtime.Machine - ( REnv(..), eval0 ) - -import Unison.Test.Common (tm) - -testEval0 :: (Word64 -> Comb) -> Section -> Test () -testEval0 env sect = do - io $ eval0 (Refs mempty mempty) env sect - ok - -builtins :: Reference -> Word64 -builtins r - | Builtin "todo" <- r = bit 64 - | Just i <- Map.lookup r builtinTermNumbering = i - | otherwise = error $ "builtins: " ++ show r - -cenv :: EnumMap Word64 Comb -cenv = fmap (emitComb mempty) $ numberedTermLookup @Symbol - -benv :: Word64 -> Maybe Comb -benv i - | i == bit 64 = Just $ Lam 0 1 2 1 asrt - | otherwise = EC.lookup i cenv - -env :: EnumMap Word64 Comb -> Word64 -> Comb -env m n = fromMaybe (m ! n) $ benv n - -asrt :: Section -asrt = Ins (Unpack 0) - $ Match 0 - $ Test1 1 (Yield ZArgs) - (Die "assertion failed") - -multRec :: String -multRec - = "let\n\ - \ n = 5\n\ - \ f acc i = match i with\n\ - \ 0 -> acc\n\ - \ _ -> f (##Nat.+ acc n) (##Nat.sub i 1)\n\ - \ ##todo (##Nat.== (f 0 1000) 5000)" - -dataSpec :: DataSpec -dataSpec = mempty - -testEval :: String -> Test () -testEval s = testEval0 (env aux) main - where - (Lam 0 0 _ _ main, aux, _) - = emitCombs (bit 24) - . superNormalize builtins (builtinTypeNumbering Map.!) - . lamLift - . splitPatterns dataSpec - . unannotate - $ tm s - -nested :: String -nested - = "let\n\ - \ x = match 2 with\n\ - \ 0 -> ##Nat.+ 0 1\n\ - \ m@n -> n\n\ - \ ##todo (##Nat.== x 2)" - -test :: Test () -test = scope "mcode" . 
tests $ - [ scope "2=2" $ testEval "##todo (##Nat.== 2 2)" - , scope "2=1+1" $ testEval "##todo (##Nat.== 2 (##Nat.+ 1 1))" - , scope "2=3-1" $ testEval "##todo (##Nat.== 2 (##Nat.sub 3 1))" - , scope "5*5=25" - $ testEval "##todo (##Nat.== (##Nat.* 5 5) 25)" - , scope "5*1000=5000" - $ testEval "##todo (##Nat.== (##Nat.* 5 1000) 5000)" - , scope "5*1000=5000 rec" $ testEval multRec - , scope "nested" - $ testEval nested - ] diff --git a/parser-typechecker/tests/Unison/Test/Range.hs b/parser-typechecker/tests/Unison/Test/Range.hs deleted file mode 100644 index f0b521ef79..0000000000 --- a/parser-typechecker/tests/Unison/Test/Range.hs +++ /dev/null @@ -1,33 +0,0 @@ -module Unison.Test.Range where - -import EasyTest -import Unison.Lexer (Pos (..)) -import Unison.Util.Range - - -test :: Test () -test = scope "range" . tests $ - [ scope "contains 11 11" . expect $ contains zero zero - , antisymmetric "contains 11 12" (not . uncurry contains) $ (zero, one) - , scope "contains 12 23" . expect . not $ contains one one' - , scope "contains 23 12" . expect . not $ contains one' one - - , symmetric "overlaps 11 11" (not . uncurry overlaps) $ (zero, zero) - , symmetric "overlaps 12 11" (not . uncurry overlaps) $ (one, zero) - , symmetric "overlaps 12 23" (not . uncurry overlaps) $ (one, one') - , symmetric "overlaps 12 13" (uncurry overlaps) $ (one, two) - , symmetric "overlaps 23 13" (uncurry overlaps) $ (one', two) - - , scope "inrange 1 12" . expect $ inRange (Pos 1 1) (Range (Pos 1 1) (Pos 1 2)) - , scope "inrange 2 12" . expect . not $ inRange (Pos 1 2) (Range (Pos 1 1) (Pos 1 2)) - ] - where symmetric s f (a,b) = - tests [ scope s . expect $ f (a, b) - , scope (s ++ " (symmetric)") . expect $ f (b, a)] - antisymmetric s f (a,b) = - tests [ scope s . expect $ f (a, b) - , scope (s ++ " (antisymmetric)") . expect . 
not $ f (b, a)] - zero = Range (Pos 1 1) (Pos 1 1) - one = Range (Pos 1 1) (Pos 1 2) - one' = Range (Pos 1 2) (Pos 1 3) - two = Range (Pos 1 1) (Pos 1 3) diff --git a/parser-typechecker/tests/Unison/Test/Referent.hs b/parser-typechecker/tests/Unison/Test/Referent.hs deleted file mode 100644 index 9c9dfb51db..0000000000 --- a/parser-typechecker/tests/Unison/Test/Referent.hs +++ /dev/null @@ -1,82 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.Test.Referent where - -import Data.Text (Text) -import qualified Data.Text as Text -import qualified Unison.Referent as R -import qualified Unison.ShortHash as SH -import qualified Unison.Reference as Rf -import EasyTest - -test :: Test () -test = scope "hashparsing" . tests $ - [ - scope "Reference" $ tests - [ ref h - , ref (h <> "." <> suffix1) - , ref (h <> "." <> suffix2) ], - - scope "Referent" $ tests - [ r h - , r $ h <> "." <> suffix1 - , r $ h <> "#d10" - , r $ h <> "#a0" - , r $ h <> "." <> suffix2 <> "#d6" - , r $ h <> "." <> suffix1 <> "#a9" ], - - scope "ShortHash" $ tests - [ sh h - , sh "#abcd" - , sh $ "#abcd." <> suffix1 - , sh "#abcd#d10" - , sh "#abcd#a3" - , sh $ "#abcd." 
<> suffix2 <> "#d10" - , sh $ "#abcd.y6#a5" - , scope "builtin" $ - expect (SH.fromText "##Text.take" == Just (SH.Builtin "Text.take")) - , pending $ scope "builtins don't have CIDs" $ - expect (SH.fromText "##FileIO#3" == Nothing) - , scope "term ref, no cycle" $ - expect (SH.fromText "#2tWjVAuc7" == - Just (SH.ShortHash "2tWjVAuc7" Nothing Nothing)) - , scope "term ref, part of cycle" $ - expect (SH.fromText "#y9ycWkiC1.y9" == - Just (SH.ShortHash "y9ycWkiC1" (Just "y9") Nothing)) - , scope "constructor" $ - expect (SH.fromText "#cWkiC1x89#1" == - Just (SH.ShortHash "cWkiC1x89" Nothing (Just "1"))) - , scope "constructor of a type in a cycle" $ - expect (SH.fromText "#DCxrnCAPS.WD#0" == - Just (SH.ShortHash "DCxrnCAPS" (Just "WD") (Just "0"))) - , scope "Anything to the left of the first # is ignored" $ - expect (SH.fromText "foo#abc" == - Just (SH.ShortHash "abc" Nothing Nothing)) - , pending $ scope "Anything including and following a third # is rejected" $ - expect (SH.fromText "foo#abc#2#hello" == Nothing) - , scope "Anything after a second . 
before a second # is ignored" $ - expect (SH.fromText "foo#abc.1f.x" == - Just (SH.ShortHash "abc" (Just "1f") Nothing)) - ] - ] - where - h = "#1tdqrgl90qnmqvrff0j76kg2rnajq7n8j54e9cbk4p8pdi41q343bnh8h2rv6nadhlin8teg8371d445pvo0as7j2sav8k401d2s3no" - suffix1 = Rf.showSuffix 0 10 - suffix2 = Rf.showSuffix 3 6 - ref txt = scope (Text.unpack txt) $ case Rf.fromText txt of - Left e -> fail e - Right r1 -> case Rf.fromText (Rf.toText r1) of - Left e -> fail e - Right r2 -> expect (r1 == r2) - r :: Text -> Test () - r txt = scope (Text.unpack txt) $ case R.fromText txt of - Nothing -> fail "oh noes" - Just referent -> case R.fromText (R.toText referent) of - Nothing -> fail "oh noes" - Just referent2 -> expect (referent == referent2) - sh :: Text -> Test () - sh txt = scope (Text.unpack txt) $ case SH.fromText txt of - Nothing -> fail "oh noes" - Just shorthash -> case SH.fromText (SH.toText shorthash) of - Nothing -> fail "oh noes" - Just shorthash2 -> expect (shorthash == shorthash2) diff --git a/parser-typechecker/tests/Unison/Test/Term.hs b/parser-typechecker/tests/Unison/Test/Term.hs deleted file mode 100644 index 5afdad551d..0000000000 --- a/parser-typechecker/tests/Unison/Test/Term.hs +++ /dev/null @@ -1,53 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.Test.Term where - -import EasyTest -import qualified Data.Map as Map -import Data.Map ( (!) 
) -import qualified Unison.Hash as Hash -import qualified Unison.Reference as R -import Unison.Symbol ( Symbol ) -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.Var as Var - -test :: Test () -test = scope "term" $ tests - [ scope "Term.substTypeVar" $ do - -- check that capture avoidance works in substTypeVar - let v s = Var.nameds s :: Symbol - tv s = Type.var() (v s) - v1 s = Var.freshenId 1 (v s) - tm :: Term.Term Symbol () - tm = Term.ann() (Term.ann() - (Term.nat() 42) - (Type.introOuter() (v "a") $ - Type.arrow() (tv "a") (tv "x"))) - (Type.forall() (v "a") (tv "a")) - tm' = Term.substTypeVar (v "x") (tv "a") tm - expected = - Term.ann() (Term.ann() - (Term.nat() 42) - (Type.introOuter() (v1 "a") $ - Type.arrow() (Type.var() $ v1 "a") (tv "a"))) - (Type.forall() (v1 "a") (Type.var() $ v1 "a")) - note $ show tm' - note $ show expected - expect $ tm == tm - expect $ tm' == tm' - expect $ tm' == expected - ok - , scope "Term.unhashComponent" $ - let h = Hash.unsafeFromBase32Hex "abcd" - ref = R.Derived h 0 1 - v1 = Var.refNamed @Symbol ref - -- input component: `ref = \v1 -> ref` - component = Map.singleton ref (Term.lam () v1 (Term.ref () ref)) - component' = Term.unhashComponent component - -- expected unhashed component: `v2 = \v1 -> v2`, where `v2 /= v1`, - -- i.e. `v2` cannot be just `ref` converted to a ref-named variable, - -- since that would collide with `v1` - (v2, _) = component' ! 
ref - in expect $ v2 /= v1 - ] diff --git a/parser-typechecker/tests/Unison/Test/TermParser.hs b/parser-typechecker/tests/Unison/Test/TermParser.hs deleted file mode 100644 index 93227022b0..0000000000 --- a/parser-typechecker/tests/Unison/Test/TermParser.hs +++ /dev/null @@ -1,227 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE QuasiQuotes #-} - -module Unison.Test.TermParser where - -import Control.Applicative -import Control.Monad (join) -import EasyTest -import qualified Text.Megaparsec as P -import Text.RawString.QQ -import Unison.Parser -import qualified Unison.Parsers as Ps -import Unison.PrintError (renderParseErrorAsANSI) -import Unison.Symbol (Symbol) -import qualified Unison.TermParser as TP -import qualified Unison.Test.Common as Common - -test1 :: Test () -test1 = scope "termparser" . tests . map parses $ - [ "1" - , "1.0" - , "+1" - , "-1" - , "+1.0" - , "-1.0" - - , "1e3" - , "1e+3" - , "1e-3" - , "+1e3" - , "+1e+3" - , "+1e-3" - , "-1e3" - , "-1e+3" - , "-1e-3" - , "1.2e3" - , "1.2e+3" - , "1.2e-3" - , "+1.2e3" - , "+1.2e+3" - , "+1.2e-3" - , "-1.2e3" - , "-1.2e+3" - , "-1.2e-3" - - , "-4th" - , "()" - , "(0)" - , "forty" - , "forty two" - , "\"forty two\"" - , "[1,2,3]" - , "\"abc\"" - , "?x" - , "?\\n" - , "x + 1" - , "1 + 1" - , "1 Nat.+ 1" - , "( x + 1 )" - , "foo 42" - , "1 Nat.== 1" - , "x Nat.== y" - , "if 1 Nat.== 1 then 1 else 1" - , "if 1 Nat.== x then 1 else 1" - , "if x Nat.== 1 then 1 else 1" - , "if x == 1 then 1 else 1" - , "if x Nat.== x then 1 else 1" - -- - -- Block tests - , "let x = 1\n" ++ - " x" - , "let\n" ++ - " y = 1\n" ++ - " x" - , unlines [ - "let y = 1 ", - " x = 2 ", - " x + y"] - , "(let \n" ++ - " x = 23 + 42\n" ++ - " x + 1 )" - -- - -- Handlers - , "handle\n" ++ - " x = 23 + 42\n" ++ - " x + foo 8 102.0 +4\n" ++ - "with foo" - , "handle\n" ++ - " x = 1\n" ++ - " x\n" ++ - "with foo" - , "handle x with foo" - , "handle foo with cases\n" ++ - " { x } -> x" - - -- Patterns - , "match x with x -> x" - , "match 
x with 0 -> 1" - , "match x with\n" ++ - " 0 -> 1" - , "match +0 with\n" ++ - " +0 -> -1" - , "match x with\n" ++ - " x -> 1\n" ++ - " 2 -> 7\n" ++ - " _ -> 3\n" ++ - " Tuple.Cons x y -> x + y\n" ++ - " Tuple.Cons (Tuple.Cons x y) _ -> x + y \n" - , "match x with\n" ++ - " {Tuple.Cons x y} -> 1\n" ++ - " {Optional.Some 42 -> k} -> k 42\n" - , "match x with\n" ++ - " 0 ->\n" ++ - " z = 0\n" ++ - " z" - , "match x with\n" ++ - " 0 | 1 == 2 -> 123" - , "match x with\n" ++ - " [] -> 0\n" ++ - " [1] -> 1\n" ++ - " 2 +: _ -> 2\n" ++ - " _ :+ 3 -> 3\n" ++ - " [4] ++ _ -> 4\n" ++ - " _ ++ [5] -> 5\n" ++ - " _ -> -1" - , "cases x -> x" - , "cases\n" ++ - " [] -> 0\n" ++ - " [x] -> 1\n" ++ - " _ -> 2" - , "cases\n" ++ - " 0 ->\n" ++ - " z = 0\n" ++ - " z" - - -- Conditionals - , "if x then y else z" - , "-- if test 1\n" ++ - "if\n" ++ - " s = 0\n" ++ - " s > 0\n" ++ - "then\n" ++ - " s = 0\n" ++ - " s + 1\n" ++ - "else\n" ++ - " s = 0\n" ++ - " s + 2\n" - , "-- if test 2\n" ++ - "if\n" ++ - " s = 0\n" ++ - " s > 0\n" ++ - "then\n" ++ - " s: Int\n" ++ - " s = (0: Int)\n" ++ - " s + 1\n" ++ - "else\n" ++ - " s = 0\n" ++ - " s + 2\n" - , "-- if test 3\n" ++ - "if\n" ++ - " s = 0\n" ++ - " s > 0\n" ++ - "then\n" ++ - " s: Int\n" ++ - " s = (0 : Int)\n" ++ - " s + 1\n" ++ - "else\n" ++ - " s = 0\n" ++ - " s + 2\n" - , "x && y" - , "x || y" - , [r|--let r1 - let r1 : Nat - r1 = match Optional.Some 3 with - x -> 1 - 42 |] - , [r|let - increment = (Nat.+) 1 - - (|>) : forall a . a -> (a -> b) -> b - a |> f = f a - - Stream.fromInt -3 - |> Stream.take 10 - |> Stream.foldLeft 0 increment - |] - ] - -test2 :: Test () -test2 = scope "fiddle" . 
tests $ unitTests - -test :: Test () -test = test1 <|> test2 - -unitTests :: [Test ()] -unitTests = - [ t w "hi" - , t s "foo.+" - , t (w <|> s) "foo.+" - , t (w *> w) "foo bar" - , t (P.try (w *> w) <|> (w *> s)) "foo +" - , t TP.term "x -> x" - , t (TP.lam TP.term) "x y z -> 1 + 1" - , t (sepBy s w) "" - , t (sepBy s w) "uno" - , t (sepBy s w) "uno + dos" - , t (sepBy s w) "uno + dos * tres" - , t (openBlockWith "(" *> sepBy s w <* closeBlock) "(uno + dos + tres)" - , t TP.term "( 0 )" - ] - where - -- type TermP v = P v (AnnotatedTerm v Ann) - t :: P Symbol a -> String -> Test () - t = parseWith - w = wordyDefinitionName - s = symbolyDefinitionName - -parses :: String -> Test () -parses = parseWith TP.term - -parseWith :: P Symbol a -> String -> Test () -parseWith p s = scope (join . take 1 $ lines s) $ - case Ps.parse @ Symbol p s Common.parsingEnv of - Left e -> do - note $ renderParseErrorAsANSI 60 s e - crash $ renderParseErrorAsANSI 60 s e - Right _ -> ok diff --git a/parser-typechecker/tests/Unison/Test/TermPrinter.hs b/parser-typechecker/tests/Unison/Test/TermPrinter.hs deleted file mode 100755 index 80fb635ace..0000000000 --- a/parser-typechecker/tests/Unison/Test/TermPrinter.hs +++ /dev/null @@ -1,586 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.Test.TermPrinter (test) where - -import EasyTest -import qualified Data.Text as Text -import Unison.ABT (annotation) -import qualified Unison.HashQualified as HQ -import Unison.Term (Term) -import qualified Unison.Term as Term -import Unison.TermPrinter -import qualified Unison.Type as Type -import Unison.Symbol (Symbol, symbol) -import qualified Unison.Builtin -import Unison.Parser (Ann(..)) -import qualified Unison.Util.Pretty as PP -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Util.ColorText as CT -import Unison.Test.Common (t, tm) -import qualified Unison.Test.Common as Common - -getNames :: PPE.PrettyPrintEnv -getNames = PPE.fromNames Common.hqLength 
Unison.Builtin.names - --- Test the result of the pretty-printer. Expect the pretty-printer to --- produce output that differs cosmetically from the original code we parsed. --- Check also that re-parsing the pretty-printed code gives us the same ABT. --- (Skip that latter check if rtt is false.) --- Note that this does not verify the position of the PrettyPrint Break elements. -tcDiffRtt :: Bool -> String -> String -> Int -> Test () -tcDiffRtt rtt s expected width - = let - inputTerm = tm s :: Term Symbol Ann - prettied = CT.toPlain <$> pretty getNames inputTerm - actual = if width == 0 - then PP.renderUnbroken prettied - else PP.render width prettied - actualReparsed = tm actual - in - scope s $ tests - [ if actual == expected - then ok - else do - note $ "expected:\n" ++ expected - note $ "actual:\n" ++ actual - note $ "show(input) : " ++ show inputTerm - -- note $ "prettyprint : " ++ show prettied - crash "actual != expected" - , if not rtt || (inputTerm == actualReparsed) - then ok - else do - note "round trip test..." - note $ "single parse: " ++ show inputTerm - note $ "double parse: " ++ show actualReparsed - note $ "prettyprint : " ++ show prettied - crash "single parse != double parse" - ] - --- As above, but do the round-trip test unconditionally. -tcDiff :: String -> String -> Test () -tcDiff s expected = tcDiffRtt True s expected 0 - --- As above, but expect not even cosmetic differences between the input string --- and the pretty-printed version. -tc :: String -> Test () -tc s = tcDiff s s - --- Use renderBroken to render the output to some maximum width. 
-tcBreaksDiff :: Int -> String -> String -> Test () -tcBreaksDiff width s expected = tcDiffRtt True s expected width - -tcBreaks :: Int -> String -> Test () -tcBreaks width s = tcDiffRtt True s s width - -tcBinding :: Int -> String -> Maybe String -> String -> String -> Test () -tcBinding width v mtp tm expected - = let - baseTerm = - Unison.Test.Common.tm tm :: Term Symbol Ann - inputType = fmap Unison.Test.Common.t mtp :: Maybe (Type.Type Symbol Ann) - inputTerm (Just tp) = Term.ann (annotation tp) baseTerm tp - inputTerm Nothing = baseTerm - varV = symbol $ Text.pack v - prettied = fmap CT.toPlain $ PP.syntaxToColor $ prettyBinding - getNames - (HQ.unsafeFromVar varV) - (inputTerm inputType) - actual = if width == 0 - then PP.renderUnbroken prettied - else PP.render width prettied - in - scope expected $ tests - [ if actual == expected - then ok - else do - note $ "expected: " ++ show expected - note $ "actual : " ++ show actual - note $ "show(input) : " ++ show (inputTerm inputType) - note $ "prettyprint : " ++ show prettied - crash "actual != expected" - ] - -test :: Test () -test = scope "termprinter" $ tests - [ scope "splitName" $ tests - [ scope "x" $ expectEqual (splitName "x") [([], "x")] - , scope "A.x" $ expectEqual (splitName "A.x") [([],"A.x"),(["A"],"x")] - , scope "A.B.x" - $ expectEqual (splitName "A.B.x") [([],"A.B.x"),(["A"],"B.x"),(["A","B"],"x")] - ] - , tc "if true then +2 else -2" - , tc "[2, 3, 4]" - , tc "[2]" - , tc "[]" - , tc "true && false" - , tc "false || false" - , tc "g ((true || false) && (f x y))" - , tc "if _something then _foo else _blah" - , tc "3.14159" - , tc "+0" - , tc "\"some text\"" - , tc "\"they said \\\"hi\\\"\"" - , pending $ tc "\'they said \\\'hi\\\'\'" -- TODO lexer doesn't support strings with single quotes in - , tc "Rúnar" - , pending $ tc "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" -- TODO lexer does not like classics! 
- , tc "古池や蛙飛びこむ水の音" - , tc "2 : Nat" - , tc "x -> x && false" - , tc "x y -> x && y" - , tc "x y z -> x && y" - , tc "x y y -> x && y" - , tc "()" - , tc "Cons" - , tc "foo" - , tc "List.empty" - , tc "None" - , tc "Optional.None" - , tc "handle foo with bar" - , tc "Cons 1 1" - , tc "let\n\ - \ x = 1\n\ - \ x" - , tcBreaks 50 "let\n\ - \ x = 1\n\ - \ x" - , tcBreaks 50 "let\n\ - \ x = 1\n\ - \ y = 2\n\ - \ f x y" - , tc "let\n\ - \ f = cases\n\ - \ 0 -> 0\n\ - \ x -> x\n\ - \ f y" - , tc "let\n\ - \ f z = cases\n\ - \ 0 -> z\n\ - \ y -> g y\n\ - \ f \"\" 1" - , tc "let\n\ - \ f _ = cases\n\ - \ 0 -> 0\n\ - \ x -> x\n\ - \ !f 1" - , pending $ tc "match x with Pair t 0 -> foo t" -- TODO hitting UnknownDataConstructor when parsing pattern - , pending $ tc "match x with Pair t 0 | pred t -> foo t" -- ditto - , pending $ tc "match x with Pair t 0 | pred t -> foo t; Pair t 0 -> foo' t; Pair t u -> bar;" -- ditto - , tc "match x with () -> foo" - , tc "match x with _ -> foo" - , tc "match x with y -> y" - , tc "match x with 1 -> foo" - , tc "match x with +1 -> foo" - , tc "match x with -1 -> foo" - , tc "match x with 3.14159 -> foo" - , tcDiffRtt False "match x with\n\ - \ true -> foo\n\ - \ false -> bar" - "match x with\n\ - \ true -> foo\n\ - \ false -> bar" - 0 - , tcBreaks 50 "match x with\n\ - \ true -> foo\n\ - \ false -> bar" - , tc "match x with false -> foo" - , tc "match x with y@() -> y" - , tc "match x with a@(b@(c@())) -> c" - , tc "match e with { a } -> z" - , pending $ tc "match e with { () -> k } -> z" -- TODO doesn't parse since 'many leaf' expected before the "-> k" - -- need an actual effect constructor to test this with - , tc "cases x -> x" - , tc "cases\n\ - \ [] -> 0\n\ - \ [x] -> 1\n\ - \ _ -> 2" - , tc "if a then if b then c else d else e" - , tc "handle handle foo with bar with baz" - , tcBreaks 16 "match (if a then\n\ - \ b\n\ - \else c) with\n\ - \ 112 -> x" -- dodgy layout. 
note #517 and #518 - , tc "handle bar with Pair 1 1" - , tc "handle bar with x -> foo" - , tcDiffRtt True "let\n\ - \ x = (1 : Int)\n\ - \ (x : Int)" - "let\n\ - \ x : Int\n\ - \ x = 1\n\ - \ (x : Int)" 50 - , tc "match x with 12 -> (y : Int)" - , tc "if a then (b : Int) else (c : Int)" - , tc "match x with 12 -> if a then b else c" - , tc "match x with 12 -> x -> f x" - , tcDiff "match x with (12) -> x" "match x with 12 -> x" - , tcDiff "match (x) with 12 -> x" "match x with 12 -> x" - , tc "match x with 12 -> x" - , tcDiffRtt True "match x with\n\ - \ 12 -> x" - "match x with 12 -> x" 50 - , tcBreaks 15 "match x with\n\ - \ 12 -> x\n\ - \ 13 -> y\n\ - \ 14 -> z" - , tcBreaks 21 "match x with\n\ - \ 12 | p x -> x\n\ - \ 13 | q x -> y\n\ - \ 14 | r x y -> z" - , tcBreaks 9 "match x with\n\ - \ 112 ->\n\ - \ x\n\ - \ 113 ->\n\ - \ y\n\ - \ 114 ->\n\ - \ z" - , pending $ tcBreaks 19 "match\n\ - \ myFunction\n\ - \ argument1\n\ - \ argument2\n\ - \with\n\ - \ 112 -> x" -- TODO, 'unexpected semi' before 'of' - should the parser accept this? - , tc "if c then x -> f x else x -> g x" - , tc "(f x) : Int" - , tc "(f x) : Pair Int Int" - , tcBreaks 50 "let\n\ - \ x = if a then b else c\n\ - \ if x then y else z" - , tc "f x y" - , tc "f x y z" - , tc "f (g x) y" - , tcDiff "(f x) y" "f x y" - , pending $ tc "1.0e-19" -- TODO parser throws UnknownLexeme - , pending $ tc "-1.0e19" -- ditto - , tc "0.0" - , tc "-0.0" - , pending $ tcDiff "+0.0" "0.0" -- TODO parser throws "Prelude.read: no parse" - should it? Note +0 works for UInt. 
- , tcBreaksDiff 21 "match x with 12 -> if a then b else c" - "match x with\n\ - \ 12 ->\n\ - \ if a then b\n\ - \ else c" - , tcDiffRtt True "if foo\n\ - \then\n\ - \ true && true\n\ - \ 12\n\ - \else\n\ - \ namespace baz where\n\ - \ f : Int -> Int\n\ - \ f x = x\n\ - \ 13" - "if foo then\n\ - \ true && true\n\ - \ 12\n\ - \else\n\ - \ baz.f : Int -> Int\n\ - \ baz.f x = x\n\ - \ 13" 50 - , tcBreaks 50 "if foo then\n\ - \ true && true\n\ - \ 12\n\ - \else\n\ - \ baz.f : Int -> Int\n\ - \ baz.f x = x\n\ - \ 13" - , tcBreaks 90 "handle\n\ - \ a = 5\n\ - \ b =\n\ - \ c = 3\n\ - \ true\n\ - \ false\n\ - \with foo" - , tcBreaks 50 "match x with\n\ - \ true ->\n\ - \ d = 1\n\ - \ false\n\ - \ false ->\n\ - \ f x = x + 1\n\ - \ true" - , pending $ tcBreaks 50 "x -> e = 12\n\ - \ x + 1" -- TODO parser looks like lambda body should be a block, but we hit 'unexpected =' - , tc "x + y" - , tc "x ~ y" - , tcDiff "x `foo` y" "foo x y" - , tc "x + (y + z)" - , tc "x + y + z" - , tc "x + y * z" -- i.e. (x + y) * z ! 
- , tc "x \\ y == z ~ a" - , tc "foo x (y + z)" - , tc "foo (x + y) z" - , tc "foo x y + z" - , tc "foo p q + r + s" - , tc "foo (p + q) r + s" - , tc "foo (p + q + r) s" - , tc "p + q + r + s" - , tcDiffRtt False "(foo.+) x y" "x foo.+ y" 0 - , tc "x + y + f a b c" - , tc "x + y + foo a b" - , tc "foo x y p + z" - , tc "foo p q a + r + s" - , tc "foo (p + q) r a + s" - , tc "foo (x + y) (p - q)" - , tc "x -> x + y" - , tc "if p then x + y else a - b" - , tc "(x + y) : Int" - , tc "!foo" - , tc "!(foo a b)" - , tc "!f a" - , tcDiff "f () a ()" "!(!f a)" - , tcDiff "f a b ()" "!(f a b)" - , tcDiff "!f ()" "!(!f)" - , tc "!(!foo)" - , tc "'bar" - , tc "'(bar a b)" - , tc "'('bar)" - , tc "!('bar)" - , tc "'(!foo)" - , tc "x -> '(y -> 'z)" - , tc "'(x -> '(y -> z))" - , tc "(\"a\", 2)" - , tc "(\"a\", 2, 2.0)" - , tcDiff "(2)" "2" - , pending $ tcDiff "Pair \"2\" (Pair 2 ())" "(\"2\", 2)" -- TODO parser produced - -- Pair "2" (Pair 2 ()#0) - -- instead of - -- Pair#0 "2" (Pair#0 2 ()#0) - -- Maybe because in this context the - -- parser can't distinguish between a constructor - -- called 'Pair' and a function called 'Pair'. 
- , pending $ tc "Pair 2 ()" -- unary tuple; fails for same reason as above - , tc "match x with (a, b) -> a" - , tc "match x with () -> foo" - , pending $ tc "match x with [a, b] -> a" -- issue #266 - , pending $ tc "match x with [a] -> a" -- ditto - , pending $ tc "match x with [] -> a" -- ditto - , tc "match x with Optional.Some (Optional.Some _) -> ()" -- Issue #695 - -- need an actual effect constructor to test the following - , pending $ tc "match x with { SomeRequest (Optional.Some _) -> k } -> ()" - , tcBinding 50 "foo" (Just "Int") "3" "foo : Int\n\ - \foo = 3" - , tcBinding 50 "foo" Nothing "3" "foo = 3" - , tcBinding 50 "foo" (Just "Int -> Int") "n -> 3" "foo : Int -> Int\n\ - \foo n = 3" - , tcBinding 50 "foo" Nothing "n -> 3" "foo n = 3" - , tcBinding 50 "foo" Nothing "n m -> 3" "foo n m = 3" - , tcBinding 9 "foo" Nothing "n m -> 3" "foo n m =\n\ - \ 3" - , tcBinding 50 "+" (Just "Int -> Int -> Int") "a b -> foo a b" "(+) : Int -> Int -> Int\n\ - \a + b = foo a b" - , tcBinding 50 "+" (Just "Int -> Int -> Int -> Int") "a b c -> foo a b c" "(+) : Int -> Int -> Int -> Int\n\ - \(+) a b c = foo a b c" - , tcBinding 50 "+" Nothing "a b -> foo a b" "a + b = foo a b" - , tcBinding 50 "+" Nothing "a b c -> foo a b c" "(+) a b c = foo a b c" - , tcBinding 50 "." Nothing "f g x -> f (g x)" "(.) 
f g x = f (g x)" - , tcBreaks 32 "let\n\ - \ go acc a b =\n\ - \ match List.at 0 a with\n\ - \ Optional.None -> 0\n\ - \ Optional.Some hd1 -> 0\n\ - \ go [] a b" - , tcBreaks 30 "match x with\n\ - \ (Optional.None, _) -> foo" - , tcBreaks 50 "if true then match x with 12 -> x else x" - , tcBreaks 50 "if true then x else match x with 12 -> x" - , pending $ tcBreaks 80 "x -> (if c then t else f)" -- TODO 'unexpected )', surplus parens - , tcBreaks 80 "'let\n\ - \ foo = bar\n\ - \ baz foo" - , tcBreaks 80 "!let\n\ - \ foo = bar\n\ - \ baz foo" - , tcDiffRtt True "foo let\n\ - \ a = 1\n\ - \ b" - "foo\n\ - \ let\n\ - \ a = 1\n\ - \ b" 80 - , tcBreaks 80 "if\n\ - \ a = b\n\ - \ a then foo else bar" -- missing break before 'then', issue #518 - , tcBreaks 80 "Stream.foldLeft 0 (+) t" - , tcBreaks 80 "let\n\ - \ delay = 'isEven\n\ - \ ()" - , tcBreaks 80 "let\n\ - \ a = ()\n\ - \ b = ()\n\ - \ c = (1, 2)\n\ - \ ()" - , tcBreaks 80 "let\n\ - \ a = [: escaped: \\@ :]\n\ - \ ()" - --- FQN elision tests - , tcBreaks 12 "if foo then\n\ - \ use A x\n\ - \ f x x\n\ - \else\n\ - \ use B y\n\ - \ f y y" - , tcBreaks 12 "if foo then\n\ - \ use A x\n\ - \ f x x\n\ - \else\n\ - \ use B x\n\ - \ f x x" - , tcBreaks 80 "let\n\ - \ a =\n\ - \ use A x\n\ - \ if foo then f x x else g x x\n\ - \ bar" - , tcBreaks 80 "if foo then f A.x B.x else f A.x B.x" - , tcBreaks 80 "if foo then f A.x A.x B.x else y" - , tcBreaks 80 "if foo then A.f x else y" - , tcBreaks 13 "if foo then\n\ - \ use A +\n\ - \ x + y\n\ - \else y" - , tcBreaks 20 "if p then\n\ - \ use A x\n\ - \ use B y z\n\ - \ f z z y y x x\n\ - \else q" - , tcBreaks 30 "if foo then\n\ - \ use A.X c\n\ - \ use AA.PP.QQ e\n\ - \ f c c e e\n\ - \else\n\ - \ use A.B X.d Y.d\n\ - \ use A.B.X f\n\ - \ g X.d X.d Y.d Y.d f f" - , tcBreaks 30 "if foo then\n\ - \ use A.X c\n\ - \ f c c\n\ - \else\n\ - \ use A X.c YY.c\n\ - \ g X.c X.c YY.c YY.c" - , tcBreaks 20 "handle\n\ - \ if foo then\n\ - \ use A.X c\n\ - \ f c c\n\ - \ else\n\ - \ use A.Y 
c\n\ - \ g c c\n\ - \with bar" - , tcBreaks 20 "let\n\ - \ a = 2\n\ - \ handle baz\n\ - \ with\n\ - \ use A.X c\n\ - \ if foo then\n\ - \ f c c\n\ - \ else g c c" - , tcBreaks 28 "if foo then\n\ - \ f (x : (∀ t. Pair t t))\n\ - \else\n\ - \ f (x : (∀ t. Pair t t))" - , tcBreaks 15 "handle\n\ - \ use A x\n\ - \ if f x x then\n\ - \ x\n\ - \ else y\n\ - \with foo" -- missing break before 'then', issue #518 - , tcBreaks 20 "match x with\n\ - \ () ->\n\ - \ use A y\n\ - \ f y y" - , tcBreaks 12 "let\n\ - \ use A x\n\ - \ f x x\n\ - \ c = g x x\n\ - \ h x x" - , tcBreaks 15 "handle\n\ - \ use A x\n\ - \ f x x\n\ - \with foo" - , tcBreaks 15 "let\n\ - \ c =\n\ - \ use A x\n\ - \ f x x\n\ - \ g c" - , tcBreaks 20 "if foo then\n\ - \ f x x A.x A.x\n\ - \else g" - , tcBreaks 27 "match t with\n\ - \ () ->\n\ - \ a =\n\ - \ use A B.x\n\ - \ f B.x B.x\n\ - \ handle\n\ - \ q =\n\ - \ use A.B.D x\n\ - \ h x x\n\ - \ foo\n\ - \ with foo\n\ - \ bar\n\ - \ _ ->\n\ - \ b =\n\ - \ use A.C x\n\ - \ g x x\n\ - \ bar" - , tcBreaks 20 "let\n\ - \ a =\n\ - \ handle\n\ - \ use A x\n\ - \ f x x\n\ - \ with foo\n\ - \ bar" - , tcBreaks 16 "let\n\ - \ a =\n\ - \ b =\n\ - \ use A x\n\ - \ f x x\n\ - \ foo\n\ - \ bar" - , tcBreaks 20 "let\n\ - \ a =\n\ - \ match x with\n\ - \ () ->\n\ - \ use A x\n\ - \ f x x\n\ - \ bar" - , tcBreaks 20 "let\n\ - \ a =\n\ - \ use A x\n\ - \ b = f x x\n\ - \ c = g x x\n\ - \ foo\n\ - \ bar" - , tcBreaks 13 "let\n\ - \ a =\n\ - \ use A p q r\n\ - \ f p p\n\ - \ f q q\n\ - \ f r r\n\ - \ foo" - -- The following behaviour is possibly not ideal. Note how the `use A B.x` - -- would have the same effect if it was under the `c =`. It doesn't actually - -- need to be above the `b =`, because all the usages of A.B.X in that tree are - -- covered by another use statement, the `use A.B x`. Fixing this would - -- probably require another annotation pass over the AST, to place 'candidate' - -- use statements, to then push some of them down on the next pass. 
- -- Not worth it! - , tcBreaks 20 "let\n\ - \ a =\n\ - \ use A B.x\n\ - \ b =\n\ - \ use A.B x\n\ - \ f x x\n\ - \ c =\n\ - \ g B.x B.x\n\ - \ h A.D.x\n\ - \ foo\n\ - \ bar" - , tcBreaks 80 "let\n\ - \ use A x\n\ - \ use A.T.A T1\n\ - \ g = T1 +3\n\ - \ h = T1 +4\n\ - \ i : T -> T -> Int\n\ - \ i p q =\n\ - \ g' = T1 +3\n\ - \ h' = T1 +4\n\ - \ +2\n\ - \ if true then x else x" - ] diff --git a/parser-typechecker/tests/Unison/Test/Type.hs b/parser-typechecker/tests/Unison/Test/Type.hs deleted file mode 100644 index f0042d3539..0000000000 --- a/parser-typechecker/tests/Unison/Test/Type.hs +++ /dev/null @@ -1,33 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.Test.Type where - -import EasyTest -import Unison.Type -import Unison.Symbol (Symbol) -import qualified Unison.Var as Var -import qualified Unison.Typechecker as Typechecker - -infixr 1 --> - -(-->) :: Ord v => Type v () -> Type v () -> Type v () -(-->) a b = arrow() a b - -test :: Test () -test = scope "type" $ tests [ - scope "unArrows" $ - let x = arrow() (builtin() "a") (builtin() "b") :: Type Symbol () - in case x of - Arrows' [i,o] -> - expect (i == builtin() "a" && o == builtin() "b") - _ -> crash "unArrows (a -> b) did not return a spine of [a,b]" - , - scope "subtype" $ do - let v = Var.named "a" - v2 = Var.named "b" - vt = var() v - vt2 = var() v2 - x = forall() v (nat() --> effect() [vt, builtin() "eff"] (nat())) :: Type Symbol () - y = forall() v2 (nat() --> effect() [vt2] (nat())) :: Type Symbol () - expect . 
not $ Typechecker.isSubtype x y - ] diff --git a/parser-typechecker/tests/Unison/Test/TypePrinter.hs b/parser-typechecker/tests/Unison/Test/TypePrinter.hs deleted file mode 100755 index 5b157ba064..0000000000 --- a/parser-typechecker/tests/Unison/Test/TypePrinter.hs +++ /dev/null @@ -1,170 +0,0 @@ -module Unison.Test.TypePrinter where - -import EasyTest -import qualified Data.Map as Map -import Unison.TypePrinter -import qualified Unison.Builtin -import Unison.Util.ColorText (toPlain) -import qualified Unison.Util.Pretty as PP -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.Test.Common as Common - - --- Test the result of the pretty-printer. Expect the pretty-printer to --- produce output that differs cosmetically from the original code we parsed. --- Check also that re-parsing the pretty-printed code gives us the same ABT. --- (Skip that latter check if rtt is false.) --- Note that this does not verify the position of the PrettyPrint Break elements. -tc_diff_rtt :: Bool -> String -> String -> Int -> Test () -tc_diff_rtt rtt s expected width = - let input_type = Common.t s - get_names = PPE.fromNames Common.hqLength Unison.Builtin.names - prettied = fmap toPlain $ PP.syntaxToColor $ prettyRaw get_names Map.empty (-1) input_type - actual = if width == 0 - then PP.renderUnbroken $ prettied - else PP.render width $ prettied - actual_reparsed = Common.t actual - in scope s $ tests [( - if actual == expected then ok - else do note $ "expected: " ++ show expected - note $ "actual : " ++ show actual - note $ "expectedS:\n" ++ expected - note $ "actualS:\n" ++ actual - note $ "show(input) : " ++ show input_type - note $ "prettyprint : " ++ show prettied - crash "actual != expected" - ), ( - if (not rtt) || (input_type == actual_reparsed) then ok - else do note $ "round trip test..." 
- note $ "single parse: " ++ show input_type - note $ "double parse: " ++ show actual_reparsed - note $ "prettyprint : " ++ show prettied - crash "single parse != double parse" - )] - --- As above, but do the round-trip test unconditionally. -tc_diff :: String -> String -> Test () -tc_diff s expected = tc_diff_rtt True s expected 0 - --- As above, but expect not even cosmetic differences between the input string --- and the pretty-printed version. -tc :: String -> Test () -tc s = tc_diff s s - --- Use renderBroken to render the output to some maximum width. -tc_breaks :: String -> Int -> String -> Test () -tc_breaks s width expected = tc_diff_rtt True s expected width - -test :: Test () -test = scope "typeprinter" . tests $ - [ tc "a -> b" - , tc "()" - , tc "Pair" - , tc "Pair a b" - , tc "Pair a a" - , tc_diff "((a))" $ "a" - , tc "Pair a ()" -- unary tuple - , tc "(a, a)" - , tc "(a, a, a)" - , tc "(a, b, c, d)" - , tc "Pair a (Pair a a)" - , tc "Pair (Pair a a) a" - , tc "{} (Pair a a)" - , tc "a ->{} b" - , tc "a ->{e1} b" - , tc "a ->{e1, e2} b -> c ->{} d" - , tc "a ->{e1, e2} b ->{} c -> d" - , tc "a -> b -> c ->{} d" - , tc "a -> b ->{} c -> d" - , tc "{e1, e2} (Pair a a)" - , tc "Pair (a -> b) (c -> d)" - , tc "Pair a b ->{e1, e2} Pair a b ->{} Pair (a -> b) d -> Pair c d" - , tc "[Pair a a]" - , tc "'a" - , tc "'Pair a a" - , tc "a -> 'b" - , tc "'(a -> b)" - , tc "(a -> b) -> c" - , tc "'a -> b" - , tc "∀ A. A -> A" - , tc "∀ foo.A. foo.A -> foo.A" - , tc "∀ A B. A -> B -> (A, B)" - , tc "a -> 'b -> c" - , tc "a -> (b -> c) -> d" - , tc "(a -> b) -> c -> d" - , tc "((a -> b) -> c) -> d" - , tc "(∀ a. 'a) -> ()" - , tc "(∀ a. (∀ b. 'b) -> a) -> ()" - , tc_diff "∀ a. 
'a" $ "'a" - , tc "a -> '(b -> c)" - , tc "a -> b -> c -> d" - , tc "a -> 'Pair b c" - , tc "a -> b -> 'c" - , tc "a ->{e} 'b" - , tc "a -> '{e} b" - , tc "a -> '{e} b -> c" - , tc "a -> '{e} b ->{f} c" - , tc "a -> '{e} (b -> c)" - , tc "a -> '{e} (b ->{f} c)" - , tc "a -> 'b" - , tc "a -> '('b)" - , tc "a -> '('(b -> c))" - , tc "a -> '('('(b -> c)))" - , tc "a -> '{e} ('('(b -> c)))" - , tc "a -> '('{e} ('(b -> c)))" - , tc "a -> '('('{e} (b -> c)))" - , tc "a -> 'b ->{f} c" - , tc "a -> '(b -> c)" - , tc "a -> '(b ->{f} c)" - , tc "a -> '{e} ('b)" - , pending $ tc "a -> '{e} 'b" -- issue #249 - , pending $ tc "a -> '{e} '{f} b" -- issue #249 - , tc "a -> '{e} ('b)" - , tc_diff "a -> () ->{e} () -> b -> c" $ "a -> '{e} ('(b -> c))" - , tc "a -> '{e} ('(b -> c))" - , tc_diff "a ->{e} () ->{f} b" $ "a ->{e} '{f} b" - , tc "a ->{e} '{f} b" - , tc_diff "a -> () ->{e} () ->{f} b" $ "a -> '{e} ('{f} b)" - , tc "a -> '{e} ('{f} b)" - , tc "a -> '{e} () ->{f} b" - , tc "a -> '{e} ('{f} (b -> c))" - , tc "a ->{e} '(b -> c)" - , tc "a -> '{e} (b -> c)" - , tc_diff "a -> () ->{e} () -> b" $ "a -> '{e} ('b)" - , tc "'{e} a" - , tc "'{e} (a -> b)" - , tc "'{e} (a ->{f} b)" - , pending $ tc "Pair a '{e} b" -- parser hits unexpected ' - , tc_diff_rtt False "Pair a ('{e} b)" "Pair a '{e} b" 80 -- no RTT due to the above - , tc "'(a -> 'a)" - , tc "'()" - , tc "'('a)" - , tc_diff "''a" "'('a)" - , tc_diff "'''a" "'('('a))" - , tc_diff "∀ a . a" $ "a" - , tc_diff "∀ a. a" $ "a" - , tc_diff "∀ a . 'a" $ "'a" - , pending $ tc_diff "∀a . a" $ "a" -- lexer doesn't accept, treats ∀a as one lexeme - feels like it should work - , pending $ tc_diff "∀ A . 'A" $ "'A" -- 'unknown parse error' - should this be accepted? 
- - , tc_diff_rtt False "a -> b -> c -> d" -- hitting 'unexpected Semi' in the reparse - "a\n\ - \-> b\n\ - \-> c\n\ - \-> d" 10 - - , tc_diff_rtt False "a -> Pair b c -> d" -- ditto, and extra line breaks that seem superfluous in Pair - "a\n\ - \-> Pair b c\n\ - \-> d" 14 - - , tc_diff_rtt False "Pair (forall a. (a -> a -> a)) b" -- as above, and TODO not nesting under Pair - "Pair\n\ - \ (∀ a. a -> a -> a) b" 24 - - , tc_diff_rtt False "Pair (forall a. (a -> a -> a)) b" -- as above, and TODO not breaking under forall - "Pair\n\ - \ (∀ a. a -> a -> a)\n\ - \ b" 21 - - ] diff --git a/parser-typechecker/tests/Unison/Test/Typechecker.hs b/parser-typechecker/tests/Unison/Test/Typechecker.hs deleted file mode 100644 index d7254ff54c..0000000000 --- a/parser-typechecker/tests/Unison/Test/Typechecker.hs +++ /dev/null @@ -1,33 +0,0 @@ -{-# Language OverloadedStrings #-} - -module Unison.Test.Typechecker where - -import EasyTest -import Unison.Symbol ( Symbol(..) ) -import qualified Unison.Type as Type -import qualified Unison.Typechecker as Typechecker -import qualified Unison.Var as Var - -test :: Test () -test = scope "typechecker" $ tests - [ scope "isSubtype" isSubtypeTest - ] - -isSubtypeTest :: Test () -isSubtypeTest = - let - symbol i n = Symbol i (Var.User n) - forall v t = Type.forall () v t - var v = Type.var () v - - a = symbol 0 "a" - a_ i = symbol i "a" - lhs = forall a (var a) -- ∀a. a - rhs_ i = var (a_ i) -- a_i - in - -- check that `∀a. a <: a_i` (used to fail for i = 2, 3) - tests [ expectSubtype lhs (rhs_ i) | i <- [0 .. 
5] ] - where - expectSubtype t1 t2 = - scope ("isSubtype (" <> show t1 <> ") (" <> show t2 <> ")") - (expect $ Typechecker.isSubtype t1 t2) diff --git a/parser-typechecker/tests/Unison/Test/Typechecker/Components.hs b/parser-typechecker/tests/Unison/Test/Typechecker/Components.hs deleted file mode 100644 index 327a51d510..0000000000 --- a/parser-typechecker/tests/Unison/Test/Typechecker/Components.hs +++ /dev/null @@ -1,35 +0,0 @@ -module Unison.Test.Typechecker.Components where - --- import Control.Monad -import EasyTest --- import Unison.Parsers (unsafeParseTerm) --- import qualified Unison.Note as Note --- import qualified Unison.Test.Common as Common --- import qualified Unison.Typechecker.Components as Components - -test :: Test () -test = scope "Typechecker.Components" $ ok - -- [ - -- -- simple case, no minimization done - -- t "{ id x = x; g = id 42; y = id id g; y }" - -- "{ id x = x; g = id 42; y = id id g; y }" - -- -- check that we get let generalization - -- , t "{ id x = x; g = id 42; y = id id g; y }" - -- "{ id x = x; g = id 42; y = id id g; y }" - -- -- check that we preserve order of components as much as possible - -- , t "{ id2 x = x; id1 x = x; id3 x = x; id3 }" - -- "{ id2 x = x; id1 x = x; id3 x = x; id3 }" - -- -- check that we reorder according to dependencies - -- , t "{ g = id 42; y = id id g; id x = x; y }" - -- "{ id x = x; g = id 42; y = id id g; y }" - -- -- insane example, checks for: generalization, reordering, - -- -- preservation of order when possible - -- , t "{ g = id 42; y = id id g; ping x = pong x; pong x = id (ping x); id x = x; y }" - -- "{ id x = x; g = id 42; y = id id g ; ({ ping x = pong x; pong x = id (ping x) ; y })}" - -- ] - -- where - -- t before after = scope (before ++ " ⟹ " ++ after) $ do - -- let term = unsafeParseTerm before - -- let after' = Components.minimize' term - -- guard $ Common.typechecks' after' - -- expect (unsafeParseTerm after == after') diff --git 
a/parser-typechecker/tests/Unison/Test/Typechecker/Context.hs b/parser-typechecker/tests/Unison/Test/Typechecker/Context.hs deleted file mode 100644 index a759708341..0000000000 --- a/parser-typechecker/tests/Unison/Test/Typechecker/Context.hs +++ /dev/null @@ -1,41 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Test.Typechecker.Context ( test ) -where - -import Data.Foldable ( for_ ) -import EasyTest -import Unison.Symbol ( Symbol ) -import qualified Unison.Typechecker.Context as Context -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import qualified Unison.Var as Var - -test :: Test () -test = scope "context" $ tests - [ scope "verifyClosedTerm" verifyClosedTermTest - ] - -type TV = Context.TypeVar Symbol () - -verifyClosedTermTest :: Test () -verifyClosedTermTest = tests - [ scope "report-all-free-vars" $ - let - a = Var.named @Symbol "a" - b = Var.named @Symbol "b" - a' = Var.named @TV "a'" - b' = Var.named @TV "b'" - -- (a : a')(b : b') - t = Term.app() - (Term.ann() (Term.var() a) (Type.var() a')) - (Term.ann() (Term.var() b) (Type.var() b')) - res = Context.synthesizeClosed [] mempty t - errors = Context.typeErrors res - expectUnknownSymbol (Context.ErrorNote cause _) = case cause of - Context.UnknownSymbol _ _ -> ok - e -> crash $ "Unexpected type error " <> show e - in do - expectEqual 4 (length errors) -- there are 4 unknown symbols: a, a', b, b' - for_ errors expectUnknownSymbol - ] diff --git a/parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs b/parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs deleted file mode 100644 index 062975322b..0000000000 --- a/parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs +++ /dev/null @@ -1,57 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Test.Typechecker.TypeError where - -import Data.Foldable (toList) -import Data.Maybe (isJust) -import EasyTest -import Unison.Parser (Ann) -import Unison.Result (pattern Result) -import qualified 
Unison.Result as Result -import Unison.Symbol (Symbol) -import qualified Unison.Typechecker.Context as C -import Unison.Typechecker.Extractor (ErrorExtractor) -import qualified Unison.Typechecker.Extractor as Ex -import qualified Unison.Typechecker.TypeError as Err -import Unison.Var (Var) -import qualified Unison.Test.Common as Common - -test :: Test () -test = scope "> extractor" . tests $ - [ y "> true && 3" Err.and - , y "> true || 3" Err.or - , y "> if 3 then 1 else 2" Err.cond - , y "> if true then 1 else \"surprise\"" Err.ifBody - , y "> match 3 with 3 | 3 -> 3" Err.matchGuard - , y "> match 3 with\n 3 -> 3\n 4 -> \"surprise\"" Err.matchBody - -- , y "> match 3 with true -> true" Err. - , y "> [1, +1]" Err.vectorBody - , n "> true && ((x -> x + 1) true)" Err.and - , n "> true || ((x -> x + 1) true)" Err.or - , n "> if ((x -> x + 1) true) then 1 else 2" Err.cond - , n "> match 3 with 3 | 3 -> 3" Err.matchBody - , y "> 1 1" Err.applyingNonFunction - , y "> 1 Int.+ 1" Err.applyingFunction - , y ( "ability Abort where\n" ++ - " abort : {Abort} a\n" ++ - "\n" ++ - "xyz : t -> Request Abort t -> t\n" ++ - "xyz default abort = match abort with\n" ++ - " {a} -> 3\n" ++ - " {Abort.abort -> k} ->\n" ++ - " handle k 100 with xyz default\n" - ) Err.matchBody - ] - where y, n :: String -> ErrorExtractor Symbol Ann a -> Test () - y s ex = scope s $ expect $ yieldsError s ex - n s ex = scope s $ expect $ noYieldsError s ex - -noYieldsError :: Var v => String -> ErrorExtractor v Ann a -> Bool -noYieldsError s ex = not $ yieldsError s ex - -yieldsError :: forall v a. Var v => String -> ErrorExtractor v Ann a -> Bool -yieldsError s ex = let - Result notes (Just _) = Common.parseAndSynthesizeAsFile [] "> test" s - notes' :: [C.ErrorNote v Ann] - notes' = [ n | Result.TypeError n <- toList notes ] - in any (isJust . 
Ex.extract ex) notes' diff --git a/parser-typechecker/tests/Unison/Test/UnisonSources.hs b/parser-typechecker/tests/Unison/Test/UnisonSources.hs deleted file mode 100644 index 35452d9b36..0000000000 --- a/parser-typechecker/tests/Unison/Test/UnisonSources.hs +++ /dev/null @@ -1,195 +0,0 @@ -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RankNTypes #-} - -module Unison.Test.UnisonSources where - -import Control.Lens ( view ) -import Control.Lens.Tuple ( _5 ) -import Control.Monad (void) -import Control.Monad.IO.Class (liftIO) -import qualified Data.Map as Map -import Data.Sequence (Seq) -import Data.Text (unpack) -import Data.Text.IO (readFile) -import EasyTest -import System.FilePath (joinPath, splitPath, replaceExtension) -import System.FilePath.Find (always, extension, find, (==?)) -import System.Directory ( doesFileExist ) -import qualified Unison.ABT as ABT -import qualified Unison.Builtin as Builtin -import Unison.Codebase.Runtime ( Runtime, evaluateWatches ) -import Unison.Codebase.Serialization ( getFromBytes, putBytes ) -import qualified Unison.Codebase.Serialization.V1 as V1 -import Unison.DataDeclaration (EffectDeclaration, DataDeclaration) -import Unison.Parser as Parser -import qualified Unison.Parsers as Parsers -import qualified Unison.PrettyPrintEnv as PPE -import qualified Unison.PrintError as PrintError -import Unison.Reference ( Reference ) -import Unison.Result (pattern Result, Result) -import qualified Unison.Result as Result -import qualified Unison.Runtime.Rt1IO as RT -import qualified Unison.Runtime.Interface as RTI -import Unison.Symbol (Symbol) -import qualified Unison.Term as Term -import Unison.Term ( Term ) -import Unison.Test.Common (parseAndSynthesizeAsFile, parsingEnv) -import Unison.Type ( Type ) -import qualified Unison.UnisonFile as UF -import Unison.Util.Monoid (intercalateMap) -import qualified Unison.Var as Var -import qualified Unison.Test.Common as Common -import qualified Unison.Names3 - -type Note = Result.Note Symbol 
Parser.Ann - -type TFile = UF.TypecheckedUnisonFile Symbol Ann -type SynthResult = - Result (Seq Note) - (Either Unison.Names3.Names0 TFile) - -type EitherResult = Either String TFile - - -ppEnv :: PPE.PrettyPrintEnv -ppEnv = PPE.fromNames Common.hqLength Builtin.names - -expectRight' :: Either String a -> Test a -expectRight' (Left e) = crash e -expectRight' (Right a) = ok >> pure a - -good :: EitherResult -> Test TFile -good = expectRight' - -bad :: EitherResult -> Test TFile -bad r = EasyTest.expectLeft r >> done - -test :: Bool -> Test () -test new = do - rt <- if new then io RTI.startRuntime else pure RT.runtime - scope "unison-src" - . tests - $ [ go rt shouldPassNow good - , go rt shouldFailNow bad - , go rt shouldPassLater (pending . bad) - , go rt shouldFailLater (pending . good) - ] - -shouldPassPath, shouldFailPath :: String -shouldPassPath = "unison-src/tests" -shouldFailPath = "unison-src/errors" - -shouldPassNow :: IO [FilePath] -shouldPassNow = find always (extension ==? ".u") shouldPassPath - -shouldFailNow :: IO [FilePath] -shouldFailNow = find always (extension ==? ".u") shouldFailPath - -shouldPassLater :: IO [FilePath] -shouldPassLater = find always (extension ==? ".uu") shouldPassPath - -shouldFailLater :: IO [FilePath] -shouldFailLater = find always (extension ==? 
".uu") shouldFailPath - -go :: Runtime Symbol -> IO [FilePath] -> (EitherResult -> Test TFile) -> Test () -go rt files how = do - files' <- liftIO files - tests (makePassingTest rt how <$> files') - -showNotes :: Foldable f => String -> PrintError.Env -> f Note -> String -showNotes source env = - intercalateMap "\n\n" $ PrintError.renderNoteAsANSI 60 env source - -decodeResult - :: String -> SynthResult -> EitherResult-- String (UF.TypecheckedUnisonFile Symbol Ann) -decodeResult source (Result notes Nothing) = - Left $ showNotes source ppEnv notes -decodeResult source (Result notes (Just (Left errNames))) = - Left $ showNotes - source - (PPE.fromNames Common.hqLength - (Unison.Names3.shadowing errNames Builtin.names)) - notes -decodeResult _source (Result _notes (Just (Right uf))) = - Right uf - -makePassingTest - :: Runtime Symbol -> (EitherResult -> Test TFile) -> FilePath -> Test () -makePassingTest rt how filepath = scope (shortName filepath) $ do - uf <- typecheckingTest how filepath - resultTest rt uf filepath *> serializationTest uf - -shortName :: FilePath -> FilePath -shortName = joinPath . drop 1 . splitPath - -typecheckingTest :: (EitherResult -> Test TFile) -> FilePath -> Test TFile -typecheckingTest how filepath = scope "typecheck" $ do - source <- io $ unpack <$> Data.Text.IO.readFile filepath - how . 
decodeResult source $ parseAndSynthesizeAsFile [] (shortName filepath) source - -resultTest - :: Runtime Symbol -> TFile -> FilePath -> Test () -resultTest rt uf filepath = do - let valueFile = replaceExtension filepath "ur" - rFileExists <- io $ doesFileExist valueFile - if rFileExists - then scope "result" $ do - values <- io $ unpack <$> Data.Text.IO.readFile valueFile - let untypedFile = UF.discardTypes uf - let term = Parsers.parseTerm values parsingEnv - (bindings, watches) <- io $ either undefined id <$> - evaluateWatches Builtin.codeLookup - mempty - (const $ pure Nothing) - rt - untypedFile - case term of - Right tm -> do - -- compare the the watch expression from the .u with the expr in .ur - let [watchResult] = view _5 <$> Map.elems watches - tm' = Term.letRec' False bindings watchResult - -- note . show $ tm' - -- note . show $ Term.amap (const ()) tm - expect $ tm' == Term.amap (const ()) tm - Left e -> crash $ show e - else pure () - -serializationTest :: TFile -> Test () -serializationTest uf = scope "serialization" . tests . 
concat $ - [ map testDataDeclaration (Map.toList $ UF.dataDeclarations' uf) - , map testEffectDeclaration (Map.toList $ UF.effectDeclarations' uf) - , map testTerm (Map.toList $ UF.hashTerms uf) - ] - where - putUnit :: Monad m => () -> m () - putUnit () = pure () - getUnit :: Monad m => m () - getUnit = pure () - testDataDeclaration :: (Symbol, (Reference, DataDeclaration Symbol Ann)) -> Test () - testDataDeclaration (name, (_, decl)) = scope (Var.nameStr name) $ - let decl' :: DataDeclaration Symbol () - decl' = void decl - bytes = putBytes (V1.putDataDeclaration V1.putSymbol putUnit) decl' - decl'' = getFromBytes (V1.getDataDeclaration V1.getSymbol getUnit) bytes - in expectEqual decl'' (Just decl') - testEffectDeclaration :: (Symbol, (Reference, EffectDeclaration Symbol Ann)) -> Test () - testEffectDeclaration (name, (_, decl)) = scope (Var.nameStr name) $ - let decl' :: EffectDeclaration Symbol () - decl' = void decl - bytes = putBytes (V1.putEffectDeclaration V1.putSymbol putUnit) decl' - decl'' = getFromBytes (V1.getEffectDeclaration V1.getSymbol getUnit) bytes - in expectEqual decl'' (Just decl') - testTerm :: (Symbol, (Reference, Term Symbol Ann, Type Symbol Ann)) -> Test () - testTerm (name, (_, tm, tp)) = scope (Var.nameStr name) $ - let tm' :: Term Symbol () - tm' = Term.amap (const ()) tm - tp' :: Type Symbol () - tp' = ABT.amap (const ()) tp - tmBytes = putBytes (V1.putTerm V1.putSymbol putUnit) tm' - tpBytes = putBytes (V1.putType V1.putSymbol putUnit) tp' - tm'' = getFromBytes (V1.getTerm V1.getSymbol getUnit) tmBytes - tp'' = getFromBytes (V1.getType V1.getSymbol getUnit) tpBytes - in tests - [ scope "type" $ expectEqual tp'' (Just tp') - , scope "term" $ expectEqual tm'' (Just tm') - ] diff --git a/parser-typechecker/tests/Unison/Test/UriParser.hs b/parser-typechecker/tests/Unison/Test/UriParser.hs deleted file mode 100644 index fbea77318a..0000000000 --- a/parser-typechecker/tests/Unison/Test/UriParser.hs +++ /dev/null @@ -1,84 +0,0 @@ -{-# 
LANGUAGE OverloadedStrings #-} - -module Unison.Test.UriParser where - -import EasyTest -import Unison.Codebase.Editor.RemoteRepo (RemoteRepo(..)) -import Unison.Codebase.Path (Path(..)) -import qualified Unison.Codebase.Path as Path -import qualified Text.Megaparsec as P -import qualified Unison.Codebase.Editor.UriParser as UriParser -import qualified Data.Sequence as Seq -import Unison.Codebase.ShortBranchHash (ShortBranchHash(..)) -import Data.Text (Text) -import Unison.NameSegment (NameSegment(..)) -import qualified Data.Text as Text - -test :: Test () -test = scope "uriparser" . tests $ [ testAugmented ] - -testAugmented:: Test () -testAugmented = scope "augmented" . tests $ --- Local Protocol --- $ git clone /srv/git/project.git --- $ git clone /srv/git/project.git[:treeish][:[#hash][.path]] - [ scope "local-protocol" . tests . map parseAugmented $ - [ ("/srv/git/project.git", - (GitRepo "/srv/git/project.git" Nothing, Nothing, Path.empty)) - , ("/srv/git/project.git:abc:#def.hij.klm", - (GitRepo "/srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) - , ("srv/git/project.git", - (GitRepo "srv/git/project.git" Nothing, Nothing, Path.empty)) - , ("srv/git/project.git:abc:#def.hij.klm", - (GitRepo "srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) - ], --- File Protocol --- $ git clone file:///srv/git/project.git[:treeish][:[#hash][.path]] <- imagined - scope "file-protocol" . tests . 
map parseAugmented $ - [ ("file:///srv/git/project.git", - (GitRepo "file:///srv/git/project.git" Nothing, Nothing, Path.empty)) - , ("file:///srv/git/project.git:abc:#def.hij.klm", - (GitRepo "file:///srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) - , ("file://srv/git/project.git", - (GitRepo "file://srv/git/project.git" Nothing, Nothing, Path.empty)) - , ("file://srv/git/project.git:abc:#def.hij.klm", - (GitRepo "file://srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) - ], --- Smart / Dumb HTTP protocol --- $ git clone https://example.com/gitproject.git[:treeish][:[#hash][.path]] <- imagined - scope "http-protocol" . tests . map parseAugmented $ - [ ("https://example.com/git/project.git", - (GitRepo "https://example.com/git/project.git" Nothing, Nothing, Path.empty)) - , ("https://user@example.com/git/project.git:abc:#def.hij.klm]", - (GitRepo "https://user@example.com/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) - ], --- SSH Protocol --- $ git clone ssh://[user@]server/project.git[:treeish][:[#hash][.path]] - scope "ssh-protocol" . tests . map parseAugmented $ - [ ("ssh://git@8.8.8.8:222/user/project.git", - (GitRepo "ssh://git@8.8.8.8:222/user/project.git" Nothing, Nothing, Path.empty)) - , ("ssh://git@github.com/user/project.git:abc:#def.hij.klm", - (GitRepo "ssh://git@github.com/user/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) - ], --- $ git clone [user@]server:project.git[:treeish][:[#hash][.path]] - scope "scp-protocol" . tests . 
map parseAugmented $ - [ ("git@github.com:user/project.git", - (GitRepo "git@github.com:user/project.git" Nothing, Nothing, Path.empty)) - , ("github.com:user/project.git", - (GitRepo "github.com:user/project.git" Nothing, Nothing, Path.empty)) - , ("git@github.com:user/project.git:abc:#def.hij.klm", - (GitRepo "git@github.com:user/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) - ] - ] - -parseAugmented :: (Text, (RemoteRepo, Maybe ShortBranchHash, Path)) -> Test () -parseAugmented (s, r) = scope (Text.unpack s) $ - case P.parse UriParser.repoPath "test case" s of - Left x -> crash $ show x - Right x -> expectEqual x r - -path :: [Text] -> Path -path = Path . Seq.fromList . fmap NameSegment - -sbh :: Text -> Maybe ShortBranchHash -sbh = Just . ShortBranchHash diff --git a/parser-typechecker/tests/Unison/Test/Util/Bytes.hs b/parser-typechecker/tests/Unison/Test/Util/Bytes.hs deleted file mode 100644 index 549a5eb949..0000000000 --- a/parser-typechecker/tests/Unison/Test/Util/Bytes.hs +++ /dev/null @@ -1,62 +0,0 @@ -module Unison.Test.Util.Bytes where - -import EasyTest -import Control.Monad -import Data.List (foldl') -import qualified Unison.Util.Bytes as Bytes -import qualified Data.ByteString as BS - -test :: Test () -test = scope "util.bytes" . tests $ [ - scope "empty ==" . expect $ Bytes.empty == Bytes.empty, - - scope "empty `compare`" . expect $ Bytes.empty `compare` Bytes.empty == EQ, - - scope "==" . expect $ - Bytes.fromWord8s [0,1,2,3,4,5] <> Bytes.fromWord8s [6,7,8,9] - == - Bytes.fromWord8s [0,1,2,3,4,5,6,7,8,9], - - scope "consistency with ByteString" $ do - forM_ [(1::Int)..100] $ \_ -> do - n <- int' 0 50 - m <- int' 0 50 - k <- int' 0 (n + m) - o <- int' 0 50 - b1 <- BS.pack <$> replicateM n word8 - b2 <- BS.pack <$> replicateM m word8 - b3 <- BS.pack <$> replicateM o word8 - let [b1s, b2s, b3s] = Bytes.fromByteString <$> [b1, b2, b3] - scope "associtivity" . 
expect' $ - b1s <> (b2s <> b3s) == (b1s <> b2s) <> b3s - scope "<>" . expect' $ - Bytes.toByteString (b1s <> b2s <> b3s) == b1 <> b2 <> b3 - scope "Ord" . expect' $ - (b1 <> b2 <> b3) `compare` b3 == - (b1s <> b2s <> b3s) `compare` b3s - scope "take" . expect' $ - Bytes.toByteString (Bytes.take k (b1s <> b2s)) == BS.take k (b1 <> b2) - scope "drop" . expect' $ - Bytes.toByteString (Bytes.drop k (b1s <> b2s)) == BS.drop k (b1 <> b2) - scope "at" $ - let bs = b1s <> b2s <> b3s - b = b1 <> b2 <> b3 - in forM_ [0 .. (BS.length b - 1)] $ \ind -> - expect' $ Just (BS.index b ind) == Bytes.at ind bs - ok, - - scope "lots of chunks" $ do - forM_ [(0::Int)..100] $ \i -> do - n <- int' 0 50 - k <- int' 0 i - chunks <- replicateM n (replicateM k word8) - let b1 = foldMap Bytes.fromWord8s chunks - b2 = foldr (<>) mempty (Bytes.fromWord8s <$> chunks) - b3 = foldl' (<>) mempty (Bytes.fromWord8s <$> chunks) - b = BS.concat (BS.pack <$> chunks) - expect' $ b1 == b2 && b2 == b3 - expect' $ Bytes.toByteString b1 == b - expect' $ Bytes.toByteString b2 == b - expect' $ Bytes.toByteString b3 == b - ok - ] diff --git a/parser-typechecker/tests/Unison/Test/Util/PinBoard.hs b/parser-typechecker/tests/Unison/Test/Util/PinBoard.hs deleted file mode 100644 index dd8888d598..0000000000 --- a/parser-typechecker/tests/Unison/Test/Util/PinBoard.hs +++ /dev/null @@ -1,52 +0,0 @@ -{-# LANGUAGE MagicHash #-} -{-# LANGUAGE UnboxedTuples #-} - -module Unison.Test.Util.PinBoard - ( test, - ) -where - -import qualified Data.ByteString as ByteString -import EasyTest -import GHC.Exts (isTrue#, reallyUnsafePtrEquality#, touch#) -import GHC.IO (IO (IO)) -import System.Mem (performGC) -import qualified Unison.Util.PinBoard as PinBoard - -test :: Test () -test = - scope "util.pinboard" . 
tests $ - [ scope "pinning equal values stores only one" $ do - let b0 = ByteString.singleton 0 - let b1 = ByteString.copy b0 - - board <- PinBoard.new - - -- pinning a thing for the first time returns it - b0' <- PinBoard.pin board b0 - expectSamePointer b0 b0' - - -- pinning an equal thing returns the first - b1' <- PinBoard.pin board b1 - expectSamePointer b0 b1' - - -- the board should only have one value in it - expect' . (== 1) <$> io (PinBoard.debugSize board) - - -- keep b0 alive until here - touch b0 - - -- observe that the board doesn't keep its value alive - io performGC - expect' . (== 0) <$> io (PinBoard.debugSize board) - - ok - ] - -expectSamePointer :: a -> a -> Test () -expectSamePointer x y = - expect' (isTrue# (reallyUnsafePtrEquality# x y)) - -touch :: a -> Test () -touch x = - io (IO \s -> (# touch# x s, () #)) diff --git a/parser-typechecker/tests/Unison/Test/Util/Pretty.hs b/parser-typechecker/tests/Unison/Test/Util/Pretty.hs deleted file mode 100644 index 6859adc3f6..0000000000 --- a/parser-typechecker/tests/Unison/Test/Util/Pretty.hs +++ /dev/null @@ -1,33 +0,0 @@ -module Unison.Test.Util.Pretty - ( test - ) where - -import Control.Monad -import Data.String (fromString) -import EasyTest -import qualified Unison.Util.Pretty as Pretty - -test :: Test () -test = - scope "util.pretty" . 
tests $ [ - scope "Delta.Semigroup.<>.associative" $ do - replicateM_ 100 $ do - d1 <- randomDelta - d2 <- randomDelta - d3 <- randomDelta - expect' $ (d1 <> d2) <> d3 == d1 <> (d2 <> d3) - ok - ] - -randomDelta :: Test Pretty.Delta -randomDelta = - Pretty.delta <$> randomPretty - - where - randomPretty :: Test (Pretty.Pretty String) - randomPretty = - fromString <$> randomString - - randomString :: Test String - randomString = - replicateM 3 (pick ['x', 'y', 'z', '\n']) diff --git a/parser-typechecker/tests/Unison/Test/Var.hs b/parser-typechecker/tests/Unison/Test/Var.hs deleted file mode 100644 index 938bcb1e8c..0000000000 --- a/parser-typechecker/tests/Unison/Test/Var.hs +++ /dev/null @@ -1,23 +0,0 @@ -module Unison.Test.Var where - -import EasyTest -import Unison.Symbol (Symbol) -import Unison.Var as Var - -test :: Test () -test = scope "var" $ tests [ - scope "free synthetic vars are universally quantifiable" $ tests - [ scope (Var.nameStr v) - (expect $ Var.universallyQuantifyIfFree @Symbol v) - | v <- [ Var.inferAbility - , Var.inferInput - , Var.inferOutput - , Var.inferPatternPureE - , Var.inferPatternPureV - , Var.inferPatternBindE - , Var.inferPatternBindV - , Var.inferTypeConstructor - , Var.inferTypeConstructorArg - ] - ] - ] diff --git a/parser-typechecker/tests/Unison/Test/VersionParser.hs b/parser-typechecker/tests/Unison/Test/VersionParser.hs deleted file mode 100644 index 64b5741a75..0000000000 --- a/parser-typechecker/tests/Unison/Test/VersionParser.hs +++ /dev/null @@ -1,26 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -module Unison.Test.VersionParser where - -import EasyTest -import Data.Text -import Unison.Codebase.Editor.VersionParser -import qualified Unison.Codebase.Path as Path -import Control.Error.Safe (rightMay) -import Unison.Codebase.Editor.RemoteRepo -import Text.Megaparsec - -test :: Test () -test = scope "versionparser" . tests . 
fmap makeTest $ - [ ("release/M1j", "releases._M1j") - , ("release/M1j.2", "releases._M1j") - , ("devel/M1k", "trunk") - ] - -makeTest :: (Text, Text) -> Test () -makeTest (version, path) = - scope (unpack version) $ expectEqual - (rightMay $ runParser defaultBaseLib "versionparser" version) - (Just - ( GitRepo "https://github.com/unisonweb/base" Nothing - , Nothing - , Path.fromText path )) diff --git a/parser-typechecker/transcripts/Transcripts.hs b/parser-typechecker/transcripts/Transcripts.hs deleted file mode 100644 index 81fd7a4e06..0000000000 --- a/parser-typechecker/transcripts/Transcripts.hs +++ /dev/null @@ -1,87 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Main where - -import Unison.Prelude -import EasyTest -import Shellmet (($|)) -import System.Directory -import System.FilePath ( () - , takeExtensions - , takeBaseName - ) -import System.Process ( readProcessWithExitCode ) - -import Data.Text ( pack - , unpack - ) -import Data.List - -type TestBuilder = FilePath -> FilePath -> String -> Test () - -testBuilder :: FilePath -> FilePath -> String -> Test () -testBuilder ucm dir transcript = scope transcript $ do - io $ fromString ucm ["transcript", pack (dir transcript)] - ok - -testBuilder' :: FilePath -> FilePath -> String -> Test () -testBuilder' ucm dir transcript = scope transcript $ do - let input = pack (dir transcript) - let output = dir takeBaseName transcript <> ".output.md" - io $ runAndCaptureError ucm ["transcript", input] output - ok - where - -- Given a command and arguments, run it and capture the standard error to a file - -- regardless of success or failure. - runAndCaptureError :: FilePath -> [Text] -> FilePath -> IO () - runAndCaptureError cmd args outfile = do - t <- readProcessWithExitCode cmd (map unpack args) "" - let output = (\(_, _, stderr) -> stderr) t - writeUtf8 outfile $ (pack . 
dropRunMessage) output - - -- Given the standard error, drops the part in the end that changes each run - dropRunMessage :: String -> String - dropRunMessage = unlines . reverse . drop 3 . reverse . lines - - -buildTests :: TestBuilder -> FilePath -> Test () -buildTests testBuilder dir = do - io - . putStrLn - . unlines - $ [ "" - , "Searching for transcripts to run in: " ++ dir - ] - files <- io $ listDirectory dir - let transcripts = sort . filter (\f -> takeExtensions f == ".md") $ files - ucm <- io $ unpack <$> "stack" $| ["exec", "--", "which", "unison"] -- todo: what is it in windows? - tests (testBuilder ucm dir <$> transcripts) - --- Transcripts that exit successfully get cleaned-up by the transcript parser. --- Any remaining folders matching "transcript-.*" are output directories --- of failed transcripts and should be moved under the "test-output" folder -cleanup :: Test () -cleanup = do - files' <- io $ listDirectory "." - let dirs = filter ("transcript-" `isPrefixOf`) files' - - -- if any such codebases remain they are moved under test-output - unless (null dirs) $ do - io $ createDirectoryIfMissing True "test-output" - io $ for_ dirs (\d -> renameDirectory d ("test-output" d)) - io - . putStrLn - . unlines - $ [ "" - , "NOTE: All transcript codebases have been moved into" - , "the `test-output` directory. Feel free to delete it." 
- ] - -test :: Test () -test = do - buildTests testBuilder $"unison-src" "transcripts" - buildTests testBuilder' $"unison-src" "transcripts" "errors" - cleanup - -main :: IO () -main = run test diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal deleted file mode 100644 index 8ad3147bca..0000000000 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ /dev/null @@ -1,382 +0,0 @@ -cabal-version: 2.2 -name: unison-parser-typechecker -category: Compiler -version: 0.1 -license: MIT -license-file: LICENSE -author: Unison Computing, public benefit corp -maintainer: Paul Chiusano , Runar Bjarnason , Arya Irani -stability: provisional -homepage: http://unisonweb.org -bug-reports: https://github.com/unisonweb/unison/issues -copyright: Copyright (C) 2013-2018 Unison Computing, PBC and contributors -synopsis: Parser and typechecker for the Unison language -description: - -build-type: Simple -extra-source-files: -data-files: - -source-repository head - type: git - location: git://github.com/unisonweb/unison.git - --- `cabal install -foptimized` enables optimizations -flag optimized - manual: True - default: False - -flag quiet - manual: True - default: False - --- NOTE: Keep in sync throughout repo. 
-common unison-common - default-language: Haskell2010 - default-extensions: - ApplicativeDo, - BlockArguments, - DeriveFunctor, - DerivingStrategies, - DoAndIfThenElse, - FlexibleContexts, - FlexibleInstances, - LambdaCase, - MultiParamTypeClasses, - ScopedTypeVariables, - TupleSections, - TypeApplications - -library - import: unison-common - - hs-source-dirs: src - - exposed-modules: - Unison.Builtin - Unison.Builtin.Decls - Unison.Codecs - Unison.Codebase - Unison.Codebase.Branch - Unison.Codebase.Branch.Dependencies - Unison.Codebase.BranchDiff - Unison.Codebase.BranchUtil - Unison.Codebase.Causal - Unison.Codebase.Classes - Unison.Codebase.CodeLookup - Unison.Codebase.Editor.AuthorInfo - Unison.Codebase.Editor.Command - Unison.Codebase.Editor.DisplayThing - Unison.Codebase.Editor.Git - Unison.Codebase.Editor.HandleInput - Unison.Codebase.Editor.HandleCommand - Unison.Codebase.Editor.Input - Unison.Codebase.Editor.Output - Unison.Codebase.Editor.Output.BranchDiff - Unison.Codebase.Editor.Propagate - Unison.Codebase.Editor.RemoteRepo - Unison.Codebase.Editor.SearchResult' - Unison.Codebase.Editor.SlurpResult - Unison.Codebase.Editor.SlurpComponent - Unison.Codebase.Editor.TodoOutput - Unison.Codebase.Editor.UriParser - Unison.Codebase.Editor.VersionParser - Unison.Codebase.FileCodebase - Unison.Codebase.FileCodebase.Common - Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex - Unison.Codebase.GitError - Unison.Codebase.Metadata - Unison.Codebase.NameEdit - Unison.Codebase.Path - Unison.Codebase.Patch - Unison.Codebase.Reflog - Unison.Codebase.Runtime - Unison.Codebase.SearchResult - Unison.Codebase.Serialization - Unison.Codebase.Serialization.PutT - Unison.Codebase.Serialization.V1 - Unison.Codebase.ShortBranchHash - Unison.Codebase.SyncMode - Unison.Codebase.TermEdit - Unison.Codebase.TranscriptParser - Unison.Codebase.TypeEdit - Unison.Codebase.Watch - Unison.Codebase.Execute - Unison.Codebase.MainTerm - Unison.CommandLine - Unison.CommandLine.DisplayValues 
- Unison.CommandLine.InputPattern - Unison.CommandLine.InputPatterns - Unison.CommandLine.Main - Unison.CommandLine.OutputMessages - Unison.DeclPrinter - Unison.FileParser - Unison.FileParsers - Unison.Lexer - Unison.NamePrinter - Unison.Parser - Unison.Parsers - Unison.Path - Unison.PrettyPrintEnv - Unison.PrettyTerminal - Unison.PrintError - Unison.Result - Unison.Runtime.ANF - Unison.Runtime.Builtin - Unison.Runtime.Debug - Unison.Runtime.Decompile - Unison.Runtime.Foreign - Unison.Runtime.Interface - Unison.Runtime.IR - Unison.Runtime.MCode - Unison.Runtime.Machine - Unison.Runtime.Pattern - Unison.Runtime.Rt1 - Unison.Runtime.Rt1IO - Unison.Runtime.IOSource - Unison.Runtime.Vector - Unison.Runtime.SparseVector - Unison.Runtime.Stack - Unison.TermParser - Unison.TermPrinter - Unison.TypeParser - Unison.TypePrinter - Unison.Typechecker - Unison.Typechecker.Components - Unison.Typechecker.Context - Unison.Typechecker.Extractor - Unison.Typechecker.TypeError - Unison.Typechecker.TypeLookup - Unison.Typechecker.TypeVar - Unison.UnisonFile - Unison.Util.AnnotatedText - Unison.Util.Bytes - Unison.Util.Cache - Unison.Util.ColorText - Unison.Util.EnumContainers - Unison.Util.Exception - Unison.Util.Free - Unison.Util.Find - Unison.Util.Less - Unison.Util.Logger - Unison.Util.Map - Unison.Util.Menu - Unison.Util.PinBoard - Unison.Util.Pretty - Unison.Util.Range - Unison.Util.Star3 - Unison.Util.SyntaxText - Unison.Util.Timing - Unison.Util.TQueue - Unison.Util.TransitiveClosure - Unison.Util.CycleTable - Unison.Util.CyclicEq - Unison.Util.CyclicOrd - - build-depends: - ansi-terminal, - async, - base, - base16 >= 0.2.1.0, - bifunctors, - bytes, - bytestring, - cereal, - containers, - comonad, - concurrent-supply, - configurator, - cryptonite, - directory, - guid, - data-memocombinators, - edit-distance, - errors, - exceptions, - extra, - filepath, - filepattern, - fingertree, - free, - fsnotify, - generic-monoid, - hashable, - hashtables, - haskeline, - io-streams, - 
lens, - ListLike, - megaparsec >= 5.0.0 && < 7.0.0, - memory, - mmorph, - monad-loops, - mtl, - murmur-hash, - mutable-containers, - network, - network-simple, - nonempty-containers, - process, - primitive, - random, - raw-strings-qq, - regex-base, - regex-tdfa, - safe, - shellmet, - split, - stm, - strings, - terminal-size, - text, - time, - transformers, - unison-core, - unliftio, - util, - vector, - unicode-show - - ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures - - if flag(optimized) - ghc-options: -funbox-strict-fields -O2 - - if flag(quiet) - ghc-options: -v0 - -executable unison - import: unison-common - main-is: Main.hs - hs-source-dirs: unison - ghc-options: -Wall -threaded -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -rtsopts -with-rtsopts=-I0 -optP-Wno-nonportable-include-path - other-modules: - System.Path - Version - build-depends: - base, - containers, - configurator, - directory, - errors, - filepath, - megaparsec, - safe, - shellmet, - template-haskell, - temporary, - text, - unison-core, - unison-parser-typechecker - if !os(windows) - build-depends: - unix - -executable prettyprintdemo - import: unison-common - main-is: Main.hs - hs-source-dirs: prettyprintdemo - ghc-options: -Wall -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures - build-depends: - base, - safe, - text, - unison-parser-typechecker - -executable tests - import: unison-common - main-is: Suite.hs - hs-source-dirs: tests - ghc-options: -W -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts "-with-rtsopts=-N -T" -v0 - build-depends: - base, - easytest - other-modules: - Unison.Test.ABT - Unison.Test.ANF - Unison.Test.Cache - Unison.Test.Codebase - Unison.Test.Codebase.Causal - Unison.Test.Codebase.FileCodebase - Unison.Test.Codebase.Path - Unison.Test.ColorText - Unison.Test.Common - Unison.Test.DataDeclaration - Unison.Test.FileParser - Unison.Test.Git - 
Unison.Test.Lexer - Unison.Test.IO - Unison.Test.MCode - Unison.Test.Range - Unison.Test.Referent - Unison.Test.Term - Unison.Test.TermParser - Unison.Test.TermPrinter - Unison.Test.Type - Unison.Test.TypePrinter - Unison.Test.Typechecker - Unison.Test.Typechecker.Components - Unison.Test.Typechecker.Context - Unison.Test.Typechecker.TypeError - Unison.Test.UnisonSources - Unison.Test.UriParser - Unison.Test.Util.Bytes - Unison.Test.Util.PinBoard - Unison.Test.Util.Pretty - Unison.Test.Var - Unison.Test.VersionParser - Unison.Core.Test.Name - - build-depends: - async, - base, - bytestring, - containers, - directory, - easytest, - errors, - extra, - filepath, - filemanip, - here, - lens, - megaparsec, - mtl, - raw-strings-qq, - stm, - shellmet, - split, - temporary, - text, - transformers, - unison-core, - unison-parser-typechecker - -executable transcripts - import: unison-common - main-is: Transcripts.hs - ghc-options: -W -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts -with-rtsopts=-N -v0 - hs-source-dirs: transcripts - other-modules: - build-depends: - base, - directory, - easytest, - filepath, - shellmet, - process, - text, - unison-core, - unison-parser-typechecker - -benchmark runtime - type: exitcode-stdio-1.0 - main-is: Main.hs - ghc-options: -O2 - hs-source-dirs: benchmarks/runtime - build-depends: - base, - criterion, - containers, - unison-core, - unison-parser-typechecker diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs deleted file mode 100644 index 375c88e819..0000000000 --- a/parser-typechecker/unison/Main.hs +++ /dev/null @@ -1,317 +0,0 @@ -{-# Language OverloadedStrings #-} -{-# Language PartialTypeSignatures #-} -{-# LANGUAGE CPP #-} -{-# OPTIONS_GHC -Wno-partial-type-signatures #-} - -module Main where - -import Unison.Prelude -import Control.Concurrent ( mkWeakThreadId, myThreadId ) -import Control.Error.Safe (rightMay) -import Control.Exception ( throwTo, 
AsyncException(UserInterrupt) ) -import Data.Configurator.Types ( Config ) -import System.Directory ( getCurrentDirectory, removeDirectoryRecursive ) -import System.Environment ( getArgs, getProgName ) -import System.Mem.Weak ( deRefWeak ) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Editor.VersionParser as VP -import Unison.Codebase.Execute ( execute ) -import qualified Unison.Codebase.FileCodebase as FileCodebase -import Unison.Codebase.FileCodebase.Common ( codebasePath ) -import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) -import Unison.Codebase.Runtime ( Runtime ) -import Unison.CommandLine ( watchConfig ) -import qualified Unison.CommandLine.Main as CommandLine -import qualified Unison.Runtime.Rt1IO as Rt1 -import qualified Unison.Runtime.Interface as RTI -import Unison.Symbol ( Symbol ) -import qualified Unison.Codebase.Path as Path -import qualified Unison.Util.Cache as Cache -import qualified Version -import qualified Unison.Codebase.TranscriptParser as TR -import qualified System.Path as Path -import qualified System.FilePath as FP -import qualified System.IO.Temp as Temp -import qualified System.Exit as Exit -import System.IO.Error (catchIOError) -import qualified Unison.Codebase.Editor.Input as Input -import qualified Unison.Util.Pretty as P -import qualified Unison.PrettyTerminal as PT -import qualified Data.Text as Text -import qualified Data.Configurator as Config -import Text.Megaparsec (runParser) - -#if defined(mingw32_HOST_OS) -import qualified GHC.ConsoleHandler as WinSig -#else -import qualified System.Posix.Signals as Sig -#endif - -usage :: String -> P.Pretty P.ColorText -usage executableStr = P.callout "🌻" $ P.lines [ - P.bold "Usage instructions for the Unison Codebase Manager", - "You are running version: " <> P.string Version.gitDescribe, - "", - P.bold executable, - P.wrap "Starts Unison interactively, using the codebase in the home directory.", - "", - P.bold $ executable <> " -codebase 
path/to/codebase", - P.wrap "Starts Unison interactively, using the specified codebase. This flag can also be set for any of the below commands.", - "", - P.bold $ executable <> " run .mylib.mymain", - P.wrap "Executes the definition `.mylib.mymain` from the codebase, then exits.", - "", - P.bold $ executable <> " run.file foo.u mymain", - P.wrap "Executes the definition called `mymain` in `foo.u`, then exits.", - "", - P.bold $ executable <> " run.pipe mymain", - P.wrap "Executes the definition called `mymain` from a `.u` file read from the standard input, then exits.", - "", - P.bold $ executable <> " transcript mytranscript.md", - P.wrap $ "Executes the `mytranscript.md` transcript and creates" - <> "`mytranscript.output.md` if successful. Exits after completion, and deletes" - <> "the temporary directory created." - <> "Multiple transcript files may be provided; they are processed in sequence" - <> "starting from the same codebase.", - "", - P.bold $ executable <> " transcript -save-codebase mytranscript.md", - P.wrap $ "Executes the `mytranscript.md` transcript and creates" - <> "`mytranscript.output.md` if successful. Exits after completion, and saves" - <> "the resulting codebase to a new directory on disk." - <> "Multiple transcript files may be provided; they are processed in sequence" - <> "starting from the same codebase.", - "", - P.bold $ executable <> " transcript.fork mytranscript.md", - P.wrap $ "Executes the `mytranscript.md` transcript in a copy of the current codebase" - <> "and creates `mytranscript.output.md` if successful. Exits after completion." - <> "Multiple transcript files may be provided; they are processed in sequence" - <> "starting from the same codebase.", - "", - P.bold $ executable <> " transcript.fork -save-codebase mytranscript.md", - P.wrap $ "Executes the `mytranscript.md` transcript in a copy of the current codebase" - <> "and creates `mytranscript.output.md` if successful. 
Exits after completion," - <> "and saves the resulting codebase to a new directory on disk." - <> "Multiple transcript files may be provided; they are processed in sequence" - <> "starting from the same codebase.", - "", - P.bold $ executable <> " version", - "Prints version of Unison then quits.", - "", - P.bold $ executable <> " help", - "Prints this help."] - where executable = (P.text . Text.pack) executableStr - -installSignalHandlers :: IO () -installSignalHandlers = do - main_thread <- myThreadId - wtid <- mkWeakThreadId main_thread - - let interrupt = do - r <- deRefWeak wtid - case r of - Nothing -> return () - Just t -> throwTo t UserInterrupt - -#if defined(mingw32_HOST_OS) - let sig_handler WinSig.ControlC = interrupt - sig_handler WinSig.Break = interrupt - sig_handler _ = return () - _ <- WinSig.installHandler (WinSig.Catch sig_handler) -#else - _ <- Sig.installHandler Sig.sigQUIT (Sig.Catch interrupt) Nothing - _ <- Sig.installHandler Sig.sigINT (Sig.Catch interrupt) Nothing -#endif - - return () - -main :: IO () -main = do - args <- getArgs - progName <- getProgName - -- hSetBuffering stdout NoBuffering -- cool - - _ <- installSignalHandlers - -- We need to know whether the program was invoked with -codebase for - -- certain messages. Therefore we keep a Maybe FilePath - mcodepath - -- rather than just deciding on whether to use the supplied path or - -- the home directory here and throwing away that bit of information - let (mcodepath, restargs0) = case args of - "-codebase" : codepath : restargs -> (Just codepath, restargs) - _ -> (Nothing, args) - (mNewRun, restargs) = case restargs0 of - "--new-runtime" : rest -> (Just True, rest) - _ -> (Nothing, restargs0) - currentDir <- getCurrentDirectory - configFilePath <- getConfigFilePath mcodepath - config@(config_, _cancelConfig) <- - catchIOError (watchConfig configFilePath) $ \_ -> - Exit.die "Your .unisonConfig could not be loaded. Check that it's correct!" 
- branchCacheSize :: Word <- Config.lookupDefault 4096 config_ "NamespaceCacheSize" - branchCache <- Cache.semispaceCache branchCacheSize - case restargs of - [] -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath - launch currentDir mNewRun config theCodebase branchCache [] - [version] | isFlag "version" version -> - putStrLn $ progName ++ " version: " ++ Version.gitDescribe - [help] | isFlag "help" help -> PT.putPrettyLn (usage progName) - ["init"] -> FileCodebase.initCodebaseAndExit mcodepath - "run" : [mainName] -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath - runtime <- join . getStartRuntime mNewRun $ fst config - execute theCodebase runtime mainName - "run.file" : file : [mainName] | isDotU file -> do - e <- safeReadUtf8 file - case e of - Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I couldn't find that file or it is for some reason unreadable." - Right contents -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath - let fileEvent = Input.UnisonFileChanged (Text.pack file) contents - launch currentDir mNewRun config theCodebase branchCache [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] - "run.pipe" : [mainName] -> do - e <- safeReadUtf8StdIn - case e of - Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I had trouble reading this input." 
- Right contents -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath - let fileEvent = Input.UnisonFileChanged (Text.pack "") contents - launch - currentDir mNewRun config theCodebase branchCache - [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] - "transcript" : args' -> - case args' of - "-save-codebase" : transcripts -> runTranscripts mNewRun branchCache False True mcodepath transcripts - _ -> runTranscripts mNewRun branchCache False False mcodepath args' - "transcript.fork" : args' -> - case args' of - "-save-codebase" : transcripts -> runTranscripts mNewRun branchCache True True mcodepath transcripts - _ -> runTranscripts mNewRun branchCache True False mcodepath args' - _ -> do - PT.putPrettyLn (usage progName) - Exit.exitWith (Exit.ExitFailure 1) - -prepareTranscriptDir :: Branch.Cache IO -> Bool -> Maybe FilePath -> IO FilePath -prepareTranscriptDir branchCache inFork mcodepath = do - tmp <- Temp.getCanonicalTemporaryDirectory >>= (`Temp.createTempDirectory` "transcript") - unless inFork $ do - PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." - _ <- FileCodebase.initCodebase branchCache tmp - pure() - - when inFork $ FileCodebase.getCodebaseOrExit branchCache mcodepath >> do - path <- FileCodebase.getCodebaseDir mcodepath - PT.putPrettyLn $ P.lines [ - P.wrap "Transcript will be run on a copy of the codebase at: ", "", - P.indentN 2 (P.string path) - ] - Path.copyDir (path FP. codebasePath) (tmp FP. 
codebasePath) - - pure tmp - -runTranscripts' - :: Maybe Bool - -> Branch.Cache IO - -> Maybe FilePath - -> FilePath - -> [String] - -> IO Bool -runTranscripts' mNewRun branchCache mcodepath transcriptDir args = do - currentDir <- getCurrentDirectory - theCodebase <- FileCodebase.getCodebaseOrExit branchCache $ Just transcriptDir - case args of - args@(_:_) -> do - for_ args $ \arg -> case arg of - md | isMarkdown md -> do - parsed <- TR.parseFile arg - case parsed of - Left err -> - PT.putPrettyLn $ P.callout "❓" ( - P.lines [ - P.indentN 2 "A parsing error occurred while reading a file:", "", - P.indentN 2 $ P.string err]) - Right stanzas -> do - configFilePath <- getConfigFilePath mcodepath - mdOut <- TR.run mNewRun transcriptDir configFilePath stanzas theCodebase branchCache - let out = currentDir FP. - FP.addExtension (FP.dropExtension arg ++ ".output") - (FP.takeExtension md) - writeUtf8 out mdOut - putStrLn $ "💾 Wrote " <> out - wat -> - PT.putPrettyLn $ P.callout "❓" ( - P.lines [ - P.indentN 2 "Unrecognized command, skipping:", "", - P.indentN 2 $ P.string wat]) - pure True - [] -> - pure False - -runTranscripts - :: Maybe Bool - -> Branch.Cache IO - -> Bool - -> Bool - -> Maybe FilePath - -> [String] - -> IO () -runTranscripts mNewRun branchCache inFork keepTemp mcodepath args = do - progName <- getProgName - transcriptDir <- prepareTranscriptDir branchCache inFork mcodepath - completed <- - runTranscripts' mNewRun branchCache (Just transcriptDir) transcriptDir args - when completed $ do - unless keepTemp $ removeDirectoryRecursive transcriptDir - when keepTemp $ PT.putPrettyLn $ - P.callout "🌸" ( - P.lines [ - "I've finished running the transcript(s) in this codebase:", "", - P.indentN 2 (P.string transcriptDir), "", - P.wrap $ "You can run" - <> P.backticked (P.string progName <> " -codebase " <> P.string transcriptDir) - <> "to do more work with it."]) - - unless completed $ do - unless keepTemp $ removeDirectoryRecursive transcriptDir - PT.putPrettyLn 
(usage progName) - Exit.exitWith (Exit.ExitFailure 1) - -initialPath :: Path.Absolute -initialPath = Path.absoluteEmpty - -getStartRuntime :: Maybe Bool -> Config -> IO (IO (Runtime Symbol)) -getStartRuntime newRun config = do - b <- maybe (Config.lookupDefault False config "new-runtime") pure newRun - pure $ if b then RTI.startRuntime else pure Rt1.runtime - -launch - :: FilePath - -> Maybe Bool - -> (Config, IO ()) - -> _ - -> Branch.Cache IO - -> [Either Input.Event Input.Input] - -> IO () -launch dir newRun config code branchCache inputs = do - startRuntime <- getStartRuntime newRun $ fst config - CommandLine.main dir defaultBaseLib initialPath config inputs startRuntime code branchCache Version.gitDescribe - -isMarkdown :: String -> Bool -isMarkdown md = case FP.takeExtension md of - ".md" -> True - ".markdown" -> True - _ -> False - -isDotU :: String -> Bool -isDotU file = FP.takeExtension file == ".u" - --- so we can do `ucm --help`, `ucm -help` or `ucm help` (I hate --- having to remember which one is supported) -isFlag :: String -> String -> Bool -isFlag f arg = arg == f || arg == "-" ++ f || arg == "--" ++ f - -getConfigFilePath :: Maybe FilePath -> IO FilePath -getConfigFilePath mcodepath = (FP. 
".unisonConfig") <$> FileCodebase.getCodebaseDir mcodepath - -defaultBaseLib :: Maybe RemoteNamespace -defaultBaseLib = rightMay $ - runParser VP.defaultBaseLib "version" (Text.pack Version.gitDescribe) diff --git a/parser-typechecker/unison/System/Path.hs b/parser-typechecker/unison/System/Path.hs deleted file mode 100644 index df8ac1b1a4..0000000000 --- a/parser-typechecker/unison/System/Path.hs +++ /dev/null @@ -1,106 +0,0 @@ --- Copied from --- --- --- --- because: --- --- * base <4.7 upper bound would require patching, but lib hasn't been updated --- in 8 years --- * according to Arya, this code will not be necessary soon --- --- License file (MIT) was dropped in deps/fsutils - --- | A collection of file system utilities that appear to be missing from --- Directory, FilePath, Prelude, etc. Some of these may overlap with MissingH --- but the versions here will probably be more simplistic. Furthermore, this --- library is focused on this one thing and not a whole bunch of things. -module System.Path - ( mtreeList - , fileList - , walkDir - , copyDir - , replaceRoot - , removeRoot - , Directory - , dirPath - , subDirs - , files - , createDir - , filterUseless - ) where - -import Control.Monad (filterM, forM_) -import System.Directory -import System.FilePath ((), addTrailingPathSeparator) -import Data.List ((\\)) - --- | Remove useless paths from a list of paths. -filterUseless :: [FilePath] -> [FilePath] -filterUseless = (\\ [".", ".."]) - --- | Returns a list of nodes in a tree via a depth-first walk. -mtreeList :: Monad m => (a -> m [a]) -> a -> m [a] -mtreeList children root = do - xs <- children root - subChildren <- mapM (mtreeList children) xs - return $ root : concat subChildren - --- | Get a list of files in path, but not recursively. Removes '.' and '..'. -topFileList :: FilePath -> IO [FilePath] -topFileList path = - fmap (map (path ) . filterUseless) $ getDirectoryContents path - --- | Recursively list the contents of a directory. Depth-first. 
-fileList :: FilePath -> IO [FilePath] -fileList = mtreeList children - where children path = do - directory <- doesDirectoryExist path - if directory - then topFileList path - else return [] - --- | We can use this data type to represent the pieces of a directory. -data Directory = Directory - { -- | The path of the directory itself. - dirPath :: FilePath - -- | All subdirectories of this directory. - , subDirs :: [FilePath] - -- | All files contained in this directory. - , files :: [FilePath] - } - deriving (Show) - --- | Creates a Directory instance from a FilePath. -createDir :: FilePath -> IO Directory -createDir path = do - contents <- topFileList path - subdirs <- filterM doesDirectoryExist contents - files <- filterM doesFileExist contents - return (Directory path subdirs files) - --- | Walk a directory depth-first. Similar to Python's os.walk and fs.core/walk --- from the fs Clojure library. -walkDir :: FilePath -> IO [Directory] -walkDir root = createDir root >>= mtreeList children - where children path = do - let dirs = subDirs path - mapM createDir dirs - --- | Given a root (prefix), remove it from a path. This is useful --- for getting the filename and subdirs of a path inside of a root. -removeRoot :: FilePath -> FilePath -> FilePath -removeRoot prefix = drop . length $ addTrailingPathSeparator prefix - --- | Given a root path, a new root path, and a path to be changed, --- removes the old root from the path and replaces it with to. -replaceRoot :: FilePath -> FilePath -> FilePath -> FilePath -replaceRoot root to path = to removeRoot root path - --- | Copy a directory recursively. Moves every file, creates every directory. -copyDir :: FilePath -> FilePath -> IO () -copyDir from to = do - createDirectoryIfMissing True to - walked <- walkDir from - forM_ walked $ \(Directory _ dirs files) -> do - mapM_ (createDirectoryIfMissing True . 
replaceRoot from to) dirs - forM_ files $ \path -> copyFile path (replaceRoot from to path) diff --git a/parser-typechecker/unison/Version.hs b/parser-typechecker/unison/Version.hs deleted file mode 100644 index da45288409..0000000000 --- a/parser-typechecker/unison/Version.hs +++ /dev/null @@ -1,16 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE TemplateHaskell #-} - -module Version where - -import Language.Haskell.TH (runIO) -import Language.Haskell.TH.Syntax (Exp(LitE), Lit(StringL)) -import Shellmet -import Data.Text - -gitDescribe :: String -gitDescribe = $( fmap (LitE . StringL . unpack) . runIO $ - "git" $| ["describe", "--tags", "--always", "--dirty='"] - $? pure "unknown" - ) - diff --git a/stack.yaml b/stack.yaml index 552ac67081..512f61f79c 100644 --- a/stack.yaml +++ b/stack.yaml @@ -5,9 +5,19 @@ flags: {} allow-different-user: true packages: -- yaks/easytest -- parser-typechecker -- unison-core +# - yaks/easytest +# - parser-typechecker +# - unison-core +- codebase1/codebase +- codebase2/codebase +- codebase2/codebase-sqlite +- codebase2/core +- codebase2/language +- codebase2/runtime +- codebase2/editor +- codebase2/syntax +- codebase2/util +- codebase2/util-serialization #compiler-check: match-exact resolver: lts-15.15 @@ -25,7 +35,9 @@ extra-deps: - prelude-extras-0.4.0.3@sha256:1c10b0123ea13a6423d74a8fcbaeb2d5249b472588abde418a36b47b7c4f48c8,1163 - sandi-0.5@sha256:b278d072ca717706ea38f9bd646e023f7f2576a778fb43565b434f93638849aa,3010 - strings-1.1@sha256:0285dec4c8ab262359342b3e5ef1eb567074669461b9b38404f1cb870c881c5c,1617 +- sqlite-simple-0.4.18.0@sha256:3ceea56375c0a3590c814e411a4eb86943f8d31b93b110ca159c90689b6b39e5,3002 +- direct-sqlite-2.3.26@sha256:04e835402f1508abca383182023e4e2b9b86297b8533afbd4e57d1a5652e0c23,3718 ghc-options: # All packages - "$locals": -Werror -Wno-type-defaults #-freverse-errors + "$locals": -Wall -Wno-name-shadowing -Werror -Wno-type-defaults #-freverse-errors diff --git a/unison-core/LICENSE 
b/unison-core/LICENSE deleted file mode 100644 index cca9c4376c..0000000000 --- a/unison-core/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013, Paul Chiusano and contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/unison-core/src/Unison/ABT.hs b/unison-core/src/Unison/ABT.hs deleted file mode 100644 index 4ba958704e..0000000000 --- a/unison-core/src/Unison/ABT.hs +++ /dev/null @@ -1,715 +0,0 @@ --- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE Rank2Types #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.ABT where - -import Unison.Prelude - -import Control.Lens (Lens', use, (.=)) -import Control.Monad.State (MonadState,evalState) -import Data.Functor.Identity (runIdentity) -import Data.List hiding (cycle) -import Data.Vector ((!)) -import Prelude hiding (abs,cycle) -import Prelude.Extras (Eq1(..), Show1(..), Ord1(..)) -import Unison.Hashable (Accumulate,Hashable1,hash1) -import qualified Data.Foldable as Foldable -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Vector as Vector -import qualified Unison.Hashable as Hashable -import qualified Unison.Util.Components as Components - -data ABT f v r - = Var v - | Cycle r - | Abs v r - | Tm (f r) deriving (Functor, Foldable, Traversable) - --- | At each level in the tree, we store the set of free variables and --- a value of type `a`. Variables are of type `v`. -data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a) } - --- | A class for variables. 
--- --- * `Set.notMember (freshIn vs v) vs`: --- `freshIn` returns a variable not used in the `Set` -class Ord v => Var v where - freshIn :: Set v -> v -> v - -data V v = Free v | Bound v deriving (Eq,Ord,Show,Functor) - -unvar :: V v -> v -unvar (Free v) = v -unvar (Bound v) = v - -instance Var v => Var (V v) where - freshIn s v = freshIn (Set.map unvar s) <$> v - -newtype Path s t a b m = Path { focus :: s -> Maybe (a, b -> Maybe t, m) } - -here :: Monoid m => Path s t s t m -here = Path $ \s -> Just (s, Just, mempty) - -instance Semigroup (Path s t a b m) where - (<>) = mappend - -instance Monoid (Path s t a b m) where - mempty = Path (const Nothing) - mappend (Path p1) (Path p2) = Path p3 where - p3 s = p1 s <|> p2 s - -type Path' f g m = forall a v . Var v => Path (Term f v a) (Term f (V v) a) (Term g v a) (Term g (V v) a) m - -compose :: Monoid m => Path s t a b m -> Path a b a' b' m -> Path s t a' b' m -compose (Path p1) (Path p2) = Path p3 where - p3 s = do - (get1,set1,m1) <- p1 s - (get2,set2,m2) <- p2 get1 - pure (get2, set2 >=> set1, m1 `mappend` m2) - -at :: Path s t a b m -> s -> Maybe a -at p s = (\(a,_,_) -> a) <$> focus p s - -modify' :: Path s t a b m -> (m -> a -> b) -> s -> Maybe t -modify' p f s = focus p s >>= \(get,set,m) -> set (f m get) - -wrap :: (Functor f, Foldable f, Var v) => v -> Term f (V v) a -> (V v, Term f (V v) a) -wrap v t = - if Set.member (Free v) (freeVars t) - then let v' = fresh t (Bound v) in (v', rename (Bound v) v' t) - else (Bound v, t) - -wrap' :: (Functor f, Foldable f, Var v) - => v -> Term f (V v) a -> (V v -> Term f (V v) a -> c) -> c -wrap' v t f = uncurry f (wrap v t) - --- | Return the list of all variables bound by this ABT -bound' :: Foldable f => Term f v a -> [v] -bound' t = case out t of - Abs v t -> v : bound' t - Cycle t -> bound' t - Tm f -> Foldable.toList f >>= bound' - _ -> [] - -annotateBound' :: (Ord v, Functor f, Foldable f) => Term f v a0 -> Term f v [v] -annotateBound' t = snd <$> annotateBound'' 
t - --- Annotate the tree with the set of bound variables at each node. -annotateBound :: (Ord v, Foldable f, Functor f) => Term f v a -> Term f v (a, Set v) -annotateBound = go Set.empty where - go bound t = let a = (annotation t, bound) in case out t of - Var v -> annotatedVar a v - Cycle body -> cycle' a (go bound body) - Abs x body -> abs' a x (go (Set.insert x bound) body) - Tm body -> tm' a (go bound <$> body) - -annotateBound'' :: (Ord v, Functor f, Foldable f) => Term f v a -> Term f v (a, [v]) -annotateBound'' = go [] where - go env t = let a = (annotation t, env) in case out t of - Abs v body -> abs' a v (go (v : env) body) - Cycle body -> cycle' a (go env body) - Tm f -> tm' a (go env <$> f) - Var v -> annotatedVar a v - --- | Return the set of all variables bound by this ABT -bound :: (Ord v, Foldable f) => Term f v a -> Set v -bound t = Set.fromList (bound' t) - --- | `True` if the term has no free variables, `False` otherwise -isClosed :: Term f v a -> Bool -isClosed t = Set.null (freeVars t) - --- | `True` if `v` is a member of the set of free variables of `t` -isFreeIn :: Ord v => v -> Term f v a -> Bool -isFreeIn v t = Set.member v (freeVars t) - --- | Replace the annotation with the given argument. -annotate :: a -> Term f v a -> Term f v a -annotate a (Term fvs _ out) = Term fvs a out - -vmap :: (Functor f, Foldable f, Ord v2) => (v -> v2) -> Term f v a -> Term f v2 a -vmap f (Term _ a out) = case out of - Var v -> annotatedVar a (f v) - Tm fa -> tm' a (fmap (vmap f) fa) - Cycle r -> cycle' a (vmap f r) - Abs v body -> abs' a (f v) (vmap f body) - -amap :: (Functor f, Foldable f, Ord v) => (a -> a2) -> Term f v a -> Term f v a2 -amap = amap' . 
const - -amap' :: (Functor f, Foldable f, Ord v) => (Term f v a -> a -> a2) -> Term f v a -> Term f v a2 -amap' f t@(Term _ a out) = case out of - Var v -> annotatedVar (f t a) v - Tm fa -> tm' (f t a) (fmap (amap' f) fa) - Cycle r -> cycle' (f t a) (amap' f r) - Abs v body -> abs' (f t a) v (amap' f body) - --- | Modifies the annotations in this tree -instance Functor f => Functor (Term f v) where - fmap f (Term fvs a sub) = Term fvs (f a) (fmap (fmap f) sub) - -extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a -extraMap p (Term fvs a sub) = Term fvs a (go p sub) where - go :: Functor g => (forall k . f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) - go p = \case - Var v -> Var v - Cycle r -> Cycle (extraMap p r) - Abs v r -> Abs v (extraMap p r) - Tm x -> Tm (fmap (extraMap p) (p x)) - -pattern Var' v <- Term _ _ (Var v) -pattern Cycle' vs t <- Term _ _ (Cycle (AbsN' vs t)) --- pattern Abs' v body <- Term _ _ (Abs v body) -pattern Abs' subst <- (unabs1 -> Just subst) -pattern AbsN' vs body <- (unabs -> (vs, body)) -pattern Tm' f <- Term _ _ (Tm f) -pattern CycleA' a avs t <- Term _ a (Cycle (AbsNA' avs t)) -pattern AbsNA' avs body <- (unabsA -> (avs, body)) -pattern Abs1NA' avs body <- (unabs1A -> Just (avs, body)) - -unabsA :: Term f v a -> ([(a,v)], Term f v a) -unabsA (Term _ a (Abs hd body)) = - let (tl, body') = unabsA body in ((a,hd) : tl, body') -unabsA t = ([], t) - -unabs1A :: Term f v a -> Maybe ([(a,v)], Term f v a) -unabs1A t = case unabsA t of - ([], _) -> Nothing - x -> Just x - -var :: v -> Term f v () -var = annotatedVar () - -annotatedVar :: a -> v -> Term f v a -annotatedVar a v = Term (Set.singleton v) a (Var v) - -abs :: Ord v => v -> Term f v () -> Term f v () -abs = abs' () - -abs' :: Ord v => a -> v -> Term f v a -> Term f v a -abs' a v body = Term (Set.delete v (freeVars body)) a (Abs v body) - -absr :: (Functor f, Foldable f, Var v) => v -> Term f (V v) () -> Term f (V v) () -absr = absr' () - --- | 
Rebuild an `abs`, renaming `v` to avoid capturing any `Free v` in `body`. -absr' :: (Functor f, Foldable f, Var v) => a -> v -> Term f (V v) a -> Term f (V v) a -absr' a v body = wrap' v body $ \v body -> abs' a v body - -absChain :: Ord v => [v] -> Term f v () -> Term f v () -absChain vs t = foldr abs t vs - -absCycle :: Ord v => [v] -> Term f v () -> Term f v () -absCycle vs t = cycle $ absChain vs t - -absChain' :: Ord v => [(a, v)] -> Term f v a -> Term f v a -absChain' vs t = foldr (\(a,v) t -> abs' a v t) t vs - -tm :: (Foldable f, Ord v) => f (Term f v ()) -> Term f v () -tm = tm' () - -tm' :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a -tm' a t = - Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) - -cycle :: Term f v () -> Term f v () -cycle = cycle' () - -cycle' :: a -> Term f v a -> Term f v a -cycle' a t = Term (freeVars t) a (Cycle t) - -cycler' :: (Functor f, Foldable f, Var v) => a -> [v] -> Term f (V v) a -> Term f (V v) a -cycler' a vs t = cycle' a $ foldr (absr' a) t vs - -cycler :: (Functor f, Foldable f, Var v) => [v] -> Term f (V v) () -> Term f (V v) () -cycler = cycler' () - -into :: (Foldable f, Ord v) => ABT f v (Term f v ()) -> Term f v () -into = into' () - -into' :: (Foldable f, Ord v) => a -> ABT f v (Term f v a) -> Term f v a -into' a abt = case abt of - Var x -> annotatedVar a x - Cycle t -> cycle' a t - Abs v r -> abs' a v r - Tm t -> tm' a t - --- | renames `old` to `new` in the given term, ignoring subtrees that bind `old` -rename :: (Foldable f, Functor f, Var v) => v -> v -> Term f v a -> Term f v a -rename old new t0@(Term fvs ann t) = - if Set.notMember old fvs then t0 - else case t of - Var v -> if v == old then annotatedVar ann new else t0 - Cycle body -> cycle' ann (rename old new body) - Abs v body -> - -- v shadows old, so skip this subtree - if v == old then abs' ann v body - - -- the rename would capture new, freshen this Abs - -- to make that no longer true, then proceed with - -- renaming 
`old` to `new` - else if v == new then - let v' = freshIn (Set.fromList [new,old] <> freeVars body) v - in abs' ann v' (rename old new (rename v v' body)) - - -- nothing special, just rename inside body of Abs - else abs' ann v (rename old new body) - Tm v -> tm' ann (fmap (rename old new) v) - -changeVars :: (Foldable f, Functor f, Var v) => Map v v -> Term f v a -> Term f v a -changeVars m t = case out t of - Abs v body -> case Map.lookup v m of - Nothing -> abs' (annotation t) v (changeVars m body) - Just v' -> abs' (annotation t) v' (changeVars m body) - Cycle body -> cycle' (annotation t) (changeVars m body) - Var v -> case Map.lookup v m of - Nothing -> t - Just v -> annotatedVar (annotation t) v - Tm v -> tm' (annotation t) (changeVars m <$> v) - --- | Produce a variable which is free in both terms -freshInBoth :: Var v => Term f v a -> Term f v a -> v -> v -freshInBoth t1 t2 = freshIn $ Set.union (freeVars t1) (freeVars t2) - -fresh :: Var v => Term f v a -> v -> v -fresh t = freshIn (freeVars t) - -freshEverywhere :: (Foldable f, Var v) => Term f v a -> v -> v -freshEverywhere t = freshIn . Set.fromList $ allVars t - -allVars :: Foldable f => Term f v a -> [v] -allVars t = case out t of - Var v -> [v] - Cycle body -> allVars body - Abs v body -> v : allVars body - Tm v -> Foldable.toList v >>= allVars - -freshes :: Var v => Term f v a -> [v] -> [v] -freshes = freshes' . freeVars - -freshes' :: Var v => Set v -> [v] -> [v] -freshes' used vs = evalState (traverse freshenS vs) used - --- | Freshens the given variable wrt. the set of used variables --- tracked by state. Adds the result to the set of used variables. -freshenS :: (Var v, MonadState (Set v) m) => v -> m v -freshenS = freshenS' id - --- | A more general version of `freshenS` that uses a lens --- to focus on used variables inside state. 
-freshenS' :: (Var v, MonadState s m) => Lens' s (Set v) -> v -> m v -freshenS' uvLens v = do - usedVars <- use uvLens - let v' = freshIn usedVars v - uvLens .= Set.insert v' usedVars - pure v' - --- | `subst v e body` substitutes `e` for `v` in `body`, avoiding capture by --- renaming abstractions in `body` -subst - :: (Foldable f, Functor f, Var v) - => v - -> Term f v a - -> Term f v a - -> Term f v a -subst v r = subst' (const r) v (freeVars r) - --- Slightly generalized version of `subst`, the replacement action is handled --- by the function `replace`, which is given the annotation `a` at the point --- of replacement. `r` should be the set of free variables contained in the --- term returned by `replace`. See `substInheritAnnotation` for an example usage. -subst' :: (Foldable f, Functor f, Var v) => (a -> Term f v a) -> v -> Set v -> Term f v a -> Term f v a -subst' replace v r t2@(Term fvs ann body) - | Set.notMember v fvs = t2 -- subtrees not containing the var can be skipped - | otherwise = case body of - Var v' | v == v' -> replace ann -- var match; perform replacement - | otherwise -> t2 -- var did not match one being substituted; ignore - Cycle body -> cycle' ann (subst' replace v r body) - Abs x _ | x == v -> t2 -- x shadows v; ignore subtree - Abs x e -> abs' ann x' e' - where x' = freshIn (fvs `Set.union` r) x - -- rename x to something that cannot be captured by `r` - e' = if x /= x' then subst' replace v r (rename x x' e) - else subst' replace v r e - Tm body -> tm' ann (fmap (subst' replace v r) body) - --- Like `subst`, but the annotation of the replacement is inherited from --- the previous annotation at each replacement point. 
-substInheritAnnotation :: (Foldable f, Functor f, Var v) - => v -> Term f v b -> Term f v a -> Term f v a -substInheritAnnotation v r = - subst' (\ann -> const ann <$> r) v (freeVars r) - -substsInheritAnnotation - :: (Foldable f, Functor f, Var v) - => [(v, Term f v b)] - -> Term f v a - -> Term f v a -substsInheritAnnotation replacements body = - foldr (uncurry substInheritAnnotation) body (reverse replacements) - --- | `substs [(t1,v1), (t2,v2), ...] body` performs multiple simultaneous --- substitutions, avoiding capture -substs - :: (Foldable f, Functor f, Var v) - => [(v, Term f v a)] - -> Term f v a - -> Term f v a -substs replacements body = foldr (uncurry subst) body (reverse replacements) - --- Count the number times the given variable appears free in the term -occurrences :: (Foldable f, Var v) => v -> Term f v a -> Int -occurrences v t | not (v `isFreeIn` t) = 0 -occurrences v t = case out t of - Var v2 -> if v == v2 then 1 else 0 - Cycle t -> occurrences v t - Abs v2 t -> if v == v2 then 0 else occurrences v t - Tm t -> foldl' (\s t -> s + occurrences v t) 0 $ Foldable.toList t - -rebuildUp :: (Ord v, Foldable f, Functor f) - => (f (Term f v a) -> f (Term f v a)) - -> Term f v a - -> Term f v a -rebuildUp f (Term _ ann body) = case body of - Var v -> annotatedVar ann v - Cycle body -> cycle' ann (rebuildUp f body) - Abs x e -> abs' ann x (rebuildUp f e) - Tm body -> tm' ann (f $ fmap (rebuildUp f) body) - -rebuildUp' :: (Ord v, Foldable f, Functor f) - => (Term f v a -> Term f v a) - -> Term f v a - -> Term f v a -rebuildUp' f (Term _ ann body) = case body of - Var v -> f (annotatedVar ann v) - Cycle body -> f $ cycle' ann (rebuildUp' f body) - Abs x e -> f $ abs' ann x (rebuildUp' f e) - Tm body -> f $ tm' ann (fmap (rebuildUp' f) body) - -freeVarOccurrences :: (Traversable f, Ord v) => Set v -> Term f v a -> [(v, a)] -freeVarOccurrences except t = - [ (v, a) | (v,a) <- go $ annotateBound t, not (Set.member v except) ] - where - go e = case out e of 
- Var v -> if Set.member v (snd $ annotation e) - then [] - else [(v, fst $ annotation e)] - Cycle body -> go body - Abs _ body -> go body - Tm body -> foldMap go body - -foreachSubterm - :: (Traversable f, Applicative g, Ord v) - => (Term f v a -> g b) - -> Term f v a - -> g [b] -foreachSubterm f e = case out e of - Var _ -> pure <$> f e - Cycle body -> (:) <$> f e <*> foreachSubterm f body - Abs _ body -> (:) <$> f e <*> foreachSubterm f body - Tm body -> - (:) - <$> f e - <*> (join . Foldable.toList <$> traverse (foreachSubterm f) body) - -subterms :: (Ord v, Traversable f) => Term f v a -> [Term f v a] -subterms t = runIdentity $ foreachSubterm pure t - --- | `visit f t` applies an effectful function to each subtree of --- `t` and sequences the results. When `f` returns `Nothing`, `visit` --- descends into the children of the current subtree. When `f` returns --- `Just t2`, `visit` replaces the current subtree with `t2`. Thus: --- `visit (const Nothing) t == pure t` and --- `visit (const (Just (pure t2))) t == pure t2` -visit - :: (Traversable f, Applicative g, Ord v) - => (Term f v a -> Maybe (g (Term f v a))) - -> Term f v a - -> g (Term f v a) -visit f t = flip fromMaybe (f t) $ case out t of - Var _ -> pure t - Cycle body -> cycle' (annotation t) <$> visit f body - Abs x e -> abs' (annotation t) x <$> visit f e - Tm body -> tm' (annotation t) <$> traverse (visit f) body - --- | Apply an effectful function to an ABT tree top down, sequencing the results. -visit' :: (Traversable f, Applicative g, Monad g, Ord v) - => (f (Term f v a) -> g (f (Term f v a))) - -> Term f v a - -> g (Term f v a) -visit' f t = case out t of - Var _ -> pure t - Cycle body -> cycle' (annotation t) <$> visit' f body - Abs x e -> abs' (annotation t) x <$> visit' f e - Tm body -> f body >>= (fmap (tm' (annotation t)) . traverse (visit' f)) - --- | `visit` specialized to the `Identity` effect. 
-visitPure :: (Traversable f, Ord v) - => (Term f v a -> Maybe (Term f v a)) -> Term f v a -> Term f v a -visitPure f = runIdentity . visit (fmap pure . f) - -rewriteDown :: (Traversable f, Ord v) - => (Term f v a -> Term f v a) - -> Term f v a - -> Term f v a -rewriteDown f t = let t' = f t in case out t' of - Var _ -> t' - Cycle body -> cycle' (annotation t) (rewriteDown f body) - Abs x e -> abs' (annotation t) x (rewriteDown f e) - Tm body -> tm' (annotation t) (rewriteDown f `fmap` body) - -data Subst f v a = - Subst { freshen :: forall m v' . Monad m => (v -> m v') -> m v' - , bind :: Term f v a -> Term f v a - , bindInheritAnnotation :: forall b . Term f v b -> Term f v a - , variable :: v } - -unabs1 :: (Foldable f, Functor f, Var v) => Term f v a -> Maybe (Subst f v a) -unabs1 (Term _ _ (Abs v body)) = Just (Subst freshen bind bindInheritAnnotation v) where - freshen f = f v - bind x = subst v x body - bindInheritAnnotation x = substInheritAnnotation v x body -unabs1 _ = Nothing - -unabs :: Term f v a -> ([v], Term f v a) -unabs (Term _ _ (Abs hd body)) = - let (tl, body') = unabs body in (hd : tl, body') -unabs t = ([], t) - -reabs :: Ord v => [v] -> Term f v () -> Term f v () -reabs vs t = foldr abs t vs - -transform :: (Ord v, Foldable g, Functor f) - => (forall a. 
f a -> g a) -> Term f v a -> Term g v a -transform f tm = case out tm of - Var v -> annotatedVar (annotation tm) v - Abs v body -> abs' (annotation tm) v (transform f body) - Tm subterms -> - let subterms' = fmap (transform f) subterms - in tm' (annotation tm) (f subterms') - Cycle body -> cycle' (annotation tm) (transform f body) - --- Rebuild the tree annotations upward, starting from the leaves, --- using the Monoid to choose the annotation at intermediate nodes -reannotateUp :: (Ord v, Foldable f, Functor f, Monoid b) - => (Term f v a -> b) - -> Term f v a - -> Term f v (a, b) -reannotateUp g t = case out t of - Var v -> annotatedVar (annotation t, g t) v - Cycle body -> - let body' = reannotateUp g body - in cycle' (annotation t, snd (annotation body')) body' - Abs v body -> - let body' = reannotateUp g body - in abs' (annotation t, snd (annotation body')) v body' - Tm body -> - let - body' = reannotateUp g <$> body - ann = g t <> foldMap (snd . annotation) body' - in tm' (annotation t, ann) body' - --- Find all subterms that match a predicate. Prune the search for speed. --- (Some patterns of pruning can cut the complexity of the search.) 
-data FindAction x = Found x | Prune | Continue deriving Show -find :: (Ord v, Foldable f, Functor f) - => (Term f v a -> FindAction x) - -> Term f v a - -> [x] -find p t = case p t of - Found x -> x : go - Prune -> [] - Continue -> go - where go = case out t of - Var _ -> [] - Cycle body -> Unison.ABT.find p body - Abs _ body -> Unison.ABT.find p body - Tm body -> Foldable.concat (Unison.ABT.find p <$> body) - -find' :: (Ord v, Foldable f, Functor f) - => (Term f v a -> Bool) - -> Term f v a - -> [Term f v a] -find' p = Unison.ABT.find (\t -> if p t then Found t else Continue) - -instance (Foldable f, Functor f, Eq1 f, Var v) => Eq (Term f v a) where - -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable - t1 == t2 = go (out t1) (out t2) where - go (Var v) (Var v2) | v == v2 = True - go (Cycle t1) (Cycle t2) = t1 == t2 - go (Abs v1 body1) (Abs v2 body2) = - if v1 == v2 then body1 == body2 - else let v3 = freshInBoth body1 body2 v1 - in rename v1 v3 body1 == rename v2 v3 body2 - go (Tm f1) (Tm f2) = f1 ==# f2 - go _ _ = False - -instance (Foldable f, Functor f, Ord1 f, Var v) => Ord (Term f v a) where - -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable - t1 `compare` t2 = go (out t1) (out t2) where - go (Var v) (Var v2) = v `compare` v2 - go (Cycle t1) (Cycle t2) = t1 `compare` t2 - go (Abs v1 body1) (Abs v2 body2) = - if v1 == v2 then body1 `compare` body2 - else let v3 = freshInBoth body1 body2 v1 - in rename v1 v3 body1 `compare` rename v2 v3 body2 - go (Tm f1) (Tm f2) = compare1 f1 f2 - go t1 t2 = tag t1 `compare` tag t2 - tag (Var _) = 0 :: Word - tag (Tm _) = 1 - tag (Abs _ _) = 2 - tag (Cycle _) = 3 - -components :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] -components = Components.components freeVars - --- Converts to strongly connected components while preserving the --- order of definitions. Satisfies `join (orderedComponents bs) == bs`. 
-orderedComponents' :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] -orderedComponents' tms = go [] Set.empty tms - where - go [] _ [] = [] - go [] deps (hd:rem) = go [hd] (deps <> freeVars (snd hd)) rem - go cur deps rem = case findIndex isDep rem of - Nothing -> reverse cur : let (hd,tl) = splitAt 1 rem - in go hd (depsFor hd) tl - Just i -> go (reverse newMembers ++ cur) deps' (drop (i+1) rem) - where deps' = deps <> depsFor newMembers - newMembers = take (i+1) rem - where - depsFor = foldMap (freeVars . snd) - isDep (v, _) = Set.member v deps - --- Like `orderedComponents'`, but further break up cycles and move --- cyclic subcycles before other components in the same cycle. --- Tweak suggested by @aryairani. --- --- Example: given `[[x],[ping,r,s,pong]]`, where `ping` and `pong` --- are mutually recursive but `r` and `s` are uninvolved, this produces: --- `[[x], [ping,pong], [r], [s]]`. -orderedComponents :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] -orderedComponents bs0 = tweak =<< orderedComponents' bs0 where - tweak :: Var v => [(v,Term f v a)] -> [[(v,Term f v a)]] - tweak bs@(_:_:_) = case takeWhile isCyclic (components bs) of - [] -> [bs] - cycles -> cycles <> orderedComponents rest - where - rest = [ (v,b) | (v,b) <- bs, Set.notMember v cycleVars ] - cycleVars = Set.fromList (fst <$> join cycles) - tweak bs = [bs] -- any cycle with < 2 bindings is left alone - isCyclic [(v,b)] = Set.member v (freeVars b) - isCyclic bs = length bs > 1 - --- Hash a strongly connected component and sort its definitions into a canonical order. 
-hashComponent :: - (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) - => Map.Map v (Term f v a) -> (h, [(v, Term f v a)]) -hashComponent byName = let - ts = Map.toList byName - embeds = [ (v, void (transform Embed t)) | (v,t) <- ts ] - vs = fst <$> ts - tms = [ (v, absCycle vs (tm $ Component (snd <$> embeds) (var v))) | v <- vs ] - hashed = [ ((v,t), hash t) | (v,t) <- tms ] - sortedHashed = sortOn snd hashed - overallHash = Hashable.accumulate (Hashable.Hashed . snd <$> sortedHashed) - in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) - --- Group the definitions into strongly connected components and hash --- each component. Substitute the hash of each component into subsequent --- components (using the `termFromHash` function). Requires that the --- overall component has no free variables. -hashComponents - :: (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) - => (h -> Word64 -> Word64 -> Term f v ()) - -> Map.Map v (Term f v a) - -> [(h, [(v, Term f v a)])] -hashComponents termFromHash termsByName = let - bound = Set.fromList (Map.keys termsByName) - escapedVars = Set.unions (freeVars <$> Map.elems termsByName) `Set.difference` bound - sccs = components (Map.toList termsByName) - go _ [] = [] - go prevHashes (component : rest) = let - sub = substsInheritAnnotation (Map.toList prevHashes) - (h, sortedComponent) = hashComponent $ Map.fromList [ (v, sub t) | (v, t) <- component ] - size = fromIntegral (length sortedComponent) - curHashes = Map.fromList [ (v, termFromHash h i size) | ((v, _),i) <- sortedComponent `zip` [0..]] - newHashes = prevHashes `Map.union` curHashes - newHashesL = Map.toList newHashes - sortedComponent' = [ (v, substsInheritAnnotation newHashesL t) | (v, t) <- sortedComponent ] - in (h, sortedComponent') : go newHashes rest - in if Set.null escapedVars then go Map.empty sccs - else error $ "can't hashComponents if bindings have free 
variables:\n " - ++ show (map show (Set.toList escapedVars)) - ++ "\n " ++ show (map show (Map.keys termsByName)) - --- Implementation detail of hashComponent -data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) - -instance (Hashable1 f, Functor f) => Hashable1 (Component f) where - hash1 hashCycle hash c = case c of - Component as a -> let - (hs, hash) = hashCycle as - toks = Hashable.Hashed <$> hs - in Hashable.accumulate $ (Hashable.Tag 1 : toks) ++ [Hashable.Hashed (hash a)] - Embed fa -> Hashable.hash1 hashCycle hash fa - --- | We ignore annotations in the `Term`, as these should never affect the --- meaning of the term. -hash :: forall f v a h . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) - => Term f v a -> h -hash = hash' [] where - hash' :: [Either [v] v] -> Term f v a -> h - hash' env (Term _ _ t) = case t of - Var v -> maybe die hashInt ind - where lookup (Left cycle) = v `elem` cycle - lookup (Right v') = v == v' - ind = findIndex lookup env - hashInt :: Int -> h - hashInt i = Hashable.accumulate [Hashable.Nat $ fromIntegral i] - die = error $ "unknown var in environment: " ++ show v - ++ " environment = " ++ show env - Cycle (AbsN' vs t) -> hash' (Left vs : env) t - Cycle t -> hash' env t - Abs v t -> hash' (Right v : env) t - Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t - - hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) - hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = - let - permute p xs = case Vector.fromList xs of xs -> map (xs !) p - hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] ts) - pt = fst <$> sortOn snd hashed - (p,ts') = unzip pt - in case map Right (permute p cycle) ++ envTl of - env -> (map (hash' env) ts', hash' env) - hashCycle env ts = (map (hash' env) ts, hash' env) - --- | Use the `hash` function to efficiently remove duplicates from the list, preserving order. -distinct :: forall f v h a proxy . 
(Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) - => proxy h - -> [Term f v a] -> [Term f v a] -distinct _ ts = fst <$> sortOn snd m - where m = Map.elems (Map.fromList (hashes `zip` (ts `zip` [0 :: Int .. 1]))) - hashes = map hash ts :: [h] - --- | Use the `hash` function to remove elements from `t1s` that exist in `t2s`, preserving order. -subtract :: forall f v h a proxy . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) - => proxy h - -> [Term f v a] -> [Term f v a] -> [Term f v a] -subtract _ t1s t2s = - let skips = Set.fromList (map hash t2s :: [h]) - in filter (\t -> Set.notMember (hash t) skips) t1s - -instance (Show1 f, Show v) => Show (Term f v a) where - -- annotations not shown - showsPrec p (Term _ _ out) = case out of - Var v -> showParen (p>=9) $ \x -> "Var " ++ show v ++ x - Cycle body -> ("Cycle " ++) . showsPrec p body - Abs v body -> showParen True $ (show v ++) . showString ". " . showsPrec p body - Tm f -> showsPrec1 p f diff --git a/unison-core/src/Unison/ABT/Normalized.hs b/unison-core/src/Unison/ABT/Normalized.hs deleted file mode 100644 index b5e606c83f..0000000000 --- a/unison-core/src/Unison/ABT/Normalized.hs +++ /dev/null @@ -1,134 +0,0 @@ -{-# language GADTs #-} -{-# language RankNTypes #-} -{-# language ViewPatterns #-} -{-# language DeriveFunctor #-} -{-# language PatternGuards #-} -{-# language DeriveFoldable #-} -{-# language PatternSynonyms #-} -{-# language DeriveTraversable #-} - -{-# language UndecidableInstances #-} -{-# language QuantifiedConstraints #-} - -module Unison.ABT.Normalized - ( ABT(..) 
- , Term(.., TAbs, TTm, TAbss) - , renames - , rename - , transform - ) - where - -import Data.Bifunctor -import Data.Bifoldable --- import Data.Bitraversable - -import Data.Set (Set) -import qualified Data.Set as Set - -import Data.Map.Strict (Map) -import qualified Data.Map.Strict as Map - -import Unison.ABT (Var(..)) - --- ABTs with support for 'normalized' structure where only variables --- may occur at some positions. This is accomplished by passing the --- variable type to the base functor. -data ABT f v - = Abs v (Term f v) - | Tm (f v (Term f v)) - -data Term f v = Term - { freeVars :: Set v - , out :: ABT f v - } - -instance (forall a b. Show a => Show b => Show (f a b), Show v) - => Show (ABT f v) - where - showsPrec p a = showParen (p >= 9) $ case a of - Abs v tm - -> showString "Abs " . showsPrec 10 v - . showString " " . showsPrec 10 tm - Tm e -> showString "Tm " . showsPrec 10 e - -instance (forall a b. Show a => Show b => Show (f a b), Show v) - => Show (Term f v) - where - showsPrec p (Term _ e) - = showParen (p >= 9) $ showString "Term " . showsPrec 10 e - -pattern TAbs :: Var v => v -> Term f v -> Term f v -pattern TAbs u bd <- Term _ (Abs u bd) - where TAbs u bd = Term (Set.delete u (freeVars bd)) (Abs u bd) - -pattern TTm :: (Var v, Bifoldable f) => f v (Term f v) -> Term f v -pattern TTm bd <- Term _ (Tm bd) - where TTm bd = Term (bifoldMap Set.singleton freeVars bd) (Tm bd) - -{-# complete TAbs, TTm #-} - -unabss :: Var v => Term f v -> ([v], Term f v) -unabss (TAbs v (unabss -> (vs, bd))) = (v:vs, bd) -unabss bd = ([], bd) - -pattern TAbss :: Var v => [v] -> Term f v -> Term f v -pattern TAbss vs bd <- (unabss -> (vs, bd)) - where TAbss vs bd = foldr TAbs bd vs - -{-# complete TAbss #-} - --- Simultaneous variable renaming. --- --- subvs0 counts the number of variables being renamed to a particular --- variable --- --- rnv0 is the variable renaming map. 
-renames - :: (Var v, Ord v, Bifunctor f, Bifoldable f) - => Map v Int -> Map v v -> Term f v -> Term f v -renames subvs0 rnv0 tm = case tm of - TAbs u body - | not $ Map.null rnv' -> TAbs u' (renames subvs' rnv' body) - where - rnv' = Map.alter (const $ adjustment) u rnv - -- if u is in the set of variables we're substituting in, it - -- needs to be renamed to avoid capturing things. - u' | u `Map.member` subvs = freshIn (fvs `Set.union` Map.keysSet subvs) u - | otherwise = u - - -- if u needs to be renamed to avoid capturing subvs - -- and u actually occurs in the body, then add it to - -- the substitutions - (adjustment, subvs') - | u /= u' && u `Set.member` fvs = (Just u', Map.insertWith (+) u' 1 subvs) - | otherwise = (Nothing, subvs) - - TTm body - | not $ Map.null rnv - -> TTm $ bimap (\u -> Map.findWithDefault u u rnv) (renames subvs rnv) body - - _ -> tm - where - fvs = freeVars tm - - -- throw out irrelevant renamings - rnv = Map.restrictKeys rnv0 fvs - - -- decrement the variable usage counts for the renamings we threw away - subvs = Map.foldl' decrement subvs0 $ Map.withoutKeys rnv0 fvs - decrement sv v = Map.update drop v sv - drop n | n <= 1 = Nothing - | otherwise = Just (n-1) - -rename - :: (Var v, Ord v, Bifunctor f, Bifoldable f) - => v -> v -> Term f v -> Term f v -rename old new = renames (Map.singleton new 1) (Map.singleton old new) - -transform - :: (Var v, Bifunctor g, Bifoldable f, Bifoldable g) - => (forall a b. f a b -> g a b) - -> Term f v -> Term g v -transform phi (TTm body) = TTm . 
second (transform phi) $ phi body -transform phi (TAbs u body) = TAbs u $ transform phi body diff --git a/unison-core/src/Unison/DataDeclaration.hs b/unison-core/src/Unison/DataDeclaration.hs deleted file mode 100644 index 8f6bad3f67..0000000000 --- a/unison-core/src/Unison/DataDeclaration.hs +++ /dev/null @@ -1,413 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE DeriveAnyClass #-} -{-# Language DeriveFoldable #-} -{-# Language DeriveTraversable #-} -{-# Language OverloadedStrings #-} -{-# Language PatternSynonyms #-} -{-# Language ViewPatterns #-} - -module Unison.DataDeclaration where - -import Unison.Prelude - -import Control.Lens (_3, over) -import Control.Monad.State (evalState) - -import Data.Bifunctor (first, second, bimap) -import qualified Unison.Util.Relation as Rel -import Unison.Hash ( Hash ) -import qualified Data.Map as Map -import qualified Data.Set as Set -import Prelude hiding ( cycle ) -import Prelude.Extras ( Show1 ) -import qualified Unison.ABT as ABT -import Unison.Hashable ( Accumulate - , Hashable1 - ) -import qualified Unison.Hashable as Hashable -import qualified Unison.Name as Name -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import qualified Unison.Reference.Util as Reference.Util -import qualified Unison.Referent as Referent -import qualified Unison.Term as Term -import Unison.Term ( Term ) -import Unison.Type ( Type ) -import qualified Unison.Type as Type -import Unison.Var ( Var ) -import qualified Unison.Var as Var -import Unison.Names3 (Names0) -import qualified Unison.Names3 as Names -import qualified Unison.Pattern as Pattern -import qualified Unison.ConstructorType as CT - -type ConstructorId = Term.ConstructorId - -type Decl v a = Either (EffectDeclaration v a) (DataDeclaration v a) - -data DeclOrBuiltin v a = - Builtin CT.ConstructorType | Decl (Decl v a) - deriving (Eq, Show) - -asDataDecl :: Decl v a -> DataDeclaration v a -asDataDecl = either toDataDecl id - -declDependencies 
:: Ord v => Decl v a -> Set Reference -declDependencies = either (dependencies . toDataDecl) dependencies - -constructorType :: Decl v a -> CT.ConstructorType -constructorType = \case - Left{} -> CT.Effect - Right{} -> CT.Data - -data Modifier = Structural | Unique Text -- | Opaque (Set Reference) - deriving (Eq, Ord, Show) - -data DataDeclaration v a = DataDeclaration { - modifier :: Modifier, - annotation :: a, - bound :: [v], - constructors' :: [(a, v, Type v a)] -} deriving (Eq, Show, Functor) - -newtype EffectDeclaration v a = EffectDeclaration { - toDataDecl :: DataDeclaration v a -} deriving (Eq,Show,Functor) - -withEffectDecl - :: (DataDeclaration v a -> DataDeclaration v' a') - -> (EffectDeclaration v a -> EffectDeclaration v' a') -withEffectDecl f e = EffectDeclaration (f . toDataDecl $ e) - -withEffectDeclM :: Functor f - => (DataDeclaration v a -> f (DataDeclaration v' a')) - -> EffectDeclaration v a - -> f (EffectDeclaration v' a') -withEffectDeclM f = fmap EffectDeclaration . f . toDataDecl - -generateConstructorRefs - :: (Reference -> ConstructorId -> Reference) - -> Reference.Id - -> Int - -> [(ConstructorId, Reference)] -generateConstructorRefs hashCtor rid n = - (\i -> (i, hashCtor (Reference.DerivedId rid) i)) <$> [0 .. n] - -generateRecordAccessors - :: (Semigroup a, Var v) - => [(v, a)] - -> v - -> Reference - -> [(v, Term v a)] -generateRecordAccessors fields typename typ = - join [ tm t i | (t, i) <- fields `zip` [(0::Int)..] 
] - where - argname = Var.uncapitalize typename - tm (fname, ann) i = - [(Var.namespaced [typename, fname], get), - (Var.namespaced [typename, fname, Var.named "set"], set), - (Var.namespaced [typename, fname, Var.named "modify"], modify)] - where - -- example: `point -> case point of Point x _ -> x` - get = Term.lam ann argname $ Term.match ann - (Term.var ann argname) - [Term.MatchCase pat Nothing rhs] - where - pat = Pattern.Constructor ann typ 0 cargs - cargs = [ if j == i then Pattern.Var ann else Pattern.Unbound ann - | (_, j) <- fields `zip` [0..]] - rhs = ABT.abs' ann fname (Term.var ann fname) - -- example: `x point -> case point of Point _ y -> Point x y` - set = Term.lam' ann [fname', argname] $ Term.match ann - (Term.var ann argname) - [Term.MatchCase pat Nothing rhs] - where - fname' = Var.named . Var.name $ - Var.freshIn (Set.fromList $ [argname] <> (fst <$> fields)) fname - pat = Pattern.Constructor ann typ 0 cargs - cargs = [ if j == i then Pattern.Unbound ann else Pattern.Var ann - | (_, j) <- fields `zip` [0..]] - rhs = foldr (ABT.abs' ann) (Term.constructor ann typ 0 `Term.apps'` vargs) - [ f | ((f, _), j) <- fields `zip` [0..], j /= i ] - vargs = [ if j == i then Term.var ann fname' else Term.var ann v - | ((v, _), j) <- fields `zip` [0..]] - -- example: `f point -> case point of Point x y -> Point (f x) y` - modify = Term.lam' ann [fname', argname] $ Term.match ann - (Term.var ann argname) - [Term.MatchCase pat Nothing rhs] - where - fname' = Var.named . 
Var.name $ - Var.freshIn (Set.fromList $ [argname] <> (fst <$> fields)) - (Var.named "f") - pat = Pattern.Constructor ann typ 0 cargs - cargs = replicate (length fields) $ Pattern.Var ann - rhs = foldr (ABT.abs' ann) (Term.constructor ann typ 0 `Term.apps'` vargs) - (fst <$> fields) - vargs = [ if j == i - then Term.apps' (Term.var ann fname') [Term.var ann v] - else Term.var ann v - | ((v, _), j) <- fields `zip` [0..]] - --- Returns references to the constructors, --- along with the terms for those references and their types. -constructorTerms - :: (Reference -> ConstructorId -> Reference) - -> (a -> Reference -> ConstructorId -> Term v a) - -> Reference.Id - -> DataDeclaration v a - -> [(Reference.Id, Term v a, Type v a)] -constructorTerms hashCtor f rid dd = - (\((a, _, t), (i, re@(Reference.DerivedId r))) -> (r, f a re i, t)) <$> zip - (constructors' dd) - (generateConstructorRefs hashCtor rid (length $ constructors dd)) - -dataConstructorTerms - :: Ord v - => Reference.Id - -> DataDeclaration v a - -> [(Reference.Id, Term v a, Type v a)] -dataConstructorTerms = constructorTerms Term.hashConstructor Term.constructor - -effectConstructorTerms - :: Ord v - => Reference.Id - -> EffectDeclaration v a - -> [(Reference.Id, Term v a, Type v a)] -effectConstructorTerms rid ed = - constructorTerms Term.hashRequest Term.request rid $ toDataDecl ed - -constructorTypes :: DataDeclaration v a -> [Type v a] -constructorTypes = (snd <$>) . constructors - -declFields :: Var v => Decl v a -> Either [Int] [Int] -declFields = bimap cf cf . first toDataDecl - where - cf = fmap fields . 
constructorTypes - fields (Type.ForallsNamed' _ ty) = fields ty - fields (Type.Arrows' spine) = length spine - 1 - fields _ = 0 - -typeOfConstructor :: DataDeclaration v a -> ConstructorId -> Maybe (Type v a) -typeOfConstructor dd i = constructorTypes dd `atMay` i - -constructors :: DataDeclaration v a -> [(v, Type v a)] -constructors (DataDeclaration _ _ _ ctors) = [(v,t) | (_,v,t) <- ctors ] - -constructorVars :: DataDeclaration v a -> [v] -constructorVars dd = fst <$> constructors dd - -constructorNames :: Var v => DataDeclaration v a -> [Text] -constructorNames dd = Var.name <$> constructorVars dd - -declConstructorReferents :: Reference.Id -> Decl v a -> [Referent.Id] -declConstructorReferents rid decl = - [ Referent.Con' rid i ct | i <- constructorIds (asDataDecl decl) ] - where ct = constructorType decl - -constructorIds :: DataDeclaration v a -> [Int] -constructorIds dd = [0 .. length (constructors dd) - 1] - --- | All variables mentioned in the given data declaration. --- Includes both term and type variables, both free and bound. -allVars :: Ord v => DataDeclaration v a -> Set v -allVars (DataDeclaration _ _ bound ctors) = Set.unions $ - Set.fromList bound : [ Set.insert v (Set.fromList $ ABT.allVars tp) | (_,v,tp) <- ctors ] - --- | All variables mentioned in the given declaration. --- Includes both term and type variables, both free and bound. -allVars' :: Ord v => Decl v a -> Set v -allVars' = allVars . 
either toDataDecl id - -bindNames :: Var v - => Set v - -> Names0 - -> DataDeclaration v a - -> Names.ResolutionResult v a (DataDeclaration v a) -bindNames keepFree names (DataDeclaration m a bound constructors) = do - constructors <- for constructors $ \(a, v, ty) -> - (a,v,) <$> Type.bindNames keepFree names ty - pure $ DataDeclaration m a bound constructors - -dependencies :: Ord v => DataDeclaration v a -> Set Reference -dependencies dd = - Set.unions (Type.dependencies <$> constructorTypes dd) - -third :: (a -> b) -> (x,y,a) -> (x,y,b) -third f (x,y,a) = (x, y, f a) - --- implementation of dataDeclToNames and effectDeclToNames -toNames0 :: Var v => CT.ConstructorType -> v -> Reference.Id -> DataDeclaration v a -> Names0 -toNames0 ct typeSymbol (Reference.DerivedId -> r) dd = - -- constructor names - foldMap names (constructorVars dd `zip` [0 ..]) - -- name of the type itself - <> Names.names0 mempty (Rel.singleton (Name.fromVar typeSymbol) r) - where - names (ctor, i) = - Names.names0 (Rel.singleton (Name.fromVar ctor) (Referent.Con r i ct)) mempty - -dataDeclToNames :: Var v => v -> Reference.Id -> DataDeclaration v a -> Names0 -dataDeclToNames = toNames0 CT.Data - -effectDeclToNames :: Var v => v -> Reference.Id -> EffectDeclaration v a -> Names0 -effectDeclToNames typeSymbol r ed = toNames0 CT.Effect typeSymbol r $ toDataDecl ed - -dataDeclToNames' :: Var v => (v, (Reference.Id, DataDeclaration v a)) -> Names0 -dataDeclToNames' (v,(r,d)) = dataDeclToNames v r d - -effectDeclToNames' :: Var v => (v, (Reference.Id, EffectDeclaration v a)) -> Names0 -effectDeclToNames' (v, (r, d)) = effectDeclToNames v r d - -mkEffectDecl' - :: Modifier -> a -> [v] -> [(a, v, Type v a)] -> EffectDeclaration v a -mkEffectDecl' m a b cs = EffectDeclaration (DataDeclaration m a b cs) - -mkEffectDecl :: Modifier -> [v] -> [(v, Type v ())] -> EffectDeclaration v () -mkEffectDecl m b cs = mkEffectDecl' m () b $ map (\(v, t) -> ((), v, t)) cs - -mkDataDecl' - :: Modifier -> a -> [v] 
-> [(a, v, Type v a)] -> DataDeclaration v a -mkDataDecl' = DataDeclaration - -mkDataDecl :: Modifier -> [v] -> [(v, Type v ())] -> DataDeclaration v () -mkDataDecl m b cs = mkDataDecl' m () b $ map (\(v,t) -> ((),v,t)) cs - -constructorArities :: DataDeclaration v a -> [Int] -constructorArities (DataDeclaration _ _a _bound ctors) = - Type.arity . (\(_,_,t) -> t) <$> ctors - -data F a - = Type (Type.F a) - | LetRec [a] a - | Constructors [a] - | Modified Modifier a - deriving (Functor, Foldable, Show, Show1) - -instance Hashable1 F where - hash1 hashCycle hash e = - let (tag, hashed) = (Hashable.Tag, Hashable.Hashed) - -- Note: start each layer with leading `2` byte, to avoid collisions with - -- terms, which start each layer with leading `1`. See `Hashable1 Term.F` - in Hashable.accumulate $ tag 2 : case e of - Type t -> [tag 0, hashed $ Hashable.hash1 hashCycle hash t] - LetRec bindings body -> - let (hashes, hash') = hashCycle bindings - in [tag 1] ++ map hashed hashes ++ [hashed $ hash' body] - Constructors cs -> - let (hashes, _) = hashCycle cs - in tag 2 : map hashed hashes - Modified m t -> - [tag 3, Hashable.accumulateToken m, hashed $ hash t] - -instance Hashable.Hashable Modifier where - tokens Structural = [Hashable.Tag 0] - tokens (Unique txt) = [Hashable.Tag 1, Hashable.Text txt] - -{- - type UpDown = Up | Down - - type List a = Nil | Cons a (List a) - - type Ping p = Ping (Pong p) - type Pong p = Pong (Ping p) - - type Foo a f = Foo Int (Bar a) - type Bar a f = Bar Long (Foo a) --} - -hash :: (Eq v, Var v, Ord h, Accumulate h) - => [(v, ABT.Term F v ())] -> [(v, h)] -hash recursiveDecls = zip (fst <$> recursiveDecls) hashes where - hashes = ABT.hash <$> toLetRec recursiveDecls - -toLetRec :: Ord v => [(v, ABT.Term F v ())] -> [ABT.Term F v ()] -toLetRec decls = do1 <$> vs - where - (vs, decls') = unzip decls - -- we duplicate this letrec once (`do1`) - -- for each of the mutually recursive types - do1 v = ABT.cycle (ABT.absChain vs . 
ABT.tm $ LetRec decls' (ABT.var v)) - -unsafeUnwrapType :: (Var v) => ABT.Term F v a -> Type v a -unsafeUnwrapType typ = ABT.transform f typ - where f (Type t) = t - f _ = error $ "Tried to unwrap a type that wasn't a type: " ++ show typ - -toABT :: Var v => DataDeclaration v () -> ABT.Term F v () -toABT dd = ABT.tm $ Modified (modifier dd) dd' - where - dd' = ABT.absChain (bound dd) $ ABT.cycle - (ABT.absChain - (fst <$> constructors dd) - (ABT.tm . Constructors $ ABT.transform Type <$> constructorTypes dd)) - -updateDependencies :: Ord v => Map Reference Reference -> Decl v a -> Decl v a -updateDependencies typeUpdates decl = back $ dataDecl - { constructors' = over _3 (Type.updateDependencies typeUpdates) - <$> constructors' dataDecl - } - where - dataDecl = either toDataDecl id decl - back = either (const $ Left . EffectDeclaration) (const Right) decl - - --- This converts `Reference`s it finds that are in the input `Map` --- back to free variables -unhashComponent - :: forall v a. Var v => Map Reference (Decl v a) -> Map Reference (v, Decl v a) -unhashComponent m - = let - usedVars = foldMap allVars' m - m' :: Map Reference (v, Decl v a) - m' = evalState (Map.traverseWithKey assignVar m) usedVars where - assignVar r d = (,d) <$> ABT.freshenS (Var.refNamed r) - unhash1 = ABT.rebuildUp' go - where - go e@(Type.Ref' r) = case Map.lookup r m' of - Nothing -> e - Just (v,_) -> Type.var (ABT.annotation e) v - go e = e - unhash2 (Right dd@DataDeclaration{}) = Right $ unhash3 dd - unhash2 (Left (EffectDeclaration dd)) = - Left . 
EffectDeclaration $ unhash3 dd - unhash3 dd@DataDeclaration {..} = - dd { constructors' = fmap (over _3 unhash1) constructors' } - in - second unhash2 <$> m' - --- Implementation detail of `hashDecls`, works with unannotated data decls -hashDecls0 :: (Eq v, Var v) => Map v (DataDeclaration v ()) -> [(v, Reference.Id)] -hashDecls0 decls = - let abts = toABT <$> decls - ref r = ABT.tm (Type (Type.Ref (Reference.DerivedId r))) - cs = Reference.Util.hashComponents ref abts - in [ (v, r) | (v, (r, _)) <- Map.toList cs ] - --- | compute the hashes of these user defined types and update any free vars --- corresponding to these decls with the resulting hashes --- --- data List a = Nil | Cons a (List a) --- becomes something like --- (List, #xyz, [forall a. #xyz a, forall a. a -> (#xyz a) -> (#xyz a)]) --- --- NOTE: technical limitation, this implementation gives diff results if ctors --- have the same FQN as one of the types. TODO: assert this and bomb if not --- satisfied, or else do local mangling and unmangling to ensure this doesn't --- affect the hash. 
-hashDecls - :: (Eq v, Var v) - => Map v (DataDeclaration v a) - -> Names.ResolutionResult v a [(v, Reference.Id, DataDeclaration v a)] -hashDecls decls = do - -- todo: make sure all other external references are resolved before calling this - let varToRef = hashDecls0 (void <$> decls) - varToRef' = second Reference.DerivedId <$> varToRef - decls' = bindTypes <$> decls - bindTypes dd = dd { constructors' = over _3 (Type.bindExternal varToRef') <$> constructors' dd } - typeNames0 = Names.names0 mempty - $ Rel.fromList (first Name.fromVar <$> varToRef') - -- normalize the order of the constructors based on a hash of their types - sortCtors dd = dd { constructors' = sortOn hash3 $ constructors' dd } - hash3 (_, _, typ) = ABT.hash typ :: Hash - decls' <- fmap sortCtors <$> traverse (bindNames mempty typeNames0) decls' - pure [ (v, r, dd) | (v, r) <- varToRef, Just dd <- [Map.lookup v decls'] ] diff --git a/unison-core/src/Unison/Hash.hs b/unison-core/src/Unison/Hash.hs deleted file mode 100644 index d6738540a3..0000000000 --- a/unison-core/src/Unison/Hash.hs +++ /dev/null @@ -1,109 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Hash (Hash, toBytes, base32Hex, base32Hexs, fromBase32Hex, fromBytes, unsafeFromBase32Hex, showBase32Hex, validBase32HexChars) where - -import Unison.Prelude - -import Data.ByteString.Builder (doubleBE, word64BE, int64BE, toLazyByteString) -import qualified Data.ByteArray as BA - -import qualified Crypto.Hash as CH -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL - -import qualified Unison.Hashable as H -import qualified Codec.Binary.Base32Hex as Base32Hex -import qualified Data.Text as Text -import qualified Data.Set as Set - --- | Hash which uniquely identifies a Unison type or term -newtype Hash = Hash { toBytes :: ByteString } deriving (Eq,Ord,Generic) - -instance Show Hash where - show h = take 999 $ Text.unpack (base32Hex h) - -instance H.Hashable Hash where - 
tokens h = [H.Bytes (toBytes h)] - -fromBytesImpl :: ByteString -> Hash -fromBytesImpl = fromBytes - -toBytesImpl :: Hash -> ByteString -toBytesImpl = toBytes - -instance H.Accumulate Hash where - accumulate = fromBytes . BA.convert . CH.hashFinalize . go CH.hashInit where - go :: CH.Context CH.SHA3_512 -> [H.Token Hash] -> CH.Context CH.SHA3_512 - go acc tokens = CH.hashUpdates acc (tokens >>= toBS) - toBS (H.Tag b) = [B.singleton b] - toBS (H.Bytes bs) = [encodeLength $ B.length bs, bs] - toBS (H.Int i) = BL.toChunks . toLazyByteString . int64BE $ i - toBS (H.Nat i) = BL.toChunks . toLazyByteString . word64BE $ i - toBS (H.Double d) = BL.toChunks . toLazyByteString . doubleBE $ d - toBS (H.Text txt) = - let tbytes = encodeUtf8 txt - in [encodeLength (B.length tbytes), tbytes] - toBS (H.Hashed h) = [toBytes h] - encodeLength :: Integral n => n -> B.ByteString - encodeLength = BL.toStrict . toLazyByteString . word64BE . fromIntegral - fromBytes = fromBytesImpl - toBytes = toBytesImpl - --- | Return the lowercase unpadded base32Hex encoding of this 'Hash'. --- Multibase prefix would be 'v', see https://github.com/multiformats/multibase -base32Hex :: Hash -> Text -base32Hex (Hash h) = - -- we're using an uppercase encoder that adds padding, so we drop the - -- padding and convert it to lowercase - Text.toLower . Text.dropWhileEnd (== '=') . decodeUtf8 $ - Base32Hex.encode h - -validBase32HexChars :: Set Char -validBase32HexChars = Set.fromList $ ['0' .. '9'] ++ ['a' .. 'v'] - --- | Produce a 'Hash' from a base32hex-encoded version of its binary representation -fromBase32Hex :: Text -> Maybe Hash -fromBase32Hex txt = case Base32Hex.decode (encodeUtf8 $ Text.toUpper txt <> paddingChars) of - Left (_, _rem) -> Nothing - Right h -> pure $ Hash h - where - -- The decoder we're using is a base32 uppercase decoder that expects padding, - -- so we provide it with the appropriate number of padding characters for the - -- expected hash length. 
- -- - -- The decoder requires 40 bit (8 5-bit characters) chunks, so if the number - -- of characters of the input is not a multiple of 8, we add '=' padding chars - -- until it is. - -- - -- See https://tools.ietf.org/html/rfc4648#page-8 - paddingChars :: Text - paddingChars = case Text.length txt `mod` 8 of - 0 -> "" - n -> Text.replicate (8 - n) "=" - - hashLength :: Int - hashLength = 512 - - _paddingChars :: Text - _paddingChars = case hashLength `mod` 40 of - 0 -> "" - 8 -> "======" - 16 -> "====" - 24 -> "===" - 32 -> "=" - i -> error $ "impossible hash length `mod` 40 not in {0,8,16,24,32}: " <> show i - -base32Hexs :: Hash -> String -base32Hexs = Text.unpack . base32Hex - -unsafeFromBase32Hex :: Text -> Hash -unsafeFromBase32Hex txt = - fromMaybe (error $ "invalid base32Hex value: " ++ Text.unpack txt) $ fromBase32Hex txt - -fromBytes :: ByteString -> Hash -fromBytes = Hash - -showBase32Hex :: H.Hashable t => t -> String -showBase32Hex = base32Hexs . H.accumulate' - diff --git a/unison-core/src/Unison/HashQualified'.hs b/unison-core/src/Unison/HashQualified'.hs deleted file mode 100644 index 23816e06eb..0000000000 --- a/unison-core/src/Unison/HashQualified'.hs +++ /dev/null @@ -1,126 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.HashQualified' where - -import Unison.Prelude - -import qualified Data.Text as Text -import Prelude hiding ( take ) -import Unison.Name ( Name ) -import qualified Unison.Name as Name -import Unison.NameSegment ( NameSegment ) -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import Unison.Referent ( Referent ) -import qualified Unison.Referent as Referent -import Unison.ShortHash ( ShortHash ) -import qualified Unison.ShortHash as SH -import qualified Unison.HashQualified as HQ - -data HashQualified' n = NameOnly n | HashQualified n ShortHash - deriving (Eq, Functor) - -type HQSegment = HashQualified' NameSegment - -type HashQualified = HashQualified' Name - -toHQ :: 
HashQualified' n -> HQ.HashQualified' n -toHQ = \case - NameOnly n -> HQ.NameOnly n - HashQualified n sh -> HQ.HashQualified n sh - -fromHQ :: HQ.HashQualified' n -> Maybe (HashQualified' n) -fromHQ = \case - HQ.NameOnly n -> Just $ NameOnly n - HQ.HashQualified n sh -> Just $ HashQualified n sh - HQ.HashOnly{} -> Nothing - --- Like fromHQ, but turns hashes into hash-qualified empty names -fromHQ' :: Monoid n => HQ.HashQualified' n -> HashQualified' n -fromHQ' = \case - HQ.NameOnly n -> NameOnly n - HQ.HashQualified n sh -> HashQualified n sh - HQ.HashOnly h -> HashQualified mempty h - -toName :: HashQualified' n -> n -toName = \case - NameOnly name -> name - HashQualified name _ -> name - -nameLength :: HashQualified' Name -> Int -nameLength = Text.length . toText - -take :: Int -> HashQualified' n -> HashQualified' n -take i = \case - n@(NameOnly _) -> n - HashQualified n s -> if i == 0 then NameOnly n else HashQualified n (SH.take i s) - -toNameOnly :: HashQualified' n -> HashQualified' n -toNameOnly = fromName . toName - -toHash :: HashQualified' n -> Maybe ShortHash -toHash = \case - NameOnly _ -> Nothing - HashQualified _ sh -> Just sh - -toString :: Show n => HashQualified' n -> String -toString = Text.unpack . toText - --- Parses possibly-hash-qualified into structured type. -fromText :: Text -> Maybe HashQualified -fromText t = case Text.breakOn "#" t of - (name, "" ) -> - Just $ NameOnly (Name.unsafeFromText name) -- safe bc breakOn # - (name, hash) -> - HashQualified (Name.unsafeFromText name) <$> SH.fromText hash - -unsafeFromText :: Text -> HashQualified -unsafeFromText txt = fromMaybe msg (fromText txt) where - msg = error ("HashQualified'.unsafeFromText " <> show txt) - -fromString :: String -> Maybe HashQualified -fromString = fromText . 
Text.pack - -toText :: Show n => HashQualified' n -> Text -toText = \case - NameOnly name -> Text.pack (show name) - HashQualified name hash -> Text.pack (show name) <> SH.toText hash - --- Returns the full referent in the hash. Use HQ.take to just get a prefix -fromNamedReferent :: n -> Referent -> HashQualified' n -fromNamedReferent n r = HashQualified n (Referent.toShortHash r) - --- Returns the full reference in the hash. Use HQ.take to just get a prefix -fromNamedReference :: n -> Reference -> HashQualified' n -fromNamedReference n r = HashQualified n (Reference.toShortHash r) - -fromName :: n -> HashQualified' n -fromName = NameOnly - -matchesNamedReferent :: Eq n => n -> Referent -> HashQualified' n -> Bool -matchesNamedReferent n r = \case - NameOnly n' -> n' == n - HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Referent.toShortHash r - -matchesNamedReference :: Eq n => n -> Reference -> HashQualified' n -> Bool -matchesNamedReference n r = \case - NameOnly n' -> n' == n - HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Reference.toShortHash r - --- Use `requalify hq . Referent.Ref` if you want to pass in a `Reference`. -requalify :: HashQualified -> Referent -> HashQualified -requalify hq r = case hq of - NameOnly n -> fromNamedReferent n r - HashQualified n _ -> fromNamedReferent n r - -instance Ord n => Ord (HashQualified' n) where - compare a b = case compare (toName a) (toName b) of - EQ -> compare (toHash a) (toHash b) - o -> o - -instance IsString HashQualified where - fromString = unsafeFromText . Text.pack - - -instance Show n => Show (HashQualified' n) where - show = Text.unpack . 
toText diff --git a/unison-core/src/Unison/HashQualified.hs b/unison-core/src/Unison/HashQualified.hs deleted file mode 100644 index 8fafec1e12..0000000000 --- a/unison-core/src/Unison/HashQualified.hs +++ /dev/null @@ -1,169 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.HashQualified where - -import Unison.Prelude hiding (fromString) - -import qualified Data.Text as Text -import Prelude hiding ( take ) -import Unison.Name ( Name ) -import qualified Unison.Name as Name -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import Unison.Referent ( Referent ) -import qualified Unison.Referent as Referent -import Unison.ShortHash ( ShortHash ) -import qualified Unison.ShortHash as SH -import Unison.Var ( Var ) -import qualified Unison.Var as Var - -data HashQualified' n - = NameOnly n | HashOnly ShortHash | HashQualified n ShortHash - deriving (Eq, Functor, Show) - -type HashQualified = HashQualified' Name - -stripNamespace :: Text -> HashQualified -> HashQualified -stripNamespace namespace hq = case hq of - NameOnly name -> NameOnly $ strip name - HashQualified name sh -> HashQualified (strip name) sh - ho -> ho - where - strip name = - fromMaybe name $ Name.stripNamePrefix (Name.unsafeFromText namespace) name - -toName :: HashQualified' n -> Maybe n -toName = \case - NameOnly name -> Just name - HashQualified name _ -> Just name - HashOnly _ -> Nothing - --- Sort the list of names by length of segments: smaller number of --- segments is listed first. 
NameOnly < Hash qualified < Hash only --- --- Examples: --- [foo.bar.baz, bar.baz] -> [bar.baz, foo.bar.baz] --- [#a29dj2k91, foo.bar.baz] -> [foo.bar.baz, #a29dj2k91] --- [foo.bar#abc, foo.bar] -> [foo.bar, foo.bar#abc] --- [.foo.bar, foo.bar] -> [foo.bar, .foo.bar] -sortByLength :: [HashQualified' Name] -> [HashQualified' Name] -sortByLength hs = sortOn f hs where - f (NameOnly n) = (countDots n, 0, Left n) - f (HashQualified n _h) = (countDots n, 1, Left n) - f (HashOnly h) = (maxBound, 0, Right h) - countDots n = Text.count "." (Text.dropEnd 1 (Name.toText n)) - -hasName, hasHash :: HashQualified -> Bool -hasName = isJust . toName -hasHash = isJust . toHash - -toHash :: HashQualified' n -> Maybe ShortHash -toHash = \case - NameOnly _ -> Nothing - HashQualified _ sh -> Just sh - HashOnly sh -> Just sh - --- partial: assumes either a name or hash is provided (or both) -fromNameHash :: Maybe Name -> Maybe ShortHash -> HashQualified -fromNameHash n h = case n of - Just name -> case h of - Just hash -> HashQualified name hash - Nothing -> NameOnly name - Nothing -> case h of - Just hash -> HashOnly hash - Nothing -> error "bad HQ construction" - -take :: Int -> HashQualified' n -> HashQualified' n -take i = \case - n@(NameOnly _) -> n - HashOnly s -> HashOnly (SH.take i s) - HashQualified n s -> if i == 0 then NameOnly n else HashQualified n (SH.take i s) - -toString :: Show n => HashQualified' n -> String -toString = Text.unpack . toText - -fromString :: String -> Maybe HashQualified -fromString = fromText . Text.pack - -unsafeFromString :: String -> HashQualified -unsafeFromString s = fromMaybe msg . fromString $ s where - msg = error $ "HashQualified.unsafeFromString " <> show s - --- Parses possibly-hash-qualified into structured type. --- Doesn't validate against base58 or the codebase. 
-fromText :: Text -> Maybe HashQualified -fromText t = case Text.breakOn "#" t of -- breakOn leaves the '#' on the RHS - (name, "" ) -> Just $ NameOnly (Name.unsafeFromText name) -- safe bc breakOn # - ("" , hash) -> HashOnly <$> SH.fromText hash - (name, hash) -> HashQualified (Name.unsafeFromText name) <$> SH.fromText hash - --- Won't crash as long as SH.unsafeFromText doesn't crash on any input that --- starts with '#', which is true as of the time of this writing, but not great. -unsafeFromText :: Text -> HashQualified -unsafeFromText txt = fromMaybe msg . fromText $ txt where - msg = error $ "HashQualified.unsafeFromText " <> show txt - -toText :: Show n => HashQualified' n -> Text -toText = \case - NameOnly name -> Text.pack (show name) - HashQualified name hash -> Text.pack (show name) <> SH.toText hash - HashOnly hash -> SH.toText hash - --- Returns the full referent in the hash. Use HQ.take to just get a prefix -fromNamedReferent :: n -> Referent -> HashQualified' n -fromNamedReferent n r = HashQualified n (Referent.toShortHash r) - --- Returns the full reference in the hash. Use HQ.take to just get a prefix -fromNamedReference :: n -> Reference -> HashQualified' n -fromNamedReference n r = HashQualified n (Reference.toShortHash r) - -fromReferent :: Referent -> HashQualified -fromReferent = HashOnly . Referent.toShortHash - -fromReference :: Reference -> HashQualified -fromReference = HashOnly . Reference.toShortHash - -fromPattern :: Reference -> Int -> HashQualified -fromPattern r cid = HashOnly $ Referent.patternShortHash r cid - -fromName :: n -> HashQualified' n -fromName = NameOnly - -unsafeFromVar :: Var v => v -> HashQualified -unsafeFromVar = unsafeFromText . Var.name - -fromVar :: Var v => v -> Maybe HashQualified -fromVar = fromText . Var.name - -toVar :: Var v => HashQualified -> v -toVar = Var.named . 
toText - --- todo: find this logic elsewhere and replace with call to this -matchesNamedReferent :: Name -> Referent -> HashQualified -> Bool -matchesNamedReferent n r = \case - NameOnly n' -> n' == n - HashOnly sh -> sh `SH.isPrefixOf` Referent.toShortHash r - HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Referent.toShortHash r - -matchesNamedReference :: Name -> Reference -> HashQualified -> Bool -matchesNamedReference n r = \case - NameOnly n' -> n' == n - HashOnly sh -> sh `SH.isPrefixOf` Reference.toShortHash r - HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Reference.toShortHash r - --- Use `requalify hq . Referent.Ref` if you want to pass in a `Reference`. -requalify :: HashQualified -> Referent -> HashQualified -requalify hq r = case hq of - NameOnly n -> fromNamedReferent n r - HashQualified n _ -> fromNamedReferent n r - HashOnly _ -> fromReferent r - --- this implementation shows HashOnly before the others, because None < Some. --- Flip it around carefully if HashOnly should come last. -instance Ord n => Ord (HashQualified' n) where - compare a b = case compare (toName a) (toName b) of - EQ -> compare (toHash a) (toHash b) - o -> o - ---instance Show n => Show (HashQualified' n) where --- show = Text.unpack . 
toText diff --git a/unison-core/src/Unison/Name.hs b/unison-core/src/Unison/Name.hs deleted file mode 100644 index 80bd2ddaac..0000000000 --- a/unison-core/src/Unison/Name.hs +++ /dev/null @@ -1,173 +0,0 @@ -{-# LANGUAGE GeneralizedNewtypeDeriving #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Name - ( Name(Name) - , fromString - , isPrefixOf - , joinDot - , makeAbsolute - , parent - , sortNames - , sortNamed - , sortNamed' - , stripNamePrefix - , stripPrefixes - , segments - , segments' - , suffixes - , toString - , toText - , toVar - , unqualified - , unqualified' - , unsafeFromText - , unsafeFromString - , fromSegment - , fromVar - ) -where - -import Unison.Prelude -import qualified Unison.NameSegment as NameSegment -import Unison.NameSegment ( NameSegment(NameSegment) - , segments' - ) - -import Control.Lens ( unsnoc ) -import qualified Control.Lens as Lens -import qualified Data.Text as Text -import qualified Unison.Hashable as H -import Unison.Var ( Var ) -import qualified Unison.Var as Var -import qualified Data.RFC5051 as RFC5051 -import Data.List ( sortBy, tails ) - -newtype Name = Name { toText :: Text } deriving (Eq, Ord, Monoid, Semigroup) - -sortNames :: [Name] -> [Name] -sortNames = sortNamed id - -sortNamed :: (a -> Name) -> [a] -> [a] -sortNamed by as = let - as' = [ (a, Text.unpack (toText (by a))) | a <- as ] - comp (_,s) (_,s2) = RFC5051.compareUnicode s s2 - in fst <$> sortBy comp as' - --- | Like sortNamed, but takes an additional backup comparison function if two --- names are equal. 
-sortNamed' :: (a -> Name) -> (a -> a -> Ordering) -> [a] -> [a] -sortNamed' by by2 as = let - as' = [ (a, Text.unpack (toText (by a))) | a <- as ] - comp (a,s) (a2,s2) = RFC5051.compareUnicode s s2 <> by2 a a2 - in fst <$> sortBy comp as' - -unsafeFromText :: Text -> Name -unsafeFromText t = - if Text.any (== '#') t then error $ "not a name: " <> show t else Name t - -unsafeFromString :: String -> Name -unsafeFromString = unsafeFromText . Text.pack - -toVar :: Var v => Name -> v -toVar (Name t) = Var.named t - -fromVar :: Var v => v -> Name -fromVar = unsafeFromText . Var.name - -toString :: Name -> String -toString = Text.unpack . toText - -isPrefixOf :: Name -> Name -> Bool -a `isPrefixOf` b = toText a `Text.isPrefixOf` toText b - --- stripTextPrefix a.b. a.b.c = Just c --- stripTextPrefix a.b a.b.c = Just .c; you probably don't want to do this --- stripTextPrefix x.y. a.b.c = Nothing --- stripTextPrefix "" a.b.c = undefined -_stripTextPrefix :: Text -> Name -> Maybe Name -_stripTextPrefix prefix name = - Name <$> Text.stripPrefix prefix (toText name) - --- stripNamePrefix a.b a.b.c = Just c --- stripNamePrefix a.b. a.b.c = undefined, "a.b." isn't a valid name IMO --- stripNamePrefix x.y a.b.c = Nothing, x.y isn't a prefix of a.b.c --- stripNamePrefix "" a.b.c = undefined, "" isn't a valid name IMO --- stripNamePrefix . .Nat = Just Nat -stripNamePrefix :: Name -> Name -> Maybe Name -stripNamePrefix prefix name = - Name <$> Text.stripPrefix (toText prefix <> mid) (toText name) - where - mid = if toText prefix == "." then "" else "." - --- a.b.c.d -> d -stripPrefixes :: Name -> Name -stripPrefixes = fromSegment . last . segments - -joinDot :: Name -> Name -> Name -joinDot prefix suffix = - if toText prefix == "." then Name (toText prefix <> toText suffix) - else Name (toText prefix <> "." <> toText suffix) - -unqualified :: Name -> Name -unqualified = unsafeFromText . unqualified' . toText - --- parent . 
-> Nothing --- parent + -> Nothing --- parent foo -> Nothing --- parent foo.bar -> foo --- parent foo.bar.+ -> foo.bar -parent :: Name -> Maybe Name -parent n = case unsnoc (NameSegment.toText <$> segments n) of - Nothing -> Nothing - Just ([] , _) -> Nothing - Just (init, _) -> Just $ Name (Text.intercalate "." init) - --- suffixes "" -> [] --- suffixes bar -> [bar] --- suffixes foo.bar -> [foo.bar, bar] --- suffixes foo.bar.baz -> [foo.bar.baz, bar.baz, baz] --- suffixes ".base.." -> [base.., .] -suffixes :: Name -> [Name] -suffixes (Name "") = [] -suffixes (Name n ) = fmap up . filter (not . null) . tails $ segments' n - where up ns = Name (Text.intercalate "." ns) - -unqualified' :: Text -> Text -unqualified' = last . segments' - -makeAbsolute :: Name -> Name -makeAbsolute n | toText n == "." = Name ".." - | Text.isPrefixOf "." (toText n) = n - | otherwise = Name ("." <> toText n) - -instance Show Name where - show = toString - -instance IsString Name where - fromString = unsafeFromText . Text.pack - -instance H.Hashable Name where - tokens s = [H.Text (toText s)] - -fromSegment :: NameSegment -> Name -fromSegment = unsafeFromText . NameSegment.toText - --- Smarter segmentation than `text.splitOn "."` --- e.g. split `base..` into `[base,.]` -segments :: Name -> [NameSegment] -segments (Name n) = NameSegment <$> segments' n - -instance Lens.Snoc Name Name NameSegment NameSegment where - _Snoc = Lens.prism snoc unsnoc - where - snoc :: (Name, NameSegment) -> Name - snoc (n, s) = joinDot n (fromSegment s) - unsnoc :: Name -> Either Name (Name, NameSegment) - unsnoc n@(segments -> ns) = case Lens.unsnoc (NameSegment.toText <$> ns) of - Nothing -> Left n - Just ([], _) -> Left n - Just (init, last) -> - Right (Name (Text.intercalate "." 
init), NameSegment last) diff --git a/unison-core/src/Unison/NameSegment.hs b/unison-core/src/Unison/NameSegment.hs deleted file mode 100644 index d220ebfabc..0000000000 --- a/unison-core/src/Unison/NameSegment.hs +++ /dev/null @@ -1,41 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.NameSegment where - -import Unison.Prelude - -import qualified Data.Text as Text -import qualified Unison.Hashable as H - --- Represents the parts of a name between the `.`s -newtype NameSegment = NameSegment { toText :: Text } deriving (Eq, Ord) - --- Split text into segments. A smarter version of `Text.splitOn` that handles --- the name `.` properly. -segments' :: Text -> [Text] -segments' n = go split - where - split = Text.splitOn "." n - go [] = [] - go ("" : "" : z) = "." : go z - go ("" : z) = go z - go (x : y) = x : go y - -instance H.Hashable NameSegment where - tokens s = [H.Text (toText s)] - -isEmpty :: NameSegment -> Bool -isEmpty ns = toText ns == mempty - -isPrefixOf :: NameSegment -> NameSegment -> Bool -isPrefixOf n1 n2 = Text.isPrefixOf (toText n1) (toText n2) - -toString :: NameSegment -> String -toString = Text.unpack . toText - -instance Show NameSegment where - show = Text.unpack . toText - -instance IsString NameSegment where - fromString = NameSegment . 
Text.pack - diff --git a/unison-core/src/Unison/Names2.hs b/unison-core/src/Unison/Names2.hs deleted file mode 100644 index 6c43f612f1..0000000000 --- a/unison-core/src/Unison/Names2.hs +++ /dev/null @@ -1,334 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.Names2 - ( Names0 - , Names'(Names) - , Names - , addTerm - , addType - , allReferences - , conflicts - , contains - , difference - , filter - , filterByHQs - , filterBySHs - , filterTypes - , hqName - , hqTermName - , hqTypeName - , hqTermName' - , hqTypeName' - , _hqTermName - , _hqTypeName - , _hqTermAliases - , _hqTypeAliases - , names0ToNames - , prefix0 - , restrictReferences - , refTermsNamed - , terms - , types - , termReferences - , termReferents - , typeReferences - , termsNamed - , typesNamed - , unionLeft - , unionLeftName - , namesForReference - , namesForReferent - ) -where - -import Unison.Prelude - -import qualified Data.Set as Set -import Prelude hiding (filter) -import Unison.HashQualified' (HashQualified) -import qualified Unison.HashQualified' as HQ -import Unison.Name (Name) -import qualified Unison.Name as Name -import Unison.Reference (Reference) -import qualified Unison.Reference as Reference -import Unison.Referent (Referent) -import qualified Unison.Referent as Referent -import Unison.Util.Relation (Relation) -import qualified Unison.Util.Relation as R -import qualified Unison.ShortHash as SH -import Unison.ShortHash (ShortHash) - --- This will support the APIs of both PrettyPrintEnv and the old Names. --- For pretty-printing, we need to look up names for References; they may have --- some hash-qualification, depending on the context. 
--- For parsing (both .u files and command-line args) -data Names' n = Names - { terms :: Relation n Referent - , types :: Relation n Reference - } deriving (Eq,Ord) - -type Names = Names' HashQualified -type Names0 = Names' Name - -names0ToNames :: Names0 -> Names -names0ToNames names0 = Names terms' types' where - terms' = R.map doTerm (terms names0) - types' = R.map doType (types names0) - length = numHashChars names0 - doTerm (n, r) = - if Set.size (R.lookupDom n (terms names0)) > 1 - then (HQ.take length $ HQ.fromNamedReferent n r, r) - else (HQ.NameOnly n, r) - doType (n, r) = - if Set.size (R.lookupDom n (types names0)) > 1 - then (HQ.take length $ HQ.fromNamedReference n r, r) - else (HQ.NameOnly n, r) - -termReferences, typeReferences, allReferences :: Names' n -> Set Reference -termReferences Names{..} = Set.map Referent.toReference $ R.ran terms -typeReferences Names{..} = R.ran types -allReferences n = termReferences n <> typeReferences n - -termReferents :: Names' n -> Set Referent -termReferents Names{..} = R.ran terms - -restrictReferences :: Ord n => Set Reference -> Names' n -> Names' n -restrictReferences refs Names{..} = Names terms' types' where - terms' = R.filterRan ((`Set.member` refs) . Referent.toReference) terms - types' = R.filterRan (`Set.member` refs) types - --- | Guide to unionLeft* --- Is it ok to create new aliases for parsing? --- Sure. --- --- Is it ok to create name conflicts for parsing? --- It's okay but not great. The user will have to hash-qualify to disambiguate. --- --- Is it ok to create new aliases for pretty-printing? --- Not helpful, we need to choose a name to show. --- We'll just have to choose one at random if there are aliases. --- Is it ok to create name conflicts for pretty-printing? --- Still okay but not great. The pretty-printer will have to hash-qualify --- to disambiguate. --- --- Thus, for parsing: --- unionLeftName is good if the name `n` on the left is the only `n` the --- user will want to reference. 
It allows the rhs to add aliases. --- unionLeftRef allows new conflicts but no new aliases. Lame? --- (<>) is ok for parsing if we expect to add some conflicted names, --- e.g. from history --- --- For pretty-printing: --- Probably don't want to add new aliases, unless we don't know which --- `Names` is higher priority. So if we do have a preferred `Names`, --- don't use `unionLeftName` or (<>). --- You don't want to create new conflicts either if you have a preferred --- `Names`. So in this case, don't use `unionLeftRef` either. --- I guess that leaves `unionLeft`. --- --- Not sure if the above is helpful or correct! - --- unionLeft two Names, including new aliases, but excluding new name conflicts. --- e.g. unionLeftName [foo -> #a, bar -> #a, cat -> #c] --- [foo -> #b, baz -> #c] --- = [foo -> #a, bar -> #a, baz -> #c, cat -> #c)] --- Btw, it's ok to create name conflicts for parsing environments, if you don't --- mind disambiguating. -unionLeftName :: Ord n => Names' n -> Names' n -> Names' n -unionLeftName = unionLeft' $ const . R.memberDom - --- unionLeft two Names, including new name conflicts, but excluding new aliases. --- e.g. unionLeftRef [foo -> #a, bar -> #a, cat -> #c] --- [foo -> #b, baz -> #c] --- = [foo -> #a, bar -> #a, foo -> #b, cat -> #c] -_unionLeftRef :: Ord n => Names' n -> Names' n -> Names' n -_unionLeftRef = unionLeft' $ const R.memberRan - --- unionLeft two Names, but don't create new aliases or new name conflicts. --- e.g. unionLeft [foo -> #a, bar -> #a, cat -> #c] --- [foo -> #b, baz -> #c] --- = [foo -> #a, bar -> #a, cat -> #c] -unionLeft :: Ord n => Names' n -> Names' n -> Names' n -unionLeft = unionLeft' go - where go n r acc = R.memberDom n acc || R.memberRan r acc - --- implementation detail of the above -unionLeft' - :: Ord n - => (forall a b . 
(Ord a, Ord b) => a -> b -> Relation a b -> Bool) - -> Names' n - -> Names' n - -> Names' n -unionLeft' p a b = Names terms' types' - where - terms' = foldl' go (terms a) (R.toList $ terms b) - types' = foldl' go (types a) (R.toList $ types b) - go :: (Ord a, Ord b) => Relation a b -> (a, b) -> Relation a b - go acc (n, r) = if p n r acc then acc else R.insert n r acc - --- could move this to a read-only field in Names --- todo: kill this function and pass thru an Int from the codebase, I suppose -numHashChars :: Names' n -> Int -numHashChars b = lenFor hashes - where lenFor _hashes = 3 - hashes = foldl' f (foldl' g mempty (R.ran $ types b)) (R.ran $ terms b) - g s r = Set.insert r s - f s r = Set.insert (Referent.toReference r) s - -termsNamed :: Ord n => Names' n -> n -> Set Referent -termsNamed = flip R.lookupDom . terms - -refTermsNamed :: Ord n => Names' n -> n -> Set Reference -refTermsNamed names n = - Set.fromList [ r | Referent.Ref r <- toList $ termsNamed names n ] - -typesNamed :: Ord n => Names' n -> n -> Set Reference -typesNamed = flip R.lookupDom . types - -namesForReferent :: Names' n -> Referent -> Set n -namesForReferent names r = R.lookupRan r (terms names) - -namesForReference :: Names' n -> Reference -> Set n -namesForReference names r = R.lookupRan r (types names) - -termAliases :: Ord n => Names' n -> n -> Referent -> Set n -termAliases names n r = Set.delete n $ namesForReferent names r - -typeAliases :: Ord n => Names' n -> n -> Reference -> Set n -typeAliases names n r = Set.delete n $ namesForReference names r - -addType :: Ord n => n -> Reference -> Names' n -> Names' n -addType n r = (<> fromTypes [(n, r)]) - -addTerm :: Ord n => n -> Referent -> Names' n -> Names' n -addTerm n r = (<> fromTerms [(n, r)]) - --- | Like hqTermName and hqTypeName, but considers term and type names to --- conflict with each other (so will hash-qualify if there is e.g. both a term --- and a type named "foo"). 
--- --- This is useful in contexts such as printing branch diffs. Example: --- --- - Deletes: --- --- foo --- foo --- --- We want to append the hash regardless of whether or not one is a term and the --- other is a type. -hqName :: Ord n => Names' n -> n -> Either Reference Referent -> HQ.HashQualified' n -hqName b n = \case - Left r -> if ambiguous then _hqTypeName' b n r else HQ.fromName n - Right r -> if ambiguous then _hqTermName' b n r else HQ.fromName n - where - ambiguous = Set.size (termsNamed b n) + Set.size (typesNamed b n) > 1 - --- Conditionally apply hash qualifier to term name. --- Should be the same as the input name if the Names0 is unconflicted. -hqTermName :: Ord n => Int -> Names' n -> n -> Referent -> HQ.HashQualified' n -hqTermName hqLen b n r = if Set.size (termsNamed b n) > 1 - then hqTermName' hqLen n r - else HQ.fromName n - -hqTypeName :: Ord n => Int -> Names' n -> n -> Reference -> HQ.HashQualified' n -hqTypeName hqLen b n r = if Set.size (typesNamed b n) > 1 - then hqTypeName' hqLen n r - else HQ.fromName n - -_hqTermName :: Ord n => Names' n -> n -> Referent -> HQ.HashQualified' n -_hqTermName b n r = if Set.size (termsNamed b n) > 1 - then _hqTermName' b n r - else HQ.fromName n - -_hqTypeName :: Ord n => Names' n -> n -> Reference -> HQ.HashQualified' n -_hqTypeName b n r = if Set.size (typesNamed b n) > 1 - then _hqTypeName' b n r - else HQ.fromName n - -_hqTypeAliases :: - Ord n => Names' n -> n -> Reference -> Set (HQ.HashQualified' n) -_hqTypeAliases b n r = Set.map (flip (_hqTypeName b) r) (typeAliases b n r) - -_hqTermAliases :: Ord n => Names' n -> n -> Referent -> Set (HQ.HashQualified' n) -_hqTermAliases b n r = Set.map (flip (_hqTermName b) r) (termAliases b n r) - --- Unconditionally apply hash qualifier long enough to distinguish all the --- References in this Names0. 
-hqTermName' :: Int -> n -> Referent -> HQ.HashQualified' n -hqTermName' hqLen n r = - HQ.take hqLen $ HQ.fromNamedReferent n r - -hqTypeName' :: Int -> n -> Reference -> HQ.HashQualified' n -hqTypeName' hqLen n r = - HQ.take hqLen $ HQ.fromNamedReference n r - -_hqTermName' :: Names' n -> n -> Referent -> HQ.HashQualified' n -_hqTermName' b n r = - HQ.take (numHashChars b) $ HQ.fromNamedReferent n r - -_hqTypeName' :: Names' n -> n -> Reference -> HQ.HashQualified' n -_hqTypeName' b n r = - HQ.take (numHashChars b) $ HQ.fromNamedReference n r - -fromTerms :: Ord n => [(n, Referent)] -> Names' n -fromTerms ts = Names (R.fromList ts) mempty - -fromTypes :: Ord n => [(n, Reference)] -> Names' n -fromTypes ts = Names mempty (R.fromList ts) - -prefix0 :: Name -> Names0 -> Names0 -prefix0 n (Names terms types) = Names terms' types' where - terms' = R.mapDom (Name.joinDot n) terms - types' = R.mapDom (Name.joinDot n) types - -filter :: Ord n => (n -> Bool) -> Names' n -> Names' n -filter f (Names terms types) = Names (R.filterDom f terms) (R.filterDom f types) - --- currently used for filtering before a conditional `add` -filterByHQs :: Set HashQualified -> Names0 -> Names0 -filterByHQs hqs Names{..} = Names terms' types' where - terms' = R.filter f terms - types' = R.filter g types - f (n, r) = any (HQ.matchesNamedReferent n r) hqs - g (n, r) = any (HQ.matchesNamedReference n r) hqs - -filterBySHs :: Set ShortHash -> Names0 -> Names0 -filterBySHs shs Names{..} = Names terms' types' where - terms' = R.filter f terms - types' = R.filter g types - f (_n, r) = any (`SH.isPrefixOf` Referent.toShortHash r) shs - g (_n, r) = any (`SH.isPrefixOf` Reference.toShortHash r) shs - -filterTypes :: Ord n => (n -> Bool) -> Names' n -> Names' n -filterTypes f (Names terms types) = Names terms (R.filterDom f types) - -difference :: Ord n => Names' n -> Names' n -> Names' n -difference a b = Names (R.difference (terms a) (terms b)) - (R.difference (types a) (types b)) - -contains :: 
Names' n -> Reference -> Bool -contains names r = - -- this check makes `contains` O(n) instead of O(log n) - (Set.member r . Set.map Referent.toReference . R.ran) (terms names) - || R.memberRan r (types names) - --- | filters out everything from the domain except what's conflicted -conflicts :: Ord n => Names' n -> Names' n -conflicts Names{..} = Names (R.filterManyDom terms) (R.filterManyDom types) - -instance Ord n => Semigroup (Names' n) where (<>) = mappend - -instance Ord n => Monoid (Names' n) where - mempty = Names mempty mempty - Names e1 t1 `mappend` Names e2 t2 = - Names (e1 <> e2) (t1 <> t2) - -instance Show n => Show (Names' n) where - show (Names terms types) = "Terms:\n" ++ - foldMap (\(n, r) -> " " ++ show n ++ " -> " ++ show r ++ "\n") (R.toList terms) ++ "\n" ++ - "Types:\n" ++ - foldMap (\(n, r) -> " " ++ show n ++ " -> " ++ show r ++ "\n") (R.toList types) ++ "\n" - diff --git a/unison-core/src/Unison/Names3.hs b/unison-core/src/Unison/Names3.hs deleted file mode 100644 index fba95b72a7..0000000000 --- a/unison-core/src/Unison/Names3.hs +++ /dev/null @@ -1,240 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Names3 where - -import Unison.Prelude - -import Data.List.Extra (nubOrd) -import Unison.HashQualified (HashQualified) -import qualified Unison.HashQualified as HQ -import qualified Unison.HashQualified' as HQ' -import Unison.Name (Name) -import Unison.Reference as Reference -import Unison.Referent as Referent -import Unison.Util.Relation (Relation) -import qualified Data.Set as Set -import qualified Data.Map as Map -import qualified Unison.Name as Name -import qualified Unison.Names2 -import qualified Unison.Names2 as Names -import qualified Unison.Util.List as List -import qualified Unison.Util.Relation as R -import qualified Unison.ConstructorType as CT - -data Names = Names { currentNames :: Names0, oldNames :: Names0 } deriving Show - -type Names0 = 
Unison.Names2.Names0 -pattern Names0 terms types = Unison.Names2.Names terms types - -data ResolutionFailure v a - = TermResolutionFailure v a (Set Referent) - | TypeResolutionFailure v a (Set Reference) - deriving (Eq,Ord,Show) - -type ResolutionResult v a r = Either (Seq (ResolutionFailure v a)) r - --- For all names in `ns`, (ex: foo.bar.baz), generate the list of suffixes --- of that name [[foo.bar.baz], [bar.baz], [baz]]. Insert these suffixes --- into a multimap map along with their corresponding refs. Any suffix --- which is unique is added as an entry to `ns`. -suffixify0 :: Names0 -> Names0 -suffixify0 ns = ns <> suffixNs - where - suffixNs = names0 (R.fromList uniqueTerms) (R.fromList uniqueTypes) - terms = List.multimap [ (n,ref) | (n0,ref) <- R.toList (terms0 ns), n <- Name.suffixes n0 ] - types = List.multimap [ (n,ref) | (n0,ref) <- R.toList (types0 ns), n <- Name.suffixes n0 ] - uniqueTerms = [ (n,ref) | (n, nubOrd -> [ref]) <- Map.toList terms ] - uniqueTypes = [ (n,ref) | (n, nubOrd -> [ref]) <- Map.toList types ] - -suffixify :: Names -> Names -suffixify ns = Names (suffixify0 (currentNames ns)) (oldNames ns) - -filterTypes :: (Name -> Bool) -> Names0 -> Names0 -filterTypes = Unison.Names2.filterTypes - --- Simple 2 way diff, has the property that: --- addedNames (diff0 n1 n2) == removedNames (diff0 n2 n1) --- --- `addedNames` are names in `n2` but not `n1` --- `removedNames` are names in `n1` but not `n2` -diff0 :: Names0 -> Names0 -> Diff -diff0 n1 n2 = Diff n1 added removed where - added = Names0 (terms0 n2 `R.difference` terms0 n1) - (types0 n2 `R.difference` types0 n1) - removed = Names0 (terms0 n1 `R.difference` terms0 n2) - (types0 n1 `R.difference` types0 n2) - -data Diff = - Diff { originalNames :: Names0 - , addedNames :: Names0 - , removedNames :: Names0 - } deriving Show - -isEmptyDiff :: Diff -> Bool -isEmptyDiff d = isEmpty0 (addedNames d) && isEmpty0 (removedNames d) - -isEmpty0 :: Names0 -> Bool -isEmpty0 n = R.null (terms0 n) && 
R.null (types0 n) - --- Add `n1` to `currentNames`, shadowing anything with the same name and --- moving shadowed definitions into `oldNames` so they can can still be --- referenced hash qualified. -push :: Names0 -> Names -> Names -push n1 ns = Names (unionLeft0 n1 cur) (oldNames ns <> shadowed) where - cur = currentNames ns - shadowed = names0 terms' types' where - terms' = R.dom (terms0 n1) R.<| (terms0 cur `R.difference` terms0 n1) - types' = R.dom (types0 n1) R.<| (types0 cur `R.difference` types0 n1) - unionLeft0 :: Names0 -> Names0 -> Names0 - unionLeft0 n1 n2 = names0 terms' types' where - terms' = terms0 n1 <> R.subtractDom (R.dom $ terms0 n1) (terms0 n2) - types' = types0 n1 <> R.subtractDom (R.dom $ types0 n1) (types0 n2) - -unionLeft0 :: Names0 -> Names0 -> Names0 -unionLeft0 = Unison.Names2.unionLeft - -unionLeftName0 :: Names0 -> Names0 -> Names0 -unionLeftName0 = Unison.Names2.unionLeftName - -map0 :: (Name -> Name) -> Names0 -> Names0 -map0 f (Names.Names terms types) = Names.Names terms' types' where - terms' = R.mapDom f terms - types' = R.mapDom f types - -names0 :: Relation Name Referent -> Relation Name Reference -> Names0 -names0 = Unison.Names2.Names - -types0 :: Names0 -> Relation Name Reference -types0 = Names.types - -terms0 :: Names0 -> Relation Name Referent -terms0 = Names.terms - --- if I push an existing name, the pushed reference should be the thing --- if I push a different name for the same thing, i suppose they should coexist --- thus, `unionLeftName0`. -shadowing :: Names0 -> Names -> Names -shadowing prio (Names current old) = - Names (prio `unionLeftName0` current) (current <> old) - -makeAbsolute0:: Names0 -> Names0 -makeAbsolute0 = map0 Name.makeAbsolute - --- do a prefix match on currentNames and, if no match, then check oldNames. 
-lookupHQType :: HashQualified -> Names -> Set Reference -lookupHQType hq Names{..} = case hq of - HQ.NameOnly n -> R.lookupDom n (Names.types currentNames) - HQ.HashQualified n sh -> case matches sh currentNames of - s | (not . null) s -> s - | otherwise -> matches sh oldNames - where - matches sh ns = Set.filter (Reference.isPrefixOf sh) (R.lookupDom n $ Names.types ns) - HQ.HashOnly sh -> case matches sh currentNames of - s | (not . null) s -> s - | otherwise -> matches sh oldNames - where - matches sh ns = Set.filter (Reference.isPrefixOf sh) (R.ran $ Names.types ns) - -hasTermNamed :: Name -> Names -> Bool -hasTermNamed n ns = not (Set.null $ lookupHQTerm (HQ.NameOnly n) ns) - -hasTypeNamed :: Name -> Names -> Bool -hasTypeNamed n ns = not (Set.null $ lookupHQType (HQ.NameOnly n) ns) - -lookupHQTerm :: HashQualified -> Names -> Set Referent -lookupHQTerm hq Names{..} = case hq of - HQ.NameOnly n -> R.lookupDom n (Names.terms currentNames) - HQ.HashQualified n sh -> case matches sh currentNames of - s | (not . null) s -> s - | otherwise -> matches sh oldNames - where - matches sh ns = Set.filter (Referent.isPrefixOf sh) (R.lookupDom n $ Names.terms ns) - HQ.HashOnly sh -> case matches sh currentNames of - s | (not . null) s -> s - | otherwise -> matches sh oldNames - where - matches sh ns = Set.filter (Referent.isPrefixOf sh) (R.ran $ Names.terms ns) - --- If `r` is in "current" names, look up each of its names, and hash-qualify --- them if they are conflicted names. If `r` isn't in "current" names, look up --- each of its "old" names and hash-qualify them. -typeName :: Int -> Reference -> Names -> Set HQ'.HashQualified -typeName length r Names{..} = - if R.memberRan r . Names.types $ currentNames - then Set.map (\n -> if isConflicted n then hq n else HQ'.fromName n) - (R.lookupRan r . Names.types $ currentNames) - else Set.map hq (R.lookupRan r . 
Names.types $ oldNames) - where hq n = HQ'.take length (HQ'.fromNamedReference n r) - isConflicted n = R.manyDom n (Names.types currentNames) - -termName :: Int -> Referent -> Names -> Set HQ'.HashQualified -termName length r Names{..} = - if R.memberRan r . Names.terms $ currentNames - then Set.map (\n -> if isConflicted n then hq n else HQ'.fromName n) - (R.lookupRan r . Names.terms $ currentNames) - else Set.map hq (R.lookupRan r . Names.terms $ oldNames) - where hq n = HQ'.take length (HQ'.fromNamedReferent n r) - isConflicted n = R.manyDom n (Names.terms currentNames) - --- Set HashQualified -> Branch m -> Action' m v Names --- Set HashQualified -> Branch m -> Free (Command m i v) Names --- Set HashQualified -> Branch m -> Command m i v Names --- populate historical names -lookupHQPattern :: HQ.HashQualified -> Names -> Set (Reference, Int) -lookupHQPattern hq names = Set.fromList - [ (r, cid) | Referent.Con r cid _ <- toList $ lookupHQTerm hq names ] - --- Finds all the constructors for the given type in the `Names0` -constructorsForType0 :: Reference -> Names0 -> [(Name,Referent)] -constructorsForType0 r ns = let - -- rather than searching all of names, we use the known possible forms - -- that the constructors can take - possibleDatas = [ Referent.Con r cid CT.Data | cid <- [0..] ] - possibleEffects = [ Referent.Con r cid CT.Effect | cid <- [0..] ] - trim [] = [] - trim (h:t) = case R.lookupRan h (terms0 ns) of - s | Set.null s -> [] - | otherwise -> [ (n,h) | n <- toList s ] ++ trim t - in trim possibleEffects ++ trim possibleDatas - --- Given a mapping from name to qualified name, update a `Names`, --- so for instance if the input has [(Some, Optional.Some)], --- and `Optional.Some` is a constructor in the input `Names`, --- the alias `Some` will map to that same constructor and shadow --- anything else that is currently called `Some`. --- --- Only affects `currentNames`. 
-importing :: [(Name, Name)] -> Names -> Names -importing shortToLongName ns = - ns { currentNames = importing0 shortToLongName (currentNames ns) } - -importing0 :: [(Name, Name)] -> Names0 -> Names0 -importing0 shortToLongName ns = - Names.Names - (foldl' go (terms0 ns) shortToLongName) - (foldl' go (types0 ns) shortToLongName) - where - go :: (Show a, Ord a, Ord b) => Relation a b -> (a, a) -> Relation a b - go m (shortname, qname) = case R.lookupDom qname m of - s | Set.null s -> m - | otherwise -> R.insertManyRan shortname s (R.deleteDom shortname m) - --- Converts a wildcard import into a list of explicit imports, of the form --- [(suffix, full)]. Example: if `io` contains two functions, `foo` and --- `bar`, then `expandWildcardImport io` will produce --- `[(foo, io.foo), (bar, io.bar)]`. -expandWildcardImport :: Name -> Names0 -> [(Name,Name)] -expandWildcardImport prefix ns = - [ (suffix, full) | Just (suffix,full) <- go <$> R.toList (terms0 ns) ] <> - [ (suffix, full) | Just (suffix,full) <- go <$> R.toList (types0 ns) ] - where - go (full, _) = case Name.stripNamePrefix prefix full of - Nothing -> Nothing - Just suffix -> Just (suffix, full) - -deleteTerms0 :: [Name] -> Names0 -> Names0 -deleteTerms0 ns n0 = names0 terms' (types0 n0) - where - terms' = R.subtractDom (Set.fromList ns) (terms0 n0) diff --git a/unison-core/src/Unison/Paths.hs b/unison-core/src/Unison/Paths.hs deleted file mode 100644 index 13ca4645e4..0000000000 --- a/unison-core/src/Unison/Paths.hs +++ /dev/null @@ -1,204 +0,0 @@ -{-# Language DeriveGeneric #-} - -module Unison.Paths where - -import Unison.Prelude - -import Data.List -import Unison.ABT (V) -import Unison.Var (Var) -import qualified Data.Sequence as Sequence -import qualified Unison.ABT as ABT -import qualified Unison.Term as E -import qualified Unison.Type as T - -type Type v = T.Type v () -type Term v = E.Term v () - -data Target v - = Term (Term v) - | Type (Type v) - | Var v - | Declaration v (Term v) deriving Generic - 
-- Metadata - -vmap :: Ord v2 => (v -> v2) -> Target v -> Target v2 -vmap f (Var v) = Var (f v) -vmap f (Declaration v b) = Declaration (f v) (E.vmap f b) -vmap f (Term t) = Term (E.vmap f t) -vmap f (Type t) = Type (ABT.vmap f t) - -data PathElement - = Fn -- ^ Points at function in a function/type application - | Arg -- ^ Points at the argument of a function/type application - | Body -- ^ Points at the body of a lambda, let, binding, forall, or annotation - | Bound -- ^ Points at the symbol bound by a `let`, `lambda` or `forall` binder - | Binding !Int -- ^ Points at a particular binding in a let - | Index !Int -- ^ Points at the index of a vector - | Annotation -- ^ Points into the annotation - | Input -- ^ Points at the left of an `Arrow` - | Output -- ^ Points at the right of an `Arrow` - deriving (Eq,Ord,Show,Generic) - -focus1 - :: Var v - => PathElement - -> ABT.Path (Target v) (Target (V v)) (Target v) (Target (V v)) [v] -focus1 e = ABT.Path go' - where - go' t = go e t - w = E.vmap ABT.Bound - wt = ABT.vmap ABT.Bound - go Fn (Term (E.App' fn arg)) = Just - (Term fn, \fn -> Term <$> (E.app () <$> asTerm fn <*> pure (w arg)), []) - go Fn (Type (T.App' fn arg)) = - Just - (Type fn, \fn -> Type <$> (T.app () <$> asType fn <*> pure (wt arg)), []) - go Arg (Term (E.App' fn arg)) = - Just (Term arg, \arg -> Term <$> (E.app () (w fn) <$> asTerm arg), []) - go Arg (Type (T.App' fn arg)) = - Just (Type arg, \arg -> Type <$> (T.app () (wt fn) <$> asType arg), []) - go Body (Term (E.LamNamed' v body)) = Just - (Term body, \t -> Term . set <$> asTerm t, [v]) - where set body = ABT.tm (E.Lam (ABT.absr v body)) - go Body (Term (E.Let1NamedTop' top v b body)) = Just - (Term body, \t -> Term . set <$> asTerm t, [v]) - where set body = ABT.tm (E.Let top (w b) (ABT.absr v body)) - go p (Term (ABT.Cycle' vs (ABT.Tm' (E.LetRec top bs body)))) = case p of - Body -> Just (Term body, \body -> Term . 
set <$> asTerm body, vs) - where set body = ABT.cycler vs (ABT.tm (E.LetRec top (map w bs) body)) - Binding i | i >= 0 && i < length bs -> Just - ( Declaration (vs !! i) (bs !! i) - , \b -> Term . set <$> asDeclaration b - , vs - ) - where - replace f i a vs = map f (take i vs) ++ [a] ++ map f (drop (i + 1) vs) - set (v, b) = - let tm0 = ABT.tm (E.LetRec top (replace w i b bs) (w body)) - v0 = ABT.Bound (vs !! i) - tm = if v /= v0 then ABT.rename v0 v tm0 else tm - in ABT.cycler (replace id i (ABT.unvar v) vs) tm - _ -> Nothing - go Body (Type (T.ForallNamed' v body)) = Just - (Type body, \t -> Type . set <$> asType t, [v]) - where set body = ABT.tm (T.Forall (ABT.absr v body)) - go Body (Declaration v body) = - Just (Term body, \body -> Declaration (ABT.Bound v) <$> asTerm body, []) - go Bound (Declaration v body) = - Just (Var v, \v -> Declaration <$> asVar v <*> pure (w body), []) - go Bound (Term (E.LamNamed' v body)) = - Just (Var v, \v -> Term <$> (E.lam () <$> asVar v <*> pure (w body)), []) - go Bound (Term (E.Let1NamedTop' top v b body)) = Just - ( Var v - , \v -> (\v -> Term $ E.let1 top [(((), v), w b)] (w body)) <$> asVar v - , [] - ) - go Bound (Type (T.ForallNamed' v body)) = Just - (Var v, \v -> Type <$> (T.forall () <$> asVar v <*> pure (wt body)), []) - go (Index i) (Term (E.Sequence' vs)) | i < Sequence.length vs && i >= 0 = Just - ( Term (vs `Sequence.index` i) - , \e -> (\e -> Term $ E.seq' () $ Sequence.update i e (fmap w vs)) <$> asTerm e - , [] - ) - go (Binding i) (Term (E.Let1NamedTop' top v b body)) | i <= 0 = Just - (Declaration v b, set, []) - where - set (Declaration v b) = pure . Term $ E.let1 top [(((), v), b)] (w body) - set _ = Nothing - go Annotation (Term (E.Ann' e t)) = - Just (Type t, \t -> Term . E.ann () (w e) <$> asType t, []) - go Body (Term (E.Ann' body t)) = Just - (Term body, \body -> Term . 
flip (E.ann ()) (wt t) <$> asTerm body, []) - go Input (Type (T.Arrow' i o)) = Just - (Type i, \i -> Type <$> (T.arrow () <$> asType i <*> pure (wt o)), []) - go Output (Type (T.Arrow' i o)) = - Just (Type o, \o -> Type . T.arrow () (wt i) <$> asType o, []) - go _ _ = Nothing - -type Path = [PathElement] - -focus :: Var v => Path -> Target v -> Maybe (Target v, Target (V v) -> Maybe (Target v), [v]) -focus p t = tweak <$> ABT.focus (foldr ABT.compose ABT.here (map focus1 p)) t where - tweak (get, set, vs) = (get, \t -> vmap ABT.unvar <$> set t, vs) - -at :: Var v => Path -> Target v -> Maybe (Target v) -at path t = (\(a,_,_) -> a) <$> focus path t - -atTerm :: Var v => Path -> Term v -> Maybe (Term v) -atTerm path t = asTerm =<< at path (Term t) - -atType :: Var v => Path -> Type v -> Maybe (Type v) -atType path t = asType =<< at path (Type t) - -modify :: Var v => (Target v -> Target (V v)) -> Path -> Target v -> Maybe (Target v) -modify f path t = focus path t >>= \(at,set,_) -> set (f at) - -modifyTerm :: Var v => (Term v -> Term (V v)) -> Path -> Term v -> Maybe (Term v) -modifyTerm f p t = do - (at,set,_) <- focus p (Term t) - t <- asTerm at - asTerm =<< set (Term $ f t) - -modifyTerm' :: Var v => (Term v -> Term (V v)) -> Path -> Term v -> Term v -modifyTerm' f p t = fromMaybe t $ modifyTerm f p t - -modifyType :: Var v => (Type v -> Type (V v)) -> Path -> Type v -> Maybe (Type v) -modifyType f p t = do - (at,set,_) <- focus p (Type t) - t <- asType at - asType =<< set (Type $ f t) - -inScopeAt :: Var v => Path -> Target v -> [v] -inScopeAt p t = maybe [] (\(_,_,vs) -> vs) (focus p t) - -inScopeAtTerm :: Var v => Path -> Term v -> [v] -inScopeAtTerm p t = inScopeAt p (Term t) - -inScopeAtType :: Var v => Path -> Type v -> [v] -inScopeAtType p t = inScopeAt p (Type t) - -insertTerm :: Var v => Path -> Term v -> Maybe (Term v) -insertTerm at _ | null at = Nothing -insertTerm at ctx = do - let at' = init at - (parent,set,_) <- focus at' (Term ctx) - case parent 
of - Term (E.Sequence' vs) -> do - i <- listToMaybe [i | Index i <- [last at]] - let v2 = E.seq'() ((E.vmap ABT.Bound <$> Sequence.take (i+1) vs) `mappend` - pure (E.blank ()) `mappend` - (E.vmap ABT.Bound <$> Sequence.drop (i+1) vs)) - asTerm =<< set (Term v2) - _ -> Nothing -- todo - allow other types of insertions, like \x -> y to \x x2 -> y - --- | Return the list of all prefixes of the input path -pathPrefixes :: Path -> [Path] -pathPrefixes = inits - --- | Add an element onto the end of this 'Path' -pathExtend :: PathElement -> Path -> Path -pathExtend e p = p ++ [e] - -parent :: Path -> Maybe Path -parent [] = Nothing -parent p = Just (init p) - -parent' :: Path -> Path -parent' = fromMaybe [] . parent - -asTerm :: Target v -> Maybe (Term v) -asTerm (Term t) = Just t -asTerm _ = Nothing - -asType :: Target v -> Maybe (Type v) -asType (Type t) = Just t -asType _ = Nothing - -asVar :: Target v -> Maybe v -asVar (Var v) = Just v -asVar _ = Nothing - -asDeclaration :: Target v -> Maybe (v, Term v) -asDeclaration (Declaration v b) = Just (v,b) -asDeclaration _ = Nothing diff --git a/unison-core/src/Unison/Pattern.hs b/unison-core/src/Unison/Pattern.hs deleted file mode 100644 index 687710430d..0000000000 --- a/unison-core/src/Unison/Pattern.hs +++ /dev/null @@ -1,165 +0,0 @@ -{-# Language DeriveTraversable, DeriveGeneric, PatternSynonyms, OverloadedStrings #-} - -module Unison.Pattern where - -import Unison.Prelude - -import Data.List (intercalate) -import Data.Foldable as Foldable hiding (foldMap') -import Unison.Reference (Reference) -import qualified Unison.Hashable as H -import qualified Unison.Type as Type -import qualified Data.Set as Set -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency (LabeledDependency) - -type ConstructorId = Int - -data Pattern loc - = Unbound loc - | Var loc - | Boolean loc !Bool - | Int loc !Int64 - | Nat loc !Word64 - | Float loc !Double - | Text loc !Text - | Char loc !Char - | Constructor loc 
!Reference !Int [Pattern loc] - | As loc (Pattern loc) - | EffectPure loc (Pattern loc) - | EffectBind loc !Reference !Int [Pattern loc] (Pattern loc) - | SequenceLiteral loc [Pattern loc] - | SequenceOp loc (Pattern loc) !SeqOp (Pattern loc) - deriving (Ord,Generic,Functor,Foldable,Traversable) - -data SeqOp = Cons - | Snoc - | Concat - deriving (Eq, Show, Ord) - -instance H.Hashable SeqOp where - tokens Cons = [H.Tag 0] - tokens Snoc = [H.Tag 1] - tokens Concat = [H.Tag 2] - -instance Show (Pattern loc) where - show (Unbound _ ) = "Unbound" - show (Var _ ) = "Var" - show (Boolean _ x) = "Boolean " <> show x - show (Int _ x) = "Int " <> show x - show (Nat _ x) = "Nat " <> show x - show (Float _ x) = "Float " <> show x - show (Text _ t) = "Text " <> show t - show (Char _ c) = "Char " <> show c - show (Constructor _ r i ps) = - "Constructor " <> unwords [show r, show i, show ps] - show (As _ p) = "As " <> show p - show (EffectPure _ k) = "EffectPure " <> show k - show (EffectBind _ r i ps k) = - "EffectBind " <> unwords [show r, show i, show ps, show k] - show (SequenceLiteral _ ps) = "Sequence " <> intercalate ", " (fmap show ps) - show (SequenceOp _ ph op pt) = "Sequence " <> show ph <> " " <> show op <> " " <> show pt - -application :: Pattern loc -> Bool -application (Constructor _ _ _ (_ : _)) = True -application _ = False - -loc :: Pattern loc -> loc -loc p = head $ Foldable.toList p - -setLoc :: Pattern loc -> loc -> Pattern loc -setLoc p loc = case p of - EffectBind _ a b c d -> EffectBind loc a b c d - EffectPure _ a -> EffectPure loc a - As _ a -> As loc a - Constructor _ a b c -> Constructor loc a b c - SequenceLiteral _ ps -> SequenceLiteral loc ps - SequenceOp _ ph op pt -> SequenceOp loc ph op pt - x -> fmap (const loc) x - -instance H.Hashable (Pattern p) where - tokens (Unbound _) = [H.Tag 0] - tokens (Var _) = [H.Tag 1] - tokens (Boolean _ b) = H.Tag 2 : [H.Tag $ if b then 1 else 0] - tokens (Int _ n) = H.Tag 3 : [H.Int n] - tokens (Nat _ n) = H.Tag 
4 : [H.Nat n] - tokens (Float _ f) = H.Tag 5 : H.tokens f - tokens (Constructor _ r n args) = - [H.Tag 6, H.accumulateToken r, H.Nat $ fromIntegral n, H.accumulateToken args] - tokens (EffectPure _ p) = H.Tag 7 : H.tokens p - tokens (EffectBind _ r n args k) = - [H.Tag 8, H.accumulateToken r, H.Nat $ fromIntegral n, H.accumulateToken args, H.accumulateToken k] - tokens (As _ p) = H.Tag 9 : H.tokens p - tokens (Text _ t) = H.Tag 10 : H.tokens t - tokens (SequenceLiteral _ ps) = H.Tag 11 : concatMap H.tokens ps - tokens (SequenceOp _ l op r) = H.Tag 12 : H.tokens op ++ H.tokens l ++ H.tokens r - tokens (Char _ c) = H.Tag 13 : H.tokens c - -instance Eq (Pattern loc) where - Unbound _ == Unbound _ = True - Var _ == Var _ = True - Boolean _ b == Boolean _ b2 = b == b2 - Int _ n == Int _ m = n == m - Nat _ n == Nat _ m = n == m - Float _ f == Float _ g = f == g - Constructor _ r n args == Constructor _ s m brgs = r == s && n == m && args == brgs - EffectPure _ p == EffectPure _ q = p == q - EffectBind _ r ctor ps k == EffectBind _ r2 ctor2 ps2 k2 = r == r2 && ctor == ctor2 && ps == ps2 && k == k2 - As _ p == As _ q = p == q - Text _ t == Text _ t2 = t == t2 - SequenceLiteral _ ps == SequenceLiteral _ ps2 = ps == ps2 - SequenceOp _ ph op pt == SequenceOp _ ph2 op2 pt2 = ph == ph2 && op == op2 && pt == pt2 - _ == _ = False - -foldMap' :: Monoid m => (Pattern loc -> m) -> Pattern loc -> m -foldMap' f p = case p of - Unbound _ -> f p - Var _ -> f p - Boolean _ _ -> f p - Int _ _ -> f p - Nat _ _ -> f p - Float _ _ -> f p - Text _ _ -> f p - Char _ _ -> f p - Constructor _ _ _ ps -> f p <> foldMap (foldMap' f) ps - As _ p' -> f p <> foldMap' f p' - EffectPure _ p' -> f p <> foldMap' f p' - EffectBind _ _ _ ps p' -> f p <> foldMap (foldMap' f) ps <> foldMap' f p' - SequenceLiteral _ ps -> f p <> foldMap (foldMap' f) ps - SequenceOp _ p1 _ p2 -> f p <> foldMap' f p1 <> foldMap' f p2 - -generalizedDependencies - :: Ord r - => (Reference -> r) - -> (Reference -> ConstructorId -> 
r) - -> (Reference -> r) - -> (Reference -> ConstructorId -> r) - -> (Reference -> r) - -> Pattern loc - -> Set r -generalizedDependencies literalType dataConstructor dataType effectConstructor effectType - = Set.fromList . foldMap' - (\case - Unbound _ -> mempty - Var _ -> mempty - As _ _ -> mempty - Constructor _ r cid _ -> [dataType r, dataConstructor r cid] - EffectPure _ _ -> [effectType Type.effectRef] - EffectBind _ r cid _ _ -> - [effectType Type.effectRef, effectType r, effectConstructor r cid] - SequenceLiteral _ _ -> [literalType Type.vectorRef] - SequenceOp {} -> [literalType Type.vectorRef] - Boolean _ _ -> [literalType Type.booleanRef] - Int _ _ -> [literalType Type.intRef] - Nat _ _ -> [literalType Type.natRef] - Float _ _ -> [literalType Type.floatRef] - Text _ _ -> [literalType Type.textRef] - Char _ _ -> [literalType Type.charRef] - ) - -labeledDependencies :: Pattern loc -> Set LabeledDependency -labeledDependencies = generalizedDependencies LD.typeRef - LD.dataConstructor - LD.typeRef - LD.effectConstructor - LD.typeRef diff --git a/unison-core/src/Unison/PatternCompat.hs b/unison-core/src/Unison/PatternCompat.hs deleted file mode 100644 index 31ee1c532d..0000000000 --- a/unison-core/src/Unison/PatternCompat.hs +++ /dev/null @@ -1,30 +0,0 @@ -{-# Language PatternSynonyms #-} - -module Unison.PatternCompat where - -import qualified Unison.Pattern as P - -type Pattern = P.Pattern () - -{-# COMPLETE Unbound, Var, Boolean, Int, Nat, Float, Text, Char, Constructor, As, EffectPure, EffectBind, SequenceLiteral, SequenceOp #-} - -pattern Unbound = P.Unbound () -pattern Var = P.Var () -pattern Boolean b = P.Boolean () b -pattern Int n = P.Int () n -pattern Nat n = P.Nat () n -pattern Float n = P.Float () n -pattern Text t = P.Text () t -pattern Char c = P.Char () c -pattern Constructor r cid ps = P.Constructor () r cid ps -pattern As p = P.As () p -pattern EffectPure p = P.EffectPure () p -pattern EffectBind r cid ps k = P.EffectBind () r cid ps k 
-pattern SequenceLiteral ps = P.SequenceLiteral () ps -pattern SequenceOp ph op pt = P.SequenceOp () ph op pt - -{-# COMPLETE Snoc, Cons, Concat #-} -type SeqOp = P.SeqOp -pattern Snoc = P.Snoc -pattern Cons = P.Cons -pattern Concat = P.Concat diff --git a/unison-core/src/Unison/Prelude.hs b/unison-core/src/Unison/Prelude.hs deleted file mode 100644 index 2bea8d7106..0000000000 --- a/unison-core/src/Unison/Prelude.hs +++ /dev/null @@ -1,62 +0,0 @@ -module Unison.Prelude - ( module X, readUtf8, safeReadUtf8, safeReadUtf8StdIn, writeUtf8, reportBug - ) where - -import Control.Applicative as X -import Control.Exception as X (Exception, SomeException, IOException, try) -import Control.Monad as X -import Control.Monad.Extra as X (ifM, mapMaybeM, unlessM, whenM) -import Control.Monad.IO.Class as X (MonadIO(liftIO)) -import Control.Monad.Trans as X (MonadTrans(lift)) -import Control.Monad.Trans.Maybe as X (MaybeT(MaybeT, runMaybeT)) -import Data.ByteString as X (ByteString) -import Data.Either as X -import Data.Either.Combinators as X (mapLeft, maybeToRight) -import Data.Foldable as X (asum, fold, foldl', for_, toList, traverse_) -import Data.Functor as X -import Data.Int as X -import Data.List as X (foldl1', sortOn) -import Data.Map as X (Map) -import Data.Maybe as X (catMaybes, fromMaybe, isJust, isNothing, listToMaybe, mapMaybe, maybeToList) -import Data.Sequence as X (Seq) -import Data.Set as X (Set) -import Data.String as X (IsString, fromString) -import Data.Text as X (Text) -import Data.Text.Encoding as X (encodeUtf8, decodeUtf8) -import Data.Traversable as X (for) -import Data.Word as X -import Debug.Trace as X -import GHC.Generics as X (Generic, Generic1) -import Safe as X (atMay, headMay, lastMay, readMay) -import Text.Read as X (readMaybe) - -import qualified Data.ByteString as BS - --- Read an entire file strictly assuming UTF8 -readUtf8 :: FilePath -> IO Text -readUtf8 p = decodeUtf8 <$> BS.readFile p - -safeReadUtf8 :: FilePath -> IO (Either IOException 
Text) -safeReadUtf8 p = try (readUtf8 p) - -safeReadUtf8StdIn :: IO (Either IOException Text) -safeReadUtf8StdIn = try $ decodeUtf8 <$> BS.getContents - -writeUtf8 :: FilePath -> Text -> IO () -writeUtf8 p txt = BS.writeFile p (encodeUtf8 txt) - -reportBug :: String -> String -> String -reportBug bugId msg = unlines [ - "🐞", - "", - msg, - "", - "This is a Unison bug and you can report it here:", "", - "https://github.com/unisonweb/unison/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+" <> bugId <> "+", - "", - "Bug reference: " <> bugId, "", - "If there's already an issue with this reference, you can give a 👍", - "on the issue to let the team know you encountered it, and you can add", - "any additional details you know of to the issue." - ] - diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs deleted file mode 100644 index f007e2b764..0000000000 --- a/unison-core/src/Unison/Reference.hs +++ /dev/null @@ -1,179 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Reference - (Reference, - pattern Builtin, - pattern Derived, - pattern DerivedId, - Id(..), - derivedBase32Hex, - Component, members, - components, - groupByComponent, - componentFor, - unsafeFromText, - idFromText, - isPrefixOf, - fromShortHash, - fromText, - readSuffix, - showShort, - showSuffix, - toId, - toText, - unsafeId, - toShortHash) where - -import Unison.Prelude - -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Unison.Hash as H -import Unison.Hashable as Hashable -import Unison.ShortHash (ShortHash) -import qualified Unison.ShortHash as SH -import Data.Char (isDigit) - -data Reference - = Builtin Text.Text - -- `Derived` can be part of a strongly connected component. 
- -- The `Pos` refers to a particular element of the component - -- and the `Size` is the number of elements in the component. - -- Using an ugly name so no one tempted to use this - | DerivedId Id deriving (Eq,Ord,Generic) - -pattern Derived h i n = DerivedId (Id h i n) - --- A good idea, but causes a weird problem with view patterns in PatternP.hs in ghc 8.4.3 ---{-# COMPLETE Builtin, Derived #-} - -data Id = Id H.Hash Pos Size deriving (Eq,Ord,Generic) - -unsafeId :: Reference -> Id -unsafeId (Builtin b) = - error $ "Tried to get the hash of builtin " <> Text.unpack b <> "." -unsafeId (DerivedId x) = x - --- todo: move these to ShortHash module? --- but Show Reference currently depends on SH -toShortHash :: Reference -> ShortHash -toShortHash (Builtin b) = SH.Builtin b -toShortHash (Derived h _ 1) = SH.ShortHash (H.base32Hex h) Nothing Nothing -toShortHash (Derived h i n) = SH.ShortHash (H.base32Hex h) index Nothing - where - -- todo: remove `n` parameter; must also update readSuffix - index = Just $ showSuffix i n -toShortHash (DerivedId _) = error "this should be covered above" - --- toShortHash . fromJust . fromShortHash == id and --- fromJust . fromShortHash . toShortHash == id --- but for arbitrary ShortHashes which may be broken at the wrong boundary, it --- may not be possible to base32Hex decode them. These will return Nothing. 
--- Also, ShortHashes that include constructor ids will return Nothing; --- try Referent.fromShortHash -fromShortHash :: ShortHash -> Maybe Reference -fromShortHash (SH.Builtin b) = Just (Builtin b) -fromShortHash (SH.ShortHash prefix cycle Nothing) = do - h <- H.fromBase32Hex prefix - case cycle of - Nothing -> Just (Derived h 0 1) - Just t -> case Text.splitOn "c" t of - [i,n] -> Derived h <$> readMay (Text.unpack i) <*> readMay (Text.unpack n) - _ -> Nothing -fromShortHash (SH.ShortHash _prefix _cycle (Just _cid)) = Nothing - --- (3,10) encoded as "3c10" --- (0,93) encoded as "0c93" -showSuffix :: Pos -> Size -> Text -showSuffix i n = Text.pack $ show i <> "c" <> show n - --- todo: don't read or return size; must also update showSuffix and fromText -readSuffix :: Text -> Either String (Pos, Size) -readSuffix t = case Text.breakOn "c" t of - (pos, Text.drop 1 -> size) | Text.all isDigit pos && Text.all isDigit size -> - Right (read (Text.unpack pos), read (Text.unpack size)) - _ -> Left "suffix decoding error" - -isPrefixOf :: ShortHash -> Reference -> Bool -isPrefixOf sh r = SH.isPrefixOf sh (toShortHash r) - -toText :: Reference -> Text -toText = SH.toText . toShortHash - -showShort :: Int -> Reference -> Text -showShort numHashChars = SH.toText . SH.take numHashChars . toShortHash - -type Pos = Word64 -type Size = Word64 - -newtype Component = Component { members :: Set Reference } - --- Gives the component (dependency cycle) that the reference is a part of -componentFor :: Reference -> Component -componentFor b@(Builtin _ ) = Component (Set.singleton b) -componentFor ( DerivedId (Id h _ n)) = Component - (Set.fromList - [ DerivedId (Id h i n) | i <- take (fromIntegral n) [0 ..] 
] - ) - -derivedBase32Hex :: Text -> Pos -> Size -> Reference -derivedBase32Hex b32Hex i n = DerivedId (Id (fromMaybe msg h) i n) - where - msg = error $ "Reference.derivedBase32Hex " <> show h - h = H.fromBase32Hex b32Hex - -unsafeFromText :: Text -> Reference -unsafeFromText = either error id . fromText - -idFromText :: Text -> Maybe Id -idFromText s = case fromText s of - Left _ -> Nothing - Right (Builtin _) -> Nothing - Right (DerivedId id) -> pure id - -toId :: Reference -> Maybe Id -toId (DerivedId id) = Just id -toId Builtin{} = Nothing - --- examples: --- `##Text.take` — builtins don’t have cycles --- `#2tWjVAuc7` — derived, no cycle --- `#y9ycWkiC1.y9` — derived, part of cycle --- todo: take a (Reference -> CycleSize) so that `readSuffix` doesn't have to parse the size from the text. -fromText :: Text -> Either String Reference -fromText t = case Text.split (=='#') t of - [_, "", b] -> Right (Builtin b) - [_, h] -> case Text.split (=='.') h of - [hash] -> Right (derivedBase32Hex hash 0 1) - [hash, suffix] -> uncurry (derivedBase32Hex hash) <$> readSuffix suffix - _ -> bail - _ -> bail - where bail = Left $ "couldn't parse a Reference from " <> Text.unpack t - -component :: H.Hash -> [k] -> [(k, Id)] -component h ks = let - size = fromIntegral (length ks) - in [ (k, (Id h i size)) | (k, i) <- ks `zip` [0..]] - -components :: [(H.Hash, [k])] -> [(k, Id)] -components sccs = uncurry component =<< sccs - -groupByComponent :: [(k, Reference)] -> [[(k, Reference)]] -groupByComponent refs = done $ foldl' insert Map.empty refs - where - insert m (k, r@(Derived h _ _)) = - Map.unionWith (<>) m (Map.fromList [(Right h, [(k,r)])]) - insert m (k, r) = - Map.unionWith (<>) m (Map.fromList [(Left r, [(k,r)])]) - done m = sortOn snd <$> toList m - -instance Show Id where show = SH.toString . SH.take 5 . toShortHash . DerivedId -instance Show Reference where show = SH.toString . SH.take 5 . 
toShortHash - -instance Hashable.Hashable Reference where - tokens (Builtin txt) = [Hashable.Tag 0, Hashable.Text txt] - tokens (DerivedId (Id h i n)) = [Hashable.Tag 1, Hashable.Bytes (H.toBytes h), Hashable.Nat i, Hashable.Nat n] diff --git a/unison-core/src/Unison/Reference/Util.hs b/unison-core/src/Unison/Reference/Util.hs deleted file mode 100644 index 2d63d2d6b1..0000000000 --- a/unison-core/src/Unison/Reference/Util.hs +++ /dev/null @@ -1,22 +0,0 @@ -module Unison.Reference.Util where - -import Unison.Prelude - -import Unison.Reference -import qualified Unison.Reference as Reference -import Unison.Hashable (Hashable1) -import Unison.ABT (Var) -import qualified Unison.ABT as ABT -import qualified Data.Map as Map - -hashComponents :: - (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v) - => (Reference.Id -> ABT.Term f v ()) - -> Map v (ABT.Term f v a) - -> Map v (Reference.Id, ABT.Term f v a) -hashComponents embedRef tms = - Map.fromList [ (v, (r,e)) | ((v,e), r) <- cs ] - where cs = components $ ABT.hashComponents ref tms - ref h i n = embedRef (Id h i n) - - diff --git a/unison-core/src/Unison/Referent.hs b/unison-core/src/Unison/Referent.hs deleted file mode 100644 index 700e84ed02..0000000000 --- a/unison-core/src/Unison/Referent.hs +++ /dev/null @@ -1,128 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Referent where - -import Unison.Prelude - -import qualified Data.Char as Char -import qualified Data.Text as Text -import Unison.Hashable (Hashable) -import qualified Unison.Hashable as H -import Unison.Reference (Reference) -import qualified Unison.Reference as R -import Unison.ShortHash (ShortHash) -import qualified Unison.ShortHash as SH - -import Unison.ConstructorType (ConstructorType) -import qualified Unison.ConstructorType as CT - --- Slightly odd naming. This is the "referent of term name in the codebase", --- rather than the target of a Reference. 
-type Referent = Referent' Reference -pattern Ref :: Reference -> Referent -pattern Ref r = Ref' r -pattern Con :: Reference -> Int -> ConstructorType -> Referent -pattern Con r i t = Con' r i t -{-# COMPLETE Ref, Con #-} - -type Id = Referent' R.Id - -data Referent' r = Ref' r | Con' r Int ConstructorType - deriving (Show, Ord, Eq, Functor) - -type Pos = Word64 -type Size = Word64 - --- referentToTerm moved to Term.fromReferent --- termToReferent moved to Term.toReferent - --- todo: move these to ShortHash module -toShortHash :: Referent -> ShortHash -toShortHash = \case - Ref r -> R.toShortHash r - Con r i _ -> patternShortHash r i - -toShortHashId :: Id -> ShortHash -toShortHashId = toShortHash . fromId - --- also used by HashQualified.fromPattern -patternShortHash :: Reference -> Int -> ShortHash -patternShortHash r i = (R.toShortHash r) { SH.cid = Just . Text.pack $ show i } - -showShort :: Int -> Referent -> Text -showShort numHashChars = SH.toText . SH.take numHashChars . toShortHash - -toText :: Referent -> Text -toText = \case - Ref r -> R.toText r - Con r cid ct -> R.toText r <> "#" <> ctorTypeText ct <> Text.pack (show cid) - -ctorTypeText :: CT.ConstructorType -> Text -ctorTypeText CT.Effect = EffectCtor -ctorTypeText CT.Data = DataCtor - -pattern EffectCtor = "a" -pattern DataCtor = "d" - -toString :: Referent -> String -toString = Text.unpack . 
toText - -isConstructor :: Referent -> Bool -isConstructor Con{} = True -isConstructor _ = False - -toTermReference :: Referent -> Maybe Reference -toTermReference = \case - Ref r -> Just r - _ -> Nothing - -toReference :: Referent -> Reference -toReference = toReference' - -toReference' :: Referent' r -> r -toReference' = \case - Ref' r -> r - Con' r _i _t -> r - -fromId :: Id -> Referent -fromId = fmap R.DerivedId - -toTypeReference :: Referent -> Maybe Reference -toTypeReference = \case - Con r _i _t -> Just r - _ -> Nothing - -isPrefixOf :: ShortHash -> Referent -> Bool -isPrefixOf sh r = SH.isPrefixOf sh (toShortHash r) - -unsafeFromText :: Text -> Referent -unsafeFromText = fromMaybe (error "invalid referent") . fromText - --- #abc[.xy][#cid] -fromText :: Text -> Maybe Referent -fromText t = either (const Nothing) Just $ - -- if the string has just one hash at the start, it's just a reference - if Text.length refPart == 1 then - Ref <$> R.fromText t - else if Text.all Char.isDigit cidPart then do - r <- R.fromText (Text.dropEnd 1 refPart) - ctorType <- ctorType - let cid = read (Text.unpack cidPart) - pure $ Con r cid ctorType - else - Left ("invalid constructor id: " <> Text.unpack cidPart) - where - ctorType = case Text.take 1 cidPart' of - EffectCtor -> Right CT.Effect - DataCtor -> Right CT.Data - _otherwise -> - Left ("invalid constructor type (expected '" - <> EffectCtor <> "' or '" <> DataCtor <> "'): " <> Text.unpack cidPart') - refPart = Text.dropWhileEnd (/= '#') t - cidPart' = Text.takeWhileEnd (/= '#') t - cidPart = Text.drop 1 cidPart' - -instance Hashable Referent where - tokens (Ref r) = [H.Tag 0] ++ H.tokens r - tokens (Con r i dt) = [H.Tag 2] ++ H.tokens r ++ H.tokens (fromIntegral i :: Word64) ++ H.tokens dt diff --git a/unison-core/src/Unison/Settings.hs b/unison-core/src/Unison/Settings.hs deleted file mode 100644 index 883f7e0a0c..0000000000 --- a/unison-core/src/Unison/Settings.hs +++ /dev/null @@ -1,18 +0,0 @@ -module Unison.Settings 
where - -debugNoteLoc,debugNoteSummary,debugRevealForalls :: Bool -debugNoteLoc = False -debugNoteSummary = False -debugRevealForalls = False - -renderTermMaxLength :: Int -renderTermMaxLength = 20 - -demoHideVarNumber :: Bool -demoHideVarNumber = False - -removePureEffects :: Bool -removePureEffects = True - -cleanupTypes :: Bool -cleanupTypes = True diff --git a/unison-core/src/Unison/ShortHash.hs b/unison-core/src/Unison/ShortHash.hs deleted file mode 100644 index 7e97b7a722..0000000000 --- a/unison-core/src/Unison/ShortHash.hs +++ /dev/null @@ -1,92 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.ShortHash where - -import Unison.Prelude - -import qualified Data.Text as Text - --- Arya created this type to be able to query the Codebase for anonymous definitions. The parsing functions can't fail, because they only try to pull apart the syntactic elements "#" and ".". They don't necessarily produce a meaningful reference; you'll figure that out during base58 decoding. We don't attempt base58 decoding here because the base58 prefix doesn't correspond to anything useful. We'll just compare strings against the codebase or namespace later. --- None of the punctuation is stored here. -data ShortHash - = Builtin Text - | ShortHash { prefix :: Text, cycle :: Maybe Text, cid :: Maybe Text } - deriving (Eq, Ord, Show) - --- currently unused -isConstructor :: ShortHash -> Bool -isConstructor = \case - ShortHash _ _ (Just _) -> True - _ -> False - --- Parse a string like those described in Referent.fromText: --- examples: --- `##Text.take` — builtins don’t have cycles or cids --- `#2tWjVAuc7` — term ref, no cycle --- `#y9ycWkiC1.y9` — term ref, part of cycle --- `#cWkiC1x89#1` — constructor --- `#DCxrnCAPS.WD#0` — constructor of a type in a cycle --- A constructor ID on a builtin is ignored: --- e.g. ##FileIO#2 is parsed as ##FileIO --- Anything to the left of the first # is --- e.g. 
foo#abc is parsed as #abc --- Anything including and following a third # is ignored. --- e.g. foo#abc#2#hello is parsed as #abc#2 --- Anything after a second . before a second # is ignored. --- e.g. foo#abc.1f.x is parsed as #abc.1f -fromText :: Text -> Maybe ShortHash -fromText t = case Text.split (=='#') t of - [_, "", b] -> Just $ Builtin b -- builtin starts with ## - _ : "" : b : _ -> -- builtin with a CID todo: could be rejected - Just $ Builtin b - [_, h] -> Just $ uncurry ShortHash (getCycle h) Nothing - [_, h, c] -> Just $ uncurry ShortHash (getCycle h) (Just c) - _ : h : c : _garbage -> -- CID with more hash after todo: could be rejected - Just $ uncurry ShortHash (getCycle h) (Just c) - _ -> Nothing - where - getCycle :: Text -> (Text, Maybe Text) - getCycle h = case Text.split (=='.') h of - [] -> ("", Nothing) -- e.g. foo#.1j - [hash] -> (hash, Nothing) - hash : suffix : _garbage -> (hash, Just suffix) - -unsafeFromText :: Text -> ShortHash -unsafeFromText t = fromMaybe - (error . Text.unpack $ "can't parse ShortHash from: " <> t) - (fromText t) - -toText :: ShortHash -> Text -toText (Builtin b) = "##" <> b -toText (ShortHash p i cid) = "#" <> p <> i' <> c' where - i', c' :: Text - i' = maybe "" ("."<>) i - c' = maybe "" ("#" <>) cid - -toString :: ShortHash -> String -toString = Text.unpack . toText - -fromString :: String -> Maybe ShortHash -fromString = fromText . Text.pack - -take :: Int -> ShortHash -> ShortHash -take _ b@(Builtin _) = b -take i s@ShortHash{..} = s { prefix = Text.take i prefix } - --- x `isPrefixOf` y is True iff x might be a shorter version of y --- if a constructor id is provided on the right-hand side, the left-hand side --- needs to match exactly (as of this commit). 
-isPrefixOf :: ShortHash -> ShortHash -> Bool -isPrefixOf (Builtin t) (Builtin t2) = t `Text.isPrefixOf` t2 -isPrefixOf (ShortHash h n cid) (ShortHash h2 n2 cid2) = - Text.isPrefixOf h h2 && maybePrefixOf n n2 && maybePrefixOf cid cid2 - where - Nothing `maybePrefixOf` Nothing = True - Nothing `maybePrefixOf` Just _ = False - Just _ `maybePrefixOf` Nothing = False - Just a `maybePrefixOf` Just b = a == b -isPrefixOf _ _ = False - ---instance Show ShortHash where --- show = Text.unpack . toText diff --git a/unison-core/src/Unison/Symbol.hs b/unison-core/src/Unison/Symbol.hs deleted file mode 100644 index b1f7b200d3..0000000000 --- a/unison-core/src/Unison/Symbol.hs +++ /dev/null @@ -1,35 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Symbol where - -import Unison.Prelude - -import Unison.Var (Var(..)) -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import qualified Unison.Var as Var - -data Symbol = Symbol !Word64 Var.Type deriving (Generic) - -instance ABT.Var Symbol where - freshIn vs s | Set.null vs || Set.notMember s vs = s -- already fresh! 
- freshIn vs s@(Symbol i n) = case Set.elemAt (Set.size vs - 1) vs of - Symbol i2 _ -> if i > i2 then s else Symbol (i2+1) n - -instance Var Symbol where - typed t = Symbol 0 t - typeOf (Symbol _ t) = t - freshId (Symbol id _) = id - freshenId id (Symbol _ n) = Symbol id n - -instance Eq Symbol where - Symbol id1 name1 == Symbol id2 name2 = id1 == id2 && name1 == name2 -instance Ord Symbol where - Symbol id1 name1 `compare` Symbol id2 name2 = (id1,name1) `compare` (id2,name2) -instance Show Symbol where - show (Symbol 0 n) = show n - show (Symbol id n) = show n ++ "-" ++ show id - -symbol :: Text -> Symbol -symbol = Var.named diff --git a/unison-core/src/Unison/Term.hs b/unison-core/src/Unison/Term.hs deleted file mode 100644 index ba00d9b277..0000000000 --- a/unison-core/src/Unison/Term.hs +++ /dev/null @@ -1,1123 +0,0 @@ -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE Rank2Types #-} -{-# LANGUAGE UnicodeSyntax #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Term where - -import Unison.Prelude - -import Prelude hiding (and,or) -import Control.Monad.State (evalState) -import qualified Control.Monad.Writer.Strict as Writer -import Data.Bifunctor (second) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Data.Sequence as Sequence -import Prelude.Extras (Eq1(..), Show1(..)) -import Text.Show -import qualified Unison.ABT as ABT -import qualified Unison.Blank as B -import qualified Unison.Hash as Hash -import Unison.Hashable (Hashable1, accumulateToken) -import qualified Unison.Hashable as Hashable -import Unison.Names3 ( Names0 ) -import qualified Unison.Names3 as Names -import Unison.Pattern (Pattern) -import qualified Unison.Pattern as Pattern -import Unison.Reference (Reference, pattern Builtin) -import qualified Unison.Reference as Reference -import 
qualified Unison.Reference.Util as ReferenceUtil -import Unison.Referent (Referent) -import qualified Unison.Referent as Referent -import Unison.Type (Type) -import qualified Unison.Type as Type -import qualified Unison.Util.Relation as Rel -import qualified Unison.ConstructorType as CT -import Unison.Util.List (multimap, validate) -import Unison.Var (Var) -import qualified Unison.Var as Var -import Unsafe.Coerce -import Unison.Symbol (Symbol) -import qualified Unison.Name as Name -import qualified Unison.LabeledDependency as LD -import Unison.LabeledDependency (LabeledDependency) - --- This gets reexported; should maybe live somewhere other than Pattern, though. -type ConstructorId = Pattern.ConstructorId - -data MatchCase loc a = MatchCase (Pattern loc) (Maybe a) a - deriving (Show,Eq,Foldable,Functor,Generic,Generic1,Traversable) - --- | Base functor for terms in the Unison language --- We need `typeVar` because the term and type variables may differ. -data F typeVar typeAnn patternAnn a - = Int Int64 - | Nat Word64 - | Float Double - | Boolean Bool - | Text Text - | Char Char - | Blank (B.Blank typeAnn) - | Ref Reference - -- First argument identifies the data type, - -- second argument identifies the constructor - | Constructor Reference ConstructorId - | Request Reference ConstructorId - | Handle a a - | App a a - | Ann a (Type typeVar typeAnn) - | Sequence (Seq a) - | If a a a - | And a a - | Or a a - | Lam a - -- Note: let rec blocks have an outer ABT.Cycle which introduces as many - -- variables as there are bindings - | LetRec IsTop [a] a - -- Note: first parameter is the binding, second is the expression which may refer - -- to this let bound variable. 
Constructed as `Let b (abs v e)` - | Let IsTop a a - -- Pattern matching / eliminating data types, example: - -- case x of - -- Just n -> rhs1 - -- Nothing -> rhs2 - -- - -- translates to - -- - -- Match x - -- [ (Constructor 0 [Var], ABT.abs n rhs1) - -- , (Constructor 1 [], rhs2) ] - | Match a [MatchCase patternAnn a] - | TermLink Referent - | TypeLink Reference - deriving (Foldable,Functor,Generic,Generic1,Traversable) - -type IsTop = Bool - --- | Like `Term v`, but with an annotation of type `a` at every level in the tree -type Term v a = Term2 v a a v a --- | Allow type variables and term variables to differ -type Term' vt v a = Term2 vt a a v a --- | Allow type variables, term variables, type annotations and term annotations --- to all differ -type Term2 vt at ap v a = ABT.Term (F vt at ap) v a --- | Like `Term v a`, but with only () for type and pattern annotations. -type Term3 v a = Term2 v () () v a - --- | Terms are represented as ABTs over the base functor F, with variables in `v` -type Term0 v = Term v () --- | Terms with type variables in `vt`, and term variables in `v` -type Term0' vt v = Term' vt v () - --- bindExternals --- :: forall v a b b2 --- . Var v --- => [(v, Term2 v b a v b2)] --- -> [(v, Reference)] --- -> Term2 v b a v a --- -> Term2 v b a v a --- bindBuiltins termBuiltins typeBuiltins = f . g --- where --- f :: Term2 v b a v a -> Term2 v b a v a --- f = typeMap (Type.bindBuiltins typeBuiltins) --- g :: Term2 v b a v a -> Term2 v b a v a --- g = ABT.substsInheritAnnotation termBuiltins -bindNames - :: forall v a . 
Var v - => Set v - -> Names0 - -> Term v a - -> Names.ResolutionResult v a (Term v a) --- bindNames keepFreeTerms _ _ | trace "Keep free terms:" False --- || traceShow keepFreeTerms False = undefined -bindNames keepFreeTerms ns e = do - let freeTmVars = [ (v,a) | (v,a) <- ABT.freeVarOccurrences keepFreeTerms e ] - -- !_ = trace "free term vars: " () - -- !_ = traceShow $ fst <$> freeTmVars - freeTyVars = [ (v, a) | (v,as) <- Map.toList (freeTypeVarAnnotations e) - , a <- as ] - -- !_ = trace "free type vars: " () - -- !_ = traceShow $ fst <$> freeTyVars - okTm :: (v,a) -> Names.ResolutionResult v a (v, Term v a) - okTm (v,a) = case Rel.lookupDom (Name.fromVar v) (Names.terms0 ns) of - rs | Set.size rs == 1 -> - pure (v, fromReferent a $ Set.findMin rs) - | otherwise -> Left (pure (Names.TermResolutionFailure v a rs)) - okTy (v,a) = case Rel.lookupDom (Name.fromVar v) (Names.types0 ns) of - rs | Set.size rs == 1 -> pure (v, Type.ref a $ Set.findMin rs) - | otherwise -> Left (pure (Names.TypeResolutionFailure v a rs)) - termSubsts <- validate okTm freeTmVars - typeSubsts <- validate okTy freeTyVars - pure . substTypeVars typeSubsts . ABT.substsInheritAnnotation termSubsts $ e - -bindSomeNames - :: forall v a . Var v - => Names0 - -> Term v a - -> Names.ResolutionResult v a (Term v a) --- bindSomeNames ns e | trace "Term.bindSome" False --- || trace "Names =" False --- || traceShow ns False --- || trace "Free type vars:" False --- || traceShow (freeTypeVars e) False --- || traceShow e False --- = undefined -bindSomeNames ns e = bindNames keepFree ns e where - keepFree = Set.difference (freeVars e) - (Set.map Name.toVar $ Rel.dom (Names.terms0 ns)) - --- Prepare a term for type-directed name resolution by replacing --- any remaining free variables with blanks to be resolved by TDNR -prepareTDNR :: Var v => ABT.Term (F vt b ap) v b -> ABT.Term (F vt b ap) v b -prepareTDNR t = fmap fst . 
ABT.visitPure f $ ABT.annotateBound t - where f (ABT.Term _ (a, bound) (ABT.Var v)) | Set.notMember v bound = - Just $ resolve (a, bound) a (Text.unpack $ Var.name v) - f _ = Nothing - -amap :: Ord v => (a -> a2) -> Term v a -> Term v a2 -amap f = fmap f . patternMap (fmap f) . typeMap (fmap f) - -patternMap :: (Pattern ap -> Pattern ap2) -> Term2 vt at ap v a -> Term2 vt at ap2 v a -patternMap f = go where - go (ABT.Term fvs a t) = ABT.Term fvs a $ case t of - ABT.Abs v t -> ABT.Abs v (go t) - ABT.Var v -> ABT.Var v - ABT.Cycle t -> ABT.Cycle (go t) - ABT.Tm (Match e cases) -> ABT.Tm (Match (go e) [ - MatchCase (f p) (go <$> g) (go a) | MatchCase p g a <- cases ]) - -- Safe since `Match` is only ctor that has embedded `Pattern ap` arg - ABT.Tm ts -> unsafeCoerce $ ABT.Tm (fmap go ts) - -vmap :: Ord v2 => (v -> v2) -> Term v a -> Term v2 a -vmap f = ABT.vmap f . typeMap (ABT.vmap f) - -vtmap :: Ord vt2 => (vt -> vt2) -> Term' vt v a -> Term' vt2 v a -vtmap f = typeMap (ABT.vmap f) - -typeMap - :: Ord vt2 - => (Type vt at -> Type vt2 at2) - -> Term2 vt at ap v a - -> Term2 vt2 at2 ap v a -typeMap f = go - where - go (ABT.Term fvs a t) = ABT.Term fvs a $ case t of - ABT.Abs v t -> ABT.Abs v (go t) - ABT.Var v -> ABT.Var v - ABT.Cycle t -> ABT.Cycle (go t) - ABT.Tm (Ann e t) -> ABT.Tm (Ann (go e) (f t)) - -- Safe since `Ann` is only ctor that has embedded `Type v` arg - -- otherwise we'd have to manually match on every non-`Ann` ctor - ABT.Tm ts -> unsafeCoerce $ ABT.Tm (fmap go ts) - -extraMap' - :: (Ord vt, Ord vt') - => (vt -> vt') - -> (at -> at') - -> (ap -> ap') - -> Term2 vt at ap v a - -> Term2 vt' at' ap' v a -extraMap' vtf atf apf = ABT.extraMap (extraMap vtf atf apf) - -extraMap - :: (Ord vt, Ord vt') - => (vt -> vt') - -> (at -> at') - -> (ap -> ap') - -> F vt at ap a - -> F vt' at' ap' a -extraMap vtf atf apf = \case - Int x -> Int x - Nat x -> Nat x - Float x -> Float x - Boolean x -> Boolean x - Text x -> Text x - Char x -> Char x - Blank x -> Blank 
(fmap atf x) - Ref x -> Ref x - Constructor x y -> Constructor x y - Request x y -> Request x y - Handle x y -> Handle x y - App x y -> App x y - Ann tm x -> Ann tm (ABT.amap atf (ABT.vmap vtf x)) - Sequence x -> Sequence x - If x y z -> If x y z - And x y -> And x y - Or x y -> Or x y - Lam x -> Lam x - LetRec x y z -> LetRec x y z - Let x y z -> Let x y z - Match tm l -> Match tm (map (matchCaseExtraMap apf) l) - TermLink r -> TermLink r - TypeLink r -> TypeLink r - -matchCaseExtraMap :: (loc -> loc') -> MatchCase loc a -> MatchCase loc' a -matchCaseExtraMap f (MatchCase p x y) = MatchCase (fmap f p) x y - -unannotate - :: forall vt at ap v a . Ord v => Term2 vt at ap v a -> Term0' vt v -unannotate = go - where - go :: Term2 vt at ap v a -> Term0' vt v - go (ABT.out -> ABT.Abs v body) = ABT.abs v (go body) - go (ABT.out -> ABT.Cycle body) = ABT.cycle (go body) - go (ABT.Var' v ) = ABT.var v - go (ABT.Tm' f ) = case go <$> f of - Ann e t -> ABT.tm (Ann e (void t)) - Match scrutinee branches -> - let unann (MatchCase pat guard body) = MatchCase (void pat) guard body - in ABT.tm (Match scrutinee (unann <$> branches)) - f' -> ABT.tm (unsafeCoerce f') - go _ = error "unpossible" - -wrapV :: Ord v => Term v a -> Term (ABT.V v) a -wrapV = vmap ABT.Bound - --- | All variables mentioned in the given term. --- Includes both term and type variables, both free and bound. 
-allVars :: Ord v => Term v a -> Set v -allVars tm = Set.fromList $ - ABT.allVars tm ++ [ v | tp <- allTypes tm, v <- ABT.allVars tp ] - where - allTypes tm = case tm of - Ann' e tp -> tp : allTypes e - _ -> foldMap allTypes $ ABT.out tm - -freeVars :: Term' vt v a -> Set v -freeVars = ABT.freeVars - -freeTypeVars :: Ord vt => Term' vt v a -> Set vt -freeTypeVars t = Map.keysSet $ freeTypeVarAnnotations t - -freeTypeVarAnnotations :: Ord vt => Term' vt v a -> Map vt [a] -freeTypeVarAnnotations e = multimap $ go Set.empty e where - go bound tm = case tm of - Var' _ -> mempty - Ann' e (Type.stripIntroOuters -> t1) -> let - bound' = case t1 of Type.ForallsNamed' vs _ -> bound <> Set.fromList vs - _ -> bound - in go bound' e <> ABT.freeVarOccurrences bound t1 - ABT.Tm' f -> foldMap (go bound) f - (ABT.out -> ABT.Abs _ body) -> go bound body - (ABT.out -> ABT.Cycle body) -> go bound body - _ -> error "unpossible" - -substTypeVars :: (Ord v, Var vt) - => [(vt, Type vt b)] - -> Term' vt v a - -> Term' vt v a -substTypeVars subs e = foldl' go e subs where - go e (vt, t) = substTypeVar vt t e - --- Capture-avoiding substitution of a type variable inside a term. This --- will replace that type variable wherever it appears in type signatures of --- the term, avoiding capture by renaming ∀-binders. -substTypeVar - :: (Ord v, ABT.Var vt) - => vt - -> Type vt b - -> Term' vt v a - -> Term' vt v a -substTypeVar vt ty = go Set.empty where - go bound tm | Set.member vt bound = tm - go bound tm = let loc = ABT.annotation tm in case tm of - Var' _ -> tm - Ann' e t -> uncapture [] e (Type.stripIntroOuters t) where - fvs = ABT.freeVars ty - -- if the ∀ introduces a variable, v, which is free in `ty`, we pick a new - -- variable name for v which is unique, v', and rename v to v' in e. - uncapture vs e t@(Type.Forall' body) | Set.member (ABT.variable body) fvs = let - v = ABT.variable body - v2 = Var.freshIn (ABT.freeVars t) . 
Var.freshIn (Set.insert vt fvs) $ v - t2 = ABT.bindInheritAnnotation body (Type.var() v2) - in uncapture ((ABT.annotation t, v2):vs) (renameTypeVar v v2 e) t2 - uncapture vs e t0 = let - t = foldl (\body (loc,v) -> Type.forall loc v body) t0 vs - bound' = case Type.unForalls (Type.stripIntroOuters t) of - Nothing -> bound - Just (vs, _) -> bound <> Set.fromList vs - t' = ABT.substInheritAnnotation vt ty (Type.stripIntroOuters t) - in ann loc (go bound' e) (Type.freeVarsToOuters bound t') - ABT.Tm' f -> ABT.tm' loc (go bound <$> f) - (ABT.out -> ABT.Abs v body) -> ABT.abs' loc v (go bound body) - (ABT.out -> ABT.Cycle body) -> ABT.cycle' loc (go bound body) - _ -> error "unpossible" - -renameTypeVar :: (Ord v, ABT.Var vt) => vt -> vt -> Term' vt v a -> Term' vt v a -renameTypeVar old new = go Set.empty where - go bound tm | Set.member old bound = tm - go bound tm = let loc = ABT.annotation tm in case tm of - Var' _ -> tm - Ann' e t -> let - bound' = case Type.unForalls (Type.stripIntroOuters t) of - Nothing -> bound - Just (vs, _) -> bound <> Set.fromList vs - t' = ABT.rename old new (Type.stripIntroOuters t) - in ann loc (go bound' e) (Type.freeVarsToOuters bound t') - ABT.Tm' f -> ABT.tm' loc (go bound <$> f) - (ABT.out -> ABT.Abs v body) -> ABT.abs' loc v (go bound body) - (ABT.out -> ABT.Cycle body) -> ABT.cycle' loc (go bound body) - _ -> error "unpossible" - --- Converts free variables to bound variables using forall or introOuter. Example: --- --- foo : x -> x --- foo a = --- r : x --- r = a --- r --- --- This becomes: --- --- foo : ∀ x . x -> x --- foo a = --- r : outer x . x -- FYI, not valid syntax --- r = a --- r --- --- More specifically: in the expression `e : t`, unbound lowercase variables in `t` --- are bound with foralls, and any ∀-quantified type variables are made bound in --- `e` and its subexpressions. 
The result is a term with no lowercase free --- variables in any of its type signatures, with outer references represented --- with explicit `introOuter` binders. The resulting term may have uppercase --- free variables that are still unbound. -generalizeTypeSignatures :: (Var vt, Var v) => Term' vt v a -> Term' vt v a -generalizeTypeSignatures = go Set.empty where - go bound tm = let loc = ABT.annotation tm in case tm of - Var' _ -> tm - Ann' e (Type.generalizeLowercase bound -> t) -> let - bound' = case Type.unForalls t of - Nothing -> bound - Just (vs, _) -> bound <> Set.fromList vs - in ann loc (go bound' e) (Type.freeVarsToOuters bound t) - ABT.Tm' f -> ABT.tm' loc (go bound <$> f) - (ABT.out -> ABT.Abs v body) -> ABT.abs' loc v (go bound body) - (ABT.out -> ABT.Cycle body) -> ABT.cycle' loc (go bound body) - _ -> error "unpossible" - --- nicer pattern syntax - -pattern Var' v <- ABT.Var' v -pattern Cycle' xs t <- ABT.Cycle' xs t -pattern Abs' subst <- ABT.Abs' subst -pattern Int' n <- (ABT.out -> ABT.Tm (Int n)) -pattern Nat' n <- (ABT.out -> ABT.Tm (Nat n)) -pattern Float' n <- (ABT.out -> ABT.Tm (Float n)) -pattern Boolean' b <- (ABT.out -> ABT.Tm (Boolean b)) -pattern Text' s <- (ABT.out -> ABT.Tm (Text s)) -pattern Char' c <- (ABT.out -> ABT.Tm (Char c)) -pattern Blank' b <- (ABT.out -> ABT.Tm (Blank b)) -pattern Ref' r <- (ABT.out -> ABT.Tm (Ref r)) -pattern TermLink' r <- (ABT.out -> ABT.Tm (TermLink r)) -pattern TypeLink' r <- (ABT.out -> ABT.Tm (TypeLink r)) -pattern Builtin' r <- (ABT.out -> ABT.Tm (Ref (Builtin r))) -pattern App' f x <- (ABT.out -> ABT.Tm (App f x)) -pattern Match' scrutinee branches <- (ABT.out -> ABT.Tm (Match scrutinee branches)) -pattern Constructor' ref n <- (ABT.out -> ABT.Tm (Constructor ref n)) -pattern Request' ref n <- (ABT.out -> ABT.Tm (Request ref n)) -pattern RequestOrCtor' ref n <- (unReqOrCtor -> Just (ref, n)) -pattern If' cond t f <- (ABT.out -> ABT.Tm (If cond t f)) -pattern And' x y <- (ABT.out -> ABT.Tm (And x 
y)) -pattern Or' x y <- (ABT.out -> ABT.Tm (Or x y)) -pattern Handle' h body <- (ABT.out -> ABT.Tm (Handle h body)) -pattern Apps' f args <- (unApps -> Just (f, args)) --- begin pretty-printer helper patterns -pattern AppsPred' f args <- (unAppsPred -> Just (f, args)) -pattern BinaryApp' f arg1 arg2 <- (unBinaryApp -> Just (f, arg1, arg2)) -pattern BinaryApps' apps lastArg <- (unBinaryApps -> Just (apps, lastArg)) -pattern BinaryAppsPred' apps lastArg <- (unBinaryAppsPred -> Just (apps, lastArg)) --- end pretty-printer helper patterns -pattern Ann' x t <- (ABT.out -> ABT.Tm (Ann x t)) -pattern Sequence' xs <- (ABT.out -> ABT.Tm (Sequence xs)) -pattern Lam' subst <- ABT.Tm' (Lam (ABT.Abs' subst)) -pattern LamNamed' v body <- (ABT.out -> ABT.Tm (Lam (ABT.Term _ _ (ABT.Abs v body)))) -pattern LamsNamed' vs body <- (unLams' -> Just (vs, body)) -pattern LamsNamedOpt' vs body <- (unLamsOpt' -> Just (vs, body)) -pattern LamsNamedPred' vs body <- (unLamsPred' -> Just (vs, body)) -pattern LamsNamedOrDelay' vs body <- (unLamsUntilDelay' -> Just (vs, body)) -pattern LamsNamedMatch' vs branches <- (unLamsMatch' -> Just (vs, branches)) -pattern Let1' b subst <- (unLet1 -> Just (_, b, subst)) -pattern Let1Top' top b subst <- (unLet1 -> Just (top, b, subst)) -pattern Let1Named' v b e <- (ABT.Tm' (Let _ b (ABT.out -> ABT.Abs v e))) -pattern Let1NamedTop' top v b e <- (ABT.Tm' (Let top b (ABT.out -> ABT.Abs v e))) -pattern Lets' bs e <- (unLet -> Just (bs, e)) -pattern LetRecNamed' bs e <- (unLetRecNamed -> Just (_,bs,e)) -pattern LetRecNamedTop' top bs e <- (unLetRecNamed -> Just (top,bs,e)) -pattern LetRec' subst <- (unLetRec -> Just (_, subst)) -pattern LetRecTop' top subst <- (unLetRec -> Just (top, subst)) -pattern LetRecNamedAnnotated' ann bs e <- (unLetRecNamedAnnotated -> Just (_, ann, bs,e)) -pattern LetRecNamedAnnotatedTop' top ann bs e <- - (unLetRecNamedAnnotated -> Just (top, ann, bs,e)) - -fresh :: Var v => Term0 v -> v -> v -fresh = ABT.fresh - --- some smart 
constructors - -var :: a -> v -> Term2 vt at ap v a -var = ABT.annotatedVar - -var' :: Var v => Text -> Term0' vt v -var' = var() . Var.named - -ref :: Ord v => a -> Reference -> Term2 vt at ap v a -ref a r = ABT.tm' a (Ref r) - -refId :: Ord v => a -> Reference.Id -> Term2 vt at ap v a -refId a = ref a . Reference.DerivedId - -termLink :: Ord v => a -> Referent -> Term2 vt at ap v a -termLink a r = ABT.tm' a (TermLink r) - -typeLink :: Ord v => a -> Reference -> Term2 vt at ap v a -typeLink a r = ABT.tm' a (TypeLink r) - -builtin :: Ord v => a -> Text -> Term2 vt at ap v a -builtin a n = ref a (Reference.Builtin n) - -float :: Ord v => a -> Double -> Term2 vt at ap v a -float a d = ABT.tm' a (Float d) - -boolean :: Ord v => a -> Bool -> Term2 vt at ap v a -boolean a b = ABT.tm' a (Boolean b) - -int :: Ord v => a -> Int64 -> Term2 vt at ap v a -int a d = ABT.tm' a (Int d) - -nat :: Ord v => a -> Word64 -> Term2 vt at ap v a -nat a d = ABT.tm' a (Nat d) - -text :: Ord v => a -> Text -> Term2 vt at ap v a -text a = ABT.tm' a . Text - -char :: Ord v => a -> Char -> Term2 vt at ap v a -char a = ABT.tm' a . Char - -watch :: (Var v, Semigroup a) => a -> String -> Term v a -> Term v a -watch a note e = - apps' (builtin a "Debug.watch") [text a (Text.pack note), e] - -watchMaybe :: (Var v, Semigroup a) => Maybe String -> Term v a -> Term v a -watchMaybe Nothing e = e -watchMaybe (Just note) e = watch (ABT.annotation e) note e - -blank :: Ord v => a -> Term2 vt at ap v a -blank a = ABT.tm' a (Blank B.Blank) - -placeholder :: Ord v => a -> String -> Term2 vt a ap v a -placeholder a s = ABT.tm' a . Blank $ B.Recorded (B.Placeholder a s) - -resolve :: Ord v => at -> ab -> String -> Term2 vt ab ap v at -resolve at ab s = ABT.tm' at . 
Blank $ B.Recorded (B.Resolve ab s) - -constructor :: Ord v => a -> Reference -> ConstructorId -> Term2 vt at ap v a -constructor a ref n = ABT.tm' a (Constructor ref n) - -request :: Ord v => a -> Reference -> ConstructorId -> Term2 vt at ap v a -request a ref n = ABT.tm' a (Request ref n) - --- todo: delete and rename app' to app -app_ :: Ord v => Term0' vt v -> Term0' vt v -> Term0' vt v -app_ f arg = ABT.tm (App f arg) - -app :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a -app a f arg = ABT.tm' a (App f arg) - -match :: Ord v => a -> Term2 vt at a v a -> [MatchCase a (Term2 vt at a v a)] -> Term2 vt at a v a -match a scrutinee branches = ABT.tm' a (Match scrutinee branches) - -handle :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a -handle a h block = ABT.tm' a (Handle h block) - -and :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a -and a x y = ABT.tm' a (And x y) - -or :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a -or a x y = ABT.tm' a (Or x y) - -seq :: Ord v => a -> [Term2 vt at ap v a] -> Term2 vt at ap v a -seq a es = seq' a (Sequence.fromList es) - -seq' :: Ord v => a -> Seq (Term2 vt at ap v a) -> Term2 vt at ap v a -seq' a es = ABT.tm' a (Sequence es) - -apps - :: Ord v - => Term2 vt at ap v a - -> [(a, Term2 vt at ap v a)] - -> Term2 vt at ap v a -apps = foldl' (\f (a, t) -> app a f t) - -apps' - :: (Ord v, Semigroup a) - => Term2 vt at ap v a - -> [Term2 vt at ap v a] - -> Term2 vt at ap v a -apps' = foldl' (\f t -> app (ABT.annotation f <> ABT.annotation t) f t) - -iff :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a -iff a cond t f = ABT.tm' a (If cond t f) - -ann_ :: Ord v => Term0' vt v -> Type vt () -> Term0' vt v -ann_ e t = ABT.tm (Ann e t) - -ann :: Ord v - => a - -> Term2 vt at ap v a - -> Type vt at - -> Term2 vt at ap v a -ann a e t = ABT.tm' a (Ann e t) - --- 
arya: are we sure we want the two annotations to be the same? -lam :: Ord v => a -> v -> Term2 vt at ap v a -> Term2 vt at ap v a -lam a v body = ABT.tm' a (Lam (ABT.abs' a v body)) - -lam' :: Ord v => a -> [v] -> Term2 vt at ap v a -> Term2 vt at ap v a -lam' a vs body = foldr (lam a) body vs - -lam'' :: Ord v => [(a,v)] -> Term2 vt at ap v a -> Term2 vt at ap v a -lam'' vs body = foldr (uncurry lam) body vs - -isLam :: Term2 vt at ap v a -> Bool -isLam t = arity t > 0 - -arity :: Term2 vt at ap v a -> Int -arity (LamNamed' _ body) = 1 + arity body -arity (Ann' e _) = arity e -arity _ = 0 - -unLetRecNamedAnnotated - :: Term' vt v a - -> Maybe - (IsTop, a, [((a, v), Term' vt v a)], Term' vt v a) -unLetRecNamedAnnotated (ABT.CycleA' ann avs (ABT.Tm' (LetRec isTop bs e))) = - Just (isTop, ann, avs `zip` bs, e) -unLetRecNamedAnnotated _ = Nothing - -letRec' - :: (Ord v, Monoid a) - => Bool - -> [(v, Term' vt v a)] - -> Term' vt v a - -> Term' vt v a -letRec' isTop bindings body = - letRec isTop - (foldMap (ABT.annotation . snd) bindings <> ABT.annotation body) - [ ((ABT.annotation b, v), b) | (v,b) <- bindings ] - body - -letRec - :: Ord v - => Bool - -> a - -> [((a, v), Term' vt v a)] - -> Term' vt v a - -> Term' vt v a -letRec _ _ [] e = e -letRec isTop a bindings e = ABT.cycle' - a - (foldr (uncurry ABT.abs' . fst) z bindings) - where z = ABT.tm' a (LetRec isTop (map snd bindings) e) - - --- | Smart constructor for let rec blocks. Each binding in the block may --- reference any other binding in the block in its body (including itself), --- and the output expression may also reference any binding in the block. -letRec_ :: Ord v => IsTop -> [(v, Term0' vt v)] -> Term0' vt v -> Term0' vt v -letRec_ _ [] e = e -letRec_ isTop bindings e = ABT.cycle (foldr (ABT.abs . fst) z bindings) - where - z = ABT.tm (LetRec isTop (map snd bindings) e) - --- | Smart constructor for let blocks. 
Each binding in the block may --- reference only previous bindings in the block, not including itself. --- The output expression may reference any binding in the block. --- todo: delete me -let1_ :: Ord v => IsTop -> [(v,Term0' vt v)] -> Term0' vt v -> Term0' vt v -let1_ isTop bindings e = foldr f e bindings - where - f (v,b) body = ABT.tm (Let isTop b (ABT.abs v body)) - --- | annotations are applied to each nested Let expression -let1 - :: Ord v - => IsTop - -> [((a, v), Term2 vt at ap v a)] - -> Term2 vt at ap v a - -> Term2 vt at ap v a -let1 isTop bindings e = foldr f e bindings - where f ((ann, v), b) body = ABT.tm' ann (Let isTop b (ABT.abs' ann v body)) - -let1' - :: (Semigroup a, Ord v) - => IsTop - -> [(v, Term2 vt at ap v a)] - -> Term2 vt at ap v a - -> Term2 vt at ap v a -let1' isTop bindings e = foldr f e bindings - where - ann = ABT.annotation - f (v, b) body = ABT.tm' a (Let isTop b (ABT.abs' a v body)) - where a = ann b <> ann body - --- let1' :: Var v => [(Text, Term0 vt v)] -> Term0 vt v -> Term0 vt v --- let1' bs e = let1 [(ABT.v' name, b) | (name,b) <- bs ] e - -unLet1 - :: Var v - => Term' vt v a - -> Maybe (IsTop, Term' vt v a, ABT.Subst (F vt a a) v a) -unLet1 (ABT.Tm' (Let isTop b (ABT.Abs' subst))) = Just (isTop, b, subst) -unLet1 _ = Nothing - --- | Satisfies `unLet (let' bs e) == Just (bs, e)` -unLet - :: Term2 vt at ap v a - -> Maybe ([(IsTop, v, Term2 vt at ap v a)], Term2 vt at ap v a) -unLet t = fixup (go t) - where - go (ABT.Tm' (Let isTop b (ABT.out -> ABT.Abs v t))) = case go t of - (env, t) -> ((isTop, v, b) : env, t) - go t = ([], t) - fixup ([], _) = Nothing - fixup bst = Just bst - --- | Satisfies `unLetRec (letRec bs e) == Just (bs, e)` -unLetRecNamed - :: Term2 vt at ap v a - -> Maybe - ( IsTop - , [(v, Term2 vt at ap v a)] - , Term2 vt at ap v a - ) -unLetRecNamed (ABT.Cycle' vs (ABT.Tm' (LetRec isTop bs e))) - | length vs == length bs = Just (isTop, zip vs bs, e) -unLetRecNamed _ = Nothing - -unLetRec - :: (Monad m, Var v) 
- => Term2 vt at ap v a - -> Maybe - ( IsTop - , (v -> m v) - -> m - ( [(v, Term2 vt at ap v a)] - , Term2 vt at ap v a - ) - ) -unLetRec (unLetRecNamed -> Just (isTop, bs, e)) = Just - ( isTop - , \freshen -> do - vs <- sequence [ freshen v | (v, _) <- bs ] - let sub = ABT.substsInheritAnnotation (map fst bs `zip` map ABT.var vs) - pure (vs `zip` [ sub b | (_, b) <- bs ], sub e) - ) -unLetRec _ = Nothing - -unApps - :: Term2 vt at ap v a - -> Maybe (Term2 vt at ap v a, [Term2 vt at ap v a]) -unApps t = unAppsPred (t, const True) - --- Same as unApps but taking a predicate controlling whether we match on a given function argument. -unAppsPred :: (Term2 vt at ap v a, Term2 vt at ap v a -> Bool) -> - Maybe (Term2 vt at ap v a, [Term2 vt at ap v a]) -unAppsPred (t, pred) = case go t [] of [] -> Nothing; f:args -> Just (f,args) - where - go (App' i o) acc | pred o = go i (o:acc) - go _ [] = [] - go fn args = fn:args - -unBinaryApp :: Term2 vt at ap v a - -> Maybe (Term2 vt at ap v a, - Term2 vt at ap v a, - Term2 vt at ap v a) -unBinaryApp t = case unApps t of - Just (f, [arg1, arg2]) -> Just (f, arg1, arg2) - _ -> Nothing - --- "((a1 `f1` a2) `f2` a3)" becomes "Just ([(a2, f2), (a1, f1)], a3)" -unBinaryApps - :: Term2 vt at ap v a - -> Maybe - ( [(Term2 vt at ap v a, Term2 vt at ap v a)] - , Term2 vt at ap v a - ) -unBinaryApps t = unBinaryAppsPred (t, const True) - --- Same as unBinaryApps but taking a predicate controlling whether we match on a given binary function. 
-unBinaryAppsPred :: (Term2 vt at ap v a - ,Term2 vt at ap v a -> Bool) - -> Maybe ([(Term2 vt at ap v a, - Term2 vt at ap v a)], - Term2 vt at ap v a) -unBinaryAppsPred (t, pred) = case unBinaryApp t of - Just (f, x, y) | pred f -> case unBinaryAppsPred (x, pred) of - Just (as, xLast) -> Just ((xLast, f) : as, y) - Nothing -> Just ([(x, f)], y) - _ -> Nothing - -unLams' - :: Term2 vt at ap v a -> Maybe ([v], Term2 vt at ap v a) -unLams' t = unLamsPred' (t, const True) - --- Same as unLams', but always matches. Returns an empty [v] if the term doesn't start with a --- lambda extraction. -unLamsOpt' :: Term2 vt at ap v a -> Maybe ([v], Term2 vt at ap v a) -unLamsOpt' t = case unLams' t of - r@(Just _) -> r - Nothing -> Just ([], t) - --- Same as unLams', but stops at any variable named `()`, which indicates a --- delay (`'`) annotation which we want to preserve. -unLamsUntilDelay' - :: Var v - => Term2 vt at ap v a - -> Maybe ([v], Term2 vt at ap v a) -unLamsUntilDelay' t = case unLamsPred' (t, (/=) $ Var.named "()") of - r@(Just _) -> r - Nothing -> Just ([], t) - --- Same as unLamsUntilDelay', but only matches if the lambda body is a match --- expression, where the scrutinee is also the last argument of the lambda -unLamsMatch' - :: Var v - => Term2 vt at ap v a - -> Maybe ([v], [MatchCase ap (Term2 vt at ap v a)]) -unLamsMatch' t = case unLamsUntilDelay' t of - Just (reverse -> (v1:vs), Match' (Var' v1') branches) | - (v1 == v1') && not (Set.member v1' (Set.unions $ freeVars <$> branches)) -> - Just (reverse vs, branches) - _ -> Nothing - where - freeVars (MatchCase _ g rhs) = - let guardVars = (fromMaybe Set.empty $ ABT.freeVars <$> g) - rhsVars = (ABT.freeVars rhs) - in Set.union guardVars rhsVars - --- Same as unLams' but taking a predicate controlling whether we match on a given binary function. 
-unLamsPred' :: (Term2 vt at ap v a, v -> Bool) -> - Maybe ([v], Term2 vt at ap v a) -unLamsPred' (LamNamed' v body, pred) | pred v = case unLamsPred' (body, pred) of - Nothing -> Just ([v], body) - Just (vs, body) -> Just (v:vs, body) -unLamsPred' _ = Nothing - -unReqOrCtor :: Term2 vt at ap v a -> Maybe (Reference, ConstructorId) -unReqOrCtor (Constructor' r cid) = Just (r, cid) -unReqOrCtor (Request' r cid) = Just (r, cid) -unReqOrCtor _ = Nothing - --- Dependencies including referenced data and effect decls -dependencies :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference -dependencies t = Set.map (LD.fold id Referent.toReference) (labeledDependencies t) - -typeDependencies :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference -typeDependencies = - Set.fromList . mapMaybe (LD.fold Just (const Nothing)) . toList . labeledDependencies - --- Gets the types to which this term contains references via patterns and --- data constructors. -constructorDependencies - :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference -constructorDependencies = - Set.unions - . generalizedDependencies (const mempty) - (const mempty) - Set.singleton - (const . Set.singleton) - Set.singleton - (const . Set.singleton) - Set.singleton - -generalizedDependencies - :: (Ord v, Ord vt, Ord r) - => (Reference -> r) - -> (Reference -> r) - -> (Reference -> r) - -> (Reference -> ConstructorId -> r) - -> (Reference -> r) - -> (Reference -> ConstructorId -> r) - -> (Reference -> r) - -> Term2 vt at ap v a - -> Set r -generalizedDependencies termRef typeRef literalType dataConstructor dataType effectConstructor effectType - = Set.fromList . Writer.execWriter . 
ABT.visit' f where - f t@(Ref r) = Writer.tell [termRef r] $> t - f t@(TermLink r) = case r of - Referent.Ref r -> Writer.tell [termRef r] $> t - Referent.Con r id CT.Data -> Writer.tell [dataConstructor r id] $> t - Referent.Con r id CT.Effect -> Writer.tell [effectConstructor r id] $> t - f t@(TypeLink r) = Writer.tell [typeRef r] $> t - f t@(Ann _ typ) = - Writer.tell (map typeRef . toList $ Type.dependencies typ) $> t - f t@(Nat _) = Writer.tell [literalType Type.natRef] $> t - f t@(Int _) = Writer.tell [literalType Type.intRef] $> t - f t@(Float _) = Writer.tell [literalType Type.floatRef] $> t - f t@(Boolean _) = Writer.tell [literalType Type.booleanRef] $> t - f t@(Text _) = Writer.tell [literalType Type.textRef] $> t - f t@(Sequence _) = Writer.tell [literalType Type.vectorRef] $> t - f t@(Constructor r cid) = - Writer.tell [dataType r, dataConstructor r cid] $> t - f t@(Request r cid) = - Writer.tell [effectType r, effectConstructor r cid] $> t - f t@(Match _ cases) = traverse_ goPat cases $> t - f t = pure t - goPat (MatchCase pat _ _) = - Writer.tell . 
toList $ Pattern.generalizedDependencies literalType - dataConstructor - dataType - effectConstructor - effectType - pat - -labeledDependencies - :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set LabeledDependency -labeledDependencies = generalizedDependencies LD.termRef - LD.typeRef - LD.typeRef - LD.dataConstructor - LD.typeRef - LD.effectConstructor - LD.typeRef - -updateDependencies - :: Ord v - => Map Reference Reference - -> Map Reference Reference - -> Term v a - -> Term v a -updateDependencies termUpdates typeUpdates = ABT.rebuildUp go - where - -- todo: this function might need tweaking if we ever allow type replacements - -- would need to look inside pattern matching and constructor calls - go (Ref r ) = Ref (Map.findWithDefault r r termUpdates) - go (TermLink (Referent.Ref r)) = TermLink (Referent.Ref $ Map.findWithDefault r r termUpdates) - go (TypeLink r) = TypeLink (Map.findWithDefault r r typeUpdates) - go (Ann tm tp) = Ann tm $ Type.updateDependencies typeUpdates tp - go f = f - --- | If the outermost term is a function application, --- perform substitution of the argument into the body -betaReduce :: Var v => Term0 v -> Term0 v -betaReduce (App' (Lam' f) arg) = ABT.bind f arg -betaReduce e = e - -betaNormalForm :: Var v => Term0 v -> Term0 v -betaNormalForm (App' f a) = betaNormalForm (betaReduce (app() (betaNormalForm f) a)) -betaNormalForm e = e - --- x -> f x => f -etaNormalForm :: Eq v => Term0 v -> Term0 v -etaNormalForm (LamNamed' v (App' f (Var' v'))) | v == v' = etaNormalForm f -etaNormalForm t = t - --- This converts `Reference`s it finds that are in the input `Map` --- back to free variables -unhashComponent :: forall v a. Var v - => Map Reference (Term v a) - -> Map Reference (v, Term v a) -unhashComponent m = let - usedVars = foldMap (Set.fromList . 
ABT.allVars) m - m' :: Map Reference (v, Term v a) - m' = evalState (Map.traverseWithKey assignVar m) usedVars where - assignVar r t = (,t) <$> ABT.freshenS (Var.refNamed r) - unhash1 = ABT.rebuildUp' go where - go e@(Ref' r) = case Map.lookup r m' of - Nothing -> e - Just (v, _) -> var (ABT.annotation e) v - go e = e - in second unhash1 <$> m' - -hashComponents - :: Var v => Map v (Term v a) -> Map v (Reference.Id, Term v a) -hashComponents = ReferenceUtil.hashComponents $ refId () - --- The hash for a constructor -hashConstructor' - :: (Reference -> ConstructorId -> Term0 Symbol) -> Reference -> ConstructorId -> Reference -hashConstructor' f r cid = - let --- this is a bit circuitous, but defining everything in terms of hashComponents --- ensure the hashing is always done in the same way - m = hashComponents (Map.fromList [(Var.named "_" :: Symbol, f r cid)]) - in case toList m of - [(r, _)] -> Reference.DerivedId r - _ -> error "unpossible" - -hashConstructor :: Reference -> ConstructorId -> Reference -hashConstructor = hashConstructor' $ constructor () - -hashRequest :: Reference -> ConstructorId -> Reference -hashRequest = hashConstructor' $ request () - -fromReferent :: Ord v - => a - -> Referent - -> Term2 vt at ap v a -fromReferent a = \case - Referent.Ref r -> ref a r - Referent.Con r i ct -> case ct of - CT.Data -> constructor a r i - CT.Effect -> request a r i - -instance Var v => Hashable1 (F v a p) where - hash1 hashCycle hash e - = let (tag, hashed, varint) = - (Hashable.Tag, Hashable.Hashed, Hashable.Nat . fromIntegral) - in - case e of - -- So long as `Reference.Derived` ctors are created using the same - -- hashing function as is used here, this case ensures that references - -- are 'transparent' wrt hash and hashing is unaffected by whether - -- expressions are linked. So for example `x = 1 + 1` and `y = x` hash - -- the same. 
- Ref (Reference.Derived h 0 1) -> Hashable.fromBytes (Hash.toBytes h) - Ref (Reference.Derived h i n) -> Hashable.accumulate - [ tag 1 - , hashed $ Hashable.fromBytes (Hash.toBytes h) - , Hashable.Nat i - , Hashable.Nat n - ] - -- Note: start each layer with leading `1` byte, to avoid collisions - -- with types, which start each layer with leading `0`. - -- See `Hashable1 Type.F` - _ -> - Hashable.accumulate - $ tag 1 - : case e of - Nat i -> [tag 64, accumulateToken i] - Int i -> [tag 65, accumulateToken i] - Float n -> [tag 66, Hashable.Double n] - Boolean b -> [tag 67, accumulateToken b] - Text t -> [tag 68, accumulateToken t] - Char c -> [tag 69, accumulateToken c] - Blank b -> tag 1 : case b of - B.Blank -> [tag 0] - B.Recorded (B.Placeholder _ s) -> - [tag 1, Hashable.Text (Text.pack s)] - B.Recorded (B.Resolve _ s) -> - [tag 2, Hashable.Text (Text.pack s)] - Ref (Reference.Builtin name) -> [tag 2, accumulateToken name] - Ref Reference.Derived {} -> - error "handled above, but GHC can't figure this out" - App a a2 -> [tag 3, hashed (hash a), hashed (hash a2)] - Ann a t -> [tag 4, hashed (hash a), hashed (ABT.hash t)] - Sequence as -> tag 5 : varint (Sequence.length as) : map - (hashed . hash) - (toList as) - Lam a -> [tag 6, hashed (hash a)] - -- note: we use `hashCycle` to ensure result is independent of - -- let binding order - LetRec _ as a -> case hashCycle as of - (hs, hash) -> tag 7 : hashed (hash a) : map hashed hs - -- here, order is significant, so don't use hashCycle - Let _ b a -> [tag 8, hashed $ hash b, hashed $ hash a] - If b t f -> - [tag 9, hashed $ hash b, hashed $ hash t, hashed $ hash f] - Request r n -> [tag 10, accumulateToken r, varint n] - Constructor r n -> [tag 12, accumulateToken r, varint n] - Match e branches -> - tag 13 : hashed (hash e) : concatMap h branches - where - h (MatchCase pat guard branch) = concat - [ [accumulateToken pat] - , toList (hashed . 
hash <$> guard) - , [hashed (hash branch)] - ] - Handle h b -> [tag 15, hashed $ hash h, hashed $ hash b] - And x y -> [tag 16, hashed $ hash x, hashed $ hash y] - Or x y -> [tag 17, hashed $ hash x, hashed $ hash y] - TermLink r -> [tag 18, accumulateToken r] - TypeLink r -> [tag 19, accumulateToken r] - _ -> - error $ "unhandled case in hash: " <> show (void e) - --- mostly boring serialization code below ... - -instance (Eq a, ABT.Var v) => Eq1 (F v a p) where (==#) = (==) -instance (Show v) => Show1 (F v a p) where showsPrec1 = showsPrec - -instance (ABT.Var vt, Eq at, Eq a) => Eq (F vt at p a) where - Int x == Int y = x == y - Nat x == Nat y = x == y - Float x == Float y = x == y - Boolean x == Boolean y = x == y - Text x == Text y = x == y - Char x == Char y = x == y - Blank b == Blank q = b == q - Ref x == Ref y = x == y - TermLink x == TermLink y = x == y - TypeLink x == TypeLink y = x == y - Constructor r cid == Constructor r2 cid2 = r == r2 && cid == cid2 - Request r cid == Request r2 cid2 = r == r2 && cid == cid2 - Handle h b == Handle h2 b2 = h == h2 && b == b2 - App f a == App f2 a2 = f == f2 && a == a2 - Ann e t == Ann e2 t2 = e == e2 && t == t2 - Sequence v == Sequence v2 = v == v2 - If a b c == If a2 b2 c2 = a == a2 && b == b2 && c == c2 - And a b == And a2 b2 = a == a2 && b == b2 - Or a b == Or a2 b2 = a == a2 && b == b2 - Lam a == Lam b = a == b - LetRec _ bs body == LetRec _ bs2 body2 = bs == bs2 && body == body2 - Let _ binding body == Let _ binding2 body2 = - binding == binding2 && body == body2 - Match scrutinee cases == Match s2 cs2 = scrutinee == s2 && cases == cs2 - _ == _ = False - - -instance (Show v, Show a) => Show (F v a0 p a) where - showsPrec = go - where - showConstructor r n = shows r <> s "#" <> shows n - go _ (Int n ) = (if n >= 0 then s "+" else s "") <> shows n - go _ (Nat n ) = shows n - go _ (Float n ) = shows n - go _ (Boolean True ) = s "true" - go _ (Boolean False) = s "false" - go p (Ann t k) = showParen (p > 1) $ shows t 
<> s ":" <> shows k - go p (App f x) = showParen (p > 9) $ showsPrec 9 f <> s " " <> showsPrec 10 x - go _ (Lam body ) = showParen True (s "λ " <> shows body) - go _ (Sequence vs ) = showListWith shows (toList vs) - go _ (Blank b ) = case b of - B.Blank -> s "_" - B.Recorded (B.Placeholder _ r) -> s ("_" ++ r) - B.Recorded (B.Resolve _ r) -> s r - go _ (Ref r) = s "Ref(" <> shows r <> s ")" - go _ (TermLink r) = s "TermLink(" <> shows r <> s ")" - go _ (TypeLink r) = s "TypeLink(" <> shows r <> s ")" - go _ (Let _ b body) = - showParen True (s "let " <> shows b <> s " in " <> shows body) - go _ (LetRec _ bs body) = showParen - True - (s "let rec" <> shows bs <> s " in " <> shows body) - go _ (Handle b body) = showParen - True - (s "handle " <> shows b <> s " in " <> shows body) - go _ (Constructor r n ) = showConstructor r n - go _ (Match scrutinee cases) = showParen - True - (s "case " <> shows scrutinee <> s " of " <> shows cases) - go _ (Text s ) = shows s - go _ (Char c ) = shows c - go _ (Request r n) = showConstructor r n - go p (If c t f) = - showParen (p > 0) - $ s "if " - <> shows c - <> s " then " - <> shows t - <> s " else " - <> shows f - go p (And x y) = - showParen (p > 0) $ s "and " <> shows x <> s " " <> shows y - go p (Or x y) = - showParen (p > 0) $ s "or " <> shows x <> s " " <> shows y - (<>) = (.) 
- s = showString diff --git a/unison-core/src/Unison/Type.hs b/unison-core/src/Unison/Type.hs deleted file mode 100644 index a7cbe8bff2..0000000000 --- a/unison-core/src/Unison/Type.hs +++ /dev/null @@ -1,645 +0,0 @@ -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE Rank2Types #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Type where - -import Unison.Prelude - -import qualified Control.Monad.Writer.Strict as Writer -import Data.Functor.Identity (runIdentity) -import Data.Monoid (Any(..)) -import Data.List.Extra (nubOrd) -import qualified Data.Map as Map -import qualified Data.Set as Set -import Prelude.Extras (Eq1(..),Show1(..),Ord1(..)) -import qualified Unison.ABT as ABT -import Unison.Hashable (Hashable1) -import qualified Unison.Hashable as Hashable -import qualified Unison.Kind as K -import Unison.Reference (Reference) -import qualified Unison.Reference as Reference -import qualified Unison.Reference.Util as ReferenceUtil -import Unison.Var (Var) -import qualified Unison.Var as Var -import qualified Unison.Settings as Settings -import qualified Unison.Util.Relation as R -import qualified Unison.Names3 as Names -import qualified Unison.Name as Name -import qualified Unison.Util.List as List - --- | Base functor for types in the Unison language -data F a - = Ref Reference - | Arrow a a - | Ann a K.Kind - | App a a - | Effect a a - | Effects [a] - | Forall a - | IntroOuter a -- binder like ∀, used to introduce variables that are - -- bound by outer type signatures, to support scoped type - -- variables - deriving (Foldable,Functor,Generic,Generic1,Eq,Ord,Traversable) - -instance Eq1 F where (==#) = (==) -instance Ord1 F where compare1 = compare -instance Show1 F where showsPrec1 = showsPrec - --- | Types are represented as ABTs over the base functor F, with variables in `v` -type Type v a = ABT.Term F v a - -wrapV :: Ord v => 
Type v a -> Type (ABT.V v) a -wrapV = ABT.vmap ABT.Bound - -freeVars :: Type v a -> Set v -freeVars = ABT.freeVars - -bindExternal - :: ABT.Var v => [(v, Reference)] -> Type v a -> Type v a -bindExternal bs = ABT.substsInheritAnnotation [ (v, ref () r) | (v, r) <- bs ] - -bindNames - :: Var v - => Set v - -> Names.Names0 - -> Type v a - -> Names.ResolutionResult v a (Type v a) -bindNames keepFree ns t = let - fvs = ABT.freeVarOccurrences keepFree t - rs = [(v, a, R.lookupDom (Name.fromVar v) (Names.types0 ns)) | (v,a) <- fvs ] - ok (v, a, rs) = if Set.size rs == 1 then pure (v, Set.findMin rs) - else Left (pure (Names.TypeResolutionFailure v a rs)) - in List.validate ok rs <&> \es -> bindExternal es t - -newtype Monotype v a = Monotype { getPolytype :: Type v a } deriving Eq - -instance (Show v) => Show (Monotype v a) where - show = show . getPolytype - --- Smart constructor which checks if a `Type` has no `Forall` quantifiers. -monotype :: ABT.Var v => Type v a -> Maybe (Monotype v a) -monotype t = Monotype <$> ABT.visit isMono t where - isMono (Forall' _) = Just Nothing - isMono _ = Nothing - -arity :: Type v a -> Int -arity (ForallNamed' _ body) = arity body -arity (Arrow' _ o) = 1 + arity o -arity (Ann' a _) = arity a -arity _ = 0 - --- some smart patterns -pattern Ref' r <- ABT.Tm' (Ref r) -pattern Arrow' i o <- ABT.Tm' (Arrow i o) -pattern Arrows' spine <- (unArrows -> Just spine) -pattern EffectfulArrows' fst rest <- (unEffectfulArrows -> Just (fst, rest)) -pattern Ann' t k <- ABT.Tm' (Ann t k) -pattern App' f x <- ABT.Tm' (App f x) -pattern Apps' f args <- (unApps -> Just (f, args)) -pattern Pure' t <- (unPure -> Just t) -pattern Effects' es <- ABT.Tm' (Effects es) --- Effect1' must match at least one effect -pattern Effect1' e t <- ABT.Tm' (Effect e t) -pattern Effect' es t <- (unEffects1 -> Just (es, t)) -pattern Effect'' es t <- (unEffect0 -> (es, t)) --- Effect0' may match zero effects -pattern Effect0' es t <- (unEffect0 -> (es, t)) -pattern Forall' 
subst <- ABT.Tm' (Forall (ABT.Abs' subst)) -pattern IntroOuter' subst <- ABT.Tm' (IntroOuter (ABT.Abs' subst)) -pattern IntroOuterNamed' v body <- ABT.Tm' (IntroOuter (ABT.out -> ABT.Abs v body)) -pattern ForallsNamed' vs body <- (unForalls -> Just (vs, body)) -pattern ForallNamed' v body <- ABT.Tm' (Forall (ABT.out -> ABT.Abs v body)) -pattern Var' v <- ABT.Var' v -pattern Cycle' xs t <- ABT.Cycle' xs t -pattern Abs' subst <- ABT.Abs' subst - -unPure :: Ord v => Type v a -> Maybe (Type v a) -unPure (Effect'' [] t) = Just t -unPure (Effect'' _ _) = Nothing -unPure t = Just t - -unArrows :: Type v a -> Maybe [Type v a] -unArrows t = - case go t of [_] -> Nothing; l -> Just l - where go (Arrow' i o) = i : go o - go o = [o] - -unEffectfulArrows - :: Type v a -> Maybe (Type v a, [(Maybe [Type v a], Type v a)]) -unEffectfulArrows t = case t of - Arrow' i o -> Just (i, go o) - _ -> Nothing - where - go (Effect1' (Effects' es) (Arrow' i o)) = - (Just $ es >>= flattenEffects, i) : go o - go (Effect1' (Effects' es) t) = [(Just $ es >>= flattenEffects, t)] - go (Arrow' i o) = (Nothing, i) : go o - go t = [(Nothing, t)] - -unApps :: Type v a -> Maybe (Type v a, [Type v a]) -unApps t = case go t [] of - [] -> Nothing - [ _ ] -> Nothing - f : args -> Just (f, args) - where - go (App' i o) acc = go i (o : acc) - go fn args = fn : args - -unIntroOuters :: Type v a -> Maybe ([v], Type v a) -unIntroOuters t = go t [] - where go (IntroOuterNamed' v body) vs = go body (v:vs) - go _body [] = Nothing - go body vs = Just (reverse vs, body) - --- Most code doesn't care about `introOuter` binders and is fine dealing with the --- these outer variable references as free variables. This function strips out --- one or more `introOuter` binders, so `outer a b . (a, b)` becomes `(a, b)`. 
-stripIntroOuters :: Type v a -> Type v a -stripIntroOuters t = case unIntroOuters t of - Just (_, t) -> t - Nothing -> t - -unForalls :: Type v a -> Maybe ([v], Type v a) -unForalls t = go t [] - where go (ForallNamed' v body) vs = go body (v:vs) - go _body [] = Nothing - go body vs = Just(reverse vs, body) - -unEffect0 :: Ord v => Type v a -> ([Type v a], Type v a) -unEffect0 (Effect1' e a) = (flattenEffects e, a) -unEffect0 t = ([], t) - -unEffects1 :: Ord v => Type v a -> Maybe ([Type v a], Type v a) -unEffects1 (Effect1' (Effects' es) a) = Just (es, a) -unEffects1 _ = Nothing - --- | True if the given type is a function, possibly quantified -isArrow :: ABT.Var v => Type v a -> Bool -isArrow (ForallNamed' _ t) = isArrow t -isArrow (Arrow' _ _) = True -isArrow _ = False - --- some smart constructors - ---vectorOf :: Ord v => a -> Type v a -> Type v ---vectorOf a t = vector `app` t - -ref :: Ord v => a -> Reference -> Type v a -ref a = ABT.tm' a . Ref - -refId :: Ord v => a -> Reference.Id -> Type v a -refId a = ref a . Reference.DerivedId - -termLink :: Ord v => a -> Type v a -termLink a = ABT.tm' a . Ref $ termLinkRef - -typeLink :: Ord v => a -> Type v a -typeLink a = ABT.tm' a . 
Ref $ typeLinkRef - -derivedBase32Hex :: Ord v => Reference -> a -> Type v a -derivedBase32Hex r a = ref a r - --- derivedBase58' :: Text -> Reference --- derivedBase58' base58 = Reference.derivedBase58 base58 0 1 - -intRef, natRef, floatRef, booleanRef, textRef, charRef, vectorRef, bytesRef, effectRef, termLinkRef, typeLinkRef :: Reference -intRef = Reference.Builtin "Int" -natRef = Reference.Builtin "Nat" -floatRef = Reference.Builtin "Float" -booleanRef = Reference.Builtin "Boolean" -textRef = Reference.Builtin "Text" -charRef = Reference.Builtin "Char" -vectorRef = Reference.Builtin "Sequence" -bytesRef = Reference.Builtin "Bytes" -effectRef = Reference.Builtin "Effect" -termLinkRef = Reference.Builtin "Link.Term" -typeLinkRef = Reference.Builtin "Link.Type" - -builtinIORef, fileHandleRef, threadIdRef, socketRef :: Reference -builtinIORef = Reference.Builtin "IO" -fileHandleRef = Reference.Builtin "Handle" -threadIdRef = Reference.Builtin "ThreadId" -socketRef = Reference.Builtin "Socket" - -builtin :: Ord v => a -> Text -> Type v a -builtin a = ref a . 
Reference.Builtin - -int :: Ord v => a -> Type v a -int a = ref a intRef - -nat :: Ord v => a -> Type v a -nat a = ref a natRef - -float :: Ord v => a -> Type v a -float a = ref a floatRef - -boolean :: Ord v => a -> Type v a -boolean a = ref a booleanRef - -text :: Ord v => a -> Type v a -text a = ref a textRef - -char :: Ord v => a -> Type v a -char a = ref a charRef - -fileHandle :: Ord v => a -> Type v a -fileHandle a = ref a fileHandleRef - -threadId :: Ord v => a -> Type v a -threadId a = ref a threadIdRef - -builtinIO :: Ord v => a -> Type v a -builtinIO a = ref a builtinIORef - -socket :: Ord v => a -> Type v a -socket a = ref a socketRef - -vector :: Ord v => a -> Type v a -vector a = ref a vectorRef - -bytes :: Ord v => a -> Type v a -bytes a = ref a bytesRef - -effectType :: Ord v => a -> Type v a -effectType a = ref a $ effectRef - -app :: Ord v => a -> Type v a -> Type v a -> Type v a -app a f arg = ABT.tm' a (App f arg) - --- `f x y z` means `((f x) y) z` and the annotation paired with `y` is the one --- meant for `app (f x) y` -apps :: Ord v => Type v a -> [(a, Type v a)] -> Type v a -apps = foldl' go where go f (a, t) = app a f t - -app' :: (Ord v, Semigroup a) => Type v a -> Type v a -> Type v a -app' f arg = app (ABT.annotation f <> ABT.annotation arg) f arg - -apps' :: (Semigroup a, Ord v) => Type v a -> [Type v a] -> Type v a -apps' = foldl app' - -arrow :: Ord v => a -> Type v a -> Type v a -> Type v a -arrow a i o = ABT.tm' a (Arrow i o) - -arrow' :: (Semigroup a, Ord v) => Type v a -> Type v a -> Type v a -arrow' i o = arrow (ABT.annotation i <> ABT.annotation o) i o - -ann :: Ord v => a -> Type v a -> K.Kind -> Type v a -ann a e t = ABT.tm' a (Ann e t) - -forall :: Ord v => a -> v -> Type v a -> Type v a -forall a v body = ABT.tm' a (Forall (ABT.abs' a v body)) - -introOuter :: Ord v => a -> v -> Type v a -> Type v a -introOuter a v body = ABT.tm' a (IntroOuter (ABT.abs' a v body)) - -iff :: Var v => Type v () -iff = forall () aa $ arrows (f 
<$> [boolean(), a, a]) a - where aa = Var.named "a" - a = var () aa - f x = ((), x) - -iff' :: Var v => a -> Type v a -iff' loc = forall loc aa $ arrows (f <$> [boolean loc, a, a]) a - where aa = Var.named "a" - a = var loc aa - f x = (loc, x) - -iff2 :: Var v => a -> Type v a -iff2 loc = forall loc aa $ arrows (f <$> [a, a]) a - where aa = Var.named "a" - a = var loc aa - f x = (loc, x) - -andor :: Ord v => Type v () -andor = arrows (f <$> [boolean(), boolean()]) $ boolean() - where f x = ((), x) - -andor' :: Ord v => a -> Type v a -andor' a = arrows (f <$> [boolean a, boolean a]) $ boolean a - where f x = (a, x) - -var :: Ord v => a -> v -> Type v a -var = ABT.annotatedVar - -v' :: Var v => Text -> Type v () -v' s = ABT.var (Var.named s) - --- Like `v'`, but creates an annotated variable given an annotation -av' :: Var v => a -> Text -> Type v a -av' a s = ABT.annotatedVar a (Var.named s) - -forall' :: Var v => a -> [Text] -> Type v a -> Type v a -forall' a vs body = foldr (forall a) body (Var.named <$> vs) - -foralls :: Ord v => a -> [v] -> Type v a -> Type v a -foralls a vs body = foldr (forall a) body vs - --- Note: `a -> b -> c` parses as `a -> (b -> c)` --- the annotation associated with `b` will be the annotation for the `b -> c` --- node -arrows :: Ord v => [(a, Type v a)] -> Type v a -> Type v a -arrows ts result = foldr go result ts where - go = uncurry arrow - --- The types of effectful computations -effect :: Ord v => a -> [Type v a] -> Type v a -> Type v a -effect a es (Effect1' fs t) = - let es' = (es >>= flattenEffects) ++ flattenEffects fs - in ABT.tm' a (Effect (ABT.tm' a (Effects es')) t) -effect a es t = ABT.tm' a (Effect (ABT.tm' a (Effects es)) t) - -effects :: Ord v => a -> [Type v a] -> Type v a -effects a es = ABT.tm' a (Effects $ es >>= flattenEffects) - -effect1 :: Ord v => a -> Type v a -> Type v a -> Type v a -effect1 a es (Effect1' fs t) = - let es' = flattenEffects es ++ flattenEffects fs - in ABT.tm' a (Effect (ABT.tm' a (Effects 
es')) t) -effect1 a es t = ABT.tm' a (Effect es t) - -flattenEffects :: Type v a -> [Type v a] -flattenEffects (Effects' es) = es >>= flattenEffects -flattenEffects es = [es] - --- The types of first-class effect values --- which get deconstructed in effect handlers. -effectV :: Ord v => a -> (a, Type v a) -> (a, Type v a) -> Type v a -effectV builtinA e t = apps (builtin builtinA "Effect") [e, t] - --- Strips effects from a type. E.g. `{e} a` becomes `a`. -stripEffect :: Ord v => Type v a -> ([Type v a], Type v a) -stripEffect (Effect' e t) = case stripEffect t of (ei, t) -> (e ++ ei, t) -stripEffect t = ([], t) - --- The type of the flipped function application operator: --- `(a -> (a -> b) -> b)` -flipApply :: Var v => Type v () -> Type v () -flipApply t = forall() b $ arrow() (arrow() t (var() b)) (var() b) - where b = ABT.fresh t (Var.named "b") - -generalize' :: Var v => Var.Type -> Type v a -> Type v a -generalize' k t = generalize vsk t where - vsk = [ v | v <- Set.toList (freeVars t), Var.typeOf v == k ] - --- | Bind the given variables with an outer `forall`, if they are used in `t`. -generalize :: Ord v => [v] -> Type v a -> Type v a -generalize vs t = foldr f t vs - where - f v t = - if Set.member v (ABT.freeVars t) then forall (ABT.annotation t) v t else t - -unforall :: Type v a -> Type v a -unforall (ForallsNamed' _ t) = t -unforall t = t - -unforall' :: Type v a -> ([v], Type v a) -unforall' (ForallsNamed' vs t) = (vs, t) -unforall' t = ([], t) - -dependencies :: Ord v => Type v a -> Set Reference -dependencies t = Set.fromList . Writer.execWriter $ ABT.visit' f t - where f t@(Ref r) = Writer.tell [r] $> t - f t = pure t - -updateDependencies :: Ord v => Map Reference Reference -> Type v a -> Type v a -updateDependencies typeUpdates = ABT.rebuildUp go - where - go (Ref r) = Ref (Map.findWithDefault r r typeUpdates) - go f = f - -usesEffects :: Ord v => Type v a -> Bool -usesEffects t = getAny . 
getConst $ ABT.visit go t where - go (Effect1' _ _) = Just (Const (Any True)) - go _ = Nothing - --- Returns free effect variables in the given type, for instance, in: --- --- ∀ e3 . a ->{e,e2} b ->{e3} c --- --- This function would return the set {e, e2}, but not `e3` since `e3` --- is bound by the enclosing forall. -freeEffectVars :: Ord v => Type v a -> Set v -freeEffectVars t = - Set.fromList . join . runIdentity $ - ABT.foreachSubterm go (snd <$> ABT.annotateBound t) - where - go t@(Effects' es) = - let frees = Set.fromList [ v | Var' v <- es >>= flattenEffects ] - in pure . Set.toList $ frees `Set.difference` ABT.annotation t - go t@(Effect1' e _) = - let frees = Set.fromList [ v | Var' v <- flattenEffects e ] - in pure . Set.toList $ frees `Set.difference` ABT.annotation t - go _ = pure [] - -existentializeArrows :: (Ord v, Monad m) => m v -> Type v a -> m (Type v a) -existentializeArrows freshVar = ABT.visit go - where - go t@(Arrow' a b) = case b of - Effect1' _ _ -> Nothing - _ -> Just $ do - e <- freshVar - a <- existentializeArrows freshVar a - b <- existentializeArrows freshVar b - let ann = ABT.annotation t - pure $ arrow ann a (effect ann [var ann e] b) - go _ = Nothing - --- Remove free effect variables from the type that are in the set -removeEffectVars :: ABT.Var v => Set v -> Type v a -> Type v a -removeEffectVars removals t = - let z = effects () [] - t' = ABT.substsInheritAnnotation ((,z) <$> Set.toList removals) t - -- leave explicitly empty `{}` alone - removeEmpty (Effect1' (Effects' []) v) = Just (ABT.visitPure removeEmpty v) - removeEmpty t@(Effect1' e v) = - case flattenEffects e of - [] -> Just (ABT.visitPure removeEmpty v) - es -> Just (effect (ABT.annotation t) es $ ABT.visitPure removeEmpty v) - removeEmpty t@(Effects' es) = - Just $ effects (ABT.annotation t) (es >>= flattenEffects) - removeEmpty _ = Nothing - in ABT.visitPure removeEmpty t' - --- Remove all effect variables from the type. 
--- Used for type-based search, we apply this transformation to both the --- indexed type and the query type, so the user can supply `a -> b` that will --- match `a ->{e} b` (but not `a ->{IO} b`). -removeAllEffectVars :: ABT.Var v => Type v a -> Type v a -removeAllEffectVars t = let - allEffectVars = foldMap go (ABT.subterms t) - go (Effects' vs) = Set.fromList [ v | Var' v <- vs] - go (Effect1' (Var' v) _) = Set.singleton v - go _ = mempty - (vs, tu) = unforall' t - in generalize vs (removeEffectVars allEffectVars tu) - -removePureEffects :: ABT.Var v => Type v a -> Type v a -removePureEffects t | not Settings.removePureEffects = t - | otherwise = - generalize vs $ removeEffectVars (Set.filter isPure fvs) tu - where - (vs, tu) = unforall' t - fvs = freeEffectVars tu `Set.difference` ABT.freeVars t - -- If an effect variable is mentioned only once, it is on - -- an arrow `a ->{e} b`. Generalizing this to - -- `∀ e . a ->{e} b` gives us the pure arrow `a -> b`. - isPure v = ABT.occurrences v tu <= 1 - -editFunctionResult - :: forall v a - . Ord v - => (Type v a -> Type v a) - -> Type v a - -> Type v a -editFunctionResult f = go - where - go :: Type v a -> Type v a - go (ABT.Term s a t) = case t of - ABT.Tm (Forall t) -> - (\x -> ABT.Term (s <> freeVars x) a . ABT.Tm $ Forall x) $ go t - ABT.Tm (Arrow i o) -> - (\x -> ABT.Term (s <> freeVars x) a . ABT.Tm $ Arrow i x) $ go o - ABT.Abs v r -> - (\x -> ABT.Term (s <> freeVars x) a $ ABT.Abs v x) $ go r - _ -> f (ABT.Term s a t) - -functionResult :: Type v a -> Maybe (Type v a) -functionResult = go False - where - go inArr (ForallNamed' _ body) = go inArr body - go _inArr (Arrow' _i o ) = go True o - go inArr t = if inArr then Just t else Nothing - - --- | Bind all free variables (not in `except`) that start with a lowercase --- letter and are unqualified with an outer `forall`. --- `a -> a` becomes `∀ a . 
a -> a` --- `B -> B` becomes `B -> B` (not changed) --- `.foo -> .foo` becomes `.foo -> .foo` (not changed) --- `.foo.bar -> blarrg.woot` becomes `.foo.bar -> blarrg.woot` (unchanged) -generalizeLowercase :: Var v => Set v -> Type v a -> Type v a -generalizeLowercase except t = foldr (forall (ABT.annotation t)) t vars - where - vars = - [ v | v <- Set.toList (ABT.freeVars t `Set.difference` except), Var.universallyQuantifyIfFree v ] - --- Convert all free variables in `allowed` to variables bound by an `introOuter`. -freeVarsToOuters :: Ord v => Set v -> Type v a -> Type v a -freeVarsToOuters allowed t = foldr (introOuter (ABT.annotation t)) t vars - where vars = Set.toList $ ABT.freeVars t `Set.intersection` allowed - --- | This function removes all variable shadowing from the types and reduces --- fresh ids to the minimum possible to avoid ambiguity. Useful when showing --- two different types. -cleanupVars :: Var v => [Type v a] -> [Type v a] -cleanupVars ts | not Settings.cleanupTypes = ts -cleanupVars ts = let - changedVars = cleanupVarsMap ts - in cleanupVars1' changedVars <$> ts - --- Compute a variable replacement map from a collection of types, which --- can be passed to `cleanupVars1'`. This is used to cleanup variable ids --- for multiple related types, like when reporting a type error. -cleanupVarsMap :: Var v => [Type v a] -> Map.Map v v -cleanupVarsMap ts = let - varsByName = foldl' step Map.empty (ts >>= ABT.allVars) - step m v = Map.insertWith (++) (Var.name $ Var.reset v) [v] m - changedVars = Map.fromList [ (v, Var.freshenId i v) - | (_, vs) <- Map.toList varsByName - , (v,i) <- nubOrd vs `zip` [0..]] - in changedVars - -cleanupVars1' :: Var v => Map.Map v v -> Type v a -> Type v a -cleanupVars1' = ABT.changeVars - --- | This function removes all variable shadowing from the type and reduces --- fresh ids to the minimum possible to avoid ambiguity. 
-cleanupVars1 :: Var v => Type v a -> Type v a -cleanupVars1 t | not Settings.cleanupTypes = t -cleanupVars1 t = let [t'] = cleanupVars [t] in t' - --- This removes duplicates and normalizes the order of ability lists -cleanupAbilityLists :: Var v => Type v a -> Type v a -cleanupAbilityLists = ABT.visitPure go - where - -- leave explicitly empty `{}` alone - go (Effect1' (Effects' []) _v) = Nothing - go t@(Effect1' e v) = - let es = Set.toList . Set.fromList $ flattenEffects e - in case es of - [] -> Just (ABT.visitPure go v) - _ -> Just (effect (ABT.annotation t) es $ ABT.visitPure go v) - go _ = Nothing - -cleanups :: Var v => [Type v a] -> [Type v a] -cleanups ts = cleanupVars $ map cleanupAbilityLists ts - -cleanup :: Var v => Type v a -> Type v a -cleanup t | not Settings.cleanupTypes = t -cleanup t = cleanupVars1 . cleanupAbilityLists $ t - -toReference :: (ABT.Var v, Show v) => Type v a -> Reference -toReference (Ref' r) = r --- a bit of normalization - any unused type parameters aren't part of the hash -toReference (ForallNamed' v body) | not (Set.member v (ABT.freeVars body)) = toReference body -toReference t = Reference.Derived (ABT.hash t) 0 1 - -toReferenceMentions :: (ABT.Var v, Show v) => Type v a -> Set Reference -toReferenceMentions ty = - let (vs, _) = unforall' ty - gen ty = generalize (Set.toList (freeVars ty)) $ generalize vs ty - in Set.fromList $ toReference . gen <$> ABT.subterms ty - -hashComponents - :: Var v => Map v (Type v a) -> Map v (Reference.Id, Type v a) -hashComponents = ReferenceUtil.hashComponents $ refId () - -instance Hashable1 F where - hash1 hashCycle hash e = - let - (tag, hashed) = (Hashable.Tag, Hashable.Hashed) - -- Note: start each layer with leading `0` byte, to avoid collisions with - -- terms, which start each layer with leading `1`. 
See `Hashable1 Term.F` - in Hashable.accumulate $ tag 0 : case e of - Ref r -> [tag 0, Hashable.accumulateToken r] - Arrow a b -> [tag 1, hashed (hash a), hashed (hash b) ] - App a b -> [tag 2, hashed (hash a), hashed (hash b) ] - Ann a k -> [tag 3, hashed (hash a), Hashable.accumulateToken k ] - -- Example: - -- a) {Remote, Abort} (() -> {Remote} ()) should hash the same as - -- b) {Abort, Remote} (() -> {Remote} ()) but should hash differently from - -- c) {Remote, Abort} (() -> {Abort} ()) - Effects es -> let - (hs, _) = hashCycle es - in tag 4 : map hashed hs - Effect e t -> [tag 5, hashed (hash e), hashed (hash t)] - Forall a -> [tag 6, hashed (hash a)] - IntroOuter a -> [tag 7, hashed (hash a)] - -instance Show a => Show (F a) where - showsPrec = go where - go _ (Ref r) = shows r - go p (Arrow i o) = - showParen (p > 0) $ showsPrec (p+1) i <> s" -> " <> showsPrec p o - go p (Ann t k) = - showParen (p > 1) $ shows t <> s":" <> shows k - go p (App f x) = - showParen (p > 9) $ showsPrec 9 f <> s" " <> showsPrec 10 x - go p (Effects es) = showParen (p > 0) $ - s"{" <> shows es <> s"}" - go p (Effect e t) = showParen (p > 0) $ - showParen True $ shows e <> s" " <> showsPrec p t - go p (Forall body) = case p of - 0 -> showsPrec p body - _ -> showParen True $ s"∀ " <> shows body - go p (IntroOuter body) = case p of - 0 -> showsPrec p body - _ -> showParen True $ s"outer " <> shows body - (<>) = (.) - s = showString - diff --git a/unison-core/src/Unison/Util/Components.hs b/unison-core/src/Unison/Util/Components.hs deleted file mode 100644 index 13a049e799..0000000000 --- a/unison-core/src/Unison/Util/Components.hs +++ /dev/null @@ -1,48 +0,0 @@ -module Unison.Util.Components where - -import Unison.Prelude - -import qualified Data.Graph as Graph -import qualified Data.Map as Map -import qualified Data.Set as Set - --- | Order bindings by dependencies and group into components. 
--- Each component consists of > 1 bindings, each of which depends --- transitively on all other bindings in the component. --- --- 1-element components may or may not depend on themselves. --- --- The order is such that a component at index i will not depend --- on components and indexes > i. But a component at index i does not --- _necessarily_ depend on any components at earlier indices. --- --- Example: --- --- let rec --- ping n = pong (n + 1); --- pong n = ping (n + 1); --- g = id 42; --- y = id "hi" --- id x = x; --- in ping g --- --- `components` would produce `[[ping,pong], [id], [g], [y]]` --- Notice that `id` comes before `g` and `y` in the output, since --- both `g` and `y` depend on `id`. --- --- Uses Tarjan's algorithm: --- https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm -components :: Ord v => (t -> Set v) -> [(v, t)] -> [[(v, t)]] -components freeVars bs = - let varIds = - Map.fromList (map fst bs `zip` reverse [(1 :: Int) .. length bs]) - -- something horribly wrong if this bombs - msg = error "Components.components bug" - varId v = fromMaybe msg $ Map.lookup v varIds - - -- use ints as keys for graph to preserve original source order as much as - -- possible - graph = [ ((v, b), varId v, deps b) | (v, b) <- bs ] - vars = Set.fromList (map fst bs) - deps b = varId <$> Set.toList (Set.intersection vars (freeVars b)) - in Graph.flattenSCC <$> Graph.stronglyConnComp graph diff --git a/unison-core/src/Unison/Util/List.hs b/unison-core/src/Unison/Util/List.hs deleted file mode 100644 index dff640a4a1..0000000000 --- a/unison-core/src/Unison/Util/List.hs +++ /dev/null @@ -1,65 +0,0 @@ -module Unison.Util.List where - -import Unison.Prelude - -import qualified Data.List as List -import qualified Data.Set as Set -import qualified Data.Map as Map - -multimap :: Foldable f => Ord k => f (k, v) -> Map k [v] -multimap kvs = - -- preserve the order of the values from the original list - reverse <$> foldl' step Map.empty kvs - 
where - step m (k,v) = Map.insertWith (++) k [v] m - -groupBy :: (Foldable f, Ord k) => (v -> k) -> f v -> Map k [v] -groupBy f vs = reverse <$> foldl' step Map.empty vs - where step m v = Map.insertWith (++) (f v) [v] m - --- returns the subset of `f a` which maps to unique `b`s. --- prefers earlier copies, if many `a` map to some `b`. -uniqueBy, nubOrdOn :: (Foldable f, Ord b) => (a -> b) -> f a -> [a] -uniqueBy f as = wrangle' (toList as) Set.empty where - wrangle' [] _ = [] - wrangle' (a:as) seen = - if Set.member b seen - then wrangle' as seen - else a : wrangle' as (Set.insert b seen) - where b = f a -nubOrdOn = uniqueBy - --- prefers later copies -uniqueBy' :: (Foldable f, Ord b) => (a -> b) -> f a -> [a] -uniqueBy' f = reverse . uniqueBy f . reverse . toList - -safeHead :: Foldable f => f a -> Maybe a -safeHead = headMay . toList - -validate :: (Semigroup e, Foldable f) => (a -> Either e b) -> f a -> Either e [b] -validate f as = case partitionEithers (f <$> toList as) of - ([], bs) -> Right bs - (e:es, _) -> Left (foldl' (<>) e es) - --- Intercalate a list with separators determined by inspecting each --- adjacent pair. -intercalateMapWith :: (a -> a -> b) -> (a -> b) -> [a] -> [b] -intercalateMapWith sep f xs = result where - xs' = map f xs - pairs = filter (\p -> length p == 2) $ map (take 2) $ List.tails xs - seps = (flip map) pairs $ \case - x1 : x2 : _ -> sep x1 x2 - _ -> error "bad list length" - paired = zipWith (\sep x -> [sep, x]) seps (drop 1 xs') - result = (take 1 xs') ++ mconcat paired - --- Take runs of consecutive occurrences of r within a list, --- and in each run, overwrite all but the first occurrence of r with w. -quenchRuns :: Eq a => a -> a -> [a] -> [a] -quenchRuns r w = reverse . 
(go False r w []) where - go inRun r w acc = \case - [] -> acc - h : tl -> - if h == r - then go True r w ((if inRun then w else r) : acc) tl - else go False r w (h : acc) tl diff --git a/unison-core/src/Unison/Util/Monoid.hs b/unison-core/src/Unison/Util/Monoid.hs deleted file mode 100644 index 1c95bcf1fd..0000000000 --- a/unison-core/src/Unison/Util/Monoid.hs +++ /dev/null @@ -1,27 +0,0 @@ -module Unison.Util.Monoid where - -import Unison.Prelude hiding (whenM) - -import Data.List (intersperse) - --- List.intercalate extended to any monoid --- "The type that intercalate should have had to begin with." -intercalateMap :: (Foldable t, Monoid a) => a -> (b -> a) -> t b -> a -intercalateMap separator renderer elements = - mconcat $ intersperse separator (renderer <$> toList elements) - -fromMaybe :: Monoid a => Maybe a -> a -fromMaybe Nothing = mempty -fromMaybe (Just a) = a - -whenM, unlessM :: Monoid a => Bool -> a -> a -whenM True a = a -whenM False _ = mempty -unlessM = whenM . not - -isEmpty, nonEmpty :: (Eq a, Monoid a) => a -> Bool -isEmpty a = a == mempty -nonEmpty = not . 
isEmpty - -foldMapM :: (Monad m, Foldable f, Monoid b) => (a -> m b) -> f a -> m b -foldMapM f as = foldM (\b a -> fmap (b <>) (f a)) mempty as diff --git a/unison-core/src/Unison/Util/Relation3.hs b/unison-core/src/Unison/Util/Relation3.hs deleted file mode 100644 index 69b6f61d84..0000000000 --- a/unison-core/src/Unison/Util/Relation3.hs +++ /dev/null @@ -1,120 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} - -module Unison.Util.Relation3 where - -import Unison.Prelude hiding (empty, toList) - -import Unison.Util.Relation (Relation) -import qualified Data.Map as Map -import qualified Unison.Hashable as H -import qualified Unison.Util.Relation as R -import Data.Semigroup (Sum(Sum, getSum)) -import Data.Tuple.Extra (uncurry3) - -data Relation3 a b c - = Relation3 - { d1 :: Map a (Relation b c) - , d2 :: Map b (Relation a c) - , d3 :: Map c (Relation a b) - } deriving (Eq,Ord) - -instance (Show a, Show b, Show c) => Show (Relation3 a b c) where - show = show . toList - -d1s :: Relation3 a b c -> Set a -d1s = Map.keysSet . d1 - -d2s :: Relation3 a b c -> Set b -d2s = Map.keysSet . d2 - -d3s :: Relation3 a b c -> Set c -d3s = Map.keysSet . d3 - -filter :: (Ord a, Ord b, Ord c) - => ((a,b,c) -> Bool) -> Relation3 a b c -> Relation3 a b c -filter f = fromList . Prelude.filter f . toList - -member :: (Ord a, Ord b, Ord c) => a -> b -> c -> Relation3 a b c -> Bool -member a b c = R.member b c . lookupD1 a - -lookupD1 :: (Ord a, Ord b, Ord c) => a -> Relation3 a b c -> Relation b c -lookupD1 a = fromMaybe mempty . Map.lookup a . d1 - -lookupD2 :: (Ord a, Ord b, Ord c) => b -> Relation3 a b c -> Relation a c -lookupD2 b = fromMaybe mempty . Map.lookup b . d2 - -lookupD3 :: (Ord a, Ord b, Ord c) => c -> Relation3 a b c -> Relation a b -lookupD3 c = fromMaybe mempty . Map.lookup c . d3 - -size :: (Ord a, Ord b, Ord c) => Relation3 a b c -> Int -size = getSum . foldMap (Sum . R.size) . d1 - -toList :: Relation3 a b c -> [(a,b,c)] -toList = fmap (\(a,(b,c)) -> (a,b,c)) . 
toNestedList - -toNestedList :: Relation3 a b c -> [(a,(b,c))] -toNestedList r3 = - [ (a,bc) | (a,r2) <- Map.toList $ d1 r3 - , bc <- R.toList r2 ] - -nestD12 :: (Ord a, Ord b, Ord c) => Relation3 a b c -> Relation (a,b) c -nestD12 r = R.fromList [ ((a,b),c) | (a,b,c) <- toList r ] - -fromNestedDom :: (Ord a, Ord b, Ord c) => Relation (a,b) c -> Relation3 a b c -fromNestedDom = fromList . fmap (\((a,b),c) -> (a,b,c)) . R.toList -fromNestedRan :: (Ord a, Ord b, Ord c) => Relation a (b,c) -> Relation3 a b c -fromNestedRan = fromList . fmap (\(a,(b,c)) -> (a,b,c)) . R.toList - -fromList :: (Ord a, Ord b, Ord c) => [(a,b,c)] -> Relation3 a b c -fromList xs = insertAll xs empty - -empty :: (Ord a, Ord b, Ord c) => Relation3 a b c -empty = mempty - -insert, delete - :: (Ord a, Ord b, Ord c) - => a -> b -> c -> Relation3 a b c -> Relation3 a b c -insert a b c Relation3{..} = - Relation3 - (Map.alter (ins b c) a d1) - (Map.alter (ins a c) b d2) - (Map.alter (ins a b) c d3) - where - ins x y = Just . R.insert x y . 
fromMaybe mempty - -insertAll, deleteAll :: Foldable f => Ord a => Ord b => Ord c - => f (a,b,c) -> Relation3 a b c -> Relation3 a b c -insertAll f r = foldl' (\r x -> uncurry3 insert x r) r f -deleteAll f r = foldl' (\r x -> uncurry3 delete x r) r f - - -difference :: (Ord a, Ord b, Ord c) - => Relation3 a b c - -> Relation3 a b c - -> Relation3 a b c -difference a b = deleteAll (Unison.Util.Relation3.toList b) a - -delete a b c Relation3{..} = - Relation3 - (Map.alter (del b c) a d1) - (Map.alter (del a c) b d2) - (Map.alter (del a b) c d3) - where - del _ _ Nothing = Nothing - del x y (Just r) = - let r' = R.delete x y r - in if r' == mempty then Nothing else Just r' - -instance (Ord a, Ord b, Ord c) => Semigroup (Relation3 a b c) where - (<>) = mappend - -instance (Ord a, Ord b, Ord c) => Monoid (Relation3 a b c) where - mempty = Relation3 mempty mempty mempty - s1 `mappend` s2 = Relation3 d1' d2' d3' where - d1' = Map.unionWith (<>) (d1 s1) (d1 s2) - d2' = Map.unionWith (<>) (d2 s1) (d2 s2) - d3' = Map.unionWith (<>) (d3 s1) (d3 s2) - -instance (H.Hashable d1, H.Hashable d2, H.Hashable d3) - => H.Hashable (Relation3 d1 d2 d3) where - tokens s = [ H.accumulateToken $ toNestedList s ] diff --git a/unison-core/src/Unison/Util/Relation4.hs b/unison-core/src/Unison/Util/Relation4.hs deleted file mode 100644 index f094777798..0000000000 --- a/unison-core/src/Unison/Util/Relation4.hs +++ /dev/null @@ -1,122 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} - -module Unison.Util.Relation4 where - -import Unison.Prelude hiding (toList, empty) -import Prelude -import qualified Data.Map as Map ---import qualified Data.Set as Set -import qualified Unison.Hashable as H -import qualified Unison.Util.Relation as R -import qualified Unison.Util.Relation3 as R3 -import Unison.Util.Relation (Relation) -import Unison.Util.Relation3 (Relation3) -import Data.List.Extra (nubOrd) -import Util (uncurry4) -import Data.Semigroup (Sum(Sum, getSum)) - -data Relation4 a b c d - = Relation4 - { d1 
:: Map a (Relation3 b c d) - , d2 :: Map b (Relation3 a c d) - , d3 :: Map c (Relation3 a b d) - , d4 :: Map d (Relation3 a b c) - } deriving (Eq,Ord) - -instance (Show a, Show b, Show c, Show d) => Show (Relation4 a b c d) where - show = show . toList - -size :: (Ord a, Ord b, Ord c, Ord d) => Relation4 a b c d -> Int -size = getSum . foldMap (Sum . R3.size) . d1 - -toNestedList :: Relation4 a b c d -> [(a,(b,(c,d)))] -toNestedList r4 = - [ (a,bcd) - | (a,r3) <- Map.toList $ d1 r4 - , bcd <- R3.toNestedList r3 ] - -toList :: Relation4 a b c d -> [(a,b,c,d)] -toList = fmap (\(a,(b,(c,d))) -> (a,b,c,d)) . toNestedList - -empty :: (Ord a, Ord b, Ord c, Ord d) => Relation4 a b c d -empty = mempty - -fromList :: (Ord a, Ord b, Ord c, Ord d) => [(a,b,c,d)] -> Relation4 a b c d -fromList xs = insertAll xs empty - -filter :: (Ord a, Ord b, Ord c, Ord d) => ((a,b,c,d) -> Bool) -> Relation4 a b c d -> Relation4 a b c d -filter f = fromList . Prelude.filter f . toList - -selectD3 :: (Ord a, Ord b, Ord c, Ord d) - => c -> Relation4 a b c d -> Relation4 a b c d -selectD3 c r = - fromList [ (a,b,c,d) | (a,b,d) <- maybe [] R3.toList $ Map.lookup c (d3 r) ] - -selectD34 :: (Ord a, Ord b, Ord c, Ord d) - => c -> d -> Relation4 a b c d -> Relation4 a b c d -selectD34 c d r = - fromList [ (a,b,c,d) - | (a,b) <- maybe [] (maybe [] R.toList . Map.lookup d . R3.d3) - (Map.lookup c (d3 r)) - ] - -d1set :: Ord a => Relation4 a b c d -> Set a -d1set = Map.keysSet . d1 - -d12 :: (Ord a, Ord b) => Relation4 a b c d -> Relation a b -d12 = R.fromMultimap . fmap (Map.keysSet . R3.d1) . d1 - -d34 :: (Ord c, Ord d) => Relation4 a b c d -> Relation c d -d34 = R.fromMultimap . fmap (Map.keysSet . R3.d3) . d3 - --- todo: make me faster -d12s :: (Ord a, Ord b) => Relation4 a b c d -> [(a,b)] -d12s = nubOrd . fmap (\(a, (b, _)) -> (a,b)) . toNestedList ---e.g. 
Map.toList (d1 r) >>= \(a, r3) -> (a,) <$> Map.keys (R3.d1 r3) - -insert, delete - :: (Ord a, Ord b, Ord c, Ord d) - => a -> b -> c -> d -> Relation4 a b c d -> Relation4 a b c d -insert a b c d Relation4{..} = - Relation4 - (Map.alter (ins b c d) a d1) - (Map.alter (ins a c d) b d2) - (Map.alter (ins a b d) c d3) - (Map.alter (ins a b c) d d4) - where - ins x y z = Just . R3.insert x y z . fromMaybe mempty - -delete a b c d Relation4{..} = - Relation4 - (Map.alter (del b c d) a d1) - (Map.alter (del a c d) b d2) - (Map.alter (del a b d) c d3) - (Map.alter (del a b c) d d4) - where - del _ _ _ Nothing = Nothing - del x y z (Just r) = - let r' = R3.delete x y z r - in if r' == mempty then Nothing else Just r' - -mapD2 :: (Ord a, Ord b, Ord b', Ord c, Ord d) - => (b -> b') -> Relation4 a b c d -> Relation4 a b' c d -mapD2 f = fromList . fmap (\(a,b,c,d) -> (a, f b, c, d)) . toList - -insertAll :: Foldable f => Ord a => Ord b => Ord c => Ord d - => f (a,b,c,d) -> Relation4 a b c d -> Relation4 a b c d -insertAll f r = foldl' (\r x -> uncurry4 insert x r) r f - -instance (Ord a, Ord b, Ord c, Ord d) => Semigroup (Relation4 a b c d) where - (<>) = mappend - -instance (Ord a, Ord b, Ord c, Ord d) => Monoid (Relation4 a b c d) where - mempty = Relation4 mempty mempty mempty mempty - s1 `mappend` s2 = Relation4 d1' d2' d3' d4' where - d1' = Map.unionWith (<>) (d1 s1) (d1 s2) - d2' = Map.unionWith (<>) (d2 s1) (d2 s2) - d3' = Map.unionWith (<>) (d3 s1) (d3 s2) - d4' = Map.unionWith (<>) (d4 s1) (d4 s2) - -instance (H.Hashable d1, H.Hashable d2, H.Hashable d3, H.Hashable d4) - => H.Hashable (Relation4 d1 d2 d3 d4) where - tokens s = [ H.accumulateToken $ toNestedList s ] diff --git a/unison-core/src/Unison/Util/Set.hs b/unison-core/src/Unison/Util/Set.hs deleted file mode 100644 index ea224e8259..0000000000 --- a/unison-core/src/Unison/Util/Set.hs +++ /dev/null @@ -1,10 +0,0 @@ -{-# LANGUAGE ViewPatterns #-} -module Unison.Util.Set where - -import Data.Set - 
-symmetricDifference :: Ord a => Set a -> Set a -> Set a -symmetricDifference a b = (a `difference` b) `union` (b `difference` a) - -mapMaybe :: (Ord a, Ord b) => (a -> Maybe b) -> Set a -> Set b -mapMaybe f s = fromList [ r | (f -> Just r) <- toList s ] diff --git a/unison-core/src/Unison/Var.hs b/unison-core/src/Unison/Var.hs deleted file mode 100644 index 22da72d1fe..0000000000 --- a/unison-core/src/Unison/Var.hs +++ /dev/null @@ -1,168 +0,0 @@ -{-# Language OverloadedStrings #-} -{-# Language ViewPatterns #-} -{-# Language PatternSynonyms #-} - -module Unison.Var where - -import Unison.Prelude - -import Data.Char (toLower, isLower) -import Data.Text (pack) -import qualified Data.Text as Text -import qualified Unison.ABT as ABT -import qualified Unison.NameSegment as Name - -import Unison.Util.Monoid (intercalateMap) -import Unison.Reference (Reference) -import qualified Unison.Reference as R - --- | A class for variables. Variables may have auxiliary information which --- may not form part of their identity according to `Eq` / `Ord`. Laws: --- --- * `typeOf (typed n) == n` --- * `typeOf (ABT.freshIn vs v) == typeOf v`: --- `ABT.freshIn` does not alter the name -class (Show v, ABT.Var v) => Var v where - typed :: Type -> v - typeOf :: v -> Type - freshId :: v -> Word64 - freshenId :: Word64 -> v -> v - -freshIn :: ABT.Var v => Set v -> v -> v -freshIn = ABT.freshIn - -named :: Var v => Text -> v -named n = typed (User n) - --- | Variable whose name is derived from the given reference. 
-refNamed :: Var v => Reference -> v -refNamed ref = named ("ℍ" <> R.toText ref) - -rawName :: Type -> Text -rawName typ = case typ of - User n -> n - Inference Ability -> "𝕖" - Inference Input -> "𝕒" - Inference Output -> "𝕣" - Inference Other -> "𝕩" - Inference PatternPureE -> "𝕞" - Inference PatternPureV -> "𝕧" - Inference PatternBindE -> "𝕞" - Inference PatternBindV -> "𝕧" - Inference TypeConstructor -> "𝕗" - Inference TypeConstructorArg -> "𝕦" - MissingResult -> "_" - Blank -> "_" - Eta -> "_eta" - ANFBlank -> "_anf" - Float -> "_float" - Pattern -> "_pattern" - Irrelevant -> "_irrelevant" - UnnamedWatch k guid -> fromString k <> "." <> guid - -name :: Var v => v -> Text -name v = rawName (typeOf v) <> showid v - where - showid (freshId -> 0) = "" - showid (freshId -> n) = pack (show n) - -uncapitalize :: Var v => v -> v -uncapitalize v = nameds $ go (nameStr v) where - go (c:rest) = toLower c : rest - go n = n - -missingResult, blank, inferInput, inferOutput, inferAbility, - inferPatternPureE, inferPatternPureV, inferPatternBindE, inferPatternBindV, - inferTypeConstructor, inferTypeConstructorArg, - inferOther :: Var v => v -missingResult = typed MissingResult -blank = typed Blank -inferInput = typed (Inference Input) -inferOutput = typed (Inference Output) -inferAbility = typed (Inference Ability) -inferPatternPureE = typed (Inference PatternPureE) -inferPatternPureV = typed (Inference PatternPureV) -inferPatternBindE = typed (Inference PatternBindE) -inferPatternBindV = typed (Inference PatternBindV) -inferTypeConstructor = typed (Inference TypeConstructor) -inferTypeConstructorArg = typed (Inference TypeConstructorArg) -inferOther = typed (Inference Other) - -unnamedTest :: Var v => Text -> v -unnamedTest guid = typed (UnnamedWatch TestWatch guid) - -data Type - -- User provided variables, these should generally be left alone - = User Text - -- Variables created during type inference - | Inference InferenceType - -- Variables created to finish a block that 
doesn't end with an expression - | MissingResult - -- Variables invented for placeholder values inserted by user or by TDNR - | Blank - -- An unnamed watch expression of the given kind, for instance: - -- - -- test> Ok "oog" - -- has kind "test" - -- > 1 + 1 - -- has kind "" - | UnnamedWatch WatchKind Text -- guid - -- An unnamed variable for constructor eta expansion - | Eta - -- An unnamed variable introduced by ANF transformation - | ANFBlank - -- An unnamed variable for a floated lambda - | Float - -- An unnamed variable introduced from pattern compilation - | Pattern - -- A variable for situations where we need to make up one that - -- definitely won't be used. - | Irrelevant - deriving (Eq,Ord,Show) - -type WatchKind = String - -pattern RegularWatch = "" -pattern TestWatch = "test" - -data InferenceType = - Ability | Input | Output | - PatternPureE | PatternPureV | - PatternBindE | PatternBindV | - TypeConstructor | TypeConstructorArg | - Other - deriving (Eq,Ord,Show) - -reset :: Var v => v -> v -reset v = typed (typeOf v) - -unqualifiedName :: Var v => v -> Text -unqualifiedName = last . Name.segments' . name - -unqualified :: Var v => v -> v -unqualified v = case typeOf v of - User _ -> named . unqualifiedName $ v - _ -> v - -namespaced :: Var v => [v] -> v -namespaced vs = named $ intercalateMap "." name vs - -nameStr :: Var v => v -> String -nameStr = Text.unpack . name - -nameds :: Var v => String -> v -nameds s = named (Text.pack s) - -joinDot :: Var v => v -> v -> v -joinDot prefix v2 = - if name prefix == "." then named (name prefix `mappend` name v2) - else named (name prefix `mappend` "." `mappend` name v2) - -freshNamed :: Var v => Set v -> Text -> v -freshNamed used n = ABT.freshIn used (named n) - -universallyQuantifyIfFree :: forall v . Var v => v -> Bool -universallyQuantifyIfFree v = - ok (name $ reset v) && unqualified v == v - where - ok n = (all isLower . take 1 . 
Text.unpack) n diff --git a/unison-core/unison-core.cabal b/unison-core/unison-core.cabal deleted file mode 100644 index 84daab4e41..0000000000 --- a/unison-core/unison-core.cabal +++ /dev/null @@ -1,117 +0,0 @@ -cabal-version: 2.2 -name: unison-core -category: Compiler -version: 0.1 -license: MIT -license-file: LICENSE -author: Unison Computing, public benefit corp -maintainer: Paul Chiusano , Runar Bjarnason , Arya Irani -stability: provisional -homepage: http://unisonweb.org -bug-reports: https://github.com/unisonweb/unison/issues -copyright: Copyright (C) 2013-2018 Unison Computing, PBC and contributors -synopsis: Parser and typechecker for the Unison language -description: - -build-type: Simple -extra-source-files: -data-files: - -source-repository head - type: git - location: git://github.com/unisonweb/unison.git - --- `cabal install -foptimized` enables optimizations -flag optimized - manual: True - default: False - -flag quiet - manual: True - default: False - --- NOTE: Keep in sync throughout repo. 
-common unison-common - default-language: Haskell2010 - default-extensions: - ApplicativeDo, - BlockArguments, - DeriveFunctor, - DerivingStrategies, - DoAndIfThenElse, - FlexibleContexts, - FlexibleInstances, - LambdaCase, - MultiParamTypeClasses, - ScopedTypeVariables, - TupleSections, - TypeApplications - -library - import: unison-common - - hs-source-dirs: src - - exposed-modules: - Unison.ABT - Unison.ABT.Normalized - Unison.Blank - Unison.ConstructorType - Unison.DataDeclaration - Unison.Hash - Unison.HashQualified - Unison.HashQualified' - Unison.Hashable - Unison.Kind - Unison.LabeledDependency - Unison.Name - Unison.Names2 - Unison.Names3 - Unison.NameSegment - Unison.Paths - Unison.Pattern - Unison.PatternCompat - Unison.Prelude - Unison.Reference - Unison.Reference.Util - Unison.Referent - Unison.Settings - Unison.ShortHash - Unison.Symbol - Unison.Term - Unison.Type - Unison.Util.Components - Unison.Util.List - Unison.Util.Monoid - Unison.Util.Relation - Unison.Util.Relation3 - Unison.Util.Relation4 - Unison.Util.Set - Unison.Var - - build-depends: - base, - bytestring, - containers, - cryptonite, - either, - extra, - lens, - prelude-extras, - memory, - mtl, - rfc5051, - safe, - sandi, - text, - transformers, - util, - vector - - ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures - - if flag(optimized) - ghc-options: -funbox-strict-fields - - if flag(quiet) - ghc-options: -v0 diff --git a/unison-src/Base.u b/unison-src/Base.u deleted file mode 100644 index 2236899e64..0000000000 --- a/unison-src/Base.u +++ /dev/null @@ -1,444 +0,0 @@ -namespace Nat where - maxNat = 18446744073709551615 - - (-) : Nat -> Nat -> Int - (-) = Nat.sub - -namespace Int where - maxInt = +9223372036854775807 - minInt = -9223372036854775808 - -use Universal == < > >= -use Optional None Some - --- Function composition -dot : (b -> c) -> (a -> b) -> a -> c -dot f g x = f (g x) - --- Function composition -andThen : (a -> b) -> (b -> c) -> a 
-> c -andThen f g x = g (f x) - -const : a -> b -> a -const a _ = a - -use Tuple Cons - -namespace Tuple where - at1 : Tuple a b -> a - at1 = cases Cons a _ -> a - - at2 : Tuple a (Tuple b c) -> b - at2 = cases Cons _ (Cons b _) -> b - - at3 : Tuple a (Tuple b (Tuple c d)) -> c - at3 = cases Cons _ (Cons _ (Cons c _)) -> c - - at4 : Tuple a (Tuple b (Tuple c (Tuple d e))) -> d - at4 = cases Cons _ (Cons _ (Cons _ (Cons d _))) -> d - -namespace List where - - map : (a -> b) -> [a] -> [b] - map f a = - go i as acc = match List.at i as with - None -> acc - Some a -> go (i + 1) as (acc `snoc` f a) - go 0 a [] - - zip : [a] -> [b] -> [(a,b)] - zip as bs = - go acc i = match (at i as, at i bs) with - (None,_) -> acc - (_,None) -> acc - (Some a, Some b) -> go (acc `snoc` (a,b)) (i + 1) - go [] 0 - - insert : Nat -> a -> [a] -> [a] - insert i a as = take i as ++ [a] ++ drop i as - - replace : Nat -> a -> [a] -> [a] - replace i a as = take i as ++ [a] ++ drop (i + 1) as - - slice : Nat -> Nat -> [a] -> [a] - slice start stopExclusive s = - take (stopExclusive `Nat.drop` start) (drop start s) - - unsafeAt : Nat -> [a] -> a - unsafeAt n as = match at n as with - Some a -> a - None -> Debug.watch "oh noes" (unsafeAt n as) -- Debug.crash "oh noes!" 
- - foldl : (b -> a -> b) -> b -> [a] -> b - foldl f b as = - go b i = match List.at i as with - None -> b - Some a -> go (f b a) (i + 1) - go b 0 - - foldb : (a -> b) -> (b -> b -> b) -> b -> [a] -> b - foldb f op z as = - if List.size as == 0 then z - else if List.size as == 1 then f (unsafeAt 0 as) - else match halve as with (left, right) -> - foldb f op z left `op` foldb f op z right - - reverse : [a] -> [a] - reverse as = foldl (acc a -> List.cons a acc) [] as - - indexed : [a] -> [(a, Nat)] - indexed as = as `zip` range 0 (size as) - - sortBy : (a -> b) -> [a] -> [a] - sortBy f as = - tweak p = match p with (p1,p2) -> (f p1, p2, p1) - Heap.sort (map tweak (indexed as)) |> map Tuple.at3 - - halve : [a] -> ([a], [a]) - halve s = - n = size s / 2 - (take n s, drop n s) - - unfold : s -> (s -> Optional (a, s)) -> [a] - unfold s0 f = - go f s acc = match f s with - None -> acc - Some (a, s) -> go f s (acc `snoc` a) - go f s0 [] - - uncons : [a] -> Optional (a, [a]) - uncons as = match at 0 as with - None -> None - Some a -> Some (a, drop 1 as) - - unsnoc : [a] -> Optional ([a], a) - unsnoc as = - i = size (drop 1 as) - match at i as with - None -> None - Some a -> Some (take i as, a) - - join : [[a]] -> [a] - join = foldl (++) [] - - flatMap : (a -> [b]) -> [a] -> [b] - flatMap f as = join (map f as) - - range : Nat -> Nat -> [Nat] - range start stopExclusive = - f i = if i < stopExclusive then Some (i, i + 1) else None - unfold start f - - distinct : [a] -> [a] - distinct as = - go i seen acc = match List.at i as with - None -> acc - Some a -> if Set.contains a seen then go (i + 1) seen acc - else go (i + 1) (Set.insert a seen) (acc `snoc` a) - go 0 Set.empty [] - - -- Joins a list of lists in a "fair diagonal" fashion. - -- Adapted from the Haskell version written by Luke Palmer. 
- diagonal : [[a]] -> [a] - diagonal = - let - x = 23 - stripe = cases - [] -> [] - [] +: xxs -> stripe xxs - (x +: xs) +: xxs -> cons [x] (zipCons xs (stripe xxs)) - zipCons xs ys = match (xs, ys) with - ([], ys) -> ys - (xs, []) -> map (x -> [x]) xs - (x +: xs, y +: ys) -> cons (cons x y) (zipCons xs ys) - List.join `dot` stripe - --- > List.foldb "" (t t2 -> "(" ++ t ++ " " ++ t2 ++ ")") (x -> x) ["Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Gerald", "Henry"] - --- Sorted maps, represented as a pair of sequences --- Use binary search to do lookups and find insertion points --- This relies on the underlying sequence having efficient --- slicing and concatenation -type Map k v = Map [k] [v] - -use Map Map - -namespace Search where - - indexOf : a -> [a] -> Optional Nat - indexOf a s = - ao = Some a - Search.exact (i -> ao `compare` List.at i s) 0 (size s) - - lubIndexOf' : a -> Nat -> [a] -> Nat - lubIndexOf' a start s = - ao = Some a - Search.lub (i -> ao `compare` List.at i s) start (size s) - - lubIndexOf : a -> [a] -> Nat - lubIndexOf a s = lubIndexOf' a 0 s - - lub : (Nat -> Int) -> Nat -> Nat -> Nat - lub hit bot top = - if bot >= top then top - else - mid = (bot + top) / 2 - match hit mid with - +0 -> mid - -1 -> lub hit bot mid - +1 -> lub hit (mid + 1) top - - exact : (Nat -> Int) -> Nat -> Nat -> Optional Nat - exact hit bot top = - if bot >= top then None - else - mid = (bot + top) / 2 - match hit mid with - +0 -> Some mid - -1 -> exact hit bot mid - +1 -> exact hit (mid + 1) top - --- > ex = [0,2,4,6,77,192,3838,12000] --- > List.map (e -> indexOf e ex) ex --- > lubIndexOf 193 ex - - -(|>) : a -> (a -> b) -> b -a |> f = f a - -(<|) : (a -> b) -> a -> b -f <| a = f a - -id : a -> a -id a = a - -namespace Map where - - empty : Map k v - empty = Map [] [] - - singleton : k -> v -> Map k v - singleton k v = Map [k] [v] - - fromList : [(k,v)] -> Map k v - fromList kvs = - go acc i = match List.at i kvs with - None -> acc - Some (k,v) -> go (insert k v 
acc) (i + 1) - go empty 0 - - toList : Map k v -> [(k,v)] - toList m = List.zip (keys m) (values m) - - size : Map k v -> Nat - size s = List.size (keys s) - - lookup : k -> Map k v -> Optional v - lookup k = cases - Map ks vs -> match Search.indexOf k ks with - None -> None - Some i -> at i vs - - contains : k -> Map k v -> Boolean - contains k cases Map ks _ -> match Search.indexOf k ks with - None -> false - _ -> true - - insert : k -> v -> Map k v -> Map k v - insert k v = cases Map ks vs -> - use Search lubIndexOf - i = lubIndexOf k ks - match at i ks with - Some k' -> - if k == k' then Map ks (List.replace i v vs) - else Map (List.insert i k ks) (List.insert i v vs) - None -> Map (ks `snoc` k) (vs `snoc` v) - - map : (v -> v2) -> Map k v -> Map k v2 - map f m = Map (keys m) (List.map f (values m)) - - mapKeys : (k -> k2) -> Map k v -> Map k2 v - mapKeys f m = Map (List.map f (keys m)) (values m) - - union : Map k v -> Map k v -> Map k v - union = unionWith (_ v -> v) - - unionWith : (v -> v -> v) -> Map k v -> Map k v -> Map k v - unionWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> - go i j ko vo = match (at i k1, at j k2) with - (None, _) -> Map (ko ++ drop j k2) (vo ++ drop j v2) - (_, None) -> Map (ko ++ drop i k1) (vo ++ drop i v1) - (Some kx, Some ky) -> - use List slice unsafeAt - use Search lubIndexOf' - if kx == ky then - go (i + 1) (j + 1) - (ko `snoc` kx) - (vo `snoc` f (unsafeAt i v1) (unsafeAt j v2)) - else if kx < ky then - i' = lubIndexOf' ky i k1 - go i' j (ko ++ slice i i' k1) (vo ++ slice i i' v1) - else - j' = lubIndexOf' kx j k2 - go i j' (ko ++ slice j j' k2) (vo ++ slice j j' v2) - go 0 0 [] [] - - intersect : Map k v -> Map k v -> Map k v - intersect = intersectWith (_ v -> v) - - intersectWith : (v -> v -> v2) -> Map k v -> Map k v -> Map k v2 - intersectWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> - go i j ko vo = match (at i k1, at j k2) with - (None, _) -> Map ko vo - (_, None) -> Map ko vo - (Some kx, 
Some ky) -> - if kx == ky then - go (i + 1) (j + 1) - (ko `snoc` kx) - (vo `snoc` f (List.unsafeAt i v1) (List.unsafeAt j v2)) - else if kx < ky then - i' = Search.lubIndexOf' ky i k1 - go i' j ko vo - else - j' = Search.lubIndexOf' kx j k2 - go i j' ko vo - go 0 0 [] [] - - keys : Map k v -> [k] - keys = cases Map ks _ -> ks - - values : Map k v -> [v] - values = cases Map _ vs -> vs - -namespace Multimap where - - insert : k -> v -> Map k [v] -> Map k [v] - insert k v m = match Map.lookup k m with - None -> Map.insert k [v] m - Some vs -> Map.insert k (vs `snoc` v) m - - lookup : k -> Map k [v] -> [v] - lookup k m = Optional.orDefault [] (Map.lookup k m) - -type Set a = Set (Map a ()) -use Set Set - -namespace Set where - - empty : Set k - empty = Set Map.empty - - underlying : Set k -> Map k () - underlying = cases Set s -> s - - toMap : (k -> v) -> Set k -> Map k v - toMap f = cases Set (Map ks vs) -> Map ks (List.map f ks) - - fromList : [k] -> Set k - fromList ks = Set (Map.fromList (List.map (k -> (k,())) ks)) - - toList : Set k -> [k] - toList = cases Set (Map ks _) -> ks - - contains : k -> Set k -> Boolean - contains k = cases Set m -> Map.contains k m - - insert : k -> Set k -> Set k - insert k = cases Set s -> Set (Map.insert k () s) - - union : Set k -> Set k -> Set k - union s1 s2 = Set (Map.union (underlying s1) (underlying s2)) - - size : Set k -> Nat - size s = Map.size (underlying s) - - intersect : Set k -> Set k -> Set k - intersect s1 s2 = Set (Map.intersect (underlying s1) (underlying s2)) - -type Heap k v = Heap Nat k v [Heap k v] -use Heap Heap - -namespace Heap where - - singleton : k -> v -> Heap k v - singleton k v = Heap 1 k v [] - - size : Heap k v -> Nat - size = cases Heap n _ _ _ -> n - - union : Heap k v -> Heap k v -> Heap k v - union h1 h2 = match (h1, h2) with - (Heap n k1 v1 hs1, Heap m k2 v2 hs2) -> - if k1 >= k2 then Heap (n + m) k1 v1 (cons h2 hs1) - else Heap (n + m) k2 v2 (cons h1 hs2) - - pop : Heap k v -> Optional (Heap k 
v) - pop h = - go h subs = - use List drop size unsafeAt - if size subs == 0 then h - else if size subs == 1 then h `union` unsafeAt 0 subs - else union h (unsafeAt 0 subs) `union` go (unsafeAt 1 subs) (drop 2 subs) - match List.uncons (children h) with - None -> None - Some (s0, subs) -> Some (go s0 subs) - - children : Heap k v -> [Heap k v] - children = cases Heap _ _ _ cs -> cs - - max : Heap k v -> (k, v) - max = cases Heap _ k v _ -> (k, v) - - maxKey : Heap k v -> k - maxKey = cases Heap _ k _ _ -> k - - fromList : [(k,v)] -> Optional (Heap k v) - fromList kvs = - op a b = match a with - None -> b - Some a -> match b with - None -> Some a - Some b -> Some (union a b) - single = cases - (k, v) -> Some (singleton k v) - List.foldb single op None kvs - - fromKeys : [a] -> Optional (Heap a a) - fromKeys as = fromList (List.map (a -> (a,a)) as) - - sortDescending : [a] -> [a] - sortDescending as = - step = cases - None -> None - Some h -> Some (max h, pop h) - List.unfold (fromKeys as) step |> List.map Tuple.at1 - - sort : [a] -> [a] - sort as = sortDescending as |> List.reverse - --- > sort [11,9,8,4,5,6,7,3,2,10,1] - -namespace Optional where - - map : (a -> b) -> Optional a -> Optional b - map f = cases - None -> None - Some a -> Some (f a) - - orDefault : a -> Optional a -> a - orDefault a = cases - None -> a - Some a -> a - - orElse : Optional a -> Optional a -> Optional a - orElse a b = match a with - None -> b - Some _ -> a - - flatMap : (a -> Optional b) -> Optional a -> Optional b - flatMap f = cases - None -> None - Some a -> f a - - map2 : (a -> b -> c) -> Optional a -> Optional b -> Optional c - map2 f oa ob = flatMap (a -> map (f a) ob) oa diff --git a/unison-src/Cofree.u b/unison-src/Cofree.u deleted file mode 100644 index c697feb1cb..0000000000 --- a/unison-src/Cofree.u +++ /dev/null @@ -1,20 +0,0 @@ -type Cofree f a = Cofree a (f (Cofree f a)) - -type Functor f = Functor (forall a b. 
(a ->{} b) -> f a ->{} f b) - -use Functor Functor -fmap : Functor f -> (a -> b) -> f a -> f b -fmap fn f = match fn with - Functor map -> map f - -use Cofree Cofree - -namespace Cofree where - - extract : Cofree f a -> a - extract = cases - Cofree a _ -> a - - duplicate : Functor f -> Cofree f a -> Cofree f (Cofree f a) - duplicate f c = match c with - Cofree a p -> Cofree c (fmap f (duplicate f) p) diff --git a/unison-src/EasyTest.u b/unison-src/EasyTest.u deleted file mode 100644 index 4ebef149b5..0000000000 --- a/unison-src/EasyTest.u +++ /dev/null @@ -1,263 +0,0 @@ -use Test Success Status Report Test Scope -use Test.Status Failed Expected Unexpected Pending -use Test.Success Passed Proved -use Test.Report Report -use Test.Test Test -use Test passed proved failed expected unexpected pending finished label -use Test.Scope Scope -use List flatMap - -type Test.Success = Passed Nat | Proved - -type Test.Status = Failed - | Expected Test.Success - | Unexpected Test.Success - | Pending - --- Current scope together with accumulated test report. -type Test.Report = Report (Trie Text Test.Status) - -type Test.Test = Test (Test.Scope -> Test.Report) - -unique type Test.Scope = Scope [Text] - -foldSuccess : (Nat -> r) -> r -> Success -> r -foldSuccess passed proved = cases - Passed n -> passed n - Proved -> proved - -foldStatus : r -> (Success -> r) -> (Success -> r) -> r -> Status -> r -foldStatus failed expected unexpected pending = cases - Failed -> failed - Expected s -> expected s - Unexpected s -> unexpected s - Pending -> pending - -foldReport : (Trie Text Test.Status -> r) -> Report -> r -foldReport k r = case r of Report t -> k t - -foldScope : ([Text] -> r) -> Scope -> r -foldScope k = cases Scope ss -> k ss - -Scope.cons : Text -> Scope -> Scope -Scope.cons n = foldScope (Scope . List.cons n) - --- Basic building blocks of tests -Test.finished : Status -> Test -Test.finished st = - Test (Report . 
foldScope (sc -> Trie.singleton sc st)) - -Test.failed : Test -Test.failed = finished Failed - -Test.proved : Test -Test.proved = finished <| Expected Proved - -Test.passed : Test -Test.passed = finished . Expected <| Passed 1 - -Test.passedUnexpectedly : Test -Test.passedUnexpectedly = finished . Unexpected <| Passed 1 - -Test.provedUnexpectedly : Test -Test.provedUnexpectedly = finished <| Unexpected Proved - --- Basic test combinators - -Test.modifyStatus : (Status -> Status) -> Test -> Test -Test.modifyStatus f = - cases Test k -> Test (foldReport (Report . map f) . k) - -Test.label : Text -> Test -> Test -Test.label n = cases - Test.Test.Test k -> Test (scope -> k <| Scope.cons n scope) - -use Test.Report combine - -(Test.&&) : Test -> Test -> Test -(Test.&&) a b = match (a,b) with - (Test k1, Test k2) -> - Test ( - scope -> - let r1 = k1 scope - r2 = k2 scope - combine r1 r2) - -Test.passedWith : Text -> Test -Test.passedWith m = label m passed - -Test.provedWith : Text -> Test -Test.provedWith m = label m proved - -Test.failedWith : Text -> Test -Test.failedWith m = Test.label m Test.failed - --- Report combinators - -Test.Report.combine : Report -> Report -> Report -Test.Report.combine r1 r2 = match (r1, r2) with - (Test.Report.Report t1, Test.Report.Report t2) -> - Report <| Trie.unionWith Status.combine t1 t2 - -Test.Report.empty : Report -Test.Report.empty = Report empty - -Test.Report.toCLIResult : Report -> [Test.Result] -Test.Report.toCLIResult r = - descend scope = cases (k, t) -> - go ((if scope != "" then (scope ++ ".") else "") ++ k) t - convert : Text -> Test.Status -> Test.Result - convert scope = cases - Test.Status.Failed -> Test.Result.Fail scope - Test.Status.Expected (Test.Success.Passed n) -> - Test.Result.Ok (scope ++ " : Passed " ++ Nat.toText n ++ " tests.") - Test.Status.Expected (Test.Success.Proved) -> - Test.Result.Ok (scope ++ " : Proved.") - go : Text -> Trie Text Test.Status -> [Test.Result] - go scope t = - rest = flatMap 
(descend scope) (Map.toList (tail t)) - match head t with - Optional.Some status -> - cons (convert scope status) rest - Optional.None -> rest - match r with Test.Report.Report t -> go "" t - -Test.report : Test -> Report -Test.report = cases Test k -> k (Scope []) - --- Running tests - -Test.run : Test -> [Test.Result] -Test.run = Test.Report.toCLIResult . Test.report - -Test.runAll : [Test] -> [Test.Result] -Test.runAll = flatMap Test.run - --- Status combinators - -Status.combine : Test.Status -> Test.Status -> Test.Status -Status.combine s1 s2 = match (s1, s2) with - (_, Pending) -> Pending - (Pending, _) -> Pending - (Failed, _) -> Failed - (_, Failed) -> Failed - (Unexpected a, Unexpected b) -> Unexpected (Success.combine a b) - (Unexpected a, _) -> Unexpected a - (_, Unexpected b) -> Unexpected b - (Expected a, Expected b) -> Expected (Success.combine a b) - - -Status.pending : Test.Status -> Test.Status -Status.pending = cases - Failed -> Pending - Expected s -> Unexpected s - Unexpected s -> Pending - Pending -> Pending - --- Make a test pending -Test.pending : Test -> Test -Test.pending = modifyStatus Status.pending - -Test.modifyScope : (Scope -> Scope) -> Test -> Test -Test.modifyScope f = cases Test k -> Test (k . f) - -Success.combine s1 s2 = match (s1, s2) with - (Passed n, Passed m) -> Passed (n + m) - (Passed n, Proved) -> Passed n - (Proved, Passed n) -> Passed n - (Proved, Proved) -> Proved - --- Test case generation - --- A domain is either small, in which case we can exhaustively list all the --- values in the domain, or it's large, in which case we can ask for a value --- of a particular size. -type Domain a = Small [a] | Large (Weighted a) - --- The domain of natural numbers is large. 
-Domain.nats : Domain Nat -Domain.nats = Large Weighted.nats - --- The domain of all integers -Domain.ints : Domain Int -Domain.ints = let - go n = yield n <|> weight 1 - '(go (if n > +0 then negate n else increment (negate n))) - Large (List.foldl (a n -> a <|> yield n) - Weighted.Fail - [+0, +1, -1, maxInt, minInt] <|> go +2) - -use Universal == < > - -namespace Domain where - - -- The threshold of "small" domains. - smallSize = 10000 - - -- The Boolean domain is small - boolean : Domain Boolean - boolean = Small [false, true] - - -- The domain of lists of arbitrary data is large - listsOf : Domain a -> Domain [a] - listsOf d = - Large (Weighted.lists match d with - Domain.Small as -> Weighted.fromList as - Domain.Large w -> w) - - lists : Domain [()] - lists = Domain.listsOf (Small [()]) - - sample : Nat -> Domain a -> [a] - sample n = cases - Domain.Large w -> Weighted.sample n w - Domain.Small xs -> take n xs - - map : (a -> b) -> Domain a -> Domain b - map f = cases - Domain.Large w -> Domain.Large (Weighted.map f w) - Domain.Small as -> Domain.Small (List.map f as) - - pairs : Domain a -> Domain (a,a) - pairs d = lift2 (a b -> (a,b)) d d - - tuples : Domain a -> Domain b -> Domain (Pair a b) - tuples = lift2 (a b -> Pair a b) - - lift2 : (a -> b -> c) -> Domain a -> Domain b -> Domain c - lift2 f da db = let - wa = weighted da - wb = weighted db - wc = mergeWith (a1 a2 -> f a1 a2) wa wb - match (da, db) with - (Domain.Small as, Domain.Small bs) | size as + size bs < smallSize -> - Small (Weighted.sample smallSize wc) - _ -> Large wc - - weighted : Domain a -> Weighted a - weighted = cases - Domain.Small as -> Weighted.fromList as - Domain.Large w -> w - --- Test a property for a given domain up to a maximum size -Test.forAll' : Nat -> Domain a -> (a -> Boolean) -> Test -Test.forAll' maxSize domain property = - check xs s = - List.map ( - cases (c, i) -> - if property c then finished (Expected s) - else label ("test case " ++ Nat.toText i) (finished Failed) - 
) (indexed xs) - List.foldb id (Test.&&) proved <| - match domain with - Domain.Small xs -> check (take maxSize xs) Proved - Domain.Large _ -> check (sample maxSize domain) (Passed 1) - -Test.check' : Boolean -> Test -Test.check' b = if b then Test.proved else Test.failed - -Test.forAll : Nat -> Domain a -> (a -> Boolean) -> [Test.Result] -Test.forAll n d p = Test.run (Test.forAll' n d p) - -Test.check : Boolean -> [Test.Result] -Test.check = Test.run . Test.check' diff --git a/unison-src/Trie.u b/unison-src/Trie.u deleted file mode 100644 index 9a54522e18..0000000000 --- a/unison-src/Trie.u +++ /dev/null @@ -1,39 +0,0 @@ -type Trie k v = { head : Optional v, tail : Map k (Trie k v) } - -namespace Trie where - empty : Trie k v - empty = Trie None Map.empty - - lookup : [k] -> Trie k v -> Optional v - lookup path t = match path with - [] -> Trie.head t - p +: ps -> flatMap (lookup ps) (Map.lookup p (Trie.tail t)) - - unionWith : (v -> v -> v) -> Trie k v -> Trie k v -> Trie k v - unionWith f t1 t2 = - h1 = Trie.head t1 - h2 = Trie.head t2 - Trie (map2 f h1 h2 `orElse` h1 `orElse` h2) - (Map.unionWith (unionWith f) (Trie.tail t1) (Trie.tail t2)) - -Trie.union : Trie k v -> Trie k v -> Trie k v -Trie.union = Trie.unionWith const - -Trie.insert : [k] -> v -> Trie k v -> Trie k v -Trie.insert path v t = - Trie.unionWith const (Trie.singleton path v) t - -Trie.singleton : [k] -> v -> Trie k v -Trie.singleton path v = - match path with - [] -> Trie (Some v) empty - k +: ks -> Trie None (Map.fromList [(k, Trie.singleton ks v)]) - -use Trie tail head - -Trie.map : (v1 -> v2) -> Trie k v1 -> Trie k v2 -Trie.map f t = Trie (map f (head t)) (map (Trie.map f) (tail t)) - -Trie.mapKeys : (k1 -> k2) -> Trie k1 v -> Trie k2 v -Trie.mapKeys f t = - Trie (head t) (Map.mapKeys f (Map.map (Trie.mapKeys f) (tail t))) diff --git a/unison-src/WeightedSearch.u b/unison-src/WeightedSearch.u deleted file mode 100644 index 789425191f..0000000000 --- a/unison-src/WeightedSearch.u +++ 
/dev/null @@ -1,69 +0,0 @@ --- A data structure that allows giving computations weight such that the --- lowest-cost computation will be returned first. Allows searching --- infinite spaces productively. --- --- Adapted from http://hackage.haskell.org/package/weighted-search-0.1.0.1 -use Universal == < > - -type Weighted a - = Fail - | Yield a (Weighted a) - | Weight Nat (() -> Weighted a) - -namespace Weighted where - - weight : Nat ->{e} (() ->{e} Weighted a) ->{e} Weighted a - weight w ws = Weight w ws - - map : (a ->{e} b) -> Weighted a ->{e} Weighted b - map f = cases - Weighted.Fail -> Weighted.Fail - Weighted.Yield x w -> Yield (f x) (map f w) - Weighted.Weight a w -> weight a '(map f !w) - - yield : a -> Weighted a - yield a = Yield a Fail - - flatMap : (a -> Weighted b) -> Weighted a -> Weighted b - flatMap f = cases - Weighted.Fail -> Weighted.Fail - Weighted.Yield x m -> f x <|> flatMap f m - Weighted.Weight w m -> Weight w '(flatMap f !m) - - mergeWith : (a -> b -> c) -> Weighted a -> Weighted b -> Weighted c - mergeWith f as bs = - flatMap (a -> map (b -> f a b) bs) as - - (<|>): Weighted a -> Weighted a -> Weighted a - (<|>) m n = match (m, n) with - (Weighted.Fail, n) -> n - (Weighted.Yield x m, n) -> Yield x (m <|> n) - (Weighted.Weight w m, Weighted.Fail) -> Weight w m - (Weighted.Weight w m, Weighted.Yield x n) -> - Yield x (Weight w m <|> n) - (Weighted.Weight w m, Weighted.Weight w' n) -> - if w < w' then Weight w '(!m <|> Weight (w' `drop` w) n) - else if w == w' then Weight w '(!m <|> !n) - else Weight w '(Weight (w `drop` w') m <|> !n) - - sample : Nat -> Weighted a -> [a] - sample n wsa = - if n > 0 then - match wsa with - Weighted.Fail -> [] - Weighted.Yield a ms -> cons a (sample (n `drop` 1) ms) - Weighted.Weight _ w -> sample n !w - else [] - - nats : Weighted Nat - nats = let - go n = yield n <|> weight 1 '(go (n + 1)) - go 0 - - lists : Weighted a -> Weighted [a] - lists w = yield [] <|> weight 1 '(mergeWith cons w (lists w)) - - 
fromList : [a] -> Weighted a - fromList = cases - [] -> Weighted.Fail - a +: as -> yield a <|> weight 1 '(fromList as) diff --git a/unison-src/base58.u b/unison-src/base58.u deleted file mode 100644 index a14d6eccce..0000000000 --- a/unison-src/base58.u +++ /dev/null @@ -1,60 +0,0 @@ --- TODO: Characters --- TODO: Bytes - -type Optional a = Some a | None - -type Words = Words (List Nat) -type Integer = Integer - -Integer.zero : Integer -Integer.zero = _ - -shiftLeft : Nat -> Integer -> Integer -shiftLeft x y = _ - -(+) : Integer -> Integer -> Integer -(+) x y = _ - -unfoldRight : ∀ a b . (a -> Optional (a, b)) -> a -> List b -unfoldRight f z = _ - -foldLeft : ∀ a b . a -> (a -> b -> a) -> List b -> a -foldLeft z f s = _ - -toInteger : Nat -> Integer -toInteger x = _ - -bigEndian : Words -> Integer -bigEndian = cases - Words.Words s -> - foldLeft Integer.zero (acc w -> shiftLeft 8 acc + toInteger w) s - --- TODO: Need some conversions between integers and machine integers -divmod : Integer -> Nat -> (Integer, Nat) -divmod x y = _ - -(|>) : ∀ a b c . 
(a -> b) -> (b -> c) -> a -> c -(|>) g f x = f (g x) - -(==) : Integer -> Nat -> Boolean -(==) a b = _ - -charAt : Nat -> Text -> Text -charAt n = Text.drop n |> Text.take 1 - -codeString : Text -codeString = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" - -base58Encode : Words -> Text -base58Encode ws = - x = bigEndian ws - base58 : Integer -> Optional (Integer, Text) - base58 a = - if a == 0 - then Optional.None - else match divmod a 58 with - (d, m) -> Optional.Some (d, charAt m codeString) - foldLeft "" Text.concatenate (unfoldRight base58 x) - -base58Decode : Text -> Words -base58Decode txt = _ diff --git a/unison-src/basics.u b/unison-src/basics.u deleted file mode 100644 index 073da21f27..0000000000 --- a/unison-src/basics.u +++ /dev/null @@ -1,72 +0,0 @@ - --- Unison is a statically typed functional language - -increment : Nat -> Nat -- signature is optional -increment n = n + 1 - --- Lines starting with `>` are evaluated and printed on every file save. -> increment 99 - -replicate : Nat -> a -> [a] -replicate n a = toSequence (take n (constant a)) - --- this is nice for quick testing! - -> replicate 3 "bye" - --- can ask Unison for the type of any expression just by adding `?` to the end of it - --- > (replicate 4)? 
- --- here's a more interesting example, mergesort - - --- First we define the merge function, it merges two sorted lists -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - use Sequence ++ - use Optional None Some - go acc a b = match at 0 a with - None -> acc ++ b - Some hd1 -> match at 0 b with - None -> acc ++ a - Some hd2 -> - if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b - else go (acc `snoc` hd2) a (drop 1 b) - go [] a b - --- let's make sure it works -> merge (<) [1,3,4,99,504,799] [0,19,22,23] - --- looks good, now let's write mergesort - -sort : (a -> a -> Boolean) -> [a] -> [a] -sort lte a = - if Sequence.size a < 2 then a - else - l = sort lte (take (size a / 2) a) - r = sort lte (drop (size a / 2) a) - merge lte l r - --- let's make sure it works - -> sort (<) [3,2,1,1,2,3,9182,1,2,34,1,80] - -> sort (<) ["Dave", "Carol", "Eve", "Alice", "Bob", "Francis", "Hal", "Illy", "Joanna", "Greg", "Karen"] - --- SHIP IT!! 🚢 - --- If you make a mistake, we try to have nice friendly error messages, not: --- 🤖 ERROR DETECTED ⚡️ BEEP BOOP ⚡️ PLS RESUBMIT PROGRAM TO MAINFRAME - --- a few examples of failing programs - - ---err1 = --- a = "3" --- sort (<) [1,2,a] - --- err1a = sort (<) "not a list" - ---err2 : x -> y -> x ---err2 thing1 thing2 = --- if true then thing1 --- else thing2 diff --git a/unison-src/demo/1.u b/unison-src/demo/1.u deleted file mode 100644 index 02ccb456de..0000000000 --- a/unison-src/demo/1.u +++ /dev/null @@ -1,6 +0,0 @@ -increment : Nat -> Nat -increment n = n + 1 - -> x = 1 + 40 -> increment x - diff --git a/unison-src/demo/2.u b/unison-src/demo/2.u deleted file mode 100644 index 530ea9ade4..0000000000 --- a/unison-src/demo/2.u +++ /dev/null @@ -1,46 +0,0 @@ -use Optional None Some -use Universal < - -uncons : [a] -> Optional (a, [a]) -uncons as = match at 0 as with - None -> None - Some hd -> Some (hd, drop 1 as) - -halve : [a] -> ([a], [a]) -halve s = splitAt (size s / 2) s - -splitAt : Nat -> [a] -> ([a], [a]) 
-splitAt n as = (take n as, drop n as) - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - use List ++ - go out a b = match (uncons a, uncons b) with - (None,_) -> out ++ b - (_,None) -> out ++ a - (Some (hA, tA), Some (hB, tB)) -> - if hA `lte` hB then go (out `snoc` hA) tA b - else go (out `snoc` hB) a tB - go [] a b - -sort : (a -> a -> Boolean) -> [a] -> [a] -sort lte as = - if size as < 2 then as - else match halve as with (left, right) -> - l = sort lte left - r = sort lte right - merge lte l r - --- let's make sure it works - -> uncons [1, 2, 3] - -> sort (<) [3,2,1,1,2,3,9182,1,2,34,1,23] - -> sort (<) ["Dave", "Carol", "Eve", "Alice", "Bob", "Francis", "Hal", "Illy", "Joanna", "Greg", "Karen"] - --- these programs have some type errors - --- > sort (<) [3,2,1,1,2,3,9182,1,2,34,1,"oops"] - --- > merge (<) [1,4,5,90,102] ["a", "b"] diff --git a/unison-src/demo/3.u b/unison-src/demo/3.u deleted file mode 100644 index b630facd7d..0000000000 --- a/unison-src/demo/3.u +++ /dev/null @@ -1,115 +0,0 @@ - -type Future a = Future ('{Remote} a) - --- A simple distributed computation ability -ability Remote where - - -- Spawn a new node - spawn : {Remote} Node - - -- Start evaluating a computation on another node - at : Node -> '{Remote} a ->{Remote} Future a - -type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair - -force : Future a ->{Remote} a -force = cases Future.Future r -> !r - --- Let's test out this beast! do we need to deploy our code to some EC2 instances?? --- Gak, no not yet, we just want to test locally, let's write a handler --- for the `Remote` ability that simulates everything locally! 
- -use Future Future -use Optional None Some -use Monoid Monoid -use List ++ at -use Universal < - -List.map : (a ->{e} b) -> [a] ->{e} [b] -List.map f as = - go f acc as i = match at i as with - None -> acc - Some a -> go f (acc `snoc` f a) as (i + 1) - go f [] as 0 - -type Monoid a = Monoid (a -> a -> a) a - -Monoid.zero = cases Monoid.Monoid op z -> z -Monoid.op = cases Monoid.Monoid op z -> op - -Monoid.orElse m = cases - None -> Monoid.zero m - Some a -> a - -uncons : [a] -> Optional (a, [a]) -uncons as = match at 0 as with - None -> None - Some hd -> Some (hd, drop 1 as) - -dreduce : Monoid a -> [a] ->{Remote} a -dreduce m a = - if size a < 2 then Monoid.orElse m (List.at 0 a) - else - l = Remote.at Remote.spawn '(dreduce m (take (size a / 2) a)) - r = Remote.at Remote.spawn '(dreduce m (drop (size a / 2) a)) - Monoid.op m (force l) (force r) - -dmapReduce : (a ->{Remote} b) -> Monoid b -> [a] ->{Remote} b -dmapReduce f m as = dreduce m (List.map f as) - -dsort2 : (a -> a -> Boolean) -> [a] ->{Remote} [a] -dsort2 lte as = - dreduce (Monoid (merge lte) []) - (List.map (a -> [a]) as) - -halve : [a] -> ([a], [a]) -halve s = splitAt (size s / 2) s - -splitAt : Nat -> [a] -> ([a], [a]) -splitAt n as = (take n as, drop n as) - -Node.increment : Node -> Node -Node.increment n = - use Node.Node -- the constructor - match n with Node n -> Node (n + 1) - -Remote.runLocal : '{Remote} a -> a -Remote.runLocal r = - step nid = cases - {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) - {Remote.at _ t -> k} -> handle k (Future t) with step nid - {a} -> a -- the pure case - handle !r with step (Node.Node 0) - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - go out a b = match (uncons a, uncons b) with - (None,_) -> out ++ b - (_,None) -> out ++ a - (Some (hA, tA), Some (hB, tB)) -> - if hA `lte` hB then go (out `snoc` hA) tA b - else go (out `snoc` hB) a tB - go [] a b - -> merge (<) [1,3,4,99,504,799] [0,19,22,23] - -sort : (a -> a -> 
Boolean) -> [a] -> [a] -sort lte as = - if size as < 2 then as - else match halve as with (left, right) -> - l = sort lte left - r = sort lte right - merge lte l r - -dsort : (a -> a -> Boolean) -> [a] ->{Remote} [a] -dsort lte as = - use Remote at spawn - if size as < 2 then as - else match halve as with (left, right) -> - r1 = at spawn '(dsort lte left) - r2 = at spawn '(dsort lte right) - merge lte (force r1) (force r2) - -> sort (<) [1,2,3,234,6,2,4,66,2,33,4,2,57] - -> Remote.runLocal '(dsort (<) [1,2,3,234,6,2,4,66,2,33,4,2,57]) diff --git a/unison-src/errors/407.u b/unison-src/errors/407.u deleted file mode 100644 index 4a4705a63a..0000000000 --- a/unison-src/errors/407.u +++ /dev/null @@ -1,8 +0,0 @@ -use Universal < - -foo : x -> x -> Nat -foo x y = 42 - - -> foo "hi" 2 - diff --git a/unison-src/errors/X-array.u b/unison-src/errors/X-array.u deleted file mode 100644 index 6323617195..0000000000 --- a/unison-src/errors/X-array.u +++ /dev/null @@ -1,6 +0,0 @@ -type X = S Text | I Nat - -foo : a -> b -> c -> X -foo x y z = X.S "" - -[foo +1 1 1.0, 1] diff --git a/unison-src/errors/abort-ability-checks-against-pure.u b/unison-src/errors/abort-ability-checks-against-pure.u deleted file mode 100644 index 1d41bf7a1e..0000000000 --- a/unison-src/errors/abort-ability-checks-against-pure.u +++ /dev/null @@ -1,9 +0,0 @@ ---Abort -ability Abort where - Abort : forall a . () -> {Abort} a - -bork = u -> 1 + (Abort.Abort ()) - -(bork : () -> {} Nat) - --- failing to fail in commit 2819c206acf80f926c6d970a4ffd47c961fa4502 diff --git a/unison-src/errors/all-errors.u b/unison-src/errors/all-errors.u deleted file mode 100644 index 91d44a3e79..0000000000 --- a/unison-src/errors/all-errors.u +++ /dev/null @@ -1,43 +0,0 @@ -type Optional a = Some a | None - -ability Abort where - Abort : forall a . () -> {Abort} a - -ability Abort2 where - Abort2 : forall a . () -> {Abort2} a - Abort2' : forall a . 
() -> {Abort2} a - -app : Optional Int -app = Optional.Some 3 - -app' : Optional Int -app' = 3 - -arrow : Int -> Int -> Int -arrow a = 3 - -ability' : Nat -> { Abort } Int -ability' n = Abort2.Abort2 () - -id : forall a . a -> a -id x = 3 - -f2 : forall a . a -> a -> a -f2 x = x - -const : forall a b . a -> b -> a -const a b = 3 - -y : (Optional Int) -y = 3 - -z' : (Optional Int, Optional Text, Optional Float) -z' = (None, 3) - -z : (Optional Int, Optional Text, Optional Float) -z = 3 - -x : () -x = 3 - -() diff --git a/unison-src/errors/check-for-regressions/and.u b/unison-src/errors/check-for-regressions/and.u deleted file mode 100644 index 07af2a22fc..0000000000 --- a/unison-src/errors/check-for-regressions/and.u +++ /dev/null @@ -1,7 +0,0 @@ -and true 3 - --- InSubtype t1=Nat, t2=Boolean --- InCheck e=3, t=Boolean --- InSynthesizeApp t=Boolean -> Boolean, e=3, n=2 --- InAndApp --- InSynthesize e=and true 3 diff --git a/unison-src/errors/check-for-regressions/app-polymorphic.u b/unison-src/errors/check-for-regressions/app-polymorphic.u deleted file mode 100644 index fa91f0d73a..0000000000 --- a/unison-src/errors/check-for-regressions/app-polymorphic.u +++ /dev/null @@ -1,4 +0,0 @@ -foo : a -> a -> a -> a -> a -> a -foo a b c d e = e - -foo 1 2 3 "ha" 5 diff --git a/unison-src/errors/check-for-regressions/app.u b/unison-src/errors/check-for-regressions/app.u deleted file mode 100644 index 6db180418f..0000000000 --- a/unison-src/errors/check-for-regressions/app.u +++ /dev/null @@ -1,4 +0,0 @@ -foo : Nat -> Nat -> Nat -> Nat -> Nat -> Nat -foo a b c d e = a + b + c + d + e - -foo 1 2 3 "ha" 5 diff --git a/unison-src/errors/check-for-regressions/applying-non-function.u b/unison-src/errors/check-for-regressions/applying-non-function.u deleted file mode 100644 index 8aad6fb95a..0000000000 --- a/unison-src/errors/check-for-regressions/applying-non-function.u +++ /dev/null @@ -1,4 +0,0 @@ --- "Hello" "world" - -id a = a -id 1 1 diff --git 
a/unison-src/errors/check-for-regressions/casebody.u b/unison-src/errors/check-for-regressions/casebody.u deleted file mode 100644 index 48f06838e4..0000000000 --- a/unison-src/errors/check-for-regressions/casebody.u +++ /dev/null @@ -1,3 +0,0 @@ -match 3 with - 3 -> 4 - 4 -> "Surprise!" diff --git a/unison-src/errors/check-for-regressions/caseguard.u b/unison-src/errors/check-for-regressions/caseguard.u deleted file mode 100644 index 751aa609ae..0000000000 --- a/unison-src/errors/check-for-regressions/caseguard.u +++ /dev/null @@ -1,2 +0,0 @@ -match 3 with - 3 | 3 -> 4 diff --git a/unison-src/errors/check-for-regressions/casepattern.u b/unison-src/errors/check-for-regressions/casepattern.u deleted file mode 100644 index d9f2230872..0000000000 --- a/unison-src/errors/check-for-regressions/casepattern.u +++ /dev/null @@ -1,3 +0,0 @@ -match 3 with - 3 -> "Great!" - "Great" -> "Terrible." diff --git a/unison-src/errors/check-for-regressions/ifcond.u b/unison-src/errors/check-for-regressions/ifcond.u deleted file mode 100644 index 4db94c8e04..0000000000 --- a/unison-src/errors/check-for-regressions/ifcond.u +++ /dev/null @@ -1 +0,0 @@ -if 3 then 4 else 5 diff --git a/unison-src/errors/check-for-regressions/ifelse.u b/unison-src/errors/check-for-regressions/ifelse.u deleted file mode 100644 index c9907d3b47..0000000000 --- a/unison-src/errors/check-for-regressions/ifelse.u +++ /dev/null @@ -1 +0,0 @@ -if true then 4 else "Surprise!" 
diff --git a/unison-src/errors/check-for-regressions/lens.u b/unison-src/errors/check-for-regressions/lens.u deleted file mode 100644 index 9a4e4b1cd0..0000000000 --- a/unison-src/errors/check-for-regressions/lens.u +++ /dev/null @@ -1,9 +0,0 @@ -type Foo a b = Foo a b -use Foo Foo -use Optional Some -setA : Foo a b -> Optional a -> Foo a b -setA foo a = match (foo, a) with - (Foo _ b, Some a) -> Foo a b - _ -> foo - -setA (Foo "hello" 3) (Some 7) diff --git a/unison-src/errors/check-for-regressions/not-and.u b/unison-src/errors/check-for-regressions/not-and.u deleted file mode 100644 index 0563d88621..0000000000 --- a/unison-src/errors/check-for-regressions/not-and.u +++ /dev/null @@ -1,14 +0,0 @@ -notid : Int -> Boolean -notid a = true -and (notid 3) true - --- InSubtype t1=Nat, t2=Int --- InCheck e=3, t=Int --- InSynthesizeApp t=Int -> Boolean, e=3, n=1 --- InSynthesizeApps f=notid1 ft=Int -> Boolean, es=[3] --- InSynthesize e=notid1 3 --- InCheck e=notid1 3, t=Boolean --- InSynthesizeApp t=Boolean -> Boolean -> Boolean, e=notid1 3, n=1 --- InAndApp --- InSynthesize e=and notid1 3 true --- InSynthesize e=Cycle (notid. (let r... 
diff --git a/unison-src/errors/check-for-regressions/not-and0.u b/unison-src/errors/check-for-regressions/not-and0.u deleted file mode 100644 index e57717510d..0000000000 --- a/unison-src/errors/check-for-regressions/not-and0.u +++ /dev/null @@ -1,9 +0,0 @@ -and (3 : Boolean) true - --- InSubtype t1=Nat, t2=Boolean --- InCheck e=3, t=Boolean --- InSynthesize e=3:Boolean --- InCheck e=3:Boolean, t=Boolean --- InSynthesizeApp t=Boolean -> Boolean -> Boolean, e=3:Boolean, n=1 --- InAndApp --- InSynthesize e=and 3:Boolean true diff --git a/unison-src/errors/check-for-regressions/not-caseguard.u b/unison-src/errors/check-for-regressions/not-caseguard.u deleted file mode 100644 index 45447642d3..0000000000 --- a/unison-src/errors/check-for-regressions/not-caseguard.u +++ /dev/null @@ -1,4 +0,0 @@ -notid : Int -> Boolean -notid a = true -match 3 with - 3 | notid 3 -> 4 diff --git a/unison-src/errors/check-for-regressions/not-caseguard2.u b/unison-src/errors/check-for-regressions/not-caseguard2.u deleted file mode 100644 index 899ca1d150..0000000000 --- a/unison-src/errors/check-for-regressions/not-caseguard2.u +++ /dev/null @@ -1,2 +0,0 @@ -match 3 with - 3 | (3 : Boolean) -> 4 diff --git a/unison-src/errors/check-for-regressions/not-or.u b/unison-src/errors/check-for-regressions/not-or.u deleted file mode 100644 index d7af8da9e9..0000000000 --- a/unison-src/errors/check-for-regressions/not-or.u +++ /dev/null @@ -1 +0,0 @@ -or (3 : Boolean) true diff --git a/unison-src/errors/check-for-regressions/not-vector.u b/unison-src/errors/check-for-regressions/not-vector.u deleted file mode 100644 index 07721922df..0000000000 --- a/unison-src/errors/check-for-regressions/not-vector.u +++ /dev/null @@ -1 +0,0 @@ -[1, +1 : Nat] diff --git a/unison-src/errors/check-for-regressions/or.u b/unison-src/errors/check-for-regressions/or.u deleted file mode 100644 index 842c3d3662..0000000000 --- a/unison-src/errors/check-for-regressions/or.u +++ /dev/null @@ -1 +0,0 @@ -or true 3 diff --git 
a/unison-src/errors/check-for-regressions/vector.u b/unison-src/errors/check-for-regressions/vector.u deleted file mode 100644 index dc4fc92d9a..0000000000 --- a/unison-src/errors/check-for-regressions/vector.u +++ /dev/null @@ -1 +0,0 @@ -[1, +1] diff --git a/unison-src/errors/compiler-bug.u b/unison-src/errors/compiler-bug.u deleted file mode 100644 index 9349ad0532..0000000000 --- a/unison-src/errors/compiler-bug.u +++ /dev/null @@ -1,5 +0,0 @@ -x = 1 -Foo.y = 4.0 -a = - x + y - () diff --git a/unison-src/errors/console.u b/unison-src/errors/console.u deleted file mode 100644 index 761be8aa84..0000000000 --- a/unison-src/errors/console.u +++ /dev/null @@ -1,19 +0,0 @@ -ability State s where - get : Nat -> {State s} s - set : s -> {State s} () - -ability Console where - read : () -> {Console} (Optional Text) - write : Text -> {Console} () - -fst = cases Tuple.Cons a _ -> a - -snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b - -namespace Console where - - simulate : Request Console a -> {State ([Text], [Text])} a - simulate = cases - {Console.read _ -> k} -> k Optional.None - -Console.simulate diff --git a/unison-src/errors/console2.u b/unison-src/errors/console2.u deleted file mode 100644 index c57b382e3a..0000000000 --- a/unison-src/errors/console2.u +++ /dev/null @@ -1,29 +0,0 @@ -ability State s where - get : {State s} s - set : s -> {State s} () - -ability Console where - read : {Console} (Optional Text) - write : Text -> {Console} () - -fst = cases Tuple.Cons a _ -> a - -snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b - -namespace Console where - - simulate : Request Console a -> {State ([Text], [Text])} a - simulate = cases - {Console.read -> k} -> - io = State.get - ins = fst io - outs = snd io - State.set (drop 1 ins, outs) - k (at 0 ins) -- this is missing recursive call to handle - {Console.write t -> k} -> - io = State.get - ins = fst io - outs = snd io - k (State.set (ins, cons t outs)) -- this is missing recursive call - -() diff --git 
a/unison-src/errors/cyclic-unguarded.u b/unison-src/errors/cyclic-unguarded.u deleted file mode 100644 index 96d6c671b2..0000000000 --- a/unison-src/errors/cyclic-unguarded.u +++ /dev/null @@ -1,8 +0,0 @@ - -x = y + 1 -y = x + 1 - -> x - - - diff --git a/unison-src/errors/effect-inference1.u b/unison-src/errors/effect-inference1.u deleted file mode 100644 index d65321e992..0000000000 --- a/unison-src/errors/effect-inference1.u +++ /dev/null @@ -1,12 +0,0 @@ -ability Abort where - Abort : forall a . () -> {Abort} a - -foo n = if n >= 1000 then n else !Abort.Abort - -bar : (Nat -> {} Nat) -> Nat -> Nat -bar f i = f i - -bar foo 3 - --- as of 3935b366383fe8184f96cfe714c31ca04234cf27, this typechecks (unexpected) --- and then bombs in the runtime because the Abort ability isn't handled. diff --git a/unison-src/errors/effect_unknown_type.uu b/unison-src/errors/effect_unknown_type.uu deleted file mode 100755 index 37fb492e4b..0000000000 --- a/unison-src/errors/effect_unknown_type.uu +++ /dev/null @@ -1,20 +0,0 @@ -ability T where - a : Unknown -> {T} () - ---b : Unknown ---b = () - ----- - -unison: can't hashComponents if bindings have free variables: - ["Unknown"] - ["T"] -CallStack (from HasCallStack): - error, called at src/Unison/ABT.hs:504:11 in unison-parser-typechecker-0.1-I7C95FdIglBGnISbV534LW:Unison.ABT - --- Typechecker emits a helpful error about b's use of an unknown type, but not a's. --- --- Error for b: --- typechecker.tests/ability_unknown_type.u FAILURE I don't know about the type Unknown. 
Make sure it's imported and spelled correctly: --- --- 22 | b : Unknown diff --git a/unison-src/errors/empty-block.u b/unison-src/errors/empty-block.u deleted file mode 100644 index 312149d407..0000000000 --- a/unison-src/errors/empty-block.u +++ /dev/null @@ -1 +0,0 @@ -foo = diff --git a/unison-src/errors/ex1.u b/unison-src/errors/ex1.u deleted file mode 100644 index 0a20aec50e..0000000000 --- a/unison-src/errors/ex1.u +++ /dev/null @@ -1,49 +0,0 @@ -use Optional None Some - -foo : Optional a -> [a] -foo = cases - None -> [] - Some a -> [a] - -"hello" `Sequence.cons` foo (Some 3) - --- Sequence.cons has type `a -> [a] -> [a]` --- `a` was determined to be `Text` because "hello" had type `Text`. --- Therefore `foo (Some 3)` was checked against `[Text]` --- but it actually had type `[Nat]`. Use `> why err1` for more detail. --- type Extractor v loc a = Note v loc -> Maybe a --- do --- e <- errorTerm --- b <- isFunctionCall --- if b then do - -- - - --- in reply to `> why err1`: --- `foo` has type `Optional a -> [a]` --- `a` was determined to be `Nat` because --- `Some 3` has type `Optional Nat`. Use `> why err2` for more detail - --- in reply to `> why err2`: --- `Some` has type `a -> Optional a` --- `a` was determinewd to be `Nat` because `3` has type `Nat` - -x = 3 - -and x 4 -------------- generic synthesizeApp possibility --- `and` has type `Boolean -> Boolean -> Boolean` --- .. (no typevars to explain, so skip) --- Therefore `3` was checked against `Boolean`, --- but it actually had type `Nat`. - -------------- specialized "and" possibility --- The arguments to `and` must be of type `Boolean`, --- but `x` has type `Nat`. Use `> why err1` for more detail. - -and 3 4 --- but the literal `3` has type `Nat`. 
- -match 3 with - 3 -> "text" - 4 -> 4.0 diff --git a/unison-src/errors/fix745.u b/unison-src/errors/fix745.u deleted file mode 100644 index 98af39ab79..0000000000 --- a/unison-src/errors/fix745.u +++ /dev/null @@ -1,19 +0,0 @@ - -unique ability A where a : Nat -unique ability B where b : Nat - -noGood : Nat ->{A} '{B} () -noGood n unit = - -- The A.a should be an ability check failure, since we are in the body - -- of an arrow which only has the {B} ability set. - A.a - B.b - () - -ok : Nat ->{A} '{B} () -ok n = - -- This is okay, because the A.a is being evaluated in the body of an - -- arrow with {A}. The result of the body is another lambda which - -- is allowed {B} requests by type signature of `ok`. - A.a - 'let B.b; () diff --git a/unison-src/errors/handle-inference.u b/unison-src/errors/handle-inference.u deleted file mode 100644 index 8d5dc87c7a..0000000000 --- a/unison-src/errors/handle-inference.u +++ /dev/null @@ -1,22 +0,0 @@ ---handle inference -ability State s where - get : ∀ s . () -> {State s} s - set : ∀ s . s -> {State s} () -state : ∀ a s . s -> Request (State s) a -> a -state s = cases - {a} -> a - {State.get _ -> k} -> handle k s with state s - {State.set s -> k} -> handle k () with state s --- modify : ∀ s . (s -> s) -> {State s} () --- modify f = State.set (f (State.get())) -ex : () -> {State Nat} Nat -ex blah = - State.get() Nat.+ 42 --- note this currently succeeds, the handle block --- gets an inferred type of ∀ a . a, it appears that --- the existential `a` which gets instantiated for the --- state call never gets refined, most likely due to --- missing a subtype check in handle -y : Text -y = handle ex () with state 5 -() diff --git a/unison-src/errors/handler-coverage-checking.uu b/unison-src/errors/handler-coverage-checking.uu deleted file mode 100644 index 134519ef01..0000000000 --- a/unison-src/errors/handler-coverage-checking.uu +++ /dev/null @@ -1,29 +0,0 @@ ---State3 ability -ability State se2 where - put : ∀ se . 
se -> {State se} () - get : ∀ se . () -> {State se} se - -state : ∀ s a . s -> Request (State s) a -> (s, a) -state woot = cases - { State.get () -> k } -> handle k woot with state woot - { State.put snew -> k } -> handle k () with state snew - -ex1 : (Nat, Nat) -ex1 = handle State.get () with state 42 - -ex1a : (Nat, Nat) -ex1a = handle 49 with state 42 - -ex1b = handle 0 with x -> 10 - -ex1c : Nat -ex1c = handle 0 with x -> 10 - -ex1d = handle 49 with state 42 - -ex2 = handle State.get () with state 42 - -ex3 : (Nat, Nat) -ex3 = ex2 - -() diff --git a/unison-src/errors/id.u b/unison-src/errors/id.u deleted file mode 100644 index e9e8fb0a95..0000000000 --- a/unison-src/errors/id.u +++ /dev/null @@ -1,3 +0,0 @@ -id a = a - -(id 42 : Text) diff --git a/unison-src/errors/io-effect.u b/unison-src/errors/io-effect.u deleted file mode 100644 index 7373163531..0000000000 --- a/unison-src/errors/io-effect.u +++ /dev/null @@ -1,9 +0,0 @@ ---IO ability -ability IO where - launchMissiles : () -> {IO} () --- binding is not guarded by a lambda, it only can access --- ambient abilities (which will be empty) -ex1 : {IO} () -ex1 = IO.launchMissiles() -() - diff --git a/unison-src/errors/io-state1.u b/unison-src/errors/io-state1.u deleted file mode 100644 index a9d1c11c6a..0000000000 --- a/unison-src/errors/io-state1.u +++ /dev/null @@ -1,17 +0,0 @@ ---IO/State1 ability -ability IO where - launchMissiles : {IO} () -ability State se2 where - put : ∀ se . se -> {State se} () - get : ∀ se . 
() -> {State se} se -foo : () -> {IO} () -foo unit = --- inner binding can't access outer abilities unless it declares --- them explicitly - incBy : Int -> {State Int} () - incBy i = - launchMissiles -- not allowed - y = State.get() - State.put (y Int.+ i) - () -() diff --git a/unison-src/errors/map-reduce.u b/unison-src/errors/map-reduce.u deleted file mode 100644 index d29cc69089..0000000000 --- a/unison-src/errors/map-reduce.u +++ /dev/null @@ -1,102 +0,0 @@ - --- A simple distributed computation ability -ability Remote n where - - -- Spawn a new node, of type `n` - spawn : {Remote n} n - - -- Sequentially evaluate the given thunk on another node - -- then return to the current node when it completes - at : n -> '{Remote n} a -> {Remote n} a - - -- Start a computation running, returning an `r` that can be forced to - -- await the result of the computation - fork : '{Remote n} a -> {Remote n} ('{Remote n} a) - -type Monoid a = Monoid (a -> a -> a) a - -use Nat + - * / == < -use Sequence map take drop size foldLeft halve -use Optional None Some -use Monoid.Monoid -- import the constructor -use Remote fork spawn at - -namespace Monoid where - - zero : Monoid a -> a - zero = cases Monoid _ z -> z - - op : Monoid a -> a -> a -> a - op = cases Monoid op _ -> op - - foldMap : (a -> {e} b) -> Monoid b -> [a] -> {e} b - foldMap f m as = - op = Monoid.op m - -- this line has a type error, `op` is (b -> b -> b) - -- and `zero m` is of type `b`, but `as` is of type `[a]` - -- 👇 - if size as < 2 then Sequence.foldLeft op (zero m) as - else match Sequence.halve as with (l, r) -> foldMap f m l `op` foldMap f m r - - par : Monoid a -> Monoid ('{Remote n} a) - par m = - o = op m - z = zero m - -- note - does not typecheck if flip the order of the constructor! 
- -- the 'z has type 'a, which fails to match the later remote thunk - Monoid (a1 a2 -> parApply o a1 a2) 'z - -force : '{e} a -> {e} a -force a = !a - -mapReduce : (a -> {Remote n} b) -> Monoid b -> [a] -> {Remote n} b -mapReduce f m a = - force <| Monoid.foldMap (a -> fork '(f a)) (Monoid.par m) a - -namespace Sequence where - - foldLeft : (b -> a -> b) -> b -> [a] -> b - foldLeft f z as = _todo2 - - halve : [a] -> ([a], [a]) - halve as = (take (size as / 2) as, drop (size as / 2) as) - -ex : '{Remote n} Nat -ex = 'let - alice = spawn - bob = spawn - f1 = fork '(1 + 1) - f2 = fork '(2 + 2) - !f1 + !f2 - -parApply : (a -> b -> c) -> '{Remote n} a -> '{Remote n} b -> '{Remote n} c -parApply f a b = 'let - x = fork a - y = fork b - f !x !y - --- this currently crashes the compiler -Remote.runLocal : '{Remote Nat} a -> a -Remote.runLocal r = - step : Nat -> Request (Remote Nat) a -> a - step nid = cases - {a} -> a - {Remote.fork t -> k} -> handle k t with step nid - {Remote.spawn -> k} -> handle k nid with step (nid + 1) - {Remote.at _ t -> k} -> handle k !t with step (nid + 1) - - handle !r with step 0 - -uno : '{e} a -> '{e} a -> {e} a -uno a a2 = !a - -dos : (a -> a -> a) -> '{e} a -> '{e} a -> {e} a -dos f a a2 = f !a !a2 - -(<|) : (i -> o) -> i -> o -f <| i = f i -i |> f = f i - -Stream.fromNat 1 - |> Stream.take 15 - |> Stream.toSequence diff --git a/unison-src/errors/map-traverse3.u b/unison-src/errors/map-traverse3.u deleted file mode 100644 index 724a5bdeee..0000000000 --- a/unison-src/errors/map-traverse3.u +++ /dev/null @@ -1,26 +0,0 @@ ---map/traverse -ability Noop where - noop : a -> {Noop} a - -type List a = Nil | Cons a (List a) - -map : (a ->{} b) -> List a -> List b -map f = cases - List.Nil -> List.Nil - List.Cons h t -> List.Cons (f h) (map f t) - -c = List.Cons - -z : ∀ a . 
List a -z = List.Nil - -ex = c 1 (c 2 (c 3 z)) - -pureMap : List Text -pureMap = map (a -> "hello") ex - --- this should not typecheck because map is annotated to take a pure function -zappy : '{Noop} (List Nat) -zappy = 'let map (zap -> Noop.noop (zap Nat.+ 1)) ex - -pureMap diff --git a/unison-src/errors/mismatched-braces.u b/unison-src/errors/mismatched-braces.u deleted file mode 100644 index e638736768..0000000000 --- a/unison-src/errors/mismatched-braces.u +++ /dev/null @@ -1,4 +0,0 @@ -} - -x = 3 - diff --git a/unison-src/errors/need-nominal-type.uu b/unison-src/errors/need-nominal-type.uu deleted file mode 100644 index 14b48ed3cc..0000000000 --- a/unison-src/errors/need-nominal-type.uu +++ /dev/null @@ -1,7 +0,0 @@ -type Foo = Foo -type Bar = Bar - -x : Foo -x = Bar.Bar - -x diff --git a/unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u b/unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u deleted file mode 100644 index f1fcdf7f80..0000000000 --- a/unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u +++ /dev/null @@ -1,27 +0,0 @@ -reduce2 : a -> (a -> a -> a) -> Sequence a -> a -reduce2 a0 f s = match at 0 s with - Optional.None -> a0 - Optional.Some a1 -> reduce (f a0 a1) f (drop 1 s) - -() - --- as of commit a48fa3b, we get the following error - - --The expression at Line 18, columns 40-41 (in red below) is requesting - -- {𝛆3} abilitys, but this location only has access to - -- {} - -- - -- 18 | Optional.Some a1 -> reduce (f a0 a1) f (drop 1 s) - -- ^ - -- simple cause: - -- AbilityCheckFailure: ambient={} requested={𝛆3} - --- The problem is that I've accidentally called `reduce` instead of `reduce2`, --- which TDNRs to `Stream.reduce`, which doesn't allow abilitys, and `f` isn't --- restricted to be pure. 
- --- I'd like to know: --- a) reduce is the built-in --- Stream.reduce : a -> (a ->{} a ->{} a) -> Stream a -> a --- b) maybe those suggestions, like did you mean reduce2 instead of reduce, --- which would typecheck. I understand that would not be a quick fix. diff --git a/unison-src/errors/poor-error-message/consoleh.u b/unison-src/errors/poor-error-message/consoleh.u deleted file mode 100644 index 12b92f50db..0000000000 --- a/unison-src/errors/poor-error-message/consoleh.u +++ /dev/null @@ -1,57 +0,0 @@ --- Token {payload = Semi, start = Pos 51 1, end = Pos 51 1} :| [] --- bootstrap: unison-src/tests/console.uu:51:1: --- unexpected Semi --- expecting : or the rest of infixApp --- 51 | () - -ability State s where - get : {State s} s - set : s -> {State s} () - -ability Console where - read : {Console} (Optional Text) - write : Text -> {Console} () - -fst = cases Tuple.Cons a _ -> a - -snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b - -namespace Console where - - state : s -> Request (State s) a -> a - state s = cases - {State.get -> k} -> handle k s with state s - {State.set s' -> k} -> handle k () with state s' - {a} -> a - - simulate : Request Console d -> {State ([Text], [Text])} d - simulate = cases - {Console.read -> k} -> - io = State.get - ins = fst io - outs = snd io - State.set (drop 1 ins, outs) - -- this really should typecheck but doesn't for some reason - -- error is that `simulate` doesn't check against `Request Console c -> r`, - -- but seems like that `r` should get instantiated as `{State (..)} c`. - handle k (at 0 ins) with simulate - {Console.write t -> k} -> - io = State.get - ins = fst io - outs = snd io - -- same deal here - handle k (State.set (ins, cons t outs)) with simulate - {a} -> a - -(++) = concatenate - -handle - handle - use Console read write - use Optional Some None - write "What's your name?" - match read with - Some name -> write ("Hello" ++ name) - None -> write "Fine, be that way." 
- with Console.simulate -() diff --git a/unison-src/errors/poor-error-message/doesnt-match-annotation.u b/unison-src/errors/poor-error-message/doesnt-match-annotation.u deleted file mode 100644 index 34d5d146ab..0000000000 --- a/unison-src/errors/poor-error-message/doesnt-match-annotation.u +++ /dev/null @@ -1,5 +0,0 @@ -crazyTuple : a -> b -> (a,b)-- -> (a,b) -crazyTuple a b c = c - -() - diff --git a/unison-src/errors/poor-error-message/function-calls.u b/unison-src/errors/poor-error-message/function-calls.u deleted file mode 100644 index d3109a5cc0..0000000000 --- a/unison-src/errors/poor-error-message/function-calls.u +++ /dev/null @@ -1,11 +0,0 @@ -f1 : Int -> Int -f1 n = n + +1 - -f1 (3 Nat.+ 3) - --- idea: - -- I was expecting Int - -- vs - -- f1 was expecting Int - --- "In the call below" vs "In the call to `foo`" diff --git a/unison-src/errors/poor-error-message/function-calls1.u b/unison-src/errors/poor-error-message/function-calls1.u deleted file mode 100644 index 8a2a41c0f8..0000000000 --- a/unison-src/errors/poor-error-message/function-calls1.u +++ /dev/null @@ -1,16 +0,0 @@ -f1 : Int -> Int -f1 n = n + +1 - -f1 (3 Nat.+ 3) - --- issues: --- - the highlight term is '3' but should be (3 + 3) --- - - - --- Paul Thought: - -- Whenever we synthesize a type, we can set the location - -- of the synthesized type to be the location of the - -- synthesized expression. 
--- Arya & Runar: isn't that what we already do --- Paul: No, we only something something when we refine an existential diff --git a/unison-src/errors/poor-error-message/function-calls2.u b/unison-src/errors/poor-error-message/function-calls2.u deleted file mode 100644 index 10a3812c8d..0000000000 --- a/unison-src/errors/poor-error-message/function-calls2.u +++ /dev/null @@ -1,19 +0,0 @@ -id : a -> a -id a = a - -f1 : Int -> Int -f1 n = n + +1 - -f1 (id 3) - --- issues: --- - the highlight term is '3' but should be (3 + 3) --- - - - --- Paul Thought: - -- Whenever we synthesize a type, we can set the location - -- of the synthesized type to be the location of the - -- synthesized expression. --- Arya & Runar: isn't that what we already do --- Paul: No, we only something something when we refine an existential diff --git a/unison-src/errors/poor-error-message/function-calls3.u b/unison-src/errors/poor-error-message/function-calls3.u deleted file mode 100644 index 44665aff00..0000000000 --- a/unison-src/errors/poor-error-message/function-calls3.u +++ /dev/null @@ -1,26 +0,0 @@ --- first : a -> a -> a --- first a b = a - -id5 : a -> a -> a -> a -> a -> (a,a,a,a,a) -id5 a b c d e = (a, b, c, d, e) - --- second : a -> a -> a --- second a b = b - -id5 1 +2 3 4 5 - --- (match true with --- true -> first --- false -> second) 1 +2 - --- issues: --- - the highlight term is '3' but should be (3 + 3) --- - - - --- Paul Thought: - -- Whenever we synthesize a type, we can set the location - -- of the synthesized type to be the location of the - -- synthesized expression. --- Arya & Runar: isn't that what we already do --- Paul: No, we only something something when we refine an existential diff --git a/unison-src/errors/poor-error-message/handle.u b/unison-src/errors/poor-error-message/handle.u deleted file mode 100644 index 6f476f6890..0000000000 --- a/unison-src/errors/poor-error-message/handle.u +++ /dev/null @@ -1,40 +0,0 @@ ---Parsing/typechecking... 
---Token {payload = Close, start = Pos 27 5, end = Pos 27 5} :| [] ---bootstrap: /Users/pchiusano/work/unison/unison-src/tests/state2.u:27:5: ---unexpected Close --- 27 | let --- - -type Optional a = None | Some a - -ability State s where - put : s -> {State s} () - get : {State s} s - -state : s -> Request (State s) a -> (s, a) -state woot = cases - { State.get -> k } -> handle k woot with state woot - { State.put snew -> k } -> handle k () with state snew - { a } -> (woot, a) - -modify : (s -> s) -> {State s} () -modify f = State.put (f State.get) - -increment : '{State Nat} () -increment = '(modify ((+) 1)) - -first : (a, b) -> a -first = cases (a,_) -> a - -ex : Nat -ex = - result = handle (state 0) - let - x = State.get - !increment - !increment - () - - first result - -() diff --git a/unison-src/errors/poor-error-message/handler-ex.u b/unison-src/errors/poor-error-message/handler-ex.u deleted file mode 100644 index 9e07c1262c..0000000000 --- a/unison-src/errors/poor-error-message/handler-ex.u +++ /dev/null @@ -1,24 +0,0 @@ ---Line 13, columns 44-46 has a type mismatch (in red below): --- --- 13 | {Ask.ask _ -> k} -> handle k () with supply t --- ---The two types involved are: --- --- () (an intrinsic, in blue) --- Text (line 8, columns 30-34, in green) --- --- 8 | supply : Text -> Request (Ask Text) a -> a --- --- Verbiage could be improved, but also the `()` location should --- point to line 22, the `k ()` call. 
-ability Ask foo where - ask : () -> {Ask a} a - -supply : Text -> Request (Ask Text) a -> a -supply t = cases - {a} -> a - -- `k` should be of type `Text -> Request Ask a`, - -- so calling it with `()` here should be a type error - {Ask.ask _ -> k} -> handle k () with supply t - -supply diff --git a/unison-src/errors/poor-error-message/mismatched-case-result-types.u b/unison-src/errors/poor-error-message/mismatched-case-result-types.u deleted file mode 100644 index e1dd520475..0000000000 --- a/unison-src/errors/poor-error-message/mismatched-case-result-types.u +++ /dev/null @@ -1,20 +0,0 @@ ---mismatched case result types -type Optional a = None | Some a -match Optional.Some 3 with - x -> 1 - y -> "boo" - --- as of 5ae98f7, produces this message: - - --Each case of a match/with expression need to have the same type. - -- Here, one is Nat, and another is Text: - -- - -- 4 | x -> 1 -- x is highlighted - -- 5 | y -> "boo" -- "boo" is highlighted - -- - -- from right here: - -- - -- 4 | x -> 1 -- 1 is highlighted - --- IMO, 1 should be highlighted instead of x on line 12; --- then lines 14-17 would be omitted. diff --git a/unison-src/errors/poor-error-message/notaguard.u b/unison-src/errors/poor-error-message/notaguard.u deleted file mode 100644 index 54c3f0e373..0000000000 --- a/unison-src/errors/poor-error-message/notaguard.u +++ /dev/null @@ -1,21 +0,0 @@ --- Getting the error ---The guard expression for a case has to be Boolean, but this one is a7: --- --- 13 | {Ask.ask -> k} -> handle k () with supply t --- --- from right here: --- --- 8 | supply : Text -> Request (Ask Text) a -> a --- --- --- even though this program doesn't use guards! 
- -ability Ask a where - ask : {Ask a} a - -supply : Text -> Request (Ask Text) a -> a -supply t = cases - {a} -> "foo" -- a - {Ask.ask -> k} -> handle k () with supply t - -() diff --git a/unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u b/unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u deleted file mode 100644 index 4f9b25c325..0000000000 --- a/unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u +++ /dev/null @@ -1,17 +0,0 @@ --- board piece -type P = X | O | E - -type Board = Board P P - -use Board.Board -use P O X E - -match Board X O X - with Board a b c -> a - - --- gives this error: - -- This looks like a function call, but with a Board where the function should be. Are you missing an operator? - -- ^^^^^ - -- 13 | match Board X O X - -- ^^^^^ diff --git a/unison-src/errors/poor-error-message/pattern-case-location.u b/unison-src/errors/poor-error-message/pattern-case-location.u deleted file mode 100644 index 3cba3a76b2..0000000000 --- a/unison-src/errors/poor-error-message/pattern-case-location.u +++ /dev/null @@ -1,10 +0,0 @@ --- The location of the error is imprecise. It should point to --- the pattern `Bar.Bar`. 
- -unique type Foo = Foo -unique type Bar = Bar - -foo : Foo -> Foo -foo = cases - Foo.Foo -> Foo - Bar.Bar -> Foo diff --git a/unison-src/errors/poor-error-message/pattern-matching-1.u b/unison-src/errors/poor-error-message/pattern-matching-1.u deleted file mode 100644 index 2e53532d39..0000000000 --- a/unison-src/errors/poor-error-message/pattern-matching-1.u +++ /dev/null @@ -1,28 +0,0 @@ -type Foo0 = Foo0 -type Foo1 a = Foo1 a -type Foo2 a b = Foo2 a b -type Foo3 a b c = Foo3 a b c - -use Foo0 Foo0 -use Foo1 Foo1 -use Foo2 Foo2 - -x = match Foo0 with - Foo0 -> 1 - -y = match Foo1 1 with - Foo1 1 -> 0 - Foo1 _ -> 10 - -z = match Foo2 1 "hi" with - Foo2 x _ -> x - Foo2 1 _ -> 1 - -w = match Foo3.Foo3 1 2 "bye" with - Foo3.Foo3 1 2 x -> Text.concatenate x "bye" - -- where the heck are these locations coming from? - -- I feel, since concatenate isn't polymorphic, that `Text` - -- should come from there, not from `x`. - _ -> () - -() diff --git a/unison-src/errors/poor-error-message/tdnr-demo.u b/unison-src/errors/poor-error-message/tdnr-demo.u deleted file mode 100644 index d244e35e5f..0000000000 --- a/unison-src/errors/poor-error-message/tdnr-demo.u +++ /dev/null @@ -1,55 +0,0 @@ -left = take 3 (fromNat 5) -right = take 10 (fromNat 100) -sum = reduce 0 (+) - -iterate : a -> Nat -> (a -> a) -> Sequence a -iterate a n f = - iterate0 a n f acc = - if n > 0 then - a' = f a - iterate0 a' (n `drop` 1) f (snoc acc a') - else acc - iterate0 a n f [] - -use Optional Some None -reduce : a -> (a -> a -> a) -> Sequence a -> a -reduce a0 f s = match at 0 s with - Optional.None -> a0 - Optional.Some a1 -> reduce (f a0 a1) f (drop 1 s) - -pseudo-Stream : Sequence Nat -pseudo-Stream = iterate 0 200 increment -left2 = take 3 (drop 3 pseudo-Stream) -right2 = take 10 (drop 99 pseudo-Stream) -sum2 = reduce 0 (+) - -(sum (append left right)) == (sum2 (left2 ++ right2)) - --- local definition of `reduce` for Sequence understandably breaks TDNR --- of Stream.reduce on line 3, which 
makes `sum` on line 3 expect --- `Sequence Nat`, which constrains `append` on line 26 to return --- `Sequence Nat`, so it no longer matches `Stream Nat -> Stream Nat -> 'a`, --- which breaks TDNR of Stream.append, resulting in the error message: - - --Sorry, you hit an error we didn't make a nice message for yet. - -- - --Here is a summary of the Note: - -- simple cause: - -- SolvedBlank: Resolve "append" Line 26, columns 7-13 v=_170 t=Stream Nat -> Stream Nat -> [Nat] - -- path: - -- InSynthesize e=(let reduce1 0 (UInt... - -- InSynthesize e=(let (Sequence.take:... - -- InSynthesize e=(let (Sequence.take:... - -- InSynthesize e=(let (iterate:(𝛆. (a... - -- InSynthesize e=(let (λ (a. (λ (n. (... - -- InSynthesize e=(let reduce1 0 (UInt... - -- InSynthesize e=Cycle (left. (right.... - -- - -- - --I'm not sure what append means at Line 26, columns 7-13 - -- - -- 26 | (sum (append left right)) == (sum2 (left2 ++ right2)) - -- - --Whatever it is, it has a type that conforms to Stream Nat -> Stream Nat -> [Nat] - --- Commenting out the local definition of `reduce` mysteriously fixes TDNR of `append` for the above reasons. diff --git a/unison-src/errors/poor-error-message/token-printing.u b/unison-src/errors/poor-error-message/token-printing.u deleted file mode 100644 index 98d4ad1f9d..0000000000 --- a/unison-src/errors/poor-error-message/token-printing.u +++ /dev/null @@ -1,25 +0,0 @@ --- board piece - -type Board = Board Nat Nat Nat - -use Board.Board - --- uncommenting these gives errors from NPE to array index out of bounds -1, -2 --- x = 1 --- y = 2 - -ex = match Board 77 88 99 - with Board a b c -> c - --- yields: - - --- master> --- I was expecting an indented block following the`of` keyword --- but instead found an outdent: --- --- 12 | with Board a b c -> c --- SourcePos {sourceName = "/Users/pchiusano/work/unison/unison-src/tests/tictactoe0-array-oob1.u", sourceLine = Pos 12, sourceColumn = Pos 3} - --- What's with the `SourcePos` default show instance here? 
--- Expecting it to just color the token or something diff --git a/unison-src/errors/rank2a.u b/unison-src/errors/rank2a.u deleted file mode 100644 index 4f01835c12..0000000000 --- a/unison-src/errors/rank2a.u +++ /dev/null @@ -1,8 +0,0 @@ - --- We expect this to not typecheck since a `Nat -> Nat` cannot --- be passed where a `∀ a . a -> a` is expected. -rank2a : (Nat -> Nat) -> Nat -rank2a = - inner : (∀ a . a -> a) -> Nat - inner f = 42 - inner diff --git a/unison-src/errors/seq-concat-constant-length.u b/unison-src/errors/seq-concat-constant-length.u deleted file mode 100644 index 5f5ca9fc16..0000000000 --- a/unison-src/errors/seq-concat-constant-length.u +++ /dev/null @@ -1,3 +0,0 @@ -test : [a] -> ([a], [a]) -test l = match l with - x ++ y -> (x, y) diff --git a/unison-src/errors/state4.u b/unison-src/errors/state4.u deleted file mode 100644 index b4890f65e7..0000000000 --- a/unison-src/errors/state4.u +++ /dev/null @@ -1,13 +0,0 @@ ---State4 ability -ability State se2 where - put : ∀ se . se -> {State se} () - get : ∀ se . 
() -> {State se} se --- binding is not guarded by a lambda, it only can access --- ambient abilities (which will be empty) -ex1 : {State Int} () -ex1 = - y = State.get - State.put (y Int.+ +1) - () -() - diff --git a/unison-src/errors/tdnr.u b/unison-src/errors/tdnr.u deleted file mode 100644 index c1a98cfc0b..0000000000 --- a/unison-src/errors/tdnr.u +++ /dev/null @@ -1,3 +0,0 @@ -foo a b = a + b - -foo diff --git a/unison-src/errors/tdnr2.u b/unison-src/errors/tdnr2.u deleted file mode 100644 index a9924c0703..0000000000 --- a/unison-src/errors/tdnr2.u +++ /dev/null @@ -1 +0,0 @@ -2.0 + 4 diff --git a/unison-src/errors/tdnr3.u b/unison-src/errors/tdnr3.u deleted file mode 100644 index 275847929c..0000000000 --- a/unison-src/errors/tdnr3.u +++ /dev/null @@ -1,10 +0,0 @@ --- + Should get resolved to Nat.+, making this fail - -x : Nat -x = 42 - -Foo.z : Float -Foo.z = 4.0 - -a = x + z - diff --git a/unison-src/errors/term-functor-inspired/effect1.u b/unison-src/errors/term-functor-inspired/effect1.u deleted file mode 100644 index 1c3f007c35..0000000000 --- a/unison-src/errors/term-functor-inspired/effect1.u +++ /dev/null @@ -1,9 +0,0 @@ -ability State s where - get : () -> {State s} s - set : s -> {State s} () - -x : {State Nat} Nat -x = - State.get () - -() diff --git a/unison-src/errors/term-functor-inspired/if-body-mismatch.u b/unison-src/errors/term-functor-inspired/if-body-mismatch.u deleted file mode 100644 index b0371175d8..0000000000 --- a/unison-src/errors/term-functor-inspired/if-body-mismatch.u +++ /dev/null @@ -1,3 +0,0 @@ -if true -then 1 -else 1.0 diff --git a/unison-src/errors/term-functor-inspired/if-cond-not-bool.u b/unison-src/errors/term-functor-inspired/if-cond-not-bool.u deleted file mode 100644 index d132b2abd3..0000000000 --- a/unison-src/errors/term-functor-inspired/if-cond-not-bool.u +++ /dev/null @@ -1 +0,0 @@ -if "true" then 1 else 1 diff --git a/unison-src/errors/term-functor-inspired/mismatched-case-result-types.u 
b/unison-src/errors/term-functor-inspired/mismatched-case-result-types.u deleted file mode 100644 index 3aed71fd9f..0000000000 --- a/unison-src/errors/term-functor-inspired/mismatched-case-result-types.u +++ /dev/null @@ -1,5 +0,0 @@ ---mismatched case result types -type Optional a = None | Some a -match Optional.Some 3 with - x -> 1 - y -> "boo" diff --git a/unison-src/errors/type-apply.u b/unison-src/errors/type-apply.u deleted file mode 100644 index c44b882242..0000000000 --- a/unison-src/errors/type-apply.u +++ /dev/null @@ -1,15 +0,0 @@ ---Type.apply -type List a = Nil | Cons a (List a) -map : ∀ a b . (a -> b) -> List a -> List b -map f = cases - List.Nil -> List.Nil - List.Cons h t -> List.Cons h (map f t) -- should not typecheck, missing (f h) --- definitely should not typecheck! -map2 : ∀ a . a -map2 = map -c = List.Cons -z = List.Nil -ex = c 1 (c 2 (c 3 z)) -pureMap : List Int -- should fail, output is a `List Text` -pureMap = map (a -> "hi") ex -() diff --git a/unison-src/errors/type-functor-inspired/app2.u b/unison-src/errors/type-functor-inspired/app2.u deleted file mode 100644 index b9b422b846..0000000000 --- a/unison-src/errors/type-functor-inspired/app2.u +++ /dev/null @@ -1,4 +0,0 @@ -type Optional a = Some a | None -app' : Optional Int -app' = 3 -() diff --git a/unison-src/errors/type-functor-inspired/arrow1.u b/unison-src/errors/type-functor-inspired/arrow1.u deleted file mode 100644 index 630e157c2e..0000000000 --- a/unison-src/errors/type-functor-inspired/arrow1.u +++ /dev/null @@ -1,3 +0,0 @@ -arrow : Int -> Int -> Int -arrow a = 3 -() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/effect2.u b/unison-src/errors/type-functor-inspired/effect2.u deleted file mode 100644 index 90615b8ea8..0000000000 --- a/unison-src/errors/type-functor-inspired/effect2.u +++ /dev/null @@ -1,11 +0,0 @@ -ability Abort where - Abort : forall a . () -> {Abort} a - -ability Abort2 where - Abort2 : forall a . 
() -> {Abort2} a - Abort2' : forall a . () -> {Abort2} a - -ability' : Nat -> { Abort } Int -ability' n = Abort2.Abort2 () - -() diff --git a/unison-src/errors/type-functor-inspired/forall-arrow.u b/unison-src/errors/type-functor-inspired/forall-arrow.u deleted file mode 100644 index 47368c44bb..0000000000 --- a/unison-src/errors/type-functor-inspired/forall-arrow.u +++ /dev/null @@ -1,3 +0,0 @@ -id : forall a . a -> a -id x = 3 -() diff --git a/unison-src/errors/type-functor-inspired/forall-arrow2.u b/unison-src/errors/type-functor-inspired/forall-arrow2.u deleted file mode 100644 index 5512aeb417..0000000000 --- a/unison-src/errors/type-functor-inspired/forall-arrow2.u +++ /dev/null @@ -1,4 +0,0 @@ -f2 : forall a . a -> a -> a -f2 x = x - -() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/forall-arrow3.u b/unison-src/errors/type-functor-inspired/forall-arrow3.u deleted file mode 100644 index c9a4dae153..0000000000 --- a/unison-src/errors/type-functor-inspired/forall-arrow3.u +++ /dev/null @@ -1,4 +0,0 @@ -const : forall a b . a -> b -> a -const a b = 3 - -() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/need-nonstructural-types.uu b/unison-src/errors/type-functor-inspired/need-nonstructural-types.uu deleted file mode 100644 index dc731e635f..0000000000 --- a/unison-src/errors/type-functor-inspired/need-nonstructural-types.uu +++ /dev/null @@ -1,12 +0,0 @@ -ability Abort where - Abort : forall a . () -> {Abort} a - -ability Abort2 where - Abort2 : forall a . 
() -> {Abort2} a - -ability' : Nat -> { Abort } Int -ability' n = Abort2.Abort2 () - -() - --- oops, Abort and Abort2 end up being synonyms diff --git a/unison-src/errors/type-functor-inspired/parens.u b/unison-src/errors/type-functor-inspired/parens.u deleted file mode 100644 index 22d02da2db..0000000000 --- a/unison-src/errors/type-functor-inspired/parens.u +++ /dev/null @@ -1,4 +0,0 @@ -type Optional a = Some a | None -y : (Optional Int) -y = 3 -() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/subtuple.u b/unison-src/errors/type-functor-inspired/subtuple.u deleted file mode 100644 index f1aab6f7fd..0000000000 --- a/unison-src/errors/type-functor-inspired/subtuple.u +++ /dev/null @@ -1,5 +0,0 @@ -type Optional a = Some a | None -z' : (Optional Int, Optional Text, Optional Float) -z' = (None, 3) - -() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/synthesizeApp.u b/unison-src/errors/type-functor-inspired/synthesizeApp.u deleted file mode 100644 index 833909d07f..0000000000 --- a/unison-src/errors/type-functor-inspired/synthesizeApp.u +++ /dev/null @@ -1,4 +0,0 @@ -foo : a -> a -> Nat -foo x z = 42 - -foo +1 "hi" diff --git a/unison-src/errors/type-functor-inspired/tuple.u b/unison-src/errors/type-functor-inspired/tuple.u deleted file mode 100644 index e7f0019f78..0000000000 --- a/unison-src/errors/type-functor-inspired/tuple.u +++ /dev/null @@ -1,4 +0,0 @@ -type Optional a = Some a | None -z : (Optional Int, Optional Text, Optional Float) -z = 3 -() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/tuple2.u b/unison-src/errors/type-functor-inspired/tuple2.u deleted file mode 100644 index 08351f1a9a..0000000000 --- a/unison-src/errors/type-functor-inspired/tuple2.u +++ /dev/null @@ -1,3 +0,0 @@ -y : (Nat, Optional Int, Text) -y = (42, 3, "") -y diff --git a/unison-src/errors/type-functor-inspired/unit.u b/unison-src/errors/type-functor-inspired/unit.u deleted file 
mode 100644 index 29b6bdf363..0000000000 --- a/unison-src/errors/type-functor-inspired/unit.u +++ /dev/null @@ -1,3 +0,0 @@ -x : () -x = 3 -() diff --git a/unison-src/errors/unexpected-loop.u b/unison-src/errors/unexpected-loop.u deleted file mode 100644 index 16cada0892..0000000000 --- a/unison-src/errors/unexpected-loop.u +++ /dev/null @@ -1,11 +0,0 @@ ---Abort -ability Abort where - Abort : forall a . () -> {Abort} a - -use Nat + - -bork = u -> 1 + (Abort.Abort ()) - -(bork : Nat) - --- fails with loop instead of with type mismatch in commit 2819c206acf80f926c6d970a4ffd47c961fa4502 diff --git a/unison-src/errors/unresolved-symbol-1.u b/unison-src/errors/unresolved-symbol-1.u deleted file mode 100644 index fd0eef6db0..0000000000 --- a/unison-src/errors/unresolved-symbol-1.u +++ /dev/null @@ -1,6 +0,0 @@ -let - (|>) : a -> (a -> WHat) -> b -- unresolved symbol - a |> f = f a - Stream.fromInt -3 - |> Stream.take 10 - |> Stream.foldLeft +0 (Int.+) diff --git a/unison-src/errors/unsound-cont.u b/unison-src/errors/unsound-cont.u deleted file mode 100644 index f05745d9fa..0000000000 --- a/unison-src/errors/unsound-cont.u +++ /dev/null @@ -1,12 +0,0 @@ - -ability Ask a where - ask : {Ask a} a - -supply : Text -> Request (Ask Text) a -> a -supply t = cases - {a} -> a - -- `k` should be of type `Text -> Request Ask a`, - -- so calling it with `()` here should be a type error - {Ask.ask -> k} -> handle k () with supply t - -() diff --git a/unison-src/example-errors.u b/unison-src/example-errors.u deleted file mode 100644 index e0f248552c..0000000000 --- a/unison-src/example-errors.u +++ /dev/null @@ -1,181 +0,0 @@ --- Each of the programs in this file generates an error of some sort. --- We want error messages to be awesome so for each wrong program we --- give "the ideal error message" and a uniform, simple algorithm for producing --- that message. 
- -ex1 : Int -ex1 = "hello" - -{- Ideal error: - - Type mismatch in example-errors.u, line 6: `Text` vs `Int` - - | - 6 | ex1 = "hello" - | ^^^^^^^ - - `Text` comes from a literal: - | - 6 | ex1 = "hello" - | ^^^^^^^ - - `Int` comes from type signature: - | - 5 | ex1 : Int - | ^^^^^ - - Thoughts: - - * The first line marker is the _site of the error_ - * The next two line markers are the _provenance of the mismatched types_ - * In this case, the provenance of `Text` is the same location as the error, - but this won't always be the case. Optimized message just omits the - site of the error if it matches the provenance location of either of - the mismatched types. - * The backticks might be in color, formatted as bold, whatever, be - thoughtful about using visual indicators to draw attention to most important - aspects of the message. - * For implementation - when `check e t` hits `Term.Text'`, it does: - `subtype Type.text t`, but `Type.text` requires a `loc`, and we'll provide - `ABT.annotation e`. This logic can go in synthesize. - * Q: should we just ALWAYS set the location of a synthesized type - to be the location of the term that type was synthesized from? - * A: No, - - foo : Text - foo x = - y = x + 1 - x - - - In this example, x will synthesize to the type `Int`, but the location - of that type shouldn't just be - * When you synthesize a type for a lambda, `x -> x` - the location of the synthesized type `∀ a . a -> a` - is just the location of `x -> x`. - The location of the `a` also needs to be this same location. - Might want to have a special kind of location which indicates - that the location came from an inferred type. 
--} - -ex2 : Int -- `Int` comes from -ex2 = - y = "hello" -- `Text` comes from "hello" - y - -{- Ideal error: - -example-errors.u contained errors: - - The highlighted expression on line 42 - | -42 | y - | ^ - was inferred to have type `Text` but was expected to have type `Int` - - `Int` comes from type signature: - | -39 | ex2 : Int - | ^^^^^ - - `Text` was inferred from a literal: - | -41 | y = "hello" - | ^^^^^^^ - - - Thoughts: - * `y` is the body of the block, and the body of the block is expected to - have type `Int` - * Maybe use bold or some visual indicator rather than the ^^^ nonsense - * Do we include parent scopes? --} - -ex3 = - x = 1 + 1 - if x then 42 else "hkjh" - -{- - -example-errors.u contained 1 error: - --- 1 ------------------------------------------------------- - - The highlighted expression on line 73 - | -73 | if x then 42 else -1 - | ^ - has type `Nat` but was expected to have type `Boolean` - - `Boolean` comes from `if`, whose first argument must be of type `Boolean` - - `Nat` comes from line 72: - | -72 | x = 1 + 1 - | ^ - x = id 42 - ^^^^^ - ^^ - - * "The function <...> expects its th argument to be of type <...>, but - on line it appears to have type <...>." - * `synthesizeApp` can take an argument for what numberth argument it's - testing - * "An if-expression expects its condition to be of type Boolean, but - * In a `synthesizeApp`, report the function input type first, as the - "expected" type. - * `if` and other builtin syntax should have some special treatment. - * Might want signature of function whose application was involved in the - mismatched types. (But don't necessarily want to dump this on users up - front, like GHC tells you the type of every binding in the vicinity) - * Issue maybe not that you didn't know the function's type, but that - you were wrong about the types of the args you passed; also, you might - accidentally omit an argument, supply an extra argument, or supply - arguments in the wrong order. 
- * We don't bother reporting the other type errors in the same expression, - but we potentially could have an algorithm that could do more fine-grained - recovery. - * Not totally sure on the location of the `Nat`, but the thought - is that if `+` is monomorophic in its return type (always `Nat`), - then it makes sense to attribute the `Nat` of `x` to the `+` call site. - (why not attibute that to the definition site of `+`?) - * Is this inconsistent with treatment of `Boolean` location?? - * Since `if` is monomorphic in its first arg, we could use the same logic to - say that the location of that Boolean argument type is the call site - rather than its definition site. - * When encounter an error for a variable x, can add that to an erroneous - variables set - if you ever encounter a subexpression that references - those variables, skip over it? --} - -ex4 f x = - if f x then f 42 else 50 - -{- - Type mismatch on line : `Nat` vs `Boolean`. - - `Nat` comes from the literal: - | -42 | if f x then f 42 else 50 - | ^^ - ∀ a -> [(a, Tree (Text a)) -> (a -> Text -> Text) -> Tree Text - - ∀ a . Boolean -> a -> a - - Not sure what to report for the origin of the `Boolean`: - - `Boolean` comes from `f 42`?? - `Boolean` comes from `f : Nat -> Boolean`?? - But then why does `f` have that type? - Because `if` takes a `Boolean` as first arg.. - `Boolean` comes from `if` - - `f 42` has type `Boolean` because `f` had to have type `x -> Boolean` - because `f x` was passed as the first argument of `if`: - - if f x then f 42 else 50 - ^^^ - Arya thought - when there's a type mismatch between A and B, and A and B - are both inferred types, might make sense to provide more info about provenance. 
--} - diff --git a/unison-src/parser-tests/GenerateErrors.hs b/unison-src/parser-tests/GenerateErrors.hs deleted file mode 100644 index 795f17e693..0000000000 --- a/unison-src/parser-tests/GenerateErrors.hs +++ /dev/null @@ -1,48 +0,0 @@ -{- For every file foo.u in the current directory write the parse error to foo.message.txt -} -module GenerateErrors where -import qualified Data.Text as Text -import Data.Text.IO ( readFile ) -import Prelude hiding ( readFile ) -import System.Directory ( listDirectory, getCurrentDirectory ) -import System.FilePath ( takeExtension, dropExtension ) -import System.IO ( putStrLn ) -import qualified Unison.Builtin as B -import Unison.Parser ( Err ) -import qualified Unison.Parsers as P -import Unison.PrintError ( prettyParseError ) -import Unison.Symbol ( Symbol ) -import qualified Unison.Util.ColorText as Color -import Unison.Var ( Var ) - - -unisonFilesInDir :: FilePath -> IO [String] -unisonFilesInDir p = do - files <- listDirectory p - pure $ filter ((==) ".u" . 
takeExtension) files - -unisonFilesInCurrDir :: IO [String] -unisonFilesInCurrDir = getCurrentDirectory >>= unisonFilesInDir - -errorFileName :: String -> String -errorFileName n = dropExtension n ++ ".message.txt" - -emitAsPlainTextTo :: Var v => String -> Err v -> FilePath -> IO () -emitAsPlainTextTo src e f = writeFile f plainErr - where plainErr = Color.toPlain $ prettyParseError src e - -printError :: Var v => String -> Err v -> IO () -printError src e = putStrLn $ B.showParseError src e - -processFile :: FilePath -> IO () -processFile f = do - content <- Text.unpack <$> readFile f - let res = P.parseFile f content B.names - case res of - Left err -> do - emitAsPlainTextTo content (err :: Err Symbol) (errorFileName f) - printError content err - Right _ -> putStrLn $ - "Error: " ++ f ++ " parses successfully but none of the files in this directory should parse" - -main :: IO () -main = unisonFilesInCurrDir >>= mapM_ processFile diff --git a/unison-src/parser-tests/empty-match-list.message.txt b/unison-src/parser-tests/empty-match-list.message.txt deleted file mode 100644 index d1395d8fed..0000000000 --- a/unison-src/parser-tests/empty-match-list.message.txt +++ /dev/null @@ -1,3 +0,0 @@ -empty-match-list.u:3:5: -unexpected = - 3 | bar = 3 diff --git a/unison-src/parser-tests/empty-match-list.u b/unison-src/parser-tests/empty-match-list.u deleted file mode 100644 index bb5224c2fb..0000000000 --- a/unison-src/parser-tests/empty-match-list.u +++ /dev/null @@ -1,3 +0,0 @@ -foo n = match n with - -bar = 3 diff --git a/unison-src/parser-tests/if-without-condition.message.txt b/unison-src/parser-tests/if-without-condition.message.txt deleted file mode 100644 index 6b3ef1a0ef..0000000000 --- a/unison-src/parser-tests/if-without-condition.message.txt +++ /dev/null @@ -1,3 +0,0 @@ -if-without-condition.u:1:10: -unexpected then - 1 | foo = if then 4 else 8 diff --git a/unison-src/parser-tests/if-without-condition.u b/unison-src/parser-tests/if-without-condition.u deleted 
file mode 100644 index 15ca6f8d26..0000000000 --- a/unison-src/parser-tests/if-without-condition.u +++ /dev/null @@ -1 +0,0 @@ -foo = if then 4 else 8 diff --git a/unison-src/remote-api.u b/unison-src/remote-api.u deleted file mode 100644 index ac29fba16c..0000000000 --- a/unison-src/remote-api.u +++ /dev/null @@ -1,95 +0,0 @@ -type Either a b = Left a | Right b -type Status = Running | Finished | Canceled | Error Error -type Duration = Seconds Nat --- type Abilities e = Abilities {e} - -ability Remote loc where - fork : loc {e} - -> '{e} a - -> {Remote loc} Future loc a - -forkRegistered : (Future loc a -> {e2} ()) -> loc {e} -> '{e} a - -> {Remote loc, e2} Future loc a -forkRegistered register loc t = - future = Remote.fork loc t - register future - Future.begin future - future - - -ability Error e where error : e ->{Error e} () - -type Future loc a = Future - ('{Remote loc} () -- begin - ,'{Remote loc} () -- cancel - ,'{Remote loc} Status -- status - ,'{Remote loc, Error Future.Error} a -- join - ) -type Future.Error = UnknownFuture | UnreachableLocation | UnresponsiveLocation | Terminated | AbilityCheckFailure - --- Ability.check : Abilities {a} -> Request {b} x -> Boolean --- Ability.check = _ - --- Remote.server : (loc {e} -> {e} a) -> {e} a --- Remote.server computation = - -Future.join : Future loc a ->{Remote loc, Error Future.Error} a -Future.join = cases Future.Future (b, c, s, j) -> !j - -Future.cancel : Future loc a ->{Remote loc} () -Future.cancel = cases Future.Future (b, c, s, j) -> !c - -Future.status : Future loc a ->{Remote loc} Status -Future.status = cases Future.Future (b, c, s, j) -> !s - -Future.begin : Future loc a ->{Remote loc} () -Future.begin = cases Future.Future (b, c, s, j) -> !b - - -type UnitLoc e = UnitLoc - --- Remote.runSequential : '{Remote UnitLoc, Error e} a -> Either e a --- Remote.runSequential r = --- step : Request {Remote UnitLoc} a -> a --- step = cases --- {a} -> a --- {Remote.fork loc t -> k} -> --- join = Right !t 
--- cancel = () --- status = Finished --- keepalive d = () --- handle k (Future ('join, 'cancel, 'status, keepalive)) with step --- err : Request {Error e} a -> Either e a --- err = cases --- {a} -> Right a --- {Error.error t -> k} ->handle k (Left t) with err --- handle handle !r with step with err - --- > Remote.runSequential - --- use Optional Some None --- use Either Left Right --- Either.join : Either a (Either a b) -> Either a b --- Either.join = cases --- Left a -> Left a --- Right e -> e --- --- parMergeSort : (a -> a -> Boolean) -> [a] ->{Remote UnitLoc, Error} [a] --- parMergeSort (<) as = --- -- merge : [a] -> [a] -> [a] -> [a] --- merge z l r = --- l0 = at 0 l --- r0 = at 0 r --- match (l0, r0) with --- (None, _) -> z ++ r --- (_, None) -> z ++ l --- (Some l0, Some r0) -> --- if l0 < r0 --- then merge (z `snoc` l0) (drop 1 l) r --- else merge (z `snoc` r0) l (drop 1 r) --- split = size as / 2 --- if split == 0 then as --- else --- fl = Remote.fork UnitLoc '(parMergeSort (<) (take split as)) --- fr = Remote.fork UnitLoc '(parMergeSort (<) (drop split as)) --- merge [] (Future.join fl) (Future.join fr) diff --git a/unison-src/remote.u b/unison-src/remote.u deleted file mode 100644 index 08bfe7d1dc..0000000000 --- a/unison-src/remote.u +++ /dev/null @@ -1,67 +0,0 @@ - --- A simple distributed computation ability -ability Remote where - - -- Spawn a new node - spawn : {Remote} Node - - -- Sequentially evaluate the given thunk on another node - -- then return to the current node when it completes - at : n -> '{Remote} a -> {Remote} a - - -- Start a computation running, returning an `r` that can be forced to - -- await the result of the computation - fork : '{Remote} a -> {Remote} ('{Remote} a) - -type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair - -replicate : Nat -> a -> [a] -replicate n a = toSequence (take n (constant a)) - --- here's a simple usage of it - this ships the program `replicate n a` --- to another node and 
evaluates it there before returning to the current node - -ex1 : Nat -> a -> {Remote} [a] -ex1 n a = - node = Remote.spawn -- conjures up a new node! - Remote.at node '(replicate n a) -- and transports a computation to it! - --- Let's test out this beast! do we need to deploy our code to some EC2 instances?? --- Gak, no not yet, we just want to test locally, let's write a handler --- for the `Remote` ability that simulates everything locally! - -Remote.runLocal : '{Remote} a -> a -Remote.runLocal r = - step nid = cases - {a} -> a - {Remote.fork t -> k} -> handle k t with step nid - {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) - {Remote.at _ t -> k} -> handle k !t with step nid - handle !r with step (Node.Node 0) - --- Q: where do these nodes come from? that depends on the handler - --- you might have a handler like this, or a handler backed by an autoscaling EC2 pool... - --- easy peasy, let's give it a go... - -> Remote.runLocal '(ex1 10 "hi") - --- let's do some stuff in parallel on multiple nodes - -ex2 n = - -- spin up two remote computations on fresh nodes, in parallel, then combine their results - r1 = Remote.forkAt Remote.spawn '(replicate n "hi") -- returns a 'future' - r2 = Remote.forkAt Remote.spawn '(replicate n "there") - !r1 ++ !r2 - -> Remote.runLocal '(ex2 5) - --- little helper functions used above - -Remote.forkAt : Node -> '{Remote} a -> {Remote} ('{Remote} a) -Remote.forkAt node r = Remote.fork '(Remote.at node r) - -Node.increment : Node -> Node -Node.increment n = - use Node.Node -- the constructor - match n with Node n -> Node (n + 1) diff --git a/unison-src/sheepshead.u b/unison-src/sheepshead.u deleted file mode 100644 index d0f0f8d90e..0000000000 --- a/unison-src/sheepshead.u +++ /dev/null @@ -1,39 +0,0 @@ -type Suit = Club | Spade | Heart | Diamond -type Card = Card Rank Suit -type Rank = A | K | Q | J | _10 | _9 | _8 | _7 -type NonEmpty a = NonEmpty a [a] - -use Rank A K Q J _10 _9 _8 _7 -use Suit Club Spade Heart 
Diamond -use NonEmpty NonEmpty -use Optional Some None - -namespace Suit where - all = [Club, Spade, Heart, Diamond] - -namespace Rank where - all = [A, _10, K, Q, J, _9, _8, _7] - points = cases - A -> 11 - _10 -> 10 - K -> 4 - Q -> 3 - J -> 2 - _ -> 0 - toText = cases - A -> "A" - K -> "K" - Q -> "Q" - J -> "J" - _10 -> "10" - _9 -> "9" - _8 -> "8" - _7 -> "7" - -namespace NonEmpty where - toList = cases - NonEmpty h t -> Sequence.cons h t - fromList : [a] -> Optional (NonEmpty a) - fromList l = match Sequence.at 0 l with - None -> None - Some a -> Some (NonEmpty a (Sequence.drop 1 l)) diff --git a/unison-src/tests/324.u b/unison-src/tests/324.u deleted file mode 100644 index e7e5c45417..0000000000 --- a/unison-src/tests/324.u +++ /dev/null @@ -1,7 +0,0 @@ -foo a b = - if a Text.== "" then - match Text.size b with - 1 -> false - _ -> true - else - true diff --git a/unison-src/tests/344.uu b/unison-src/tests/344.uu deleted file mode 100644 index 6749329c28..0000000000 --- a/unison-src/tests/344.uu +++ /dev/null @@ -1,5 +0,0 @@ -ability Either a b where - left : a -> {Either a b} () - right : b -> {Either a b} () - -type Either a b = Left a | Right b diff --git a/unison-src/tests/514.u b/unison-src/tests/514.u deleted file mode 100644 index 4177359481..0000000000 --- a/unison-src/tests/514.u +++ /dev/null @@ -1,13 +0,0 @@ - --- all these can be added - -idNat : Nat -> Nat -idNat x = x - -idInt : Int -> Int -idInt x = x - -idPoly x = x - -idPoly2 : x -> x -idPoly2 y = y diff --git a/unison-src/tests/595.u b/unison-src/tests/595.u deleted file mode 100644 index b6383b6b58..0000000000 --- a/unison-src/tests/595.u +++ /dev/null @@ -1,13 +0,0 @@ - -type Any = Any (∀ r . (∀ a . 
a -> r) -> r) - --- also typechecks as expected -any : a -> Any -any a = Any.Any (k -> k a) - ---- -This typechecks fine, as expected, but try to `add` to codebase, get: - -unison: unknown var in environment: "r" environment = [Right User "a"] -CallStack (from HasCallStack): - error, called at src/Unison/ABT.hs:632:19 in unison-parser-typechecker-0.1-JxZSVhIPWTr4SazQ0mw03q:Unison.ABT diff --git a/unison-src/tests/868.u b/unison-src/tests/868.u deleted file mode 100644 index 21cef2773a..0000000000 --- a/unison-src/tests/868.u +++ /dev/null @@ -1,8 +0,0 @@ -type Choice = First | Second -type Wrapper = Wrapper Choice - -broken = match Wrapper.Wrapper Choice.Second with - Wrapper.Wrapper Choice.First -> true - _ -> false - -> broken diff --git a/unison-src/tests/868.ur b/unison-src/tests/868.ur deleted file mode 100644 index c508d5366f..0000000000 --- a/unison-src/tests/868.ur +++ /dev/null @@ -1 +0,0 @@ -false diff --git a/unison-src/tests/a-tale-of-two-optionals.u b/unison-src/tests/a-tale-of-two-optionals.u deleted file mode 100644 index d91fafa6e6..0000000000 --- a/unison-src/tests/a-tale-of-two-optionals.u +++ /dev/null @@ -1,13 +0,0 @@ -type Optional a = None | Some a - -Optional.isEmpty : Optional a -> Boolean -Optional.isEmpty = cases - Optional.None -> true - Optional.Some _ -> false - -increment x = x + 1 - -(|>) : forall a b . 
a -> (a -> b) -> b -a |> f = f a - -> Optional.Some 4 diff --git a/unison-src/tests/ability-inference-fail.uu b/unison-src/tests/ability-inference-fail.uu deleted file mode 100644 index e0dfbf2d7f..0000000000 --- a/unison-src/tests/ability-inference-fail.uu +++ /dev/null @@ -1,35 +0,0 @@ -ability Emit a where - emit : a ->{Emit a} () - -type Stream a = Stream ('{Emit a} ()) - -use Stream Stream -use Optional None Some - -namespace Stream where - - unfold : s -> (s -> Optional (a, s)) -> Stream a - unfold s f = Stream 'let - -- step : (s -> Optional (a,s)) -> s ->{Emit a} () - step f s = match f s with - None -> () - Some (a, s) -> emit a - step f s - step f s - ---- - -I found a value of type a where I expected to find one of type a: - - 11 | unfold : s -> (s -> Optional (a, s)) -> Stream a - 12 | unfold s f = Stream 'let - 13 | -- step : (s -> Optional (a,s)) -> s ->{Emit a} () - 14 | step f s = match f s with - 15 | None -> () - 16 | Some (a, s) -> emit a - 17 | step f s - 18 | step f s - - from right here: - - 4 | type Stream a = Stream ('{Emit a} ()) diff --git a/unison-src/tests/ability-keyword.u b/unison-src/tests/ability-keyword.u deleted file mode 100644 index afe11e7a94..0000000000 --- a/unison-src/tests/ability-keyword.u +++ /dev/null @@ -1,7 +0,0 @@ - -ability Foo where - foo : {Foo} Text - -x = 'let - y = Foo.foo - () diff --git a/unison-src/tests/abort.u b/unison-src/tests/abort.u deleted file mode 100644 index f5649ac457..0000000000 --- a/unison-src/tests/abort.u +++ /dev/null @@ -1,13 +0,0 @@ ---Abort -ability Abort where - Abort : forall a . () -> {Abort} a -eff : forall a b . 
(a -> b) -> b -> Request Abort a -> b -eff f z = cases - { Abort.Abort _ -> k } -> z - { a } -> f a --- heff : Nat -heff = handle Abort.Abort () with eff (x -> x Nat.+ 2) 1 -hudy : Nat -hudy = handle 42 with eff (x -> x Nat.+ 2) 1 -bork : () -> {Abort} Nat -bork = u -> 1 Nat.+ (Abort.Abort ()) diff --git a/unison-src/tests/ask-inferred.u b/unison-src/tests/ask-inferred.u deleted file mode 100644 index 266eb12e2c..0000000000 --- a/unison-src/tests/ask-inferred.u +++ /dev/null @@ -1,23 +0,0 @@ ---Ask inferred - -ability Ask a where - ask : {Ask a} a - -ability AskU where - ask : {AskU} Nat - -use Nat + - -ability AskT where - ask : {AskT} Text - -x = '(Ask.ask + 1) -x2 = '(Ask.ask + AskU.ask) - -x3 = '(Ask.ask + AskU.ask + size AskT.ask) - -y : '{Ask Nat} Nat -y = '(!x) - -y2 : '{Ask Nat, AskU} Nat -y2 = x2 diff --git a/unison-src/tests/boolean-ops-in-sequence.u b/unison-src/tests/boolean-ops-in-sequence.u deleted file mode 100644 index afc21f3477..0000000000 --- a/unison-src/tests/boolean-ops-in-sequence.u +++ /dev/null @@ -1 +0,0 @@ -test = true || false && true diff --git a/unison-src/tests/builtin-arity-0-evaluation.u b/unison-src/tests/builtin-arity-0-evaluation.u deleted file mode 100644 index c857b3c259..0000000000 --- a/unison-src/tests/builtin-arity-0-evaluation.u +++ /dev/null @@ -1,3 +0,0 @@ -use Universal == - -> Text.empty == "" diff --git a/unison-src/tests/builtin-arity-0-evaluation.ur b/unison-src/tests/builtin-arity-0-evaluation.ur deleted file mode 100644 index 27ba77ddaf..0000000000 --- a/unison-src/tests/builtin-arity-0-evaluation.ur +++ /dev/null @@ -1 +0,0 @@ -true diff --git a/unison-src/tests/caseguard.u b/unison-src/tests/caseguard.u deleted file mode 100644 index 16e949af5e..0000000000 --- a/unison-src/tests/caseguard.u +++ /dev/null @@ -1,15 +0,0 @@ --- Used to fail on the guard --- --- typechecker.tests/caseguard.u FAILURE I'm not sure what x means at line 3, columns 9-10 --- --- 3 | x | x == "woot" -> false --- --- Whatever it is, it has 
a type that conforms to Text. - -use Universal == - -f = cases - x | x == "woot" -> false - y | y == "foo" -> true - --- > f "woot" diff --git a/unison-src/tests/cce.u b/unison-src/tests/cce.u deleted file mode 100644 index bd67157035..0000000000 --- a/unison-src/tests/cce.u +++ /dev/null @@ -1,116 +0,0 @@ -use Universal < - -type Future a = Future ('{Remote} a) - --- A simple distributed computation ability -ability Remote where - - -- Spawn a new node - spawn : {Remote} Node - - -- Sequentially evaluate the given thunk on another node - -- then return to the current node when it completes - at : n -> '{Remote} a -> {Remote} a - - -- Start a computation running, returning an `r` that can be forced to - -- await the result of the computation - fork : '{Remote} a ->{Remote} Future a - -type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair - -force : Future a ->{Remote} a -force = cases Future.Future r -> !r - -Future.fromThunk : '{Remote} a -> Future a -Future.fromThunk = Future.Future - --- Let's test out this beast! do we need to deploy our code to some EC2 instances?? --- Gak, no not yet, we just want to test locally, let's write a handler --- for the `Remote` ability that simulates everything locally! 
- -Remote.runLocal : '{Remote} a -> a -Remote.runLocal r = - step nid = cases - {a} -> a - {Remote.fork t -> k} -> handle k (Future.fromThunk t) with step nid - {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) - {Remote.at _ t -> k} -> handle k !t with step nid - handle !r with step (Node.Node 0) - -Remote.forkAt : Node -> '{Remote} a ->{Remote} (Future a) -Remote.forkAt node r = Remote.fork '(Remote.at node r) - -use Optional None Some -use Monoid Monoid -use List ++ - -List.map : (a ->{e} b) -> [a] ->{e} [b] -List.map f as = - go f acc as i = match List.at i as with - None -> acc - Some a -> go f (acc `snoc` f a) as (i + 1) - go f [] as 0 - -type Monoid a = Monoid (a -> a -> a) a - -Monoid.zero = cases Monoid.Monoid op z -> z -Monoid.op = cases Monoid.Monoid op z -> op - -Monoid.orElse m = cases - None -> Monoid.zero m - Some a -> a - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - go acc a b = match List.at 0 a with - None -> acc ++ b - Some hd1 -> match at 0 b with - None -> acc ++ a - Some hd2 -> - if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b - else go (acc `snoc` hd2) a (drop 1 b) - go [] a b - -dmap : (a ->{Remote} b) -> [a] ->{Remote} [b] -dmap f as = - bs = List.map (a -> Remote.forkAt Remote.spawn '(f a)) as - List.map force bs - -dreduce : Monoid a -> [a] ->{Remote} a -dreduce m a = - if size a < 2 then Monoid.orElse m (List.at 0 a) - else - l = Remote.forkAt Remote.spawn '(dreduce m (List.take (size a / 2) a)) - r = Remote.forkAt Remote.spawn '(dreduce m (List.drop (size a / 2) a)) - Monoid.op m (force l) (force r) - -dmapReduce : (a ->{Remote} b) -> Monoid b -> [a] ->{Remote} b -dmapReduce f m as = dreduce m (dmap f as) - -dsort : (a -> a -> Boolean) -> [a] ->{Remote} [a] -dsort lte a = - dmapReduce (a -> [a]) (Monoid (merge lte) []) a - -sort : (a -> a -> Boolean) -> [a] -> [a] -sort lte a = - if List.size a < 2 then a - else - l = sort lte (take (size a / 2) a) - r = sort lte (drop (size a / 2) a) - 
merge lte l r - -Node.increment : Node -> Node -Node.increment n = - use Node Node -- the constructor - match n with Node n -> Node (n + 1) - -> Remote.runLocal '(dsort (<) [3,2,1,1,2,3,9182,1,2,34,1,23]) - ----- - -java.lang.ClassCastException: org.unisonweb.UnboxedType$Nat$ cannot be cast to org.unisonweb.Value$Lambda - at org.unisonweb.compilation.package$.org$unisonweb$compilation$package$$$anonfun$dynamicCall$1(compilation.scala:747) - at org.unisonweb.compilation.package$$anonfun$dynamicCall$2.apply(compilation.scala:714) - at org.unisonweb.compilation.package$.org$unisonweb$compilation$package$$$anonfun$compileMatchCase$3(compilation.scala:332) - at org.unisonweb.compilation.package$$anonfun$compileMatchCase$6.apply(compilation.scala:324) - at org.unisonweb.compilation.package$.org$unisonweb$compilation$package$$$anonfun$compile$12(compilation.scala:1070) diff --git a/unison-src/tests/cce.ur b/unison-src/tests/cce.ur deleted file mode 100644 index f168e469e2..0000000000 --- a/unison-src/tests/cce.ur +++ /dev/null @@ -1 +0,0 @@ -[1, 1, 1, 1, 2, 2, 2, 3, 3, 23, 34, 9182] diff --git a/unison-src/tests/compose-inference.u b/unison-src/tests/compose-inference.u deleted file mode 100644 index 984abc6467..0000000000 --- a/unison-src/tests/compose-inference.u +++ /dev/null @@ -1,4 +0,0 @@ - -f `compose` g = x -> f (g x) - -> compose diff --git a/unison-src/tests/console.u b/unison-src/tests/console.u deleted file mode 100644 index aa5a5ab07a..0000000000 --- a/unison-src/tests/console.u +++ /dev/null @@ -1,54 +0,0 @@ -ability State s where - get : {State s} s - set : s -> {State s} () - -ability Console where - read : {Console} (Optional Text) - write : Text -> {Console} () - -fst = cases Tuple.Cons a _ -> a - ---TODO type is wrongly being inferred (or at least displayed) as `Tuple a (Tuple a b) ->{} a` -snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b - -namespace Console where - - state : s -> Request (State s) a -> a - state s = cases - {State.get -> k} -> handle k 
s with state s - {State.set s' -> k} -> handle k () with state s' - {a} -> a - - simulate : Request Console d -> {State ([Text], [Text])} d - simulate = cases - {Console.read -> k} -> - io = State.get - ins = fst io - outs = snd io - State.set (drop 1 ins, outs) - -- this really should typecheck but doesn't for some reason - -- error is that `simulate` doesn't check against `Request Console c -> r`, - -- but seems like that `r` should get instantiated as `{State (..)} c`. - handle k (at 0 ins) with simulate - {Console.write t -> k} -> - io = State.get - ins = fst io - outs = snd io - -- same deal here - handle k (State.set (ins, cons t outs)) with simulate - {a} -> a - -(++) = (Text.++) - -x = handle - handle - use Console read write - use Optional Some None - write "What's your name?" - match read with - Some name -> write ("Hello" ++ name) - None -> write "Fine, be that way." - with Console.simulate - with Console.state ([],[]) - -> x diff --git a/unison-src/tests/console1.u b/unison-src/tests/console1.u deleted file mode 100644 index d7726842c6..0000000000 --- a/unison-src/tests/console1.u +++ /dev/null @@ -1,45 +0,0 @@ --- This confusingly gives an error that --- it doesn't know what `Console.simulate` is. - -ability State s where - get : {State s} s - set : s -> {State s} () - -ability Console where - read : {Console} (Optional Text) - write : Text -> {Console} () - -use Console simulate - -fst = cases Tuple.Cons a _ -> a - -snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b - -namespace Console where - - simulate : Request Console a -> {State ([Text], [Text])} a - simulate = cases - {Console.read -> k} -> handle - io = State.get - ins = fst io - outs = snd io - State.set (drop 1 ins, outs) - k (at 0 ins) - with simulate - - {Console.write t -> k} -> handle - io = State.get - ins = fst io - outs = snd io - State.set (ins, outs ++ [t]) - k () - with simulate - -e = 'let handle - use Console read write - use Optional Some None - write "What's your name?" 
- match read with - Some name -> write ("Hello" ++ name) - None -> write "Fine, be that way." - with simulate diff --git a/unison-src/tests/data-references-builtins.u b/unison-src/tests/data-references-builtins.u deleted file mode 100644 index 099ef4e284..0000000000 --- a/unison-src/tests/data-references-builtins.u +++ /dev/null @@ -1,4 +0,0 @@ ---data references builtins -type StringOrInt = S Text | I Nat -> [StringOrInt.S "YO", StringOrInt.I 1] - diff --git a/unison-src/tests/delay.u b/unison-src/tests/delay.u deleted file mode 100644 index 0935bbabb3..0000000000 --- a/unison-src/tests/delay.u +++ /dev/null @@ -1,37 +0,0 @@ - -type Foo a = Foo a - -(+) = (Nat.+) - --- The type 'a is sugar for `() -> a`. --- The term 'a is sugar for `() -> a`. --- !a forces a delayed expression (equivalent to `a()`) - -woot : 'Nat -woot = '42 - --- A 'a can also be created by prefixing `let` with a ' -woot2 : 'Nat -woot2 = 'let - x = 1 - y = 2 - x + y - --- ' has higher precedence than -> in type signatures --- and a lower precedence than type application -woot3 : 'Nat -> Nat -woot3 x = !x + 1 - -woot4 : ∀ a . 
'Foo a -> Foo a -woot4 foo = !foo - -woot4Usage = woot4 '(Foo.Foo 19) - -woot4Usage2 = - foo = 'let - x : Nat - x = 99 - Foo.Foo (x + x) - woot4 foo - -> woot4Usage2 diff --git a/unison-src/tests/delay_parse.u b/unison-src/tests/delay_parse.u deleted file mode 100644 index 525f62eaa4..0000000000 --- a/unison-src/tests/delay_parse.u +++ /dev/null @@ -1,20 +0,0 @@ -ability T where - foo : {T} () - --- parses fine -a : () -> {T} () -a x = () - --- parses fine -b : () -> '() -b = x -> (y -> ()) - --- parse error -c : () -> {T} '() -c = x -> (y -> ()) - --- parses fine with extra parentheses -d : () -> {T} ('()) -d = x -> (y -> ()) - - diff --git a/unison-src/tests/effect-instantiation.u b/unison-src/tests/effect-instantiation.u deleted file mode 100644 index 5ec6e1679b..0000000000 --- a/unison-src/tests/effect-instantiation.u +++ /dev/null @@ -1,10 +0,0 @@ - -blah : a -> a -> a -blah a a2 = a2 - -ability Foo where - foo : {Foo} Text - --- previously this didn't work as first argument was pure --- and second argument was impure -> blah '("hello!") 'Foo.foo diff --git a/unison-src/tests/effect-instantiation2.u b/unison-src/tests/effect-instantiation2.u deleted file mode 100644 index 6a12abb9ab..0000000000 --- a/unison-src/tests/effect-instantiation2.u +++ /dev/null @@ -1,8 +0,0 @@ - -woot : a -> a -> a -woot a a2 = a - -ability Hi where - hi : Float ->{Hi} Int - -> woot Float.floor Hi.hi diff --git a/unison-src/tests/effect1.u b/unison-src/tests/effect1.u deleted file mode 100644 index 81c772401b..0000000000 --- a/unison-src/tests/effect1.u +++ /dev/null @@ -1,8 +0,0 @@ - -eff : forall a b . (a -> b) -> b -> Request Abort a -> b -eff f z = cases - { Abort.Abort _ -> k } -> z - { a } -> f a - -ability Abort where - Abort : forall a . 
() -> {Abort} a diff --git a/unison-src/tests/empty-above-the-fold.u b/unison-src/tests/empty-above-the-fold.u deleted file mode 100644 index edeba5919a..0000000000 --- a/unison-src/tests/empty-above-the-fold.u +++ /dev/null @@ -1,6 +0,0 @@ --- Empty files and all-comment files parse fine, so this one should too. ----- Anything below this line is ignored by Unison. - --- /Users/arya/unison/unison-src/tests/empty-above-the-fold.u:1:1: --- unexpected end of input --- expecting ability, ability, or use diff --git a/unison-src/tests/fib4.ur b/unison-src/tests/fib4.ur deleted file mode 100644 index 42c52724b0..0000000000 --- a/unison-src/tests/fib4.ur +++ /dev/null @@ -1 +0,0 @@ -2249999 diff --git a/unison-src/tests/fix1640.u b/unison-src/tests/fix1640.u deleted file mode 100644 index 1e339c8387..0000000000 --- a/unison-src/tests/fix1640.u +++ /dev/null @@ -1,25 +0,0 @@ - -unique type Color = Red | Black -unique type RBTree a = Leaf | Tree Color (RBTree a) a (RBTree a) - --- interesting, this typechecks fine -isRed = cases - Color.Red -> true - Color.Black -> false - --- as does this -RBTree.isRed1 = cases - RBTree.Tree _ _ _ _ -> true - _ -> false - --- but this did not (before this fix) -RBTree.isRed = cases - RBTree.Tree Color.Red _ _ _ -> true - _ -> false - --- In fixing this bug, I noticed that the parser would previously reject --- this perfectly cromulent pattern match, so I fixed that too. 
-thisIsTotallyLegit = cases - [RBTree.Tree _ _ _ _] -> true - _ -> false - diff --git a/unison-src/tests/fix528.u b/unison-src/tests/fix528.u deleted file mode 100644 index c0dff14ec0..0000000000 --- a/unison-src/tests/fix528.u +++ /dev/null @@ -1,12 +0,0 @@ - -(|>) : a -> (a -> b) -> b -a |> f = f a - -ex1 = "bob" |> (Text.++) "hi, " - -type Woot = Woot Text Int Nat - -ex2 = match 0 |> Woot "Zonk" +10 with - Woot.Woot _ i _ -> i - -> (ex1, ex2) diff --git a/unison-src/tests/fix528.ur b/unison-src/tests/fix528.ur deleted file mode 100644 index 9131151dcf..0000000000 --- a/unison-src/tests/fix528.ur +++ /dev/null @@ -1 +0,0 @@ -("hi, bob", +10) diff --git a/unison-src/tests/fix739.u b/unison-src/tests/fix739.u deleted file mode 100644 index 28d36405c4..0000000000 --- a/unison-src/tests/fix739.u +++ /dev/null @@ -1,4 +0,0 @@ -type MonoidRec a = { - combine : a -> a -> a, - empty : a -} diff --git a/unison-src/tests/force.u b/unison-src/tests/force.u deleted file mode 100644 index b4e1d2bdf8..0000000000 --- a/unison-src/tests/force.u +++ /dev/null @@ -1,9 +0,0 @@ -ability Woot where woot : {Woot} Text - -force : '{e} a ->{e} a -force a = !a - -ex : '{Woot} Text -ex = '(force 'Woot.woot) - -> ex diff --git a/unison-src/tests/guard-boolean-operators.u b/unison-src/tests/guard-boolean-operators.u deleted file mode 100644 index a5da96a178..0000000000 --- a/unison-src/tests/guard-boolean-operators.u +++ /dev/null @@ -1,11 +0,0 @@ -type Foo = Foo Boolean Boolean - -f : Foo -> Boolean -f = cases - Foo.Foo a b | a || b -> true - _ -> false - -g : Foo -> Boolean -g = cases - Foo.Foo a b | a && b -> true - _ -> false diff --git a/unison-src/tests/handler-stacking.u b/unison-src/tests/handler-stacking.u deleted file mode 100644 index bfc1e3c129..0000000000 --- a/unison-src/tests/handler-stacking.u +++ /dev/null @@ -1,34 +0,0 @@ -use State get put -use Writer tell - -> handle - handle replicate 5 main - with writerHandler [] - with stateHandler "hello" - - -main = '(tell get) - 
-replicate : Nat -> '{e} () -> {e} () -replicate n x = - if n Nat.== 0 then () else - !x - replicate (n `drop` 1) x - -ability State a where - get : {State a} a - put : a -> {State a} () - -ability Writer w where - tell : w -> {Writer w} () - -stateHandler : s -> Request {State s} a -> (s, a) -stateHandler s = cases - { State.get -> k } -> handle k s with stateHandler s - { State.put s -> k } -> handle k () with stateHandler s - { a } -> (s, a) - -writerHandler : [w] -> Request {Writer w} a -> ([w], a) -writerHandler ww = cases - { Writer.tell w -> k } -> handle k () with writerHandler (ww `snoc` w) - { a } -> (ww, a) diff --git a/unison-src/tests/hang.u b/unison-src/tests/hang.u deleted file mode 100644 index 2313c4f017..0000000000 --- a/unison-src/tests/hang.u +++ /dev/null @@ -1,88 +0,0 @@ - -use Universal == < - -type Future a = Future ('{Remote} a) - --- A simple distributed computation ability -ability Remote where - - -- Spawn a new node - spawn : {Remote} Node - - -- Sequentially evaluate the given thunk on another node - -- then return to the current node when it completes - at : n -> '{Remote} a -> {Remote} a - - -- Start a computation running, returning an `r` that can be forced to - -- await the result of the computation - fork : '{Remote} a ->{Remote} Future a - -type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair - -force : Future a ->{Remote} a -force = cases Future.Future r -> !r - --- Let's test out this beast! do we need to deploy our code to some EC2 instances?? --- Gak, no not yet, we just want to test locally, let's write a handler --- for the `Remote` ability that simulates everything locally! 
- -Remote.runLocal : '{Remote} a -> a -Remote.runLocal r = - use Future Future - step nid = cases - {a} -> a - {Remote.fork t -> k} -> handle k (Future t) with step nid - {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) - {Remote.at _ t -> k} -> handle k !t with step nid - handle !r with step (Node.Node 0) - -Remote.forkAt : Node -> '{Remote} a ->{Remote} (Future a) -Remote.forkAt node r = Remote.fork '(Remote.at node r) - -use Optional None Some -use Monoid Monoid -use List ++ - -List.map : (a ->{e} b) -> [a] ->{e} [b] -List.map f as = - go f acc as i = match List.at i as with - None -> acc - Some a -> go f (acc `snoc` f a) as (i + 1) - go f [] as 0 - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - go acc a b = match at 0 a with - None -> acc ++ b - Some hd1 -> match at 0 b with - None -> acc ++ a - Some hd2 -> - if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b - else go (acc `snoc` hd2) a (drop 1 b) - go [] a b - -dsort2 : (a -> a -> Boolean) -> [a] ->{Remote} [a] -dsort2 lte as = - if size as < 2 then as - else match halve as with - None -> as - Some (left, right) -> - use Remote forkAt spawn - l = forkAt spawn '(dsort2 lte left) - r = forkAt spawn '(dsort2 lte right) - merge lte (force l) (force r) - -isEmpty : [a] -> Boolean -isEmpty a = size a == 0 - -halve : [a] -> Optional ([a], [a]) -halve as = - if isEmpty as then None - else Some (take (size as / 2) as, drop (size as / 2) as) - -Node.increment : Node -> Node -Node.increment n = - use Node Node -- the constructor - match n with Node n -> Node (n + 1) - -> Remote.runLocal '(dsort2 (<) [3,2,1,1,2,3,9182,1,2,34,1,23]) diff --git a/unison-src/tests/id.u b/unison-src/tests/id.u deleted file mode 100644 index 7d0bd3d4d2..0000000000 --- a/unison-src/tests/id.u +++ /dev/null @@ -1,5 +0,0 @@ -id : a -> a -id x = x - -> id - diff --git a/unison-src/tests/if.u b/unison-src/tests/if.u deleted file mode 100644 index e3af85295c..0000000000 --- a/unison-src/tests/if.u +++ 
/dev/null @@ -1,2 +0,0 @@ -foo = if true then true else false -> foo diff --git a/unison-src/tests/imports.u b/unison-src/tests/imports.u deleted file mode 100644 index 0748ae2845..0000000000 --- a/unison-src/tests/imports.u +++ /dev/null @@ -1,22 +0,0 @@ -use . Int -- imports `.Int` from root path and gives it the local name `Int` - --- This brings `None` into scope unqualified -use Optional None - --- '.' is optional, this brings `None` and `Some` into --- scope unqualified -use Optional None Some - --- Can import operators this way also --- no need to put them in parens -use Nat + - --- Later imports shadow earlier ones -use Nat - * / -use Nat drop * -use Nat drop --- use Int + -- this would cause type error below! - -> match Some (100 + 200 / 3 * 2) with - Optional.None -> 19 - Some 200 -> 20 diff --git a/unison-src/tests/imports2.u b/unison-src/tests/imports2.u deleted file mode 100644 index 73c38949a7..0000000000 --- a/unison-src/tests/imports2.u +++ /dev/null @@ -1,12 +0,0 @@ --- This gives the error: --- I'm not sure what Optional.orElse means at Line 12 --- which is weird because it means Optional.orElse, which is defined here. 
- -use Optional Some None orElse - -Optional.orElse a b = - match a with - None -> b - a -> a - -> orElse diff --git a/unison-src/tests/inner-lambda1.u b/unison-src/tests/inner-lambda1.u deleted file mode 100644 index 4213c556b3..0000000000 --- a/unison-src/tests/inner-lambda1.u +++ /dev/null @@ -1,15 +0,0 @@ -use Nat drop >= -use Optional None Some - -search : (Nat -> Int) -> Nat -> Nat -> Optional Nat -search hit bot top = - -- go : Nat -> Nat -> Optional Nat - go bot top = - if bot >= top then None - else - mid = (bot + top) / 2 - match hit mid with - +0 -> Some mid - -1 -> go bot (mid `drop` 1) - +1 -> go (mid + 1) top - go bot top diff --git a/unison-src/tests/inner-lambda2.u b/unison-src/tests/inner-lambda2.u deleted file mode 100644 index 329a296e6c..0000000000 --- a/unison-src/tests/inner-lambda2.u +++ /dev/null @@ -1,16 +0,0 @@ - -use Nat drop >= -use Optional None Some - -search : (Nat -> Int) -> Nat -> Nat -> Optional Nat -search hit bot top = - go : Nat -> Nat -> Optional Nat - go bot top = - if bot >= top then None - else - mid = (bot + top) / 2 - match hit mid with - +0 -> Some mid - -1 -> go bot (mid `drop` 1) - +1 -> go (mid + 1) top - go bot top diff --git a/unison-src/tests/io-state2.u b/unison-src/tests/io-state2.u deleted file mode 100644 index e5ac00d21c..0000000000 --- a/unison-src/tests/io-state2.u +++ /dev/null @@ -1,23 +0,0 @@ ---IO/State2 ability -ability IO where - launchMissiles : {IO} () - -foo : Int -> {IO} Int -foo unit = - incBy : Int -> {IO, State Int} Int - incBy i = - IO.launchMissiles -- OK, since declared by `incBy` signature - y = State.get - State.put (y Int.+ i) - +42 - +43 - -type Optional a = - Some a | None - -ability State se2 where - put : ∀ se . se -> {State se} () - get : ∀ se . 
{State se} se - - - diff --git a/unison-src/tests/io-state3.u b/unison-src/tests/io-state3.u deleted file mode 100644 index ca05a59cd0..0000000000 --- a/unison-src/tests/io-state3.u +++ /dev/null @@ -1,10 +0,0 @@ ---IO3 ability -ability IO where - launchMissiles : () -> {IO} () --- binding IS guarded, so its body can access whatever abilities --- are declared by the type of the binding --- ambient abilities (which will be empty) -ex1 : () -> {IO} () -ex1 unit = IO.launchMissiles() - - diff --git a/unison-src/tests/keyword-parse.u b/unison-src/tests/keyword-parse.u deleted file mode 100644 index e54ac9592d..0000000000 --- a/unison-src/tests/keyword-parse.u +++ /dev/null @@ -1,4 +0,0 @@ -f x = x - -> (f false) && false --- and false false diff --git a/unison-src/tests/lambda-closing-over-effectful-fn.u b/unison-src/tests/lambda-closing-over-effectful-fn.u deleted file mode 100644 index 867fc85c2f..0000000000 --- a/unison-src/tests/lambda-closing-over-effectful-fn.u +++ /dev/null @@ -1,10 +0,0 @@ -use Optional None Some - -unfold : s -> (s ->{z} Optional (a, s)) ->{z} [a] -unfold s f = - go s acc = match f s with - None -> acc - Some (hd, s) -> go s (acc `List.snoc` hd) - go s [] - -> unfold 0 (n -> if n Nat.< 5 then Some (n, n + 1) else None) diff --git a/unison-src/tests/lambda-closing-over-effectful-fn.ur b/unison-src/tests/lambda-closing-over-effectful-fn.ur deleted file mode 100644 index 2fdebcae86..0000000000 --- a/unison-src/tests/lambda-closing-over-effectful-fn.ur +++ /dev/null @@ -1 +0,0 @@ -[0,1,2,3,4] diff --git a/unison-src/tests/links.u b/unison-src/tests/links.u deleted file mode 100644 index 67a6f68b81..0000000000 --- a/unison-src/tests/links.u +++ /dev/null @@ -1,13 +0,0 @@ - - -natLink : Link.Type -natLink = typeLink Nat - -takeLink : Link.Term -takeLink = termLink List.take - -dropLink : Link.Term -dropLink = termLink List.drop - -> (takeLink == dropLink, natLink == typeLink Nat) - diff --git a/unison-src/tests/links.ur b/unison-src/tests/links.ur 
deleted file mode 100644 index e868f25d0e..0000000000 --- a/unison-src/tests/links.ur +++ /dev/null @@ -1 +0,0 @@ -(false, true) diff --git a/unison-src/tests/map-traverse.u b/unison-src/tests/map-traverse.u deleted file mode 100644 index 980927ca77..0000000000 --- a/unison-src/tests/map-traverse.u +++ /dev/null @@ -1,30 +0,0 @@ ---map/traverse -ability Noop where - noop : ∀ a . a -> {Noop} a - -ability Noop2 where - noop2 : ∀ a . a -> a -> {Noop2} a - -type List a = Nil | Cons a (List a) - -map : ∀ a b e . (a -> {e} b) -> List a -> {e} (List b) -map f = cases - List.Nil -> List.Nil - List.Cons h t -> List.Cons (f h) (map f t) - -c = List.Cons -z : ∀ a . List a -z = List.Nil - -ex = (c 1 (c 2 (c 3 z))) - -pureMap : List Text -pureMap = map (a -> "hello") ex - --- `map` is ability polymorphic -zappy : () -> {Noop} (List Nat) -zappy u = map (zap -> (Noop.noop (zap Nat.+ 1))) ex - --- mixing multiple abilitys in a call to `map` works fine -zappy2 : () -> {Noop, Noop2} (List Nat) -zappy2 u = map (zap -> Noop.noop (zap Nat.+ Noop2.noop2 2 7)) ex diff --git a/unison-src/tests/map-traverse2.u b/unison-src/tests/map-traverse2.u deleted file mode 100644 index 61ee14c168..0000000000 --- a/unison-src/tests/map-traverse2.u +++ /dev/null @@ -1,32 +0,0 @@ ---map/traverse -ability Noop where - noop : a -> {Noop} a - -ability Noop2 where - noop2 : a -> a -> {Noop2} a - -type List a = Nil | Cons a (List a) - -map : (a -> b) -> List a -> List b -map f = cases - List.Nil -> List.Nil - List.Cons h t -> List.Cons (f h) (map f t) - -c = List.Cons - -z : ∀ a . 
List a -z = List.Nil - -ex = (c 1 (c 2 (c 3 z))) - -pureMap : List Text -pureMap = map (a -> "hello") ex - --- `map` is ability polymorphic -zappy : '{Noop} (List Nat) -zappy = 'let map (zap -> Noop.noop (zap Nat.+ 1)) ex - --- mixing multiple abilitys in a call to `map` works fine -zappy2 : '{Noop, Noop2} (List Nat) -zappy2 = 'let - map (zap -> Noop.noop (zap Nat.+ Noop2.noop2 2 7)) ex diff --git a/unison-src/tests/mergesort.u b/unison-src/tests/mergesort.u deleted file mode 100644 index 1f46d7ba26..0000000000 --- a/unison-src/tests/mergesort.u +++ /dev/null @@ -1,26 +0,0 @@ -use Universal < - -> sort (<) [9234,23,1,3,6,2,3,51,24,1,3,55,2,1] - -halveWith : ([a] -> [a] -> b) -> [a] -> b -halveWith k a = k (take (size a / 2) a) (drop (size a / 2) a) - -sort : (a -> a -> Boolean) -> [a] -> [a] -sort lte a = - if size a < 2 then a - else halveWith (l r -> merge lte (sort lte l) (sort lte r)) a - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - use List ++ - use Optional None Some - go acc a b = match at 0 a with - None -> acc ++ b - Some hd1 -> match at 0 b with - None -> acc ++ a - Some hd2 -> - if hd1 `lte` hd2 then - go (acc `snoc` hd1) (drop 1 a) b - else - go (acc `snoc` hd2) a (drop 1 b) - go [] a b diff --git a/unison-src/tests/methodical/abilities.u b/unison-src/tests/methodical/abilities.u deleted file mode 100644 index 50ed1b5c5f..0000000000 --- a/unison-src/tests/methodical/abilities.u +++ /dev/null @@ -1,18 +0,0 @@ - --- ABILITIES - -ability A where - woot : {A} Nat - -unA = cases - {a} -> a - {A.woot -> k} -> handle k 10 with unA - --- This verifies that the continuation captures local variables -a1 = handle - x = 42 - y = A.woot - x - with unA - -> a1 -- should be 42 diff --git a/unison-src/tests/methodical/abilities.ur b/unison-src/tests/methodical/abilities.ur deleted file mode 100644 index d81cc0710e..0000000000 --- a/unison-src/tests/methodical/abilities.ur +++ /dev/null @@ -1 +0,0 @@ -42 diff --git 
a/unison-src/tests/methodical/apply-constructor.u b/unison-src/tests/methodical/apply-constructor.u deleted file mode 100644 index a652f0cba4..0000000000 --- a/unison-src/tests/methodical/apply-constructor.u +++ /dev/null @@ -1,29 +0,0 @@ - --- Now check exact and underapply cases for constructors --- (overapply of a constructor is always a type error) - -type Woot = Woot Nat Nat Nat Nat - -toSeq : Woot -> [Nat] -toSeq = cases - Woot a b c d -> [a,b,c,d] - -use Woot Woot - -exactt = Woot 1 2 3 4 - -underapply0t = - p1 = 1 - f = Woot p1 - f 2 3 4 - -underapply1t = - p2 = 2 - f = Woot 1 p2 - f 3 4 - -underapply2t = - f = Woot 1 2 3 - f 4 - -> (toSeq exactt, toSeq underapply0t, toSeq underapply1t, toSeq underapply2t) diff --git a/unison-src/tests/methodical/apply-constructor.ur b/unison-src/tests/methodical/apply-constructor.ur deleted file mode 100644 index 9aec08492d..0000000000 --- a/unison-src/tests/methodical/apply-constructor.ur +++ /dev/null @@ -1 +0,0 @@ -([1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4]) diff --git a/unison-src/tests/methodical/apply.u b/unison-src/tests/methodical/apply.u deleted file mode 100644 index 67134523ed..0000000000 --- a/unison-src/tests/methodical/apply.u +++ /dev/null @@ -1,43 +0,0 @@ --- FUNCTION APPLICATION. 
There are several cases to check: --- * Exact application, underapplication, overapplication --- * Closure formation (used by data types and builtins) vs specialization --- * Overapplication yielding a request - -fn p1 p2 p3 p4 = [p1, p2, p3, p4] - -exact = - p1 = 1 - p2 = 2 - fn p1 p2 3 4 - -underapply0 = - f = fn 1 - f 2 3 4 - -underapply1 = - f = fn 1 2 - f 3 4 - -underapply2 = - f = fn 1 2 3 - f 4 - -fn2 p1 p2 = - f p3 p4 = [p1, p2] ++ [p3, p4] - f - -exact1 = - f = fn2 1 2 - f 3 4 - -overapply1 = fn2 1 2 3 4 - -overapply2 = - f = fn2 1 2 3 - f 4 - -overapplyN = - id x = x - id id id id id 99 - -> (exact, underapply0, underapply1, underapply2, exact1, overapply1, overapply2, overapplyN) diff --git a/unison-src/tests/methodical/apply.ur b/unison-src/tests/methodical/apply.ur deleted file mode 100644 index fc9cf62f12..0000000000 --- a/unison-src/tests/methodical/apply.ur +++ /dev/null @@ -1 +0,0 @@ -([1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], 99) diff --git a/unison-src/tests/methodical/builtin-nat-to-float.u b/unison-src/tests/methodical/builtin-nat-to-float.u deleted file mode 100644 index 4b8e15e48f..0000000000 --- a/unison-src/tests/methodical/builtin-nat-to-float.u +++ /dev/null @@ -1 +0,0 @@ -> .builtin.Nat.toFloat 4 diff --git a/unison-src/tests/methodical/builtin-nat-to-float.ur b/unison-src/tests/methodical/builtin-nat-to-float.ur deleted file mode 100644 index 5186d07068..0000000000 --- a/unison-src/tests/methodical/builtin-nat-to-float.ur +++ /dev/null @@ -1 +0,0 @@ -4.0 diff --git a/unison-src/tests/methodical/builtins.u b/unison-src/tests/methodical/builtins.u deleted file mode 100644 index 597f2d2272..0000000000 --- a/unison-src/tests/methodical/builtins.u +++ /dev/null @@ -1,14 +0,0 @@ -use Optional Some None - -> natTextRoundTrip = - Nat.fromText (Nat.toText 123) Universal.== Some 123 - -> intTextRoundTripPos = - Int.fromText (Int.toText +123) Universal.== Some +123 - -> intTextRoundTripNeg = - Int.fromText 
(Int.toText -123) Universal.== Some -123 - -> intFloatRoundTrip = - Float.round (Int.toFloat +123) Universal.== +123 - diff --git a/unison-src/tests/methodical/cycle-minimize.u b/unison-src/tests/methodical/cycle-minimize.u deleted file mode 100644 index fc6356e719..0000000000 --- a/unison-src/tests/methodical/cycle-minimize.u +++ /dev/null @@ -1,11 +0,0 @@ - -ability SpaceAttack where - launchMissiles : Text -> () - --- should typecheck fine, as the `launchMissiles "saturn"` --- gets moved out of the `ping` / `pong` cycle -ex x = - ping x = pong (x + 1) - launchMissiles "saturn" - pong x = ping (x `Nat.drop` 1) - launchMissiles "neptune" diff --git a/unison-src/tests/methodical/dots.u b/unison-src/tests/methodical/dots.u deleted file mode 100644 index dcd584a560..0000000000 --- a/unison-src/tests/methodical/dots.u +++ /dev/null @@ -1,28 +0,0 @@ - --- You can define an operator called dot -(.) f g x = f (g x) - -id : ∀ a. a -> a -- dot still fine in type parser -id x = x - -id2 = id . id - --- You need a space or delimiter char after the dot, --- otherwise Unison assumes it's a rooted name - this will look for --- a term called `zonk` in the root: --- --- foo = id .zonk - --- You can define qualified functions -(base.function..) f g x = f (g x) - --- looks weird, but consistent syntax with any other infix binding -object oop.syntax.. method = method object - -ex = - use base.function . - (id . id) 42 - -ex2 = use oop.syntax .; 42 . id . 
id - -> (ex, ex2) diff --git a/unison-src/tests/methodical/dots.ur b/unison-src/tests/methodical/dots.ur deleted file mode 100644 index 7b97e73294..0000000000 --- a/unison-src/tests/methodical/dots.ur +++ /dev/null @@ -1 +0,0 @@ -(42,42) diff --git a/unison-src/tests/methodical/empty.u b/unison-src/tests/methodical/empty.u deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/unison-src/tests/methodical/empty2.u b/unison-src/tests/methodical/empty2.u deleted file mode 100644 index 9cfa2b17cc..0000000000 --- a/unison-src/tests/methodical/empty2.u +++ /dev/null @@ -1 +0,0 @@ --- This file is empty but has a comment diff --git a/unison-src/tests/methodical/empty3.u b/unison-src/tests/methodical/empty3.u deleted file mode 100644 index 3ee1a03056..0000000000 --- a/unison-src/tests/methodical/empty3.u +++ /dev/null @@ -1,3 +0,0 @@ ---- - -This file is empty but has some stuff below the fold diff --git a/unison-src/tests/methodical/exponential.u b/unison-src/tests/methodical/exponential.u deleted file mode 100644 index d7597653a9..0000000000 --- a/unison-src/tests/methodical/exponential.u +++ /dev/null @@ -1,5 +0,0 @@ -use Float exp log logBase - -> (exp 0.0, - log (exp 1.0), - logBase 10.0 100.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/exponential.ur b/unison-src/tests/methodical/exponential.ur deleted file mode 100644 index 78041679ec..0000000000 --- a/unison-src/tests/methodical/exponential.ur +++ /dev/null @@ -1,3 +0,0 @@ -(1.0, - 1.0, - 2.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/float.u b/unison-src/tests/methodical/float.u deleted file mode 100644 index 5fde45c0b4..0000000000 --- a/unison-src/tests/methodical/float.u +++ /dev/null @@ -1,15 +0,0 @@ -use Float abs max min toText fromText -use Optional Some None - -withDefault : Optional a -> a -> a -withDefault opt d = match opt with - Some x -> x - None -> d - -> (abs 1.1, - abs -1.1, - max 1.1 1.5, - min 1.1 1.5, - toText 1.1, - withDefault 
(fromText "1.5") -1.0, - withDefault (fromText "Hello world!") -1.0) diff --git a/unison-src/tests/methodical/float.ur b/unison-src/tests/methodical/float.ur deleted file mode 100644 index 1bbbd63b9b..0000000000 --- a/unison-src/tests/methodical/float.ur +++ /dev/null @@ -1,7 +0,0 @@ -(1.1, - 1.1, - 1.5, - 1.1, - "1.1", - 1.5, - -1.0) diff --git a/unison-src/tests/methodical/hyperbolic.u b/unison-src/tests/methodical/hyperbolic.u deleted file mode 100644 index 4d5850ad20..0000000000 --- a/unison-src/tests/methodical/hyperbolic.u +++ /dev/null @@ -1,8 +0,0 @@ -use Float acosh asinh atanh cosh sinh tanh - -> (acosh 1.0, - asinh 0.0, - atanh 0.0, - cosh 0.0, - sinh 0.0, - tanh 0.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/hyperbolic.ur b/unison-src/tests/methodical/hyperbolic.ur deleted file mode 100644 index 4556da9ce8..0000000000 --- a/unison-src/tests/methodical/hyperbolic.ur +++ /dev/null @@ -1,6 +0,0 @@ -(0.0, - 0.0, - 0.0, - 1.0, - 0.0, - 0.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/int.u b/unison-src/tests/methodical/int.u deleted file mode 100644 index 8fe2cb90c5..0000000000 --- a/unison-src/tests/methodical/int.u +++ /dev/null @@ -1,24 +0,0 @@ -use Int increment isEven isOdd signum negate mod pow shiftLeft shiftRight truncate0 toText fromText toFloat leadingZeros trailingZeros and or xor complement - -withDefault : Optional a -> a -> a -withDefault opt d = match opt with - Some x -> x - None -> d - -> (increment +3, - isEven +3, - isOdd +3, - signum +3, - negate +3, - mod +10 +3, - pow +10 3, - shiftLeft +7 2, - shiftRight +7 2, - truncate0 +3, - truncate0 -3, - withDefault (fromText "3x") -1, - withDefault (fromText "+3") -1, - leadingZeros +0, - leadingZeros +1, - leadingZeros +8, - toFloat +3) \ No newline at end of file diff --git a/unison-src/tests/methodical/int.ur b/unison-src/tests/methodical/int.ur deleted file mode 100644 index 7b7bd0c1d9..0000000000 --- a/unison-src/tests/methodical/int.ur 
+++ /dev/null @@ -1,17 +0,0 @@ -(+4, - false, - true, - +1, - -3, - +1, - +1000, - +28, - +1, - 3, - 0, - -1, - +3, - +64, - +63, - +60, - 3.0) diff --git a/unison-src/tests/methodical/let.u b/unison-src/tests/methodical/let.u deleted file mode 100644 index 97a9690226..0000000000 --- a/unison-src/tests/methodical/let.u +++ /dev/null @@ -1,12 +0,0 @@ - --- LET -c0 = - a = 1000 - b = 100 - c = 10 - d = 1 - [a + b + c + d, b + c + d, c + d, d] - --- Make sure we can push values onto the stack and reference them as expected -> c0 - diff --git a/unison-src/tests/methodical/let.ur b/unison-src/tests/methodical/let.ur deleted file mode 100644 index ad1a2189f6..0000000000 --- a/unison-src/tests/methodical/let.ur +++ /dev/null @@ -1 +0,0 @@ -[1111,111,11,1] diff --git a/unison-src/tests/methodical/literals.u b/unison-src/tests/methodical/literals.u deleted file mode 100644 index f8cb3a2945..0000000000 --- a/unison-src/tests/methodical/literals.u +++ /dev/null @@ -1,12 +0,0 @@ - --- LITERALS -ln = 10 -li = +10 -lf = 10.0 -lt = "text" -lc = ?ひ -lb = true -sn = [1,2,3,4,5,6,ln] - --- Make sure we can compile all literals -> (ln, li, lf, lt, lc, lb, sn) diff --git a/unison-src/tests/methodical/literals.ur b/unison-src/tests/methodical/literals.ur deleted file mode 100644 index af9c7335a3..0000000000 --- a/unison-src/tests/methodical/literals.ur +++ /dev/null @@ -1 +0,0 @@ -(10, +10, 10.0, "text", ?ひ, true, [1,2,3,4,5,6,10]) diff --git a/unison-src/tests/methodical/loop.u b/unison-src/tests/methodical/loop.u deleted file mode 100644 index 7d5a2484f4..0000000000 --- a/unison-src/tests/methodical/loop.u +++ /dev/null @@ -1,8 +0,0 @@ - -use Universal == - -loop acc n = - if n == 0 then acc - else loop (acc + n) (n `drop` 1) - -> loop 0 10000 diff --git a/unison-src/tests/methodical/nat.u b/unison-src/tests/methodical/nat.u deleted file mode 100644 index baa4a3c95f..0000000000 --- a/unison-src/tests/methodical/nat.u +++ /dev/null @@ -1,31 +0,0 @@ -use Nat drop fromText increment 
isEven isOdd mod pow shiftLeft shiftRight sub toFloat toInt toText trailingZeros leadingZeros and or xor complement - -withDefault : Optional a -> a -> a -withDefault opt d = match opt with - Some x -> x - None -> d - -> (withDefault (fromText "3") 0, - drop 3 2, - increment 3, - isEven 3, - isOdd 3, - mod 10 3, - pow 10 3, - shiftLeft 7 2, - shiftRight 7 2, - trailingZeros 0, - leadingZeros 1, - leadingZeros 8, - trailingZeros 0, - trailingZeros 1, - trailingZeros 8, - 7 `and` 10, - 7 `or` 10, - 7 `xor` 14, - complement 0, - sub 3 2, - toFloat 3, - toInt 3, - toText 3) - \ No newline at end of file diff --git a/unison-src/tests/methodical/nat.ur b/unison-src/tests/methodical/nat.ur deleted file mode 100644 index 77e2ca1f9c..0000000000 --- a/unison-src/tests/methodical/nat.ur +++ /dev/null @@ -1,23 +0,0 @@ -(3, - 1, - 4, - false, - true, - 1, - 1000, - 28, - 1, - 64, - 63, - 60, - 64, - 0, - 3, - 2, - 15, - 9, - 18446744073709551615, - +1, - 3.0, - +3, - "3") diff --git a/unison-src/tests/methodical/overapply-ability.u b/unison-src/tests/methodical/overapply-ability.u deleted file mode 100644 index 539871c4f4..0000000000 --- a/unison-src/tests/methodical/overapply-ability.u +++ /dev/null @@ -1,47 +0,0 @@ - --- A corner case in the runtime is when a function is being overapplied and --- the exactly applied function requests an ability (and returns a new function) - -ability Zing where - zing : Nat -> {Zing} (Nat -> Nat) - zing2 : Nat -> Nat ->{Zing} (Nat -> Nat -> [Nat]) - -unzing = cases - {a} -> a - {Zing.zing n -> k} -> handle k (x -> x `drop` n) with unzing - {Zing.zing2 n1 n2 -> k} -> handle k (n3 n4 -> [n1, n2, n3, n4]) with unzing - -exacth = handle - f = Zing.zing 3 - f 20 + 1 - with unzing - -overapplyh = handle - Zing.zing 3 20 + 1 - with unzing - --- SEQUENCES with abilities - -sequence1 = handle [Zing.zing 1 4] with unzing -sequence2 = handle [Zing.zing 1 4, Zing.zing 1 4] with unzing -sequence3 = handle [Zing.zing 1 4, Zing.zing 2 4, Zing.zing 3 4, 
Zing.zing 4 4] with unzing - --- Overapply of requests - -overapplyh2 = handle Zing.zing2 1 2 3 4 with unzing - -overapplyh3a = handle Zing.zing2 1 2 3 4 ++ [5] with unzing - -overapplyh3b = handle Zing.zing2 1 2 3 4 ++ [5, Zing.zing 2 8] with unzing - -overapplyh3c = handle Zing.zing2 1 2 3 4 ++ [5, Zing.zing 2 7 + 1] with unzing - -> (exacth, - overapplyh, - sequence1, - sequence2, - sequence3, - overapplyh2, - overapplyh3a, - overapplyh3b, - overapplyh3c) diff --git a/unison-src/tests/methodical/overapply-ability.ur b/unison-src/tests/methodical/overapply-ability.ur deleted file mode 100644 index 879c57ffb3..0000000000 --- a/unison-src/tests/methodical/overapply-ability.ur +++ /dev/null @@ -1 +0,0 @@ -(18, 18, [3], [3, 3], [3, 2, 1, 0], [1,2,3,4], [1,2,3,4,5], [1,2,3,4,5,6], [1,2,3,4,5,6]) diff --git a/unison-src/tests/methodical/parens.u b/unison-src/tests/methodical/parens.u deleted file mode 100644 index d99181ac07..0000000000 --- a/unison-src/tests/methodical/parens.u +++ /dev/null @@ -1,27 +0,0 @@ -ex1 = '(let - use List ++ - [1] ++ [3]) - -ex2 = '(let - use List ++ - [1] ++ [3] -) - -ex3 = '(let - use List ++ - [1] ++ [3] - ) - -ex4 = '( - let - use List ++ - [1] ++ [3]; [4] -) - -ex5 = '( - let - use List ++ - [1] ++ [3] - ) - -> (ex1, ex2, ex3, ex4, ex5) diff --git a/unison-src/tests/methodical/pattern-matching.u b/unison-src/tests/methodical/pattern-matching.u deleted file mode 100644 index 4feedfacf2..0000000000 --- a/unison-src/tests/methodical/pattern-matching.u +++ /dev/null @@ -1,28 +0,0 @@ - -use Universal == - --- PATTERN MATCHING - -pat1 x y p = match p with x0 -> (x0, x, y, p) - -pat2 x y p = match p with _ -> (x, y, p) - -pat3 x y = cases (x, y) -> (y, x) - -pat4 x y = cases (p1, _) -> (x, y, p1) - -pat5 x y = cases (_, p2) -> (x, y, p2) - -pat6 x y = cases (p1, _) -> (x + y : Nat, p1) - -pat7 x y = cases - (p1, _) | p1 == 9 -> (x + y : Nat, p1) - (p1, _) | true -> (0, p1) - -> (pat1 0 1 (2, 3), - pat2 0 1 "hi", - pat3 0 1 (2, 3), - pat4 0 1 
(2, 3), - pat5 0 1 (3, 2), - pat6 1 2 (3, 4), - pat7 1 2 (20, 10)) diff --git a/unison-src/tests/methodical/pattern-matching.ur b/unison-src/tests/methodical/pattern-matching.ur deleted file mode 100644 index 6782995807..0000000000 --- a/unison-src/tests/methodical/pattern-matching.ur +++ /dev/null @@ -1,7 +0,0 @@ -(((2,3), 0, 1, (2,3)), - (0, 1, "hi"), - (3, 2), - (0, 1, 2), - (0, 1, 2), - (3, 3), - (0, 20)) diff --git a/unison-src/tests/methodical/power.u b/unison-src/tests/methodical/power.u deleted file mode 100644 index 055298e622..0000000000 --- a/unison-src/tests/methodical/power.u +++ /dev/null @@ -1,4 +0,0 @@ -use Float pow sqrt - -> (pow 10.0 2.0, - sqrt 4.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/power.ur b/unison-src/tests/methodical/power.ur deleted file mode 100644 index b25571e9c5..0000000000 --- a/unison-src/tests/methodical/power.ur +++ /dev/null @@ -1,2 +0,0 @@ -(100.0, - 2.0 \ No newline at end of file diff --git a/unison-src/tests/methodical/rank2.u b/unison-src/tests/methodical/rank2.u deleted file mode 100644 index 6afe1e6ed8..0000000000 --- a/unison-src/tests/methodical/rank2.u +++ /dev/null @@ -1,9 +0,0 @@ - --- This should typecheck, since `∀ a . a -> a` can be passed in place of --- a `Nat -> Nat`. Verifies that subtyping of `->` is contravariant in the --- input types. -rank2b : (∀ a . 
a -> a) -> Nat -rank2b = - inner : (Nat -> Nat) -> Nat - inner f = 42 - inner diff --git a/unison-src/tests/methodical/rounding.u b/unison-src/tests/methodical/rounding.u deleted file mode 100644 index ac0023f875..0000000000 --- a/unison-src/tests/methodical/rounding.u +++ /dev/null @@ -1,8 +0,0 @@ -use Float ceiling floor round truncate - -> (ceiling 1.1, - floor 1.7, - round 1.1, - round 1.7, - truncate 1.1, - truncate -1.1) \ No newline at end of file diff --git a/unison-src/tests/methodical/rounding.ur b/unison-src/tests/methodical/rounding.ur deleted file mode 100644 index 14a1c53a4d..0000000000 --- a/unison-src/tests/methodical/rounding.ur +++ /dev/null @@ -1,6 +0,0 @@ -(+2, - +1, - +1, - +2, - +1, - -1) diff --git a/unison-src/tests/methodical/scopedtypevars.u b/unison-src/tests/methodical/scopedtypevars.u deleted file mode 100644 index aaf8904bde..0000000000 --- a/unison-src/tests/methodical/scopedtypevars.u +++ /dev/null @@ -1,29 +0,0 @@ - -ex1 : x -> y -> x -ex1 a b = - temp : x -- refers to the variable in the outer scope - temp = a - a - -ex2 : x -> y -> x -ex2 a b = - id : ∀ x . 
x -> x -- doesn't refer the variable in outer scope - id x = x - id 42 - id a - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - use List ++ drop at snoc - use Optional None Some - go : [a] -> [a] -> [a] -> [a] -- refers to the outer `a` type - go acc a b = match at 0 a with - None -> acc ++ b - Some hd1 -> match at 0 b with - None -> acc ++ a - Some hd2 -> - if hd1 `lte` hd2 then - go (acc `snoc` hd1) (drop 1 a) b - else - go (acc `snoc` hd2) a (drop 1 b) - go [] a b diff --git a/unison-src/tests/methodical/semis.u b/unison-src/tests/methodical/semis.u deleted file mode 100644 index 5853665f8f..0000000000 --- a/unison-src/tests/methodical/semis.u +++ /dev/null @@ -1,13 +0,0 @@ - --- explicit semicolons allowed inside the block parser --- no need for a space either before or after -x = 0; 1 -y = 0;1 -z = x;y -(**) = x ; y -a = (**);(**);z -p = 1;-- comments are okay after a semi - 1 -- and you can put a semi between lines, - -- even if a virtual semi would have been emitted - -> (x,y,z,(**),a,p) diff --git a/unison-src/tests/methodical/semis.ur b/unison-src/tests/methodical/semis.ur deleted file mode 100644 index 4573fd6ebc..0000000000 --- a/unison-src/tests/methodical/semis.ur +++ /dev/null @@ -1 +0,0 @@ -(1,1,1,1,1,1) diff --git a/unison-src/tests/methodical/trig.u b/unison-src/tests/methodical/trig.u deleted file mode 100644 index a7461ab36c..0000000000 --- a/unison-src/tests/methodical/trig.u +++ /dev/null @@ -1,9 +0,0 @@ -use Float acos asin atan atan2 cos sin tan - -> (cos 0.0, - sin 0.0, - tan 0.0, - acos (cos 0.0), - asin (sin 0.0), - atan (tan 0.0), - atan2 0.0 0.0) diff --git a/unison-src/tests/methodical/trig.ur b/unison-src/tests/methodical/trig.ur deleted file mode 100644 index 10b3c403d4..0000000000 --- a/unison-src/tests/methodical/trig.ur +++ /dev/null @@ -1,7 +0,0 @@ -(1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/universals.u 
b/unison-src/tests/methodical/universals.u deleted file mode 100644 index 29d5cb117e..0000000000 --- a/unison-src/tests/methodical/universals.u +++ /dev/null @@ -1,20 +0,0 @@ -use Universal == < > <= >= compare - -> ([1,2,3] `compare` [1,2,3], - [1,2,3] `compare` [1], - [1] `compare` [1,2,3], - ?a `compare` ?b, - ("hi", "there") == ("hi", "there"), - 1 < 1, - 1 < 2, - 2 < 1, - 1 <= 2, - 1 <= 1, - 2 <= 1, - 0 > 1, - 1 > 0, - 0 > 0, - 0 >= 0, - 1 >= 0, - 0 >= 1) - diff --git a/unison-src/tests/methodical/universals.ur b/unison-src/tests/methodical/universals.ur deleted file mode 100644 index 6b7d581ce3..0000000000 --- a/unison-src/tests/methodical/universals.ur +++ /dev/null @@ -1,17 +0,0 @@ -(+0, - +1, - -1, - -1, - true, - false, - true, - false, - true, - true, - false, - false, - true, - false, - true, - true, - false) diff --git a/unison-src/tests/methodical/wildcardimports.u b/unison-src/tests/methodical/wildcardimports.u deleted file mode 100644 index 4882ed8e79..0000000000 --- a/unison-src/tests/methodical/wildcardimports.u +++ /dev/null @@ -1,6 +0,0 @@ - -use Text - --- note: this `drop` call would be ambiguous normally (`Bytes.drop`, `Nat.drop`...) 
--- but the wildcard import of `Text` brings it into scope -foo x = drop 10 x diff --git a/unison-src/tests/multiple-effects.u b/unison-src/tests/multiple-effects.u deleted file mode 100644 index a10e82736b..0000000000 --- a/unison-src/tests/multiple-effects.u +++ /dev/null @@ -1,17 +0,0 @@ -ability State s where - get : {State s} s - set : s -> {State s} () - -ability Console where - read : {Console} (Optional Text) - write : Text -> {Console} () - -namespace Console where - state : s -> Request (State s) a -> a - state s = cases - {State.get -> k} -> handle k s with state s - {State.set s' -> k} -> handle k () with state s' - {a} -> a - -multiHandler : s -> [w] -> Nat -> Request {State s, Console} a -> () -multiHandler _ _ _ _ = () diff --git a/unison-src/tests/one-liners.uu b/unison-src/tests/one-liners.uu deleted file mode 100644 index e8ee998e18..0000000000 --- a/unison-src/tests/one-liners.uu +++ /dev/null @@ -1,2 +0,0 @@ -(if true then 1 else 2) : Nat -(if true then (x -> x) else (x -> x) : forall a . 
a -> a) diff --git a/unison-src/tests/parenthesized-blocks.u b/unison-src/tests/parenthesized-blocks.u deleted file mode 100644 index 5824dbbec0..0000000000 --- a/unison-src/tests/parenthesized-blocks.u +++ /dev/null @@ -1,5 +0,0 @@ - -x = (if true then 1 else 0) + 1 -y = (match 1 with 1 -> 1) + 1 - -> (x, y) diff --git a/unison-src/tests/parenthesized-blocks.ur b/unison-src/tests/parenthesized-blocks.ur deleted file mode 100644 index 529bf66567..0000000000 --- a/unison-src/tests/parenthesized-blocks.ur +++ /dev/null @@ -1 +0,0 @@ -(2, 2) diff --git a/unison-src/tests/pattern-match-seq.u b/unison-src/tests/pattern-match-seq.u deleted file mode 100644 index 58c0a04840..0000000000 --- a/unison-src/tests/pattern-match-seq.u +++ /dev/null @@ -1,86 +0,0 @@ -use Optional None Some - -optionToList : Optional a -> [a] -optionToList = cases - Some a -> [a] - None -> [] - -lenLit : [a] -> Nat -lenLit = cases - [] -> 0 - [_] -> 1 - [_, _] -> 2 - [_, _, _] -> 3 - -lenCons : [a] -> Nat -lenCons = cases - [] -> 0 - _ +: t -> 1 + lenCons t - _ +: (_ +: t) -> 2 + lenCons t - -lenSnoc : [a] -> Nat -lenSnoc = cases - [] -> 0 - t :+ _ -> 1 + lenSnoc t - -lenConcat1 : [a] -> Nat -lenConcat1 = cases - [] -> 0 - [_] ++ tail -> 1 + lenConcat1 tail - -lenConcat2 : [a] -> Nat -lenConcat2 = cases - [] -> 0 - prefix ++ [_] -> 1 + lenConcat2 prefix - -head : [a] -> Optional a -head = cases - h +: _ -> Some h - _ -> None - -firstTwo : [a] -> Optional (a, a) -firstTwo = cases - x +: (y +: _) -> Some (x, y) - _ -> None - -lastTwo : [a] -> Optional (a, a) -lastTwo = cases - _ :+ x :+ y -> Some (x, y) - _ -> None - -middle : [a] -> Optional [a] -middle = cases - [_] ++ m ++ [_] -> Some m - _ -> None - -middleNel : [a] -> Optional (a, [a]) -middleNel = cases - [_] ++ (h +: t) ++ [_] -> Some (h, t) - _ -> None - -splitAtFour : [a] -> ([a], [a]) -splitAtFour l = match l with - [a] ++ x@(b +: (c +: y@([] :+ d))) ++ tail -> ([a, b, c, d], tail) - _ -> (l, []) - -> ( - lenLit [1, 2, 3], - lenCons [1, 2, 
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - lenSnoc [1, 2, 3, 4, 5, 6, 7, 8], - lenConcat1 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], - lenConcat2 [1, 2, 3, 4, 5], - optionToList (head []), - optionToList (head [1, 2, 3]), - optionToList (firstTwo []), - optionToList (firstTwo [1]), - optionToList (firstTwo [1, 2, 3]), - optionToList (lastTwo []), - optionToList (lastTwo [1]), - optionToList (lastTwo [1, 2, 3]), - optionToList (middle []), - optionToList (middle [1, 2]), - optionToList (middle [1, 2, 3, 4, 5, 6]), - optionToList (middleNel []), - optionToList (middleNel [1, 2]), - optionToList (middleNel [1, 2, 3, 4, 5, 6]), - splitAtFour [1, 2, 3, 4, 5, 6, 7]) diff --git a/unison-src/tests/pattern-match-seq.ur b/unison-src/tests/pattern-match-seq.ur deleted file mode 100644 index 323ca8182f..0000000000 --- a/unison-src/tests/pattern-match-seq.ur +++ /dev/null @@ -1,20 +0,0 @@ -( 3, - 15, - 8, - 11, - 5, - [], - [1], - [], - [], - [(1, 2)], - [], - [], - [(2, 3)], - [], - [[]], - [[2, 3, 4, 5]], - [], - [], - [(2, [3, 4, 5])], - ([1, 2, 3, 4], [5, 6, 7]) ) diff --git a/unison-src/tests/pattern-matching.u b/unison-src/tests/pattern-matching.u deleted file mode 100644 index a1403ac474..0000000000 --- a/unison-src/tests/pattern-matching.u +++ /dev/null @@ -1,36 +0,0 @@ -type Foo0 = Foo0 -type Foo1 a = Foo1 a -type Foo2 a b = Foo2 a b -type Foo3 a b c = Foo3 a b c -type List a = Nil | Cons a (List a) - -use Foo0 Foo0 -use Foo1 Foo1 -use Foo2 Foo2 - -x = match Foo0 with - Foo0 -> 1 - -y = match Foo1 1 with - Foo1 1 -> 0 - Foo1 _ -> 10 - -z = match Foo2 1 "hi" with - Foo2 x _ -> x - Foo2 1 _ -> 1 - -w = match Foo3.Foo3 1 2 "bye" with - Foo3.Foo3 1 2 x -> x Text.++ "bye" - _ -> "" - -w2 = cases - Foo3.Foo3 1 4 x -> x Text.++ "bye" - Foo3.Foo3 x y z -> z Text.++ z - _ -> "hi" - -len : List a -> Nat -len = cases - List.Nil -> 0 - List.Cons _ t -> len t + 1 - -> (w, w2, len) diff --git a/unison-src/tests/pattern-matching2.u b/unison-src/tests/pattern-matching2.u deleted file 
mode 100644 index 7bd1bf069b..0000000000 --- a/unison-src/tests/pattern-matching2.u +++ /dev/null @@ -1,21 +0,0 @@ -type Foo0 = Foo0 -type Foo1 a = Foo1 a -type Foo2 a b = Foo2 a b -type Foo3 a b c = Foo3 a b c - -use Foo0 Foo0 -use Foo1 Foo1 -use Foo2 Foo2 - -x = match Foo0 with - Foo0 -> 1 - -y = match Foo1 1 with - Foo1 1 -> 0 - Foo1 _ -> 10 - -z = match Foo2 1 "hi" with - Foo2 x "bye" -> x - Foo2 1 "hi" -> 1 - -> z diff --git a/unison-src/tests/pattern-typing-bug.u b/unison-src/tests/pattern-typing-bug.u deleted file mode 100644 index 5ac1d44814..0000000000 --- a/unison-src/tests/pattern-typing-bug.u +++ /dev/null @@ -1,9 +0,0 @@ -type Value = String Text - | Bool Boolean - -f : Value -> Nat -f = cases - Value.Bool true -> 3 - _ -> 4 - -> f (Value.String "foo") diff --git a/unison-src/tests/pattern-typing-bug.ur b/unison-src/tests/pattern-typing-bug.ur deleted file mode 100644 index b8626c4cff..0000000000 --- a/unison-src/tests/pattern-typing-bug.ur +++ /dev/null @@ -1 +0,0 @@ -4 diff --git a/unison-src/tests/pattern-weirdness.u b/unison-src/tests/pattern-weirdness.u deleted file mode 100644 index e139078013..0000000000 --- a/unison-src/tests/pattern-weirdness.u +++ /dev/null @@ -1,16 +0,0 @@ -go = - a = 1 - match "" with - pos | false -> a - _ -> a - -> go - --- 7 | > go --- ⧩ --- "" - --- should be 1, not "" - --- seems to have something to do with the wildcard + guard, as changing or --- or deleting that line makes the problem go away diff --git a/unison-src/tests/pattern-weirdness.ur b/unison-src/tests/pattern-weirdness.ur deleted file mode 100644 index d00491fd7e..0000000000 --- a/unison-src/tests/pattern-weirdness.ur +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/unison-src/tests/quote-parse-bug.uu b/unison-src/tests/quote-parse-bug.uu deleted file mode 100644 index 5b7935fa38..0000000000 --- a/unison-src/tests/quote-parse-bug.uu +++ /dev/null @@ -1,6 +0,0 @@ - -blah : a -> a -> a -blah a a2 = a2 - -> blah '"hi" '"arya" - diff --git a/unison-src/tests/r0.u 
b/unison-src/tests/r0.u deleted file mode 100644 index c878aa490a..0000000000 --- a/unison-src/tests/r0.u +++ /dev/null @@ -1,5 +0,0 @@ -r0 : Nat -r0 = match Optional.Some 3 with - x -> 1 - -> r0 diff --git a/unison-src/tests/r1.u b/unison-src/tests/r1.u deleted file mode 100644 index 855e2d2bf1..0000000000 --- a/unison-src/tests/r1.u +++ /dev/null @@ -1,6 +0,0 @@ ---r1 -type Optional a = None | Some a -r1 : Nat -r1 = match Optional.Some 3 with - x -> 1 - diff --git a/unison-src/tests/r10.u b/unison-src/tests/r10.u deleted file mode 100644 index 6cc2d5a6ce..0000000000 --- a/unison-src/tests/r10.u +++ /dev/null @@ -1,5 +0,0 @@ -r10 : Nat -r10 = match 1 with - 1 | true -> 3 - _ -> 4 - diff --git a/unison-src/tests/r11.u b/unison-src/tests/r11.u deleted file mode 100644 index 0e94ea072c..0000000000 --- a/unison-src/tests/r11.u +++ /dev/null @@ -1,7 +0,0 @@ -use Universal == - -r11 : Nat -r11 = match 1 with - 1 | 2 == 3 -> 4 - _ -> 5 - diff --git a/unison-src/tests/r12.u b/unison-src/tests/r12.u deleted file mode 100644 index 6c3a0de852..0000000000 --- a/unison-src/tests/r12.u +++ /dev/null @@ -1,4 +0,0 @@ -r12 : Nat -r12 = (x -> x) 64 - - diff --git a/unison-src/tests/r13.u b/unison-src/tests/r13.u deleted file mode 100644 index f44048ea00..0000000000 --- a/unison-src/tests/r13.u +++ /dev/null @@ -1,5 +0,0 @@ -r13 : (Nat, Text) -r13 = - id = ((x -> x): forall a. a -> a) - (id 10, id "foo") - diff --git a/unison-src/tests/r14.u b/unison-src/tests/r14.u deleted file mode 100644 index 3922c6613a..0000000000 --- a/unison-src/tests/r14.u +++ /dev/null @@ -1,4 +0,0 @@ -r14 : (forall a . 
a -> a) -> (Nat, Text) -r14 id = (id 10, id "foo") - - diff --git a/unison-src/tests/r2.u b/unison-src/tests/r2.u deleted file mode 100644 index a3b925bc1e..0000000000 --- a/unison-src/tests/r2.u +++ /dev/null @@ -1,6 +0,0 @@ -type Optional a = None | Some a -r2 : Nat -r2 = match Optional.Some true with - Optional.Some true -> 1 - Optional.Some false -> 0 - diff --git a/unison-src/tests/r3.u b/unison-src/tests/r3.u deleted file mode 100644 index 74b76105f8..0000000000 --- a/unison-src/tests/r3.u +++ /dev/null @@ -1,6 +0,0 @@ -r3 : Nat -r3 = match Optional.Some true with - Optional.Some true -> 1 - Optional.Some false -> 0 - - diff --git a/unison-src/tests/r4negate.u b/unison-src/tests/r4negate.u deleted file mode 100644 index ea19a3b4bb..0000000000 --- a/unison-src/tests/r4negate.u +++ /dev/null @@ -1,5 +0,0 @@ -r4 : Int -> Int -r4 x = match x with - +1 -> -1 - _ -> Int.negate x - diff --git a/unison-src/tests/r4x.u b/unison-src/tests/r4x.u deleted file mode 100644 index 1e7123f6ec..0000000000 --- a/unison-src/tests/r4x.u +++ /dev/null @@ -1,3 +0,0 @@ -r4 : Int -> Int -r4 = cases - +1 -> +1 diff --git a/unison-src/tests/r5.u b/unison-src/tests/r5.u deleted file mode 100644 index 249bf9e034..0000000000 --- a/unison-src/tests/r5.u +++ /dev/null @@ -1,6 +0,0 @@ -r5 : Float -r5 = match 2.2 with - 2.2 -> 3.0 - _ -> 1.0 - - diff --git a/unison-src/tests/r6.u b/unison-src/tests/r6.u deleted file mode 100644 index 34a3ab4224..0000000000 --- a/unison-src/tests/r6.u +++ /dev/null @@ -1,4 +0,0 @@ -r6 : () -r6 = match () with - () -> () - diff --git a/unison-src/tests/r7.0.u b/unison-src/tests/r7.0.u deleted file mode 100644 index fe8fbbdb34..0000000000 --- a/unison-src/tests/r7.0.u +++ /dev/null @@ -1,6 +0,0 @@ -r7 : Nat -r7 = match () with - () -> 1 - -> r7 - diff --git a/unison-src/tests/r7.1.u b/unison-src/tests/r7.1.u deleted file mode 100644 index 5f1eab0958..0000000000 --- a/unison-src/tests/r7.1.u +++ /dev/null @@ -1,5 +0,0 @@ -r7 : Nat -r7 = match () with - x@() -> 1 
- - diff --git a/unison-src/tests/r7.2.u b/unison-src/tests/r7.2.u deleted file mode 100644 index 5ca723ffc1..0000000000 --- a/unison-src/tests/r7.2.u +++ /dev/null @@ -1,4 +0,0 @@ -r7 : () -r7 = match () with - x@() -> x - diff --git a/unison-src/tests/r8.u b/unison-src/tests/r8.u deleted file mode 100644 index f745983558..0000000000 --- a/unison-src/tests/r8.u +++ /dev/null @@ -1,5 +0,0 @@ -r8 = match (1,(2,(3,(4,(5,(6,(7,8))))))) with - (x,(y,(_,_))) -> 0 - -> r8 - diff --git a/unison-src/tests/r9.u b/unison-src/tests/r9.u deleted file mode 100644 index e274d714d1..0000000000 --- a/unison-src/tests/r9.u +++ /dev/null @@ -1,11 +0,0 @@ -r9 : Nat -r9 = match 1 with - 9 -> 9 - 8 -> 8 - 7 -> 7 - 6 -> 6 - 5 -> 5 - _ -> 1 - -> r9 - diff --git a/unison-src/tests/rainbow.u b/unison-src/tests/rainbow.u deleted file mode 100644 index 378118d1d5..0000000000 --- a/unison-src/tests/rainbow.u +++ /dev/null @@ -1,32 +0,0 @@ --- Hits all the syntactic elements listed in SyntaxHighlights.hs. --- Use the 'view' command to see this in colour. 
- -rainbow : Int ->{Ask Int} Int -rainbow x = - use Int isEven - number = 3 - text = "hello" - float = 3.14 - bool = false - lam z = - use Nat * + - z + 1 * 2 - seq = [1, 2, 3] - delay : '(Int -> Boolean) - delay _ = isEven - force = !delay +2 - a = if isEven x then Either.Left 0 else Either.Right 0 - b = if isEven x then 1 else 0 - c = match x with _ -> 3 - d = (Ask.ask : Int) - +42 - -ability Ask a where - ask : {Ask a} a - -type Either a b = Left a | Right b - -unique ability Zang where - zang : {Zang} Nat - -> () diff --git a/unison-src/tests/records.u b/unison-src/tests/records.u deleted file mode 100644 index 2528896a65..0000000000 --- a/unison-src/tests/records.u +++ /dev/null @@ -1,12 +0,0 @@ - -type Point x y = { x : x, y : y } - -type Point2 = { point2 : Nat, f : Nat } - -type Monoid a = { zero : a, plus : a -> a -> a } - -> Point.x.set 10 (Point 0 0) -> Point.x (Point 10 0) -> Point.y (Point 0 10) -> Point.x.modify ((+) 1) (Point 0 0) -> Point.y.modify ((+) 1) (Point 0 0) diff --git a/unison-src/tests/runtime-crash.uu b/unison-src/tests/runtime-crash.uu deleted file mode 100644 index dc5ba83a32..0000000000 --- a/unison-src/tests/runtime-crash.uu +++ /dev/null @@ -1,13 +0,0 @@ - -drop1 = Text.drop 1 - -> drop1 "heyo" - ---- - -gives a runtime error - - -unison: user error (type error, expecting N, got "heyo") - -which indicates it is going to the runtime stack to get that `1` value, -rather than pulling it from the arguments diff --git a/unison-src/tests/sequence-at-0.u b/unison-src/tests/sequence-at-0.u deleted file mode 100644 index 37f6429351..0000000000 --- a/unison-src/tests/sequence-at-0.u +++ /dev/null @@ -1,2 +0,0 @@ -> match at 0 [100] with - Optional.Some _ -> "Hooray!" 
diff --git a/unison-src/tests/sequence-literal-argument-parsing.u b/unison-src/tests/sequence-literal-argument-parsing.u deleted file mode 100644 index 8005a67566..0000000000 --- a/unison-src/tests/sequence-literal-argument-parsing.u +++ /dev/null @@ -1,5 +0,0 @@ -type X a = X [a] - -f : X a -> a -f = cases - X.X [b] -> b diff --git a/unison-src/tests/sequence-literal.u b/unison-src/tests/sequence-literal.u deleted file mode 100644 index f2d43795cd..0000000000 --- a/unison-src/tests/sequence-literal.u +++ /dev/null @@ -1,19 +0,0 @@ -a = [1,2,3] -b = [1 ,2 ,3 - ] -c = [ 1 , 2 , 3 ] -d = [ 1 - , 2 - , 3 ] -e = [ 1 - , 2 - , 3 - ] -f = - [ 1 - , 2 - , 3 - ] -g = [ 1 - , 2, - 3 ] diff --git a/unison-src/tests/soe.u b/unison-src/tests/soe.u deleted file mode 100644 index 0acf142239..0000000000 --- a/unison-src/tests/soe.u +++ /dev/null @@ -1,124 +0,0 @@ - -use Universal == < - -type Future a = Future ('{Remote} a) - --- A simple distributed computation ability -ability Remote where - - -- Spawn a new node - spawn : {Remote} Node - - -- Sequentially evaluate the given thunk on another node - -- then return to the current node when it completes - at : n -> '{Remote} a -> {Remote} a - - -- Start a computation running, returning an `r` that can be forced to - -- await the result of the computation - fork : '{Remote} a ->{Remote} Future a - -type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair - -force : Future a ->{Remote} a -force = cases Future.Future r -> !r - --- Let's test out this beast! do we need to deploy our code to some EC2 instances?? --- Gak, no not yet, we just want to test locally, let's write a handler --- for the `Remote` ability that simulates everything locally! 
- -Remote.runLocal : '{Remote} a -> a -Remote.runLocal r = - use Future Future - step nid = cases - {a} -> a - {Remote.fork t -> k} -> handle k (Future t) with step nid - {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) - {Remote.at _ t -> k} -> handle k !t with step nid - handle !r with step (Node.Node 0) - -Remote.forkAt : Node -> '{Remote} a ->{Remote} (Future a) -Remote.forkAt node r = Remote.fork '(Remote.at node r) - -use Optional None Some -use Monoid Monoid -use List ++ - -List.map : (a ->{e} b) -> [a] ->{e} [b] -List.map f as = - go f acc as i = match List.at i as with - None -> acc - Some a -> go f (acc `snoc` f a) as (i + 1) - go f [] as 0 - -type Monoid a = Monoid (a -> a -> a) a - -Monoid.zero = cases Monoid.Monoid op z -> z -Monoid.op = cases Monoid.Monoid op z -> op - -Monoid.orElse m = cases - None -> Monoid.zero m - Some a -> a - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - go acc a b = match List.at 0 a with - None -> acc ++ b - Some hd1 -> match at 0 b with - None -> acc ++ a - Some hd2 -> - if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b - else go (acc `snoc` hd2) a (drop 1 b) - go [] a b - -dmap : (a ->{Remote} b) -> [a] ->{Remote} [b] -dmap f as = - bs = List.map (a -> Remote.forkAt Remote.spawn '(f a)) as - List.map force bs - -dreduce : Monoid a -> [a] ->{Remote} a -dreduce m a = - if size a < 2 then Monoid.orElse m (List.at 0 a) - else - l = Remote.forkAt Remote.spawn '(dreduce m (take (size a / 2) a)) - r = Remote.forkAt Remote.spawn '(dreduce m (drop (size a / 2) a)) - Monoid.op m (force l) (force r) - -dmapReduce : (a ->{Remote} b) -> Monoid b -> [a] ->{Remote} b -dmapReduce f m as = dreduce m (List.map f as) - -dsort : (a -> a -> Boolean) -> [a] ->{Remote} [a] -dsort lte a = - dmapReduce (a -> [a]) (Monoid (merge lte) []) a - -sort : (a -> a -> Boolean) -> [a] -> [a] -sort lte a = - if List.size a < 2 then a - else - l = sort lte (take (size a / 2) a) - r = sort lte (drop (size a / 2) a) 
- merge lte l r - -Node.increment : Node -> Node -Node.increment n = - use Node Node -- the constructor - match n with Node n -> Node (n + 1) - -> Remote.runLocal '(dsort (<) [3,2,1,1,2,3,9182,1,2,34,1,23]) - -halve : [a] -> Optional ([a], [a]) -halve a = - if size a == 0 then None - else Some (take (size a / 2) a, drop (size a / 2) a) - -foldMap : (a -> b) -> Monoid b -> [a] -> b -foldMap f m a = - base a = match List.at 0 a with - None -> zero m - Some a -> f a - if size a < 2 then base a - else match halve a with - None -> zero m - Some (l, r) -> op m (foldMap f m l) (foldMap f m r) - -> foldMap (x -> x) (Monoid (+) 0) [1] -> Remote.runLocal '(dmap (x -> x + 1) [1,2,3,4]) diff --git a/unison-src/tests/soe2.u b/unison-src/tests/soe2.u deleted file mode 100644 index 86001e78f0..0000000000 --- a/unison-src/tests/soe2.u +++ /dev/null @@ -1,47 +0,0 @@ -use Universal == < -use Optional None Some - -uncons : [a] -> Optional (a, [a]) -uncons a = match at 0 a with - None -> None - Some hd -> Some (hd, drop 1 a) - -merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] -merge lte a b = - go acc a b = match (uncons a, uncons b) with - (None, _) -> acc ++ b - (_, None) -> acc ++ a - (Some (h1,t1), Some (h2,t2)) -> - if h1 `lte` h2 then go (acc `snoc` h1) (drop 1 a) b - else go (acc `snoc` h2) a (drop 1 b) - go [] a b - --- let's make sure it works -> merge (<) [1,3,4,99,504,799] [0,19,22,23] - -isEmpty : [a] -> Boolean -isEmpty a = size a == 0 - -halve : [a] -> Optional ([a], [a]) -halve as = - if isEmpty as then None - else Some (take (size as / 2) as, drop (size as / 2) as) - -sort : (a -> a -> Boolean) -> [a] -> [a] -sort lte as = if size as < 2 then as else match halve as with - None -> as - Some (left, right) -> - l = sort lte left - r = sort lte right - merge lte l r - --- let's make sure it works - -> sort (<) [3,2,1,1,2,3,9182,1,2,34,1,23] - --- > sort (<) ["Dave", "Carol", "Eve", "Alice", "Bob", "Francis", "Hal", "Illy", "Joanna", "Greg", "Karen"] - --- > sort (<) 
[3,2,1,1,2,3,9182,1,2,34,1,"oops"] - --- > merge (<) [1,4,5,90,102] ["a", "b"] - diff --git a/unison-src/tests/spurious-ability-fail-underapply.u b/unison-src/tests/spurious-ability-fail-underapply.u deleted file mode 100644 index 6d3c1fe79f..0000000000 --- a/unison-src/tests/spurious-ability-fail-underapply.u +++ /dev/null @@ -1,8 +0,0 @@ -ability Woot where - woot : {Woot} Nat - -wha : ((a ->{Woot} a) -> a ->{Woot} a) -> Nat -wha f = - blah a = f' a - f' = f blah - 42 diff --git a/unison-src/tests/spurious-ability-fail.u b/unison-src/tests/spurious-ability-fail.u deleted file mode 100644 index 4bee905a42..0000000000 --- a/unison-src/tests/spurious-ability-fail.u +++ /dev/null @@ -1,16 +0,0 @@ ---The expression in red needs the {𝛆} ability, but this location only has access to the {𝛆} ability, --- --- 8 | odd n = if n == 1 then true else even2 (n `drop` 1) - -use Universal == - -even : Nat -> Boolean -even n = if n == 0 then true else odd (n `drop` 1) - -odd : Nat -> Boolean -odd n = if n == 1 then true else even2 (n `drop` 1) - -even2 = even - -increment : Nat -> Nat -- signature is optional -increment n = n + 1 diff --git a/unison-src/tests/state1.u b/unison-src/tests/state1.u deleted file mode 100644 index 61b0e2cb98..0000000000 --- a/unison-src/tests/state1.u +++ /dev/null @@ -1,15 +0,0 @@ ---State1 ability -ability State se2 where - put : ∀ se . se -> {State se} () - get : ∀ se . () -> {State se} se - --- state : ∀ s a . s -> Request (State s) a -> (s, a) -state woot = cases - { State.put snew -> k } -> handle k () with state snew - { State.get () -> k } -> handle k woot with state woot - { a } -> (woot, a) - -blah : ∀ s a . s -> Request (State s) a -> (s, a) -blah = state - -> () diff --git a/unison-src/tests/state1a.u b/unison-src/tests/state1a.u deleted file mode 100644 index 471170b869..0000000000 --- a/unison-src/tests/state1a.u +++ /dev/null @@ -1,11 +0,0 @@ ---State1a ability -ability State se2 where - put : ∀ se . se -> {State se} () - get : ∀ se . 
{State se} se -id : Int -> Int -id i = i -foo : () -> {State Int} Int -foo unit = id (State.get Int.+ State.get) - -> () - diff --git a/unison-src/tests/state2.u b/unison-src/tests/state2.u deleted file mode 100644 index 62337b1074..0000000000 --- a/unison-src/tests/state2.u +++ /dev/null @@ -1,11 +0,0 @@ ---State2 ability -ability State se2 where - put : ∀ se . se -> {State se} () - get : ∀ se . () -> {State se} se -state : ∀ s a . s -> Request (State s) a -> (s, a) -state woot = cases - { State.get () -> k } -> handle k woot with state woot - { State.put snew -> k } -> handle k () with state snew - { a } -> (woot, a) - -> () diff --git a/unison-src/tests/state2a-min.u b/unison-src/tests/state2a-min.u deleted file mode 100644 index 63a632a703..0000000000 --- a/unison-src/tests/state2a-min.u +++ /dev/null @@ -1,17 +0,0 @@ ---State2 ability -ability State s where - put : s -> {State s} () - -state : s -> Request (State s) a -> a -state s = cases - { State.put snew -> k } -> handle k () with state snew - { a } -> a - -ex : Text -ex = handle - State.put (11 + 1) - State.put (5 + 5) - "hello" - with state 10 - -> ex diff --git a/unison-src/tests/state2a-min.ur b/unison-src/tests/state2a-min.ur deleted file mode 100644 index 3580093b9d..0000000000 --- a/unison-src/tests/state2a-min.ur +++ /dev/null @@ -1 +0,0 @@ -"hello" diff --git a/unison-src/tests/state2a.u b/unison-src/tests/state2a.u deleted file mode 100644 index c2dcc58a00..0000000000 --- a/unison-src/tests/state2a.u +++ /dev/null @@ -1,50 +0,0 @@ ---State2 ability - -type Optional a = None | Some a - -ability State s where - put : s -> {State s} () - get : {State s} s - -state : s -> Request (State s) a -> (s, a) -state s = cases - { State.get -> k } -> handle k s with state s - { State.put snew -> k } -> handle k () with state snew - { a } -> (s, a) - -modify : (s ->{} s) ->{State s} () -modify f = - s = State.get - s2 = f s - State.put s2 - -increment : '{State Nat} () -increment = '(modify ((+) 1)) - -second 
: (a, b) -> b -second = cases (_,b) -> b - -first : (a, b) -> a -first = cases (a,_) -> a - -ex : Text -ex = - result : (Nat, Text) - result = handle - State.put (11 + 1) - x = State.get - State.put (5 + 5) - "hello" - with state 10 - - second result - -> ex - -modify2 : (s -> s) ->{State s} () -modify2 f = - s = State.get - s2 = f s - State.put s2 - ---- diff --git a/unison-src/tests/state2a.uu b/unison-src/tests/state2a.uu deleted file mode 100644 index 82a2306eb9..0000000000 --- a/unison-src/tests/state2a.uu +++ /dev/null @@ -1,30 +0,0 @@ ---State2 ability - -type Optional a = None | Some a - -ability State s where - put : s -> {State s} () - get : {State s} s - -state : s -> Request (State s) a -> (s, a) -state s = cases - { State.get -> k } -> handle k s with state s - { State.put snew -> k } -> handle k () with state snew - { a } -> (s, a) - -modify3 : (s -> s) -> () -modify3 f = - s = State.get - s2 = f s - State.put s2 - ---- - -limitation here is that inferred ability vars can't refer to universal vars in -the same type signature - -the inferred abilities are existentials, which are allocated up front, so -they can't bind to the universals nor does that really make sense - -would need some nondeterminism or multiple phases in the typechecking process to -do better diff --git a/unison-src/tests/state2b-min.u b/unison-src/tests/state2b-min.u deleted file mode 100644 index 257ca9e3e3..0000000000 --- a/unison-src/tests/state2b-min.u +++ /dev/null @@ -1,15 +0,0 @@ ---State2 ability -ability State s where - put : s -> {State s} () - -state : s -> Request (State s) a -> s -state s = cases - { State.put snew -> k } -> handle k () with state snew - { a } -> s - -> handle - State.put (11 + 1) - State.put (5 + 15) - () - with state 10 - -- should be 20 diff --git a/unison-src/tests/state2b-min.ur b/unison-src/tests/state2b-min.ur deleted file mode 100644 index 209e3ef4b6..0000000000 --- a/unison-src/tests/state2b-min.ur +++ /dev/null @@ -1 +0,0 @@ -20 diff --git 
a/unison-src/tests/state2b.u b/unison-src/tests/state2b.u deleted file mode 100644 index b036ed0283..0000000000 --- a/unison-src/tests/state2b.u +++ /dev/null @@ -1,39 +0,0 @@ ---State2 ability - -type Optional a = None | Some a - -ability State s where - put : s -> {State s} () - get : {State s} s - -state : s -> Request (State s) a -> (s, a) -state s = cases - { State.get -> k } -> handle k s with state s - { State.put snew -> k } -> handle k () with state snew - { a } -> (s, a) - -modify : (s ->{} s) -> {State s} () -modify f = State.put (f State.get) - -increment : '{State Nat} () -increment = '(modify ((+) 1)) - -second : (a, b) -> b -second = cases (_,b) -> b - -first : (a, b) -> a -first = cases (a,_) -> a - -ex : Nat -ex = - result = handle - State.put (11 + 1) - State.put (5 + 15) - () - with state 10 - - first result - --- should return `20`, but actually returns `12` --- seems like only one `put` is actually being run -> ex diff --git a/unison-src/tests/state3.u b/unison-src/tests/state3.u deleted file mode 100644 index cc15016819..0000000000 --- a/unison-src/tests/state3.u +++ /dev/null @@ -1,30 +0,0 @@ ---State3 ability -ability State se2 where - put : ∀ se . se -> {State se} () - get : ∀ se . () -> {State se} se - -state : ∀ s a . 
s -> Request (State s) a -> (s, a) -state woot = cases - { State.get () -> k } -> handle k woot with state woot - { State.put snew -> k } -> handle k () with state snew - { a } -> (woot, a) - -ex1 : (Nat, Nat) -ex1 = handle State.get () with state 42 - -ex1a : (Nat, Nat) -ex1a = handle 49 with state 42 - -ex1b = handle 0 with x -> 10 - -ex1c : Nat -ex1c = handle 0 with x -> 10 - -ex1d = handle 49 with state 42 - -ex2 = handle State.get () with state 42 - -ex3 : (Nat, Nat) -ex3 = ex2 - -> ex3 diff --git a/unison-src/tests/state4.u b/unison-src/tests/state4.u deleted file mode 100644 index 3db4bd9c40..0000000000 --- a/unison-src/tests/state4.u +++ /dev/null @@ -1,26 +0,0 @@ -ability State s where - put : s -> {State s} () - get : {State s} s - -state : s -> Request (State s) a -> s -state s = cases - { State.get -> k } -> handle k s with state s - { State.put snew -> k } -> handle k () with state snew - { a } -> s - -modify : (s ->{} s) -> {State s} () -modify f = State.put (f State.get) - -increment : '{State Nat} () -increment = '(modify ((+) 1)) - -ex : Nat -ex = handle - State.put (11 + 1) - !increment - !increment - !increment - State.get -- should be 15, amirite?? 
- with state 10 - -> ex diff --git a/unison-src/tests/state4.ur b/unison-src/tests/state4.ur deleted file mode 100644 index 60d3b2f4a4..0000000000 --- a/unison-src/tests/state4.ur +++ /dev/null @@ -1 +0,0 @@ -15 diff --git a/unison-src/tests/state4a.u b/unison-src/tests/state4a.u deleted file mode 100644 index 04544e9451..0000000000 --- a/unison-src/tests/state4a.u +++ /dev/null @@ -1,26 +0,0 @@ -ability State s where - put : s -> {State s} () - get : {State s} s - -state : s -> Request (State s) a -> s -state s = cases - { State.get -> k } -> handle k s with state s - { State.put snew -> k } -> handle k () with state snew - { a } -> s - -modify : (s ->{} s) -> {State s} () -modify f = State.put (f State.get) - -increment : '{State Nat} () -increment = '(modify ((+) 1)) - -ex : Nat -ex = handle - State.put (11 + 1) - -- !increment - -- !increment - -- !increment - State.get -- should be 15, amirite?? - with state 10 - -> ex diff --git a/unison-src/tests/state4a.ur b/unison-src/tests/state4a.ur deleted file mode 100644 index 48082f72f0..0000000000 --- a/unison-src/tests/state4a.ur +++ /dev/null @@ -1 +0,0 @@ -12 diff --git a/unison-src/tests/stream.u b/unison-src/tests/stream.u deleted file mode 100644 index b8ea9eb914..0000000000 --- a/unison-src/tests/stream.u +++ /dev/null @@ -1,72 +0,0 @@ -ability Emit a where - emit : a ->{Emit a} () - -type Stream e a r = Stream ('{e, Emit a} r) - -use Stream Stream -use Optional None Some -use Universal == - -namespace Stream where - - -- unfold : s -> (s ->{} Optional (a, s)) -> Stream e a () - unfold s f = - step s = match f s with - None -> () - Some (a, s) -> emit a - step s - Stream '(step s) - - run : Stream e a r ->{e, Emit a} r - run = cases Stream c -> !c - - run' = cases Stream s -> s - - (++) : Stream {e} a r -> Stream {e} a r -> Stream {e} a r - s1 ++ s2 = Stream '(forceBoth (run' s1) (run' s2)) - - from : Nat -> Stream e Nat () - from n = unfold n (n -> Some (n, n + 1)) - - -- take : Nat -> Stream {} a () -> 
Stream {} a () - take n s = - step n = cases - {Emit.emit a -> k} -> - if n Nat.== 0 then () - else - Emit.emit a - handle k () with step (n `drop` 1) - {r} -> () - Stream ' handle run s with step n - - -- map : (a -> b) -> Stream {e} a r -> Stream {e} b r - map f s = - step = cases - {r} -> r - {Emit.emit a -> k} -> - Emit.emit (f a) - handle k () with step - Stream ' handle run s with step - - -- toSeq : Stream {e} a r ->{e} [a] - toSeq s = - step acc = cases - {Emit.emit a -> k} -> handle k () with step (acc `snoc` a) - {_} -> acc - handle run s with step [] - - fromSeq : [a] -> Stream e a () - fromSeq a = - step a = match List.at 0 a with - None -> None - Some h -> Some (h, drop 1 a) - unfold a step - -> toSeq (Stream.take 7 (Stream.map (x -> x + 10) (from 0))) --- > toSeq (Stream.fromSeq [1,2,3] ++ Stream.fromSeq [4,5,6]) --- > toSeq (Stream.take 20 (from 0)) - --- run two thunks in sequence -forceBoth a b = - !a - !b diff --git a/unison-src/tests/stream2.uu b/unison-src/tests/stream2.uu deleted file mode 100644 index fd2862d479..0000000000 --- a/unison-src/tests/stream2.uu +++ /dev/null @@ -1,81 +0,0 @@ -ability Emit a where - emit : a ->{Emit a} () - -type Stream e a r = Stream ('{e, Emit a} r) - -use Stream Stream -use Optional None Some -use Universal == - -namespace Stream where - - step : - (a ->{e} b) -> - Request {Emit a} r ->{e, Emit b} r - step f = cases - {r} -> r - {Emit.emit a -> k} -> - Emit.emit (f a) - handle k () with step f - - -- map : (a -> b) -> Stream {e} a r -> Stream {e} b r - map : (a ->{e} b) - -> Stream {e} a r - -> Stream {e} b r - map f s = Stream ' handle run s with step f - - run : Stream e a r ->{e, Emit a} r - run = cases Stream c -> !c - - --- - -- run' = cases Stream s -> s - -- unfold : s -> (s ->{} Optional (a, s)) -> Stream e a () - unfold s f = - step = cases - None -> () - Some (a, s) -> emit a - step s - Stream '(step s) - - - (++) : Stream {e} a r -> Stream {e} a r -> Stream {e} a r - s1 ++ s2 = Stream '(run' s1 !! 
run' s2) - - from : Nat -> Stream e Nat () - from n = unfold n (n -> Some (n, n + 1)) - - -- take : Nat -> Stream {} a () -> Stream {} a () - take n s = - step n = cases - {Emit.emit a -> k} -> - if n Nat.== 0 then () - else - Emit.emit a - handle k () with step (n `drop` 1) - {r} -> () - Stream ' handle run s with step n - - ---- - -- toSeq : Stream {e} a r ->{e} [a] - toSeq s = - step acc = cases - {Emit.emit a -> k} -> handle k () with step (acc `snoc` a) - {_} -> acc - handle run s with step [] - - fromSeq : [a] -> Stream e a () - fromSeq a = - step a = match List.at 0 a with - None -> None - Some h -> Some (h, drop 1 a) - unfold a step - -> toSeq (Stream.take 7 (Stream.map (x -> x + 10) (from 0))) --- > toSeq (Stream.fromSeq [1,2,3] ++ Stream.fromSeq [4,5,6]) --- > toSeq (Stream.take 20 (from 0)) - --- run two thunks in sequence -a !! b = - !a - !b diff --git a/unison-src/tests/stream3.uu b/unison-src/tests/stream3.uu deleted file mode 100644 index 3e6a2d5e8d..0000000000 --- a/unison-src/tests/stream3.uu +++ /dev/null @@ -1,71 +0,0 @@ -ability Emit a where - emit : a ->{Emit a} () - -type Stream e a r = Stream ('{e, Emit a} r) - -use Stream Stream -use Optional None Some -use Universal == - -namespace Stream where - - step : - (a ->{e} b) -> - Request {Emit a} r ->{e, Emit b} r - step f = cases - {r} -> r - {Emit.emit a -> k} -> - Emit.emit (f a) - handle k () with step f - - - -- map : (a -> b) -> Stream {e} a r -> Stream {e} b r - -- map : (a ->{e} b) - -- -> Stream {e} a r - -- -> Stream {e} b r - -- 0. this gets a weird type - map f s = Stream ' handle run s with step f - - -- 1. inferred type of `map` required an `o -> o` for some reason - map1 f s = - step f = cases - {r} -> r - {Emit.emit a -> k} -> - Emit.emit (f a) - handle k () with step f - Stream ' handle run s with step f - - -- 2. 
gets the same weird type - map2 f s = - step : - (a ->{e} b) -> - Request {Emit a} r ->{e, Emit b} r - step f = cases - {r} -> r - {Emit.emit a -> k} -> - Emit.emit (f a) - handle k () with step f - Stream ' handle run s with step f - - run : Stream e a r ->{e, Emit a} r - run = cases Stream c -> !c - -ability Abort where - abort : {Abort} a - ---- --- x : Stream {Abort} Nat () -x = Stream 'let - Emit.emit 1 - Abort.abort - Emit.emit 2 - ---- -I found a value of type Var User "a"-94 where I expected to find one of type b96: - - 24 | -> Stream {e} b r - 25 | map f s = Stream ' handle run s with step f - - from right here: - - 22 | map : (a ->{e} b) diff --git a/unison-src/tests/suffix-resolve.u b/unison-src/tests/suffix-resolve.u deleted file mode 100644 index 1e3fc5563c..0000000000 --- a/unison-src/tests/suffix-resolve.u +++ /dev/null @@ -1,23 +0,0 @@ - --- This file shows that any unique suffix can be used to refer --- to a definition. - --- no imports needed here, even though FQN is builtin.Int -foo : Int -foo = +1 - --- no imports needed here, even though FQNs are builtin.Optional.{None,Some} -ex1 = cases - None -> 0 - Some a -> a + 1 - --- you can still use the -ex2 = cases - Optional.None -> 99 - Optional.Some _ -> 0 - -ex3 = builtin.Optional.None - --- TDNR would have handled this one before, but TDNR can't do --- type resolution or pattern resolution -zoink = Some 42 diff --git a/unison-src/tests/tdnr.u b/unison-src/tests/tdnr.u deleted file mode 100644 index cf29ddf4ae..0000000000 --- a/unison-src/tests/tdnr.u +++ /dev/null @@ -1,4 +0,0 @@ --- Should resolve + with Type-directeded name resolution - -x : Nat -x = 4 + 2 diff --git a/unison-src/tests/tdnr2.u b/unison-src/tests/tdnr2.u deleted file mode 100644 index ae1d855278..0000000000 --- a/unison-src/tests/tdnr2.u +++ /dev/null @@ -1,13 +0,0 @@ -x : Nat -x = 42 + 2 - -y : Int -y = +42 + -2 - -z : Float -z = 42.0 - 2.0 - -foo a b = (a + b) + 3 - -bar a b = 3 + b + a - diff --git a/unison-src/tests/tdnr3.u 
b/unison-src/tests/tdnr3.u deleted file mode 100644 index b57cb6aad1..0000000000 --- a/unison-src/tests/tdnr3.u +++ /dev/null @@ -1,6 +0,0 @@ --- Local definitions should be resolved by type - -Foo.bar x = x + 1 - -z = bar 99 - diff --git a/unison-src/tests/tdnr4.u b/unison-src/tests/tdnr4.u deleted file mode 100644 index e07bdfdd43..0000000000 --- a/unison-src/tests/tdnr4.u +++ /dev/null @@ -1,4 +0,0 @@ -x = None - -y = Some 10 - diff --git a/unison-src/tests/text-escaping.u b/unison-src/tests/text-escaping.u deleted file mode 100644 index d1dfa01b1b..0000000000 --- a/unison-src/tests/text-escaping.u +++ /dev/null @@ -1,10 +0,0 @@ -id x = x - -x = id ("\n") - -find : Text -> Text -find s = match (Text.take 1 s) with - "\n" -> "found" - _ -> "not found" - -> (x, find "\nbar") diff --git a/unison-src/tests/text-escaping.ur b/unison-src/tests/text-escaping.ur deleted file mode 100644 index 7b7648190e..0000000000 --- a/unison-src/tests/text-escaping.ur +++ /dev/null @@ -1 +0,0 @@ -("\n", "found") diff --git a/unison-src/tests/text-pattern.u b/unison-src/tests/text-pattern.u deleted file mode 100644 index c906396156..0000000000 --- a/unison-src/tests/text-pattern.u +++ /dev/null @@ -1,6 +0,0 @@ -foo = cases - "xyz" -> false - "abc" -> true - _ -> false - -> (foo "abc", foo "xyz", foo "hello, world") diff --git a/unison-src/tests/text-pattern.ur b/unison-src/tests/text-pattern.ur deleted file mode 100644 index 8733091c2a..0000000000 --- a/unison-src/tests/text-pattern.ur +++ /dev/null @@ -1 +0,0 @@ -(true, false, false) diff --git a/unison-src/tests/tictactoe.u b/unison-src/tests/tictactoe.u deleted file mode 100644 index 4f9c6c56e7..0000000000 --- a/unison-src/tests/tictactoe.u +++ /dev/null @@ -1,43 +0,0 @@ --- board piece -type P = X | O | E - -type Board = Board P P P P P P P P P - -use Board Board -use P O X E -use Optional Some None - -orElse a b = - match a with - None -> b - a -> a - -namespace P where - (/=) : P -> P -> Boolean - a /= b = not (a == b) - (==) : 
P -> P -> Boolean - a == b = match (a,b) with - (X,X) -> true - (O,O) -> true - _ -> false - -isWin : Board -> Optional P -isWin board = - same : P -> P -> P -> Optional P - same a b c = if ((a P.== b) && (a P.== c)) && (a P./= E) - then Some a - else None - match board with - -- vertical top/center/bottom - -- horizontal left/center/right - -- diagonal rising/falling - Board a b c - d e f - g h i -> - (same a b c `orElse` same d e f `orElse` same g h i `orElse` - same a d g `orElse` same b e h `orElse` same c f i `orElse` - same a e i `orElse` same g e c) - -> isWin (Board X O X - O X X - O E X) diff --git a/unison-src/tests/tictactoe0-array-oob1.u b/unison-src/tests/tictactoe0-array-oob1.u deleted file mode 100644 index 22989cd6e6..0000000000 --- a/unison-src/tests/tictactoe0-array-oob1.u +++ /dev/null @@ -1,12 +0,0 @@ --- board piece - -type Board = Board Nat Nat Nat - -use Board Board - --- uncommenting these gives errors from NPE to array index out of bounds -1, -2 --- x = 1 --- y = 2 - -ex = match Board 77 88 99 with - Board a b c -> c diff --git a/unison-src/tests/tictactoe0-npe.u b/unison-src/tests/tictactoe0-npe.u deleted file mode 100644 index d1845df897..0000000000 --- a/unison-src/tests/tictactoe0-npe.u +++ /dev/null @@ -1,17 +0,0 @@ --- board piece -type P = X | O | E - -type Board = Board P P P P P P P P P - -use Board Board -use P O X E - -whatevs a b c = a - -b = Board X O X O X X O E X -x = 1 -y = 2 -z = 3 - -ex = match b with - Board a b c d e f g h i -> a diff --git a/unison-src/tests/tictactoe0.u b/unison-src/tests/tictactoe0.u deleted file mode 100644 index 1cab582c4b..0000000000 --- a/unison-src/tests/tictactoe0.u +++ /dev/null @@ -1,43 +0,0 @@ --- board piece -type P = X | O | E - -type Board = Board P P P P P P P P P - -use Board Board -use P O X E -use Optional Some None - -orElse a b = - match a with - None -> b - a -> a - -namespace P where - (/=) : P -> P -> Boolean - a /= b = not (a == b) - (==) : P -> P -> Boolean - a == b = match 
(a,b) with - (X,X) -> true - (O,O) -> true - _ -> false - - -b = (Board X O X - O X X - O E X) - -isWin board = - same : P -> P -> P -> Optional P - same a b c = if ((a P.== b) && (a P.== c)) && (a P./= E) - then Some a - else None - match board with - -- vertical top/center/bottom - -- horizontal left/center/right - -- diagonal rising/falling - Board a b c - d e f - g h i -> (same a b c) - -> isWin b --- Some 3 diff --git a/unison-src/tests/tictactoe2.u b/unison-src/tests/tictactoe2.u deleted file mode 100644 index eb71dd8abf..0000000000 --- a/unison-src/tests/tictactoe2.u +++ /dev/null @@ -1,63 +0,0 @@ --- board piece -type P = X | O | E - -type Board = Board P P P P P P P P P - -use Board Board -use P O X E -use Optional Some None - -namespace P where - (/=) : P -> P -> Boolean - a /= b = not (a == b) - (==) : P -> P -> Boolean - a == b = match (a,b) with - (X,X) -> true - (O,O) -> true - _ -> false - -isWin : Board -> Optional P -isWin board = - same : P -> P -> P -> Optional P - same a b c = if ((a P.== b) && (a P.== c)) && (a P./= E) - then Some a - else None - match board with - -- vertical top/center/bottom - -- horizontal left/center/right - -- diagonal rising/falling - Board a b c - _ _ _ - _ _ _ -> same a b c - - Board _ _ _ - a b c - _ _ _ -> same a b c - - Board _ _ _ - _ _ _ - a b c -> same a b c - - Board a _ _ - b _ _ - c _ _ -> same a b c - - Board _ a _ - _ b _ - _ c _ -> same a b c - - Board _ _ a - _ _ b - _ _ c -> same a b c - - Board a _ _ - _ b _ - _ _ c -> same a b c - - Board _ _ a - _ b _ - c _ _ -> same a b c - -x = isWin (Board X O X - O X X - O E X) diff --git a/unison-src/tests/tuple.u b/unison-src/tests/tuple.u deleted file mode 100644 index c568307866..0000000000 --- a/unison-src/tests/tuple.u +++ /dev/null @@ -1,4 +0,0 @@ -(+) = (Nat.+) - -> match (1,2,3,4) with - (a,b,c,d) -> (a + b, c + d) diff --git a/unison-src/tests/tuple.ur b/unison-src/tests/tuple.ur deleted file mode 100644 index cdaaab5be0..0000000000 --- 
a/unison-src/tests/tuple.ur +++ /dev/null @@ -1 +0,0 @@ -(3,7) diff --git a/unison-src/tests/type-application.u b/unison-src/tests/type-application.u deleted file mode 100644 index ae54823ad7..0000000000 --- a/unison-src/tests/type-application.u +++ /dev/null @@ -1,11 +0,0 @@ - -ability Foo where - foo : {Foo} Nat - -type Wrap a = Wrap Nat - -blah : Wrap {Foo} -> Nat -blah = cases - Wrap.Wrap n -> n + 1 - -> blah (Wrap 99) diff --git a/unison-src/tests/underscore-parsing.u b/unison-src/tests/underscore-parsing.u deleted file mode 100644 index 928cebb1a2..0000000000 --- a/unison-src/tests/underscore-parsing.u +++ /dev/null @@ -1,7 +0,0 @@ -_prefix = 1 -prefix_ _x = _x -_prefix_ _ = 2 - -_x `_infix` y_ = (_x, y_) -x_ `infix_` _y = (x_, _y) -_ `_infix_` _ = () diff --git a/unison-src/tests/ungeneralize-bug.uu b/unison-src/tests/ungeneralize-bug.uu deleted file mode 100644 index 5a5448ed17..0000000000 --- a/unison-src/tests/ungeneralize-bug.uu +++ /dev/null @@ -1,22 +0,0 @@ - -use Foo Foo -use Optional Some None - -type Foo a b = Foo a (Optional b) - -foo : Foo a b -> (b -> c) -> Foo a c -foo x f = match x with - Foo a None -> Foo a None - --- --- 🌻 /Users/pchiusano/work/unison/unison-src/tests/typechecker-bug.u has changed, reloading... 
--- I found a value of type b where I expected to find one of type c: --- --- 7 | foo : Foo a b -> (b -> c) -> Foo a c --- 8 | foo x f = match x with --- 9 | Foo a None -> Foo a None --- --- from right here: --- --- 7 | foo : Foo a b -> (b -> c) -> Foo a c --- shouldn't be a type error diff --git a/unison-src/tests/unique.u b/unison-src/tests/unique.u deleted file mode 100644 index 29b9745d66..0000000000 --- a/unison-src/tests/unique.u +++ /dev/null @@ -1,28 +0,0 @@ - -unique ability Zing where zing : {Zang} Nat - -unique[asdlfkjasdflkj] ability Zang where - zang : {Zing} Nat - -unique - ability Blarg where - oog : {Blarg} Text - -unique type Bool = T | F - -unique[sdalfkjsdf] type BetterBool = Ya | Nah - -unique[asdflkajsdf] type Day - = Sun | Mon | Tue | Wed | Thu | Fri | Sat - -id x = x - -unique type Day2 - = Sun - | Mon - | Tue - | Wed - | Thu - | Fri - | Sat - diff --git a/unison-src/tests/void.u b/unison-src/tests/void.u deleted file mode 100644 index a4e646ad32..0000000000 --- a/unison-src/tests/void.u +++ /dev/null @@ -1,3 +0,0 @@ -type Void = - -> 3 diff --git a/unison-src/transcripts/addupdatemessages.md b/unison-src/transcripts/addupdatemessages.md deleted file mode 100644 index 68f7be5a52..0000000000 --- a/unison-src/transcripts/addupdatemessages.md +++ /dev/null @@ -1,63 +0,0 @@ -# Adds and updates - -Let's set up some definitions to start: - -```ucm:hide -.> builtins.merge -``` - -```unison -x = 1 -y = 2 - -type X = One Nat -type Y = Two Nat Nat -``` - -Expected: `x` and `y`, `X`, and `Y` exist as above. UCM tells you this. - -```ucm -.scratch> add -``` - -Let's add an alias for `1` and `One`: - -```unison -z = 1 - -type Z = One Nat -``` - -Expected: `z` is now `1`. UCM tells you that this definition is also called `x`. -Also, `Z` is an alias for `X`. 
- -```ucm -.scratch> add -``` - -Let's update something that has an alias (to a value that doesn't have a name already): - -```unison -x = 3 -type X = Three Nat Nat Nat -``` - -Expected: `x` is now `3` and `X` has constructor `Three`. UCM tells you the old definitions were also called `z` and `Z` and these names have also been updated. - -```ucm -.scratch> update -``` - -Update it to something that already exists with a different name: - -```unison -x = 2 -type X = Two Nat Nat -``` - -Expected: `x` is now `2` and `X` is `Two`. UCM says the old definition was also named `z/Z`, and was also updated. And it says the new definition is also named `y/Y`. - -```ucm -.scratch> update -``` - diff --git a/unison-src/transcripts/addupdatemessages.output.md b/unison-src/transcripts/addupdatemessages.output.md deleted file mode 100644 index 424c9b08f2..0000000000 --- a/unison-src/transcripts/addupdatemessages.output.md +++ /dev/null @@ -1,159 +0,0 @@ -# Adds and updates - -Let's set up some definitions to start: - -```unison -x = 1 -y = 2 - -type X = One Nat -type Y = Two Nat Nat -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - type X - type Y - x : Nat - y : Nat - -``` -Expected: `x` and `y`, `X`, and `Y` exist as above. UCM tells you this. - -```ucm - ☝️ The namespace .scratch is empty. - -.scratch> add - - ⍟ I've added these definitions: - - type X - type Y - x : Nat - y : Nat - -``` -Let's add an alias for `1` and `One`: - -```unison -z = 1 - -type Z = One Nat -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - type Z - (also named X) - z : Nat - (also named x) - -``` -Expected: `z` is now `1`. UCM tells you that this definition is also called `x`. -Also, `Z` is an alias for `X`. 
- -```ucm -.scratch> add - - ⍟ I've added these definitions: - - type Z - (also named X) - z : Nat - (also named x) - -``` -Let's update something that has an alias (to a value that doesn't have a name already): - -```unison -x = 3 -type X = Three Nat Nat Nat -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - type X - (The old definition is also named Z. I'll update this - name too.) - x : Nat - (The old definition is also named z. I'll update this - name too.) - -``` -Expected: `x` is now `3` and `X` has constructor `Three`. UCM tells you the old definitions were also called `z` and `Z` and these names have also been updated. - -```ucm -.scratch> update - - ⍟ I've updated these names to your new definition: - - type X - (The old definition was also named Z. I updated this name - too.) - x : Nat - (The old definition was also named z. I updated this name - too.) - -``` -Update it to something that already exists with a different name: - -```unison -x = 2 -type X = Two Nat Nat -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - type X - (The old definition is also named Z. I'll update this - name too.) - (The new definition is already named Y as well.) - x : Nat - (The old definition is also named z. I'll update this - name too.) - (The new definition is already named y as well.) - -``` -Expected: `x` is now `2` and `X` is `Two`. UCM says the old definition was also named `z/Z`, and was also updated. And it says the new definition is also named `y/Y`. - -```ucm -.scratch> update - - ⍟ I've updated these names to your new definition: - - type X - (The old definition was also named Z. 
I updated this name - too.) - (The new definition is already named Y as well.) - x : Nat - (The old definition was also named z. I updated this name - too.) - (The new definition is already named y as well.) - -``` diff --git a/unison-src/transcripts/alias-many.md b/unison-src/transcripts/alias-many.md deleted file mode 100644 index bd92d3bf31..0000000000 --- a/unison-src/transcripts/alias-many.md +++ /dev/null @@ -1,130 +0,0 @@ -```ucm:hide -.> builtins.merge -``` -```unison:hide:all -List.adjacentPairs : [a] -> [(a, a)] -List.adjacentPairs as = - go xs acc = - match xs with - [x, y] ++ t -> go t (acc :+ (x, y)) - _ -> acc - go as [] - -List.all : (a -> Boolean) -> [a] -> Boolean -List.all p xs = - match xs with - [] -> true - x +: xs -> (p x) && (List.all p xs) - -List.any : (a -> Boolean) -> [a] -> Boolean -List.any p xs = - match xs with - [] -> false - x +: xs -> (p x) || (List.any p xs) - -List.chunk : Nat -> [a] -> [[a]] -List.chunk n as = - go acc rest = - match splitAt n rest with - (c, []) -> acc :+ c - (c, cs) -> go (acc :+ c) cs - go [] as - -List.chunksOf : Nat -> [a] -> [[a]] -List.chunksOf n text = - go acc text = - p = splitAt n text - match p with - ([], _) -> acc - (a, b) -> go (acc :+ a) b - go [] text - -List.dropWhile : (a -> Boolean) -> [a] -> [a] -List.dropWhile p xs = - match xs with - i +: l -> if p i then List.dropWhile p l else xs - _ -> [] - -List.first : [a] -> Optional a -List.first a = List.at 0 a - -List.init : [a] -> Optional [a] -List.init as = - match as with - [] -> None - as :+ _ -> Some as - -List.intersperse : a -> [a] -> [a] -List.intersperse a as = - go acc as = - match as with - [] -> acc - [x] -> acc :+ x - x +: xs -> go (acc :+ x :+ a) xs - go [] as - -List.isEmpty : [a] -> Boolean -List.isEmpty as = List.size as == 0 - -List.last : [a] -> Optional a -List.last as = - match as with - [] -> None - _ :+ a -> Some a - -List.replicate : Nat -> a -> [a] -List.replicate n a = - go n acc = if n == 0 then acc else go (Nat.drop n 
1) (a +: acc) - go n [] - -List.splitAt : Nat -> [a] -> ([a], [a]) -List.splitAt n as = (List.take n as, List.drop n as) - -List.tail : [a] -> Optional [a] -List.tail as = - match as with - [] -> None - _ +: as -> Some as - -List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] -List.takeWhile p xs = - go xs acc = - match xs with - x +: xs -> if p x then go xs (acc :+ x) else acc - _ -> acc - go xs [] -``` -```ucm:hide -.runar> add -``` - -The `alias.many` command can be used to copy definitions from the current namespace into your curated one. -The names that will be used in the target namespace are the names you specify, relative to the current namespace: - -``` -.> help alias.many - - alias.many (or copy) - `alias.many [relative2...] ` creates aliases `relative1`, `relative2`, ... - in the namespace `namespace`. - `alias.many foo.foo bar.bar .quux` creates aliases `.quux.foo.foo` and `.quux.bar.bar`. -``` - -Let's try it! - -```ucm -.> cd .builtin -.builtin> find -.builtin> alias.many 94-104 .mylib -``` - -I want to incorporate a few more from another namespace: -```ucm -.builtin> cd .runar -.runar> find -.runar> alias.many 1-15 .mylib -.runar> cd .mylib -.mylib> find -``` - -Thanks, `alias.many`! diff --git a/unison-src/transcripts/alias-many.output.md b/unison-src/transcripts/alias-many.output.md deleted file mode 100644 index 63a660e51e..0000000000 --- a/unison-src/transcripts/alias-many.output.md +++ /dev/null @@ -1,379 +0,0 @@ -The `alias.many` command can be used to copy definitions from the current namespace into your curated one. -The names that will be used in the target namespace are the names you specify, relative to the current namespace: - -``` -.> help alias.many - - alias.many (or copy) - `alias.many [relative2...] ` creates aliases `relative1`, `relative2`, ... - in the namespace `namespace`. - `alias.many foo.foo bar.bar .quux` creates aliases `.quux.foo.foo` and `.quux.bar.bar`. - -``` - -Let's try it! 
- -```ucm -.> cd .builtin - -.builtin> find - - 1. builtin type Boolean - 2. Boolean.not : Boolean -> Boolean - 3. builtin type Bytes - 4. Bytes.++ : Bytes -> Bytes -> Bytes - 5. Bytes.at : Nat -> Bytes -> Optional Nat - 6. Bytes.drop : Nat -> Bytes -> Bytes - 7. Bytes.empty : Bytes - 8. Bytes.flatten : Bytes -> Bytes - 9. Bytes.fromList : [Nat] -> Bytes - 10. Bytes.size : Bytes -> Nat - 11. Bytes.take : Nat -> Bytes -> Bytes - 12. Bytes.toList : Bytes -> [Nat] - 13. builtin type Char - 14. Char.fromNat : Nat -> Char - 15. Char.toNat : Char -> Nat - 16. Debug.watch : Text -> a -> a - 17. unique type Doc - 18. Doc.Blob : Text -> Doc - 19. Doc.Evaluate : Term -> Doc - 20. Doc.Join : [Doc] -> Doc - 21. Doc.Link : Link -> Doc - 22. Doc.Signature : Term -> Doc - 23. Doc.Source : Link -> Doc - 24. type Either a b - 25. Either.Left : a -> Either a b - 26. Either.Right : b -> Either a b - 27. builtin type Float - 28. Float.* : Float -> Float -> Float - 29. Float.+ : Float -> Float -> Float - 30. Float.- : Float -> Float -> Float - 31. Float./ : Float -> Float -> Float - 32. Float.abs : Float -> Float - 33. Float.acos : Float -> Float - 34. Float.acosh : Float -> Float - 35. Float.asin : Float -> Float - 36. Float.asinh : Float -> Float - 37. Float.atan : Float -> Float - 38. Float.atan2 : Float -> Float -> Float - 39. Float.atanh : Float -> Float - 40. Float.ceiling : Float -> Int - 41. Float.cos : Float -> Float - 42. Float.cosh : Float -> Float - 43. Float.eq : Float -> Float -> Boolean - 44. Float.exp : Float -> Float - 45. Float.floor : Float -> Int - 46. Float.fromText : Text -> Optional Float - 47. Float.gt : Float -> Float -> Boolean - 48. Float.gteq : Float -> Float -> Boolean - 49. Float.log : Float -> Float - 50. Float.logBase : Float -> Float -> Float - 51. Float.lt : Float -> Float -> Boolean - 52. Float.lteq : Float -> Float -> Boolean - 53. Float.max : Float -> Float -> Float - 54. Float.min : Float -> Float -> Float - 55. 
Float.pow : Float -> Float -> Float - 56. Float.round : Float -> Int - 57. Float.sin : Float -> Float - 58. Float.sinh : Float -> Float - 59. Float.sqrt : Float -> Float - 60. Float.tan : Float -> Float - 61. Float.tanh : Float -> Float - 62. Float.toText : Float -> Text - 63. Float.truncate : Float -> Int - 64. builtin type Int - 65. Int.* : Int -> Int -> Int - 66. Int.+ : Int -> Int -> Int - 67. Int.- : Int -> Int -> Int - 68. Int./ : Int -> Int -> Int - 69. Int.and : Int -> Int -> Int - 70. Int.complement : Int -> Int - 71. Int.eq : Int -> Int -> Boolean - 72. Int.fromText : Text -> Optional Int - 73. Int.gt : Int -> Int -> Boolean - 74. Int.gteq : Int -> Int -> Boolean - 75. Int.increment : Int -> Int - 76. Int.isEven : Int -> Boolean - 77. Int.isOdd : Int -> Boolean - 78. Int.leadingZeros : Int -> Nat - 79. Int.lt : Int -> Int -> Boolean - 80. Int.lteq : Int -> Int -> Boolean - 81. Int.mod : Int -> Int -> Int - 82. Int.negate : Int -> Int - 83. Int.or : Int -> Int -> Int - 84. Int.pow : Int -> Nat -> Int - 85. Int.shiftLeft : Int -> Nat -> Int - 86. Int.shiftRight : Int -> Nat -> Int - 87. Int.signum : Int -> Int - 88. Int.toFloat : Int -> Float - 89. Int.toText : Int -> Text - 90. Int.trailingZeros : Int -> Nat - 91. Int.truncate0 : Int -> Nat - 92. Int.xor : Int -> Int -> Int - 93. unique type Link - 94. builtin type Link.Term - 95. Link.Term : Term -> Link - 96. builtin type Link.Type - 97. Link.Type : Type -> Link - 98. builtin type List - 99. List.++ : [a] -> [a] -> [a] - 100. List.+: : a -> [a] -> [a] - 101. List.:+ : [a] -> a -> [a] - 102. List.at : Nat -> [a] -> Optional a - 103. List.cons : a -> [a] -> [a] - 104. List.drop : Nat -> [a] -> [a] - 105. List.empty : [a] - 106. List.size : [a] -> Nat - 107. List.snoc : [a] -> a -> [a] - 108. List.take : Nat -> [a] -> [a] - 109. builtin type Nat - 110. Nat.* : Nat -> Nat -> Nat - 111. Nat.+ : Nat -> Nat -> Nat - 112. Nat./ : Nat -> Nat -> Nat - 113. Nat.and : Nat -> Nat -> Nat - 114. 
Nat.complement : Nat -> Nat - 115. Nat.drop : Nat -> Nat -> Nat - 116. Nat.eq : Nat -> Nat -> Boolean - 117. Nat.fromText : Text -> Optional Nat - 118. Nat.gt : Nat -> Nat -> Boolean - 119. Nat.gteq : Nat -> Nat -> Boolean - 120. Nat.increment : Nat -> Nat - 121. Nat.isEven : Nat -> Boolean - 122. Nat.isOdd : Nat -> Boolean - 123. Nat.leadingZeros : Nat -> Nat - 124. Nat.lt : Nat -> Nat -> Boolean - 125. Nat.lteq : Nat -> Nat -> Boolean - 126. Nat.mod : Nat -> Nat -> Nat - 127. Nat.or : Nat -> Nat -> Nat - 128. Nat.pow : Nat -> Nat -> Nat - 129. Nat.shiftLeft : Nat -> Nat -> Nat - 130. Nat.shiftRight : Nat -> Nat -> Nat - 131. Nat.sub : Nat -> Nat -> Int - 132. Nat.toFloat : Nat -> Float - 133. Nat.toInt : Nat -> Int - 134. Nat.toText : Nat -> Text - 135. Nat.trailingZeros : Nat -> Nat - 136. Nat.xor : Nat -> Nat -> Nat - 137. type Optional a - 138. Optional.None : Optional a - 139. Optional.Some : a -> Optional a - 140. builtin type Request - 141. type SeqView a b - 142. SeqView.VElem : a -> b -> SeqView a b - 143. SeqView.VEmpty : SeqView a b - 144. unique type Test.Result - 145. Test.Result.Fail : Text -> Result - 146. Test.Result.Ok : Text -> Result - 147. builtin type Text - 148. Text.!= : Text -> Text -> Boolean - 149. Text.++ : Text -> Text -> Text - 150. Text.drop : Nat -> Text -> Text - 151. Text.empty : Text - 152. Text.eq : Text -> Text -> Boolean - 153. Text.fromCharList : [Char] -> Text - 154. Text.gt : Text -> Text -> Boolean - 155. Text.gteq : Text -> Text -> Boolean - 156. Text.lt : Text -> Text -> Boolean - 157. Text.lteq : Text -> Text -> Boolean - 158. Text.size : Text -> Nat - 159. Text.take : Nat -> Text -> Text - 160. Text.toCharList : Text -> [Char] - 161. Text.uncons : Text -> Optional (Char, Text) - 162. Text.unsnoc : Text -> Optional (Text, Char) - 163. type Tuple a b - 164. Tuple.Cons : a -> b -> Tuple a b - 165. type Unit - 166. Unit.Unit : () - 167. Universal.< : a -> a -> Boolean - 168. Universal.<= : a -> a -> Boolean - 169. 
Universal.== : a -> a -> Boolean - 170. Universal.> : a -> a -> Boolean - 171. Universal.>= : a -> a -> Boolean - 172. Universal.compare : a -> a -> Int - 173. bug : a -> b - 174. unique type io2.BufferMode - 175. io2.BufferMode.BlockBuffering : BufferMode - 176. io2.BufferMode.LineBuffering : BufferMode - 177. io2.BufferMode.NoBuffering : BufferMode - 178. io2.BufferMode.SizedBlockBuffering : Nat -> BufferMode - 179. unique type io2.FileMode - 180. io2.FileMode.Append : FileMode - 181. io2.FileMode.Read : FileMode - 182. io2.FileMode.ReadWrite : FileMode - 183. io2.FileMode.Write : FileMode - 184. builtin type io2.Handle - 185. builtin type io2.IO - 186. io2.IO.clientSocket : Text - -> Text - ->{IO} Either IOError Socket - 187. io2.IO.closeFile : Handle ->{IO} Either IOError () - 188. io2.IO.closeSocket : Socket ->{IO} Either IOError () - 189. io2.IO.createDirectory : Text ->{IO} Either IOError () - 190. io2.IO.fileExists : Text ->{IO} Either IOError Boolean - 191. io2.IO.forkComp : '{IO} Either IOError a - ->{IO} Either IOError ThreadId - 192. io2.IO.getBuffering : Handle - ->{IO} Either IOError BufferMode - 193. io2.IO.getCurrentDirectory : '{IO} Either IOError Text - 194. io2.IO.getFileSize : Text ->{IO} Either IOError Nat - 195. io2.IO.getFileTimestamp : Text ->{IO} Either IOError Nat - 196. io2.IO.getLine : Handle ->{IO} Either IOError Text - 197. io2.IO.getTempDirectory : '{IO} Either IOError Text - 198. io2.IO.getText : Handle ->{IO} Either IOError Text - 199. io2.IO.handlePosition : Handle ->{IO} Either IOError Int - 200. io2.IO.isDirectory : Text ->{IO} Either IOError Boolean - 201. io2.IO.isFileEOF : Handle ->{IO} Either IOError Boolean - 202. io2.IO.isFileOpen : Handle ->{IO} Either IOError Boolean - 203. io2.IO.isSeekable : Handle ->{IO} Either IOError Boolean - 204. io2.IO.listen : Socket ->{IO} Either IOError () - 205. io2.IO.openFile : Text ->{IO} Either IOError Handle - 206. io2.IO.putText : Handle -> Text ->{IO} Either IOError () - 207. 
io2.IO.removeDirectory : Text ->{IO} Either IOError () - 208. io2.IO.removeFile : Text ->{IO} Either IOError () - 209. io2.IO.renameDirectory : Text - -> Text - ->{IO} Either IOError () - 210. io2.IO.renameFile : Text -> Text ->{IO} Either IOError () - 211. io2.IO.seekHandle : Handle - -> FileMode - -> Int - ->{IO} Either IOError () - 212. io2.IO.serverSocket : Text - -> Text - ->{IO} Either IOError Socket - 213. io2.IO.setBuffering : Handle - -> BufferMode - ->{IO} Either IOError () - 214. io2.IO.setCurrentDirectory : Text - ->{IO} Either IOError () - 215. io2.IO.socketAccept : Socket ->{IO} Either IOError Socket - 216. io2.IO.socketReceive : Socket - -> Nat - ->{IO} Either IOError Bytes - 217. io2.IO.socketSend : Socket - -> Bytes - ->{IO} Either IOError () - 218. io2.IO.stdHandle : Nat -> Optional Handle - 219. io2.IO.systemTime : '{IO} Either IOError Nat - 220. unique type io2.IOError - 221. io2.IOError.AlreadyExists : IOError - 222. io2.IOError.EOF : IOError - 223. io2.IOError.IllegalOperation : IOError - 224. io2.IOError.NoSuchThing : IOError - 225. io2.IOError.PermissionDenied : IOError - 226. io2.IOError.ResourceBusy : IOError - 227. io2.IOError.ResourceExhausted : IOError - 228. io2.IOError.UserError : IOError - 229. builtin type io2.Socket - 230. builtin type io2.ThreadId - 231. todo : a -> b - - -.builtin> alias.many 94-104 .mylib - - Here's what changed in .mylib : - - Added definitions: - - 1. builtin type Link.Term - 2. builtin type Link.Type - 3. builtin type List - 4. Link.Term : Term -> Link - 5. Link.Type : Type -> Link - 6. List.++ : [a] -> [a] -> [a] - 7. ┌ List.+: : a -> [a] -> [a] - 8. └ List.cons : a -> [a] -> [a] - 9. List.:+ : [a] -> a -> [a] - 10. List.at : Nat -> [a] -> Optional a - 11. List.drop : Nat -> [a] -> [a] - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -I want to incorporate a few more from another namespace: -```ucm -.builtin> cd .runar - -.runar> find - - 1. List.adjacentPairs : [a] -> [(a, a)] - 2. 
List.all : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean - 3. List.any : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean - 4. List.chunk : Nat -> [a] -> [[a]] - 5. List.chunksOf : Nat -> [a] -> [[a]] - 6. List.dropWhile : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} [a] - 7. List.first : [a] -> Optional a - 8. List.init : [a] -> Optional [a] - 9. List.intersperse : a -> [a] -> [a] - 10. List.isEmpty : [a] -> Boolean - 11. List.last : [a] -> Optional a - 12. List.replicate : Nat -> a -> [a] - 13. List.splitAt : Nat -> [a] -> ([a], [a]) - 14. List.tail : [a] -> Optional [a] - 15. List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] - - -.runar> alias.many 1-15 .mylib - - Here's what changed in .mylib : - - Added definitions: - - 1. List.adjacentPairs : [a] -> [(a, a)] - 2. List.all : (a ->{𝕖} Boolean) - ->{𝕖} [a] - ->{𝕖} Boolean - 3. List.any : (a ->{𝕖} Boolean) - ->{𝕖} [a] - ->{𝕖} Boolean - 4. List.chunk : Nat -> [a] -> [[a]] - 5. List.chunksOf : Nat -> [a] -> [[a]] - 6. List.dropWhile : (a ->{𝕖} Boolean) - ->{𝕖} [a] - ->{𝕖} [a] - 7. List.first : [a] -> Optional a - 8. List.init : [a] -> Optional [a] - 9. List.intersperse : a -> [a] -> [a] - 10. List.isEmpty : [a] -> Boolean - 11. List.last : [a] -> Optional a - 12. List.replicate : Nat -> a -> [a] - 13. List.splitAt : Nat -> [a] -> ([a], [a]) - 14. List.tail : [a] -> Optional [a] - 15. List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] - - Tip: You can use `undo` or `reflog` to undo this change. - -.runar> cd .mylib - -.mylib> find - - 1. builtin type Link.Term - 2. Link.Term : Term -> Link - 3. builtin type Link.Type - 4. Link.Type : Type -> Link - 5. builtin type List - 6. List.++ : [a] -> [a] -> [a] - 7. List.+: : a -> [a] -> [a] - 8. List.:+ : [a] -> a -> [a] - 9. List.adjacentPairs : [a] -> [(a, a)] - 10. List.all : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean - 11. List.any : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean - 12. List.at : Nat -> [a] -> Optional a - 13. List.chunk : Nat -> [a] -> [[a]] - 14. 
List.chunksOf : Nat -> [a] -> [[a]] - 15. List.cons : a -> [a] -> [a] - 16. List.drop : Nat -> [a] -> [a] - 17. List.dropWhile : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} [a] - 18. List.first : [a] -> Optional a - 19. List.init : [a] -> Optional [a] - 20. List.intersperse : a -> [a] -> [a] - 21. List.isEmpty : [a] -> Boolean - 22. List.last : [a] -> Optional a - 23. List.replicate : Nat -> a -> [a] - 24. List.splitAt : Nat -> [a] -> ([a], [a]) - 25. List.tail : [a] -> Optional [a] - 26. List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] - - -``` -Thanks, `alias.many! diff --git a/unison-src/transcripts/ambiguous-metadata.md b/unison-src/transcripts/ambiguous-metadata.md deleted file mode 100644 index 09d5dfa8b3..0000000000 --- a/unison-src/transcripts/ambiguous-metadata.md +++ /dev/null @@ -1,17 +0,0 @@ - -## An example scenario that surfaces an 'ambiguous metadata' error. - -```unison:hide -foo.doc = [: a :] -boo.doc = [: b :] -x = 1 -``` - -```ucm:hide:all -.> add -``` - -```ucm:error -.> merge foo boo -.> link boo.doc x -``` \ No newline at end of file diff --git a/unison-src/transcripts/ambiguous-metadata.output.md b/unison-src/transcripts/ambiguous-metadata.output.md deleted file mode 100644 index c0d958fc6d..0000000000 --- a/unison-src/transcripts/ambiguous-metadata.output.md +++ /dev/null @@ -1,42 +0,0 @@ - -## An example scenario that surfaces an 'ambiguous metadata' error. - -```unison -foo.doc = [: a :] -boo.doc = [: b :] -x = 1 -``` - -```ucm -.> merge foo boo - - Here's what's changed in boo after the merge: - - New name conflicts: - - 1. doc#tj3gfqdnje : #v00j3buk6m - ↓ - 2. ┌ doc#d4ormokpf9 : #v00j3buk6m - 3. └ doc#tj3gfqdnje : #v00j3buk6m - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. 
- -.> link boo.doc x - - ⚠️ - - I'm not sure which metadata value you're referring to since - there are multiple matches: - - foo.doc - boo.doc#tj3gfqdnje - - Tip: Try again and supply one of the above definitions - explicitly. - - I didn't make any changes. - -``` diff --git a/unison-src/transcripts/blocks.md b/unison-src/transcripts/blocks.md deleted file mode 100644 index c7f4277090..0000000000 --- a/unison-src/transcripts/blocks.md +++ /dev/null @@ -1,177 +0,0 @@ -## Blocks and scoping - -```ucm:hide -.> builtins.merge -``` - -### Names introduced by a block shadow names introduced in outer scopes - -For example: - -```unison -ex thing = - thing y = y - -- refers to `thing` in this block - -- not the argument to `ex` - bar x = thing x + 1 - bar 42 - -> ex "hello" -``` - -### Whether a block shadows outer names doesn't depend on the order of bindings in the block - -The `thing` reference in `bar` refers to the one declared locally in the block that `bar` is part of. This is true even if the declaration which shadows the outer name appears later in the block, for instance: - -```unison -ex thing = - bar x = thing x + 1 - thing y = y - bar 42 - -> ex "hello" -``` - -### Blocks use lexical scoping and can only reference definitions in parent scopes or in the same block - -This is just the normal lexical scoping behavior. 
For example: - -```unison -ex thing = - bar x = thing x + 1 -- references outer `thing` - baz z = - thing y = y -- shadows the outer `thing` - thing z -- references the inner `thing` - bar 42 - -> ex (x -> x * 100) -``` - -Here's another example, showing that bindings cannot reference bindings declared in blocks nested in the _body_ (the final expression) of a block: - -```unison -ex thing = - bar x = thing x + 1 -- refers to outer thing - let - thing y = y - bar 42 - -> ex (x -> x * 100) -``` - -### Blocks can define one or more functions which are recursive or mutually recursive - -We call these groups of definitions that reference each other in a block _cycles_. For instance: - -```unison -sumTo n = - -- A recursive function, defined inside a block - go acc n = - if n == 0 then acc - else go (acc + n) (n `drop` 1) - go 0 n - -ex n = - -- Two mutually recursive functions, defined in a block - ping x = pong (x + 1) - pong x = ping (x + 2) - ping 42 -``` - -The `go` function is a one-element cycle (it reference itself), and `ping` and `pong` form a two-element cycle. - -### Cyclic references or forward reference must be guarded - -For instance, this works: - -```unison -ex n = - ping x = pong + 1 + x - pong = 42 - ping 0 -``` - -Since the forward reference to `pong` appears inside `ping`. - -This, however, will not compile: - -```unison:error -ex n = - pong = ping + 1 - ping = 42 - pong -``` - -This also won't compile; it's a cyclic reference that isn't guarded: - -```unison:error -ex n = - loop = loop - loop -``` - -This, however, will compile. This also shows that `'expr` is another way of guarding a definition. - -```unison -ex n = - loop = '(!loop) - !loop -``` - -Just don't try to run it as it's an infinite loop! - -### Cyclic definitions in a block don't have access to any abilities - -The reason is it's unclear what the order should be of any requests that are made. 
It can also be viewed of a special case of the restriction that elements of a cycle must all be guarded. Here's an example: - -```unison:error -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - zap1 = launchMissiles "neptune" + zap2 - zap2 = launchMissiles "pluto" + zap1 - zap1 -``` - -### The _body_ of recursive functions can certainly access abilities - -For instance, this works fine: - -```unison -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - zap1 planet = launchMissiles planet + zap2 planet - zap2 planet = launchMissiles planet + zap1 planet - zap1 "pluto" -``` - -### Unrelated definitions not part of a cycle and are moved after the cycle - -For instance, `zap` here isn't considered part of the cycle (it doesn't reference `ping` or `pong`), so this typechecks fine: - -```unison -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - ping x = pong (x + 1) - zap = launchMissiles "neptune" - pong x = ping (x + 2) - ping 42 -``` - -This is actually parsed as if you moved `zap` after the cycle it find itself a part of: - -```unison -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - ping x = pong (x + 1) - pong x = ping (x + 2) - zap = launchMissiles "neptune" - ping 42 -``` diff --git a/unison-src/transcripts/blocks.output.md b/unison-src/transcripts/blocks.output.md deleted file mode 100644 index cca31f6bd2..0000000000 --- a/unison-src/transcripts/blocks.output.md +++ /dev/null @@ -1,339 +0,0 @@ -## Blocks and scoping - -### Names introduced by a block shadow names introduced in outer scopes - -For example: - -```unison -ex thing = - thing y = y - -- refers to `thing` in this block - -- not the argument to `ex` - bar x = thing x + 1 - bar 42 - -> ex "hello" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ex : thing -> Nat - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 8 | > ex "hello" - ⧩ - 43 - -``` -### Whether a block shadows outer names doesn't depend on the order of bindings in the block - -The `thing` reference in `bar` refers to the one declared locally in the block that `bar` is part of. This is true even if the declaration which shadows the outer name appears later in the block, for instance: - -```unison -ex thing = - bar x = thing x + 1 - thing y = y - bar 42 - -> ex "hello" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ex : thing -> Nat - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 6 | > ex "hello" - ⧩ - 43 - -``` -### Blocks use lexical scoping and can only reference definitions in parent scopes or in the same block - -This is just the normal lexical scoping behavior. For example: - -```unison -ex thing = - bar x = thing x + 1 -- references outer `thing` - baz z = - thing y = y -- shadows the outer `thing` - thing z -- references the inner `thing` - bar 42 - -> ex (x -> x * 100) -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ex : (Nat ->{𝕖} Nat) ->{𝕖} Nat - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. 
- - 8 | > ex (x -> x * 100) - ⧩ - 4201 - -``` -Here's another example, showing that bindings cannot reference bindings declared in blocks nested in the _body_ (the final expression) of a block: - -```unison -ex thing = - bar x = thing x + 1 -- refers to outer thing - let - thing y = y - bar 42 - -> ex (x -> x * 100) -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ex : (Nat ->{𝕖} Nat) ->{𝕖} Nat - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 7 | > ex (x -> x * 100) - ⧩ - 4201 - -``` -### Blocks can define one or more functions which are recursive or mutually recursive - -We call these groups of definitions that reference each other in a block _cycles_. For instance: - -```unison -sumTo n = - -- A recursive function, defined inside a block - go acc n = - if n == 0 then acc - else go (acc + n) (n `drop` 1) - go 0 n - -ex n = - -- Two mutually recursive functions, defined in a block - ping x = pong (x + 1) - pong x = ping (x + 2) - ping 42 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ex : n -> 𝕣 - sumTo : Nat -> Nat - -``` -The `go` function is a one-element cycle (it reference itself), and `ping` and `pong` form a two-element cycle. - -### Cyclic references or forward reference must be guarded - -For instance, this works: - -```unison -ex n = - ping x = pong + 1 + x - pong = 42 - ping 0 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ex : n -> Nat - -``` -Since the forward reference to `pong` appears inside `ping`. 
- -This, however, will not compile: - -```unison -ex n = - pong = ping + 1 - ping = 42 - pong -``` - -```ucm - - These definitions depend on each other cyclically but aren't guarded by a lambda: pong9 - 2 | pong = ping + 1 - 3 | ping = 42 - - -``` -This also won't compile; it's a cyclic reference that isn't guarded: - -```unison -ex n = - loop = loop - loop -``` - -```ucm - - These definitions depend on each other cyclically but aren't guarded by a lambda: loop9 - 2 | loop = loop - - -``` -This, however, will compile. This also shows that `'expr` is another way of guarding a definition. - -```unison -ex n = - loop = '(!loop) - !loop -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ex : n -> 𝕣 - -``` -Just don't try to run it as it's an infinite loop! - -### Cyclic definitions in a block don't have access to any abilities - -The reason is it's unclear what the order should be of any requests that are made. It can also be viewed of a special case of the restriction that elements of a cycle must all be guarded. Here's an example: - -```unison -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - zap1 = launchMissiles "neptune" + zap2 - zap2 = launchMissiles "pluto" + zap1 - zap1 -``` - -```ucm - - These definitions depend on each other cyclically but aren't guarded by a lambda: zap19, zap210 - 5 | zap1 = launchMissiles "neptune" + zap2 - 6 | zap2 = launchMissiles "pluto" + zap1 - - -``` -### The _body_ of recursive functions can certainly access abilities - -For instance, this works fine: - -```unison -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - zap1 planet = launchMissiles planet + zap2 planet - zap2 planet = launchMissiles planet + zap1 planet - zap1 "pluto" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ability SpaceAttack - ex : n ->{SpaceAttack} Nat - -``` -### Unrelated definitions not part of a cycle and are moved after the cycle - -For instance, `zap` here isn't considered part of the cycle (it doesn't reference `ping` or `pong`), so this typechecks fine: - -```unison -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - ping x = pong (x + 1) - zap = launchMissiles "neptune" - pong x = ping (x + 2) - ping 42 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ability SpaceAttack - ex : n ->{SpaceAttack} 𝕣 - -``` -This is actually parsed as if you moved `zap` after the cycle it find itself a part of: - -```unison -ability SpaceAttack where - launchMissiles : Text -> Nat - -ex n = - ping x = pong (x + 1) - pong x = ping (x + 2) - zap = launchMissiles "neptune" - ping 42 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ability SpaceAttack - ex : n ->{SpaceAttack} 𝕣 - -``` diff --git a/unison-src/transcripts/builtins-merge.md b/unison-src/transcripts/builtins-merge.md deleted file mode 100644 index 28bfb426ca..0000000000 --- a/unison-src/transcripts/builtins-merge.md +++ /dev/null @@ -1,6 +0,0 @@ -The `builtins.merge` command adds the known builtins to a `builtin` subnamespace within the current namespace. 
- -```ucm -.tmp> builtins.merge -.tmp> ls builtin -``` diff --git a/unison-src/transcripts/builtins-merge.output.md b/unison-src/transcripts/builtins-merge.output.md deleted file mode 100644 index fe37524d90..0000000000 --- a/unison-src/transcripts/builtins-merge.output.md +++ /dev/null @@ -1,50 +0,0 @@ -The `builtins.merge` command adds the known builtins to a `builtin` subnamespace within the current namespace. - -```ucm - ☝️ The namespace .tmp is empty. - -.tmp> builtins.merge - - Done. - -.tmp> ls builtin - - 1. Boolean (builtin type) - 2. Boolean/ (1 definition) - 3. Bytes (builtin type) - 4. Bytes/ (9 definitions) - 5. Char (builtin type) - 6. Char/ (2 definitions) - 7. Debug/ (1 definition) - 8. Doc (type) - 9. Doc/ (6 definitions) - 10. Either (type) - 11. Either/ (2 definitions) - 12. Float (builtin type) - 13. Float/ (36 definitions) - 14. Int (builtin type) - 15. Int/ (28 definitions) - 16. Link (type) - 17. Link/ (4 definitions) - 18. List (builtin type) - 19. List/ (10 definitions) - 20. Nat (builtin type) - 21. Nat/ (27 definitions) - 22. Optional (type) - 23. Optional/ (2 definitions) - 24. Request (builtin type) - 25. SeqView (type) - 26. SeqView/ (2 definitions) - 27. Test/ (3 definitions) - 28. Text (builtin type) - 29. Text/ (15 definitions) - 30. Tuple (type) - 31. Tuple/ (1 definition) - 32. Unit (type) - 33. Unit/ (1 definition) - 34. Universal/ (6 definitions) - 35. bug (a -> b) - 36. io2/ (57 definitions) - 37. 
todo (a -> b) - -``` diff --git a/unison-src/transcripts/bytesFromList.md b/unison-src/transcripts/bytesFromList.md deleted file mode 100644 index 9da15329f3..0000000000 --- a/unison-src/transcripts/bytesFromList.md +++ /dev/null @@ -1,11 +0,0 @@ - -```ucm:hide -.> builtins.merge -``` - -This should render as `Bytes.fromList [1,2,3,4]`, not `##Bytes.fromSequence [1,2,3,4]`: - -```unison -> Bytes.fromList [1,2,3,4] -``` - diff --git a/unison-src/transcripts/bytesFromList.output.md b/unison-src/transcripts/bytesFromList.output.md deleted file mode 100644 index 9ba8af1b32..0000000000 --- a/unison-src/transcripts/bytesFromList.output.md +++ /dev/null @@ -1,21 +0,0 @@ - -This should render as `Bytes.fromList [1,2,3,4]`, not `##Bytes.fromSequence [1,2,3,4]`: - -```unison -> Bytes.fromList [1,2,3,4] -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > Bytes.fromList [1,2,3,4] - ⧩ - fromList [1, 2, 3, 4] - -``` diff --git a/unison-src/transcripts/cd-back.md b/unison-src/transcripts/cd-back.md deleted file mode 100644 index 7a7df60eb5..0000000000 --- a/unison-src/transcripts/cd-back.md +++ /dev/null @@ -1,46 +0,0 @@ -## Switching between namespaces / projects / branches / modules - -Unison uses the same organizational element to represent directories, projects, sub-projects, forks, modules, etc.; currently called a "namespace". - -Namespaces are trees that contain definitions of "types" and "terms", "patches", and other child namespaces. 
- -We're still working out what a nice codebase layout might be (feel free to write up a blog post if you find one that works well for you), but in this example, we have these, along with their children (not shown): - -> .libs.base -> .libs.megaparser.master -> .libs.megaparser.v1 -> .libs.megaparser.v2 -> .arya.base -> .arya.myproject -> .pullrequests.runarorama.base_3.base -> .pullrequests.runarorama.base_3.head -> .pullrequests.runarorama.base_3.merged -> .temp - -```ucm:hide -.> builtins.merge -.> move.namespace builtin .arya.base -``` - -```ucm -.> cd arya.base -.arya.base> find Boolean -``` -```ucm:hide -.arya.base> cd .arya.myproject -``` - -blah blah blah more stuff about project management and patches and the value of working from the appropriate namespace, and what that is in any given case - -We can pop back to the previous namespace with the `back` command. - -```ucm:hide -.arya.myproject> back -``` -```ucm:hide -.arya.base> back -``` -```ucm:error -.> back -``` -😬 Right, ok. diff --git a/unison-src/transcripts/cd-back.output.md b/unison-src/transcripts/cd-back.output.md deleted file mode 100644 index 9b89c23353..0000000000 --- a/unison-src/transcripts/cd-back.output.md +++ /dev/null @@ -1,40 +0,0 @@ -## Switching between namespaces / projects / branches / modules - -Unison uses the same organizational element to represent directories, projects, sub-projects, forks, modules, etc.; currently called a "namespace". - -Namespaces are trees that contain definitions of "types" and "terms", "patches", and other child namespaces. 
- -We're still working out what a nice codebase layout might be (feel free to write up a blog post if you find one that works well for you), but in this example, we have these, along with their children (not shown): - -> .libs.base -> .libs.megaparser.master -> .libs.megaparser.v1 -> .libs.megaparser.v2 -> .arya.base -> .arya.myproject -> .pullrequests.runarorama.base_3.base -> .pullrequests.runarorama.base_3.head -> .pullrequests.runarorama.base_3.merged -> .temp - -```ucm -.> cd arya.base - -.arya.base> find Boolean - - 1. builtin type Boolean - 2. Boolean.not : Boolean -> Boolean - - -``` -blah blah blah more stuff about project management and patches and the value of working from the appropriate namespace, and what that is in any given case - -We can pop back to the previous namespace with the `back` command. - -```ucm -.> back - - You're already at the very beginning! 🙂 - -``` -😬 Right, ok. diff --git a/unison-src/transcripts/check763.md b/unison-src/transcripts/check763.md deleted file mode 100644 index 3bb162b344..0000000000 --- a/unison-src/transcripts/check763.md +++ /dev/null @@ -1,17 +0,0 @@ -Regression test for https://github.com/unisonweb/unison/issues/763 - -```ucm:hide -.> builtins.merge -``` - -```unison -(+-+) : Nat -> Nat -> Nat -(+-+) x y = x * y -``` - -```ucm -.> add -.> move.term +-+ boppitybeep -.> move.term boppitybeep +-+ -``` - diff --git a/unison-src/transcripts/check763.output.md b/unison-src/transcripts/check763.output.md deleted file mode 100644 index 568f1d330b..0000000000 --- a/unison-src/transcripts/check763.output.md +++ /dev/null @@ -1,34 +0,0 @@ -Regression test for https://github.com/unisonweb/unison/issues/763 - -```unison -(+-+) : Nat -> Nat -> Nat -(+-+) x y = x * y -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - +-+ : Nat -> Nat -> Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - +-+ : Nat -> Nat -> Nat - -.> move.term +-+ boppitybeep - - Done. - -.> move.term boppitybeep +-+ - - Done. - -``` diff --git a/unison-src/transcripts/check873.md b/unison-src/transcripts/check873.md deleted file mode 100644 index 7145186286..0000000000 --- a/unison-src/transcripts/check873.md +++ /dev/null @@ -1,17 +0,0 @@ -See [this ticket](https://github.com/unisonweb/unison/issues/873); the point being, this shouldn't crash the runtime. :) - -```ucm:hide -.> builtins.merge -``` - -```unison -(-) = builtin.Nat.sub -``` - -```ucm -.> add -``` - -```unison -baz x = x - 1 -``` diff --git a/unison-src/transcripts/check873.output.md b/unison-src/transcripts/check873.output.md deleted file mode 100644 index 8935c2336f..0000000000 --- a/unison-src/transcripts/check873.output.md +++ /dev/null @@ -1,40 +0,0 @@ -See [this ticket](https://github.com/unisonweb/unison/issues/873); the point being, this shouldn't crash the runtime. :) - -```unison -(-) = builtin.Nat.sub -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - - : Nat -> Nat -> Int - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - - : Nat -> Nat -> Int - -``` -```unison -baz x = x - 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - baz : Nat -> Int - -``` diff --git a/unison-src/transcripts/copy-patch.md b/unison-src/transcripts/copy-patch.md deleted file mode 100644 index 11f54e99fc..0000000000 --- a/unison-src/transcripts/copy-patch.md +++ /dev/null @@ -1,39 +0,0 @@ -# Test that copying a patch works as expected - -```unison -x = 1 -``` - -```ucm -.> add -``` - -Change the definition of `x` so something goes in our patch: - -```unison -x = 2 -``` - -```ucm -.> update foo.patch -``` - -Copy the patch and make sure it's still there. - -```ucm -.> copy.patch foo.patch bar.patch -.> view.patch foo.patch -.> view.patch bar.patch -``` - -Now move the patch. - -```ucm -.> move.patch foo.patch qux.patch -``` - -The moved patch should be gone. - -```ucm -.> view.patch foo.patch -``` diff --git a/unison-src/transcripts/copy-patch.output.md b/unison-src/transcripts/copy-patch.output.md deleted file mode 100644 index 2fc3645c30..0000000000 --- a/unison-src/transcripts/copy-patch.output.md +++ /dev/null @@ -1,91 +0,0 @@ -# Test that copying a patch works as expected - -```unison -x = 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : ##Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - x : ##Nat - -``` -Change the definition of `x` so something goes in our patch: - -```unison -x = 2 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - x : ##Nat - -``` -```ucm -.> update foo.patch - - ⍟ I've updated these names to your new definition: - - x : ##Nat - -``` -Copy the patch and make sure it's still there. 
- -```ucm -.> copy.patch foo.patch bar.patch - - Done. - -.> view.patch foo.patch - - Edited Terms: #jk19sm5bf8 -> x - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -.> view.patch bar.patch - - Edited Terms: #jk19sm5bf8 -> x - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -``` -Now move the patch. - -```ucm -.> move.patch foo.patch qux.patch - - Done. - -``` -The moved patch should be gone. - -```ucm -.> view.patch foo.patch - - This patch is empty. - -``` diff --git a/unison-src/transcripts/create-author.md b/unison-src/transcripts/create-author.md deleted file mode 100644 index 69b45b19cd..0000000000 --- a/unison-src/transcripts/create-author.md +++ /dev/null @@ -1,17 +0,0 @@ -```ucm:hide -.> builtins.mergeio -``` - -Demonstrating `create.author`: - -```unison:hide -def1 = 1 -def2 = 2 -``` - -```ucm -.foo> add -.foo> create.author alicecoder "Alice McGee" -.foo> view 3 -.foo> link metadata.authors.alicecoder def1 def2 -``` diff --git a/unison-src/transcripts/create-author.output.md b/unison-src/transcripts/create-author.output.md deleted file mode 100644 index 69d0df22cd..0000000000 --- a/unison-src/transcripts/create-author.output.md +++ /dev/null @@ -1,44 +0,0 @@ -Demonstrating `create.author`: - -```unison -def1 = 1 -def2 = 2 -``` - -```ucm - ☝️ The namespace .foo is empty. - -.foo> add - - ⍟ I've added these definitions: - - def1 : Nat - def2 : Nat - -.foo> create.author alicecoder "Alice McGee" - - Added definitions: - - 1. metadata.authors.alicecoder : Author - 2. metadata.authors.alicecoder.guid : GUID - 3. metadata.copyrightHolders.alicecoder : CopyrightHolder - - Tip: Add License values for alicecoder under metadata. 
- -.foo> view 3 - - metadata.copyrightHolders.alicecoder : CopyrightHolder - metadata.copyrightHolders.alicecoder = - CopyrightHolder alicecoder.guid "Alice McGee" - -.foo> link metadata.authors.alicecoder def1 def2 - - Updates: - - 1. foo.def1 : Nat - + 2. authors.alicecoder : Author - - 3. foo.def2 : Nat - + 4. authors.alicecoder : Author - -``` diff --git a/unison-src/transcripts/delete.md b/unison-src/transcripts/delete.md deleted file mode 100644 index 4fcf029d5f..0000000000 --- a/unison-src/transcripts/delete.md +++ /dev/null @@ -1,100 +0,0 @@ -# Delete - -```ucm:hide -.> builtins.merge -``` - -The delete command can delete both terms and types. - -First, let's make sure it complains when we try to delete a name that doesn't -exist. - -```ucm:error -.> delete foo -``` - -Now for some easy cases. Deleting an unambiguous term, then deleting an -unambiguous type. - -```unison:hide -foo = 1 -type Foo = Foo Nat -``` - -```ucm -.> add -.> delete foo -.> delete Foo -.> delete Foo.Foo -``` - -How about an ambiguous term? - -```unison:hide -foo = 1 -``` - -```ucm -.a> add -``` - -```unison:hide -foo = 2 -``` - -```ucm -.b> add -.a> merge .b -``` - -A delete should remove both versions of the term. - -```ucm -.a> delete foo -``` - -```ucm:error -.a> ls -``` - -Let's repeat all that on a type, for completeness. - -```unison:hide -type Foo = Foo Nat -``` - -```ucm -.a> add -``` - -```unison:hide -type Foo = Foo Boolean -``` - -```ucm -.b> add -.a> merge .b -``` - -```ucm -.a> delete Foo -``` - -```ucm -.a> delete Foo.Foo -``` - -Finally, let's try to delete a term and a type with the same name. 
- -```unison:hide -foo = 1 -type foo = Foo Nat -``` - -```ucm -.> add -``` - -```ucm -.> delete foo -``` diff --git a/unison-src/transcripts/delete.output.md b/unison-src/transcripts/delete.output.md deleted file mode 100644 index 61af13bc5f..0000000000 --- a/unison-src/transcripts/delete.output.md +++ /dev/null @@ -1,237 +0,0 @@ -# Delete - -The delete command can delete both terms and types. - -First, let's make sure it complains when we try to delete a name that doesn't -exist. - -```ucm -.> delete foo - - ⚠️ - - I don't know about that name. - -``` -Now for some easy cases. Deleting an unambiguous term, then deleting an -unambiguous type. - -```unison -foo = 1 -type Foo = Foo Nat -``` - -```ucm -.> add - - ⍟ I've added these definitions: - - type Foo - foo : Nat - -.> delete foo - - Removed definitions: - - 1. foo : Nat - - Tip: You can use `undo` or `reflog` to undo this change. - -.> delete Foo - - Removed definitions: - - 1. type Foo - - Tip: You can use `undo` or `reflog` to undo this change. - -.> delete Foo.Foo - - Removed definitions: - - 1. Foo.Foo : Nat -> #d97e0jhkmd - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -How about an ambiguous term? - -```unison -foo = 1 -``` - -```ucm - ☝️ The namespace .a is empty. - -.a> add - - ⍟ I've added these definitions: - - foo : Nat - -``` -```unison -foo = 2 -``` - -```ucm - ☝️ The namespace .b is empty. - -.b> add - - ⍟ I've added these definitions: - - foo : Nat - -.a> merge .b - - Here's what's changed in the current namespace after the - merge: - - New name conflicts: - - 1. foo#jk19sm5bf8 : Nat - ↓ - 2. ┌ foo#0ja1qfpej6 : Nat - 3. └ foo#jk19sm5bf8 : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -A delete should remove both versions of the term. - -```ucm -.a> delete foo - - Removed definitions: - - 1. 
a.foo#jk19sm5bf8 : Nat - - Name changes: - - Original Changes - 2. a.foo#0ja1qfpej6 ┐ 3. a.foo#0ja1qfpej6 (removed) - 4. b.foo ┘ - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -```ucm -.a> ls - - nothing to show - -``` -Let's repeat all that on a type, for completeness. - -```unison -type Foo = Foo Nat -``` - -```ucm -.a> add - - ⍟ I've added these definitions: - - type Foo - -``` -```unison -type Foo = Foo Boolean -``` - -```ucm -.b> add - - ⍟ I've added these definitions: - - type Foo - -.a> merge .b - - Here's what's changed in the current namespace after the - merge: - - New name conflicts: - - 1. type Foo#d97e0jhkmd - - ↓ - 2. ┌ type Foo#d97e0jhkmd - - 3. └ type Foo#gq9inhvg9h - - - 4. Foo.Foo#d97e0jhkmd#0 : Nat -> Foo#d97e0jhkmd - ↓ - 5. ┌ Foo.Foo#d97e0jhkmd#0 : Nat -> Foo#d97e0jhkmd - 6. └ Foo.Foo#gq9inhvg9h#0 : Boolean -> Foo#gq9inhvg9h - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -```ucm -.a> delete Foo - - Removed definitions: - - 1. type a.Foo#d97e0jhkmd - - Name changes: - - Original Changes - 2. a.Foo#gq9inhvg9h ┐ 3. a.Foo#gq9inhvg9h (removed) - 4. b.Foo ┘ - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -```ucm -.a> delete Foo.Foo - - Removed definitions: - - 1. a.Foo.Foo#d97e0jhkmd#0 : Nat -> #d97e0jhkmd - - Name changes: - - Original Changes - 2. a.Foo.Foo#gq9inhvg9h#0 ┐ 3. a.Foo.Foo#gq9inhvg9h#0 (removed) - 4. b.Foo.Foo ┘ - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -Finally, let's try to delete a term and a type with the same name. - -```unison -foo = 1 -type foo = Foo Nat -``` - -```ucm -.> add - - ⍟ I've added these definitions: - - type foo - foo : Nat - -``` -```ucm -.> delete foo - - Removed definitions: - - 1. type foo - 2. foo : Nat - - Tip: You can use `undo` or `reflog` to undo this change. 
- -``` diff --git a/unison-src/transcripts/deleteReplacements.md b/unison-src/transcripts/deleteReplacements.md deleted file mode 100644 index f08f81fca2..0000000000 --- a/unison-src/transcripts/deleteReplacements.md +++ /dev/null @@ -1,46 +0,0 @@ -# Deleting term and type replacements from patches - -```unison -x = 1 -``` - -```ucm -.> add -``` - -```unison -x = 2 -``` - -```ucm -.> update -.> view.patch -``` - -```ucm -.> delete.term-replacement #jk19 -.> view.patch -``` - -```unison -type Foo = Foo -``` - -```ucm -.> add -``` - -```unison -type Foo = Foo | Bar -``` - -```ucm -.> update -.> view.patch -``` - -```ucm -.> delete.type-replacement #568rsi7o3g -.> view.patch -``` - diff --git a/unison-src/transcripts/deleteReplacements.output.md b/unison-src/transcripts/deleteReplacements.output.md deleted file mode 100644 index 4def203f84..0000000000 --- a/unison-src/transcripts/deleteReplacements.output.md +++ /dev/null @@ -1,132 +0,0 @@ -# Deleting term and type replacements from patches - -```unison -x = 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : ##Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - x : ##Nat - -``` -```unison -x = 2 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - x : ##Nat - -``` -```ucm -.> update - - ⍟ I've updated these names to your new definition: - - x : ##Nat - -.> view.patch - - Edited Terms: x#jk19sm5bf8 -> x - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -``` -```ucm -.> delete.term-replacement #jk19 - - Done. - -.> view.patch - - This patch is empty. 
- -``` -```unison -type Foo = Foo -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - type Foo - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - type Foo - -``` -```unison -type Foo = Foo | Bar -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - type Foo - -``` -```ucm -.> update - - ⍟ I've updated these names to your new definition: - - type Foo - -.> view.patch - - Edited Types: Foo#568rsi7o3g -> Foo - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -``` -```ucm -.> delete.type-replacement #568rsi7o3g - - Done. - -.> view.patch - - This patch is empty. - -``` diff --git a/unison-src/transcripts/dependents-dependencies-debugfile.md b/unison-src/transcripts/dependents-dependencies-debugfile.md deleted file mode 100644 index 0bebc6f1cb..0000000000 --- a/unison-src/transcripts/dependents-dependencies-debugfile.md +++ /dev/null @@ -1,38 +0,0 @@ -```ucm:hide -.> builtins.merge -``` - -### `debug.file` -I can use `debug.file` to see the hashes of the last typechecked file. - -Given this .u file: -```unison:hide -type outside.A = A Nat outside.B -type outside.B = B Int -outside.c = 3 -outside.d = c < (p + 1) - -type inside.M = M outside.A -inside.p = c -inside.q x = x + p * p -inside.r = d -``` -```ucm -.> debug.file -``` - -This will help me make progress in some situations when UCM is being deficient or broken. - -### `dependents` / `dependencies` -But wait, there's more. 
I can check the dependencies and dependents of a definition: -```ucm -.> add -.> dependents q -.> dependencies q -.> dependencies B -.> dependencies d -.> dependents d -.> -``` - -We don't have an index for dependents of constructors, but iirc if you ask for that, it will show you dependents of the type that provided the constructor. diff --git a/unison-src/transcripts/dependents-dependencies-debugfile.output.md b/unison-src/transcripts/dependents-dependencies-debugfile.output.md deleted file mode 100644 index b86c321eb4..0000000000 --- a/unison-src/transcripts/dependents-dependencies-debugfile.output.md +++ /dev/null @@ -1,93 +0,0 @@ -### `debug.file` -I can use `debug.file` to see the hashes of the last typechecked file. - -Given this .u file: -```unison -type outside.A = A Nat outside.B -type outside.B = B Int -outside.c = 3 -outside.d = c < (p + 1) - -type inside.M = M outside.A -inside.p = c -inside.q x = x + p * p -inside.r = d -``` - -```ucm -.> debug.file - - type inside.M#4idrjau939 - type outside.A#0n4pbd0q9u - type outside.B#muulibntaq - inside.p#fiupm7pl7o - inside.q#l5pndeifuh - inside.r#im2kiu2hmn - outside.c#msp7bv40rv - outside.d#6cdi7g1oi2 - -``` -This will help me make progress in some situations when UCM is being deficient or broken. - -### `dependents` / `dependencies` -But wait, there's more. I can check the dependencies and dependents of a definition: -```ucm -.> add - - ⍟ I've added these definitions: - - type inside.M - type outside.A - type outside.B - inside.p : Nat - inside.q : Nat -> Nat - inside.r : Boolean - outside.c : Nat - outside.d : Boolean - -.> dependents q - - #l5pndeifuh doesn't have any dependents. - -.> dependencies q - - Dependencies of #l5pndeifuh: - - Reference Name - 1. ##Nat.* builtin.Nat.* - 2. ##Nat.+ builtin.Nat.+ - 3. #fiupm7pl7o inside.p - -.> dependencies B - - Dependencies of #muulibntaq: - - Reference Name - 1. ##Int builtin.Int - - Dependencies of #muulibntaq#0: - - Reference Name - 1. ##Int builtin.Int - 2. 
#muulibntaq outside.B - -.> dependencies d - - Dependencies of #6cdi7g1oi2: - - Reference Name - 1. ##Nat builtin.Nat - 2. ##Nat.+ builtin.Nat.+ - 3. ##Universal.< builtin.Universal.< - 4. #fiupm7pl7o inside.p - 5. #msp7bv40rv outside.c - -.> dependents d - - Dependents of #6cdi7g1oi2: - - Reference Name - 1. #im2kiu2hmn inside.r - -``` -We don't have an index for dependents of constructors, but iirc if you ask for that, it will show you dependents of the type that provided the constructor. diff --git a/unison-src/transcripts/diff.md b/unison-src/transcripts/diff.md deleted file mode 100644 index 4d08c841f3..0000000000 --- a/unison-src/transcripts/diff.md +++ /dev/null @@ -1,201 +0,0 @@ -```ucm:hide -.> builtins.mergeio -``` - -```unison:hide -x = 23 -``` - -```ucm -.b1> add -.b1> alias.term x fslkdjflskdjflksjdf -.> fork b1 b2 -.b2> alias.term x abc -``` -```unison:hide -fslkdjflskdjflksjdf = 663 -``` -```ucm -.b0> add -.> merge b0 b1 -.> diff.namespace b1 b2 -``` -Things we want to test: - -* Diffing identical namespaces -* Adds, removes, updates (with and without metadata updates) - * Adds with multiple names - * Adds with multiple names and different metadata on each -* Moved and copied definitions - * Moves that have more that 1 initial or final name -* ... terms and types -* New patches, modified patches, deleted patches, moved patches -* With and without propagated updates - -```unison:hide -fromJust = 1 -b = 2 -bdependent = b -c = 3 -helloWorld = '(printLine "Hello, world!") - -type A a = A Nat -ability X a1 a2 where x : Nat -``` - -```ucm -.ns1> add -.ns1> alias.term fromJust fromJust' -.ns1> alias.term helloWorld helloWorld2 -.ns1> link b fromJust -.ns1> fork .ns1 .ns2 -.ns1> cd . 
-``` -Here's what we've done so far: -```ucm -.> diff.namespace nothing ns1 -.> diff.namespace ns1 ns2 -``` - -```unison:hide -fromJust = "asldkfjasldkfj" -``` - -```ucm -.ns1b> add -.> merge ns1b ns1 -``` - -```unison:hide -fromJust = 99 -b = "oog" -d = 4 -e = 5 -f = 6 -unique type Y a b = Y a b -``` - -```ucm -.ns2> update -.ns2> links fromJust -.> diff.namespace ns1 ns2 -.> alias.term ns2.d ns2.d' -.> alias.type ns2.A ns2.A' -.> alias.type ns2.X ns2.X' -.> diff.namespace ns1 ns2 -.> link ns1.c ns2.f -.> link ns2.c ns2.c -.> diff.namespace ns1 ns2 -.> unlink ns2.b ns2.fromJust -.> diff.namespace ns1 ns2 -.> alias.type ns1.X ns1.X2 -.> alias.type ns2.A' ns2.A'' -.> view.patch ns2.patch -.> fork ns2 ns3 -.> alias.term ns2.fromJust' ns2.yoohoo -.> delete.term ns2.fromJust' -.> diff.namespace ns3 ns2 -``` -```unison:hide -bdependent = "banana" -``` -```ucm -.ns3> update -.> diff.namespace ns2 ns3 -``` - - -## Two different auto-propagated changes creating a name conflict -Currently, the auto-propagated name-conflicted definitions are not explicitly -shown, only their also-conflicted dependency is shown. -```unison:hide -a = 333 -b = a + 1 -``` -```ucm -.nsx> add -.> fork nsx nsy -.> fork nsx nsz -``` -```unison:hide -a = 444 -``` -```ucm -.nsy> update -``` -```unison:hide -a = 555 -``` -```ucm -.nsz> update -.> merge nsy nsw -``` -```ucm:error -.> merge nsz nsw -``` -```ucm -.> diff.namespace nsx nsw -.nsw> view a b -``` -```unison -a = 777 -``` - -```ucm:error -.nsw> update -.nsw> view a b -``` - -## - -Updates: -- 1 to 1 - -New name conflicts: -- updates where RHS has multiple hashes (excluding when RHS=LHS) - - 1. foo#jk19sm5bf8 : Nat - do we want to force a hashqualified? Arya thinks so - ↓ - 2. ┌ foo#0ja1qfpej6 : Nat - 3. └ foo#jk19sm5bf8 : Nat - -Resolved name conflicts: -- updates where LHS had multiple hashes and RHS has one - - 4. ┌ bar#0ja1qfpej6 : Nat - 5. └ bar#jk19sm5bf8 : Nat - ↓ - 6. 
bar#jk19sm5bf8 : Nat - -## Display issues to fixup - -- [d] Do we want to surface new edit conflicts in patches? -- [t] two different auto-propagated changes creating a name conflict should show - up somewhere besides the auto-propagate count -- [t] Things look screwy when the type signature doesn't fit and has to get broken - up into multiple lines. Maybe just disallow that? -- [d] Delete blank line in between copies / renames entries if all entries are 1 to 1 - see todo in the code -- [x] incorrectly calculated bracket alignment on hashqualified "Name changes" (delete.output.md) -- [x] just handle deletion of isPropagated in propagate function, leave HandleInput alone (assuming this does the trick) -- [x] might want unqualified names to be qualified sometimes: -- [x] if a name is updated to a not-yet-named reference, it's shown as both an update and an add -- [x] similarly, if a conflicted name is resolved by deleting the last name to - a reference, I (arya) suspect it will show up as a Remove -- [d] Maybe group and/or add headings to the types, constructors, terms -- [x] check whether creating a name conflict + adding metadata puts the update - in both categories; if it does, then filter out metadataUpdates from the - other categories -- [x] add tagging of propagated updates to test propagated updates output -- [x] missing old names in deletion ppe (delete.output.md) (superseded by \#1143) -- [x] delete.term has some bonkers output -- [x] Make a decision about how we want to show constructors in the diff -- [x] When you delete a name with metadata, it also shows up in updates section - with the deleted metadata. 
-- [x] An add with new metadata is getting characterized as an update -- [x] can there be a metadata-only update where it's not a singleton old and new reference -- [x] 12.patch patch needs a space -- [x] This looks like garbage -- [x] Extra 2 blank lines at the end of the add section -- [x] Fix alignment issues with buildTable, convert to column3M (to be written) -- [x] adding an alias is showing up as an Add and a Copy; should just show as Copy -- [x] removing one of multiple aliases appears in removes + moves + copies section -- [x] some overlapping cases between Moves and Copies^ -- [x] Maybe don't list the type signature twice for aliases? diff --git a/unison-src/transcripts/diff.output.md b/unison-src/transcripts/diff.output.md deleted file mode 100644 index 7420b0c7b2..0000000000 --- a/unison-src/transcripts/diff.output.md +++ /dev/null @@ -1,733 +0,0 @@ -```unison -x = 23 -``` - -```ucm - ☝️ The namespace .b1 is empty. - -.b1> add - - ⍟ I've added these definitions: - - x : Nat - -.b1> alias.term x fslkdjflskdjflksjdf - - Done. - -.> fork b1 b2 - - Done. - -.b2> alias.term x abc - - Done. - -``` -```unison -fslkdjflskdjflksjdf = 663 -``` - -```ucm - ☝️ The namespace .b0 is empty. - -.b0> add - - ⍟ I've added these definitions: - - fslkdjflskdjflksjdf : Nat - -.> merge b0 b1 - - Here's what's changed in b1 after the merge: - - New name conflicts: - - 1. fslkdjflskdjflksjdf#4kipsv2tm6 : Nat - ↓ - 2. ┌ fslkdjflskdjflksjdf#4kipsv2tm6 : Nat - 3. └ fslkdjflskdjflksjdf#s5tu4n7rlb : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.> diff.namespace b1 b2 - - Resolved name conflicts: - - 1. ┌ fslkdjflskdjflksjdf#4kipsv2tm6 : Nat - 2. └ fslkdjflskdjflksjdf#s5tu4n7rlb : Nat - ↓ - 3. fslkdjflskdjflksjdf#4kipsv2tm6 : Nat - - Name changes: - - Original Changes - 4. fslkdjflskdjflksjdf#4kipsv2tm6 ┐ 5. abc (added) - 6. x ┘ 7. 
fslkdjflskdjflksjdf (added) - 8. fslkdjflskdjflksjdf#4kipsv2tm6 (removed) - -``` -Things we want to test: - -* Diffing identical namespaces -* Adds, removes, updates (with and without metadata updates) - * Adds with multiple names - * Adds with multiple names and different metadata on each -* Moved and copied definitions - * Moves that have more that 1 initial or final name -* ... terms and types -* New patches, modified patches, deleted patches, moved patches -* With and without propagated updates - -```unison -fromJust = 1 -b = 2 -bdependent = b -c = 3 -helloWorld = '(printLine "Hello, world!") - -type A a = A Nat -ability X a1 a2 where x : Nat -``` - -```ucm - ☝️ The namespace .ns1 is empty. - -.ns1> add - - ⍟ I've added these definitions: - - type A a - ability X a1 a2 - b : Nat - bdependent : Nat - c : Nat - fromJust : Nat - helloWorld : '{io.IO} () - -.ns1> alias.term fromJust fromJust' - - Done. - -.ns1> alias.term helloWorld helloWorld2 - - Done. - -.ns1> link b fromJust - - Updates: - - 1. ns1.fromJust : Nat - + 2. b : Nat - - 3. ns1.fromJust' : Nat - + 4. b : Nat - -.ns1> fork .ns1 .ns2 - - Done. - -.ns1> cd . - -``` -Here's what we've done so far: -```ucm -.> diff.namespace nothing ns1 - - Added definitions: - - 1. type A a - 2. ability X a1 a2 - 3. A.A : Nat -> A a - 4. X.x : {X a1 a2} Nat - 5. b : Nat - 6. bdependent : Nat - 7. c : Nat - 8. ┌ fromJust : Nat (+1 metadata) - 9. └ fromJust' : Nat (+1 metadata) - 10. ┌ helloWorld : '{io.IO} () - 11. └ helloWorld2 : '{io.IO} () - -.> diff.namespace ns1 ns2 - - The namespaces are identical. - -``` -```unison -fromJust = "asldkfjasldkfj" -``` - -```ucm - ☝️ The namespace .ns1b is empty. - -.ns1b> add - - ⍟ I've added these definitions: - - fromJust : Text - -.> merge ns1b ns1 - - Here's what's changed in ns1 after the merge: - - New name conflicts: - - 1. fromJust#jk19sm5bf8 : Nat - ↓ - 2. ┌ fromJust#hs2i9lcgkd : Text - 3. 
└ fromJust#jk19sm5bf8 : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -```unison -fromJust = 99 -b = "oog" -d = 4 -e = 5 -f = 6 -unique type Y a b = Y a b -``` - -```ucm -.ns2> update - - ⍟ I've added these definitions: - - unique type Y a b - d : Nat - e : Nat - f : Nat - - ⍟ I've updated these names to your new definition: - - b : Text - fromJust : Nat - (The old definition was also named fromJust'. I updated - this name too.) - -.ns2> links fromJust - - 1. b : Text - - Tip: Try using `display 1` to display the first result or - `view 1` to view its source. - -.> diff.namespace ns1 ns2 - - Resolved name conflicts: - - 1. ┌ fromJust#hs2i9lcgkd : Text - 2. └ fromJust#jk19sm5bf8 : Nat - ↓ - 3. fromJust#1o1iq26cq7 : Nat - - 4. ns1.b : Nat - + 5. ns2.b : Text - - Updates: - - 6. b : Nat - ↓ - 7. b : Text - - 8. fromJust' : Nat - ↓ - 9. fromJust' : Nat - - 10. ns1.b : Nat - + 11. ns2.b : Text - - There were 1 auto-propagated updates. - - Added definitions: - - 12. unique type Y a b - 13. Y.Y : a -> b -> Y a b - 14. d : Nat - 15. e : Nat - 16. f : Nat - - 17. patch patch (added 2 updates) - -.> alias.term ns2.d ns2.d' - - Done. - -.> alias.type ns2.A ns2.A' - - Done. - -.> alias.type ns2.X ns2.X' - - Done. - -.> diff.namespace ns1 ns2 - - Resolved name conflicts: - - 1. ┌ fromJust#hs2i9lcgkd : Text - 2. └ fromJust#jk19sm5bf8 : Nat - ↓ - 3. fromJust#1o1iq26cq7 : Nat - - 4. ns1.b : Nat - + 5. ns2.b : Text - - Updates: - - 6. b : Nat - ↓ - 7. b : Text - - 8. fromJust' : Nat - ↓ - 9. fromJust' : Nat - - 10. ns1.b : Nat - + 11. ns2.b : Text - - There were 1 auto-propagated updates. - - Added definitions: - - 12. unique type Y a b - 13. Y.Y : a -> b -> Y a b - 14. ┌ d : Nat - 15. └ d' : Nat - 16. e : Nat - 17. f : Nat - - 18. patch patch (added 2 updates) - - Name changes: - - Original Changes - 19. A 20. A' (added) - - 21. 
X 22. X' (added) - -.> link ns1.c ns2.f - - Updates: - - 1. ns2.f : Nat - + 2. c : Nat - -.> link ns2.c ns2.c - - Updates: - - 1. ns2.c : Nat - + 2. c : Nat - -.> diff.namespace ns1 ns2 - - Resolved name conflicts: - - 1. ┌ fromJust#hs2i9lcgkd : Text - 2. └ fromJust#jk19sm5bf8 : Nat - ↓ - 3. fromJust#1o1iq26cq7 : Nat - - 4. ns1.b : Nat - + 5. ns2.b : Text - - Updates: - - 6. b : Nat - ↓ - 7. b : Text - - 8. c : Nat - + 9. c : Nat - - 10. fromJust' : Nat - ↓ - 11. fromJust' : Nat - - 12. ns1.b : Nat - + 13. ns2.b : Text - - There were 1 auto-propagated updates. - - Added definitions: - - 14. unique type Y a b - 15. Y.Y : a -> b -> Y a b - 16. ┌ d : Nat - 17. └ d' : Nat - 18. e : Nat - 19. f : Nat (+1 metadata) - - 20. patch patch (added 2 updates) - - Name changes: - - Original Changes - 21. A 22. A' (added) - - 23. X 24. X' (added) - -.> unlink ns2.b ns2.fromJust - - I didn't make any changes. - -.> diff.namespace ns1 ns2 - - Resolved name conflicts: - - 1. ┌ fromJust#hs2i9lcgkd : Text - 2. └ fromJust#jk19sm5bf8 : Nat - ↓ - 3. fromJust#1o1iq26cq7 : Nat - - 4. ns1.b : Nat - + 5. ns2.b : Text - - Updates: - - 6. b : Nat - ↓ - 7. b : Text - - 8. c : Nat - + 9. c : Nat - - 10. fromJust' : Nat - ↓ - 11. fromJust' : Nat - - 12. ns1.b : Nat - + 13. ns2.b : Text - - There were 1 auto-propagated updates. - - Added definitions: - - 14. unique type Y a b - 15. Y.Y : a -> b -> Y a b - 16. ┌ d : Nat - 17. └ d' : Nat - 18. e : Nat - 19. f : Nat (+1 metadata) - - 20. patch patch (added 2 updates) - - Name changes: - - Original Changes - 21. A 22. A' (added) - - 23. X 24. X' (added) - -.> alias.type ns1.X ns1.X2 - - Done. - -.> alias.type ns2.A' ns2.A'' - - Done. - -.> view.patch ns2.patch - - Edited Terms: - ns1.b -> ns2.b - ns1.fromJust' -> ns2.fromJust - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -.> fork ns2 ns3 - - Done. - -.> alias.term ns2.fromJust' ns2.yoohoo - - Done. 
- -.> delete.term ns2.fromJust' - - Name changes: - - Original Changes - 1. ns2.fromJust ┐ 2. ns2.fromJust' (removed) - 3. ns2.fromJust' │ - 4. ns2.yoohoo │ - 5. ns3.fromJust │ - 6. ns3.fromJust' ┘ - - Tip: You can use `undo` or `reflog` to undo this change. - -.> diff.namespace ns3 ns2 - - Name changes: - - Original Changes - 1. fromJust ┐ 2. yoohoo (added) - 3. fromJust' ┘ 4. fromJust' (removed) - -``` -```unison -bdependent = "banana" -``` - -```ucm -.ns3> update - - ⍟ I've updated these names to your new definition: - - bdependent : Text - -.> diff.namespace ns2 ns3 - - Updates: - - 1. bdependent : Text - ↓ - 2. bdependent : Text - - 3. patch patch (added 1 updates) - - Name changes: - - Original Changes - 4. fromJust ┐ 5. fromJust' (added) - 6. yoohoo ┘ 7. yoohoo (removed) - -``` -## Two different auto-propagated changes creating a name conflict -Currently, the auto-propagated name-conflicted definitions are not explicitly -shown, only their also-conflicted dependency is shown. -```unison -a = 333 -b = a + 1 -``` - -```ucm - ☝️ The namespace .nsx is empty. - -.nsx> add - - ⍟ I've added these definitions: - - a : Nat - b : Nat - -.> fork nsx nsy - - Done. - -.> fork nsx nsz - - Done. - -``` -```unison -a = 444 -``` - -```ucm -.nsy> update - - ⍟ I've updated these names to your new definition: - - a : Nat - -``` -```unison -a = 555 -``` - -```ucm -.nsz> update - - ⍟ I've updated these names to your new definition: - - a : Nat - -.> merge nsy nsw - - Here's what's changed in nsw after the merge: - - Added definitions: - - 1. a : Nat - 2. b : Nat (+1 metadata) - - 3. patch patch (added 1 updates) - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -```ucm -.> merge nsz nsw - - Here's what's changed in nsw after the merge: - - New name conflicts: - - 1. a#ekguc9h648 : Nat - ↓ - 2. ┌ a#5f8uodgrtf : Nat - 3. 
└ a#ekguc9h648 : Nat - - Updates: - - 4. b#be9a2abbbg : Nat - - There were 1 auto-propagated updates. - - 5. patch patch (added 1 updates) - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - - I tried to auto-apply the patch, but couldn't because it - contained contradictory entries. - -``` -```ucm -.> diff.namespace nsx nsw - - New name conflicts: - - 1. a#8ss2r9gqe7 : Nat - ↓ - 2. ┌ a#5f8uodgrtf : Nat - 3. └ a#ekguc9h648 : Nat - - Updates: - - There were 2 auto-propagated updates. - - Added definitions: - - 4. patch patch (added 2 updates) - -.nsw> view a b - - a#5f8uodgrtf : Nat - a#5f8uodgrtf = 555 - - a#ekguc9h648 : Nat - a#ekguc9h648 = 444 - - b#be9a2abbbg : Nat - b#be9a2abbbg = - use Nat + - a#ekguc9h648 + 1 - - b#kut4vstim7 : Nat - b#kut4vstim7 = - use Nat + - a#5f8uodgrtf + 1 - -``` -```unison -a = 777 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - x These definitions would fail on `add` or `update`: - - Reason - conflicted a : Nat - - Tip: Use `help filestatus` to learn more. - -``` -```ucm -.nsw> update - - x These definitions failed: - - Reason - conflicted a : Nat - - Tip: Use `help filestatus` to learn more. - - I tried to auto-apply the patch, but couldn't because it - contained contradictory entries. - -.nsw> view a b - - a#5f8uodgrtf : Nat - a#5f8uodgrtf = 555 - - a#ekguc9h648 : Nat - a#ekguc9h648 = 444 - - b#be9a2abbbg : Nat - b#be9a2abbbg = - use Nat + - a#ekguc9h648 + 1 - - b#kut4vstim7 : Nat - b#kut4vstim7 = - use Nat + - a#5f8uodgrtf + 1 - -``` -## - -Updates: -- 1 to 1 - -New name conflicts: -- updates where RHS has multiple hashes (excluding when RHS=LHS) - - 1. foo#jk19sm5bf8 : Nat - do we want to force a hashqualified? Arya thinks so - ↓ - 2. ┌ foo#0ja1qfpej6 : Nat - 3. 
└ foo#jk19sm5bf8 : Nat - -Resolved name conflicts: -- updates where LHS had multiple hashes and RHS has one - - 4. ┌ bar#0ja1qfpej6 : Nat - 5. └ bar#jk19sm5bf8 : Nat - ↓ - 6. bar#jk19sm5bf8 : Nat - -## Display issues to fixup - -- [d] Do we want to surface new edit conflicts in patches? -- [t] two different auto-propagated changes creating a name conflict should show - up somewhere besides the auto-propagate count -- [t] Things look screwy when the type signature doesn't fit and has to get broken - up into multiple lines. Maybe just disallow that? -- [d] Delete blank line in between copies / renames entries if all entries are 1 to 1 - see todo in the code -- [x] incorrectly calculated bracket alignment on hashqualified "Name changes" (delete.output.md) -- [x] just handle deletion of isPropagated in propagate function, leave HandleInput alone (assuming this does the trick) -- [x] might want unqualified names to be qualified sometimes: -- [x] if a name is updated to a not-yet-named reference, it's shown as both an update and an add -- [x] similarly, if a conflicted name is resolved by deleting the last name to - a reference, I (arya) suspect it will show up as a Remove -- [d] Maybe group and/or add headings to the types, constructors, terms -- [x] check whether creating a name conflict + adding metadata puts the update - in both categories; if it does, then filter out metadataUpdates from the - other categories -- [x] add tagging of propagated updates to test propagated updates output -- [x] missing old names in deletion ppe (delete.output.md) (superseded by \#1143) -- [x] delete.term has some bonkers output -- [x] Make a decision about how we want to show constructors in the diff -- [x] When you delete a name with metadata, it also shows up in updates section - with the deleted metadata. 
-- [x] An add with new metadata is getting characterized as an update -- [x] can there be a metadata-only update where it's not a singleton old and new reference -- [x] 12.patch patch needs a space -- [x] This looks like garbage -- [x] Extra 2 blank lines at the end of the add section -- [x] Fix alignment issues with buildTable, convert to column3M (to be written) -- [x] adding an alias is showing up as an Add and a Copy; should just show as Copy -- [x] removing one of multiple aliases appears in removes + moves + copies section -- [x] some overlapping cases between Moves and Copies^ -- [x] Maybe don't list the type signature twice for aliases? diff --git a/unison-src/transcripts/doc-formatting.md b/unison-src/transcripts/doc-formatting.md deleted file mode 100644 index f5b816f1d5..0000000000 --- a/unison-src/transcripts/doc-formatting.md +++ /dev/null @@ -1,254 +0,0 @@ -This transcript explains a few minor details about doc parsing and pretty-printing, both from a user point of view and with some implementation notes. The later stuff is meant more as unit testing than for human consumption. (The ucm `add` commands and their output are hidden for brevity.) - -Docs can be used as inline code comments. - -```ucm:hide -.> builtins.merge -``` - -```unison -foo : Nat -> Nat -foo n = - [: do the thing :] - n + 1 -``` - -```ucm:hide -.> add -``` -```ucm -.> view foo -``` - -Note that `@` and `:]` must be escaped within docs. - -```unison -escaping = [: Docs look [: like \@this \:] :] -``` - -```ucm:hide -.> add -``` -```ucm -.> view escaping -``` - -(Alas you can't have `\@` or `\:]` in your doc, as there's currently no way to 'unescape' them.) - -```unison --- Note that -- comments are preserved within doc literals. -commented = [: - example: - - -- a comment - f x = x + 1 -:] -``` - -```ucm:hide -.> add -``` -```ucm -.> view commented -``` - -### Indenting, and paragraph reflow - -Handling of indenting in docs between the parser and pretty-printer is a bit fiddly. 
- -```unison --- The leading and trailing spaces are stripped from the stored Doc by the --- lexer, and one leading and trailing space is inserted again on view/edit --- by the pretty-printer. -doc1 = [: hi :] -``` - -```ucm:hide -.> add -``` -```ucm -.> view doc1 -``` - -```unison --- Lines (apart from the first line, i.e. the bit between the [: and the --- first newline) are unindented until at least one of --- them hits the left margin (by a post-processing step in the parser). --- You may not notice this because the pretty-printer indents them again on --- view/edit. -doc2 = [: hello - - foo - - bar - and the rest. :] -``` - -```ucm:hide -.> add -``` -```ucm -.> view doc2 -``` - -```unison -doc3 = [: When Unison identifies a paragraph, it removes any newlines from it before storing it, and then reflows the paragraph text to fit the display window on display/view/edit. - -For these purposes, a paragraph is any sequence of non-empty lines that have zero indent (after the unindenting mentioned above.) - - - So this is not a paragraph, even - though you might want it to be. - - And this text | as a paragraph - is not treated | either. - -Note that because of the special treatment of the first line mentioned above, where its leading space is removed, it is always treated as a paragraph. - :] -``` - -```ucm:hide -.> add -``` -```ucm -.> view doc3 -``` - -```unison -doc4 = [: Here's another example of some paragraphs. - - All these lines have zero indent. - - - Apart from this one. :] -``` - -```ucm:hide -.> add -``` -```ucm -.> view doc4 -``` - -```unison --- The special treatment of the first line does mean that the following --- is pretty-printed not so prettily. To fix that we'd need to get the --- lexer to help out with interpreting doc literal indentation (because --- it knows what columns the `[:` was in.) -doc5 = [: - foo - - bar - and the rest. 
:] -``` - -```ucm:hide -.> add -``` -```ucm -.> view doc5 -``` - -```unison --- You can do the following to avoid that problem. -doc6 = [: - - foo - - bar - and the rest. - :] -``` - -```ucm:hide -.> add -``` -```ucm -.> view doc6 -``` - -### More testing - -```unison --- Check empty doc works. -empty = [::] - -expr = foo 1 -``` -```ucm:hide -.> add -``` -```ucm -.> view empty -``` - -```unison -test1 = [: -The internal logic starts to get hairy when you use the \@ features, for example referencing a name like @List.take. Internally, the text between each such usage is its own blob (blob ends here --> @List.take), so paragraph reflow has to be aware of multiple blobs to do paragraph reflow (or, more accurately, to do the normalization step where newlines with a paragraph are removed.) - -Para to reflow: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor ending in ref @List.take - -@List.take starting para lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. - -Middle of para: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. - - - non-para line (@List.take) with ref @List.take - Another non-para line - @List.take starting non-para line - - - non-para line with ref @List.take -before a para-line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. - - - non-para line followed by a para line starting with ref -@List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. 
- -a para-line ending with ref lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take - - non-para line - -para line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor - @List.take followed by non-para line starting with ref. - -@[signature] List.take - -@[source] foo - -@[evaluate] expr - -@[include] doc1 - --- note the leading space below - @[signature] List.take - -:] -``` -```ucm:hide -.> add -``` -```ucm -.> view test1 -``` - -```unison --- Regression test for #1363 - preservation of spaces after @ directives in first line when unindenting -reg1363 = [: `@List.take foo` bar - baz :] -``` -```ucm:hide -.> add -``` -```ucm -.> view reg1363 -``` - -```unison --- Demonstrate doc display when whitespace follows a @[source] or @[evaluate] --- whose output spans multiple lines. - -test2 = [: - Take a look at this: - @[source] foo ▶ bar -:] -``` -```ucm:hide -.> add -``` -View is fine. -```ucm -.> view test2 -``` -But note it's not obvious how display should best be handling this. At the moment it just does the simplest thing: -```ucm -.> display test2 -``` diff --git a/unison-src/transcripts/doc-formatting.output.md b/unison-src/transcripts/doc-formatting.output.md deleted file mode 100644 index 81149a2c5a..0000000000 --- a/unison-src/transcripts/doc-formatting.output.md +++ /dev/null @@ -1,512 +0,0 @@ -This transcript explains a few minor details about doc parsing and pretty-printing, both from a user point of view and with some implementation notes. The later stuff is meant more as unit testing than for human consumption. (The ucm `add` commands and their output are hidden for brevity.) - -Docs can be used as inline code comments. - -```unison -foo : Nat -> Nat -foo n = - [: do the thing :] - n + 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - foo : Nat -> Nat - -``` -```ucm -.> view foo - - foo : Nat -> Nat - foo n = - use Nat + - [: do the thing :] - n + 1 - -``` -Note that `@` and `:]` must be escaped within docs. - -```unison -escaping = [: Docs look [: like \@this \:] :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - escaping : Doc - -``` -```ucm -.> view escaping - - escaping : Doc - escaping = [: Docs look [: like \@this \:] :] - -``` -(Alas you can't have `\@` or `\:]` in your doc, as there's currently no way to 'unescape' them.) - -```unison --- Note that -- comments are preserved within doc literals. -commented = [: - example: - - -- a comment - f x = x + 1 -:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - commented : Doc - -``` -```ucm -.> view commented - - commented : Doc - commented = - [: - example: - - -- a comment - f x = x + 1 - :] - -``` -### Indenting, and paragraph reflow - -Handling of indenting in docs between the parser and pretty-printer is a bit fiddly. - -```unison --- The leading and trailing spaces are stripped from the stored Doc by the --- lexer, and one leading and trailing space is inserted again on view/edit --- by the pretty-printer. -doc1 = [: hi :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - doc1 : Doc - -``` -```ucm -.> view doc1 - - doc1 : Doc - doc1 = [: hi :] - -``` -```unison --- Lines (apart from the first line, i.e. 
the bit between the [: and the --- first newline) are unindented until at least one of --- them hits the left margin (by a post-processing step in the parser). --- You may not notice this because the pretty-printer indents them again on --- view/edit. -doc2 = [: hello - - foo - - bar - and the rest. :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - doc2 : Doc - -``` -```ucm -.> view doc2 - - doc2 : Doc - doc2 = - [: hello - - foo - - bar - and the rest. :] - -``` -```unison -doc3 = [: When Unison identifies a paragraph, it removes any newlines from it before storing it, and then reflows the paragraph text to fit the display window on display/view/edit. - -For these purposes, a paragraph is any sequence of non-empty lines that have zero indent (after the unindenting mentioned above.) - - - So this is not a paragraph, even - though you might want it to be. - - And this text | as a paragraph - is not treated | either. - -Note that because of the special treatment of the first line mentioned above, where its leading space is removed, it is always treated as a paragraph. - :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - doc3 : Doc - -``` -```ucm -.> view doc3 - - doc3 : Doc - doc3 = - [: When Unison identifies a paragraph, it removes any newlines - from it before storing it, and then reflows the paragraph text - to fit the display window on display/view/edit. - - For these purposes, a paragraph is any sequence of non-empty - lines that have zero indent (after the unindenting mentioned - above.) - - - So this is not a paragraph, even - though you might want it to be. - - And this text | as a paragraph - is not treated | either. 
- - Note that because of the special treatment of the first line - mentioned above, where its leading space is removed, it is always - treated as a paragraph. - :] - -``` -```unison -doc4 = [: Here's another example of some paragraphs. - - All these lines have zero indent. - - - Apart from this one. :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - doc4 : Doc - -``` -```ucm -.> view doc4 - - doc4 : Doc - doc4 = - [: Here's another example of some paragraphs. - - All these lines have zero indent. - - - Apart from this one. :] - -``` -```unison --- The special treatment of the first line does mean that the following --- is pretty-printed not so prettily. To fix that we'd need to get the --- lexer to help out with interpreting doc literal indentation (because --- it knows what columns the `[:` was in.) -doc5 = [: - foo - - bar - and the rest. :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - doc5 : Doc - -``` -```ucm -.> view doc5 - - doc5 : Doc - doc5 = - [: - foo - - bar - and the rest. :] - -``` -```unison --- You can do the following to avoid that problem. -doc6 = [: - - foo - - bar - and the rest. - :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - doc6 : Doc - -``` -```ucm -.> view doc6 - - doc6 : Doc - doc6 = - [: - - foo - - bar - and the rest. - :] - -``` -### More testing - -```unison --- Check empty doc works. -empty = [::] - -expr = foo 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - empty : Doc - expr : Nat - -``` -```ucm -.> view empty - - empty : Doc - empty = [: :] - -``` -```unison -test1 = [: -The internal logic starts to get hairy when you use the \@ features, for example referencing a name like @List.take. Internally, the text between each such usage is its own blob (blob ends here --> @List.take), so paragraph reflow has to be aware of multiple blobs to do paragraph reflow (or, more accurately, to do the normalization step where newlines with a paragraph are removed.) - -Para to reflow: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor ending in ref @List.take - -@List.take starting para lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. - -Middle of para: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. - - - non-para line (@List.take) with ref @List.take - Another non-para line - @List.take starting non-para line - - - non-para line with ref @List.take -before a para-line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. - - - non-para line followed by a para line starting with ref -@List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. 
- -a para-line ending with ref lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take - - non-para line - -para line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor - @List.take followed by non-para line starting with ref. - -@[signature] List.take - -@[source] foo - -@[evaluate] expr - -@[include] doc1 - --- note the leading space below - @[signature] List.take - -:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - test1 : Doc - -``` -```ucm -.> view test1 - - test1 : Doc - test1 = - [: - The internal logic starts to get hairy when you use the \@ features, - for example referencing a name like @List.take. Internally, - the text between each such usage is its own blob (blob ends here - --> @List.take), so paragraph reflow has to be aware of multiple - blobs to do paragraph reflow (or, more accurately, to do the - normalization step where newlines with a paragraph are removed.) - - Para to reflow: lorem ipsum dolor lorem ipsum dolor lorem ipsum - dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem - ipsum dolor ending in ref @List.take - - @List.take starting para lorem ipsum dolor lorem ipsum dolor - lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum - dolor lorem ipsum dolor. - - Middle of para: lorem ipsum dolor lorem ipsum dolor lorem ipsum - dolor @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum - dolor lorem ipsum dolor. 
- - - non-para line (@List.take) with ref @List.take - Another non-para line - @List.take starting non-para line - - - non-para line with ref @List.take - before a para-line lorem ipsum dolor lorem ipsum dolor lorem - ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor - lorem ipsum dolor lorem ipsum dolor. - - - non-para line followed by a para line starting with ref - @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor - lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum - dolor lorem ipsum dolor. - - a para-line ending with ref lorem ipsum dolor lorem ipsum dolor - lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum - dolor lorem ipsum dolor lorem ipsum dolor @List.take - - non-para line - - para line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor - lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum - dolor lorem ipsum dolor - @List.take followed by non-para line starting with ref. - - @[signature] List.take - - @[source] foo - - @[evaluate] expr - - @[include] doc1 - - -- note the leading space below - @[signature] List.take - - :] - -``` -```unison --- Regression test for #1363 - preservation of spaces after @ directives in first line when unindenting -reg1363 = [: `@List.take foo` bar - baz :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - reg1363 : Doc - -``` -```ucm -.> view reg1363 - - reg1363 : Doc - reg1363 = [: `@List.take foo` bar baz :] - -``` -```unison --- Demonstrate doc display when whitespace follows a @[source] or @[evaluate] --- whose output spans multiple lines. - -test2 = [: - Take a look at this: - @[source] foo ▶ bar -:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - test2 : Doc - -``` -View is fine. -```ucm -.> view test2 - - test2 : Doc - test2 = - [: - Take a look at this: - @[source] foo ▶ bar - :] - -``` -But note it's not obvious how display should best be handling this. At the moment it just does the simplest thing: -```ucm -.> display test2 - - - Take a look at this: - foo n = - use Nat + - [: do the thing :] - n + 1 ▶ bar - - -``` diff --git a/unison-src/transcripts/docs.md b/unison-src/transcripts/docs.md deleted file mode 100644 index 0ce76d7bab..0000000000 --- a/unison-src/transcripts/docs.md +++ /dev/null @@ -1,95 +0,0 @@ -# Documenting Unison code - -```ucm:hide -.> builtins.merge -``` - -Unison documentation is written in Unison. Documentation is a value of the following type: - -```ucm -.> view builtin.Doc -``` - -You can create these `Doc` values with ordinary code, or you can use the special syntax. A value of type `Doc` can be created via syntax like: - -```unison -use .builtin - -doc1 = [: This is some documentation. - -It can span multiple lines. - -Can link to definitions like @List.drop or @List - -:] -``` - -Syntax: - -`[:` starts a documentation block; `:]` finishes it. Within the block: - -* Links to definitions are done with `@List`. `\@` (and `\:]`) if you want to escape. -* `@[signature] List.take` expands to the type signature of `List.take` -* `@[source] List.map` expands to the full source of `List.map` -* `@[include] someOtherDoc`, inserts a value `someOtherDoc : Doc` here. -* `@[evaluate] someDefinition` expands to the result of evaluating `someDefinition`, which must be a pre-existing definition in the codebase (can't be an arbitrary expression). - -### An example - -We are going to document `List.take` using some verbiage and a few examples. 
First we have to add the examples to the codebase: - -```unison -List.take.ex1 = take 0 [1,2,3,4,5] -List.take.ex2 = take 2 [1,2,3,4,5] -``` - -```ucm -.> add -``` - -And now let's write our docs and reference these examples: - -```unison -use .builtin - -docs.List.take = [: -`@List.take n xs` returns the first `n` elements of `xs`. (No need to add line breaks manually. The display command will do wrapping of text for you. Indent any lines where you don't want it to do this.) - -## Examples: - - @[source] List.take.ex1 - 🔽 - @List.take.ex1 = @[evaluate] List.take.ex1 - - - @[source] List.take.ex2 - 🔽 - @List.take.ex2 = @[evaluate] List.take.ex2 -:] -``` - -Let's add it to the codebase, and link it to the definition: - -```ucm -.> add -.> link docs.List.take builtin.List.take -``` - -Now that documentation is linked to the definition. We can view it if we like: - -```ucm -.> links builtin.List.take builtin.Doc -.> display 1 -``` - -Or there's also a convenient function, `docs`, which shows the `Doc` values that are linked to a definition. It's implemented in terms of `links` and `display`: - -```ucm -.> docs builtin.List.take -``` - -Note that if we view the source of the documentation, the various references are *not* expanded. - -```ucm -.> view docs.List.take -``` diff --git a/unison-src/transcripts/docs.output.md b/unison-src/transcripts/docs.output.md deleted file mode 100644 index cdc1b91284..0000000000 --- a/unison-src/transcripts/docs.output.md +++ /dev/null @@ -1,211 +0,0 @@ -# Documenting Unison code - -Unison documentation is written in Unison. Documentation is a value of the following type: - -```ucm -.> view builtin.Doc - - unique type builtin.Doc - = Link Link - | Source Link - | Blob Text - | Join [builtin.Doc] - | Signature Term - | Evaluate Term - -``` -You can create these `Doc` values with ordinary code, or you can use the special syntax. 
A value of type `Doc` can be created via syntax like: - -```unison -use .builtin - -doc1 = [: This is some documentation. - -It can span multiple lines. - -Can link to definitions like @List.drop or @List - -:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - doc1 : Doc - -``` -Syntax: - -`[:` starts a documentation block; `:]` finishes it. Within the block: - -* Links to definitions are done with `@List`. `\@` (and `\:]`) if you want to escape. -* `@[signature] List.take` expands to the type signature of `List.take` -* `@[source] List.map` expands to the full source of `List.map` -* `@[include] someOtherDoc`, inserts a value `someOtherDoc : Doc` here. -* `@[evaluate] someDefinition` expands to the result of evaluating `someDefinition`, which must be a pre-existing definition in the codebase (can't be an arbitrary expression). - -### An example - -We are going to document `List.take` using some verbiage and a few examples. First we have to add the examples to the codebase: - -```unison -List.take.ex1 = take 0 [1,2,3,4,5] -List.take.ex2 = take 2 [1,2,3,4,5] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - List.take.ex1 : [Nat] - List.take.ex2 : [Nat] - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - List.take.ex1 : [Nat] - List.take.ex2 : [Nat] - -``` -And now let's write our docs and reference these examples: - -```unison -use .builtin - -docs.List.take = [: -`@List.take n xs` returns the first `n` elements of `xs`. (No need to add line breaks manually. The display command will do wrapping of text for you. Indent any lines where you don't want it to do this.) 
- -## Examples: - - @[source] List.take.ex1 - 🔽 - @List.take.ex1 = @[evaluate] List.take.ex1 - - - @[source] List.take.ex2 - 🔽 - @List.take.ex2 = @[evaluate] List.take.ex2 -:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - docs.List.take : Doc - -``` -Let's add it to the codebase, and link it to the definition: - -```ucm -.> add - - ⍟ I've added these definitions: - - docs.List.take : Doc - -.> link docs.List.take builtin.List.take - - Updates: - - 1. builtin.List.take : Nat -> [a] -> [a] - + 2. docs.List.take : Doc - -``` -Now that documentation is linked to the definition. We can view it if we like: - -```ucm -.> links builtin.List.take builtin.Doc - - 1. docs.List.take : Doc - - Tip: Try using `display 1` to display the first result or - `view 1` to view its source. - -.> display 1 - - - `builtin.List.take n xs` returns the first `n` elements of `xs`. - (No need to add line breaks manually. The display command will - do wrapping of text for you. Indent any lines where you don't - want it to do this.) - - ## Examples: - - List.take.ex1 = builtin.List.take 0 [1, 2, 3, 4, 5] - 🔽 - ex1 = [] - - - List.take.ex2 = builtin.List.take 2 [1, 2, 3, 4, 5] - 🔽 - ex2 = [1, 2] - - -``` -Or there's also a convenient function, `docs`, which shows the `Doc` values that are linked to a definition. It's implemented in terms of `links` and `display`: - -```ucm -.> docs builtin.List.take - - - `builtin.List.take n xs` returns the first `n` elements of `xs`. - (No need to add line breaks manually. The display command will - do wrapping of text for you. Indent any lines where you don't - want it to do this.) 
- - ## Examples: - - List.take.ex1 = builtin.List.take 0 [1, 2, 3, 4, 5] - 🔽 - ex1 = [] - - - List.take.ex2 = builtin.List.take 2 [1, 2, 3, 4, 5] - 🔽 - ex2 = [1, 2] - - -``` -Note that if we view the source of the documentation, the various references are *not* expanded. - -```ucm -.> view docs.List.take - - docs.List.take : Doc - docs.List.take = - [: - `@builtin.List.take n xs` returns the first `n` elements of `xs`. - (No need to add line breaks manually. The display command will - do wrapping of text for you. Indent any lines where you don't - want it to do this.) - - ## Examples: - - @[source] ex1 - 🔽 - @ex1 = @[evaluate] ex1 - - - @[source] ex2 - 🔽 - @ex2 = @[evaluate] ex2 - :] - -``` diff --git a/unison-src/transcripts/emptyCodebase.md b/unison-src/transcripts/emptyCodebase.md deleted file mode 100644 index a9ea55b850..0000000000 --- a/unison-src/transcripts/emptyCodebase.md +++ /dev/null @@ -1,27 +0,0 @@ -# The empty codebase - -The Unison codebase, when first initialized, contains no definitions in its namespace. - -Not even `Nat` or `+`! - -BEHOLD!!! - -```ucm:error -.> ls -``` - -Technically, the definitions all exist, but they have no names. `builtins.merge` brings them into existence, under the current namespace: - -```ucm -.foo> builtins.merge -.foo> ls -``` - -And for a limited time, you can get even more builtin goodies: - -```ucm -.foo> builtins.mergeio -.foo> ls -``` - -More typically, you'd start out by pulling `base`. diff --git a/unison-src/transcripts/emptyCodebase.output.md b/unison-src/transcripts/emptyCodebase.output.md deleted file mode 100644 index 5630895a62..0000000000 --- a/unison-src/transcripts/emptyCodebase.output.md +++ /dev/null @@ -1,41 +0,0 @@ -# The empty codebase - -The Unison codebase, when first initialized, contains no definitions in its namespace. - -Not even `Nat` or `+`! - -BEHOLD!!! - -```ucm -.> ls - - nothing to show - -``` -Technically, the definitions all exist, but they have no names. 
`builtins.merge` brings them into existence, under the current namespace: - -```ucm - ☝️ The namespace .foo is empty. - -.foo> builtins.merge - - Done. - -.foo> ls - - 1. builtin/ (231 definitions) - -``` -And for a limited time, you can get even more builtin goodies: - -```ucm -.foo> builtins.mergeio - - Done. - -.foo> ls - - 1. builtin/ (394 definitions) - -``` -More typically, you'd start out by pulling `base. diff --git a/unison-src/transcripts/errors/ucm-hide-all-error.md b/unison-src/transcripts/errors/ucm-hide-all-error.md deleted file mode 100644 index dcf94d8d32..0000000000 --- a/unison-src/transcripts/errors/ucm-hide-all-error.md +++ /dev/null @@ -1,12 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! - -When an expected error is not encountered in a `ucm:hide:all` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm:hide:all:error -.> history -``` diff --git a/unison-src/transcripts/errors/ucm-hide-all-error.output.md b/unison-src/transcripts/errors/ucm-hide-all-error.output.md deleted file mode 100644 index e3a9558abd..0000000000 --- a/unison-src/transcripts/errors/ucm-hide-all-error.output.md +++ /dev/null @@ -1,17 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! - -When an expected error is not encountered in a `ucm:hide:all` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm -.> history -``` - - -🛑 - -The transcript was expecting an error in the stanza above, but did not encounter one. diff --git a/unison-src/transcripts/errors/ucm-hide-all.md b/unison-src/transcripts/errors/ucm-hide-all.md deleted file mode 100644 index 22950a9334..0000000000 --- a/unison-src/transcripts/errors/ucm-hide-all.md +++ /dev/null @@ -1,12 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! 
- -When an error is encountered in a `ucm:hide:all` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm:hide:all -.> move.namespace foo bar -``` diff --git a/unison-src/transcripts/errors/ucm-hide-all.output.md b/unison-src/transcripts/errors/ucm-hide-all.output.md deleted file mode 100644 index e626779a3b..0000000000 --- a/unison-src/transcripts/errors/ucm-hide-all.output.md +++ /dev/null @@ -1,17 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! - -When an error is encountered in a `ucm:hide:all` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm -.> move.namespace foo bar -``` - - -🛑 - -The transcript failed due to an error encountered in the stanza above. diff --git a/unison-src/transcripts/errors/ucm-hide-error.md b/unison-src/transcripts/errors/ucm-hide-error.md deleted file mode 100644 index 68da57efc2..0000000000 --- a/unison-src/transcripts/errors/ucm-hide-error.md +++ /dev/null @@ -1,12 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! - -When an expected error is not encountered in a `ucm:hide` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm:hide:error -.> history -``` diff --git a/unison-src/transcripts/errors/ucm-hide-error.output.md b/unison-src/transcripts/errors/ucm-hide-error.output.md deleted file mode 100644 index 0056a35888..0000000000 --- a/unison-src/transcripts/errors/ucm-hide-error.output.md +++ /dev/null @@ -1,17 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! - -When an expected error is not encountered in a `ucm:hide` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm -.> history -``` - - -🛑 - -The transcript was expecting an error in the stanza above, but did not encounter one. 
diff --git a/unison-src/transcripts/errors/ucm-hide.md b/unison-src/transcripts/errors/ucm-hide.md deleted file mode 100644 index aa725ada4c..0000000000 --- a/unison-src/transcripts/errors/ucm-hide.md +++ /dev/null @@ -1,12 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! - -When an error is encountered in a `ucm:hide` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm:hide -.> move.namespace foo bar -``` diff --git a/unison-src/transcripts/errors/ucm-hide.output.md b/unison-src/transcripts/errors/ucm-hide.output.md deleted file mode 100644 index e012f7457f..0000000000 --- a/unison-src/transcripts/errors/ucm-hide.output.md +++ /dev/null @@ -1,17 +0,0 @@ - -### Transcript parser hidden errors - -Dangerous scary words! - -When an error is encountered in a `ucm:hide` block -then the transcript parser should print the stanza -and surface a helpful message. - -```ucm -.> move.namespace foo bar -``` - - -🛑 - -The transcript failed due to an error encountered in the stanza above. diff --git a/unison-src/transcripts/errors/unison-hide-all-error.md b/unison-src/transcripts/errors/unison-hide-all-error.md deleted file mode 100644 index 0364b35fdf..0000000000 --- a/unison-src/transcripts/errors/unison-hide-all-error.md +++ /dev/null @@ -1,10 +0,0 @@ - -### Transcript parser hidden errors - -When an expected error is not encountered in a `unison:hide:all:error` block -then the transcript parser should print the stanza -and surface a helpful message. 
- -```unison:hide:all:error -myVal = 3 -``` \ No newline at end of file diff --git a/unison-src/transcripts/errors/unison-hide-all-error.output.md b/unison-src/transcripts/errors/unison-hide-all-error.output.md deleted file mode 100644 index 3c3e6f3e5f..0000000000 --- a/unison-src/transcripts/errors/unison-hide-all-error.output.md +++ /dev/null @@ -1,16 +0,0 @@ - -### Transcript parser hidden errors - -When an expected error is not encountered in a `unison:hide:all:error` block -then the transcript parser should print the stanza -and surface a helpful message. - -```unison -myVal = 3 -``` - - - -🛑 - -The transcript was expecting an error in the stanza above, but did not encounter one. diff --git a/unison-src/transcripts/errors/unison-hide-all.md b/unison-src/transcripts/errors/unison-hide-all.md deleted file mode 100644 index b722caad70..0000000000 --- a/unison-src/transcripts/errors/unison-hide-all.md +++ /dev/null @@ -1,10 +0,0 @@ - -### Transcript parser hidden errors - -When an error is encountered in a `unison:hide:all` block -then the transcript parser should print the stanza -and surface a helpful message. - -```unison:hide:all -g 3 -``` diff --git a/unison-src/transcripts/errors/unison-hide-all.output.md b/unison-src/transcripts/errors/unison-hide-all.output.md deleted file mode 100644 index 7c4d462c08..0000000000 --- a/unison-src/transcripts/errors/unison-hide-all.output.md +++ /dev/null @@ -1,16 +0,0 @@ - -### Transcript parser hidden errors - -When an error is encountered in a `unison:hide:all` block -then the transcript parser should print the stanza -and surface a helpful message. - -```unison -g 3 -``` - - - -🛑 - -The transcript failed due to an error encountered in the stanza above. 
diff --git a/unison-src/transcripts/errors/unison-hide-error.md b/unison-src/transcripts/errors/unison-hide-error.md deleted file mode 100644 index 1ab6e675d3..0000000000 --- a/unison-src/transcripts/errors/unison-hide-error.md +++ /dev/null @@ -1,10 +0,0 @@ - -### Transcript parser hidden errors - -When an expected error is not encountered in a `unison:hide:error` block -then the transcript parser should print the stanza -and surface a helpful message. - -```unison:hide:error -myVal = 3 -``` \ No newline at end of file diff --git a/unison-src/transcripts/errors/unison-hide-error.output.md b/unison-src/transcripts/errors/unison-hide-error.output.md deleted file mode 100644 index 30ab85dc58..0000000000 --- a/unison-src/transcripts/errors/unison-hide-error.output.md +++ /dev/null @@ -1,16 +0,0 @@ - -### Transcript parser hidden errors - -When an expected error is not encountered in a `unison:hide:error` block -then the transcript parser should print the stanza -and surface a helpful message. - -```unison -myVal = 3 -``` - - - -🛑 - -The transcript was expecting an error in the stanza above, but did not encounter one. diff --git a/unison-src/transcripts/errors/unison-hide.md b/unison-src/transcripts/errors/unison-hide.md deleted file mode 100644 index 52b5ef4000..0000000000 --- a/unison-src/transcripts/errors/unison-hide.md +++ /dev/null @@ -1,10 +0,0 @@ - -### Transcript parser hidden errors - -When an error is encountered in a `unison:hide` block -then the transcript parser should print the stanza -and surface a helpful message. 
- -```unison:hide -g 3 -``` diff --git a/unison-src/transcripts/errors/unison-hide.output.md b/unison-src/transcripts/errors/unison-hide.output.md deleted file mode 100644 index 0b369a71ac..0000000000 --- a/unison-src/transcripts/errors/unison-hide.output.md +++ /dev/null @@ -1,16 +0,0 @@ - -### Transcript parser hidden errors - -When an error is encountered in a `unison:hide` block -then the transcript parser should print the stanza -and surface a helpful message. - -```unison -g 3 -``` - - - -🛑 - -The transcript failed due to an error encountered in the stanza above. diff --git a/unison-src/transcripts/escape-sequences.md b/unison-src/transcripts/escape-sequences.md deleted file mode 100644 index fc7955ff3d..0000000000 --- a/unison-src/transcripts/escape-sequences.md +++ /dev/null @@ -1,5 +0,0 @@ -```unison -> "Rúnar" -> "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" -> "古池や蛙飛びこむ水の音" -``` diff --git a/unison-src/transcripts/escape-sequences.output.md b/unison-src/transcripts/escape-sequences.output.md deleted file mode 100644 index f0f0947cfa..0000000000 --- a/unison-src/transcripts/escape-sequences.output.md +++ /dev/null @@ -1,28 +0,0 @@ -```unison -> "Rúnar" -> "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" -> "古池や蛙飛びこむ水の音" -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. 
- - 1 | > "Rúnar" - ⧩ - "Rúnar" - - 2 | > "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" - ⧩ - "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" - - 3 | > "古池や蛙飛びこむ水の音" - ⧩ - "古池や蛙飛びこむ水の音" - -``` diff --git a/unison-src/transcripts/find-patch.md b/unison-src/transcripts/find-patch.md deleted file mode 100644 index f990704bc2..0000000000 --- a/unison-src/transcripts/find-patch.md +++ /dev/null @@ -1,28 +0,0 @@ -# find.patch Test - -```ucm:hide -.> builtins.merge -``` - -```unison test.u -hey = "yello" -``` - -```ucm -.> add -``` - -Update - -```unison test.u -hey = "hello" -``` - -Update - -```ucm -.> update -.> find.patch -.> view.patch patch -.> view.patch 1 -``` diff --git a/unison-src/transcripts/find-patch.output.md b/unison-src/transcripts/find-patch.output.md deleted file mode 100644 index d021f0cc6d..0000000000 --- a/unison-src/transcripts/find-patch.output.md +++ /dev/null @@ -1,81 +0,0 @@ -# find.patch Test - -```unison ---- -title: test.u ---- -hey = "yello" - -``` - - -```ucm - - I found and typechecked these definitions in test.u. If you do - an `add` or `update`, here's how your codebase would change: - - ⍟ These new definitions are ok to `add`: - - hey : Text - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - hey : Text - -``` -Update - -```unison ---- -title: test.u ---- -hey = "hello" - -``` - - -```ucm - - I found and typechecked these definitions in test.u. If you do - an `add` or `update`, here's how your codebase would change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - hey : Text - -``` -Update - -```ucm -.> update - - ⍟ I've updated these names to your new definition: - - hey : Text - -.> find.patch - - 1. patch - -.> view.patch patch - - Edited Terms: hey#8e79ctircj -> hey - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. 
- -.> view.patch 1 - - Edited Terms: hey#8e79ctircj -> hey - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -``` diff --git a/unison-src/transcripts/fix-1381-excess-propagate.md b/unison-src/transcripts/fix-1381-excess-propagate.md deleted file mode 100644 index 84da98c5bc..0000000000 --- a/unison-src/transcripts/fix-1381-excess-propagate.md +++ /dev/null @@ -1,28 +0,0 @@ -We were seeing an issue where (it seemed) that every namespace that was visited during a propagate would get a new history node, even when it didn't contain any dependents. - -Example: -```unison:hide -a = "a term" -X.foo = "a namespace" -``` - -```ucm -.> add -``` - -Here is an update which should not affect `X`: -```unison:hide -a = "an update" -``` -```ucm -.> update -``` - -As of the time of this writing, the history for `X` should be a single node, `#4eeuo5bsfr`; -```ucm -.> history X -``` -however, as of release/M1i, we saw an extraneous node appear. If your `ucm` is fixed, you won't see it below: -```ucm:error -.> history #7nl6ppokhg -``` diff --git a/unison-src/transcripts/fix-1381-excess-propagate.output.md b/unison-src/transcripts/fix-1381-excess-propagate.output.md deleted file mode 100644 index 7b6db698c5..0000000000 --- a/unison-src/transcripts/fix-1381-excess-propagate.output.md +++ /dev/null @@ -1,55 +0,0 @@ -We were seeing an issue where (it seemed) that every namespace that was visited during a propagate would get a new history node, even when it didn't contain any dependents. 
- -Example: -```unison -a = "a term" -X.foo = "a namespace" -``` - -```ucm -.> add - - ⍟ I've added these definitions: - - X.foo : ##Text - a : ##Text - -``` -Here is an update which should not affect `X`: -```unison -a = "an update" -``` - -```ucm -.> update - - ⍟ I've updated these names to your new definition: - - a : ##Text - -``` -As of the time of this writing, the history for `X` should be a single node, `#4eeuo5bsfr`; -```ucm -.> history X - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #4eeuo5bsfr - - + Adds / updates: - - foo - - □ #7asfbtqmoj (start of history) - -``` -however, as of release/M1i, we saw an extraneous node appear. If your `ucm` is fixed, you won't see it below: -```ucm -.> history #7nl6ppokhg - - 😶 - - I don't know of a namespace with that hash. - -``` diff --git a/unison-src/transcripts/fix-big-list-crash.md b/unison-src/transcripts/fix-big-list-crash.md deleted file mode 100644 index 22be8f0cb1..0000000000 --- a/unison-src/transcripts/fix-big-list-crash.md +++ /dev/null @@ -1,13 +0,0 @@ -#### Big list crash - -```ucm:hide -.> builtins.merge -``` - -Big lists have been observed to crash, while in the garbage collection step. 
- -```unison -unique type Direction = U | D | L | R - -x = [(R,1005),(U,563),(R,417),(U,509),(L,237),(U,555),(R,397),(U,414),(L,490),(U,336),(L,697),(D,682),(L,180),(U,951),(L,189),(D,547),(R,697),(U,583),(L,172),(D,859),(L,370),(D,114),(L,519),(U,829),(R,389),(U,608),(R,66),(D,634),(L,320),(D,49),(L,931),(U,137),(L,349),(D,689),(L,351),(D,829),(R,819),(D,138),(L,118),(D,849),(R,230),(U,858),(L,509),(D,311),(R,815),(U,217),(R,359),(U,840),(R,77),(U,230),(R,361),(U,322),(R,300),(D,646),(R,348),(U,815),(R,793),(D,752),(R,967),(U,128),(R,948),(D,499),(R,359),(U,572),(L,566),(U,815),(R,630),(D,290),(L,829),(D,736),(R,358),(U,778),(R,891),(U,941),(R,544),(U,889),(L,920),(U,913),(L,447),(D,604),(R,538),(U,818),(L,215),(D,437),(R,447),(U,576),(R,452),(D,794),(R,864),(U,269),(L,325),(D,35),(L,268),(D,639),(L,101),(U,777),(L,776),(U,958),(R,105),(U,517),(R,667),(D,423),(R,603),(U,469),(L,125),(D,919),(R,879),(U,994),(R,665),(D,377),(R,456),(D,570),(L,685),(U,291),(R,261),(U,846),(R,840),(U,418),(L,974),(D,270),(L,312),(D,426),(R,621),(D,334),(L,855),(D,378),(R,694),(U,845),(R,481),(U,895),(L,362),(D,840),(L,712),(U,57),(R,276),(D,643),(R,566),(U,348),(R,361),(D,144),(L,287),(D,864),(L,556),(U,610),(L,927),(U,322),(R,271),(D,90),(L,741),(U,446),(R,181),(D,527),(R,56),(U,805),(L,907),(D,406),(L,286),(U,873),(L,79),(D,280),(L,153),(D,377),(R,253),(D,61),(R,475),(D,804),(R,788),(U,393),(L,660),(U,314),(R,489),(D,491),(L,234),(D,712),(L,253),(U,651),(L,777),(D,726),(R,146),(U,47),(R,630),(U,517),(R,226),(U,624),(L,834),(D,153),(L,513),(U,799),(R,287),(D,868),(R,982),(U,390),(L,296),(D,373),(R,9),(U,994),(R,105),(D,673),(L,657),(D,868),(R,738),(D,277),(R,374),(U,828),(R,860),(U,247),(R,484),(U,986),(L,723),(D,847),(L,578),(U,487),(L,51),(D,865),(L,328),(D,199),(R,812),(D,726),(R,355),(D,463),(R,761),(U,69),(R,508),(D,753),(L,81),(D,50),(L,345),(D,66),(L,764),(D,466),(L,975),(U,619),(R,59),(D,788),(L,737),(D,360),(R,14),(D,253),(L,512),(D,417),(R,828),(D,188),(L,394),(U,212),(R,658
),(U,369),(R,920),(U,927),(L,339),(U,552),(R,856),(D,458),(R,407),(U,41),(L,930),(D,460),(R,809),(U,467),(L,410),(D,800),(L,135),(D,596),(R,678),(D,4),(L,771),(D,637),(L,876),(U,192),(L,406),(D,136),(R,666),(U,730),(R,711),(D,291),(L,586),(U,845),(R,606),(U,2),(L,228),(D,759),(R,244),(U,946),(R,948),(U,160),(R,397),(U,134),(R,188),(U,850),(R,623),(D,315),(L,219),(D,450),(R,489),(U,374),(R,299),(D,474),(L,767),(D,679),(L,160),(D,403),(L,708)] -``` diff --git a/unison-src/transcripts/fix-big-list-crash.output.md b/unison-src/transcripts/fix-big-list-crash.output.md deleted file mode 100644 index cba6fa6be1..0000000000 --- a/unison-src/transcripts/fix-big-list-crash.output.md +++ /dev/null @@ -1,22 +0,0 @@ -#### Big list crash - -Big lists have been observed to crash, while in the garbage collection step. - -```unison -unique type Direction = U | D | L | R - -x = [(R,1005),(U,563),(R,417),(U,509),(L,237),(U,555),(R,397),(U,414),(L,490),(U,336),(L,697),(D,682),(L,180),(U,951),(L,189),(D,547),(R,697),(U,583),(L,172),(D,859),(L,370),(D,114),(L,519),(U,829),(R,389),(U,608),(R,66),(D,634),(L,320),(D,49),(L,931),(U,137),(L,349),(D,689),(L,351),(D,829),(R,819),(D,138),(L,118),(D,849),(R,230),(U,858),(L,509),(D,311),(R,815),(U,217),(R,359),(U,840),(R,77),(U,230),(R,361),(U,322),(R,300),(D,646),(R,348),(U,815),(R,793),(D,752),(R,967),(U,128),(R,948),(D,499),(R,359),(U,572),(L,566),(U,815),(R,630),(D,290),(L,829),(D,736),(R,358),(U,778),(R,891),(U,941),(R,544),(U,889),(L,920),(U,913),(L,447),(D,604),(R,538),(U,818),(L,215),(D,437),(R,447),(U,576),(R,452),(D,794),(R,864),(U,269),(L,325),(D,35),(L,268),(D,639),(L,101),(U,777),(L,776),(U,958),(R,105),(U,517),(R,667),(D,423),(R,603),(U,469),(L,125),(D,919),(R,879),(U,994),(R,665),(D,377),(R,456),(D,570),(L,685),(U,291),(R,261),(U,846),(R,840),(U,418),(L,974),(D,270),(L,312),(D,426),(R,621),(D,334),(L,855),(D,378),(R,694),(U,845),(R,481),(U,895),(L,362),(D,840),(L,712),(U,57),(R,276),(D,643),(R,566),(U,348),(R,361),(D,144),(L,287),(D
,864),(L,556),(U,610),(L,927),(U,322),(R,271),(D,90),(L,741),(U,446),(R,181),(D,527),(R,56),(U,805),(L,907),(D,406),(L,286),(U,873),(L,79),(D,280),(L,153),(D,377),(R,253),(D,61),(R,475),(D,804),(R,788),(U,393),(L,660),(U,314),(R,489),(D,491),(L,234),(D,712),(L,253),(U,651),(L,777),(D,726),(R,146),(U,47),(R,630),(U,517),(R,226),(U,624),(L,834),(D,153),(L,513),(U,799),(R,287),(D,868),(R,982),(U,390),(L,296),(D,373),(R,9),(U,994),(R,105),(D,673),(L,657),(D,868),(R,738),(D,277),(R,374),(U,828),(R,860),(U,247),(R,484),(U,986),(L,723),(D,847),(L,578),(U,487),(L,51),(D,865),(L,328),(D,199),(R,812),(D,726),(R,355),(D,463),(R,761),(U,69),(R,508),(D,753),(L,81),(D,50),(L,345),(D,66),(L,764),(D,466),(L,975),(U,619),(R,59),(D,788),(L,737),(D,360),(R,14),(D,253),(L,512),(D,417),(R,828),(D,188),(L,394),(U,212),(R,658),(U,369),(R,920),(U,927),(L,339),(U,552),(R,856),(D,458),(R,407),(U,41),(L,930),(D,460),(R,809),(U,467),(L,410),(D,800),(L,135),(D,596),(R,678),(D,4),(L,771),(D,637),(L,876),(U,192),(L,406),(D,136),(R,666),(U,730),(R,711),(D,291),(L,586),(U,845),(R,606),(U,2),(L,228),(D,759),(R,244),(U,946),(R,948),(U,160),(R,397),(U,134),(R,188),(U,850),(R,623),(D,315),(L,219),(D,450),(R,489),(U,374),(R,299),(D,474),(L,767),(D,679),(L,160),(D,403),(L,708)] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - unique type Direction - x : [(Direction, Nat)] - -``` diff --git a/unison-src/transcripts/fix1063.md b/unison-src/transcripts/fix1063.md deleted file mode 100644 index 1806b0e30a..0000000000 --- a/unison-src/transcripts/fix1063.md +++ /dev/null @@ -1,17 +0,0 @@ -Tests that functions named `.` are rendered correctly. - -```ucm:hide -.> builtins.merge -``` - -``` unison -(.) f g x = f (g x) - -noop = not . 
not -``` - -``` ucm -.> add -.> view noop -``` - diff --git a/unison-src/transcripts/fix1063.output.md b/unison-src/transcripts/fix1063.output.md deleted file mode 100644 index 2020d47682..0000000000 --- a/unison-src/transcripts/fix1063.output.md +++ /dev/null @@ -1,35 +0,0 @@ -Tests that functions named `.` are rendered correctly. - -```unison -(.) f g x = f (g x) - -noop = not . not -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - . : ∀ o 𝕖 i1 i. - (i1 ->{𝕖} o) -> (i ->{𝕖} i1) -> i ->{𝕖} o - noop : Boolean -> Boolean - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - . : ∀ o 𝕖 i1 i. (i1 ->{𝕖} o) -> (i ->{𝕖} i1) -> i ->{𝕖} o - noop : Boolean -> Boolean - -.> view noop - - noop : Boolean -> Boolean - noop = not . not - -``` diff --git a/unison-src/transcripts/fix1334.md b/unison-src/transcripts/fix1334.md deleted file mode 100644 index 4f01d6adc8..0000000000 --- a/unison-src/transcripts/fix1334.md +++ /dev/null @@ -1,36 +0,0 @@ -Previously, the `alias.term` and `alias.type` would fail if the source argument was hash-only, and there was no way to create an alias for a definition that didn't already have a name. Also, the `replace.term` and `replace.type` _only_ worked on hashes, and they had to be _full_ hashes. - -With this PR, the source of an alias can be a short hash (even of a definition that doesn't currently have a name in the namespace) along with a name or hash-qualified name from the current namespace as usual, and the arguments to `replace.term` and `replace.type` can be a short hash, a name, or a hash-qualified name. - -Let's make some hash-only aliases, now that we can. 
:mad-with-power-emoji: - -```ucm -.> alias.type ##Nat Cat -.> alias.term ##Nat.+ please_fix_763.+ -``` - -And some functions that use them: -```unison -f = 3 -g = 4 -h = f + 1 - -> h -``` - -```ucm -.> add -``` - -We used to have to know the full hash for a definition to be able to use the `replace.*` commands, but now we don't: -```ucm -.> names g -.> replace.term f g -.> names g -.> view.patch -``` - -The value of `h` should have been updated too: -```unison -> h -``` diff --git a/unison-src/transcripts/fix1334.output.md b/unison-src/transcripts/fix1334.output.md deleted file mode 100644 index f846f2acbf..0000000000 --- a/unison-src/transcripts/fix1334.output.md +++ /dev/null @@ -1,101 +0,0 @@ -Previously, the `alias.term` and `alias.type` would fail if the source argument was hash-only, and there was no way to create an alias for a definition that didn't already have a name. Also, the `replace.term` and `replace.type` _only_ worked on hashes, and they had to be _full_ hashes. - -With this PR, the source of an alias can be a short hash (even of a definition that doesn't currently have a name in the namespace) along with a name or hash-qualified name from the current namespace as usual, and the arguments to `replace.term` and `replace.type` can be a short hash, a name, or a hash-qualified name. - -Let's make some hash-only aliases, now that we can. :mad-with-power-emoji: - -```ucm -.> alias.type ##Nat Cat - - Done. - -.> alias.term ##Nat.+ please_fix_763.+ - - Done. - -``` -And some functions that use them: -```unison -f = 3 -g = 4 -h = f + 1 - -> h -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - f : Cat - g : Cat - h : Cat - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. 
- - 5 | > h - ⧩ - 4 - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - f : Cat - g : Cat - h : Cat - -``` -We used to have to know the full hash for a definition to be able to use the `replace.*` commands, but now we don't: -```ucm -.> names g - - Term - Hash: #52addbrohu - Names: g - -.> replace.term f g - - Done. - -.> names g - - Term - Hash: #52addbrohu - Names: f g - -.> view.patch - - Edited Terms: f#msp7bv40rv -> f - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -``` -The value of `h` should have been updated too: -```unison -> h -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > h - ⧩ - 5 - -``` diff --git a/unison-src/transcripts/fix1356.md b/unison-src/transcripts/fix1356.md deleted file mode 100644 index f932e8b4f2..0000000000 --- a/unison-src/transcripts/fix1356.md +++ /dev/null @@ -1,41 +0,0 @@ -##### This transcript reproduces the failure to unlink documentation - -```ucm:hide -.> builtins.merge -``` - -Step 1: code a term and documentation for it -```unison -x = 1 -x.doc = [: I am the documentation for x:] -``` - -Step 2: add term and documentation, link, and check the documentation -```ucm -.> add -.> link x.doc x -.> docs x -``` - -Step 3: Oops I don't like the doc, so I will re-code it! -```unison -x.doc = [: I am the documentation for x, and I now look better:] -``` - -Step 4: I add it and expect to see it -```ucm -.> update -.> docs x -``` - -That works great. Let's relink the old doc too. 
- -```ucm -.> link #v8f1hhvs57 x -``` - -Let's check that we see both docs: - -```ucm -.> docs x -``` diff --git a/unison-src/transcripts/fix1356.output.md b/unison-src/transcripts/fix1356.output.md deleted file mode 100644 index cc40c7b5e1..0000000000 --- a/unison-src/transcripts/fix1356.output.md +++ /dev/null @@ -1,94 +0,0 @@ -##### This transcript reproduces the failure to unlink documentation - -Step 1: code a term and documentation for it -```unison -x = 1 -x.doc = [: I am the documentation for x:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : Nat - x.doc : Doc - -``` -Step 2: add term and documentation, link, and check the documentation -```ucm -.> add - - ⍟ I've added these definitions: - - x : Nat - x.doc : Doc - -.> link x.doc x - - Updates: - - 1. x : Nat - + 2. doc : Doc - -.> docs x - - I am the documentation for x - -``` -Step 3: Oops I don't like the doc, so I will re-code it! -```unison -x.doc = [: I am the documentation for x, and I now look better:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - x.doc : Doc - -``` -Step 4: I add it and expect to see it -```ucm -.> update - - ⍟ I've updated these names to your new definition: - - x.doc : Doc - -.> docs x - - I am the documentation for x, and I now look better - -``` -That works great. Let's relink the old doc too. - -```ucm -.> link #v8f1hhvs57 x - - Updates: - - 1. x : Nat - + 2. #v8f1hhvs57 : Doc - -``` -Let's check that we see both docs: - -```ucm -.> docs x - - 1. x.doc : Doc - 2. #v8f1hhvs57 : Doc - - Tip: Try using `display 1` to display the first result or - `view 1` to view its source. 
- -``` diff --git a/unison-src/transcripts/fix689.md b/unison-src/transcripts/fix689.md deleted file mode 100644 index a156daa6aa..0000000000 --- a/unison-src/transcripts/fix689.md +++ /dev/null @@ -1,13 +0,0 @@ -Tests the fix for https://github.com/unisonweb/unison/issues/689 - -```ucm:hide -.> builtins.merge -``` - -``` unison -ability SystemTime where - systemTime : ##Nat - -tomorrow = '(SystemTime.systemTime + 24 * 60 * 60) -``` - diff --git a/unison-src/transcripts/fix689.output.md b/unison-src/transcripts/fix689.output.md deleted file mode 100644 index e4d39e5bcc..0000000000 --- a/unison-src/transcripts/fix689.output.md +++ /dev/null @@ -1,21 +0,0 @@ -Tests the fix for https://github.com/unisonweb/unison/issues/689 - -```unison -ability SystemTime where - systemTime : ##Nat - -tomorrow = '(SystemTime.systemTime + 24 * 60 * 60) -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ability SystemTime - tomorrow : '{SystemTime} Nat - -``` diff --git a/unison-src/transcripts/fix849.md b/unison-src/transcripts/fix849.md deleted file mode 100644 index 4d111f9cc1..0000000000 --- a/unison-src/transcripts/fix849.md +++ /dev/null @@ -1,12 +0,0 @@ - -```ucm:hide -.> builtins.merge -``` - -See [this ticket](https://github.com/unisonweb/unison/issues/849). - -```unison -x = 42 - -> x -``` diff --git a/unison-src/transcripts/fix849.output.md b/unison-src/transcripts/fix849.output.md deleted file mode 100644 index e9ec2183d7..0000000000 --- a/unison-src/transcripts/fix849.output.md +++ /dev/null @@ -1,27 +0,0 @@ - -See [this ticket](https://github.com/unisonweb/unison/issues/849). - -```unison -x = 42 - -> x -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : Nat - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 3 | > x - ⧩ - 42 - -``` diff --git a/unison-src/transcripts/fix942.md b/unison-src/transcripts/fix942.md deleted file mode 100644 index 5c12cb8c06..0000000000 --- a/unison-src/transcripts/fix942.md +++ /dev/null @@ -1,37 +0,0 @@ -```ucm:hide -.> builtins.merge -``` - -First we add some code: - -```unison -x = 0 -y = x + 1 -z = y + 2 -``` - -```ucm -.> add -``` - -Now we edit `x` to be `7`, which should make `z` equal `10`: - -```unison -x = 7 -``` - -```ucm -.> update -.> view x y z -``` - -Uh oh! `z` is still referencing the old version. Just to confirm: - -```unison -test> t1 = if z == 3 then [Fail "nooo!!!"] else [Ok "great"] -``` - -```ucm -.> add -.> test -``` diff --git a/unison-src/transcripts/fix942.output.md b/unison-src/transcripts/fix942.output.md deleted file mode 100644 index c33368e139..0000000000 --- a/unison-src/transcripts/fix942.output.md +++ /dev/null @@ -1,114 +0,0 @@ -First we add some code: - -```unison -x = 0 -y = x + 1 -z = y + 2 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : Nat - y : Nat - z : Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - x : Nat - y : Nat - z : Nat - -``` -Now we edit `x` to be `7`, which should make `z` equal `10`: - -```unison -x = 7 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. 
You can `update` them to your - new definition: - - x : Nat - -``` -```ucm -.> update - - ⍟ I've updated these names to your new definition: - - x : Nat - -.> view x y z - - x : Nat - x = 7 - - y : Nat - y = - use Nat + - x + 1 - - z : Nat - z = - use Nat + - y + 2 - -``` -Uh oh! `z` is still referencing the old version. Just to confirm: - -```unison -test> t1 = if z == 3 then [Fail "nooo!!!"] else [Ok "great"] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - t1 : [Result] - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | test> t1 = if z == 3 then [Fail "nooo!!!"] else [Ok "great"] - - ✅ Passed great - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - t1 : [Result] - -.> test - - Cached test results (`help testcache` to learn more) - - ◉ t1 great - - ✅ 1 test(s) passing - - Tip: Use view t1 to view the source of a test. - -``` diff --git a/unison-src/transcripts/fix987.md b/unison-src/transcripts/fix987.md deleted file mode 100644 index 28e39518de..0000000000 --- a/unison-src/transcripts/fix987.md +++ /dev/null @@ -1,37 +0,0 @@ - -```ucm:hide -.> builtins.merge -``` - -First we'll add a definition: - -```unison -ability DeathStar where - attack : Text -> () - -spaceAttack1 x = - y = attack "saturn" - z = attack "neptune" - "All done" -``` - -Add it to the codebase: - -```ucm -.> add -``` - -Now we'll try to add a different definition that runs the actions in a different order. This should work fine: - -```unison -spaceAttack2 x = - z = attack "neptune" - y = attack "saturn" - "All done" -``` - -```ucm -.> add -``` - -Previously, this would fail because the hashing algorithm was being given one big let rec block whose binding order was normalized. 
diff --git a/unison-src/transcripts/fix987.output.md b/unison-src/transcripts/fix987.output.md deleted file mode 100644 index ecf3169535..0000000000 --- a/unison-src/transcripts/fix987.output.md +++ /dev/null @@ -1,65 +0,0 @@ - -First we'll add a definition: - -```unison -ability DeathStar where - attack : Text -> () - -spaceAttack1 x = - y = attack "saturn" - z = attack "neptune" - "All done" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - ability DeathStar - spaceAttack1 : x ->{DeathStar} Text - -``` -Add it to the codebase: - -```ucm -.> add - - ⍟ I've added these definitions: - - ability DeathStar - spaceAttack1 : x ->{DeathStar} Text - -``` -Now we'll try to add a different definition that runs the actions in a different order. This should work fine: - -```unison -spaceAttack2 x = - z = attack "neptune" - y = attack "saturn" - "All done" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - spaceAttack2 : x ->{DeathStar} Text - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - spaceAttack2 : x ->{DeathStar} Text - -``` -Previously, this would fail because the hashing algorithm was being given one big let rec block whose binding order was normalized. diff --git a/unison-src/transcripts/hello.md b/unison-src/transcripts/hello.md deleted file mode 100644 index 322036b93b..0000000000 --- a/unison-src/transcripts/hello.md +++ /dev/null @@ -1,69 +0,0 @@ - -# Hello! - -```ucm:hide -.> builtins.merge -``` - -This markdown file is also a Unison transcript file. Transcript files are an easy way to create self-documenting Unison programs, libraries, and tutorials. 
- -The format is just a regular markdown file with some fenced code blocks that are typechecked and elaborated by `ucm`. For example, you can call this transcript via: - -``` -$ ucm transcript hello.md -``` - -This runs it on a freshly generated empty codebase. Alternately `ucm transcript.fork -codebase /path/to/code hello.md` runs the transcript on a freshly generated copy of the provided codebase. Do `ucm help` to learn more about usage. - -Fenced code blocks of type `unison` and `ucm` are treated specially: - -* `ucm` blocks are executed, and the output is interleaved into the output markdown file after each command, replacing the original `ucm` block. -* `unison` blocks are typechecked, and a `ucm` block with the output of typechecking and execution of the file is inserted immediately afterwards. - -Take a look at [the elaborated output](hello.output.md) to see what this file looks like after passing through the transcript runner. - -## Let's try it out!! - -In the `unison` fenced block, you can give an (optional) file name (defaults to `scratch.u`), like so: - -```unison myfile.u -x = 42 -``` - -Let's go ahead and add that to the codebase, then make sure it's there: - -```ucm -.> add -.> view x -``` - -If `view` returned no results, the transcript would fail at this point. - -## Hiding output - -You may not always want to view the output of typechecking and evaluation every time, in which case, you can add `:hide` to the block. For instance: - -```unison:hide -y = 99 -``` - -This works for `ucm` blocks as well. - -```ucm:hide -.> rename.term x answerToUltimateQuestionOfLife -``` - -Doing `unison:hide:all` hides the block altogether, both input and output - this is useful for doing behind-the-scenes control of `ucm`'s state. - -```unison:hide:all -> [: you won't see me :] -``` - -## Expecting failures - -Sometimes, you have a block which you are _expecting_ to fail, perhaps because you're illustrating how something would be a type error. 
Adding `:error` to the block will check for this. For instance, this program has a type error: - -```unison:error -hmm : .builtin.Nat -hmm = "Not, in fact, a number" -``` diff --git a/unison-src/transcripts/hello.output.md b/unison-src/transcripts/hello.output.md deleted file mode 100644 index fd94c280a6..0000000000 --- a/unison-src/transcripts/hello.output.md +++ /dev/null @@ -1,92 +0,0 @@ - -# Hello! - -This markdown file is also a Unison transcript file. Transcript files are an easy way to create self-documenting Unison programs, libraries, and tutorials. - -The format is just a regular markdown file with some fenced code blocks that are typechecked and elaborated by `ucm`. For example, you can call this transcript via: - -``` -$ ucm transcript hello.md - -``` - -This runs it on a freshly generated empty codebase. Alternately `ucm transcript.fork -codebase /path/to/code hello.md` runs the transcript on a freshly generated copy of the provided codebase. Do `ucm help` to learn more about usage. - -Fenced code blocks of type `unison` and `ucm` are treated specially: - -* `ucm` blocks are executed, and the output is interleaved into the output markdown file after each command, replacing the original `ucm` block. -* `unison` blocks are typechecked, and a `ucm` block with the output of typechecking and execution of the file is inserted immediately afterwards. - -Take a look at [the elaborated output](hello.output.md) to see what this file looks like after passing through the transcript runner. - -## Let's try it out!! - -In the `unison` fenced block, you can give an (optional) file name (defaults to `scratch.u`), like so: - -```unison ---- -title: myfile.u ---- -x = 42 - -``` - - -```ucm - - I found and typechecked these definitions in myfile.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : Nat - -``` -Let's go ahead and add that to the codebase, then make sure it's there: - -```ucm -.> add - - ⍟ I've added these definitions: - - x : Nat - -.> view x - - x : Nat - x = 42 - -``` -If `view` returned no results, the transcript would fail at this point. - -## Hiding output - -You may not always want to view the output of typechecking and evaluation every time, in which case, you can add `:hide` to the block. For instance: - -```unison -y = 99 -``` - -This works for `ucm` blocks as well. - -Doing `unison:hide:all` hides the block altogether, both input and output - this is useful for doing behind-the-scenes control of `ucm`'s state. - -## Expecting failures - -Sometimes, you have a block which you are _expecting_ to fail, perhaps because you're illustrating how something would be a type error. Adding `:error` to the block will check for this. For instance, this program has a type error: - -```unison -hmm : .builtin.Nat -hmm = "Not, in fact, a number" -``` - -```ucm - - I found a value of type builtin.Text where I expected to find one of type builtin.Nat: - - 1 | hmm : .builtin.Nat - 2 | hmm = "Not, in fact, a number" - - -``` diff --git a/unison-src/transcripts/link.md b/unison-src/transcripts/link.md deleted file mode 100644 index 46720e385e..0000000000 --- a/unison-src/transcripts/link.md +++ /dev/null @@ -1,70 +0,0 @@ -# Linking definitions to metadata - -```ucm:hide -.> builtins.mergeio -``` - -The `link` and `unlink` commands can be used to manage metadata linked to definitions. For example, you can link documentation to a definition: - -```unison -use .builtin - -coolFunction x = x * 2 - -coolFunction.doc = [: This is a cool function. 
:] -``` - -```ucm -.> add -.> link coolFunction.doc coolFunction -``` - -You can use arbitrary Unison values and link them as metadata to definitions: - -```unison -toCopyrightHolder author = match author with - Author guid name -> CopyrightHolder guid name - -alice = Author (GUID Bytes.empty) "Alice Coder" - -coolFunction.license = License [toCopyrightHolder alice] [Year 2020] licenses.mit - -licenses.mit = LicenseType [: -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-:] -``` - -```ucm -.> add -.> link coolFunction.license coolFunction -.> link alice coolFunction -``` - -We can look at the links we have: - -```ucm -.> links coolFunction -``` - -We can link the same metadata simultaneously to multiple definitions: - -```unison -myLibrary.f x = x + 1 -myLibrary.g x = x + 2 -myLibrary.h x = x + 3 -``` - -```ucm -.> add -.> cd myLibrary -.myLibrary> find -.myLibrary> link .alice 1-3 -.myLibrary> links f -.myLibrary> links g -.myLibrary> links h -.myLibrary> history -``` diff --git a/unison-src/transcripts/link.output.md b/unison-src/transcripts/link.output.md deleted file mode 100644 index b85aa09ca8..0000000000 --- a/unison-src/transcripts/link.output.md +++ /dev/null @@ -1,202 +0,0 @@ -# Linking definitions to metadata - -The `link` and `unlink` commands can be used to manage metadata linked to definitions. For example, you can link documentation to a definition: - -```unison -use .builtin - -coolFunction x = x * 2 - -coolFunction.doc = [: This is a cool function. :] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - coolFunction : Nat -> Nat - coolFunction.doc : Doc - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - coolFunction : Nat -> Nat - coolFunction.doc : Doc - -.> link coolFunction.doc coolFunction - - Updates: - - 1. coolFunction : Nat -> Nat - + 2. 
doc : Doc - -``` -You can use arbitrary Unison values and link them as metadata to definitions: - -```unison -toCopyrightHolder author = match author with - Author guid name -> CopyrightHolder guid name - -alice = Author (GUID Bytes.empty) "Alice Coder" - -coolFunction.license = License [toCopyrightHolder alice] [Year 2020] licenses.mit - -licenses.mit = LicenseType [: -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -:] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - alice : Author - coolFunction.license : License - licenses.mit : LicenseType - toCopyrightHolder : Author -> CopyrightHolder - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - alice : Author - coolFunction.license : License - licenses.mit : LicenseType - toCopyrightHolder : Author -> CopyrightHolder - -.> link coolFunction.license coolFunction - - Updates: - - 1. 
coolFunction : Nat -> Nat - + 2. license : License - -.> link alice coolFunction - - Updates: - - 1. coolFunction : Nat -> Nat - + 2. alice : Author - -``` -We can look at the links we have: - -```ucm -.> links coolFunction - - 1. alice : Author - 2. coolFunction.license : License - 3. coolFunction.doc : Doc - - Tip: Try using `display 1` to display the first result or - `view 1` to view its source. - -``` -We can link the same metadata simultaneously to multiple definitions: - -```unison -myLibrary.f x = x + 1 -myLibrary.g x = x + 2 -myLibrary.h x = x + 3 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - myLibrary.f : Nat -> Nat - myLibrary.g : Nat -> Nat - myLibrary.h : Nat -> Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - myLibrary.f : Nat -> Nat - myLibrary.g : Nat -> Nat - myLibrary.h : Nat -> Nat - -.> cd myLibrary - -.myLibrary> find - - 1. f : Nat -> Nat - 2. g : Nat -> Nat - 3. h : Nat -> Nat - - -.myLibrary> link .alice 1-3 - - Updates: - - 1. myLibrary.f : Nat -> Nat - + 2. alice : Author - - 3. myLibrary.g : Nat -> Nat - + 4. alice : Author - - 5. myLibrary.h : Nat -> Nat - + 6. alice : Author - -.myLibrary> links f - - 1. .alice : Author - - Tip: Try using `display 1` to display the first result or - `view 1` to view its source. - -.myLibrary> links g - - 1. .alice : Author - - Tip: Try using `display 1` to display the first result or - `view 1` to view its source. - -.myLibrary> links h - - 1. .alice : Author - - Tip: Try using `display 1` to display the first result or - `view 1` to view its source. - -.myLibrary> history - - Note: The most recent namespace hash is immediately below this - message. 
- - ⊙ #mquil07fad - - - - ⊙ #4nfhqq566a - - + Adds / updates: - - f g h - - □ #7asfbtqmoj (start of history) - -``` diff --git a/unison-src/transcripts/merge.md b/unison-src/transcripts/merge.md deleted file mode 100644 index 51633493b5..0000000000 --- a/unison-src/transcripts/merge.md +++ /dev/null @@ -1,101 +0,0 @@ - -```ucm:hide -.> builtins.merge -``` - -# How merging works - -Suppose we have two branches, `P1` and `P2`, and a subnamespace, `foo`, which we'll refer to with `P1.foo` , `P2.foo`. This doc explains how `merge(P1,P2)` is computed, including the `merge(P1,P2).foo` subnamespace. - -`LCA(P1,P2)` is the lowest common ancestor of `P1` and `P2`. To compute `merge(P1,P2)`, we: - -1. Compute `LCA(P1,P2)` and do a three way merge of that level of the tree, using the algorithm below. What about the children of `P1` and `P2`? Let's just consider a child namespace `foo`. There are a few cases: - 1. `P1` and `P2` both have foo as a child namespace. Then `merge(P1,P2).foo == merge(P1.foo, P2.foo)` - 2. `P1` has `foo` as a child namespace, but `P2` does not (or vice versa). Then we have two subcases: - 1. `LCA(P1,P2)` has no `foo`. This means that `foo` child namespace was added by `P1`. The merged result for the `foo` subnamespace is just `P1.foo`. - 2. `LCA(P1,P2)` does have `foo`. This means that `P2` _deleted_ the `foo` subnamespace. The merged result for the `foo` subnamespace is then `merge(P1.foo, cons empty LCA(P1,P2).foo)`. This does a history-preserving delete of all the definitions that existed at the `LCA` point in history. - 1. Example is like if `P1` added a new definition `foo.bar = 23` after the `LCA`, then `foo.bar` will exist in the merged result, but all the definitions that existed in `foo` at the time of the `LCA` will be deleted in the result. 
- -### Diff-based 3-way merge algorithm - -Standard 3 way merge algorithm to merge `a` and `b`: - -* Let `lca = LCA(a,b)` -* merged result is: `apply(diff(lca,a) <> diff(lca,b), lca)` - -Relies on some diff combining operation `<>`. - -```unison:hide -foo.w = 2 -foo.x = 1 -baz.x = 3 -quux.x = 4 -``` - -```ucm -.P0> add -``` - -Now P0 has 3 sub-namespaces. -* foo will be modified definition-wise in each branch -* baz will be deleted in the P2 branch and left alone in P1 -* quux will be deleted in the P2 branch and added to in P1 -* P1 will add a bar sub-namespace - -```ucm -.P0> fork .P0 .P1 -.P0> fork .P0 .P2 -``` - -```unison:hide -foo.y = 2483908 -bar.y = 383 -quux.y = 333 -``` - -```ucm -.P1> add -.P1> delete.term foo.w -``` - -We added to `foo`, `bar` and `baz`, and deleted `foo.w`, which should stay deleted in the merge. - -```unison:hide -foo.z = +28348 -``` - -```ucm -.P2> add -.P2> delete.namespace baz -.P2> delete.namespace quux -.P2> find -``` - -We added `foo.z`, deleted whole namespaces `baz` and `quux` which should stay -deleted in the merge. - -Now we'll try merging `P1` and `P2` back into `P0`. We should see the union of all their definitions in the merged version of `P0`. - -This should succeed and the resulting P0 namespace should have `foo`, `bar` -and `quux` namespaces. - -```ucm -.P0> merge .P1 -.P0> merge .P2 -.P0> find -.P0> view foo.x foo.y foo.z bar.y quux.y -``` - -These test that things we expect to be deleted are still deleted. - -```ucm:error -.> view P0.foo.w -``` - -```ucm:error -.> view P0.baz.x -``` - -```ucm:error -.> view P0.quux.x -``` diff --git a/unison-src/transcripts/merge.output.md b/unison-src/transcripts/merge.output.md deleted file mode 100644 index ff64e189bb..0000000000 --- a/unison-src/transcripts/merge.output.md +++ /dev/null @@ -1,227 +0,0 @@ - -# How merging works - -Suppose we have two branches, `P1` and `P2`, and a subnamespace, `foo`, which we'll refer to with `P1.foo` , `P2.foo`. 
This doc explains how `merge(P1,P2)` is computed, including the `merge(P1,P2).foo` subnamespace. - -`LCA(P1,P2)` is the lowest common ancestor of `P1` and `P2`. To compute `merge(P1,P2)`, we: - -1. Compute `LCA(P1,P2)` and do a three way merge of that level of the tree, using the algorithm below. What about the children of `P1` and `P2`? Let's just consider a child namespace `foo`. There are a few cases: - 1. `P1` and `P2` both have foo as a child namespace. Then `merge(P1,P2).foo == merge(P1.foo, P2.foo)` - 2. `P1` has `foo` as a child namespace, but `P2` does not (or vice versa). Then we have two subcases: - 1. `LCA(P1,P2)` has no `foo`. This means that `foo` child namespace was added by `P1`. The merged result for the `foo` subnamespace is just `P1.foo`. - 2. `LCA(P1,P2)` does have `foo`. This means that `P2` _deleted_ the `foo` subnamespace. The merged result for the `foo` subnamespace is then `merge(P1.foo, cons empty LCA(P1,P2).foo)`. This does a history-preserving delete of all the definitions that existed at the `LCA` point in history. - 1. Example is like if `P1` added a new definition `foo.bar = 23` after the `LCA`, then `foo.bar` will exist in the merged result, but all the definitions that existed in `foo` at the time of the `LCA` will be deleted in the result. - -### Diff-based 3-way merge algorithm - -Standard 3 way merge algorithm to merge `a` and `b`: - -* Let `lca = LCA(a,b)` -* merged result is: `apply(diff(lca,a) <> diff(lca,b), lca)` - -Relies on some diff combining operation `<>`. - -```unison -foo.w = 2 -foo.x = 1 -baz.x = 3 -quux.x = 4 -``` - -```ucm - ☝️ The namespace .P0 is empty. - -.P0> add - - ⍟ I've added these definitions: - - baz.x : Nat - foo.w : Nat - foo.x : Nat - quux.x : Nat - -``` -Now P0 has 3 sub-namespaces. 
-* foo will be modified definition-wise in each branch -* baz will be deleted in the P2 branch and left alone in P1 -* quux will be deleted in the P2 branch and added to in P1 -* P1 will add a bar sub-namespace - -```ucm -.P0> fork .P0 .P1 - - Done. - -.P0> fork .P0 .P2 - - Done. - -``` -```unison -foo.y = 2483908 -bar.y = 383 -quux.y = 333 -``` - -```ucm -.P1> add - - ⍟ I've added these definitions: - - bar.y : Nat - foo.y : Nat - quux.y : Nat - -.P1> delete.term foo.w - - Name changes: - - Original Changes - 1. P0.foo.w ┐ 2. P1.foo.w (removed) - 3. P1.foo.w │ - 4. P2.foo.w ┘ - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -We added to `foo`, `bar` and `baz`, and deleted `foo.w`, which should stay deleted in the merge. - -```unison -foo.z = +28348 -``` - -```ucm -.P2> add - - ⍟ I've added these definitions: - - foo.z : Int - -.P2> delete.namespace baz - - Removed definitions: - - 1. x : Nat - - Tip: You can use `undo` or `reflog` to undo this change. - -.P2> delete.namespace quux - - Removed definitions: - - 1. x : Nat - - Tip: You can use `undo` or `reflog` to undo this change. - -.P2> find - - 1. foo.w : Nat - 2. foo.x : Nat - 3. foo.z : Int - - -``` -We added `foo.z`, deleted whole namespaces `baz` and `quux` which should stay -deleted in the merge. - -Now we'll try merging `P1` and `P2` back into `P0`. We should see the union of all their definitions in the merged version of `P0`. - -This should succeed and the resulting P0 namespace should have `foo`, `bar` -and `quux` namespaces. - -```ucm -.P0> merge .P1 - - Here's what's changed in the current namespace after the - merge: - - Added definitions: - - 1. bar.y : Nat - 2. foo.y : Nat - 3. quux.y : Nat - - Removed definitions: - - 4. foo.w : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. 
- -.P0> merge .P2 - - Here's what's changed in the current namespace after the - merge: - - Added definitions: - - 1. foo.z : Int - - Removed definitions: - - 2. baz.x : Nat - 3. quux.x : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.P0> find - - 1. bar.y : Nat - 2. foo.x : Nat - 3. foo.y : Nat - 4. foo.z : Int - 5. quux.y : Nat - - -.P0> view foo.x foo.y foo.z bar.y quux.y - - bar.y : Nat - bar.y = 383 - - foo.x : Nat - foo.x = 1 - - foo.y : Nat - foo.y = 2483908 - - foo.z : Int - foo.z = +28348 - - quux.y : Nat - quux.y = 333 - -``` -These test that things we expect to be deleted are still deleted. - -```ucm -.> view P0.foo.w - - ⚠️ - - The following names were not found in the codebase. Check your spelling. - P0.foo.w - -``` -```ucm -.> view P0.baz.x - - ⚠️ - - The following names were not found in the codebase. Check your spelling. - P0.baz.x - -``` -```ucm -.> view P0.quux.x - - ⚠️ - - The following names were not found in the codebase. Check your spelling. - P0.quux.x - -``` diff --git a/unison-src/transcripts/mergeloop.md b/unison-src/transcripts/mergeloop.md deleted file mode 100644 index bb03d5d258..0000000000 --- a/unison-src/transcripts/mergeloop.md +++ /dev/null @@ -1,51 +0,0 @@ -# Merge loop test - -This tests for regressions of https://github.com/unisonweb/unison/issues/1276 where trivial merges cause loops in the history. 
- -Let's make three identical namespaces with different histories: - -```unison -a = 1 -``` - -```ucm -.x> add -``` - -```unison -b = 2 -``` - -```ucm -.x> add -``` - -```unison -b = 2 -``` - -```ucm -.y> add -``` - -```unison -a = 1 -``` - -```ucm -.y> add -``` - -```unison -a = 1 -b = 2 -``` - -```ucm -.z> add -.> merge x y -.> merge y z -.> history z -``` - - diff --git a/unison-src/transcripts/mergeloop.output.md b/unison-src/transcripts/mergeloop.output.md deleted file mode 100644 index 3f319c351b..0000000000 --- a/unison-src/transcripts/mergeloop.output.md +++ /dev/null @@ -1,143 +0,0 @@ -# Merge loop test - -This tests for regressions of https://github.com/unisonweb/unison/issues/1276 where trivial merges cause loops in the history. - -Let's make three identical namespaces with different histories: - -```unison -a = 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - a : ##Nat - -``` -```ucm - ☝️ The namespace .x is empty. - -.x> add - - ⍟ I've added these definitions: - - a : ##Nat - -``` -```unison -b = 2 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - b : ##Nat - -``` -```ucm -.x> add - - ⍟ I've added these definitions: - - b : ##Nat - -``` -```unison -b = 2 -``` - -```ucm - - I found and typechecked the definitions in scratch.u. This - file has been previously added to the codebase. - -``` -```ucm - ☝️ The namespace .y is empty. - -.y> add - - ⍟ I've added these definitions: - - b : ##Nat - -``` -```unison -a = 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - a : ##Nat - -``` -```ucm -.y> add - - ⍟ I've added these definitions: - - a : ##Nat - -``` -```unison -a = 1 -b = 2 -``` - -```ucm - - I found and typechecked the definitions in scratch.u. This - file has been previously added to the codebase. - -``` -```ucm - ☝️ The namespace .z is empty. - -.z> add - - ⍟ I've added these definitions: - - a : ##Nat - b : ##Nat - -.> merge x y - - Nothing changed as a result of the merge. - -.> merge y z - - Nothing changed as a result of the merge. - -.> history z - - Note: The most recent namespace hash is immediately below this - message. - - - - This segment of history starts with a merge. Use - `history #som3n4m3space` to view history starting from a given - namespace hash. - - ⊙ #0ucrusr0bl - ⑃ - #0lf1cvdccp - #ofcsecdak0 - -``` diff --git a/unison-src/transcripts/merges.md b/unison-src/transcripts/merges.md deleted file mode 100644 index f026b7dfac..0000000000 --- a/unison-src/transcripts/merges.md +++ /dev/null @@ -1,119 +0,0 @@ -# Forking and merging namespaces in `ucm` - -```ucm:hide -.> builtins.merge -``` - -The Unison namespace is a versioned tree of names that map to Unison definitions. You can change this namespace and fork and merge subtrees of it. Let's start by introducing a few definitions into a new namespace, `foo`: - -```unison -x = 42 -``` - -```ucm -.> add -``` - -Let's move `x` into a new namespace, `master`: - -```ucm -.> rename.term x master.x -``` - -If you want to do some experimental work in a namespace without disturbing anyone else, you can `fork` it (which is a shorthand for `copy.namespace`). This creates a copy of it, preserving its history. - -> __Note:__ these copies are very efficient to create as they just have pointers into the same underlying definitions. Create as many as you like. 
- -Let's go ahead and do this: - -``` -.> fork master feature1 -.> view master.x -.> view feature1.x -``` - -Great! We can now do some further work in the `feature1` branch, then merge it back into `master` when we're ready. - -```unison -y = "hello" -``` - -```ucm -.feature1> add -.master> merge .feature1 -.master> view y -``` - -> Note: `merge src`, with one argument, merges `src` into the current namespace. You can also do `merge src dest` to merge into any destination namespace. - -Notice that `master` now has the definition of `y` we wrote. - -We can also delete the fork if we're done with it. (Don't worry, it's still in the `history` and can be resurrected at any time.) - -```ucm -.> delete.namespace .feature1 -.> history -``` - -To resurrect an old version of a namespace, you can learn its hash via the `history` command, then use `fork #namespacehash .newname`. - -## Concurrent edits and merges - -In the above scenario the destination namespace (`master`) was strictly behind the source namespace, so the merge didn't have anything interesting to do (Git would call this a "fast forward" merge). In other cases, the source and destination namespaces will each have changes the other doesn't know about, and the merge needs to something more interesting. That's okay too, and Unison will merge those results, using a 3-way merge algorithm. - -> __Note:__ When merging nested namespaces, Unison actually uses a recursive 3-way merge, so it finds a different (and possibly closer) common ancestor at each level of the tree. - -Let's see how this works. We are going to create a copy of `master`, add and delete some definitions in `master` and in the fork, then merge. 
- -```ucm -.> fork master feature2 -``` - -Here's one fork, we add `z` and delete `x`: - -```unison -z = 99 -``` - -```ucm -.feature2> add -.feature2> delete.term x -``` - -And here's the other fork, where we update `y` and add a new definition, `frobnicate`: - -```unison -master.y = "updated y" -master.frobnicate n = n + 1 -``` - -```ucm -.> update -.> view master.y -.> view master.frobnicate -``` - -At this point, `master` and `feature2` both have some changes the other doesn't know about. Let's merge them. - -```ucm -.> merge feature2 master -``` - -Notice that `x` is deleted in the merged branch (it was deleted in `feature2` and untouched by `master`): - -```ucm:error -.> view master.x -``` - -And notice that `y` has the most recent value, and that `z` and `frobnicate` both exist as well: - -```ucm -.> view master.y -.> view master.z -.> view master.frobnicate -``` - -## FAQ - -* What happens if namespace1 deletes a name that namespace2 has updated? A: ??? -* ... diff --git a/unison-src/transcripts/merges.output.md b/unison-src/transcripts/merges.output.md deleted file mode 100644 index 86d2fd1529..0000000000 --- a/unison-src/transcripts/merges.output.md +++ /dev/null @@ -1,410 +0,0 @@ -# Forking and merging namespaces in `ucm` - -The Unison namespace is a versioned tree of names that map to Unison definitions. You can change this namespace and fork and merge subtrees of it. Let's start by introducing a few definitions into a new namespace, `foo`: - -```unison -x = 42 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - x : Nat - -``` -Let's move `x` into a new namespace, `master`: - -```ucm -.> rename.term x master.x - - Done. 
- -``` -If you want to do some experimental work in a namespace without disturbing anyone else, you can `fork` it (which is a shorthand for `copy.namespace`). This creates a copy of it, preserving its history. - -> __Note:__ these copies are very efficient to create as they just have pointers into the same underlying definitions. Create as many as you like. - -Let's go ahead and do this: - -``` -.> fork master feature1 -.> view master.x -.> view feature1.x - -``` - -Great! We can now do some further work in the `feature1` branch, then merge it back into `master` when we're ready. - -```unison -y = "hello" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - y : Text - -``` -```ucm - ☝️ The namespace .feature1 is empty. - -.feature1> add - - ⍟ I've added these definitions: - - y : Text - -.master> merge .feature1 - - Here's what's changed in the current namespace after the - merge: - - Added definitions: - - 1. y : Text - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.master> view y - - y : Text - y = "hello" - -``` -> Note: `merge src`, with one argument, merges `src` into the current namespace. You can also do `merge src dest` to merge into any destination namespace. - -Notice that `master` now has the definition of `y` we wrote. - -We can also delete the fork if we're done with it. (Don't worry, it's still in the `history` and can be resurrected at any time.) - -```ucm -.> delete.namespace .feature1 - - Removed definitions: - - 1. y : Text - - Tip: You can use `undo` or `reflog` to undo this change. - -.> history - - Note: The most recent namespace hash is immediately below this - message. 
- - ⊙ #krakhj8q5b - - - Deletes: - - feature1.y - - ⊙ #j0lfs5asc7 - - + Adds / updates: - - master.y - - = Copies: - - Original name New name(s) - feature1.y master.y - - ⊙ #ej6ll3ac1h - - + Adds / updates: - - feature1.y - - ⊙ #gidrg9pool - - > Moves: - - Original name New name - x master.x - - ⊙ #sdinucmq5a - - + Adds / updates: - - x - - ⊙ #pc4d0e58h3 - - + Adds / updates: - - builtin.Boolean builtin.Boolean.not builtin.Bytes - builtin.Bytes.++ builtin.Bytes.at builtin.Bytes.drop - builtin.Bytes.empty builtin.Bytes.flatten - builtin.Bytes.fromList builtin.Bytes.size - builtin.Bytes.take builtin.Bytes.toList builtin.Char - builtin.Char.fromNat builtin.Char.toNat - builtin.Debug.watch builtin.Doc builtin.Doc.Blob - builtin.Doc.Evaluate builtin.Doc.Join builtin.Doc.Link - builtin.Doc.Signature builtin.Doc.Source builtin.Either - builtin.Either.Left builtin.Either.Right builtin.Float - builtin.Float.* builtin.Float.+ builtin.Float.- - builtin.Float./ builtin.Float.abs builtin.Float.acos - builtin.Float.acosh builtin.Float.asin builtin.Float.asinh - builtin.Float.atan builtin.Float.atan2 builtin.Float.atanh - builtin.Float.ceiling builtin.Float.cos builtin.Float.cosh - builtin.Float.eq builtin.Float.exp builtin.Float.floor - builtin.Float.fromText builtin.Float.gt builtin.Float.gteq - builtin.Float.log builtin.Float.logBase builtin.Float.lt - builtin.Float.lteq builtin.Float.max builtin.Float.min - builtin.Float.pow builtin.Float.round builtin.Float.sin - builtin.Float.sinh builtin.Float.sqrt builtin.Float.tan - builtin.Float.tanh builtin.Float.toText - builtin.Float.truncate builtin.Int builtin.Int.* - builtin.Int.+ builtin.Int.- builtin.Int./ builtin.Int.and - builtin.Int.complement builtin.Int.eq builtin.Int.fromText - builtin.Int.gt builtin.Int.gteq builtin.Int.increment - builtin.Int.isEven builtin.Int.isOdd - builtin.Int.leadingZeros builtin.Int.lt builtin.Int.lteq - builtin.Int.mod builtin.Int.negate builtin.Int.or - builtin.Int.pow builtin.Int.shiftLeft - 
builtin.Int.shiftRight builtin.Int.signum - builtin.Int.toFloat builtin.Int.toText - builtin.Int.trailingZeros builtin.Int.truncate0 - builtin.Int.xor builtin.Link builtin.Link.Term##Link.Term - builtin.Link.Term#quh#0 builtin.Link.Type##Link.Type - builtin.Link.Type#quh#1 builtin.List builtin.List.++ - builtin.List.+: builtin.List.:+ builtin.List.at - builtin.List.cons builtin.List.drop builtin.List.empty - builtin.List.size builtin.List.snoc builtin.List.take - builtin.Nat builtin.Nat.* builtin.Nat.+ builtin.Nat./ - builtin.Nat.and builtin.Nat.complement builtin.Nat.drop - builtin.Nat.eq builtin.Nat.fromText builtin.Nat.gt - builtin.Nat.gteq builtin.Nat.increment builtin.Nat.isEven - builtin.Nat.isOdd builtin.Nat.leadingZeros builtin.Nat.lt - builtin.Nat.lteq builtin.Nat.mod builtin.Nat.or - builtin.Nat.pow builtin.Nat.shiftLeft - builtin.Nat.shiftRight builtin.Nat.sub builtin.Nat.toFloat - builtin.Nat.toInt builtin.Nat.toText - builtin.Nat.trailingZeros builtin.Nat.xor builtin.Optional - builtin.Optional.None builtin.Optional.Some - builtin.Request builtin.SeqView builtin.SeqView.VElem - builtin.SeqView.VEmpty builtin.Test.Result - builtin.Test.Result.Fail builtin.Test.Result.Ok - builtin.Text builtin.Text.!= builtin.Text.++ - builtin.Text.drop builtin.Text.empty builtin.Text.eq - builtin.Text.fromCharList builtin.Text.gt - builtin.Text.gteq builtin.Text.lt builtin.Text.lteq - builtin.Text.size builtin.Text.take - builtin.Text.toCharList builtin.Text.uncons - builtin.Text.unsnoc builtin.Tuple builtin.Tuple.Cons - builtin.Unit builtin.Unit.Unit builtin.Universal.< - builtin.Universal.<= builtin.Universal.== - builtin.Universal.> builtin.Universal.>= - builtin.Universal.compare builtin.bug - builtin.io2.BufferMode - builtin.io2.BufferMode.BlockBuffering - builtin.io2.BufferMode.LineBuffering - builtin.io2.BufferMode.NoBuffering - builtin.io2.BufferMode.SizedBlockBuffering - builtin.io2.FileMode builtin.io2.FileMode.Append - builtin.io2.FileMode.Read 
builtin.io2.FileMode.ReadWrite - builtin.io2.FileMode.Write builtin.io2.Handle - builtin.io2.IO builtin.io2.IO.clientSocket - builtin.io2.IO.closeFile builtin.io2.IO.closeSocket - builtin.io2.IO.createDirectory builtin.io2.IO.fileExists - builtin.io2.IO.forkComp builtin.io2.IO.getBuffering - builtin.io2.IO.getCurrentDirectory - builtin.io2.IO.getFileSize builtin.io2.IO.getFileTimestamp - builtin.io2.IO.getLine builtin.io2.IO.getTempDirectory - builtin.io2.IO.getText builtin.io2.IO.handlePosition - builtin.io2.IO.isDirectory builtin.io2.IO.isFileEOF - builtin.io2.IO.isFileOpen builtin.io2.IO.isSeekable - builtin.io2.IO.listen builtin.io2.IO.openFile - builtin.io2.IO.putText builtin.io2.IO.removeDirectory - builtin.io2.IO.removeFile builtin.io2.IO.renameDirectory - builtin.io2.IO.renameFile builtin.io2.IO.seekHandle - builtin.io2.IO.serverSocket builtin.io2.IO.setBuffering - builtin.io2.IO.setCurrentDirectory - builtin.io2.IO.socketAccept builtin.io2.IO.socketReceive - builtin.io2.IO.socketSend builtin.io2.IO.stdHandle - builtin.io2.IO.systemTime builtin.io2.IOError - builtin.io2.IOError.AlreadyExists builtin.io2.IOError.EOF - builtin.io2.IOError.IllegalOperation - builtin.io2.IOError.NoSuchThing - builtin.io2.IOError.PermissionDenied - builtin.io2.IOError.ResourceBusy - builtin.io2.IOError.ResourceExhausted - builtin.io2.IOError.UserError builtin.io2.Socket - builtin.io2.ThreadId builtin.todo - - □ #7asfbtqmoj (start of history) - -``` -To resurrect an old version of a namespace, you can learn its hash via the `history` command, then use `fork #namespacehash .newname`. - -## Concurrent edits and merges - -In the above scenario the destination namespace (`master`) was strictly behind the source namespace, so the merge didn't have anything interesting to do (Git would call this a "fast forward" merge). In other cases, the source and destination namespaces will each have changes the other doesn't know about, and the merge needs to something more interesting. 
That's okay too, and Unison will merge those results, using a 3-way merge algorithm. - -> __Note:__ When merging nested namespaces, Unison actually uses a recursive 3-way merge, so it finds a different (and possibly closer) common ancestor at each level of the tree. - -Let's see how this works. We are going to create a copy of `master`, add and delete some definitions in `master` and in the fork, then merge. - -```ucm -.> fork master feature2 - - Done. - -``` -Here's one fork, we add `z` and delete `x`: - -```unison -z = 99 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - z : Nat - -``` -```ucm -.feature2> add - - ⍟ I've added these definitions: - - z : Nat - -.feature2> delete.term x - - Name changes: - - Original Changes - 1. feature2.x ┐ 2. feature2.x (removed) - 3. master.x ┘ - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -And here's the other fork, where we update `y` and add a new definition, `frobnicate`: - -```unison -master.y = "updated y" -master.frobnicate n = n + 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - master.frobnicate : Nat -> Nat - master.y : Text - -``` -```ucm -.> update - - ⍟ I've added these definitions: - - master.frobnicate : Nat -> Nat - - ⍟ I've updated these names to your new definition: - - master.y : Text - (The old definition was also named feature2.y. I updated - this name too.) - -.> view master.y - - feature2.y : Text - feature2.y = "updated y" - -.> view master.frobnicate - - master.frobnicate : Nat -> Nat - master.frobnicate n = - use Nat + - n + 1 - -``` -At this point, `master` and `feature2` both have some changes the other doesn't know about. Let's merge them. 
- -```ucm -.> merge feature2 master - - Here's what's changed in master after the merge: - - Added definitions: - - 1. z : Nat - - Removed definitions: - - 2. x : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -Notice that `x` is deleted in the merged branch (it was deleted in `feature2` and untouched by `master`): - -```ucm -.> view master.x - - ⚠️ - - The following names were not found in the codebase. Check your spelling. - master.x - -``` -And notice that `y` has the most recent value, and that `z` and `frobnicate` both exist as well: - -```ucm -.> view master.y - - feature2.y : Text - feature2.y = "updated y" - -.> view master.z - - feature2.z : Nat - feature2.z = 99 - -.> view master.frobnicate - - master.frobnicate : Nat -> Nat - master.frobnicate n = - use Nat + - n + 1 - -``` -## FAQ - -* What happens if namespace1 deletes a name that namespace2 has updated? A: ??? -* ... 
diff --git a/unison-src/transcripts/names.md b/unison-src/transcripts/names.md deleted file mode 100644 index 88f405dbf7..0000000000 --- a/unison-src/transcripts/names.md +++ /dev/null @@ -1,20 +0,0 @@ - Example uses of the `names` command and output -```ucm:hide -.> alias.type ##Int .builtins.Int -``` - -```unison:hide -type IntTriple = IntTriple (Int, Int, Int) -intTriple = IntTriple(+1, +1, +1) -``` - -```ucm:hide -.> add -``` - -```ucm -.> alias.type IntTriple namespc.another.TripleInt -.> alias.term intTriple namespc.another.tripleInt -.> names IntTriple -.> names intTriple -``` \ No newline at end of file diff --git a/unison-src/transcripts/names.output.md b/unison-src/transcripts/names.output.md deleted file mode 100644 index 9f9cb40a2f..0000000000 --- a/unison-src/transcripts/names.output.md +++ /dev/null @@ -1,32 +0,0 @@ - Example uses of the `names` command and output -```unison -type IntTriple = IntTriple (Int, Int, Int) -intTriple = IntTriple(+1, +1, +1) -``` - -```ucm -.> alias.type IntTriple namespc.another.TripleInt - - Done. - -.> alias.term intTriple namespc.another.tripleInt - - Done. - -.> names IntTriple - - Type - Hash: #170h4ackk7 - Names: IntTriple namespc.another.TripleInt - - Term - Hash: #170h4ackk7#0 - Names: IntTriple.IntTriple - -.> names intTriple - - Term - Hash: #uif14vd2oj - Names: intTriple namespc.another.tripleInt - -``` diff --git a/unison-src/transcripts/numbered-args.md b/unison-src/transcripts/numbered-args.md deleted file mode 100644 index 86bf9a2147..0000000000 --- a/unison-src/transcripts/numbered-args.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using numbered arguments in UCM - -```ucm:hide -.> builtins.merge -``` - -First lets add some contents to our codebase. 
- -```unison -foo = "foo" -bar = "bar" -baz = "baz" -qux = "qux" -quux = "quux" -corge = "corge" -``` - -```ucm -.temp> add -``` - -We can get the list of things in the namespace, and UCM will give us a numbered -list: - -```ucm -.temp> find -``` - -We can ask to `view` the second element of this list: - -```ucm -.temp> find -.temp> view 2 -``` - -And we can `view` multiple elements by separating with spaces: - -```ucm -.temp> find -.temp> view 2 3 5 -``` - -We can also ask for a range: - -```ucm -.temp> find -.temp> view 2-4 -``` - -And we can ask for multiple ranges and use mix of ranges and numbers: - -```ucm -.temp> find -.temp> view 1-3 4 5-6 -``` - diff --git a/unison-src/transcripts/numbered-args.output.md b/unison-src/transcripts/numbered-args.output.md deleted file mode 100644 index 50ba5ba06e..0000000000 --- a/unison-src/transcripts/numbered-args.output.md +++ /dev/null @@ -1,162 +0,0 @@ -# Using numbered arguments in UCM - -First lets add some contents to our codebase. - -```unison -foo = "foo" -bar = "bar" -baz = "baz" -qux = "qux" -quux = "quux" -corge = "corge" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - bar : Text - baz : Text - corge : Text - foo : Text - quux : Text - qux : Text - -``` -```ucm - ☝️ The namespace .temp is empty. - -.temp> add - - ⍟ I've added these definitions: - - bar : Text - baz : Text - corge : Text - foo : Text - quux : Text - qux : Text - -``` -We can get the list of things in the namespace, and UCM will give us a numbered -list: - -```ucm -.temp> find - - 1. bar : Text - 2. baz : Text - 3. corge : Text - 4. foo : Text - 5. quux : Text - 6. qux : Text - - -``` -We can ask to `view` the second element of this list: - -```ucm -.temp> find - - 1. bar : Text - 2. baz : Text - 3. corge : Text - 4. foo : Text - 5. quux : Text - 6. 
qux : Text - - -.temp> view 2 - - baz : Text - baz = "baz" - -``` -And we can `view` multiple elements by separating with spaces: - -```ucm -.temp> find - - 1. bar : Text - 2. baz : Text - 3. corge : Text - 4. foo : Text - 5. quux : Text - 6. qux : Text - - -.temp> view 2 3 5 - - baz : Text - baz = "baz" - - corge : Text - corge = "corge" - - quux : Text - quux = "quux" - -``` -We can also ask for a range: - -```ucm -.temp> find - - 1. bar : Text - 2. baz : Text - 3. corge : Text - 4. foo : Text - 5. quux : Text - 6. qux : Text - - -.temp> view 2-4 - - baz : Text - baz = "baz" - - corge : Text - corge = "corge" - - foo : Text - foo = "foo" - -``` -And we can ask for multiple ranges and use mix of ranges and numbers: - -```ucm -.temp> find - - 1. bar : Text - 2. baz : Text - 3. corge : Text - 4. foo : Text - 5. quux : Text - 6. qux : Text - - -.temp> view 1-3 4 5-6 - - bar : Text - bar = "bar" - - baz : Text - baz = "baz" - - corge : Text - corge = "corge" - - foo : Text - foo = "foo" - - quux : Text - quux = "quux" - - qux : Text - qux = "qux" - -``` diff --git a/unison-src/transcripts/propagate.md b/unison-src/transcripts/propagate.md deleted file mode 100644 index cb2a7cc314..0000000000 --- a/unison-src/transcripts/propagate.md +++ /dev/null @@ -1,134 +0,0 @@ -# Propagating type edits - -```ucm:hide -.> builtins.merge -``` - -We introduce a type `Foo` with a function dependent `fooToInt`. - -```unison -use .builtin - -unique type Foo = Foo - -fooToInt : Foo -> Int -fooToInt _ = +42 -``` - -And then we add it. - -```ucm -.subpath> add -.subpath> find.verbose -.subpath> view fooToInt -``` - -Then if we change the type `Foo`... - -```unison -unique type Foo = Foo | Bar -``` - -and update the codebase to use the new type `Foo`... - -```ucm -.subpath> update -``` - -... it should automatically propagate the type to `fooToInt`. 
- -```ucm -.subpath> view fooToInt -``` - -### Preserving user type variables - -We make a term that has a dependency on another term and also a non-redundant -user-provided type signature. - -```unison -use .builtin - -someTerm : Optional foo -> Optional foo -someTerm x = x - -otherTerm : Optional baz -> Optional baz -otherTerm y = someTerm y -``` - -Add that to the codebase: - -```ucm -.subpath.preserve> add -``` - -Let's now edit the dependency: - -```unison -use .builtin - -someTerm : Optional x -> Optional x -someTerm _ = None -``` - -Update... - -```ucm -.subpath.preserve> update -``` - -Now the type of `someTerm` should be `Optional x -> Optional x` and the -type of `otherTerm` should remain the same. - -```ucm -.subpath.preserve> view someTerm -.subpath.preserve> view otherTerm -``` - -### Propagation only applies to the local branch - -Cleaning up a bit... - -```ucm -.> delete.namespace subpath -``` - -Now, we make two terms, where one depends on the other. - -```unison -use .builtin - -someTerm : Optional foo -> Optional foo -someTerm x = x - -otherTerm : Optional baz -> Optional baz -otherTerm y = someTerm y -``` - -We'll make two copies of this namespace. - -```ucm -.subpath.one> add -.subpath> fork one two -``` - -Now let's edit one of the terms... - -```unison -use .builtin - -someTerm : Optional x -> Optional x -someTerm _ = None -``` - -... in one of the namespaces... - -```ucm -.subpath.one> update -``` - -The other namespace should be left alone. - -```ucm -.subpath.two> view someTerm -``` diff --git a/unison-src/transcripts/propagate.output.md b/unison-src/transcripts/propagate.output.md deleted file mode 100644 index 70b7c12ce0..0000000000 --- a/unison-src/transcripts/propagate.output.md +++ /dev/null @@ -1,280 +0,0 @@ -# Propagating type edits - -We introduce a type `Foo` with a function dependent `fooToInt`. 
- -```unison -use .builtin - -unique type Foo = Foo - -fooToInt : Foo -> Int -fooToInt _ = +42 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - unique type Foo - fooToInt : Foo -> Int - -``` -And then we add it. - -```ucm - ☝️ The namespace .subpath is empty. - -.subpath> add - - ⍟ I've added these definitions: - - unique type Foo - fooToInt : Foo -> Int - -.subpath> find.verbose - - 1. -- #qae64o6am81hoadf7eabd909gojboi5iu3g9deip79ro18f11bbhir2vg51grg4m72kr5ikdovi6aupttet0nsqil7f0df9nqr10hqg - unique type Foo - - 2. -- #qae64o6am81hoadf7eabd909gojboi5iu3g9deip79ro18f11bbhir2vg51grg4m72kr5ikdovi6aupttet0nsqil7f0df9nqr10hqg#0 - Foo.Foo : Foo - - 3. -- #hvtmbg1bd8of81n2os4ginnnen13njh47294uandlohooq0ej971u6tl5cdsfq237lec1tc007oajc4dee1fmnflqi6ogom3ecemu5g - fooToInt : Foo -> Int - - - -.subpath> view fooToInt - - fooToInt : Foo -> Int - fooToInt _ = +42 - -``` -Then if we change the type `Foo`... - -```unison -unique type Foo = Foo | Bar -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - unique type Foo - -``` -and update the codebase to use the new type `Foo`... - -```ucm -.subpath> update - - ⍟ I've updated these names to your new definition: - - unique type Foo - -``` -... it should automatically propagate the type to `fooToInt`. - -```ucm -.subpath> view fooToInt - - fooToInt : Foo -> Int - fooToInt _ = +42 - -``` -### Preserving user type variables - -We make a term that has a dependency on another term and also a non-redundant -user-provided type signature. 
- -```unison -use .builtin - -someTerm : Optional foo -> Optional foo -someTerm x = x - -otherTerm : Optional baz -> Optional baz -otherTerm y = someTerm y -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - otherTerm : Optional baz -> Optional baz - someTerm : Optional foo -> Optional foo - -``` -Add that to the codebase: - -```ucm - ☝️ The namespace .subpath.preserve is empty. - -.subpath.preserve> add - - ⍟ I've added these definitions: - - otherTerm : Optional baz -> Optional baz - someTerm : Optional foo -> Optional foo - -``` -Let's now edit the dependency: - -```unison -use .builtin - -someTerm : Optional x -> Optional x -someTerm _ = None -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - someTerm : Optional x -> Optional x - -``` -Update... - -```ucm -.subpath.preserve> update - - ⍟ I've updated these names to your new definition: - - someTerm : Optional x -> Optional x - -``` -Now the type of `someTerm` should be `Optional x -> Optional x` and the -type of `otherTerm` should remain the same. - -```ucm -.subpath.preserve> view someTerm - - someTerm : Optional x -> Optional x - someTerm _ = None - -.subpath.preserve> view otherTerm - - otherTerm : Optional baz -> Optional baz - otherTerm y = someTerm y - -``` -### Propagation only applies to the local branch - -Cleaning up a bit... - -```ucm -.> delete.namespace subpath - - Removed definitions: - - 1. unique type Foo - 2. Foo.Bar : #16d2id848g - 3. Foo.Foo : #16d2id848g - 4. fooToInt : #16d2id848g -> Int - 5. preserve.otherTerm : Optional baz -> Optional baz - 6. preserve.someTerm : Optional x -> Optional x - 7. patch patch - 8. 
patch preserve.patch - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -Now, we make two terms, where one depends on the other. - -```unison -use .builtin - -someTerm : Optional foo -> Optional foo -someTerm x = x - -otherTerm : Optional baz -> Optional baz -otherTerm y = someTerm y -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - otherTerm : Optional baz -> Optional baz - someTerm : Optional foo -> Optional foo - -``` -We'll make two copies of this namespace. - -```ucm - ☝️ The namespace .subpath.one is empty. - -.subpath.one> add - - ⍟ I've added these definitions: - - otherTerm : Optional baz -> Optional baz - someTerm : Optional foo -> Optional foo - -.subpath> fork one two - - Done. - -``` -Now let's edit one of the terms... - -```unison -use .builtin - -someTerm : Optional x -> Optional x -someTerm _ = None -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - someTerm : Optional x -> Optional x - -``` -... in one of the namespaces... - -```ucm -.subpath.one> update - - ⍟ I've updated these names to your new definition: - - someTerm : Optional x -> Optional x - -``` -The other namespace should be left alone. - -```ucm -.subpath.two> view someTerm - - someTerm : Optional foo -> Optional foo - someTerm x = x - -``` diff --git a/unison-src/transcripts/redundant.output.md b/unison-src/transcripts/redundant.output.md deleted file mode 100644 index b778734cd7..0000000000 --- a/unison-src/transcripts/redundant.output.md +++ /dev/null @@ -1,45 +0,0 @@ -The same kind of thing happens with `map`. Are we saying this is incorrect behaviour? 
- -```unison -map : (a -> b) -> [a] -> [b] -map f = cases - x +: xs -> f x +: map f xs - [] -> [] -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - map : (a ->{𝕖} b) ->{𝕖} [a] ->{𝕖} [b] - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - map : (a ->{𝕖} b) ->{𝕖} [a] ->{𝕖} [b] - -.> view map - - map : (a -> b) -> [a] -> [b] - map f = cases - x +: xs -> - use builtin.List +: - f x +: map f xs - [] -> [] - -.> find map - - 1. map : (a ->{𝕖} b) ->{𝕖} [a] ->{𝕖} [b] - - -``` diff --git a/unison-src/transcripts/reflog.md b/unison-src/transcripts/reflog.md deleted file mode 100644 index 202dc50820..0000000000 --- a/unison-src/transcripts/reflog.md +++ /dev/null @@ -1,31 +0,0 @@ -```ucm:hide -.> builtins.merge -``` - -First we make two changes to the codebase, so that there's more than one line -for the `reflog` command to display: - -```unison -x = 1 -``` -```ucm -.> add -``` -```unison -y = 2 -``` -```ucm -.> add -.> view y -``` -```ucm -.> reflog -``` - -If we `reset-root` to its previous value, `y` disappears. -```ucm -.> reset-root 2 -``` -```ucm:error -.> view y -``` diff --git a/unison-src/transcripts/reflog.output.md b/unison-src/transcripts/reflog.output.md deleted file mode 100644 index 1f050afaca..0000000000 --- a/unison-src/transcripts/reflog.output.md +++ /dev/null @@ -1,90 +0,0 @@ -First we make two changes to the codebase, so that there's more than one line -for the `reflog` command to display: - -```unison -x = 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - x : Nat - -``` -```unison -y = 2 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - y : Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - y : Nat - -.> view y - - y : Nat - y = 2 - -``` -```ucm -.> reflog - - Here is a log of the root namespace hashes, starting with the - most recent, along with the command that got us there. Try: - - `fork 2 .old` - `fork #7eiv5s0sk9 .old` to make an old namespace - accessible again, - - `reset-root #7eiv5s0sk9` to reset the root namespace and - its history to that of the - specified namespace. - - 1. #kmsvfuu78t : add - 2. #7eiv5s0sk9 : add - 3. #pc4d0e58h3 : builtins.merge - 4. #7asfbtqmoj : (initial reflogged namespace) - -``` -If we `reset-root` to its previous value, `y` disappears. -```ucm -.> reset-root 2 - - Done. - -``` -```ucm -.> view y - - ⚠️ - - The following names were not found in the codebase. Check your spelling. - y - -``` diff --git a/unison-src/transcripts/resolve.md b/unison-src/transcripts/resolve.md deleted file mode 100644 index 2d03b9dfd3..0000000000 --- a/unison-src/transcripts/resolve.md +++ /dev/null @@ -1,115 +0,0 @@ -# Resolving edit conflicts in `ucm` - -```ucm:hide -.> builtins.merge -``` - -The `ucm` tool tracks edits to hashes in an object called a _patch_. When patches get merged, sometimes those patches will have conflicting edits. The `replace.term` command helps resolve such conflicts. 
- -First, let's make a new namespace, `example.resolve`: - -```ucm -.> cd example.resolve -``` - -Now let's add a term named `a.foo`: - -```unison -a.foo = 42 -``` - -```ucm -.example.resolve> add -``` - -We'll fork the namespace `a` into a new namespace `b`, so we can edit the two concurrently. - -```ucm -.example.resolve> fork a b -``` - -We'll also make a second fork `c` which we'll use as the target for our patch later. - -```ucm -.example.resolve> fork a c -``` - -Now let's make a change to `foo` in the `a` namespace: - -```ucm -.example.resolve> cd a -``` - -```unison -foo = 43 -``` - -```ucm -.example.resolve.a> update -``` - -And make a different change in the `b` namespace: - -```ucm -.example.resolve> cd .example.resolve.b -``` - -```unison -foo = 44 -``` - -```ucm -.example.resolve.b> update -``` - -The `a` and `b` namespaces now each contain a patch named `patch`. We can view these: - -```ucm -.example.resolve.b> cd .example.resolve -.example.resolve> view.patch a.patch -.example.resolve> view.patch b.patch -``` - -Let's now merge these namespaces into `c`: - -```ucm -.example.resolve> merge a c -``` -```ucm:error -.example.resolve> merge b c -``` - -The namespace `c` now has an edit conflict, since the term `foo` was edited in two different ways. - -```ucm -.example.resolve> cd c -.example.resolve.c> todo -``` - -We see that `#44954ulpdf` (the original hash of `a.foo`) got replaced with _both_ the `#8e68dvpr0a` and `#jdqoenu794`. - -We can resolve this conflict by picking one of the terms as the "winner": - -```ucm -.example.resolve.c> replace.term #44954ulpdf #8e68dvpr0a -``` - -This changes the merged `c.patch` so that only the edit from #44954ulpdf to #8e68dvpr0a remains: - -```ucm -.example.resolve.c> view.patch -``` - -We still have a remaining _name conflict_ since it just so happened that both of the definitions in the edits were named `foo`. 
- -```ucm -.example.resolve.c> todo -``` - -We can resolve the name conflict by deleting one of the names. - -```ucm -.example.resolve.c> delete.term foo#jdqoenu794 -``` - -And that's how you resolve edit conflicts with UCM. diff --git a/unison-src/transcripts/resolve.output.md b/unison-src/transcripts/resolve.output.md deleted file mode 100644 index bd319c1ed0..0000000000 --- a/unison-src/transcripts/resolve.output.md +++ /dev/null @@ -1,259 +0,0 @@ -# Resolving edit conflicts in `ucm` - -The `ucm` tool tracks edits to hashes in an object called a _patch_. When patches get merged, sometimes those patches will have conflicting edits. The `replace.term` command helps resolve such conflicts. - -First, let's make a new namespace, `example.resolve`: - -```ucm -.> cd example.resolve - - ☝️ The namespace .example.resolve is empty. - -``` -Now let's add a term named `a.foo`: - -```unison -a.foo = 42 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - a.foo : Nat - -``` -```ucm -.example.resolve> add - - ⍟ I've added these definitions: - - a.foo : Nat - -``` -We'll fork the namespace `a` into a new namespace `b`, so we can edit the two concurrently. - -```ucm -.example.resolve> fork a b - - Done. - -``` -We'll also make a second fork `c` which we'll use as the target for our patch later. - -```ucm -.example.resolve> fork a c - - Done. - -``` -Now let's make a change to `foo` in the `a` namespace: - -```ucm -.example.resolve> cd a - -``` -```unison -foo = 43 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. 
You can `update` them to your - new definition: - - foo : Nat - -``` -```ucm -.example.resolve.a> update - - ⍟ I've updated these names to your new definition: - - foo : Nat - -``` -And make a different change in the `b` namespace: - -```ucm -.example.resolve> cd .example.resolve.b - -``` -```unison -foo = 44 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - foo : Nat - -``` -```ucm -.example.resolve.b> update - - ⍟ I've updated these names to your new definition: - - foo : Nat - -``` -The `a` and `b` namespaces now each contain a patch named `patch`. We can view these: - -```ucm -.example.resolve.b> cd .example.resolve - -.example.resolve> view.patch a.patch - - Edited Terms: c.foo -> a.foo - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -.example.resolve> view.patch b.patch - - Edited Terms: c.foo -> b.foo - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -``` -Let's now merge these namespaces into `c`: - -```ucm -.example.resolve> merge a c - - Here's what's changed in c after the merge: - - Updates: - - 1. foo : Nat - ↓ - 2. foo : Nat - - Added definitions: - - 3. patch patch (added 1 updates) - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -```ucm -.example.resolve> merge b c - - Here's what's changed in c after the merge: - - New name conflicts: - - 1. foo#jdqoenu794 : Nat - ↓ - 2. ┌ foo#8e68dvpr0a : Nat - 3. └ foo#jdqoenu794 : Nat - - Updates: - - 4. 
patch patch (added 1 updates) - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - - I tried to auto-apply the patch, but couldn't because it - contained contradictory entries. - -``` -The namespace `c` now has an edit conflict, since the term `foo` was edited in two different ways. - -```ucm -.example.resolve> cd c - -.example.resolve.c> todo - - ❓ - - These definitions were edited differently in namespaces that - have been merged into this one. You'll have to tell me what to - use as the new definition: - - The term #44954ulpdf was replaced with foo#8e68dvpr0a and - foo#jdqoenu794 - -``` -We see that `#44954ulpdf` (the original hash of `a.foo`) got replaced with _both_ the `#8e68dvpr0a` and `#jdqoenu794`. - -We can resolve this conflict by picking one of the terms as the "winner": - -```ucm -.example.resolve.c> replace.term #44954ulpdf #8e68dvpr0a - - Done. - -``` -This changes the merged `c.patch` so that only the edit from #44954ulpdf to #8e68dvpr0a remains: - -```ucm -.example.resolve.c> view.patch - - Edited Terms: #44954ulpdf -> foo#8e68dvpr0a - - Tip: To remove entries from a patch, use - delete.term-replacement or delete.type-replacement, as - appropriate. - -``` -We still have a remaining _name conflict_ since it just so happened that both of the definitions in the edits were named `foo`. - -```ucm -.example.resolve.c> todo - - ❓ - - These terms have conflicting definitions: foo - - Tip: This occurs when merging branches that both independently - introduce the same name. Use `view foo` to see the - conflicting defintions, then use `move.term` to resolve - the conflicts. - -``` -We can resolve the name conflict by deleting one of the names. - -```ucm -.example.resolve.c> delete.term foo#jdqoenu794 - - Resolved name conflicts: - - 1. ┌ example.resolve.c.foo#8e68dvpr0a : Nat - 2. 
└ example.resolve.c.foo#jdqoenu794 : Nat - ↓ - 3. example.resolve.c.foo#8e68dvpr0a : Nat - - Name changes: - - Original Changes - 4. example.resolve.a.foo ┐ 5. example.resolve.c.foo#jdqoenu794 (removed) - 6. example.resolve.c.foo#jdqoenu794 ┘ - - Tip: You can use `undo` or `reflog` to undo this change. - -``` -And that's how you resolve edit conflicts with UCM. diff --git a/unison-src/transcripts/squash.md b/unison-src/transcripts/squash.md deleted file mode 100644 index a5c00185a6..0000000000 --- a/unison-src/transcripts/squash.md +++ /dev/null @@ -1,132 +0,0 @@ - -```ucm:hide -.> builtins.merge -``` - -# Squash merges - -`squash src dest` merges can be used to merge from `src` to `dest`, discarding the history of `src`. It's useful when the source namespace history is irrelevant or has a bunch of churn you wish to discard. Often when merging small pull requests, you'll use a squash merge. - -Let's look at some examples. We'll start with a namespace with just the builtins. Let's take a look at the hash of this namespace: - -```ucm -.> history builtin -.> fork builtin builtin2 -``` - -(We make a copy of `builtin` for use later in this transcript.) - -Now suppose we `fork` a copy of builtin, then rename `Nat.+` to `frobnicate`, then rename it back. Notice this produces multiple entries in the history: - -```ucm -.> fork builtin mybuiltin -.mybuiltin> rename.term Nat.+ Nat.frobnicate -.mybuiltin> rename.term Nat.frobnicate Nat.+ -.mybuiltin> history -``` - -If we merge that back into `builtin`, we get that same chain of history: - -```ucm -.> merge mybuiltin builtin -.> history builtin -``` - -Let's try again, but using a `merge.squash` (or just `squash`) instead. The history will be unchanged: - -```ucm -.> merge.squash mybuiltin builtin2 -.> history builtin2 -``` - -The churn that happened in `mybuiltin` namespace ended up back in the same spot, so the squash merge of that namespace with our original namespace had no effect. 
- -## Another example - -Let's look at a more interesting example, where the two namespaces have diverged a bit. Here's our starting namespace: - -```unison:hide -x = 1 -``` - -```ucm -.trunk> add -.> fork trunk alice -.> fork trunk bob -``` - -Alice now does some hacking: - -```unison:hide -radNumber = 348 -bodaciousNumero = 2394 -neatoFun x = x -``` - -```ucm -.alice> add -.alice> rename.term radNumber superRadNumber -.alice> rename.term neatoFun productionReadyId -``` - -Meanwhile, Bob does his own hacking: - -```unison:hide -whatIsLove = "?" -babyDon'tHurtMe = ".. Don't hurt me..." -no more = no more -``` - -```ucm -.bob> add -``` - -At this point, Alice and Bob both have some history beyond what's in trunk: - -```ucm -.> history trunk -.> history alice -.> history bob -``` - -Alice then squash merges into `trunk`, as does Bob. It's as if Alice and Bob both made their changes in one single commit. - -```ucm -.> merge.squash alice trunk -.> history trunk -.> merge.squash bob trunk -.> history trunk -``` - -Since squash merges don't produce any merge nodes, we can `undo` a couple times to get back to our starting state: - -```ucm -.> undo -.> undo -.> history trunk -``` - -This time, we'll first squash Alice and Bob's changes together before squashing their combined changes into `trunk`. The resulting `trunk` will have just a single entry in it, combining both Alice and Bob's changes: - -```ucm -.> squash alice bob -.> squash bob trunk -.> history trunk -``` - -So, there you have it. With squashing, you can control the granularity of your history. - -## Throwing out all history - -Another thing we can do is `squash` into an empty namespace. 
This effectively makes a copy of the namespace, but without any of its history: - -```ucm -.> squash alice nohistoryalice -.> history nohistoryalice -``` - -There's nothing really special here, `squash src dest` discards `src` history that comes after the LCA of `src` and `dest`, it's just that in the case of an empty namespace, that LCA is the beginning of time (the empty namespace), so all the history of `src` is discarded. - -## Caveats - -If you `squash mystuff trunk`, you're discarding any history of `mystuff` and just cons'ing onto the history of `trunk`. Thus, don't expect to be able to `merge trunk mystuff` later and get great results. Squashing should only be used when you don't care about the history (and you know others haven't pulled and built on your line of history being discarded, so they don't care about the history either). diff --git a/unison-src/transcripts/squash.output.md b/unison-src/transcripts/squash.output.md deleted file mode 100644 index 5c46b8b97c..0000000000 --- a/unison-src/transcripts/squash.output.md +++ /dev/null @@ -1,472 +0,0 @@ - -# Squash merges - -`squash src dest` merges can be used to merge from `src` to `dest`, discarding the history of `src`. It's useful when the source namespace history is irrelevant or has a bunch of churn you wish to discard. Often when merging small pull requests, you'll use a squash merge. - -Let's look at some examples. We'll start with a namespace with just the builtins. Let's take a look at the hash of this namespace: - -```ucm -.> history builtin - - Note: The most recent namespace hash is immediately below this - message. - - - - □ #tt9sn32lfj (start of history) - -.> fork builtin builtin2 - - Done. - -``` -(We make a copy of `builtin` for use later in this transcript.) - -Now suppose we `fork` a copy of builtin, then rename `Nat.+` to `frobnicate`, then rename it back. Notice this produces multiple entries in the history: - -```ucm -.> fork builtin mybuiltin - - Done. 
- -.mybuiltin> rename.term Nat.+ Nat.frobnicate - - Done. - -.mybuiltin> rename.term Nat.frobnicate Nat.+ - - Done. - -.mybuiltin> history - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #jr4tifejsn - - > Moves: - - Original name New name - Nat.frobnicate Nat.+ - - ⊙ #68dokn5l8c - - > Moves: - - Original name New name - Nat.+ Nat.frobnicate - - □ #tt9sn32lfj (start of history) - -``` -If we merge that back into `builtin`, we get that same chain of history: - -```ucm -.> merge mybuiltin builtin - - Nothing changed as a result of the merge. - -.> history builtin - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #jr4tifejsn - - > Moves: - - Original name New name - Nat.frobnicate Nat.+ - - ⊙ #68dokn5l8c - - > Moves: - - Original name New name - Nat.+ Nat.frobnicate - - □ #tt9sn32lfj (start of history) - -``` -Let's try again, but using a `merge.squash` (or just `squash`) instead. The history will be unchanged: - -```ucm -.> merge.squash mybuiltin builtin2 - - Nothing changed as a result of the merge. - - 😶 - - builtin2 was already up-to-date with mybuiltin. - -.> history builtin2 - - Note: The most recent namespace hash is immediately below this - message. - - - - □ #tt9sn32lfj (start of history) - -``` -The churn that happened in `mybuiltin` namespace ended up back in the same spot, so the squash merge of that namespace with our original namespace had no effect. - -## Another example - -Let's look at a more interesting example, where the two namespaces have diverged a bit. Here's our starting namespace: - -```unison -x = 1 -``` - -```ucm - ☝️ The namespace .trunk is empty. - -.trunk> add - - ⍟ I've added these definitions: - - x : Nat - -.> fork trunk alice - - Done. - -.> fork trunk bob - - Done. 
- -``` -Alice now does some hacking: - -```unison -radNumber = 348 -bodaciousNumero = 2394 -neatoFun x = x -``` - -```ucm -.alice> add - - ⍟ I've added these definitions: - - bodaciousNumero : Nat - neatoFun : x -> x - radNumber : Nat - -.alice> rename.term radNumber superRadNumber - - Done. - -.alice> rename.term neatoFun productionReadyId - - Done. - -``` -Meanwhile, Bob does his own hacking: - -```unison -whatIsLove = "?" -babyDon'tHurtMe = ".. Don't hurt me..." -no more = no more -``` - -```ucm -.bob> add - - ⍟ I've added these definitions: - - babyDon'tHurtMe : Text - no : more -> 𝕣 - whatIsLove : Text - -``` -At this point, Alice and Bob both have some history beyond what's in trunk: - -```ucm -.> history trunk - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #3p3anl2oil - - + Adds / updates: - - x - - □ #7asfbtqmoj (start of history) - -.> history alice - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #t85a26latn - - > Moves: - - Original name New name - neatoFun productionReadyId - - ⊙ #01scl44n4i - - > Moves: - - Original name New name - radNumber superRadNumber - - ⊙ #094h7rbo3m - - + Adds / updates: - - bodaciousNumero neatoFun radNumber - - ⊙ #3p3anl2oil - - + Adds / updates: - - x - - □ #7asfbtqmoj (start of history) - -.> history bob - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #g0mn0tn7ap - - + Adds / updates: - - babyDon'tHurtMe no whatIsLove - - ⊙ #3p3anl2oil - - + Adds / updates: - - x - - □ #7asfbtqmoj (start of history) - -``` -Alice then squash merges into `trunk`, as does Bob. It's as if Alice and Bob both made their changes in one single commit. - -```ucm -.> merge.squash alice trunk - - Here's what's changed in trunk after the merge: - - Added definitions: - - 1. bodaciousNumero : Nat - 2. productionReadyId : x -> x - 3. 
superRadNumber : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.> history trunk - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #tcbafrhd81 - - + Adds / updates: - - bodaciousNumero productionReadyId superRadNumber - - ⊙ #3p3anl2oil - - + Adds / updates: - - x - - □ #7asfbtqmoj (start of history) - -.> merge.squash bob trunk - - Here's what's changed in trunk after the merge: - - Added definitions: - - 1. babyDon'tHurtMe : Text - 2. no : more -> 𝕣 - 3. whatIsLove : Text - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.> history trunk - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #5grq7ao0b4 - - + Adds / updates: - - babyDon'tHurtMe no whatIsLove - - ⊙ #tcbafrhd81 - - + Adds / updates: - - bodaciousNumero productionReadyId superRadNumber - - ⊙ #3p3anl2oil - - + Adds / updates: - - x - - □ #7asfbtqmoj (start of history) - -``` -Since squash merges don't produce any merge nodes, we can `undo` a couple times to get back to our starting state: - -```ucm -.> undo - - Here's the changes I undid - - Name changes: - - Original Changes - 1. bob.babyDon'tHurtMe 2. trunk.babyDon'tHurtMe (added) - - 3. bob.no 4. trunk.no (added) - - 5. bob.whatIsLove 6. trunk.whatIsLove (added) - -.> undo - - Here's the changes I undid - - Name changes: - - Original Changes - 1. alice.bodaciousNumero 2. trunk.bodaciousNumero (added) - - 3. alice.productionReadyId 4. trunk.productionReadyId (added) - - 5. alice.superRadNumber 6. trunk.superRadNumber (added) - -.> history trunk - - Note: The most recent namespace hash is immediately below this - message. 
- - ⊙ #3p3anl2oil - - + Adds / updates: - - x - - □ #7asfbtqmoj (start of history) - -``` -This time, we'll first squash Alice and Bob's changes together before squashing their combined changes into `trunk`. The resulting `trunk` will have just a single entry in it, combining both Alice and Bob's changes: - -```ucm -.> squash alice bob - - Here's what's changed in bob after the merge: - - Added definitions: - - 1. bodaciousNumero : Nat - 2. productionReadyId : x -> x - 3. superRadNumber : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.> squash bob trunk - - Here's what's changed in trunk after the merge: - - Added definitions: - - 1. babyDon'tHurtMe : Text - 2. bodaciousNumero : Nat - 3. no : more -> 𝕣 - 4. productionReadyId : x -> x - 5. superRadNumber : Nat - 6. whatIsLove : Text - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.> history trunk - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #8t5skhmd1g - - + Adds / updates: - - babyDon'tHurtMe bodaciousNumero no productionReadyId - superRadNumber whatIsLove - - ⊙ #3p3anl2oil - - + Adds / updates: - - x - - □ #7asfbtqmoj (start of history) - -``` -So, there you have it. With squashing, you can control the granularity of your history. - -## Throwing out all history - -Another thing we can do is `squash` into an empty namespace. This effectively makes a copy of the namespace, but without any of its history: - -```ucm -.> squash alice nohistoryalice - - Here's what's changed in nohistoryalice after the merge: - - Added definitions: - - 1. bodaciousNumero : Nat - 2. productionReadyId : x -> x - 3. superRadNumber : Nat - 4. 
x : Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.> history nohistoryalice - - Note: The most recent namespace hash is immediately below this - message. - - ⊙ #fs1a0n3q3r - - + Adds / updates: - - bodaciousNumero productionReadyId superRadNumber x - - □ #7asfbtqmoj (start of history) - -``` -There's nothing really special here, `squash src dest` discards `src` history that comes after the LCA of `src` and `dest`, it's just that in the case of an empty namespace, that LCA is the beginning of time (the empty namespace), so all the history of `src` is discarded. - -## Caveats - -If you `squash mystuff trunk`, you're discarding any history of `mystuff` and just cons'ing onto the history of `trunk`. Thus, don't expect to be able to `merge trunk mystuff` later and get great results. Squashing should only be used when you don't care about the history (and you know others haven't pulled and built on your line of history being discarded, so they don't care about the history either). diff --git a/unison-src/transcripts/suffixes.md b/unison-src/transcripts/suffixes.md deleted file mode 100644 index 359e40b7ed..0000000000 --- a/unison-src/transcripts/suffixes.md +++ /dev/null @@ -1,40 +0,0 @@ -# Suffix-based resolution of names - -```ucm:hide -.> builtins.merge -``` - -Any unique name suffix can be used to refer to a definition. For instance: - -```unison:hide --- No imports needed even though FQN is `builtin.{Int,Nat}` -foo.bar.a : Int -foo.bar.a = +99 - --- No imports needed even though FQN is `builtin.Optional.{None,Some}` -optional.isNone = cases - None -> true - Some _ -> false -``` - -This also affects commands like find. 
Notice lack of qualified names in output: - -```ucm -.> add -.> find take -``` - -The `view` and `display` commands also benefit from this: - -```ucm -.> view List.drop -.> display bar.a -``` - -In the signature, we don't see `base.Nat`, just `Nat`. The full declaration name is still shown for each search result though. - -Type-based search also benefits from this, we can just say `Nat` rather than `.base.Nat`: - -```ucm -.> find : Nat -> [a] -> [a] -``` diff --git a/unison-src/transcripts/suffixes.output.md b/unison-src/transcripts/suffixes.output.md deleted file mode 100644 index 5e815bdbbd..0000000000 --- a/unison-src/transcripts/suffixes.output.md +++ /dev/null @@ -1,57 +0,0 @@ -# Suffix-based resolution of names - -Any unique name suffix can be used to refer to a definition. For instance: - -```unison --- No imports needed even though FQN is `builtin.{Int,Nat}` -foo.bar.a : Int -foo.bar.a = +99 - --- No imports needed even though FQN is `builtin.Optional.{None,Some}` -optional.isNone = cases - None -> true - Some _ -> false -``` - -This also affects commands like find. Notice lack of qualified names in output: - -```ucm -.> add - - ⍟ I've added these definitions: - - foo.bar.a : Int - optional.isNone : Optional a -> Boolean - -.> find take - - 1. builtin.Bytes.take : Nat -> Bytes -> Bytes - 2. builtin.List.take : Nat -> [a] -> [a] - 3. builtin.Text.take : Nat -> Text -> Text - - -``` -The `view` and `display` commands also benefit from this: - -```ucm -.> view List.drop - - -- builtin.List.drop is built-in. - -.> display bar.a - - +99 - -``` -In the signature, we don't see `base.Nat`, just `Nat`. The full declaration name is still shown for each search result though. - -Type-based search also benefits from this, we can just say `Nat` rather than `.base.Nat`: - -```ucm -.> find : Nat -> [a] -> [a] - - 1. builtin.List.drop : Nat -> [a] -> [a] - 2. 
builtin.List.take : Nat -> [a] -> [a] - - -``` diff --git a/unison-src/transcripts/todo-bug-builtins.md b/unison-src/transcripts/todo-bug-builtins.md deleted file mode 100644 index c7d88fb784..0000000000 --- a/unison-src/transcripts/todo-bug-builtins.md +++ /dev/null @@ -1,27 +0,0 @@ -# The `todo` and `bug` builtin - -```ucm:hide -.> builtins.merge -``` - -`todo` and `bug` have type `a -> b`. They take a message or a value of type `a` and crash during runtime displaying `a` in ucm. -```unison:error -> todo "implement me later" -``` -```unison:error -> bug "there's a bug in my code" -``` - -## Todo -`todo` is useful if you want to come back to a piece of code later but you want your project to compile. -```unison -complicatedMathStuff x = todo "Come back and to something with x here" -``` - -## Bug -`bug` is used to indicate that a particular branch is not expected to execute. -```unison -test = match true with - true -> "Yay" - false -> bug "Wow, that's unexpected" -``` diff --git a/unison-src/transcripts/todo-bug-builtins.output.md b/unison-src/transcripts/todo-bug-builtins.output.md deleted file mode 100644 index 3a62517f8c..0000000000 --- a/unison-src/transcripts/todo-bug-builtins.output.md +++ /dev/null @@ -1,89 +0,0 @@ -# The `todo` and `bug` builtin - -`todo` and `bug` have type `a -> b`. They take a message or a value of type `a` and crash during runtime displaying `a` in ucm. -```unison -> todo "implement me later" -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 💔💥 - - I've encountered a call to builtin.todo with the following - value: - - "implement me later" - - I'm sorry this message doesn't have more detail about the - location of the failure. My makers plan to fix this in a - future release. 😢 - -``` -```unison -> bug "there's a bug in my code" -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... 
Ctrl+C cancels. - - 💔💥 - - I've encountered a call to builtin.bug with the following - value: - - "there's a bug in my code" - - I'm sorry this message doesn't have more detail about the - location of the failure. My makers plan to fix this in a - future release. 😢 - -``` -## Todo -`todo` is useful if you want to come back to a piece of code later but you want your project to compile. -```unison -complicatedMathStuff x = todo "Come back and to something with x here" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - complicatedMathStuff : x -> 𝕣 - -``` -## Bug -`bug` is used to indicate that a particular branch is not expected to execute. -```unison -test = match true with - true -> "Yay" - false -> bug "Wow, that's unexpected" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - test : Text - -``` diff --git a/unison-src/transcripts/transcript-parser-commands.md b/unison-src/transcripts/transcript-parser-commands.md deleted file mode 100644 index e39fd10885..0000000000 --- a/unison-src/transcripts/transcript-parser-commands.md +++ /dev/null @@ -1,41 +0,0 @@ -### Transcript parser operations - -```ucm:hide -.> builtins.merge -``` - -The transcript parser is meant to parse `ucm` and `unison` blocks. - -```unison -x = 1 -``` - -```ucm -.> add -``` - -```unison:hide:error:scratch.u -z -``` - -```ucm:error -.> delete foo -``` - -```ucm :error -.> delete lineToken.call -``` - -However handling of blocks of other languages should be supported. 
- -```python -some python code -``` - -```c_cpp -some C++ code -``` - -```c9search -some cloud9 code -``` diff --git a/unison-src/transcripts/transcript-parser-commands.output.md b/unison-src/transcripts/transcript-parser-commands.output.md deleted file mode 100644 index 1a1cdbc916..0000000000 --- a/unison-src/transcripts/transcript-parser-commands.output.md +++ /dev/null @@ -1,72 +0,0 @@ -### Transcript parser operations - -The transcript parser is meant to parse `ucm` and `unison` blocks. - -```unison -x = 1 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - x : Nat - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - x : Nat - -``` -```unison ---- -title: :scratch.u ---- -z - -``` - - -```ucm -.> delete foo - - ⚠️ - - I don't know about that name. - -``` -```ucm -.> delete lineToken.call - - ⚠️ - - I don't know about that name. - -``` -However handling of blocks of other languages should be supported. - -```python - -some python code - -``` - -```c_cpp - -some C++ code - -``` - -```c9search - -some cloud9 code - -``` - diff --git a/unison-src/transcripts/unitnamespace.md b/unison-src/transcripts/unitnamespace.md deleted file mode 100644 index 3fcee464f8..0000000000 --- a/unison-src/transcripts/unitnamespace.md +++ /dev/null @@ -1,9 +0,0 @@ -```unison -foo = "bar" -``` - -```ucm -.> cd () -.()> add -.> delete.namespace () -``` diff --git a/unison-src/transcripts/unitnamespace.output.md b/unison-src/transcripts/unitnamespace.output.md deleted file mode 100644 index 1e3a726877..0000000000 --- a/unison-src/transcripts/unitnamespace.output.md +++ /dev/null @@ -1,35 +0,0 @@ -```unison -foo = "bar" -``` - -```ucm - - I found and typechecked these definitions in scratch.u. 
If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - foo : ##Text - -``` -```ucm -.> cd () - - ☝️ The namespace .() is empty. - -.()> add - - ⍟ I've added these definitions: - - foo : ##Text - -.> delete.namespace () - - Removed definitions: - - 1. foo : ##Text - - Tip: You can use `undo` or `reflog` to undo this change. - -``` diff --git a/yaks/easytest/LICENSE b/yaks/easytest/LICENSE deleted file mode 100644 index 5575aa473b..0000000000 --- a/yaks/easytest/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013, Paul Chiusano - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/yaks/easytest/README.markdown b/yaks/easytest/README.markdown deleted file mode 100644 index 561cb8016e..0000000000 --- a/yaks/easytest/README.markdown +++ /dev/null @@ -1,264 +0,0 @@ -EasyTest is a simple testing toolkit, meant to replace most uses of QuickCheck, SmallCheck, HUnit, and frameworks like Tasty, etc. 
Here's an example usage: - -```Haskell -module Main where - -import EasyTest -import Control.Applicative -import Control.Monad - -suite :: Test () -suite = tests - [ scope "addition.ex1" $ expect (1 + 1 == 2) - , scope "addition.ex2" $ expect (2 + 3 == 5) - , scope "list.reversal" . fork $ do - -- generate lists from size 0 to 10, of Ints in (0,43) - -- shorthand: listsOf [0..10] (int' 0 43) - ns <- [0..10] `forM` \n -> replicateM n (int' 0 43) - ns `forM_` \ns -> expect (reverse (reverse ns) == ns) - -- equivalent to `scope "addition.ex3"` - , scope "addition" . scope "ex3" $ expect (3 + 3 == 6) - , scope "always passes" $ do - note "I'm running this test, even though it always passes!" - ok -- like `pure ()`, but records a success result - , scope "failing test" $ crash "oh noes!!" ] - --- NB: `run suite` would run all tests, but we only run --- tests whose scopes are prefixed by "addition" -main = runOnly "addition" suite -``` - -This generates the output: - -``` -Randomness seed for this run is 5104092164859451056 -Raw test output to follow ... ------------------------------------------------------------- -OK addition.ex1 -OK addition.ex2 -OK addition.ex3 ------------------------------------------------------------- -✅ 3 tests passed, no failures! 👍 🎉 -``` - -The idea here is to write tests with ordinary Haskell code, with control flow explicit and under programmer control. 
Tests are values of type `Test a`, and `Test` forms a monad with access to: - -* repeatable randomness (the `random` and `random'` functions for random and bounded random values, or handy specialized `int`, `int'`, `double`, `double'`, etc) -* I/O (via `liftIO` or `EasyTest.io`, which is an alias for `liftIO`) -* failure (via `crash`, which yields a stack trace, or `fail`, which does not) -* logging (via `note`, `noteScoped`, or `note'`) -* hierarchically-named subcomputations which can be switched on and off (in the above code, notice that only the tests scoped under `"addition"` are run, and we could do `run` instead of `runOnly` if we wanted to run the whole suite) -* parallelism (note the `fork` which runs that subtree of the test suite in a parallel thread). -* conjunction of tests via `MonadPlus` (the `<|>` operation runs both tests, even if the first test fails, and the `tests` function used above is just `msum`). - -Using any or all of these capabilities, you assemble `Test` values into a "test suite" (just another `Test` value) using ordinary Haskell code, not framework magic. Notice that to generate a list of random values, we just `replicateM` and `forM` as usual. If this gets tedious... we can factor this logic out into helper functions! For instance: - -```Haskell -listOf :: Int -> Test a -> Test [a] -listOf = replicateM - -listsOf :: [Int] -> Test a -> Test [[a]] -listsOf sizes gen = sizes `forM` \n -> listOf n gen - -ex :: Test () -ex = do - ns <- listsOf [0..100] int - ns `forM_` \ns -> expect (reverse (reverse ns) == ns) -``` - -This library is opinionated and might not be for everyone. If you're curious about any of the design decisions made, see [my rationale](#rationale) for writing it. 
- -### User guide - -The simplest tests are `ok`, `crash`, and `expect`: - -```Haskell --- Record a success -ok :: Test () - --- Record a failure -crash :: String -> Test a - --- Record a success if `True`, otherwise record a failure -expect :: Bool -> Test () -``` - -NB: `fail` is equivalent to `crash`, but doesn't provide a stack trace on failure. - -We can lift I/O into `Test` using `io` (or `liftIO`, but I always forget where to import that from): - -``` -io :: IO a -> Test a -``` - -`Test` is also a `Monad`. Note that `return` and `pure` do not record a result. Use `ok`, `expect`, or `crash` for that purpose. - -We often want to label tests so we can see when they succeed or fail. For that we use `scope`: - -``` --- | Label a test. Can be nested. A `'.'` is placed between nested --- scopes, so `scope "foo" . scope "bar"` is equivalent to `scope "foo.bar"` -scope :: String -> Test a -> Test a -``` - -Here's an example usage, putting all these primitives together: - -```Haskell -module Main where - -import EasyTest (ok, scope, crash, expect, run) - -suite :: Test () -suite = do - ok - scope "test-crash" $ crash "oh noes!" - expect (1 + 1 == 2) - -main = run suite -``` - -This example is _sequencing_ the `ok`, `crash`, and `expect` monadically, so the test halts at the first failure. The output is: - -``` -Randomness seed for this run is 1830293182471192517 -Raw test output to follow ... ------------------------------------------------------------- -test-crash FAILURE oh noes! 
CallStack (from HasCallStack): - crash, called at /Users/pchiusano/code/easytest/tests/Suite.hs:10:24 in main:Main -OK -FAILED test-crash ------------------------------------------------------------- - - - 1 passed - 1 FAILED (failed scopes below) - "test-crash" - - To rerun with same random seed: - - EasyTest.rerun 1830293182471192517 - EasyTest.rerunOnly 1830293182471192517 "test-crash" - - ------------------------------------------------------------- -❌ -``` - -In the output (which is streamed to the console), we get a stack trace pointing to the line where `crash` was called (`..tests/Suite.hs:10:24`), information about failing tests, and instructions for rerunning the tests with an identical random seed (in this case, there's no randomness, so `rerun` would work fine, but if our test generated random data, we might want to rerun with the exact same random numbers). - -The last line of the output always indicates success or failure of the overall suite... and information about any failing tests is _immediately_ above that. You should NEVER have to scroll through a bunch of test output just to find out which tests actually failed! Also, the streaming output always has `OK` or `FAILED` as the _leftmost_ text for ease of scanning. - -If you try running a test suite that has no results recorded (like if you have a typo in a call to `runOnly`, or you forget to use `ok` or `expect` to record a test result), you'll see a warning like this: - -``` -😶 hmm ... no test results recorded -Tip: use `ok`, `expect`, or `crash` to record results -Tip: if running via `runOnly` or `rerunOnly`, check for typos -``` - -The various `run` functions (`run`, `runOnly`, `rerun`, and `rerunOnly`) all exit the process with a nonzero status in the event of a failure, so they can be used for continuous integration or test running tools that key off the process exit code to determine whether the suite succeeded or failed. 
For instance, here's the relevant portion of a typical cabal file: - -``` --- Preferred way to run EasyTest-based test suite -executable runtests - main-is: NameOfYourTestSuite.hs - ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 - hs-source-dirs: tests - other-modules: - build-depends: - base, - easytest - --- I really have no idea why you'd ever use this, unless you --- really feel the need to run your tests via cabal's "test runner" --- which "conveniently" hides all output unless you pass it some --- random flag I never remember -test-suite tests - type: exitcode-stdio-1.0 - main-is: NameOfYourTestSuite.hs - ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 - hs-source-dirs: tests - other-modules: - build-depends: - base, - easytest -``` - -For tests that are logically separate, we usually combine them into a suite using `tests` (which is just `msum`), as in: - -```Haskell -suite = tests - [ scope "ex1" $ expect (1 + 1 == 2) - , scope "ex2" $ expect (2 + 2 == 4) ] - --- equivalently -suite = - (scope "ex1" $ expect (1 + 1 == 2)) <|> - (scope "ex2" $ expect (2 + 2 == 4)) -``` - -Importantly, each branch of a `<|>` or `tests` gets its own copy of the randomness source, so even when branches of the test suite are switched on or off, the randomness received by a branch is the same. This is important for being able to quickly iterate on a test failure! - -Sometimes, tests take a while to run and we want to make use of parallelism. For that, use `EasyTest.fork` or `fork'`: - -```Haskell --- | Run a test in a separate thread, not blocking for its result. -fork :: Test a -> Test () - --- | Run a test in a separate thread, not blocking for its result, but --- return a future which can be used to block on the result. -fork' :: Test a -> Test (Test a) -``` - -Note: There's no "framework global" parallelism configuration setting. 
- -We often want to generate random data for testing purposes: - -```Haskell -reverseTest :: Test () -reverseTest = scope "list reversal" $ do - nums <- listsOf [0..100] (int' 0 99) - nums `forM_` \nums -> expect (reverse (reverse nums) == nums) -``` - -Tip: generate your test cases in order of increasing size. If you get a failure, your test case is closer to "minimal". - -The above code generates lists of sizes `0` through `100`, consisting of `Int` values in the range `0` through `99`. `int' :: Int -> Int -> Test Int`, and there are analogous functions for `Double`, `Word`, etc. The most general functions are: - -```Haskell -random :: Random a => Test a -random' :: Random a => a -> a -> Test a -``` - -The functions `int`, `char`, `bool`, `double`, etc are just specialized aliases for `random`, and `int'`, `char'`, etc are just aliases for `random'`. The aliases are sometimes useful in situations where use of the generic `random` or `random'` would require type annotations. - -If our list reversal test failed, we might use `runOnly "list reversal"` or `rerunOnly "list reversal"` to rerun just that subtree of the test suite, and we might add some additional diagnostics to see what was going on: - -```Haskell -reverseTest :: Test () -reverseTest = scope "list reversal" $ do - nums <- listsOf [0..100] (int' 0 99) - nums `forM_` \nums -> do - note $ "nums: " ++ show nums - let r = reverse (reverse nums) - note $ "reverse (reverse nums): " ++ show r - expect (r == nums) -``` - -The idea is that these sorts of detailed diagnostics are added lazily (and temporarily) to find and fix failing tests. You can also add diagnostics via `io (putStrLn "blah")`, but if you have tests running in parallel this can sometimes get confusing. - -That's it! Just use ordinary monadic code to generate any testing data and to run your tests. - -### Why? - -Here's some of my thinking in the design of this library: - -* Testing should be uncomplicated, minimal friction, and ideally: FUN. 
If I have to think too much or remember arbitrary framework magic, I get irritated. -* A lot of testing frameworks are weirdly optimized for adding lots of diagnostic information up front, as if whatever diagnostic information you happen to think to capture will be exactly what is needed to fix whatever bugs your tests reveal. In my experience this is almost never the case, so EasyTest takes the opposite approach: be EXTREMELY LAZY about adding diagnostics and labeling subexpressions, but make it trivial to reproduce failing tests without running your entire suite. If a test fails, you can easily rerun just that test, with the exact same random seed, and add whatever diagnostics or print statements you need to track down what's wrong. And EasyTest helpfully tells you how to do this rerunning whenever your tests fail, because otherwise I'd never remember. (Again: keep the friction LOW!) -* Another reason not to add diagnostics up front: you avoid needing to remember two different versions of every function or operator (the one you use in your regular code, and the one you use with your testing "framework" to supply diagnostics). HUnit has operators named `(@=?)`, `(~?=)`, and a bunch of others for asserting equality with diagnostics on failure. QuickCheck has `(.&&.)` and `(.||.)`. Just... no. -* HUnit, QuickCheck, SmallCheck, Tasty, and whatever else are frameworks that hide control flow from the programmer and make some forms of control flow difficult or impossible to specify (for instance, you can't do I/O in your regular QuickCheck tests... unless you use `Test.QuickCheck.Monadic`, which has yet another API you have to learn!). In contrast, EasyTest is just a single data type with a monadic API and a few helper functions. You assemble your tests using ordinary monadic code, and there is never any magic. Want to abstract over something? _Write a regular function._ Need to generate some testing data? Write regular functions. 
-* "How do I modify the number of generated test cases for QuickCheck for just one of my properties?" Or control the maximum size for these `Gen` and `Arbitrary` types? Some arbitrary "configuration setting" that you have to look up every time. No thanks! -* Seriously, global configuration settings are evil! I want fine-grained control over the amount of parallelism, test case sizes, and so on. And if I find I'm repeating myself a lot... I'll _introduce a regular Haskell variable or function!_. DOWN WITH FRAMEWORKS AND THEIR DAMN CONFIGURATION SETTINGS!! -* Most of the functionality of QuickCheck is overkill anyway! There's no need for `Arbitrary` instances (explicit generation is totally fine, and even preferred in most cases), `Coarbitrary` (cute, but not useful when the HOF you are testing is parametric), or shrinking (just generate your test cases in increasing sizes, and your first failure will be the smallest!). - -I hope that you enjoy writing your tests with this library! diff --git a/yaks/easytest/easytest.cabal b/yaks/easytest/easytest.cabal deleted file mode 100644 index a20f20e5f4..0000000000 --- a/yaks/easytest/easytest.cabal +++ /dev/null @@ -1,95 +0,0 @@ -cabal-version: 2.2 -name: easytest -category: Compiler -version: 0.1 -license: MIT -license-file: LICENSE -author: Paul Chiusano -maintainer: Paul Chiusano -stability: provisional -homepage: http://unisonweb.org -bug-reports: https://github.com/unisonweb/unison/issues -copyright: Copyright (C) 2016 Paul Chiusano and contributors -synopsis: Simple, expressive testing library - -build-type: Simple -extra-source-files: -data-files: - -source-repository head - type: git - location: git://github.com/unisonweb/unison.git - --- `cabal install -foptimized` enables optimizations -flag optimized - manual: True - default: False - -flag quiet - manual: True - default: False - --- NOTE: Keep in sync throughout repo. 
-common unison-common - default-language: Haskell2010 - default-extensions: - ApplicativeDo, - BlockArguments, - DeriveFunctor, - DerivingStrategies, - DoAndIfThenElse, - FlexibleContexts, - FlexibleInstances, - LambdaCase, - MultiParamTypeClasses, - ScopedTypeVariables, - TupleSections, - TypeApplications - -library - import: unison-common - - hs-source-dirs: src - - exposed-modules: - EasyTest - - -- these bounds could probably be made looser - build-depends: - async >= 2.1.1, - base >= 4.3, - mtl >= 2.0.1, - containers >= 0.4.0, - stm >= 2.4, - random >= 1.1 - - ghc-options: -Wall -fno-warn-name-shadowing - - if flag(optimized) - ghc-options: -funbox-strict-fields -O2 - - if flag(quiet) - ghc-options: -v0 - --- Preferred way to run EasyTest-based test suite -executable runtests - import: unison-common - main-is: Suite.hs - ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 - hs-source-dirs: tests - other-modules: - build-depends: - base, - easytest - --- I really have no idea why you'd ever use this, just use an executable as above -test-suite tests - import: unison-common - type: exitcode-stdio-1.0 - main-is: Suite.hs - ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 - hs-source-dirs: tests - other-modules: - build-depends: - base, - easytest diff --git a/yaks/easytest/src/EasyTest.hs b/yaks/easytest/src/EasyTest.hs deleted file mode 100644 index 3ec96f125b..0000000000 --- a/yaks/easytest/src/EasyTest.hs +++ /dev/null @@ -1,458 +0,0 @@ -{-# Language BangPatterns #-} -{-# Language FunctionalDependencies #-} -{-# Language GeneralizedNewtypeDeriving #-} - -module EasyTest where - -import Control.Applicative -import Control.Concurrent -import Control.Concurrent.STM -import Control.Exception -import Control.Monad -import Control.Monad.IO.Class -import Control.Monad.Reader -import Data.List -import Data.Map (Map) -import Data.Word -import GHC.Stack -import System.Random (Random) -import qualified Control.Concurrent.Async as A -import qualified Data.Map 
as Map -import qualified System.Random as Random - -data Status = Failed | Passed !Int | Skipped | Pending - -combineStatus :: Status -> Status -> Status -combineStatus Skipped s = s -combineStatus s Skipped = s -combineStatus _ Pending = Pending -combineStatus Pending _ = Pending -combineStatus Failed _ = Failed -combineStatus _ Failed = Failed -combineStatus (Passed n) (Passed m) = Passed (n + m) - -data Env = - Env { rng :: TVar Random.StdGen - , messages :: String - , results :: TBQueue (Maybe (TMVar (String, Status))) - , note_ :: String -> IO () - , allow :: String } - -newtype Test a = Test (ReaderT Env IO (Maybe a)) - -io :: IO a -> Test a -io = liftIO - -atomicLogger :: IO (String -> IO ()) -atomicLogger = do - lock <- newMVar () - pure $ \msg -> - -- force msg before acquiring lock - let dummy = foldl' (\_ ch -> ch == 'a') True msg - in dummy `seq` bracket (takeMVar lock) (\_ -> putMVar lock ()) (\_ -> putStrLn msg) - -expect' :: HasCallStack => Bool -> Test () -expect' False = crash "unexpected" -expect' True = pure () - -expect :: HasCallStack => Bool -> Test () -expect False = crash "unexpected" -expect True = ok - -expectEqual :: (Eq a, Show a) => a -> a -> Test () -expectEqual expected actual = if expected == actual then ok - else crash $ unlines ["", show actual, "** did not equal expected value **", show expected] - -expectNotEqual :: (Eq a, Show a) => a -> a -> Test () -expectNotEqual forbidden actual = - if forbidden /= actual then ok - else crash $ unlines ["", show actual, "** did equal the forbidden value **", show forbidden] - -expectJust :: HasCallStack => Maybe a -> Test a -expectJust Nothing = crash "expected Just, got Nothing" -expectJust (Just a) = ok >> pure a - -expectRight :: HasCallStack => Either e a -> Test a -expectRight (Left _) = crash "expected Right, got Left" -expectRight (Right a) = ok >> pure a - -expectLeft :: HasCallStack => Either e a -> Test e -expectLeft (Left e) = ok >> pure e -expectLeft (Right _) = crash "expected 
Left, got Right" - -tests :: [Test ()] -> Test () -tests = msum - --- | Run all tests whose scope starts with the given prefix -runOnly :: String -> Test a -> IO () -runOnly prefix t = do - logger <- atomicLogger - seed <- abs <$> Random.randomIO :: IO Int - run' seed logger prefix t - --- | Run all tests with the given seed and whose scope starts with the given prefix -rerunOnly :: Int -> String -> Test a -> IO () -rerunOnly seed prefix t = do - logger <- atomicLogger - run' seed logger prefix t - -run :: Test a -> IO () -run = runOnly "" - -rerun :: Int -> Test a -> IO () -rerun seed = rerunOnly seed [] - -run' :: Int -> (String -> IO ()) -> String -> Test a -> IO () -run' seed note allow (Test t) = do - let !rng = Random.mkStdGen seed - resultsQ <- atomically (newTBQueue 50) - rngVar <- newTVarIO rng - note $ "Randomness seed for this run is " ++ show seed ++ "" - results <- atomically $ newTVar Map.empty - rs <- A.async . forever $ do - -- note, totally fine if this bombs once queue is empty - Just result <- atomically $ readTBQueue resultsQ - (msgs, passed) <- atomically $ takeTMVar result - atomically $ modifyTVar results (Map.insertWith combineStatus msgs passed) - resultsMap <- readTVarIO results - case Map.findWithDefault Skipped msgs resultsMap of - Skipped -> pure () - Pending -> note $ "🚧 " ++ msgs - Passed n -> note $ "\129412 " ++ (if n <= 1 then msgs else "(" ++ show n ++ ") " ++ msgs) - Failed -> note $ "💥 " ++ msgs - let line = "------------------------------------------------------------" - note "Raw test output to follow ... 
" - note line - e <- try (runReaderT (void t) (Env rngVar [] resultsQ note allow)) :: IO (Either SomeException ()) - case e of - Left e -> note $ "Exception while running tests: " ++ show e - Right () -> pure () - atomically $ writeTBQueue resultsQ Nothing - _ <- A.waitCatch rs - resultsMap <- readTVarIO results - let - resultsList = Map.toList resultsMap - succeededList = [ n | (_, Passed n) <- resultsList ] - succeeded = length succeededList - -- totalTestCases = foldl' (+) 0 succeededList - failures = [ a | (a, Failed) <- resultsList ] - failed = length failures - pendings = [ a | (a, Pending) <- resultsList ] - pending = length pendings - pendingSuffix = if pending == 0 then "👍 🎉" else "" - testsPlural n = show n ++ " " ++ if n == 1 then "test" else "tests" - note line - note "\n" - when (pending > 0) $ do - note $ "🚧 " ++ testsPlural pending ++ " still pending (pending scopes below):" - note $ " " ++ intercalate "\n " (map (show . takeWhile (/= '\n')) pendings) - case failures of - [] -> - case succeeded of - 0 -> do - note "😶 hmm ... no test results recorded" - note "Tip: use `ok`, `expect`, or `crash` to record results" - note "Tip: if running via `runOnly` or `rerunOnly`, check for typos" - n -> note $ "✅ " ++ testsPlural n ++ " passed, no failures! " ++ pendingSuffix - (hd:_) -> do - note $ " " ++ show succeeded ++ (if failed == 0 then " PASSED" else " passed") - note $ " " ++ show (length failures) ++ (if failed == 0 then " failed" else " FAILED (failed scopes below)") - note $ " " ++ intercalate "\n " (map (show . takeWhile (/= '\n')) failures) - note "" - note " To rerun with same random seed:\n" - note $ " EasyTest.rerun " ++ show seed - note $ " EasyTest.rerunOnly " ++ show seed ++ " " ++ "\"" ++ hd ++ "\"" - note "\n" - note line - note "❌" - fail "test failures" - --- | Label a test. Can be nested. A `'.'` is placed between nested --- scopes, so `scope "foo" . 
scope "bar"` is equivalent to `scope "foo.bar"` -scope :: String -> Test a -> Test a -scope msg (Test t) = wrap . Test $ do - env <- ask - let messages' = case messages env of [] -> msg; ms -> ms ++ ('.':msg) - if null (allow env) || take (length (allow env)) msg `isPrefixOf` allow env - then liftIO $ - runReaderT t (env {messages = messages', allow = drop (length msg + 1) (allow env)}) - else putResult Skipped >> pure Nothing - --- | Log a message -note :: String -> Test () -note msg = do - note_ <- asks note_ - liftIO $ note_ msg - pure () - --- | Log a showable value -note' :: Show s => s -> Test () -note' = note . show - --- | Generate a random value -random :: Random a => Test a -random = do - rng <- asks rng - liftIO . atomically $ do - rng0 <- readTVar rng - let (a, rng1) = Random.random rng0 - writeTVar rng rng1 - pure a - --- | Generate a bounded random value. Inclusive on both sides. -random' :: Random a => a -> a -> Test a -random' lower upper = do - rng <- asks rng - liftIO . atomically $ do - rng0 <- readTVar rng - let (a, rng1) = Random.randomR (lower,upper) rng0 - writeTVar rng rng1 - pure a - -bool :: Test Bool -bool = random - -word8 :: Test Word8 -word8 = random - --- | Generate a random `Char` -char :: Test Char -char = random - --- | Generate a random `Int` -int :: Test Int -int = random - --- | Generate a random `Double` -double :: Test Double -double = random - --- | Generate a random `Word` -word :: Test Word -word = random - --- | Generate a random `Int` in the given range --- Note: `int' 0 5` includes both `0` and `5` -int' :: Int -> Int -> Test Int -int' = random' - --- | Generate a random `Char` in the given range --- Note: `char' 'a' 'z'` includes both `'a'` and `'z'`. -char' :: Char -> Char -> Test Char -char' = random' - --- | Generate a random `Double` in the given range --- Note: `double' 0 1` includes both `0` and `1`. 
-double' :: Double -> Double -> Test Double -double' = random' - --- | Generate a random `Double` in the given range --- Note: `word' 0 10` includes both `0` and `10`. -word' :: Word -> Word -> Test Word -word' = random' - --- | Generate a random `Double` in the given range --- Note: `word8' 0 10` includes both `0` and `10`. -word8' :: Word8 -> Word8 -> Test Word8 -word8' = random' - --- | Sample uniformly from the given list of possibilities -pick :: [a] -> Test a -pick as = let n = length as; ind = picker n as in do - i <- int' 0 (n - 1) - Just a <- pure (ind i) - pure a - -picker :: Int -> [a] -> (Int -> Maybe a) -picker _ [] = const Nothing -picker _ [a] = \i -> if i == 0 then Just a else Nothing -picker size as = go where - lsize = size `div` 2 - rsize = size - lsize - (l,r) = splitAt lsize as - lpicker = picker lsize l - rpicker = picker rsize r - go i = if i < lsize then lpicker i else rpicker (i - lsize) - --- | Alias for `replicateM` -listOf :: Int -> Test a -> Test [a] -listOf = replicateM - --- | Generate a list of lists of the given sizes, --- an alias for `sizes `forM` \n -> listOf n gen` -listsOf :: [Int] -> Test a -> Test [[a]] -listsOf sizes gen = sizes `forM` \n -> listOf n gen - --- | Alias for `liftA2 (,)`. -pair :: Test a -> Test b -> Test (a,b) -pair = liftA2 (,) - --- | Generate a `Data.Map k v` of the given size. -mapOf :: Ord k => Int -> Test k -> Test v -> Test (Map k v) -mapOf n k v = Map.fromList <$> listOf n (pair k v) - --- | Generate a `[Data.Map k v]` of the given sizes. 
-mapsOf :: Ord k => [Int] -> Test k -> Test v -> Test [Map k v] -mapsOf sizes k v = sizes `forM` \n -> mapOf n k v - --- | Catch all exceptions that could occur in the given `Test` -wrap :: Test a -> Test a -wrap (Test t) = Test $ do - env <- ask - lift $ runWrap env t - -runWrap :: Env -> ReaderT Env IO (Maybe a) -> IO (Maybe a) -runWrap env t = do - e <- try $ runReaderT t env - case e of - Left e -> do - note_ env (messages env ++ " EXCEPTION!!!: " ++ show (e :: SomeException)) - runReaderT (putResult Failed) env - pure Nothing - Right a -> pure a - --- | A test with a setup and teardown -using :: IO r -> (r -> IO ()) -> (r -> Test a) -> Test a -using r cleanup use = Test $ do - r <- liftIO r - env <- ask - let Test t = use r - a <- liftIO (runWrap env t) - liftIO (cleanup r) - pure a - --- | The current scope -currentScope :: Test String -currentScope = asks messages - --- | Prepend the current scope to a logging message -noteScoped :: String -> Test () -noteScoped msg = do - s <- currentScope - note (s ++ (if null s then "" else " ") ++ msg) - --- | Record a successful test at the current scope -ok :: Test () -ok = Test (Just <$> putResult (Passed 1)) - --- | Skip any tests depending on the return value. 
-done :: Test a -done = Test (pure Nothing) - --- | Explicitly skip this test -skip :: Test () -skip = Test (Nothing <$ putResult Skipped) - --- | Record a failure at the current scope -crash :: HasCallStack => String -> Test a -crash msg = do - let trace = callStack - msg' = msg ++ " " ++ prettyCallStack trace - Test (Just <$> putResult Failed) >> noteScoped ("FAILURE " ++ msg') >> Test (pure Nothing) - --- | Overwrites the env so that note_ (the logger) is a no op -nologging :: HasCallStack => Test a -> Test a -nologging (Test t) = Test $ do - env <- ask - liftIO $ runReaderT t (env {note_ = \_ -> pure ()}) - --- | Run a test under a new scope, without logs and suppressing all output -attempt :: Test a -> Test (Maybe a) -attempt (Test t) = nologging $ do - env <- ask - let msg = "internal attempt" - let messages' = case messages env of [] -> msg; ms -> ms ++ ('.':msg) - liftIO $ runWrap env { messages = messages', allow = "not visible" } t - --- | Placeholder wrapper for a failing test. The test being wrapped is expected/known to fail. --- Will produce a failure if the test being wrapped suddenly becomes a success. -pending :: HasCallStack => Test a -> Test a -pending test = do - m <- attempt test - case m of - Just _ -> - crash "This pending test should not pass!" - Nothing -> - ok >> Test (pure Nothing) - -putResult :: Status -> ReaderT Env IO () -putResult passed = do - msgs <- asks messages - allow <- asks (null . allow) - r <- liftIO . atomically $ newTMVar (msgs, if allow then passed else Skipped) - q <- asks results - lift . atomically $ writeTBQueue q (Just r) - -instance MonadReader Env Test where - ask = Test $ do - allow <- asks (null . allow) - if allow then Just <$> ask else pure Nothing - local f (Test t) = Test (local f t) - reader f = Test (Just <$> reader f) - -instance Monad Test where - return a = Test $ do - allow <- asks (null . 
allow) - pure $ if allow then Just a else Nothing - Test a >>= f = Test $ do - a <- a - case a of - Nothing -> pure Nothing - Just a -> let Test t = f a in t - -instance MonadFail Test where - fail = crash - -instance Functor Test where - fmap = liftM - -instance Applicative Test where - pure = return - (<*>) = ap - -instance MonadIO Test where - liftIO io = do - s <- asks (null . allow) - if s then - wrap $ Test (Just <$> liftIO io) - else - Test (pure Nothing) - -instance Alternative Test where - empty = Test (pure Nothing) - Test t1 <|> Test t2 = Test $ do - env <- ask - (rng1, rng2) <- liftIO . atomically $ do - currentRng <- readTVar (rng env) - let (rng1, rng2) = Random.split currentRng - (,) <$> newTVar rng1 <*> newTVar rng2 - lift $ do - r1 <- runWrap (env { rng = rng1 }) t1 - (<|> r1) <$> runWrap (env { rng = rng2 }) t2 - -instance MonadPlus Test where - mzero = empty - mplus = (<|>) - --- | Run a test in a separate thread, not blocking for its result. -fork :: Test a -> Test () -fork t = void (fork' t) - --- | Run a test in a separate thread, return a future which can be used --- to block on its result. -fork' :: Test a -> Test (Test a) -fork' (Test t) = do - env <- ask - tmvar <- liftIO newEmptyTMVarIO - liftIO . atomically $ writeTBQueue (results env) (Just tmvar) - r <- liftIO . A.async $ runWrap env t - waiter <- liftIO . A.async $ do - e <- A.waitCatch r - _ <- atomically $ tryPutTMVar tmvar (messages env, Skipped) - case e of - Left _ -> pure Nothing - Right a -> pure a - pure $ do - a <- liftIO (A.wait waiter) - case a of Nothing -> empty - Just a -> pure a diff --git a/yaks/easytest/tests/Suite.hs b/yaks/easytest/tests/Suite.hs deleted file mode 100644 index 77aad62ae3..0000000000 --- a/yaks/easytest/tests/Suite.hs +++ /dev/null @@ -1,34 +0,0 @@ -module Main where - -import EasyTest -import Control.Applicative -import Control.Monad - -suite1 :: Test () -suite1 = tests - [ scope "a" ok - , scope "b.c" ok - , scope "b" ok - , scope "b" . scope "c" . 
scope "d" $ ok - , scope "c" ok ] - -suite2 :: Test () -suite2 = tests - [ scope "pending.failure" (pending (expectEqual True False)) - --, scope "pending.success" (pending ok) - ] - -reverseTest :: Test () -reverseTest = scope "list reversal" $ do - nums <- listsOf [0..100] (int' 0 99) - nums `forM_` \nums -> expect (reverse (reverse nums) == nums) - -main :: IO () -main = do - run suite1 - runOnly "a" suite1 - runOnly "b" suite1 - runOnly "b" $ tests [suite1, scope "xyz" (crash "never run")] - runOnly "b.c" $ tests [suite1, scope "b" (crash "never run")] - run reverseTest - run suite2 From b4e5a8cad518420723a207801b37a8425fbe214e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 16 Sep 2020 11:51:18 -0400 Subject: [PATCH 002/225] write term component --- .../U/Codebase/Sqlite/Serialization.hs | 181 ++++++++++++++++-- .../U/Codebase/Sqlite/Term/Format.hs | 13 +- codebase2/codebase/U/Codebase/Term.hs | 22 +-- .../U/Util/Serialization.hs | 39 ++-- 4 files changed, 203 insertions(+), 52 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index babaa6ba67..c473576ae1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -7,7 +7,7 @@ module U.Codebase.Sqlite.Serialization where import Data.Bits (Bits) import Data.Bytes.Get (MonadGet, getWord8) import Data.Bytes.Put (MonadPut, putWord8) -import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, serialize) +import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE, serialize) import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) import Data.List (elemIndex) @@ -86,15 +86,15 @@ getABT getVar getA getF = getList getVar >>= go [] _ -> unknownTag "getABT" tag {- -Write -- [ ] term component -- [ ] types of terms -- [ ] decl component -- [ ] causal -- [ ] full branch -- [ ] diff 
branch -- [ ] full patch -- [ ] diff patch +Write/Read +- [x][x] term component +- [x][ ] types of terms +- [ ][ ] decl component +- [ ][ ] causal +- [ ][ ] full branch +- [ ][ ] diff branch +- [ ][ ] full patch +- [ ][ ] diff patch - [ ] add to dependents index - [ ] add to type index @@ -106,23 +106,37 @@ putLocalIds LocalIds {..} = do putFoldable putVarInt textLookup putFoldable putVarInt objectLookup +getLocalIds :: MonadGet m => m LocalIds +getLocalIds = + LocalIds + <$> getVector getVarInt + <*> getVector getVarInt + putUnit :: Applicative m => () -> m () putUnit _ = pure () getUnit :: Applicative m => m () getUnit = pure () +putTermFormat :: MonadPut m => TermFormat.TermFormat -> m () +putTermFormat = \case + TermFormat.Term c -> putWord8 0 *> putTermComponent c + +getTermFormat :: MonadGet m => m TermFormat.TermFormat +getTermFormat = getWord8 >>= \case + 0 -> TermFormat.Term <$> getTermComponent + other -> unknownTag "getTermFormat" other + putTermComponent :: MonadPut m => TermFormat.LocallyIndexedComponent -> m () putTermComponent TermFormat.LocallyIndexedComponent {..} = do - putWord8 0 -- this format putLocalIds lookup putFramedArray putTermElement component where - go :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () - go putChild = \case + putF :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () + putF putChild = \case Term.Int n -> putWord8 0 *> putInt n Term.Nat n -> @@ -168,7 +182,7 @@ putTermComponent TermFormat.LocallyIndexedComponent {..} = do Term.TypeLink r -> putWord8 21 *> putReference r putTermElement :: MonadPut m => TermFormat.Term -> m () - putTermElement = putABT putSymbol putUnit go + putTermElement = putABT putSymbol putUnit putF putSymbol :: MonadPut m => TermFormat.Symbol -> m () putSymbol (TermFormat.Symbol n t) = putVarInt n >> putText t putReferent :: MonadPut m => Referent' TermFormat.TermRef TermFormat.TypeRef -> m () @@ -180,11 +194,11 @@ putTermComponent TermFormat.LocallyIndexedComponent {..} = do putWord8 1 
putReference r putVarInt i - putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase TermFormat.TypeRef a -> m () + putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase TermFormat.LocalTextId TermFormat.TypeRef a -> m () putMatchCase putChild (Term.MatchCase pat guard body) = putPattern pat *> putMaybe putChild guard *> putChild body where - putPattern :: MonadPut m => Term.Pattern TermFormat.TypeRef -> m () + putPattern :: MonadPut m => Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef -> m () putPattern p = case p of Term.PUnbound -> putWord8 0 Term.PVar -> putWord8 1 @@ -212,7 +226,7 @@ putTermComponent TermFormat.LocallyIndexedComponent {..} = do *> putPattern l *> putSeqOp op *> putPattern r - Term.PText t -> putWord8 12 *> putText t + Term.PText t -> putWord8 12 *> putVarInt t Term.PChar c -> putWord8 13 *> putChar c where putSeqOp :: MonadPut m => Term.SeqOp -> m () @@ -220,6 +234,103 @@ putTermComponent TermFormat.LocallyIndexedComponent {..} = do putSeqOp Term.PSnoc = putWord8 1 putSeqOp Term.PConcat = putWord8 2 +getTermComponent :: MonadGet m => m TermFormat.LocallyIndexedComponent +getTermComponent = + TermFormat.LocallyIndexedComponent + <$> getLocalIds <*> getFramedArray getTermElement + where + getF :: MonadGet m => m a -> m (TermFormat.F a) + getF getChild = getWord8 >>= \case + 0 -> Term.Int <$> getInt + 1 -> Term.Nat <$> getNat + 2 -> Term.Float <$> getFloat + 3 -> Term.Boolean <$> getBoolean + 4 -> Term.Text <$> getVarInt + 5 -> Term.Ref <$> getRecursiveReference + 6 -> Term.Constructor <$> getReference <*> getVarInt + 7 -> Term.Request <$> getReference <*> getVarInt + 8 -> Term.Handle <$> getChild <*> getChild + 9 -> Term.App <$> getChild <*> getChild + 10 -> Term.Ann <$> getChild <*> getType getReference + 11 -> Term.Sequence <$> getSequence getChild + 12 -> Term.If <$> getChild <*> getChild <*> getChild + 13 -> Term.And <$> getChild <*> getChild + 14 -> Term.Or <$> getChild <*> getChild + 15 -> Term.Lam <$> getChild + 16 -> 
Term.LetRec <$> getList getChild <*> getChild + 17 -> Term.Let <$> getChild <*> getChild + 18 -> + Term.Match + <$> getChild + <*> getList + (Term.MatchCase <$> getPattern <*> getMaybe getChild <*> getChild) + 19 -> Term.Char <$> getChar + 20 -> Term.TermLink <$> getReferent + 21 -> Term.TypeLink <$> getReference + tag -> unknownTag "getTerm" tag + getTermElement :: MonadGet m => m TermFormat.Term + getTermElement = getABT getSymbol getUnit getF + getReferent :: MonadGet m => m (Referent' TermFormat.TermRef TermFormat.TypeRef) + getReferent = getWord8 >>= \case + 0 -> Referent.Ref <$> getRecursiveReference + 1 -> Referent.Con <$> getReference <*> getVarInt + x -> unknownTag "getTermComponent" x + getPattern :: MonadGet m => m (Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef) + getPattern = getWord8 >>= \case + 0 -> pure Term.PUnbound + 1 -> pure Term.PVar + 2 -> Term.PBoolean <$> getBoolean + 3 -> Term.PInt <$> getInt + 4 -> Term.PNat <$> getNat + 5 -> Term.PFloat <$> getFloat + 6 -> Term.PConstructor <$> getReference <*> getVarInt <*> getList getPattern + 7 -> Term.PAs <$> getPattern + 8 -> Term.PEffectPure <$> getPattern + 9 -> + Term.PEffectBind + <$> getReference + <*> getVarInt + <*> getList getPattern + <*> getPattern + 10 -> Term.PSequenceLiteral <$> getList getPattern + 11 -> + Term.PSequenceOp + <$> getPattern + <*> getSeqOp + <*> getPattern + 12 -> Term.PText <$> getVarInt + 13 -> Term.PChar <$> getChar + x -> unknownTag "Pattern" x + where + getSeqOp :: MonadGet m => m Term.SeqOp + getSeqOp = getWord8 >>= \case + 0 -> pure Term.PCons + 1 -> pure Term.PSnoc + 2 -> pure Term.PConcat + tag -> unknownTag "SeqOp" tag + +getType :: MonadGet m => m r -> m (Type.TypeR r TermFormat.Symbol) +getType getReference = getABT getSymbol getUnit go + where + go getChild = getWord8 >>= \case + 0 -> Type.Ref <$> getReference + 1 -> Type.Arrow <$> getChild <*> getChild + 2 -> Type.Ann <$> getChild <*> getKind + 3 -> Type.App <$> getChild <*> getChild + 4 -> 
Type.Effect <$> getChild <*> getChild + 5 -> Type.Effects <$> getList getChild + 6 -> Type.Forall <$> getChild + 7 -> Type.IntroOuter <$> getChild + tag -> unknownTag "getType" tag + getKind :: MonadGet m => m Kind + getKind = getWord8 >>= \case + 0 -> pure Kind.Star + 1 -> Kind.Arrow <$> getKind <*> getKind + tag -> unknownTag "getKind" tag + +getSymbol :: MonadGet m => m TermFormat.Symbol +getSymbol = TermFormat.Symbol <$> getVarInt <*> getText + putReference :: (MonadPut m, Integral t, Bits t, Integral r, Bits r) => Reference' t r -> @@ -230,6 +341,14 @@ putReference = \case ReferenceDerived (Reference.Id r index) -> putWord8 1 *> putVarInt r *> putVarInt index +getReference :: + (MonadGet m, Integral t, Bits t, Integral r, Bits r) => + m (Reference' t r) +getReference = getWord8 >>= \case + 0 -> ReferenceBuiltin <$> getVarInt + 1 -> ReferenceDerived <$> (Reference.Id <$> getVarInt <*> getVarInt) + x -> unknownTag "getRecursiveReference" x + putRecursiveReference :: (MonadPut m, Integral t, Bits t, Integral r, Bits r) => Reference' t (Maybe r) -> @@ -240,19 +359,42 @@ putRecursiveReference = \case ReferenceDerived (Reference.Id r index) -> putWord8 1 *> putMaybe putVarInt r *> putVarInt index +getRecursiveReference :: + (MonadGet m, Integral t, Bits t, Integral r, Bits r) => + m (Reference' t (Maybe r)) +getRecursiveReference = getWord8 >>= \case + 0 -> ReferenceBuiltin <$> getVarInt + 1 -> ReferenceDerived <$> (Reference.Id <$> getMaybe getVarInt <*> getVarInt) + x -> unknownTag "getRecursiveReference" x + putInt :: MonadPut m => Int64 -> m () putInt = serializeBE +getInt :: MonadGet m => m Int64 +getInt = deserializeBE + putNat :: MonadPut m => Word64 -> m () putNat = serializeBE -putFloat = serializeBE +getNat :: MonadGet m => m Word64 +getNat = deserializeBE + putFloat :: MonadPut m => Double -> m () +putFloat = serializeBE + +getFloat :: MonadGet m => m Double +getFloat = deserializeBE putBoolean :: MonadPut m => Bool -> m () putBoolean False = putWord8 0 
putBoolean True = putWord8 1 +getBoolean :: MonadGet m => m Bool +getBoolean = getWord8 >>= \case + 0 -> pure False + 1 -> pure True + x -> unknownTag "Boolean" x + putType :: (MonadPut m, Ord v) => (r -> m ()) -> @@ -298,6 +440,3 @@ unknownTag msg tag = "unknown tag " ++ show tag ++ " while deserializing: " ++ msg - --- putSymbol :: MonadPut m => Symbol -> m () --- putSymbol (Symbol id typ) = putLength id *> putText (Var.rawName typ) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 0e759464d1..9b9045437e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -1,19 +1,24 @@ {-# LANGUAGE DerivingVia #-} + module U.Codebase.Sqlite.Term.Format where +-- import U.Codebase.Sqlite.DbId + import Data.Text (Text) -import U.Codebase.Reference (Reference') +import Data.Bits (Bits) +import Data.Vector (Vector) import Data.Word (Word64) +import U.Codebase.Reference (Reference') import U.Codebase.Referent (Referent') --- import U.Codebase.Sqlite.DbId import U.Codebase.Sqlite.LocalIds import qualified U.Codebase.Term as Term import qualified U.Core.ABT as ABT -import Data.Bits (Bits) -- Int, because that's what Data.Vector.(!) 
takes newtype LocalTermId = LocalTermId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + newtype LocalTypeId = LocalTypeId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 type TermRef = Reference' LocalTextId (Maybe LocalTermId) @@ -22,7 +27,7 @@ type TypeRef = Reference' LocalTextId LocalTypeId data LocallyIndexedComponent = LocallyIndexedComponent { lookup :: LocalIds, - component :: [Term] + component :: Vector Term } type F = diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index d03fcfe976..7e7cc5d3e2 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -14,11 +14,11 @@ import Data.Int (Int64) import Data.Sequence (Seq) import Data.Text (Text) import Data.Word (Word64) -import U.Util.Hash (Hash) import GHC.Generics (Generic, Generic1) import U.Codebase.Reference (Reference, Reference') import U.Codebase.Referent (Referent') import U.Codebase.Type (TypeR) +import U.Util.Hash (Hash) import qualified U.Core.ABT as ABT import qualified U.Util.Hashable as H @@ -74,29 +74,29 @@ data F' text termRef typeRef termLink typeLink vt a -- Match x -- [ (Constructor 0 [Var], ABT.abs n rhs1) -- , (Constructor 1 [], rhs2) ] - Match a [MatchCase typeRef a] + Match a [MatchCase text typeRef a] | TermLink termLink | TypeLink typeLink deriving (Foldable, Functor, Traversable) -data MatchCase r a = MatchCase (Pattern r) (Maybe a) a +data MatchCase t r a = MatchCase (Pattern t r) (Maybe a) a deriving (Foldable, Functor, Generic, Generic1, Traversable) -data Pattern r +data Pattern t r = PUnbound | PVar | PBoolean !Bool | PInt !Int64 | PNat !Word64 | PFloat !Double - | PText !Text + | PText !t | PChar !Char - | PConstructor !r !Int [Pattern r] - | PAs (Pattern r) - | PEffectPure (Pattern r) - | PEffectBind !r !Int [Pattern r] (Pattern r) - | 
PSequenceLiteral [Pattern r] - | PSequenceOp (Pattern r) !SeqOp (Pattern r) + | PConstructor !r !Int [Pattern t r] + | PAs (Pattern t r) + | PEffectPure (Pattern t r) + | PEffectBind !r !Int [Pattern t r] (Pattern t r) + | PSequenceLiteral [Pattern t r] + | PSequenceOp (Pattern t r) !SeqOp (Pattern t r) deriving (Generic, Functor, Foldable, Traversable) data SeqOp diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 7d107a68cd..38257d0813 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -8,32 +8,34 @@ module U.Util.Serialization where import Control.Monad (replicateM) -import Data.Bits (setBit, shiftR, clearBit, (.|.), shiftL, testBit, Bits) +import Data.Bits ((.|.), Bits, clearBit, setBit, shiftL, shiftR, testBit) import Data.ByteString (ByteString, readFile, writeFile) import qualified Data.ByteString as BS import qualified Data.ByteString.Short as BSS import Data.ByteString.Short (ShortByteString) -import Data.Bytes.Get (getWord8, MonadGet, getByteString, getBytes, runGetS, skip) -import Data.Bytes.Put (putWord8, MonadPut, putByteString, runPutS) -import Data.Bytes.VarInt (VarInt(VarInt)) +import Data.Bytes.Get (MonadGet, getByteString, getBytes, getWord8, runGetS, skip) +import Data.Bytes.Put (MonadPut, putByteString, putWord8, runPutS) +import Data.Bytes.VarInt (VarInt (VarInt)) import Data.Foldable (Foldable (toList), traverse_) import Data.List.Extra (dropEnd) +import Data.Sequence (Seq) +import qualified Data.Sequence as Seq import Data.Text (Text) import Data.Text.Encoding (decodeUtf8, encodeUtf8) -import Data.Vector (Vector) -import qualified Data.Vector as Vector import Data.Text.Short (ShortText) import qualified Data.Text.Short as TS import qualified Data.Text.Short.Unsafe as TSU +import Data.Vector (Vector) +import qualified Data.Vector as Vector +import Data.Word (Word8) import System.FilePath 
(takeDirectory) import UnliftIO (MonadIO, liftIO) import UnliftIO.Directory (createDirectoryIfMissing, doesFileExist) import Prelude hiding (readFile, writeFile) -import Data.Word (Word8) type Get a = forall m. MonadGet m => m a -type Put a = forall m. MonadPut m => a -> m () +type Put a = forall m. MonadPut m => a -> m () -- todo: do we use this? data Format a = Format @@ -74,15 +76,15 @@ putVarInt n {-# INLINE putVarInt #-} getVarInt :: (MonadGet m, Num b, Bits b) => m b -getVarInt = getWord8 >>= getVarInt +getVarInt = getWord8 >>= getVarInt where - getVarInt :: (MonadGet m, Num b, Bits b) => Word8 -> m b - getVarInt n - | testBit n 7 = do - VarInt m <- getWord8 >>= getVarInt - return $ shiftL m 7 .|. clearBit (fromIntegral n) 7 - | otherwise = return $ fromIntegral n - {-# INLINE getVarInt #-} + getVarInt :: (MonadGet m, Num b, Bits b) => Word8 -> m b + getVarInt n + | testBit n 7 = do + VarInt m <- getWord8 >>= getVarInt + return $ shiftL m 7 .|. clearBit (fromIntegral n) 7 + | otherwise = return $ fromIntegral n + {-# INLINE getVarInt #-} {-# INLINE getVarInt #-} putText :: MonadPut m => Text -> m () @@ -138,6 +140,11 @@ getVector getA = do length <- getVarInt Vector.replicateM length getA +getSequence :: MonadGet m => m a -> m (Seq a) +getSequence getA = do + length <- getVarInt + Seq.replicateM length getA + getFramed :: MonadGet m => Get a -> m (Maybe a) getFramed get = do size <- getVarInt From bb80842f3da8411cf9c869f1279c2341d02103f9 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 17 Sep 2020 23:47:55 -0400 Subject: [PATCH 003/225] putDecl / getDecl --- .../U/Codebase/Sqlite/Decl/Format.hs | 19 +++-- .../U/Codebase/Sqlite/Serialization.hs | 76 +++++++++++++++---- .../U/Codebase/Sqlite/Symbol.hs | 5 ++ .../U/Codebase/Sqlite/Term/Format.hs | 7 +- .../unison-codebase-sqlite.cabal | 1 + 5 files changed, 80 insertions(+), 28 deletions(-) create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs diff --git 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs index b99fc86c67..4379a4f7a5 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs @@ -1,32 +1,35 @@ +{-# LANGUAGE DerivingVia #-} module U.Codebase.Sqlite.Decl.Format where import U.Codebase.Decl (DeclType, Modifier) import U.Codebase.Reference (Reference') --- import U.Codebase.Sqlite.DbId +import U.Codebase.Sqlite.Symbol import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT import U.Codebase.Sqlite.LocalIds +import Data.Word (Word64) +import Data.Bits (Bits) +import Data.Vector (Vector) -- | Add new formats here -data TermFormat v = Term (LocallyIndexedComponent v) +data DeclFormat = Decl LocallyIndexedComponent -- | V1: Decls included `Hash`es inline -- V2: Instead of `Hash`, we use a smaller index. -data LocallyIndexedComponent v = LocallyIndexedComponent +data LocallyIndexedComponent = LocallyIndexedComponent { lookup :: LocalIds, - component :: [Decl v] + component :: Vector (Decl Symbol) } data Decl v = DataDeclaration { declType :: DeclType, modifier :: Modifier, bound :: [v], - constructors' :: [Type v] + constructors :: [Type v] } type Type v = ABT.Term (Type.F' TypeRef) v () type TypeRef = Reference' LocalTextId (Maybe LocalTypeId) --- Int, because that's what Data.Vector.(!) 
takes -newtype LocalTextId = LocalTextId Int -newtype LocalTypeId = LocalTypeId Int +newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalTypeId = LocalTypeId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index c473576ae1..9462b60025 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -21,11 +21,14 @@ import U.Codebase.Referent (Referent') import qualified U.Codebase.Referent as Referent import U.Codebase.Sqlite.LocalIds import qualified U.Codebase.Sqlite.Term.Format as TermFormat +import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat +import U.Codebase.Sqlite.Symbol import qualified U.Codebase.Term as Term import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT import U.Util.Serialization import Prelude hiding (getChar, putChar) +import qualified U.Codebase.Decl as Decl putABT :: (MonadPut m, Foldable f, Functor f, Ord v) => @@ -86,15 +89,16 @@ getABT getVar getA getF = getList getVar >>= go [] _ -> unknownTag "getABT" tag {- -Write/Read -- [x][x] term component -- [x][ ] types of terms -- [ ][ ] decl component -- [ ][ ] causal -- [ ][ ] full branch -- [ ][ ] diff branch -- [ ][ ] full patch -- [ ][ ] diff patch +put/get/write/read +- [x][x][ ][ ] term component +- [x][x][ ][ ] types of terms +- [x][x][ ][ ] decl component +- [ ][ ][ ][ ] causal +- [ ][ ][ ][ ] full branch +- [ ][ ][ ][ ] diff branch +- [ ][ ][ ][ ] full patch +- [ ][ ][ ][ ] diff patch +- [ ] O(1) framed array access? 
- [ ] add to dependents index - [ ] add to type index @@ -183,8 +187,6 @@ putTermComponent TermFormat.LocallyIndexedComponent {..} = do putWord8 21 *> putReference r putTermElement :: MonadPut m => TermFormat.Term -> m () putTermElement = putABT putSymbol putUnit putF - putSymbol :: MonadPut m => TermFormat.Symbol -> m () - putSymbol (TermFormat.Symbol n t) = putVarInt n >> putText t putReferent :: MonadPut m => Referent' TermFormat.TermRef TermFormat.TypeRef -> m () putReferent = \case Referent.Ref r -> do @@ -309,7 +311,7 @@ getTermComponent = 2 -> pure Term.PConcat tag -> unknownTag "SeqOp" tag -getType :: MonadGet m => m r -> m (Type.TypeR r TermFormat.Symbol) +getType :: MonadGet m => m r -> m (Type.TypeR r Symbol) getType getReference = getABT getSymbol getUnit go where go getChild = getWord8 >>= \case @@ -328,8 +330,54 @@ getType getReference = getABT getSymbol getUnit go 1 -> Kind.Arrow <$> getKind <*> getKind tag -> unknownTag "getKind" tag -getSymbol :: MonadGet m => m TermFormat.Symbol -getSymbol = TermFormat.Symbol <$> getVarInt <*> getText +putDeclFormat :: MonadPut m => DeclFormat.DeclFormat -> m () +putDeclFormat = \case + DeclFormat.Decl c -> putWord8 0 *> putDeclComponent c + +getDeclFormat :: MonadGet m => m DeclFormat.DeclFormat +getDeclFormat = getWord8 >>= \case + 0 -> DeclFormat.Decl <$> getDeclComponent + other -> unknownTag "DeclFormat" other + +-- |These use a framed array for randomer access +putDeclComponent :: MonadPut m => DeclFormat.LocallyIndexedComponent -> m () +putDeclComponent DeclFormat.LocallyIndexedComponent {..} = do + putLocalIds lookup + putFramedArray putDeclElement component + where + putDeclElement DeclFormat.DataDeclaration{..} = do + putDeclType declType + putModifier modifier + putFoldable putSymbol bound + putFoldable (putType putRecursiveReference putSymbol) constructors + putDeclType Decl.Data = putWord8 0 + putDeclType Decl.Effect = putWord8 1 + putModifier Decl.Structural = putWord8 0 + putModifier (Decl.Unique t) 
= putWord8 1 *> putText t + +getDeclComponent :: MonadGet m => m DeclFormat.LocallyIndexedComponent +getDeclComponent = + DeclFormat.LocallyIndexedComponent <$> getLocalIds <*> getFramedArray getDeclElement + where + getDeclElement = DeclFormat.DataDeclaration + <$> getDeclType + <*> getModifier + <*> getList getSymbol + <*> getList (getType getRecursiveReference) + getDeclType = getWord8 >>= \case + 0 -> pure Decl.Data + 1 -> pure Decl.Effect + other -> unknownTag "DeclType" other + getModifier = getWord8 >>= \case + 0 -> pure Decl.Structural + 1 -> Decl.Unique <$> getText + other -> unknownTag "DeclModifier" other + +getSymbol :: MonadGet m => m Symbol +getSymbol = Symbol <$> getVarInt <*> getText + +putSymbol :: MonadPut m => Symbol -> m () +putSymbol (Symbol n t) = putVarInt n >> putText t putReference :: (MonadPut m, Integral t, Bits t, Integral r, Bits r) => diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs new file mode 100644 index 0000000000..bc3ae38f80 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs @@ -0,0 +1,5 @@ +module U.Codebase.Sqlite.Symbol where + +import Data.Word (Word64) +import Data.Text (Text) +data Symbol = Symbol !Word64 !Text deriving (Eq, Ord, Show) \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 9b9045437e..3724e9055f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -4,21 +4,18 @@ module U.Codebase.Sqlite.Term.Format where -- import U.Codebase.Sqlite.DbId -import Data.Text (Text) import Data.Bits (Bits) import Data.Vector (Vector) import Data.Word (Word64) import U.Codebase.Reference (Reference') import U.Codebase.Referent (Referent') import U.Codebase.Sqlite.LocalIds +import U.Codebase.Sqlite.Symbol import qualified U.Codebase.Term 
as Term import qualified U.Core.ABT as ABT --- Int, because that's what Data.Vector.(!) takes newtype LocalTermId = LocalTermId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 - newtype LocalTypeId = LocalTypeId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 - newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 type TermRef = Reference' LocalTextId (Maybe LocalTermId) @@ -37,5 +34,3 @@ type Term = ABT.Term F Symbol () data TermFormat = Term LocallyIndexedComponent - -data Symbol = Symbol !Word64 !Text deriving (Eq, Ord, Show) \ No newline at end of file diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index 49e3b1fb7b..ff695d8923 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -31,6 +31,7 @@ library U.Codebase.Sqlite.Reference U.Codebase.Sqlite.Referent U.Codebase.Sqlite.Serialization + U.Codebase.Sqlite.Symbol U.Codebase.Sqlite.Term.Format U.Codebase.Sqlite.Types From 39943b43f7f9d065b8b5ef54d997eac8c8a8e364 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 18 Sep 2020 18:20:13 -0400 Subject: [PATCH 004/225] put branch, branchdiff, patchdiff --- .../U/Codebase/Sqlite/Branch/Diff.hs | 24 +-- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 4 +- .../U/Codebase/Sqlite/Patch/Diff.hs | 5 +- .../U/Codebase/Sqlite/Patch/TermEdit.hs | 4 +- .../U/Codebase/Sqlite/Reference.hs | 12 +- .../U/Codebase/Sqlite/Referent.hs | 9 +- .../U/Codebase/Sqlite/Serialization.hs | 160 ++++++++++++------ codebase2/codebase/U/Codebase/Referent.hs | 4 +- .../U/Util/Serialization.hs | 17 +- 9 files changed, 154 insertions(+), 85 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs index 325c35be73..84fd801497 100644 --- 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs @@ -2,22 +2,22 @@ module U.Codebase.Sqlite.Branch.Diff where import Data.Map (Map) import Data.Set (Set) -import U.Codebase.Sqlite.Branch.MetadataSet import U.Codebase.Sqlite.DbId import U.Codebase.Sqlite.Reference import U.Codebase.Sqlite.Referent - -data Diff = Diff - { reference :: BranchId, - add :: DiffSlice, - remove :: DiffSlice - } +import U.Codebase.Sqlite.Patch.Diff type NameSegment = TextId +type Metadata = Reference +data PatchOp = PatchRemove | PatchAdd | PatchEdit PatchDiff +data AddRemove a = AddRemove { add :: Set a, remove :: Set a } -data DiffSlice = DiffSlice - { terms :: Map NameSegment (Set Referent), - types :: Map NameSegment (Set Reference), - termMetadata :: Map NameSegment (Map Referent MetadataSetFormat), - typeMetadata :: Map NameSegment (Map Reference MetadataSetFormat) +data Diff = Diff + { reference :: BranchId, + terms :: Map NameSegment (AddRemove Referent), + types :: Map NameSegment (AddRemove Reference), + termMetadata :: Map NameSegment (Map Referent (AddRemove Metadata)), + typeMetadata :: Map NameSegment (Map Reference (AddRemove Metadata)), + patches :: Map NameSegment PatchOp } + diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 05e7b7339c..4bcedabdb0 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -18,6 +18,6 @@ newtype TextId = TextId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enu newtype TermId = TermId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId newtype DeclId = DeclId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId -newtype PatchId = PatchId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId +newtype PatchId = PatchId Word64 
deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId -newtype BranchId = BranchId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via Word64 +newtype BranchId = BranchId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index fe3f56f639..8f1cdb48ce 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -6,11 +6,12 @@ import U.Codebase.Sqlite.Reference import U.Codebase.Sqlite.Patch.TermEdit import U.Codebase.Sqlite.Patch.TypeEdit import U.Codebase.Sqlite.DbId +import Data.Set (Set) data PatchDiff = PatchDiff { reference :: PatchId , addedTermEdits :: Map Referent TermEdit , addedTypeEdits :: Map Reference TypeEdit - , removedTermEdits :: Map Referent TermEdit - , removedTypeEdits :: Map Reference TypeEdit + , removedTermEdits :: Set Referent + , removedTypeEdits :: Set Reference } deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs index 12d2fd39ef..bccc8160af 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs @@ -1,8 +1,8 @@ module U.Codebase.Sqlite.Patch.TermEdit where -import U.Codebase.Sqlite.Reference (Reference) +import U.Codebase.Sqlite.Referent (Referent) -data TermEdit = Replace Reference Typing | Deprecate +data TermEdit = Replace Referent Typing | Deprecate deriving (Eq, Ord, Show) -- Replacements with the Same type can be automatically propagated. 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index aa42fc12fc..882fa93b74 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -1,13 +1,7 @@ module U.Codebase.Sqlite.Reference where import U.Codebase.Sqlite.DbId -import Data.Word (Word64) +import U.Codebase.Reference (Reference', Id') -data Reference = Builtin TextId | Derived Id - deriving (Eq, Ord, Show) - -data Id = Id ObjectId ComponentIndex - deriving (Eq, Ord, Show) - -newtype ComponentIndex = ComponentIndex Word64 - deriving (Eq, Ord, Show) +type Reference = Reference' TextId ObjectId +type Id = Id' ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs index 738da2ae27..3abecc4aa2 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -1,11 +1,6 @@ module U.Codebase.Sqlite.Referent where -import Data.Word (Word64) import U.Codebase.Sqlite.Reference (Reference) +import U.Codebase.Referent (Referent') -data Referent = Ref Reference | Con Reference ConstructorIndex - deriving (Eq, Ord, Show) - -newtype ConstructorIndex = ConstructorIndex Word64 - deriving (Eq, Ord, Show) - +type Referent = Referent' Reference Reference diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 9462b60025..39668f28b2 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -20,8 +20,9 @@ import qualified U.Codebase.Reference as Reference import U.Codebase.Referent (Referent') import qualified U.Codebase.Referent as Referent import U.Codebase.Sqlite.LocalIds -import qualified U.Codebase.Sqlite.Term.Format as TermFormat +import qualified 
U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat +import qualified U.Codebase.Sqlite.Term.Format as TermFormat import U.Codebase.Sqlite.Symbol import qualified U.Codebase.Term as Term import qualified U.Codebase.Type as Type @@ -29,6 +30,12 @@ import qualified U.Core.ABT as ABT import U.Util.Serialization import Prelude hiding (getChar, putChar) import qualified U.Codebase.Decl as Decl +import qualified U.Codebase.Sqlite.Branch.Full as BranchFull +import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff +import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff +import qualified U.Codebase.Sqlite.Branch.MetadataSet as MetadataSet +import qualified U.Codebase.Sqlite.Patch.TermEdit as TermEdit +import qualified U.Codebase.Sqlite.Patch.TypeEdit as TypeEdit putABT :: (MonadPut m, Foldable f, Functor f, Ord v) => @@ -93,11 +100,11 @@ put/get/write/read - [x][x][ ][ ] term component - [x][x][ ][ ] types of terms - [x][x][ ][ ] decl component -- [ ][ ][ ][ ] causal -- [ ][ ][ ][ ] full branch -- [ ][ ][ ][ ] diff branch +- [-][-][ ][ ] causal +- [x][ ][ ][ ] full branch +- [x][ ][ ][ ] diff branch - [ ][ ][ ][ ] full patch -- [ ][ ][ ][ ] diff patch +- [x][ ][ ][ ] diff patch - [ ] O(1) framed array access? 
- [ ] add to dependents index @@ -182,20 +189,11 @@ putTermComponent TermFormat.LocallyIndexedComponent {..} = do Term.Char c -> putWord8 19 *> putChar c Term.TermLink r -> - putWord8 20 *> putReferent r + putWord8 20 *> putReferent putRecursiveReference putReference r Term.TypeLink r -> putWord8 21 *> putReference r putTermElement :: MonadPut m => TermFormat.Term -> m () putTermElement = putABT putSymbol putUnit putF - putReferent :: MonadPut m => Referent' TermFormat.TermRef TermFormat.TypeRef -> m () - putReferent = \case - Referent.Ref r -> do - putWord8 0 - putRecursiveReference r - Referent.Con r i -> do - putWord8 1 - putReference r - putVarInt i putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase TermFormat.LocalTextId TermFormat.TypeRef a -> m () putMatchCase putChild (Term.MatchCase pat guard body) = putPattern pat *> putMaybe putChild guard *> putChild body @@ -333,52 +331,118 @@ getType getReference = getABT getSymbol getUnit go putDeclFormat :: MonadPut m => DeclFormat.DeclFormat -> m () putDeclFormat = \case DeclFormat.Decl c -> putWord8 0 *> putDeclComponent c + where + -- |These use a framed array for randomer access + putDeclComponent :: MonadPut m => DeclFormat.LocallyIndexedComponent -> m () + putDeclComponent DeclFormat.LocallyIndexedComponent {..} = do + putLocalIds lookup + putFramedArray putDeclElement component + where + putDeclElement DeclFormat.DataDeclaration{..} = do + putDeclType declType + putModifier modifier + putFoldable putSymbol bound + putFoldable (putType putRecursiveReference putSymbol) constructors + putDeclType Decl.Data = putWord8 0 + putDeclType Decl.Effect = putWord8 1 + putModifier Decl.Structural = putWord8 0 + putModifier (Decl.Unique t) = putWord8 1 *> putText t getDeclFormat :: MonadGet m => m DeclFormat.DeclFormat getDeclFormat = getWord8 >>= \case 0 -> DeclFormat.Decl <$> getDeclComponent other -> unknownTag "DeclFormat" other - --- |These use a framed array for randomer access -putDeclComponent :: MonadPut 
m => DeclFormat.LocallyIndexedComponent -> m () -putDeclComponent DeclFormat.LocallyIndexedComponent {..} = do - putLocalIds lookup - putFramedArray putDeclElement component where - putDeclElement DeclFormat.DataDeclaration{..} = do - putDeclType declType - putModifier modifier - putFoldable putSymbol bound - putFoldable (putType putRecursiveReference putSymbol) constructors - putDeclType Decl.Data = putWord8 0 - putDeclType Decl.Effect = putWord8 1 - putModifier Decl.Structural = putWord8 0 - putModifier (Decl.Unique t) = putWord8 1 *> putText t - -getDeclComponent :: MonadGet m => m DeclFormat.LocallyIndexedComponent -getDeclComponent = - DeclFormat.LocallyIndexedComponent <$> getLocalIds <*> getFramedArray getDeclElement + getDeclComponent :: MonadGet m => m DeclFormat.LocallyIndexedComponent + getDeclComponent = + DeclFormat.LocallyIndexedComponent <$> getLocalIds <*> getFramedArray getDeclElement + where + getDeclElement = DeclFormat.DataDeclaration + <$> getDeclType + <*> getModifier + <*> getList getSymbol + <*> getList (getType getRecursiveReference) + getDeclType = getWord8 >>= \case + 0 -> pure Decl.Data + 1 -> pure Decl.Effect + other -> unknownTag "DeclType" other + getModifier = getWord8 >>= \case + 0 -> pure Decl.Structural + 1 -> Decl.Unique <$> getText + other -> unknownTag "DeclModifier" other + +putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () +putBranchFormat = \case + BranchFormat.Full b -> putWord8 0 *> putBranchFull b + BranchFormat.Diff d -> putWord8 1 *> putBranchDiff d + where + putReferent' = putReferent putReference putReference + putBranchFull (BranchFull.Branch terms types patches children) = do + putMap putVarInt (putMap putReferent' putMetadataSetFormat) terms + putMap putVarInt (putMap putReference putMetadataSetFormat) types + putMap putVarInt putVarInt patches + putMap putVarInt putVarInt children + putMetadataSetFormat = \case + MetadataSet.Inline s -> putWord8 0 *> putFoldable putReference s + putBranchDiff 
(BranchDiff.Diff ref terms types termMD typeMD patches) = do + putVarInt ref + putMap putVarInt (putAddRemove putReferent') terms + putMap putVarInt (putAddRemove putReference) types + putMap putVarInt (putMap putReferent' (putAddRemove putReference)) termMD + putMap putVarInt (putMap putReference (putAddRemove putReference)) typeMD + putMap putVarInt putPatchOp patches + where + putAddRemove put (BranchDiff.AddRemove adds removes) = do + putFoldable put adds + putFoldable put removes + putPatchOp BranchDiff.PatchRemove = putWord8 0 + putPatchOp BranchDiff.PatchAdd = putWord8 1 + putPatchOp (BranchDiff.PatchEdit (PatchDiff.PatchDiff r atm atp rtm rtp)) = do + putWord8 2 + putVarInt r + putMap putReferent' putTermEdit atm + putMap putReference putTypeEdit atp + putFoldable putReferent' rtm + putFoldable putReference rtp + +putTermEdit :: MonadPut m => TermEdit.TermEdit -> m () +putTermEdit TermEdit.Deprecate = putWord8 0 +putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReferent' r *> putTyping t + where + putTyping TermEdit.Same = putWord8 0 + putTyping TermEdit.Subtype = putWord8 1 + putTyping TermEdit.Different = putWord8 2 + putReferent' = putReferent putReference putReference + +putTypeEdit :: MonadPut m => TypeEdit.TypeEdit -> m () +putTypeEdit TypeEdit.Deprecate = putWord8 0 +putTypeEdit (TypeEdit.Replace r) = putWord8 1 *> putReference r + +getBranchFormat :: MonadGet m => m BranchFormat.BranchFormat +getBranchFormat = getWord8 >>= \case + 0 -> getBranchFull + 1 -> getBranchDiff + other -> unknownTag "BranchFormat" other where - getDeclElement = DeclFormat.DataDeclaration - <$> getDeclType - <*> getModifier - <*> getList getSymbol - <*> getList (getType getRecursiveReference) - getDeclType = getWord8 >>= \case - 0 -> pure Decl.Data - 1 -> pure Decl.Effect - other -> unknownTag "DeclType" other - getModifier = getWord8 >>= \case - 0 -> pure Decl.Structural - 1 -> Decl.Unique <$> getText - other -> unknownTag "DeclModifier" other - + getBranchFull = error 
"todo" + getBranchDiff = error "todo" + getSymbol :: MonadGet m => m Symbol getSymbol = Symbol <$> getVarInt <*> getText putSymbol :: MonadPut m => Symbol -> m () putSymbol (Symbol n t) = putVarInt n >> putText t +putReferent :: MonadPut m => (r1 -> m ()) -> (r2 -> m ()) -> Referent' r1 r2 -> m () +putReferent putRefRef putConRef = \case + Referent.Ref r -> do + putWord8 0 + putRefRef r + Referent.Con r i -> do + putWord8 1 + putConRef r + putVarInt i + putReference :: (MonadPut m, Integral t, Bits t, Integral r, Bits r) => Reference' t r -> diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs index 705bb6164c..cbd708433c 100644 --- a/codebase2/codebase/U/Codebase/Referent.hs +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -19,7 +19,7 @@ type ConstructorIndex = Word64 data Referent' rTm rTp = Ref rTm | Con rTp ConstructorIndex - deriving (Eq, Ord) + deriving (Eq, Ord, Show) instance Hashable Referent where tokens (Ref r) = Hashable.Tag 0 : Hashable.tokens r @@ -29,5 +29,5 @@ type Id = Id' Hash Hash data Id' hTm hTp = RefId (Reference.Id' hTm) | ConId (Reference.Id' hTp) ConstructorIndex - deriving (Eq, Ord, Functor) + deriving (Eq, Ord, Show, Functor) diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 38257d0813..5bc24f5fa4 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -32,6 +32,9 @@ import System.FilePath (takeDirectory) import UnliftIO (MonadIO, liftIO) import UnliftIO.Directory (createDirectoryIfMissing, doesFileExist) import Prelude hiding (readFile, writeFile) +import Data.Map (Map) +import qualified Data.Map as Map +import Control.Applicative (Applicative(liftA2)) type Get a = forall m. 
MonadGet m => m a @@ -195,4 +198,16 @@ unsafeFramedArrayLookup :: MonadGet m => Get a -> Int -> m a unsafeFramedArrayLookup getA index = do offsets <- getVector getVarInt skip (Vector.unsafeIndex offsets index) - getA \ No newline at end of file + getA + +putMap :: MonadPut m => (a -> m ()) -> (b -> m ()) -> Map a b -> m () +putMap putA putB m = putFoldable (putPair putA putB) (Map.toList m) + +getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) +getMap getA getB = Map.fromList <$> getList (getPair getA getB) + +putPair :: MonadPut m => (a -> m ()) -> (b -> m ()) -> (a,b) -> m () +putPair putA putB (a,b) = putA a *> putB b + +getPair :: MonadGet m => m a -> m b -> m (a,b) +getPair = liftA2 (,) From 572922be84b6c77d4b2569e8a09354a7bf967652 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 18 Sep 2020 18:30:32 -0400 Subject: [PATCH 005/225] switch from one hash LUT per component to one per definition --- .../U/Codebase/Sqlite/Decl/Format.hs | 6 +- .../U/Codebase/Sqlite/Serialization.hs | 99 ++++++++++--------- .../U/Codebase/Sqlite/Term/Format.hs | 6 +- 3 files changed, 56 insertions(+), 55 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs index 4379a4f7a5..b9022fe681 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs @@ -16,10 +16,8 @@ data DeclFormat = Decl LocallyIndexedComponent -- | V1: Decls included `Hash`es inline -- V2: Instead of `Hash`, we use a smaller index. 
-data LocallyIndexedComponent = LocallyIndexedComponent - { lookup :: LocalIds, - component :: Vector (Decl Symbol) - } +data LocallyIndexedComponent = + LocallyIndexedComponent (Vector (LocalIds, Decl Symbol)) data Decl v = DataDeclaration { declType :: DeclType, diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 39668f28b2..bbbf4c61f7 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -142,9 +142,8 @@ putTermComponent :: MonadPut m => TermFormat.LocallyIndexedComponent -> m () -putTermComponent TermFormat.LocallyIndexedComponent {..} = do - putLocalIds lookup - putFramedArray putTermElement component +putTermComponent (TermFormat.LocallyIndexedComponent v) = + putFramedArray (putPair putLocalIds putTermElement) v where putF :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () putF putChild = \case @@ -237,7 +236,10 @@ putTermComponent TermFormat.LocallyIndexedComponent {..} = do getTermComponent :: MonadGet m => m TermFormat.LocallyIndexedComponent getTermComponent = TermFormat.LocallyIndexedComponent - <$> getLocalIds <*> getFramedArray getTermElement + <$> getFramedArray (getPair getLocalIds getTermElement) + +getTermElement :: MonadGet m => m TermFormat.Term +getTermElement = getABT getSymbol getUnit getF where getF :: MonadGet m => m a -> m (TermFormat.F a) getF getChild = getWord8 >>= \case @@ -268,46 +270,49 @@ getTermComponent = 20 -> Term.TermLink <$> getReferent 21 -> Term.TypeLink <$> getReference tag -> unknownTag "getTerm" tag - getTermElement :: MonadGet m => m TermFormat.Term - getTermElement = getABT getSymbol getUnit getF - getReferent :: MonadGet m => m (Referent' TermFormat.TermRef TermFormat.TypeRef) - getReferent = getWord8 >>= \case - 0 -> Referent.Ref <$> getRecursiveReference - 1 -> Referent.Con <$> getReference <*> getVarInt - x -> unknownTag 
"getTermComponent" x - getPattern :: MonadGet m => m (Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef) - getPattern = getWord8 >>= \case - 0 -> pure Term.PUnbound - 1 -> pure Term.PVar - 2 -> Term.PBoolean <$> getBoolean - 3 -> Term.PInt <$> getInt - 4 -> Term.PNat <$> getNat - 5 -> Term.PFloat <$> getFloat - 6 -> Term.PConstructor <$> getReference <*> getVarInt <*> getList getPattern - 7 -> Term.PAs <$> getPattern - 8 -> Term.PEffectPure <$> getPattern - 9 -> - Term.PEffectBind - <$> getReference - <*> getVarInt - <*> getList getPattern - <*> getPattern - 10 -> Term.PSequenceLiteral <$> getList getPattern - 11 -> - Term.PSequenceOp - <$> getPattern - <*> getSeqOp - <*> getPattern - 12 -> Term.PText <$> getVarInt - 13 -> Term.PChar <$> getChar - x -> unknownTag "Pattern" x where - getSeqOp :: MonadGet m => m Term.SeqOp - getSeqOp = getWord8 >>= \case - 0 -> pure Term.PCons - 1 -> pure Term.PSnoc - 2 -> pure Term.PConcat - tag -> unknownTag "SeqOp" tag + getReferent :: MonadGet m => m (Referent' TermFormat.TermRef TermFormat.TypeRef) + getReferent = getWord8 >>= \case + 0 -> Referent.Ref <$> getRecursiveReference + 1 -> Referent.Con <$> getReference <*> getVarInt + x -> unknownTag "getTermComponent" x + getPattern :: MonadGet m => m (Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef) + getPattern = getWord8 >>= \case + 0 -> pure Term.PUnbound + 1 -> pure Term.PVar + 2 -> Term.PBoolean <$> getBoolean + 3 -> Term.PInt <$> getInt + 4 -> Term.PNat <$> getNat + 5 -> Term.PFloat <$> getFloat + 6 -> Term.PConstructor <$> getReference <*> getVarInt <*> getList getPattern + 7 -> Term.PAs <$> getPattern + 8 -> Term.PEffectPure <$> getPattern + 9 -> + Term.PEffectBind + <$> getReference + <*> getVarInt + <*> getList getPattern + <*> getPattern + 10 -> Term.PSequenceLiteral <$> getList getPattern + 11 -> + Term.PSequenceOp + <$> getPattern + <*> getSeqOp + <*> getPattern + 12 -> Term.PText <$> getVarInt + 13 -> Term.PChar <$> getChar + x -> unknownTag "Pattern" x + 
where + getSeqOp :: MonadGet m => m Term.SeqOp + getSeqOp = getWord8 >>= \case + 0 -> pure Term.PCons + 1 -> pure Term.PSnoc + 2 -> pure Term.PConcat + tag -> unknownTag "SeqOp" tag + +-- getNthTermElement :: MonadGet m => Int -> m (LocalIds, TermFormat.Term) +-- getNthTermElement i = do + getType :: MonadGet m => m r -> m (Type.TypeR r Symbol) getType getReference = getABT getSymbol getUnit go @@ -334,9 +339,8 @@ putDeclFormat = \case where -- |These use a framed array for randomer access putDeclComponent :: MonadPut m => DeclFormat.LocallyIndexedComponent -> m () - putDeclComponent DeclFormat.LocallyIndexedComponent {..} = do - putLocalIds lookup - putFramedArray putDeclElement component + putDeclComponent (DeclFormat.LocallyIndexedComponent v) = + putFramedArray (putPair putLocalIds putDeclElement) v where putDeclElement DeclFormat.DataDeclaration{..} = do putDeclType declType @@ -355,7 +359,8 @@ getDeclFormat = getWord8 >>= \case where getDeclComponent :: MonadGet m => m DeclFormat.LocallyIndexedComponent getDeclComponent = - DeclFormat.LocallyIndexedComponent <$> getLocalIds <*> getFramedArray getDeclElement + DeclFormat.LocallyIndexedComponent <$> + getFramedArray (getPair getLocalIds getDeclElement) where getDeclElement = DeclFormat.DataDeclaration <$> getDeclType diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 3724e9055f..64952f2635 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -22,10 +22,8 @@ type TermRef = Reference' LocalTextId (Maybe LocalTermId) type TypeRef = Reference' LocalTextId LocalTypeId -data LocallyIndexedComponent = LocallyIndexedComponent - { lookup :: LocalIds, - component :: Vector Term - } +data LocallyIndexedComponent = + LocallyIndexedComponent (Vector (LocalIds, Term)) type F = Term.F' LocalTextId TermRef TypeRef (Referent' TermRef TypeRef) TypeRef 
Symbol From 832bc5fa03d6246085809bbde827ae65c3793599 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 18 Sep 2020 18:42:01 -0400 Subject: [PATCH 006/225] deserialize individual component elements --- .../U/Codebase/Sqlite/Serialization.hs | 41 +++++++++++-------- .../U/Util/Serialization.hs | 6 +-- 2 files changed, 27 insertions(+), 20 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index bbbf4c61f7..c8bd052884 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -310,9 +310,9 @@ getTermElement = getABT getSymbol getUnit getF 2 -> pure Term.PConcat tag -> unknownTag "SeqOp" tag --- getNthTermElement :: MonadGet m => Int -> m (LocalIds, TermFormat.Term) --- getNthTermElement i = do - +lookupTermElement :: MonadGet m => Reference.ComponentIndex -> m (LocalIds, TermFormat.Term) +lookupTermElement = + unsafeFramedArrayLookup (getPair getLocalIds getTermElement) . 
fromIntegral getType :: MonadGet m => m r -> m (Type.TypeR r Symbol) getType getReference = getABT getSymbol getUnit go @@ -361,20 +361,27 @@ getDeclFormat = getWord8 >>= \case getDeclComponent = DeclFormat.LocallyIndexedComponent <$> getFramedArray (getPair getLocalIds getDeclElement) - where - getDeclElement = DeclFormat.DataDeclaration - <$> getDeclType - <*> getModifier - <*> getList getSymbol - <*> getList (getType getRecursiveReference) - getDeclType = getWord8 >>= \case - 0 -> pure Decl.Data - 1 -> pure Decl.Effect - other -> unknownTag "DeclType" other - getModifier = getWord8 >>= \case - 0 -> pure Decl.Structural - 1 -> Decl.Unique <$> getText - other -> unknownTag "DeclModifier" other + +getDeclElement :: MonadGet m => m (DeclFormat.Decl Symbol) +getDeclElement = DeclFormat.DataDeclaration + <$> getDeclType + <*> getModifier + <*> getList getSymbol + <*> getList (getType getRecursiveReference) + where + getDeclType = getWord8 >>= \case + 0 -> pure Decl.Data + 1 -> pure Decl.Effect + other -> unknownTag "DeclType" other + getModifier = getWord8 >>= \case + 0 -> pure Decl.Structural + 1 -> Decl.Unique <$> getText + other -> unknownTag "DeclModifier" other + +lookupDeclElement :: + MonadGet m => Reference.ComponentIndex -> m (LocalIds, DeclFormat.Decl Symbol) +lookupDeclElement = + unsafeFramedArrayLookup (getPair getLocalIds getDeclElement) . fromIntegral putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () putBranchFormat = \case diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 5bc24f5fa4..a9c58bcb2b 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -185,8 +185,8 @@ getFramedArray getA = do -- | Look up a 0-based index in a framed array, O(num array elements), -- because it reads the start indices for all elements first. 
-- This could be skipped if the indices had a fixed size instead of varint -framedArrayLookup :: MonadGet m => Get a -> Int -> m (Maybe a) -framedArrayLookup getA index = do +lookupFramedArray :: MonadGet m => m a -> Int -> m (Maybe a) +lookupFramedArray getA index = do offsets <- getVector getVarInt if index > Vector.length offsets then pure Nothing @@ -194,7 +194,7 @@ framedArrayLookup getA index = do skip (Vector.unsafeIndex offsets index) Just <$> getA -unsafeFramedArrayLookup :: MonadGet m => Get a -> Int -> m a +unsafeFramedArrayLookup :: MonadGet m => m a -> Int -> m a unsafeFramedArrayLookup getA index = do offsets <- getVector getVarInt skip (Vector.unsafeIndex offsets index) From be18badbcf64c96ca900333df663b1c3b1a1bc80 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 21 Sep 2020 13:21:09 -0400 Subject: [PATCH 007/225] put patch --- .../U/Codebase/Sqlite/Patch/Format.hs | 7 +- .../U/Codebase/Sqlite/Serialization.hs | 104 +++++++++++------- 2 files changed, 69 insertions(+), 42 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs index ba4a0b6f14..55cbf9106e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs @@ -1 +1,6 @@ -module U.Codebase.Sqlite.Patch.Format where \ No newline at end of file +module U.Codebase.Sqlite.Patch.Format where + +import U.Codebase.Sqlite.Patch.Diff +import U.Codebase.Sqlite.Patch.Full + +data PatchFormat = Full Patch | Diff PatchDiff diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index c8bd052884..42427de109 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -12,30 +12,32 @@ import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) import 
Data.List (elemIndex) import qualified Data.Set as Set -import U.Codebase.Kind (Kind) import Data.Word (Word64) +import qualified U.Codebase.Decl as Decl +import U.Codebase.Kind (Kind) import qualified U.Codebase.Kind as Kind import U.Codebase.Reference (Reference' (ReferenceBuiltin, ReferenceDerived)) import qualified U.Codebase.Reference as Reference import U.Codebase.Referent (Referent') import qualified U.Codebase.Referent as Referent -import U.Codebase.Sqlite.LocalIds +import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat +import qualified U.Codebase.Sqlite.Branch.Full as BranchFull +import qualified U.Codebase.Sqlite.Branch.MetadataSet as MetadataSet import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat -import qualified U.Codebase.Sqlite.Term.Format as TermFormat +import U.Codebase.Sqlite.LocalIds +import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff +import qualified U.Codebase.Sqlite.Patch.Format as PatchFormat +import qualified U.Codebase.Sqlite.Patch.Full as PatchFull +import qualified U.Codebase.Sqlite.Patch.TermEdit as TermEdit +import qualified U.Codebase.Sqlite.Patch.TypeEdit as TypeEdit import U.Codebase.Sqlite.Symbol +import qualified U.Codebase.Sqlite.Term.Format as TermFormat import qualified U.Codebase.Term as Term import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT import U.Util.Serialization import Prelude hiding (getChar, putChar) -import qualified U.Codebase.Decl as Decl -import qualified U.Codebase.Sqlite.Branch.Full as BranchFull -import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff -import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff -import qualified U.Codebase.Sqlite.Branch.MetadataSet as MetadataSet -import qualified U.Codebase.Sqlite.Patch.TermEdit as TermEdit -import qualified U.Codebase.Sqlite.Patch.TypeEdit as TypeEdit putABT :: (MonadPut m, Foldable f, Functor f, Ord v) => @@ -101,11 +103,14 @@ 
put/get/write/read - [x][x][ ][ ] types of terms - [x][x][ ][ ] decl component - [-][-][ ][ ] causal +- [x][x][ ][ ] BranchFormat - [x][ ][ ][ ] full branch - [x][ ][ ][ ] diff branch -- [ ][ ][ ][ ] full patch +- [x][ ][ ][ ] PatchFormat +- [x][ ][ ][ ] full patch - [x][ ][ ][ ] diff patch - [ ] O(1) framed array access? +- [ ] tests for framed array access - [ ] add to dependents index - [ ] add to type index @@ -311,7 +316,7 @@ getTermElement = getABT getSymbol getUnit getF tag -> unknownTag "SeqOp" tag lookupTermElement :: MonadGet m => Reference.ComponentIndex -> m (LocalIds, TermFormat.Term) -lookupTermElement = +lookupTermElement = unsafeFramedArrayLookup (getPair getLocalIds getTermElement) . fromIntegral getType :: MonadGet m => m r -> m (Type.TypeR r Symbol) @@ -337,12 +342,12 @@ putDeclFormat :: MonadPut m => DeclFormat.DeclFormat -> m () putDeclFormat = \case DeclFormat.Decl c -> putWord8 0 *> putDeclComponent c where - -- |These use a framed array for randomer access + -- These use a framed array for randomer access putDeclComponent :: MonadPut m => DeclFormat.LocallyIndexedComponent -> m () putDeclComponent (DeclFormat.LocallyIndexedComponent v) = putFramedArray (putPair putLocalIds putDeclElement) v where - putDeclElement DeclFormat.DataDeclaration{..} = do + putDeclElement DeclFormat.DataDeclaration {..} = do putDeclType declType putModifier modifier putFoldable putSymbol bound @@ -358,29 +363,30 @@ getDeclFormat = getWord8 >>= \case other -> unknownTag "DeclFormat" other where getDeclComponent :: MonadGet m => m DeclFormat.LocallyIndexedComponent - getDeclComponent = - DeclFormat.LocallyIndexedComponent <$> - getFramedArray (getPair getLocalIds getDeclElement) + getDeclComponent = + DeclFormat.LocallyIndexedComponent + <$> getFramedArray (getPair getLocalIds getDeclElement) getDeclElement :: MonadGet m => m (DeclFormat.Decl Symbol) -getDeclElement = DeclFormat.DataDeclaration - <$> getDeclType - <*> getModifier - <*> getList getSymbol - <*> getList 
(getType getRecursiveReference) +getDeclElement = + DeclFormat.DataDeclaration + <$> getDeclType + <*> getModifier + <*> getList getSymbol + <*> getList (getType getRecursiveReference) where - getDeclType = getWord8 >>= \case - 0 -> pure Decl.Data - 1 -> pure Decl.Effect - other -> unknownTag "DeclType" other - getModifier = getWord8 >>= \case - 0 -> pure Decl.Structural - 1 -> Decl.Unique <$> getText - other -> unknownTag "DeclModifier" other - -lookupDeclElement :: + getDeclType = getWord8 >>= \case + 0 -> pure Decl.Data + 1 -> pure Decl.Effect + other -> unknownTag "DeclType" other + getModifier = getWord8 >>= \case + 0 -> pure Decl.Structural + 1 -> Decl.Unique <$> getText + other -> unknownTag "DeclModifier" other + +lookupDeclElement :: MonadGet m => Reference.ComponentIndex -> m (LocalIds, DeclFormat.Decl Symbol) -lookupDeclElement = +lookupDeclElement = unsafeFramedArrayLookup (getPair getLocalIds getDeclElement) . fromIntegral putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () @@ -409,18 +415,34 @@ putBranchFormat = \case putFoldable put removes putPatchOp BranchDiff.PatchRemove = putWord8 0 putPatchOp BranchDiff.PatchAdd = putWord8 1 - putPatchOp (BranchDiff.PatchEdit (PatchDiff.PatchDiff r atm atp rtm rtp)) = do - putWord8 2 - putVarInt r - putMap putReferent' putTermEdit atm - putMap putReference putTypeEdit atp - putFoldable putReferent' rtm - putFoldable putReference rtp + putPatchOp (BranchDiff.PatchEdit d) = putWord8 2 *> putPatchDiff d + +putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () +putPatchFormat = \case + PatchFormat.Full p -> putWord8 0 *> putPatchFull p + PatchFormat.Diff p -> putWord8 1 *> putPatchDiff p + +putPatchFull :: MonadPut m => PatchFull.Patch -> m () +putPatchFull (PatchFull.Patch termEdits typeEdits) = do + putMap putReferent' putTermEdit termEdits + putMap putReference putTypeEdit typeEdits + where + putReferent' = putReferent putReference putReference + +putPatchDiff :: MonadPut m => 
PatchDiff.PatchDiff -> m () +putPatchDiff (PatchDiff.PatchDiff r atm atp rtm rtp) = do + putVarInt r + putMap putReferent' putTermEdit atm + putMap putReference putTypeEdit atp + putFoldable putReferent' rtm + putFoldable putReference rtp + where + putReferent' = putReferent putReference putReference putTermEdit :: MonadPut m => TermEdit.TermEdit -> m () putTermEdit TermEdit.Deprecate = putWord8 0 putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReferent' r *> putTyping t - where + where putTyping TermEdit.Same = putWord8 0 putTyping TermEdit.Subtype = putWord8 1 putTyping TermEdit.Different = putWord8 2 From 1c77516161635351aa834d6c4610a4e895866728 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 21 Sep 2020 19:32:59 -0400 Subject: [PATCH 008/225] query primitives and corrected indexes for numerous sql tables --- .../U/Codebase/Sqlite/ObjectType.hs | 21 ++ .../U/Codebase/Sqlite/Queries.hs | 237 +++++++++++++++++- .../codebase-sqlite/sql/create-index.sql | 78 ++---- codebase2/codebase-sqlite/sql/create.sql | 34 +-- .../unison-codebase-sqlite.cabal | 2 + codebase2/util/U/Util/Base32Hex.hs | 6 +- 6 files changed, 292 insertions(+), 86 deletions(-) create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs new file mode 100644 index 0000000000..ba8f730a4c --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs @@ -0,0 +1,21 @@ +module U.Codebase.Sqlite.ObjectType where + +import Database.SQLite.Simple.FromField (FromField(..)) +import Database.SQLite.Simple.ToField (ToField(..)) +import Database.SQLite.Simple (SQLData(SQLInteger)) + +-- |Don't reorder these, they are part of the database +data ObjectType + = TermComponent -- 0 + | TermComponentTypes -- 1 + | DeclComponent -- 2 + | Namespace -- 3 + | Patch -- 4 + -- -- | LocalIds -- 5 -- future? 
+ deriving (Eq, Ord, Show, Enum) + +instance ToField ObjectType where + toField = SQLInteger . fromIntegral . fromEnum + +instance FromField ObjectType where + fromField = fmap toEnum . fromField \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index f024bbd94f..236d338ba7 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -1,9 +1,234 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE FlexibleInstances #-} +{-# OPTIONS_GHC -Wno-orphans #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE QuasiQuotes #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TemplateHaskell #-} + module U.Codebase.Sqlite.Queries where --- import Data.String.Here.Uninterpolated (here) --- import qualified Database.SQLite.Simple as SQLite --- import Database.SQLite.Simple (FromRow, Connection, Only(..), ToRow(..), SQLData(SQLNull,SQLText)) --- import Database.SQLite.Simple.FromField --- import Database.SQLite.Simple.ToField --- import Data.Maybe (fromJust) +import Control.Monad.IO.Class (MonadIO) +import Control.Monad.Reader (MonadReader (ask)) +import Data.ByteString (ByteString) +import Control.Monad.Trans (MonadIO (liftIO)) +import Data.Maybe (fromJust) +import Data.String.Here.Uninterpolated (here) +import Data.Text (Text) +import qualified Database.SQLite.Simple as SQLite +import Database.SQLite.Simple ((:.) 
(..), Connection, FromRow, Only (..), SQLData (SQLNull), ToRow (..)) +import Data.Word (Word64) +import Database.SQLite.Simple.FromField +import Database.SQLite.Simple.ToField +import U.Codebase.Reference (Reference' (ReferenceBuiltin, ReferenceDerived)) +import qualified U.Codebase.Reference as Reference +import qualified U.Codebase.Referent as Referent +import U.Codebase.Sqlite.ObjectType +import U.Util.Base32Hex (Base32Hex (..)) +import U.Util.Hashable (Hashable) + +-- * types +type DB m = (MonadIO m, MonadReader Connection m) + +newtype HashId = HashId Word64 deriving (Eq, Ord) deriving (Hashable, FromField, ToField) via Word64 +newtype TextId = TextId Word64 deriving (Eq, Ord) deriving (Hashable, FromField, ToField) via Word64 + +newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Hashable, FromField, ToField) via Word64 +newtype TypeId = TypeId ObjectId deriving (FromField, ToField) via ObjectId +newtype TermId = TermCycleId ObjectId deriving (FromField, ToField) via ObjectId +newtype DeclId = DeclCycleId ObjectId deriving (FromField, ToField) via ObjectId +newtype CausalHashId = CausalHashId HashId deriving (Hashable, FromField, ToField) via HashId +newtype CausalOldHashId = CausalOldHashId HashId deriving (Hashable, FromField, ToField) via HashId +newtype NamespaceHashId = NamespaceHashId ObjectId deriving (Hashable, FromField, ToField) via ObjectId + +type DerivedReferent = Referent.Id' ObjectId ObjectId +type DerivedReference = Reference.Id' ObjectId +-- * main squeeze + +saveHash :: DB m => Base32Hex -> m HashId +saveHash base32 = execute sql (Only base32) >> queryOne (loadHash base32) + where sql = [here| INSERT OR IGNORE INTO hash (base32) VALUES (?) |] + +loadHash :: DB m => Base32Hex -> m (Maybe HashId) +loadHash base32 = queryOnly sql (Only base32) + where sql = [here| SELECT id FROM hash WHERE base32 = ? 
|] + +loadHashById :: DB m => HashId -> m (Maybe Base32Hex) +loadHashById h = queryOnly sql (Only h) + where sql = [here| SELECT base32 FROM hash WHERE id = ? |] + +saveText :: DB m => Text -> m TextId +saveText t = execute sql (Only t) >> queryOne (loadText t) + where sql = [here| INSERT OR IGNORE INTO text (text) VALUES (?) |] + +loadText :: DB m => Text -> m (Maybe TextId) +loadText t = queryOnly sql (Only t) + where sql = [here| SELECT id FROM text WHERE text = ? |] + +loadTextById :: DB m => TextId -> m (Maybe Text) +loadTextById h = queryOnly sql (Only h) + where sql = [here| SELECT text FROM text WHERE id = ? |] + +saveHashObject :: DB m => HashId -> ObjectId -> Int -> m () +saveHashObject hId oId version = execute sql (hId, oId, version) where + sql = [here| + INSERT OR IGNORE INTO hash_object (hash_id, object_id, version) + VALUES (?, ?, ?) + |] + +saveObject :: DB m => HashId -> ObjectType -> ByteString -> m ObjectId +saveObject h t blob = + execute sql (h, t, blob) >> queryOne (objectByPrimaryHashId h) + where + sql = [here| + INSERT OR IGNORE INTO object (primary_hash_id, type_id, bytes) + VALUES (?, ?, ?) + |] + +loadObjectById :: DB m => ObjectId -> m (Maybe ByteString) +loadObjectById oId = queryOnly sql (Only oId) where sql = [here| + SELECT bytes FROM object WHERE id = ? +|] + +objectByPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) +objectByPrimaryHashId h = queryOnly sql (Only h) where sql = [here| + SELECT id FROM object WHERE primary_hash_id = ? +|] + +updateObjectBlob :: DB m => ObjectId -> ByteString -> m () +updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| + UPDATE object SET bytes = ? WHERE id = ? +|] + +-- |Maybe we would generalize this to something other than NamespaceHash if we +-- end up wanting to store other kinds of Causals here too. 
+saveCausal :: DB m => CausalHashId -> NamespaceHashId -> m () +saveCausal self value = execute sql (self, value) where sql = [here| + INSERT OR IGNORE INTO causal (self_hash_id, value_hash_id) VALUES (?, ?) +|] + +loadCausalValueHash :: DB m => CausalHashId -> m (Maybe NamespaceHashId) +loadCausalValueHash hash = queryOnly sql (Only hash) where sql = [here| + SELECT value_hash_id FROM causal WHERE self_hash_id = ? +|] + +saveCausalOld :: DB m => HashId -> CausalHashId -> m () +saveCausalOld v1 v2 = execute sql (v1, v2) where sql = [here| + INSERT OR IGNORE INTO causal_old (old_hash_id, new_hash_id) VALUES (?, ?) +|] + +loadCausalHashIdByCausalOldHash :: DB m => CausalOldHashId -> m (Maybe CausalHashId) +loadCausalHashIdByCausalOldHash id = queryOnly sql (Only id) where sql = [here| + SELECT new_hash_id FROM causal_old where old_hash_id = ? +|] + +loadOldCausalValueHash :: DB m => CausalOldHashId -> m (Maybe NamespaceHashId) +loadOldCausalValueHash id = queryOnly sql (Only id) where sql = [here| + SELECT value_hash_id FROM causal + INNER JOIN causal_old ON self_hash_id = new_hash_id + WHERE old_hash_id = ? +|] + +saveCausalParent :: DB m => CausalHashId -> CausalHashId -> m () +saveCausalParent child parent = execute sql (child, parent) where + sql = [here| + INSERT OR IGNORE INTO causal_parent (causal_id, parent_id) VALUES (?, ?) + |] + +loadCausalParents :: DB m => CausalHashId -> m [CausalHashId] +loadCausalParents h = queryList sql (Only h) where sql = [here| + SELECT parent_id FROM causal_parent WHERE causal_id = ? +|] + +-- * Index-building +addToTypeIndex :: DB m => Reference' TextId HashId -> DerivedReferent -> m () +addToTypeIndex tp tm = execute sql (tp :. tm) where sql = [here| + INSERT OR IGNORE INTO find_type_index ( + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index, + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index + ) VALUES (?, ?, ?, ?, ?, ?) 
+|] + +addToTypeMentionsIndex :: DB m => Reference' TextId HashId -> DerivedReferent -> m () +addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| + INSERT OR IGNORE INTO find_type_mentions_index ( + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index, + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index + ) VALUES (?, ?, ?, ?, ?, ?) +|] + +addToDependentsIndex :: DB m => Reference' TextId ObjectId -> DerivedReference -> m () +addToDependentsIndex dependency dependent = execute sql (dependency :. dependent) + where sql = [here| + INSERT OR IGNORE INTO dependents_index ( + dependency_builtin, + dependency_object_id, + dependency_component_index, + dependent_object_id, + dependent_component_index + ) VALUES (?, ?, ?, ?, ?) + |] + + +-- * helper functions +queryList :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f [b] +queryList q r = map fromOnly <$> query q r +queryMaybe :: (DB f, ToRow q, FromRow b) => SQLite.Query -> q -> f (Maybe b) +queryMaybe q r = headMay <$> query q r + +queryOnly :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f (Maybe b) +queryOnly q r = fmap fromOnly <$> queryMaybe q r + +queryOne :: Functor f => f (Maybe b) -> f b +queryOne = fmap fromJust + +queryExists :: (DB m, ToRow q) => SQLite.Query -> q -> m Bool +queryExists q r = not . null . 
map (id @SQLData) <$> queryList q r + +query :: (DB m, ToRow q, SQLite.FromRow r) => SQLite.Query -> q -> m [r] +query q r = do c <- ask; liftIO $ SQLite.query c q r +execute :: (DB m, ToRow q) => SQLite.Query -> q -> m () +execute q r = do c <- ask; liftIO $ SQLite.execute c q r + +headMay :: [a] -> Maybe a +headMay [] = Nothing +headMay (a:_) = Just a + +-- * orphan instances +deriving via Text instance ToField Base32Hex +deriving via Text instance FromField Base32Hex + +instance ToRow (Reference' TextId HashId) where + -- | builtinId, hashId, componentIndex + toRow = \case + ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] + ReferenceDerived (Reference.Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) + +instance ToRow (Reference' TextId ObjectId) where + -- | builtinId, hashId, componentIndex + toRow = \case + ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] + ReferenceDerived (Reference.Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) + +instance ToRow (Reference.Id' ObjectId) where + -- | builtinId, hashId, componentIndex + toRow = \case + Reference.Id h i -> toRow (Only h) ++ toRow (Only i) +instance ToRow DerivedReferent where + -- | objectId, componentIndex, constructorIndex + toRow = \case + Referent.RefId (Reference.Id h i) -> toRow (Only h) ++ toRow (Only i) ++ [SQLNull] + Referent.ConId (Reference.Id h i) cid -> toRow (Only h) ++ toRow (Only i) ++ toRow (Only cid) diff --git a/codebase2/codebase-sqlite/sql/create-index.sql b/codebase2/codebase-sqlite/sql/create-index.sql index db2cc5a779..285732429c 100644 --- a/codebase2/codebase-sqlite/sql/create-index.sql +++ b/codebase2/codebase-sqlite/sql/create-index.sql @@ -1,64 +1,13 @@ ---CREATE TABLE reference_derived ( --- id INTEGER NOT NULL PRIMARY KEY, --- hash_id INTEGER NOT NULL REFERENCES hash(id), --- component_index INTEGER NOT NULL, --- UNIQUE (hash_id, component_index) ---); ---CREATE INDEX reference_derived_hash_id ON reference_derived(hash_id); --- ---CREATE TABLE 
reference ( --- id INTEGER NOT NULL PRIMARY KEY, --- builtin TEXT, -- a builtin name, or null --- reference_derived_id INTEGER REFERENCES reference_derived(id), --- UNIQUE(builtin, reference_derived_id), --- -- exactly one should be null --- CHECK (builtin IS NULL <> reference_derived_id IS NULL) ---); --- ----- `Referent' ReferenceDerivedId` but without `ConstructorType`, ----- which is linked to the object. ---CREATE TABLE referent_derived ( --- id INTEGER NOT NULL PRIMARY KEY, --- reference_derived_id INTEGER NOT NULL REFERENCES reference_derived(id), --- constructor_id INTEGER, --- UNIQUE(reference_derived_id, constructor_id) ---); --- ----- just using rowid since we don't need joins ----- index terms by types ---CREATE TABLE find_type_index ( --- type_reference_id INTEGER NOT NULL REFERENCES reference(id), --- referent_derived_id INTEGER NOT NULL REFERENCES referent_derived(id) ---); ---CREATE INDEX find_type_index_reference ON find_type_index(type_reference_id); ---CREATE INDEX find_type_index_referent ON find_type_index(referent_derived_id); --- ---CREATE TABLE find_type_mentions_index ( --- type_reference_id INTEGER NOT NULL REFERENCES reference(id), --- referent_id INTEGER NOT NULL REFERENCES referent_derived(id) ---); ---CREATE INDEX find_type_mentions_index_reference ON find_type_mentions_index(type_reference_id); ---CREATE INDEX find_type_mentions_index_referent ON find_type_mentions_index(referent_id); --- ---CREATE TABLE dependents_index ( --- dependency_id INTEGER NOT NULL REFERENCES reference(id), --- dependent_id INTEGER NOT NULL REFERENCES reference_derived(id) ---); ---CREATE INDEX dependents_index_dependency ON dependents_index(dependency_id); ---CREATE INDEX dependents_index_dependent ON dependents_index(dependent_id); - +-- find type index uses hash-based references instead of component-based +-- references, because they may be arbitrary types, not just the head +-- types that are stored in the codebase. 
CREATE TABLE find_type_index ( - type_reference_builtin TEXT NULL, + type_reference_builtin INTEGER NULL REFERENCES text(id), type_reference_hash_id INTEGER NULL REFERENCES hash(id), type_reference_component_index INTEGER NULL, term_referent_object_id INTEGER NOT NULL REFERENCES hash(id), term_referent_component_index INTEGER NOT NULL, term_referent_constructor_index INTEGER NULL, - PRIMARY KEY( - type_reference_builtin, - type_reference_derived_hash_id, - type_reference_derived_component_index - ), UNIQUE ( term_referent_derived_object_id, term_referent_derived_component_index, @@ -73,19 +22,19 @@ CREATE TABLE find_type_index ( type_reference_derived_component_index IS NULL ) ); +CREATE INDEX find_type_index_type ON find_type_index ( + type_reference_builtin, + type_reference_derived_hash_id, + type_reference_derived_component_index +); CREATE TABLE find_type_mentions_index ( - type_reference_builtin TEXT NULL, + type_reference_builtin INTEGER NULL REFERENCES text(id), type_reference_hash_id INTEGER NULL REFERENCES hash(id), type_reference_component_index INTEGER NULL, term_referent_object_id INTEGER NOT NULL REFERENCES hash(id), term_referent_derived_component_index INTEGER NOT NULL, term_referent_constructor_index INTEGER NULL, - PRIMARY KEY( - type_reference_builtin, - type_reference_derived_hash_id, - type_reference_derived_component_index - ), CHECK ( type_reference_builtin IS NULL = type_reference_derived_hash_id IS NOT NULL @@ -95,9 +44,14 @@ CREATE TABLE find_type_mentions_index ( type_reference_derived_component_index IS NULL ) ); +CREATE INDEX find_type_mentions_index_type ON find_type_mentions_index ( + type_reference_builtin INTEGER NULL REFERENCES text(id), + type_reference_hash_id INTEGER NULL REFERENCES hash(id), + type_reference_component_index INTEGER NULL +); CREATE TABLE dependents_index ( - dependency_builtin TEXT NULL, + dependency_builtin INTEGER NULL REFERENCES text(id), dependency_object_id INTEGER NULL REFERENCES hash(id), 
dependency_component_index INTEGER NULL dependent_object_id INTEGER NOT NULL REFERENCES hash(id), diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 48ecca5a52..5ba8e0004f 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -7,6 +7,10 @@ CREATE TABLE hash ( ); CREATE INDEX hash_base32 ON hash(base32); +CREATE TABLE text ( + id INTEGER PRIMARY KEY, + text TEXT UNIQUE NOT NULL +); -- just came up with this, a layer of indirection to allow multiple hash_ids to -- reference the same object. -- so: SELECT object.id, bytes FROM object @@ -33,8 +37,8 @@ INSERT INTO object_type_description (id, description) VALUES (1, "Types of Term Component"), -- [Nat -> Nat] (2, "Decl Component"), -- unique type Animal = Cat | Dog | Mouse (3, "Namespace"), -- a one-level slice - (4, "Patch"), -- replace term #abc with term #def - (5, "Local Text/Object Lookup") + (4, "Patch") -- replace term #abc with term #def + -- (5, "Local Text/Object Lookup") -- future ; -- How should objects be linked to hashes? 
(and old hashes) @@ -88,17 +92,17 @@ CREATE TABLE causal_old ( new_hash_id INTEGER NOT NULL REFERENCES hash(id) ); --- |Links a referent to its type's object -CREATE TABLE type_of_referent ( - object_id INTEGER NOT NULL REFERENCES object(id), - component_index INTEGER NOT NULL, - constructor_index INTEGER NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (object_id, component_index, constructor_index) -); +-- -- |Links a referent to its type's object +-- CREATE TABLE type_of_referent ( +-- object_id INTEGER NOT NULL REFERENCES object(id), +-- component_index INTEGER NOT NULL, +-- constructor_index INTEGER NULL, +-- bytes BLOB NOT NULL, +-- PRIMARY KEY (object_id, component_index, constructor_index) +-- ); ---CREATE TABLE type_of_referent ( --- referent_derived_id INTEGER NOT NULL PRIMARY KEY REFERENCES referent_derived(id), --- type_object_id INTEGER NOT NULL REFERENCES object(id) ---); ---CREATE INDEX type_of_referent_object_id ON type_of_referent(type_object_id); +-- --CREATE TABLE type_of_referent ( +-- -- referent_derived_id INTEGER NOT NULL PRIMARY KEY REFERENCES referent_derived(id), +-- -- type_object_id INTEGER NOT NULL REFERENCES object(id) +-- --); +-- --CREATE INDEX type_of_referent_object_id ON type_of_referent(type_object_id); diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index ff695d8923..adcac26082 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -23,11 +23,13 @@ library U.Codebase.Sqlite.Causal U.Codebase.Sqlite.DbId U.Codebase.Sqlite.LocalIds + U.Codebase.Sqlite.ObjectType U.Codebase.Sqlite.Patch.Format U.Codebase.Sqlite.Patch.Full U.Codebase.Sqlite.Patch.Diff U.Codebase.Sqlite.Patch.TermEdit U.Codebase.Sqlite.Patch.TypeEdit + U.Codebase.Sqlite.Queries U.Codebase.Sqlite.Reference U.Codebase.Sqlite.Referent U.Codebase.Sqlite.Serialization diff --git a/codebase2/util/U/Util/Base32Hex.hs 
b/codebase2/util/U/Util/Base32Hex.hs index 3e1110a996..f5c091bf4b 100644 --- a/codebase2/util/U/Util/Base32Hex.hs +++ b/codebase2/util/U/Util/Base32Hex.hs @@ -3,7 +3,7 @@ module U.Util.Base32Hex - (Base32Hex, fromByteString, toByteString, toText, textToByteString) + (Base32Hex(UnsafeBase32Hex), fromByteString, toByteString, toText, textToByteString) where import Data.Text (Text) @@ -13,7 +13,7 @@ import qualified Data.Text as Text import Data.Text.Encoding (encodeUtf8, decodeUtf8) import Data.Maybe (fromJust) -newtype Base32Hex = Base32Hex { toText :: Text } +newtype Base32Hex = UnsafeBase32Hex { toText :: Text } deriving (Eq, Ord, Show) -- | Return the lowercase unpadded base32Hex encoding of this 'ByteString'. @@ -22,7 +22,7 @@ fromByteString :: ByteString -> Base32Hex fromByteString bs = -- we're using an uppercase encoder that adds padding, so we drop the -- padding and convert it to lowercase - Base32Hex . Text.toLower . Text.dropWhileEnd (== '=') . decodeUtf8 $ + UnsafeBase32Hex . Text.toLower . Text.dropWhileEnd (== '=') . 
decodeUtf8 $ Codec.Binary.Base32Hex.encode bs -- by not exporting the Base32Hex constructor, we can trust that it's valid From 51772f6d5ba825f055b79ade5add48b2063bdf8e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 25 Sep 2020 10:55:14 -0400 Subject: [PATCH 009/225] create syncv1v2 package --- codebase-convert-1to2/app/Main.hs | 2 + .../lib/U/Codebase/Convert/SyncV1V2.hs | 933 ++++++++++++++++++ .../unison-codebase-convert-1to2.cabal | 60 ++ .../codebase/Unison/Codebase/V1/Branch/Raw.hs | 4 +- hie.yaml | 6 + stack.yaml | 1 + 6 files changed, 1004 insertions(+), 2 deletions(-) create mode 100644 codebase-convert-1to2/app/Main.hs create mode 100644 codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs create mode 100644 codebase-convert-1to2/unison-codebase-convert-1to2.cabal diff --git a/codebase-convert-1to2/app/Main.hs b/codebase-convert-1to2/app/Main.hs new file mode 100644 index 0000000000..a1e202107f --- /dev/null +++ b/codebase-convert-1to2/app/Main.hs @@ -0,0 +1,2 @@ +main :: IO () +main = putStrLn "Hello, World!" 
\ No newline at end of file diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs new file mode 100644 index 0000000000..be386f8a90 --- /dev/null +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -0,0 +1,933 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE DerivingStrategies #-} +module U.Codebase.Convert.SyncV1V2 where + +import qualified U.Util.Hashable as H +import Database.SQLite.Simple.FromField (FromField) +import U.Util.Hash (Hash) +import qualified Unison.Codebase.V1.Branch.Raw as V1 +import Data.Text (Text) +import Database.SQLite.Simple.ToField (ToField) +import qualified Unison.Codebase.V1.Reference as V1.Reference + +newtype V1 a = V1 {runV1 :: a} deriving (Eq, Ord, Show) + +newtype V2 a = V2 {runV2 :: a} + deriving (Eq, Ord, Show, Functor) + deriving (FromField, H.Accumulate, H.Hashable) via a + +data V1EntityRef + = Decl1 (V1 Hash) -- this will refer to the whole component + | Term1 (V1 Hash) -- ditto + | Patch1 (V1 V1.EditHash) + | Branch1 (V1 V1.BranchHash) + deriving (Eq, Ord, Show) + +v1EntityRefToHash :: V1EntityRef -> V1 Hash +v1EntityRefToHash = \case + Decl1 h -> h + Term1 h -> h + Patch1 (V1 (V1.EditHash h)) -> V1 h + Branch1 (V1 (V1.BranchHash h)) -> V1 h + +newtype Base32HexPrefix = Base32HexPrefix Text + deriving (Show) via Text + deriving (ToField) via Text + deriving (FromField) via Text + +-- newtype PatchHash h = PatchHash h +-- newtype NamespaceHash h = NamespaceHash h +newtype CausalHash h = CausalHash h + +-- -- |things that appear in a deserialized RawBranch +-- type V2EntityRef = +-- V2EntityRefH +-- Hash +-- (PatchHash Hash) +-- (NamespaceHash Hash) +-- (CausalHash Hash) + +-- -- -- |things that appear in a serialized RawBranch +-- -- type V2EntityRefS = +-- -- V2EntityRefH +-- -- Db.ObjectId +-- -- (PatchHash Db.ObjectId) +-- -- (NamespaceHash Db.NamespaceHashId) +-- -- (CausalHash 
Db.CausalHashId) + +-- -- data V2EntityRefH hr hp hn hc +-- -- = Decl2 V2.ReferenceId +-- -- | Term2 Reference.Id +-- -- | Patch2 PatchHash +-- -- | NamespaceHash2 NamespaceHash +-- -- | CausalHash2 CausalHash + +-- initializeV2DB :: MonadIO m => m () +-- initializeV2DB = error "todo" + +data FatalError + = NoRootBranch + | MissingBranch (V1 Hash) + | MissingPatch (V1 Hash) + | MissingTerm V1.Reference.Id + | MissingTermHash (V1 Hash) + | MissingTypeOfTerm V1.Reference.Id + | MissingDecl V1.Reference.Id + | MissingDeclHash (V1 Hash) + | InvalidBranch (V1 Hash) + | InvalidPatch (V1 Hash) + | InvalidTerm V1.Reference.Id + | InvalidTypeOfTerm V1.Reference.Id + | InvalidDecl V1.Reference.Id + +-- type Type = Type.Type Symbol Ann + +-- type Term = Term.Term Symbol Ann + +-- type Decl = DD.Decl Symbol Ann + +-- type Patch = Patch.Patch V1.Reference + +-- -- the H stands for "for hashing" +-- -- the S stands for "for serialization" +-- type Term2ComponentH = [Term2 Hash] + +-- type Term2ComponentS = [Term2 Db.ObjectId] + +-- type Decl2ComponentH = [Decl2 (Maybe Hash)] + +-- type Decl2S = Decl2 Db.ObjectId +-- type Decl2ComponentS = [Decl2S] + + +-- -- these have maybes in them to indicate a self-component reference +-- type Term2 h = V2.Term h + +-- type Decl2 h = DD.DeclR (V2.Reference h) Symbol () + +-- -- for indexing +-- type Decl2I = DD.DeclR (V2.Reference Db.ObjectId) Symbol () + +-- type Term2S = Term2 Db.ObjectId + +-- type Type2S = V2.Type Db.ObjectId + +-- -- what about referent types in the index? 
+ +-- -- type CtorType2S = Type.TypeH Db.ObjectId Symbol Ann +-- -- type Term2S = Term.TermH (Maybe Db.ObjectId) Symbol Ann +-- type Patch2S = Patch.Patch (V2.Reference Db.ObjectId) + +-- --type Term2S = ABT.Term (Term.F' (Maybe TermId) DeclId (Type.TypeH DeclId Symbol ()) Void ()) Symbol () +-- --alternative representation if embedded +-- --type Term2S = ABT.Term (Term.F' (Maybe TermId) DeclId TypeId Void ()) Symbol () + +-- fmtV :: S.Format Symbol +-- fmtV = S.V1.formatSymbol + +-- getV :: S.Get Symbol +-- getV = S.get S.V1.formatSymbol + +-- putV :: S.Put Symbol +-- putV = S.put fmtV + +-- fmtA :: S.Format Ann +-- fmtA = V1.formatAnn + +-- getA :: S.Get Ann +-- getA = S.get fmtA + +-- putA :: S.Put Ann +-- putA = S.put fmtA + +-- -- todo: this just converts a whole codebase, which we need to do locally \ +-- -- but we also want some code that imports just a particular sub-branch. +-- syncV1V2 :: forall m. MonadIO m => Connection -> CodebasePath -> m (Either FatalError ()) +-- syncV1V2 c rootDir = liftIO $ SQLite.withTransaction c . runExceptT . flip runReaderT c $ do +-- v1RootHash <- getV1RootBranchHash rootDir >>= maybe (throwError NoRootBranch) pure +-- -- starting from the root namespace, convert all entities you can find +-- convertEntities [Branch1 v1RootHash] +-- v2RootHash <- v2CausalHashForV1BranchHash v1RootHash +-- setV2Root v2RootHash +-- error "todo: compressEntities and vacuum db" v2RootHash + +-- -- Incorporating diff construction into the conversion is tough because +-- -- a) I was thinking we'd represent an older version as a diff against the +-- -- newer version, but the newer version hasn't been fully constructed +-- -- until the older versions have been converted and hashed. +-- -- b) If we just store all the old versions uncompressed, it might be too big. +-- -- (But no bigger than the v1 db.) But if that is okay, we can compress and +-- -- vacuum them afterwards. 
+ +-- pure () +-- where +-- setV2Root = error "todo: setV2Root" +-- v2CausalHashForV1BranchHash = error "todo: v2CausalHashForV1BranchHash" +-- convertEntities :: +-- forall m. +-- DB m => +-- MonadError FatalError m => +-- [V1EntityRef] -> +-- m () +-- convertEntities [] = pure () +-- convertEntities all@(h : rest) = do +-- termDirComponents <- componentMapForDir (V1.termsDir rootDir) +-- declsDirComponents <- componentMapForDir (V1.typesDir rootDir) +-- case h of +-- Term1 h -> +-- -- if this hash is already associated to an object +-- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do +-- -- load a cycle from disk +-- e <- loadTerm1 rootDir termDirComponents h +-- matchTerm1Dependencies h e >>= \case +-- Left missing -> convertEntities (missing ++ all) +-- Right lookup -> do +-- convertTerm1 lookup h e +-- convertEntities rest +-- Decl1 h -> +-- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do +-- d <- loadDecl1 rootDir declsDirComponents h +-- matchDecl1Dependencies h d >>= \case +-- Left missing -> convertEntities (missing ++ all) +-- Right lookup -> do +-- convertDecl1 (error "todo: lookup") h d +-- convertEntities rest +-- Patch1 h -> +-- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do +-- p <- loadPatch1 rootDir h +-- matchPatch1Dependencies ("patch " ++ show h) p >>= \case +-- Left missing -> convertEntities (missing ++ all) +-- Right lookup -> do +-- -- hashId <- Db.saveHashByteString (runV1 h) +-- -- savePatch hashId (Patch.hmap (lookup . 
V1) p) +-- error "todo" +-- convertEntities rest +-- Branch1 (V1.unRawHash -> h) -> +-- ifM (existsObjectWithHash h) (convertEntities rest) $ do +-- cb <- loadCausalBranch1 rootDir (V1 h) +-- matchCausalBranch1Dependencies ("branch " ++ show h) cb >>= \case +-- Left missing -> convertEntities (missing ++ all) +-- Right (lookupObject, lookupCausal) -> do +-- convertCausalBranch1 lookupObject lookupCausal cb +-- convertEntities rest + +-- -- | load a causal branch raw thingo +-- loadCausalBranch1 :: +-- MonadIO m => +-- MonadError FatalError m => +-- CodebasePath -> +-- V1 Hash -> +-- m (V1.Causal.Raw V1.Branch.Raw V1.Branch.Raw) +-- loadCausalBranch1 rootDir h = do +-- let file = V1.branchPath rootDir (V1.RawHash (runV1 h)) +-- ifM +-- (doesFileExist file) +-- ( S.getFromFile' (S.V1.getCausal0 S.V1.getRawBranch) file >>= \case +-- Left err -> throwError $ InvalidBranch h +-- Right c0 -> pure c0 +-- ) +-- (throwError $ MissingBranch h) + +-- primaryHashByHash1 :: DB m => V2.ObjectType -> Hash -> m (Maybe Hash) +-- primaryHashByHash1 t h = +-- Db.query sql (t, Base32Hex.fromHash h) <&> \case +-- [Only h] -> Just (Base32Hex.toHash h) +-- [] -> Nothing +-- hs -> +-- error $ +-- "hash2ForHash1 " ++ show t ++ " " +-- ++ take 10 (show h) +-- ++ " = " +-- ++ (show . map (take 10 . show)) hs +-- where +-- sql = +-- [here| +-- SELECT v2hash.base32 +-- FROM hash AS v2hash +-- INNER JOIN object ON object.primary_hash_id = v2hash.id +-- INNER JOIN hash_object ON object.id = hash_object.object_id +-- INNER JOIN hash AS v1hash ON hash_object.hash_id = v1hash.id +-- WHERE object.type_id = ? AND v1hash.base32 = ? +-- |] + +-- loadBranch1 :: +-- forall m. 
+-- MonadIO m => +-- MonadError FatalError m => +-- m V1.Branch.Raw +-- loadBranch1 = error "todo: loadBranch1" + +-- -- ifM (not <$> doesFileExist (V1.branchPath root h)) +-- -- (throwError $ MissingBranch h) +-- -- (do +-- -- branch1 <- loadBranch1 +-- -- ) + +-- newtype MdValuesR r = MdValues (Set r) + +-- deriving via +-- (Set r) +-- instance +-- Hashable r => Hashable (MdValuesR r) + +-- -- this is the version we'll hash +-- type RawBranch = +-- RawBranchH +-- (V2.Referent Hash Hash) -- terms +-- (V2.Reference Hash) -- types +-- (V2.Reference Hash) -- metadata +-- (V2 Hash) -- patches +-- (V2 Hash) -- children + +-- -- this is the version that closely corresponds to the db schema +-- type RawBranch2S = +-- RawBranchH +-- (V2.Referent Db.ObjectId Db.ObjectId) -- terms +-- (V2.Reference Db.ObjectId) -- types +-- (V2.Reference Db.ObjectId) -- metadata +-- Db.ObjectId -- patches +-- Db.CausalHashId -- children + +-- data RawBranchH termRef typeRef mdRef pRef cRef = RawBranch +-- { terms :: Map (NameSegment, termRef) (Set mdRef), +-- types :: Map (NameSegment, typeRef) (Set mdRef), +-- patches :: Map NameSegment pRef, +-- children :: Map NameSegment cRef +-- } + +-- type RawCausal = RawCausalH Db.CausalHashId Db.NamespaceHashId + +-- data RawCausalH hCausal hValue = RawCausal +-- { causalHash :: hCausal, +-- valueHash :: hValue, +-- parents :: [hCausal] +-- } + +-- instance Hashable RawBranch where +-- tokens b = +-- [ H.accumulateToken (terms b), +-- H.accumulateToken (types b), +-- H.accumulateToken (patches b), +-- H.accumulateToken (children b) +-- ] + +-- instance Hashable RawCausal where +-- tokens c = +-- [ H.accumulateToken (causalHash c), +-- H.accumulateToken (valueHash c), +-- H.accumulateToken (parents c) +-- ] + +-- getV1RootBranchHash :: MonadIO m => CodebasePath -> m (Maybe V1.Branch.Hash) +-- getV1RootBranchHash root = listDirectory (V1.branchHeadDir root) <&> \case +-- [single] -> Just . V1.Branch.Hash . 
Hash.unsafeFromBase32Hex $ Text.pack single +-- _ -> Nothing + +-- -- | Look for an ObjectId corresponding to the provided V1 hash. +-- -- Returns Left if not found. +-- lookupObject :: DB m => V1EntityRef -> m (Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))) +-- lookupObject r@(runV1 . v1EntityRefToHash -> h) = +-- getObjectIdByBase32Hex (Base32Hex.fromHash h) <&> \case +-- Nothing -> Left r +-- Just i -> Right (V1 h, i) + +-- -- | Look for a CausalHashId corresponding to the provided V1 hash. +-- -- Returns Left if not found. +-- lookupCausal :: DB m => V1.Branch.Hash -> m (Either (V1 Hash) (V1 Hash, (V2 Hash, Db.CausalHashId))) +-- lookupCausal (V1.unRawHash -> h) = +-- Db.queryMaybe sql (Only (Base32Hex.fromHash h)) <&> \case +-- Nothing -> Left (V1 h) +-- Just (v2Hash, id) -> Right (V1 h, (Base32Hex.toHash <$> v2Hash, id)) +-- where +-- sql = +-- [here| +-- SELECT new_hash.base32, new_hash_id +-- FROM causal_old +-- INNER JOIN hash old_hash ON old_hash_id = old_hash.id +-- INNER JOIN hash new_hash ON new_hash_id = new_hash.id +-- WHERE old_hash.base32 = ? +-- |] + +-- -- | no Maybes here, as all relevant ObjectId can be known in advance +-- saveTypeBlobForReferent :: DB m => V2.ReferentId Db.ObjectId -> Type2S -> m () +-- saveTypeBlobForReferent r type2s = +-- let blob = S.putBytes (S.V1.putTypeR (V2.putReference V2.putObjectId) putV V2.putUnit) type2s +-- in Db.saveTypeOfReferent r blob + +-- -- | Multiple hashes can map to a single object! +-- getObjectIdByBase32Hex :: DB m => Base32Hex -> m (Maybe (V2 Hash, Db.ObjectId)) +-- getObjectIdByBase32Hex h = +-- fmap (first (V2 . Base32Hex.toHash)) <$> Db.queryMaybe sql (Only h) +-- where +-- sql = +-- [here| +-- SELECT object.id +-- FROM hash +-- INNER JOIN hash_object ON hash_object.hash_id = hash.id +-- INNER JOIN object ON hash_object.object_id = object.id +-- WHERE hash.base32 = ? 
+-- |] + +-- augmentLookup :: Ord a => (a -> b) -> Map a b -> a -> b +-- augmentLookup f m a = fromMaybe (f a) (Map.lookup a m) + +-- saveReferenceAsReference2 :: DB m => Reference -> m (V2.Reference Db.HashId) +-- saveReferenceAsReference2 = mapMOf Db.referenceTraversal Db.saveHashByteString + +-- -- | load a term component by its hash. +-- -- A v1 term component is split across an arbitrary number of files. +-- -- We have to 1) figure out what all the filenames are (preloaded into +-- -- `termDirComponents`), 2) load them all, +-- loadTerm1 :: +-- MonadIO m => +-- MonadError FatalError m => +-- CodebasePath -> +-- Map (V1 Hash) [Reference.Id] -> +-- V1 Hash -> +-- m [(Term, Type)] +-- loadTerm1 rootDir componentsFromDir h = case Map.lookup h componentsFromDir of +-- Nothing -> throwError $ MissingTermHash h +-- Just set -> case toList set of +-- [] -> error "Just [] shouldn't occur here." +-- Reference.Id h _i n : _etc -> for [0 .. n -1] \i -> do +-- let r = Reference.Id h i n +-- term <- +-- V1.FC.getTerm (S.get fmtV) (S.get fmtA) rootDir r +-- >>= maybe (throwError $ MissingTerm r) pure +-- typeOfTerm <- +-- V1.FC.getTypeOfTerm (S.get fmtV) (S.get fmtA) rootDir r +-- >>= maybe (throwError $ MissingTypeOfTerm r) pure +-- pure (term, typeOfTerm) + +-- loadDecl1 :: +-- MonadIO m => +-- MonadError FatalError m => +-- CodebasePath -> +-- Map (V1 Hash) [Reference.Id] -> +-- V1 Hash -> +-- m [Decl] +-- loadDecl1 rootDir componentsFromDir h = case Map.lookup h componentsFromDir of +-- Nothing -> throwError $ MissingDeclHash h +-- Just set -> case toList set of +-- [] -> error "Just [] shouldn't occur here." +-- Reference.Id h _i n : _etc -> for [0 .. 
n -1] \i -> do +-- let r = Reference.Id h i n +-- V1.FC.getDecl (S.get fmtV) (S.get fmtA) rootDir r +-- >>= maybe (throwError $ MissingDecl r) pure + +-- -- | load a patch +-- loadPatch1 :: (MonadIO m, MonadError FatalError m) => [Char] -> V1 Hash -> m (Patch.Patch Reference) +-- loadPatch1 rootDir h = do +-- let file = V1.editsPath rootDir (runV1 h) +-- ifM +-- (doesFileExist file) +-- ( S.getFromFile' S.V1.getEdits file >>= \case +-- Left _err -> throwError (InvalidPatch h) +-- Right edits -> pure edits +-- ) +-- (throwError $ MissingPatch h) + +-- -- 3) figure out what their combined dependencies are +-- matchTerm1Dependencies :: +-- DB m => +-- V1 Hash -> +-- [(Term, Type)] -> +-- m (Either [V1EntityRef] (V1 Hash -> (V2 Hash, Db.ObjectId))) +-- matchTerm1Dependencies componentHash tms = +-- let -- Get a list of Eithers corresponding to the non-self dependencies of this term. +-- lookupDependencyObjects :: +-- DB m => (Term, Type) -> m [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] +-- lookupDependencyObjects (term, typeOfTerm) = traverse lookupObject deps +-- where +-- (termTypeDeps, termTermDeps) = +-- partitionEithers +-- . map LD.toReference +-- . toList +-- $ Term.labeledDependencies term +-- deps = +-- nubOrd $ +-- [Decl1 (V1 h) | Reference.Derived h _i _n <- toList $ Type.dependencies typeOfTerm] +-- <> [Decl1 (V1 h) | Reference.Derived h _i _n <- termTypeDeps] +-- <> [ Term1 (V1 h) | Reference.Derived h _i _n <- termTermDeps, h /= runV1 componentHash -- don't include self-refs 😬 +-- ] +-- in do +-- -- check the lefts, if empty then everything is on the right; +-- -- else return left. 
+-- (missing, found) <- partitionEithers <$> foldMapM lookupDependencyObjects tms +-- pure $ case missing of +-- [] -> Right (makeLookup found $ "term " ++ show componentHash) +-- missing -> Left missing + +-- matchDecl1Dependencies :: +-- DB m => V1 Hash -> [Decl] -> m (Either [V1EntityRef] (V1 Hash -> Db.ObjectId)) +-- matchDecl1Dependencies componentHash decls = error "todo" -- let +-- -- lookupDependencyObjects +-- -- :: DB m => Decl -> m [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] +-- -- lookupDependencyObjects decl = traverse lookupObject . nubOrd $ +-- -- [ Decl1 (V1 h) | Reference.Derived h _i _n <- toList (DD.declDependencies decl) +-- -- , V1 h /= componentHash ] +-- -- in do +-- -- (missing, found) <- partitionEithers <$> foldMapM lookupDependencyObjects decls +-- -- pure $ case missing of +-- -- [] -> Right (makeLookup found $ "decl " ++ show componentHash) +-- -- missing -> Left missing + +-- matchPatch1Dependencies :: +-- DB m => +-- String -> +-- Patch -> +-- m (Either [V1EntityRef] (V1 Hash -> (V2 Hash, Db.ObjectId))) +-- matchPatch1Dependencies description (Patch.Patch tms tps) = do +-- deps :: [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] <- +-- traverse lookupObject . 
nubOrd $ +-- [ Term1 (V1 h) | (r, e) <- Relation.toList tms, Reference.Derived h _i _n <- r : TermEdit.references e +-- ] +-- ++ [ Decl1 (V1 h) | (r, e) <- Relation.toList tps, Reference.Derived h _i _n <- r : TypeEdit.references e +-- ] +-- let (missing, found) = partitionEithers deps +-- pure $ case missing of +-- [] -> Right (makeLookup found description) +-- missing -> Left missing + +-- -- | multiple lookups needed in converting branch +-- -- data CBDepLookup +-- matchCausalBranch1Dependencies :: +-- DB m => +-- String -> +-- V1.Causal.Raw V1.Branch.Raw V1.Branch.Raw -> +-- m (Either [V1EntityRef] (V1 Hash -> Db.ObjectId, V1 Hash -> (V2 Hash, Db.CausalHashId))) +-- matchCausalBranch1Dependencies description cb@(V1.Causal.rawHead -> b) = do +-- deps <- +-- traverse lookupObject . nubOrd $ +-- -- history +-- [Branch1 h | h <- V1.Causal.rawTails cb] +-- ++ +-- -- terms +-- [ Term1 (V1 h) +-- | Referent.Ref (Reference.Derived h _i _n) <- +-- (toList . Relation.dom . Star3.d1 . V1.Branch._termsR) b +-- ] +-- ++ [ Term1 (V1 h) +-- | Referent.Ref (Reference.Derived h _i _n) <- +-- (toList . Relation.dom . Star3.d1 . V1.Branch._termsR) b +-- ] +-- ++ +-- -- term metadata +-- [ Term1 (V1 h) +-- | Reference.Derived h _i _n <- +-- (map snd . toList . Relation.ran . Star3.d3 . V1.Branch._termsR) b +-- ] +-- ++ +-- -- types +-- [ Decl1 (V1 h) +-- | Reference.Derived h _i _n <- +-- (toList . Relation.dom . Star3.d1 . V1.Branch._typesR) b +-- ] +-- ++ +-- -- type metadata +-- [ Term1 (V1 h) +-- | Reference.Derived h _i _n <- +-- (map snd . toList . Relation.ran . Star3.d3 . 
V1.Branch._typesR) b +-- ] +-- ++ [Branch1 h | h <- toList (V1.Branch._childrenR b)] +-- ++ [Patch1 (V1 h) | h <- toList (V1.Branch._editsR b)] + +-- causalParents <- traverse lookupCausal (V1.Causal.rawTails cb) + +-- let (missingEntities, foundObjects) = partitionEithers deps +-- let (missingParents, foundParents) = partitionEithers causalParents + +-- error "todo" + +-- -- pure $ case missingEntities of +-- -- [] -> Right ( makeLookup foundObjects description +-- -- , makeCausalLookup foundParents description ) +-- -- missing -> Left missing + +-- makeCausalLookup :: [(V1 Hash, (V2 Hash, Db.CausalHashId))] -> String -> V1 Hash -> (V2 Hash, Db.CausalHashId) +-- makeCausalLookup l description a = +-- let m = Map.fromList l +-- in case Map.lookup a m of +-- Just b -> b +-- Nothing -> +-- error $ +-- "Somehow I don't have the CausalHashId for " +-- ++ show (Base32Hex.fromHash (runV1 a)) +-- ++ " in the map for " +-- ++ description + +-- makeLookup :: [(V1 Hash, (V2 Hash, Db.ObjectId))] -> String -> V1 Hash -> (V2 Hash, Db.ObjectId) +-- makeLookup l lookupDescription a = +-- let m = Map.fromList l +-- in case Map.lookup a m of +-- Just b -> b +-- Nothing -> +-- error $ +-- "Somehow I don't have the ObjectId for " +-- ++ show (Base32Hex.fromHash (runV1 a)) +-- ++ " in the map for " +-- ++ lookupDescription + +-- -- +-- createTypeSearchIndicesForReferent :: DB m => (V2.ReferentId Db.ObjectId) -> Type -> m () +-- createTypeSearchIndicesForReferent r typ = do +-- let typeForIndexing = Type.removeAllEffectVars typ + +-- -- add the term to the type index +-- typeReferenceForIndexing :: (V2.Reference Db.HashId) <- +-- saveReferenceAsReference2 (Type.toReference typeForIndexing) + +-- Db.addToFindByTypeIndex r typeReferenceForIndexing + +-- -- add the term to the type mentions index +-- typeMentionsForIndexing :: [V2.Reference Db.HashId] <- +-- traverse +-- saveReferenceAsReference2 +-- (toList $ Type.toReferenceMentions typeForIndexing) + +-- traverse_ 
(Db.addToFindByTypeMentionsIndex r) typeMentionsForIndexing +-- where +-- addTermToFindByTypeIndex :: DB m => (V2.ReferentId Db.ObjectId) -> Reference -> m () +-- addTermToFindByTypeIndex termRef typeRef = do +-- typeRef2 :: (V2.Reference Db.HashId) <- +-- saveReferenceAsReference2 typeRef +-- Db.addToFindByTypeIndex termRef typeRef2 +-- addTermToTypeMentionsIndex :: +-- (DB m, Foldable f) => (V2.ReferentId Db.ObjectId) -> f Reference -> m () +-- addTermToTypeMentionsIndex termRef typeRefs = do +-- typeRefs2 :: [V2.Reference Db.HashId] <- +-- traverse saveReferenceAsReference2 (toList typeRefs) +-- traverse_ (Db.addToFindByTypeMentionsIndex termRef) typeRefs2 + +-- createDependencyIndexForTerm :: DB m => V2.ReferenceId Db.ObjectId -> Term2 Db.ObjectId-> m () +-- createDependencyIndexForTerm tmRef@(V2.ReferenceId selfId _i) tm = error "todo" + +-- -- let +-- -- -- get the term dependencies +-- -- dependencies :: Set (Reference.ReferenceH Db.ObjectId) +-- -- dependencies = Term.dependencies $ Term.hmap (fromMaybe selfId) tm +-- -- -- and convert them to Reference2 +-- -- dependencies2 :: [V2.Reference Db.ObjectId] +-- -- dependencies2 = over Db.referenceTraversal id <$> toList dependencies +-- -- -- and then add all of these to the dependency index +-- -- in traverse_ (Db.addDependencyToIndex tmRef) dependencies2 + +-- createDependencyIndexForDecl :: DB m => V2.ReferenceId Db.ObjectId -> Decl2S -> m () +-- createDependencyIndexForDecl tmRef@(V2.ReferenceId selfId _i) decl = +-- traverse_ (Db.addDependencyToIndex tmRef) +-- . toList +-- . 
DD.declDependencies +-- $ DD.rmapDecl (fmap $ fromMaybe selfId) decl + +-- saveTermComponent :: DB m => V1 Hash -> V2 Hash -> Term2ComponentS -> m Db.ObjectId +-- saveTermComponent h1 h2 component = do +-- h1Id <- Db.saveHashByteString (runV1 h1) +-- h2Id <- Db.saveHashByteString (runV2 h2) +-- o <- Db.saveObject h2Id V2.TermComponent blob +-- Db.saveHashObject h1Id o 1 +-- Db.saveHashObject h2Id o 2 +-- pure o +-- where +-- blob = S.putBytes (S.V1.putFoldable V2.putTerm) component + +-- saveDeclComponent :: DB m => Db.HashId -> [Decl2S] -> m Db.ObjectId +-- saveDeclComponent h component = error "todo" -- do +-- -- o <- Db.saveObject h V2.DeclComponent blob +-- -- Db.saveHashObject h o 2 +-- -- pure o +-- -- where +-- -- blob = S.putBytes (S.V1.putFoldable (V2.putDecl putObjectId putV putA)) component + +-- savePatch :: DB m => Db.HashId -> Patch2S -> m () +-- savePatch h p = do +-- o <- Db.saveObject h V2.Patch (S.putBytes V2.putEdits p) +-- Db.saveHashObject h o 2 + +-- -- saveBranch :: DB m => Db.HashId -> + +-- -- | Loads a dir with format /base32-encoded-reference.id... +-- -- into a map from Hash to component references +-- componentMapForDir :: forall m. MonadIO m => FilePath -> m (Map (V1 Hash) [Reference.Id]) +-- componentMapForDir root = listDirectory root <&> foldl' insert mempty +-- where +-- insert m filename = case V1.componentIdFromString filename of +-- Nothing -> m -- skip silently +-- Just r@(Reference.Id h _i _n) -> +-- Map.unionWith (<>) m (Map.singleton (V1 h) [r]) + +-- existsObjectWithHash :: DB m => Hash -> m Bool +-- existsObjectWithHash h = Db.queryExists sql [Base32Hex.fromHash h] +-- where +-- sql = +-- [here| +-- SELECT 1 +-- FROM hash INNER JOIN hash_object ON hash.id = hash_object.hash_id +-- WHERE base32 = ? 
+-- |] + +-- -- | Given a V1 term component, convert and save it to the V2 codebase +-- -- Pre-requisite: all hash-identified entities in the V1 component have +-- -- already been converted and added to the V2 codebase, apart from self- +-- -- references. +-- convertTerm1 :: DB m => (V1 Hash -> (V2 Hash, Db.ObjectId)) -> V1 Hash -> [(Term, Type)] -> m () +-- convertTerm1 lookup hash1 v1component = do + +-- -- construct v2 term component for hashing +-- let v2componentH :: Term2ComponentH = +-- map (buildTerm2H (fst . lookup) hash1 . fst) v1component +-- -- note: we'd need some special care here if we want to make sure that this +-- -- hash function is identity for simple references +-- let hash2 = V2 (H.accumulate' v2componentH) + +-- -- construct v2 term component for serializing +-- let v2componentS :: [Term2 Db.ObjectId] = +-- map (buildTerm2S (snd . lookup) hash1 . fst) v1component + +-- -- serialize the v2 term component +-- componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2componentS + +-- -- construct v2 types for each component element, and save the types to the +-- -- to the indices +-- for_ (zip3 [0 ..] v1component v2componentS) $ \(i, (_term1, typ1), term2) -> do +-- let r = V2.ReferenceId componentObjectId i +-- let rt = V2.ReferentIdRef r + +-- saveTypeBlobForReferent rt (buildTermType2S (snd . lookup) typ1) +-- createTypeSearchIndicesForReferent rt typ1 +-- createDependencyIndexForTerm r term2 + +-- convertDecl1 :: DB m => (V1 Hash -> (V2 Hash, Db.ObjectId)) -> V1 Hash -> [Decl] -> m () +-- convertDecl1 lookup hash1 v1component = do +-- -- construct v2 decl component for hashing +-- let v2componentH :: Decl2ComponentH = +-- map (buildDecl2H (fst . lookup) hash1) v1component +-- let hash2 = V2 (H.hash v2componentH) + +-- let v2componentS :: Decl2ComponentS = +-- map (buildDecl2S (snd . 
lookup) hash1) v1component + +-- componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2ComponentS + +-- let v2componentI :: [Decl2I] = +-- map (buildDecl2I hash2) v2componentH + +-- for_ (zip v2componentI [0..]) $ \(decl2, i) -> do +-- let r = V2.ReferenceId componentObjectId i + +-- for_ (zip +-- (DD.constructorTypes (DD.asDataDecl decl2)) +-- [0..]) $ \(type2, j) -> do +-- let rt = V2.ReferentIdCon r j +-- saveTypeBlobForReferent rt type2 +-- createTypeSearchIndicesForReferent rt type1 -- type1 because `find` uses Hashes + +-- createDependencyIndexForDecl r decl2 + +-- convertCausalBranch1 :: +-- DB m => +-- (V1 Hash -> Db.ObjectId) -> +-- (V1 Hash -> (V2 Hash, Db.CausalHashId)) -> +-- -- -> V1 Hash +-- V1.Causal.Raw V1.Branch.Raw V1.Branch.Raw -> +-- m () +-- convertCausalBranch1 lookupObject lookupCausal causalBranch1 = error "todo" -- do +-- -- let branch1Hash = V1.currentHash causalBranch1 +-- -- rawBranch2 :: RawBranch = convertBranch1 (V1.rawHead causalBranch1) + +-- -- -- branch2Id <- Db.saveObject branch1Hash +-- -- branch2Hash :: V2 Hash = H.hash rawBranch2 +-- -- lookupObject <- pure () +-- -- -- rawCausal2 :: RawCausal = convertCausal1 +-- -- -- rawBranch2S +-- -- -- rawCausal2 :: RawCausal <- convertCausal1 lookup rawBranch2 (V1.rawTails causalBranch1) + +-- -- -- rawBranch2S +-- -- -- saveBranch2 rawBranch2 +-- -- -- saveCausal2 rawCausal2 +-- -- error "todo" +-- -- -- Due to switching reference types, and reference's hash's having previously +-- -- -- incorporated the extra `len` field, definitions and patches will not end up +-- -- -- having the same hashes as before. :-\ +-- -- -- This means we have to hash them again and perhaps store old and new hashes +-- -- -- separately. +-- -- where +-- -- indexBranch2S :: RawBranch -> RawBranch2S +-- -- indexBranch2S b = RawBranch +-- -- (Map.fromList +-- -- [((ns, over Db.referent2Traversal (lookupObject . V1) r), +-- -- Set.map (over Db.reference2Traversal (lookupObject . 
V1)) mds) +-- -- |((ns, r), mds) <- Map.toList (terms b)]) +-- -- (Map.fromList +-- -- [((ns, over Db.reference2Traversal (lookupObject . V1) r), +-- -- Set.map (over Db.reference2Traversal (lookupObject . V1)) mds) +-- -- |((ns, r), mds) <- Map.toList (types b)]) +-- -- (Map.fromList []) +-- -- (Map.fromList []) +-- -- -- <$> tms <*> tps <*> pchs <*> chn where +-- -- -- tms = Map.fromList <$> traverse indexTerm (Map.toList (terms b)) +-- -- -- indexTerm :: DB m +-- -- -- => ((NameSegment, Db.Referent2 Hash), Set (V2.Reference Hash)) +-- -- -- -> m ((NameSegment, Db.Referent2 Db.ObjectId), Set (V2.Reference Db.ObjectId)) +-- -- -- indexTerm ((ns, r), mds) = (,) <$> k <*> v where +-- -- -- k = (ns, over Db.referentTraversal lookupObject r) +-- -- -- v = Set.map + +-- -- convertBranch1 :: V1.Branch.Raw -> RawBranch +-- -- convertBranch1 b = RawBranch +-- -- -- terms +-- -- (Map.fromList +-- -- [ ((ns, over Db.referentTraversal id r), mdSet) +-- -- | (r, ns) <- Relation.toList . Star3.d1 $ V1.Branch._termsR b +-- -- , let mdSet :: Set (V2.Reference Hash) +-- -- mdSet = Set.fromList +-- -- . fmap (over Db.referenceTraversal id . snd) +-- -- . Set.toList +-- -- . Relation.lookupDom r +-- -- . Star3.d3 +-- -- $ V1.Branch._termsR b +-- -- ]) +-- -- -- types +-- -- (Map.fromList +-- -- [ ((ns, over Db.referenceTraversal id r), mdSet) +-- -- | (r, ns) <- Relation.toList . Star3.d1 $ V1.Branch._typesR b +-- -- , let mdSet :: Set (V2.Reference Hash) +-- -- mdSet = Set.fromList +-- -- . fmap (over Db.referenceTraversal id . snd) +-- -- . Set.toList +-- -- . Relation.lookupDom r +-- -- . Star3.d3 +-- -- $ V1.Branch._typesR b +-- -- ]) +-- -- -- patches +-- -- (V1.Branch._editsR b) +-- -- -- children +-- -- (runV2 . fst . lookupCausal . V1 . 
V1.unRawHash <$> V1.Branch._childrenR b) + +-- voidTermAnnotations :: +-- V1.TermR tmRef tpRef tmLink tpLink (V1.TypeR tpRef vt at) blankRepr ap v a -> +-- V1.TermR tmRef tpRef tmLink tpLink (V1.TypeR tpRef vt ()) Void () v () +-- voidTermAnnotations = +-- void . Term.extraMap id id id id void undefined (const ()) + +-- ----- graveyard +-- ---- |True if `h` (just the hash!) is interned in the DB +-- --knownHash :: DB m => Hash -> m Bool +-- --knownHash h = anyExists $ Db.query sql [Base32Hex.fromHash h] where +-- -- sql = [here| SELECT 1 FROM hash WHERE base32 = ? |] + +-- --saveReference :: DB m => ReferenceH h -> m Db.ReferenceId +-- --saveReference r = insert r >> fmap fromJust (loadReference r) where +-- -- insert = \case +-- -- Reference.Builtin t -> execute sql (Just t, Nothing) +-- -- Reference.DerivedId idH -> do +-- -- rdId <- saveReferenceDerived idH +-- -- Db.execute sql (Nothing, Just rdId) +-- -- sql = [here| +-- -- INSERT OR IGNORE INTO reference (builtin, reference_derived_id) +-- -- VALUES (?, ?) +-- -- |] + +-- --loadReferenceByHashId :: DB m => ReferenceH HashId -> m (Maybe ReferenceId) +-- --loadReferenceByHashId = \case +-- -- Reference.Builtin t -> queryMaybe sqlb (Only t) +-- -- Reference.DerivedId idH -> +-- -- loadReferenceDerivedByHashId idH >>= \case +-- -- Nothing -> pure Nothing +-- -- Just rdId -> queryMaybe sqld (Only rdId) +-- -- where +-- -- sqlb = [here| SELECT id FROM reference WHERE builtin = ? |] +-- -- sqld = [here| SELECT id FROM reference WHERE reference_derived_id = ? 
|] + +-- --saveReferenceDerived :: DB m => Reference.Id -> m Db.ReferenceDerivedId +-- --saveReferenceDerived r@(Reference.Id h i _n) = do +-- -- hashId <- saveHashByteString h +-- -- saveReferenceDerivedByHashId (Reference.IdH hashId i _n) +-- -- +-- --saveReferenceDerivedByHashId :: DB m => Reference.IdH Db.HashId -> m Db.ReferenceDerivedId +-- --saveReferenceDerivedByHashId r@(Reference.IdH hashId i _n) = +-- -- insert hashId i >> fmap fromJust (loadReferenceDerivedByHashId r) where +-- -- insert h i = liftIO $ execute sql (h, i) where +-- -- sql = [here| +-- -- INSERT OR IGNORE INTO reference_derived (hash_id, component_index) +-- -- VALUES (?, ?) +-- -- |] +-- -- +-- --loadReferenceDerivedByHashId :: DB m => Reference.IdH Db.HashId -> m (Maybe Db.ReferenceDerivedId) +-- --loadReferenceDerivedByHashId (Reference.IdH h i _n) = +-- -- queryMaybe sql (h, i) where +-- -- sql = [here| +-- -- SELECT id FROM reference_derived +-- -- WHERE hash_id = ? AND component_index = ? +-- -- |] + +-- --saveReferentDerived :: DB m => Referent.Id -> m ReferentDerivedId +-- --saveReferentDerived = error "todo" +-- --loadReferentDerived :: DB m => Referent.Id -> m (Maybe ReferentDerivedId) +-- --loadReferentDerived = error "todo" +-- -- +-- --saveReferentDerivedByReferenceDerivedId :: DB m => Referent' ReferenceDerivedId -> m ReferentDerivedId +-- --saveReferentDerivedByReferenceDerivedId r = do +-- -- liftIO $ execute sql r +-- -- fmap fromJust (loadReferenceDerivedByReferenceDerivedId r) +-- -- where +-- -- sql = [here| +-- -- INSERT OR IGNORE INTO referent_derived +-- -- (reference_derived_id, constructor_id, constructor_type) +-- -- VALUES (?, ?, ?) +-- -- |] +-- --loadReferentDerivedByReferenceDerivedId :: DB m => Referent' ReferenceDerivedId -> m (Maybe ReferentDerivedId) +-- --loadReferentDerivedByReferenceDerivedId r = queryMaybe . query sql r where +-- -- sql = [here| +-- -- SELECT id FROM referent_derived +-- -- WHERE reference_derived_id = ? +-- -- AND constructor_id = ? 
+-- -- AND constructor_type = ? +-- -- |] + +-- buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> Term -> Term2 Hash +-- buildTerm2H lookup hash1 = +-- voidTermAnnotations . Term.rmap +-- (over Db.referenceTraversal (fmap runV2 . lookupTerm . V1)) +-- (over Db.referenceTraversal (runV2 . lookupType . V1)) +-- ( over Db.referent2ConTraversal (runV2 . lookupType . V1) +-- . over Db.referentRefTraversal (fmap runV2 . lookupTerm . V1) +-- ) +-- where +-- lookupTerm :: V1 Hash -> Maybe (V2 Hash) +-- lookupTerm h | h == hash1 = Nothing +-- lookupTerm h = Just (lookup h) +-- lookupType :: V1 Hash -> V2 Hash +-- lookupType = lookup + +-- buildTerm2S :: (V1 Hash -> Db.ObjectId) -> V1 Hash -> Term -> Term2 Db.ObjectId +-- buildTerm2S lookup hash1 = +-- voidTermAnnotations . Term.rmap +-- (over Db.referenceTraversal (lookupTerm . V1)) +-- (over Db.referenceTraversal (lookupType . V1)) +-- ( over Db.referent2ConTraversal (lookupType . V1) +-- . over Db.referentRefTraversal (lookupTerm . V1) +-- ) +-- where +-- lookupTerm :: V1 Hash -> Maybe Db.ObjectId +-- lookupTerm h | h == hash1 = Nothing +-- lookupTerm h = Just (lookup h) +-- lookupType :: V1 Hash -> Db.ObjectId +-- lookupType = lookup + +-- buildTermType2S :: (V1 Hash -> Db.ObjectId) -> Type -> Type2S +-- buildTermType2S lookup = +-- void . Type.rmap (over Db.referenceTraversal (lookup . V1)) + +-- buildDecl2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> Decl -> Decl2 Hash +-- buildDecl2H lookup = +-- void . DD.rmapDecl (over Db.referenceTraversal (fmap runV2 . lookup' . V1)) +-- where +-- lookup' :: V1 Hash -> Maybe (V2 hash) +-- lookup' h | h == hash1 = Nothing +-- lookup' h = Just (lookup h) + +-- buildDecl2I :: V2 Hash -> Decl2 Hash -> Decl2I +-- buildDecl2I self = +-- DD.rmapDecl (over Db.reference2Traversal (fmap runV2 . fromMaybe self . 
V2)) diff --git a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal new file mode 100644 index 0000000000..f590bddc02 --- /dev/null +++ b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal @@ -0,0 +1,60 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-codebase-convert-1to2 +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +executable uconvert12 + -- import: unison-common + main-is: Main.hs + hs-source-dirs: app + ghc-options: -Wall -threaded -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -rtsopts -with-rtsopts=-I0 -optP-Wno-nonportable-include-path + -- other-modules: + build-depends: + base, + -- containers, + -- configurator, + -- directory, + -- errors, + -- filepath, + -- megaparsec, + -- safe, + -- shellmet, + -- template-haskell, + -- temporary, + -- text, + unison-codebase-convert-1to2 + +library + hs-source-dirs: lib + exposed-modules: + U.Codebase.Convert.SyncV1V2 + -- other-modules: + -- other-extensions: + build-depends: + base, + bytes, + bytestring, + containers, + extra, + here, + lens, + mtl, + safe, + text, + sqlite-simple, + unliftio, + unison-core, + unison-codebase1, + unison-codebase-sqlite, + unison-util + default-language: Haskell2010 diff --git a/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs b/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs index c3585d1460..ed75abf149 100644 --- a/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs +++ b/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs @@ -22,8 +22,8 @@ type MetadataValue = Reference -- `(Type, Value)` is the metadata value itself along with its type. 
type Star r n = Star3 r n MetadataType (MetadataType, MetadataValue) -newtype EditHash = EditHash Hash.Hash -newtype BranchHash = BranchHash Hash.Hash deriving Show +newtype EditHash = EditHash Hash.Hash deriving (Eq, Ord, Show) +newtype BranchHash = BranchHash Hash.Hash deriving (Eq, Ord, Show) -- The raw Branch data Raw = Raw diff --git a/hie.yaml b/hie.yaml index 451395ac90..039b9feec1 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,5 +1,11 @@ cradle: stack: + - path: "codebase-convert-1to2/app/Main.hs" + component: "unison-codebase-convert-1to2:exe:uconvert12" + + - path: "codebase-convert-1to2/lib" + component: "unison-codebase-convert-1to2:lib" + - path: "codebase1/codebase/." component: "unison-codebase1:lib" diff --git a/stack.yaml b/stack.yaml index 512f61f79c..b566938bd6 100644 --- a/stack.yaml +++ b/stack.yaml @@ -8,6 +8,7 @@ packages: # - yaks/easytest # - parser-typechecker # - unison-core +- codebase-convert-1to2 - codebase1/codebase - codebase2/codebase - codebase2/codebase-sqlite From ae3ea8a68917ecce47841e07122d22e480598eae Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 5 Oct 2020 22:45:42 -0400 Subject: [PATCH 010/225] wip --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 675 +++++++++++------- .../unison-codebase-convert-1to2.cabal | 4 +- codebase1/codebase/Unison/Codebase/V1/ABT.hs | 62 +- .../Unison/Codebase/V1/FileCodebase.hs | 105 +-- .../codebase/Unison/Codebase/V1/Reference.hs | 2 + .../Unison/Codebase/V1/Serialization/V1.hs | 41 +- .../U/Codebase/Sqlite/Queries.hs | 22 + codebase2/codebase/U/Codebase/Referent.hs | 1 - codebase2/codebase/U/Codebase/Term.hs | 68 +- codebase2/codebase/U/Codebase/Type.hs | 3 +- codebase2/core/U/Core/ABT.hs | 10 + codebase2/util/U/Util/Base32Hex.hs | 18 +- codebase2/util/U/Util/Components.hs | 48 ++ codebase2/util/U/Util/Monoid.hs | 27 + codebase2/util/unison-util.cabal | 4 +- 15 files changed, 735 insertions(+), 355 deletions(-) create mode 100644 codebase2/util/U/Util/Components.hs create mode 100644 
codebase2/util/U/Util/Monoid.hs diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index be386f8a90..64607b5a69 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -1,16 +1,83 @@ +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE BlockArguments #-} + +{-# OPTIONS_GHC -Wno-incomplete-patterns #-} +{-# OPTIONS_GHC -Wno-unused-imports #-} +{-# OPTIONS_GHC -Wno-unused-do-bind #-} +{-# OPTIONS_GHC -Wno-unused-local-binds #-} +{-# OPTIONS_GHC -Wno-unused-matches #-} + module U.Codebase.Convert.SyncV1V2 where import qualified U.Util.Hashable as H import Database.SQLite.Simple.FromField (FromField) import U.Util.Hash (Hash) +import qualified Unison.Codebase.V1.FileCodebase as V1 import qualified Unison.Codebase.V1.Branch.Raw as V1 import Data.Text (Text) import Database.SQLite.Simple.ToField (ToField) import qualified Unison.Codebase.V1.Reference as V1.Reference +import UnliftIO (liftIO, MonadIO) +import Database.SQLite.Simple (Connection) +import qualified Database.SQLite.Simple as SQLite +import Control.Monad.Except (MonadError, throwError, runExceptT) +import Control.Monad.Reader (ReaderT(runReaderT)) +import qualified U.Util.Base32Hex as Base32Hex +import qualified U.Util.Hash as Hash +import qualified Data.Text as Text +import UnliftIO.Directory (listDirectory) +import Data.Functor ((<&>)) +import qualified U.Codebase.Reference as V2.Reference +import qualified U.Codebase.Sqlite.Reference as V2S.Reference +import Unison.Codebase.V1.FileCodebase (CodebasePath) +import U.Codebase.Sqlite.Queries (DB) +import Data.Map (Map) +import qualified Data.Map as Map 
+import Control.Monad.Extra (ifM) +import Data.String.Here.Uninterpolated (here) +import qualified U.Codebase.Sqlite.Queries as Db +import qualified Unison.Codebase.V1.Symbol as V1.Symbol +import qualified Unison.Codebase.V1.Term as V1.Term +import qualified Unison.Codebase.V1.Type as V1.Type +import qualified Unison.Codebase.V1.DataDeclaration as V1.DD +import qualified U.Codebase.Sqlite.Term.Format as V2.TermFormat +import qualified U.Codebase.Term as V2.Term +import qualified U.Codebase.Sqlite.Symbol as V2.Symbol +import qualified U.Codebase.Type as V2.Type +import qualified U.Codebase.Decl as V2.Decl +import qualified U.Codebase.Sqlite.Decl.Format as V2.DeclFormat +import qualified Unison.Codebase.V1.FileCodebase as V1.FC +import qualified Unison.Codebase.V1.Serialization.Serialization as V1.S +import qualified Unison.Codebase.V1.Serialization.V1 as V1.S +import Data.Bytes.Get (MonadGet) +import Data.Foldable (for_, Foldable(toList)) +import Data.Traversable (for) +import qualified Unison.Codebase.V1.LabeledDependency as V1.LD +import Data.List.Extra (nubOrd) +import Data.Either (partitionEithers) +import U.Util.Base32Hex (Base32Hex) +import Data.Bifunctor (Bifunctor(first)) +import qualified U.Codebase.Referent as V2.Referent +import qualified U.Codebase.Sqlite.ObjectType as V2.OT +import qualified U.Util.Serialization as S +import qualified U.Codebase.Sqlite.Serialization as S.V2 +import U.Util.Monoid (foldMapM) +import Data.Foldable (Foldable(foldl')) +import qualified Unison.Codebase.V1.ABT as V1.ABT +import qualified Data.Set as Set +import qualified Unison.Codebase.V1.Type.Kind as V1.Kind +import qualified U.Codebase.Kind as V2.Kind +import qualified U.Core.ABT as V2.ABT +import qualified Unison.Codebase.V1.Term.Pattern as V1.Pattern +import qualified Unison.Codebase.V1.Referent as V1.Referent newtype V1 a = V1 {runV1 :: a} deriving (Eq, Ord, Show) @@ -21,16 +88,16 @@ newtype V2 a = V2 {runV2 :: a} data V1EntityRef = Decl1 (V1 Hash) -- this will refer to 
the whole component | Term1 (V1 Hash) -- ditto - | Patch1 (V1 V1.EditHash) - | Branch1 (V1 V1.BranchHash) + | Patch1 V1.EditHash + | Branch1 V1.BranchHash deriving (Eq, Ord, Show) v1EntityRefToHash :: V1EntityRef -> V1 Hash v1EntityRefToHash = \case Decl1 h -> h Term1 h -> h - Patch1 (V1 (V1.EditHash h)) -> V1 h - Branch1 (V1 (V1.BranchHash h)) -> V1 h + Patch1 (V1.EditHash h) -> V1 h + Branch1 (V1.BranchHash h) -> V1 h newtype Base32HexPrefix = Base32HexPrefix Text deriving (Show) via Text @@ -41,6 +108,13 @@ newtype Base32HexPrefix = Base32HexPrefix Text -- newtype NamespaceHash h = NamespaceHash h newtype CausalHash h = CausalHash h +-- 1. Load a V1 component (Hash, [V1Term]) +-- 2. Convert its dependencies before continuing +-- 3. Construct the hashable data structure +-- 4. Serialize the hashable data structure + +-- unhashTermComponent :: V1 Hash -> [V1Term.Term] -> [V2.Term Symbol] + -- -- |things that appear in a deserialized RawBranch -- type V2EntityRef = -- V2EntityRefH @@ -49,20 +123,20 @@ newtype CausalHash h = CausalHash h -- (NamespaceHash Hash) -- (CausalHash Hash) --- -- -- |things that appear in a serialized RawBranch --- -- type V2EntityRefS = --- -- V2EntityRefH --- -- Db.ObjectId --- -- (PatchHash Db.ObjectId) --- -- (NamespaceHash Db.NamespaceHashId) --- -- (CausalHash Db.CausalHashId) - --- -- data V2EntityRefH hr hp hn hc --- -- = Decl2 V2.ReferenceId --- -- | Term2 Reference.Id --- -- | Patch2 PatchHash --- -- | NamespaceHash2 NamespaceHash --- -- | CausalHash2 CausalHash +-- |things that appear in a serialized RawBranch +-- type V2EntityRefS = +-- V2EntityRef +-- Db.ObjectId +-- (PatchHash Db.ObjectId) +-- (NamespaceHash Db.NamespaceHashId) +-- (CausalHash Db.CausalHashId) + +-- data V2EntityRef hr hp hn hc +-- = Decl2 V2.Reference.Id +-- | Term2 V2.Reference.Id +-- | Patch2 PatchHash +-- | NamespaceHash2 V2NamespaceHash +-- | CausalHash2 CausalHash -- initializeV2DB :: MonadIO m => m () -- initializeV2DB = error "todo" @@ -82,40 +156,39 
@@ data FatalError | InvalidTypeOfTerm V1.Reference.Id | InvalidDecl V1.Reference.Id --- type Type = Type.Type Symbol Ann +type V1Type = V1.Type.Type V1.Symbol.Symbol () +type V1Term = V1.Term.Term V1.Symbol.Symbol () +type V1Decl = V1.DD.Decl V1.Symbol.Symbol () --- type Term = Term.Term Symbol Ann +type V2TermH = V2.Term.Term V2.Symbol.Symbol +type V2TypeT = V2.Type.TypeT V2.Symbol.Symbol +type V2TermComponentH = [V2TermH] +type V2TermComponentS = V2.TermFormat.LocallyIndexedComponent --- type Decl = DD.Decl Symbol Ann +type V2DeclH = V2.Decl.Decl V2.Symbol.Symbol +type V2DeclComponentH = [V2DeclH] +type V2DeclComponentS = V2.DeclFormat.LocallyIndexedComponent -- type Patch = Patch.Patch V1.Reference - -- -- the H stands for "for hashing" -- -- the S stands for "for serialization" -- type Term2ComponentH = [Term2 Hash] - -- type Term2ComponentS = [Term2 Db.ObjectId] - -- type Decl2ComponentH = [Decl2 (Maybe Hash)] - -- type Decl2S = Decl2 Db.ObjectId -- type Decl2ComponentS = [Decl2S] -- -- these have maybes in them to indicate a self-component reference -- type Term2 h = V2.Term h - -- type Decl2 h = DD.DeclR (V2.Reference h) Symbol () -- -- for indexing -- type Decl2I = DD.DeclR (V2.Reference Db.ObjectId) Symbol () - -- type Term2S = Term2 Db.ObjectId - -- type Type2S = V2.Type Db.ObjectId -- -- what about referent types in the index? - -- -- type CtorType2S = Type.TypeH Db.ObjectId Symbol Ann -- -- type Term2S = Term.TermH (Maybe Db.ObjectId) Symbol Ann -- type Patch2S = Patch.Patch (V2.Reference Db.ObjectId) @@ -142,76 +215,76 @@ data FatalError -- putA :: S.Put Ann -- putA = S.put fmtA --- -- todo: this just converts a whole codebase, which we need to do locally \ --- -- but we also want some code that imports just a particular sub-branch. --- syncV1V2 :: forall m. MonadIO m => Connection -> CodebasePath -> m (Either FatalError ()) --- syncV1V2 c rootDir = liftIO $ SQLite.withTransaction c . runExceptT . 
flip runReaderT c $ do --- v1RootHash <- getV1RootBranchHash rootDir >>= maybe (throwError NoRootBranch) pure --- -- starting from the root namespace, convert all entities you can find --- convertEntities [Branch1 v1RootHash] --- v2RootHash <- v2CausalHashForV1BranchHash v1RootHash --- setV2Root v2RootHash --- error "todo: compressEntities and vacuum db" v2RootHash - --- -- Incorporating diff construction into the conversion is tough because --- -- a) I was thinking we'd represent an older version as a diff against the --- -- newer version, but the newer version hasn't been fully constructed --- -- until the older versions have been converted and hashed. --- -- b) If we just store all the old versions uncompressed, it might be too big. --- -- (But no bigger than the v1 db.) But if that is okay, we can compress and --- -- vacuum them afterwards. - --- pure () --- where --- setV2Root = error "todo: setV2Root" --- v2CausalHashForV1BranchHash = error "todo: v2CausalHashForV1BranchHash" --- convertEntities :: --- forall m. 
--- DB m => --- MonadError FatalError m => --- [V1EntityRef] -> --- m () --- convertEntities [] = pure () --- convertEntities all@(h : rest) = do --- termDirComponents <- componentMapForDir (V1.termsDir rootDir) --- declsDirComponents <- componentMapForDir (V1.typesDir rootDir) --- case h of --- Term1 h -> --- -- if this hash is already associated to an object --- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do --- -- load a cycle from disk --- e <- loadTerm1 rootDir termDirComponents h --- matchTerm1Dependencies h e >>= \case --- Left missing -> convertEntities (missing ++ all) --- Right lookup -> do --- convertTerm1 lookup h e --- convertEntities rest --- Decl1 h -> --- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do --- d <- loadDecl1 rootDir declsDirComponents h --- matchDecl1Dependencies h d >>= \case --- Left missing -> convertEntities (missing ++ all) --- Right lookup -> do --- convertDecl1 (error "todo: lookup") h d --- convertEntities rest --- Patch1 h -> --- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do --- p <- loadPatch1 rootDir h --- matchPatch1Dependencies ("patch " ++ show h) p >>= \case --- Left missing -> convertEntities (missing ++ all) --- Right lookup -> do --- -- hashId <- Db.saveHashByteString (runV1 h) --- -- savePatch hashId (Patch.hmap (lookup . V1) p) --- error "todo" --- convertEntities rest --- Branch1 (V1.unRawHash -> h) -> --- ifM (existsObjectWithHash h) (convertEntities rest) $ do --- cb <- loadCausalBranch1 rootDir (V1 h) --- matchCausalBranch1Dependencies ("branch " ++ show h) cb >>= \case --- Left missing -> convertEntities (missing ++ all) --- Right (lookupObject, lookupCausal) -> do --- convertCausalBranch1 lookupObject lookupCausal cb --- convertEntities rest +-- todo: this just converts a whole codebase, which we need to do locally \ +-- but we also want some code that imports just a particular sub-branch. +syncV1V2 :: forall m. 
MonadIO m => Connection -> CodebasePath -> m (Either FatalError ()) +syncV1V2 c rootDir = liftIO $ SQLite.withTransaction c . runExceptT . flip runReaderT c $ do + v1RootHash <- getV1RootBranchHash rootDir >>= maybe (throwError NoRootBranch) pure + -- starting from the root namespace, convert all entities you can find + convertEntities [Branch1 v1RootHash] + v2RootHash <- v2CausalHashForV1BranchHash v1RootHash + setV2Root v2RootHash + error "todo: compressEntities and vacuum db" v2RootHash + + -- Incorporating diff construction into the conversion is tough because + -- a) I was thinking we'd represent an older version as a diff against the + -- newer version, but the newer version hasn't been fully constructed + -- until the older versions have been converted and hashed. + -- b) If we just store all the old versions uncompressed, it might be too big. + -- (But no bigger than the v1 db.) But if that is okay, we can compress and + -- vacuum them afterwards. + + pure () + where + setV2Root = error "todo: setV2Root" + v2CausalHashForV1BranchHash = error "todo: v2CausalHashForV1BranchHash" + convertEntities :: + forall m. 
+ DB m => + MonadError FatalError m => + [V1EntityRef] -> + m () + convertEntities [] = pure () + convertEntities all@(h : rest) = do + termDirComponents <- componentMapForDir (V1.termsDir rootDir) + declsDirComponents <- componentMapForDir (V1.typesDir rootDir) + case h of + Term1 h -> + -- if this hash is already associated to an object + ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do + -- load a cycle from disk + e <- loadTerm1 rootDir termDirComponents h + matchTerm1Dependencies h e >>= \case + Left missing -> convertEntities (missing ++ all) + Right (getHash, getObjId, getTextId) -> do + convertTerm1 getHash getObjId getTextId h e + convertEntities rest + -- Decl1 h -> + -- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do + -- d <- loadDecl1 rootDir declsDirComponents h + -- matchDecl1Dependencies h d >>= \case + -- Left missing -> convertEntities (missing ++ all) + -- Right lookup -> do + -- convertDecl1 (error "todo: lookup") h d + -- convertEntities rest + -- Patch1 h -> + -- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do + -- p <- loadPatch1 rootDir h + -- matchPatch1Dependencies ("patch " ++ show h) p >>= \case + -- Left missing -> convertEntities (missing ++ all) + -- Right lookup -> do + -- -- hashId <- Db.saveHashByteString (runV1 h) + -- -- savePatch hashId (Patch.hmap (lookup . 
V1) p) + -- error "todo" + -- convertEntities rest + -- Branch1 (V1.BranchHash h) -> + -- ifM (existsObjectWithHash h) (convertEntities rest) $ do + -- cb <- loadCausalBranch1 rootDir (V1 h) + -- matchCausalBranch1Dependencies ("branch " ++ show h) cb >>= \case + -- Left missing -> convertEntities (missing ++ all) + -- Right (lookupObject, lookupCausal) -> do + -- convertCausalBranch1 lookupObject lookupCausal cb + -- convertEntities rest -- -- | load a causal branch raw thingo -- loadCausalBranch1 :: @@ -320,18 +393,18 @@ data FatalError -- H.accumulateToken (parents c) -- ] --- getV1RootBranchHash :: MonadIO m => CodebasePath -> m (Maybe V1.Branch.Hash) --- getV1RootBranchHash root = listDirectory (V1.branchHeadDir root) <&> \case --- [single] -> Just . V1.Branch.Hash . Hash.unsafeFromBase32Hex $ Text.pack single --- _ -> Nothing +getV1RootBranchHash :: MonadIO m => CodebasePath -> m (Maybe V1.BranchHash) +getV1RootBranchHash root = listDirectory (V1.branchHeadDir root) <&> \case + [single] -> Just . V1.BranchHash . Hash.fromBase32Hex . Base32Hex.UnsafeBase32Hex $ Text.pack single + _ -> Nothing --- -- | Look for an ObjectId corresponding to the provided V1 hash. --- -- Returns Left if not found. --- lookupObject :: DB m => V1EntityRef -> m (Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))) --- lookupObject r@(runV1 . v1EntityRefToHash -> h) = --- getObjectIdByBase32Hex (Base32Hex.fromHash h) <&> \case --- Nothing -> Left r --- Just i -> Right (V1 h, i) +-- | Look for an ObjectId corresponding to the provided V1 hash. +-- Returns Left if not found. +lookupObject :: DB m => V1EntityRef -> m (Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))) +lookupObject r@(runV1 . v1EntityRefToHash -> h) = + getObjectIdByBase32Hex (Hash.toBase32Hex h) <&> \case + Nothing -> Left r + Just i -> Right (V1 h, i) -- -- | Look for a CausalHashId corresponding to the provided V1 hash. -- -- Returns Left if not found. 
@@ -356,19 +429,10 @@ data FatalError -- let blob = S.putBytes (S.V1.putTypeR (V2.putReference V2.putObjectId) putV V2.putUnit) type2s -- in Db.saveTypeOfReferent r blob --- -- | Multiple hashes can map to a single object! --- getObjectIdByBase32Hex :: DB m => Base32Hex -> m (Maybe (V2 Hash, Db.ObjectId)) --- getObjectIdByBase32Hex h = --- fmap (first (V2 . Base32Hex.toHash)) <$> Db.queryMaybe sql (Only h) --- where --- sql = --- [here| --- SELECT object.id --- FROM hash --- INNER JOIN hash_object ON hash_object.hash_id = hash.id --- INNER JOIN object ON hash_object.object_id = object.id --- WHERE hash.base32 = ? --- |] +-- | Multiple hashes can map to a single object! +getObjectIdByBase32Hex :: DB m => Base32Hex -> m (Maybe (V2 Hash, Db.ObjectId)) +getObjectIdByBase32Hex h = + fmap (first (V2 . Hash.fromBase32Hex)) <$> Db.objectAndPrimaryHashByAnyHash h -- augmentLookup :: Ord a => (a -> b) -> Map a b -> a -> b -- augmentLookup f m a = fromMaybe (f a) (Map.lookup a m) @@ -376,30 +440,30 @@ data FatalError -- saveReferenceAsReference2 :: DB m => Reference -> m (V2.Reference Db.HashId) -- saveReferenceAsReference2 = mapMOf Db.referenceTraversal Db.saveHashByteString --- -- | load a term component by its hash. --- -- A v1 term component is split across an arbitrary number of files. --- -- We have to 1) figure out what all the filenames are (preloaded into --- -- `termDirComponents`), 2) load them all, --- loadTerm1 :: --- MonadIO m => --- MonadError FatalError m => --- CodebasePath -> --- Map (V1 Hash) [Reference.Id] -> --- V1 Hash -> --- m [(Term, Type)] --- loadTerm1 rootDir componentsFromDir h = case Map.lookup h componentsFromDir of --- Nothing -> throwError $ MissingTermHash h --- Just set -> case toList set of --- [] -> error "Just [] shouldn't occur here." --- Reference.Id h _i n : _etc -> for [0 .. 
n -1] \i -> do --- let r = Reference.Id h i n --- term <- --- V1.FC.getTerm (S.get fmtV) (S.get fmtA) rootDir r --- >>= maybe (throwError $ MissingTerm r) pure --- typeOfTerm <- --- V1.FC.getTypeOfTerm (S.get fmtV) (S.get fmtA) rootDir r --- >>= maybe (throwError $ MissingTypeOfTerm r) pure --- pure (term, typeOfTerm) +-- | load a term component by its hash. +-- A v1 term component is split across an arbitrary number of files. +-- We have to 1) figure out what all the filenames are (preloaded into +-- `termDirComponents`), 2) load them all, +loadTerm1 :: + MonadIO m => + MonadError FatalError m => + CodebasePath -> + Map (V1 Hash) [V1.Reference.Id] -> + V1 Hash -> + m [(V1Term, V1Type)] +loadTerm1 rootDir componentsFromDir h = case Map.lookup h componentsFromDir of + Nothing -> throwError $ MissingTermHash h + Just set -> case toList set of + [] -> error "Just [] shouldn't occur here." + V1.Reference.Id h _i n : _etc -> for [0 .. n -1] \i -> do + let r = V1.Reference.Id h i n + term <- + V1.FC.getTerm rootDir r + >>= maybe (throwError $ MissingTerm r) pure + typeOfTerm <- + V1.FC.getTypeOfTerm rootDir r + >>= maybe (throwError $ MissingTypeOfTerm r) pure + pure (term, typeOfTerm) -- loadDecl1 :: -- MonadIO m => @@ -429,36 +493,37 @@ data FatalError -- ) -- (throwError $ MissingPatch h) --- -- 3) figure out what their combined dependencies are --- matchTerm1Dependencies :: --- DB m => --- V1 Hash -> --- [(Term, Type)] -> --- m (Either [V1EntityRef] (V1 Hash -> (V2 Hash, Db.ObjectId))) --- matchTerm1Dependencies componentHash tms = --- let -- Get a list of Eithers corresponding to the non-self dependencies of this term. --- lookupDependencyObjects :: --- DB m => (Term, Type) -> m [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] --- lookupDependencyObjects (term, typeOfTerm) = traverse lookupObject deps --- where --- (termTypeDeps, termTermDeps) = --- partitionEithers --- . map LD.toReference --- . 
toList --- $ Term.labeledDependencies term --- deps = --- nubOrd $ --- [Decl1 (V1 h) | Reference.Derived h _i _n <- toList $ Type.dependencies typeOfTerm] --- <> [Decl1 (V1 h) | Reference.Derived h _i _n <- termTypeDeps] --- <> [ Term1 (V1 h) | Reference.Derived h _i _n <- termTermDeps, h /= runV1 componentHash -- don't include self-refs 😬 --- ] --- in do --- -- check the lefts, if empty then everything is on the right; --- -- else return left. --- (missing, found) <- partitionEithers <$> foldMapM lookupDependencyObjects tms --- pure $ case missing of --- [] -> Right (makeLookup found $ "term " ++ show componentHash) --- missing -> Left missing +-- 3) figure out what their combined dependencies are +matchTerm1Dependencies :: + DB m => + V1 Hash -> + [(V1Term, V1Type)] -> + m (Either [V1EntityRef] (V1 Hash -> V2 Hash, V2 Hash -> Db.ObjectId, Text -> Db.TextId)) +matchTerm1Dependencies componentHash tms = + let -- Get a list of Eithers corresponding to the non-self dependencies of this term. + lookupDependencyObjects :: + DB m => (V1Term, V1Type) -> m [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] + lookupDependencyObjects (term, typeOfTerm) = traverse lookupObject deps + where + (termTypeDeps, termTermDeps) = + partitionEithers + . map V1.LD.toReference + . toList + $ V1.Term.labeledDependencies term + deps = + nubOrd $ + [Decl1 (V1 h) | V1.Reference.Derived h _i _n <- toList $ V1.Type.dependencies typeOfTerm] + <> [Decl1 (V1 h) | V1.Reference.Derived h _i _n <- termTypeDeps] + <> [ Term1 (V1 h) | V1.Reference.Derived h _i _n <- termTermDeps, h /= runV1 componentHash -- don't include self-refs 😬 + ] + in do + -- check the lefts, if empty then everything is on the right; + -- else return left. 
+ (missing, found) <- partitionEithers <$> foldMapM lookupDependencyObjects tms + -- pure $ case missing of + -- [] -> Right (makeLookup found $ "term " ++ show componentHash) + -- missing -> Left missing + error "todo" -- matchDecl1Dependencies :: -- DB m => V1 Hash -> [Decl] -> m (Either [V1EntityRef] (V1 Hash -> Db.ObjectId)) @@ -558,17 +623,17 @@ data FatalError -- ++ " in the map for " -- ++ description --- makeLookup :: [(V1 Hash, (V2 Hash, Db.ObjectId))] -> String -> V1 Hash -> (V2 Hash, Db.ObjectId) --- makeLookup l lookupDescription a = --- let m = Map.fromList l --- in case Map.lookup a m of --- Just b -> b --- Nothing -> --- error $ --- "Somehow I don't have the ObjectId for " --- ++ show (Base32Hex.fromHash (runV1 a)) --- ++ " in the map for " --- ++ lookupDescription +makeLookup :: [(V1 Hash, (V2 Hash, Db.ObjectId))] -> String -> V1 Hash -> (V2 Hash, Db.ObjectId) +makeLookup l lookupDescription a = + let m = Map.fromList l + in case Map.lookup a m of + Just b -> b + Nothing -> + error $ + "Somehow I don't have the ObjectId for " + ++ show (Hash.toBase32Hex (runV1 a)) + ++ " in the map for " + ++ lookupDescription -- -- -- createTypeSearchIndicesForReferent :: DB m => (V2.ReferentId Db.ObjectId) -> Type -> m () @@ -621,16 +686,16 @@ data FatalError -- . 
DD.declDependencies -- $ DD.rmapDecl (fmap $ fromMaybe selfId) decl --- saveTermComponent :: DB m => V1 Hash -> V2 Hash -> Term2ComponentS -> m Db.ObjectId --- saveTermComponent h1 h2 component = do --- h1Id <- Db.saveHashByteString (runV1 h1) --- h2Id <- Db.saveHashByteString (runV2 h2) --- o <- Db.saveObject h2Id V2.TermComponent blob --- Db.saveHashObject h1Id o 1 --- Db.saveHashObject h2Id o 2 --- pure o --- where --- blob = S.putBytes (S.V1.putFoldable V2.putTerm) component +saveTermComponent :: DB m => V1 Hash -> V2 Hash -> V2TermComponentS -> m Db.ObjectId +saveTermComponent h1 h2 component = do + h1Id <- Db.saveHashHash (runV1 h1) + h2Id <- Db.saveHashHash (runV2 h2) + o <- Db.saveObject h2Id V2.OT.TermComponent blob + Db.saveHashObject h1Id o 1 + Db.saveHashObject h2Id o 2 + pure o + where + blob = S.putBytes S.V2.putTermFormat (V2.TermFormat.Term component) -- saveDeclComponent :: DB m => Db.HashId -> [Decl2S] -> m Db.ObjectId -- saveDeclComponent h component = error "todo" -- do @@ -647,56 +712,170 @@ data FatalError -- -- saveBranch :: DB m => Db.HashId -> --- -- | Loads a dir with format /base32-encoded-reference.id... --- -- into a map from Hash to component references --- componentMapForDir :: forall m. MonadIO m => FilePath -> m (Map (V1 Hash) [Reference.Id]) --- componentMapForDir root = listDirectory root <&> foldl' insert mempty --- where --- insert m filename = case V1.componentIdFromString filename of --- Nothing -> m -- skip silently --- Just r@(Reference.Id h _i _n) -> --- Map.unionWith (<>) m (Map.singleton (V1 h) [r]) - --- existsObjectWithHash :: DB m => Hash -> m Bool --- existsObjectWithHash h = Db.queryExists sql [Base32Hex.fromHash h] --- where --- sql = --- [here| --- SELECT 1 --- FROM hash INNER JOIN hash_object ON hash.id = hash_object.hash_id --- WHERE base32 = ? 
--- |] - --- -- | Given a V1 term component, convert and save it to the V2 codebase --- -- Pre-requisite: all hash-identified entities in the V1 component have --- -- already been converted and added to the V2 codebase, apart from self- --- -- references. --- convertTerm1 :: DB m => (V1 Hash -> (V2 Hash, Db.ObjectId)) -> V1 Hash -> [(Term, Type)] -> m () --- convertTerm1 lookup hash1 v1component = do - --- -- construct v2 term component for hashing --- let v2componentH :: Term2ComponentH = --- map (buildTerm2H (fst . lookup) hash1 . fst) v1component --- -- note: we'd need some special care here if we want to make sure that this --- -- hash function is identity for simple references --- let hash2 = V2 (H.accumulate' v2componentH) - --- -- construct v2 term component for serializing --- let v2componentS :: [Term2 Db.ObjectId] = --- map (buildTerm2S (snd . lookup) hash1 . fst) v1component - --- -- serialize the v2 term component --- componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2componentS - --- -- construct v2 types for each component element, and save the types to the --- -- to the indices --- for_ (zip3 [0 ..] v1component v2componentS) $ \(i, (_term1, typ1), term2) -> do --- let r = V2.ReferenceId componentObjectId i --- let rt = V2.ReferentIdRef r - --- saveTypeBlobForReferent rt (buildTermType2S (snd . lookup) typ1) --- createTypeSearchIndicesForReferent rt typ1 --- createDependencyIndexForTerm r term2 +-- | Loads a dir with format /base32-encoded-reference.id... +-- into a map from Hash to component references +componentMapForDir :: forall m. 
MonadIO m => FilePath -> m (Map (V1 Hash) [V1.Reference.Id]) +componentMapForDir root = listDirectory root <&> foldl' insert mempty + where + insert m filename = case V1.componentIdFromString filename of + Nothing -> m -- skip silently + Just r@(V1.Reference.Id h _i _n) -> + Map.unionWith (<>) m (Map.singleton (V1 h) [r]) + +existsObjectWithHash :: DB m => Hash -> m Bool +existsObjectWithHash = Db.objectExistsWithHash . Hash.toBase32Hex + +convertABT :: forall f v a f' v' a' . Ord v' => (f (V1.ABT.Term f v a) -> f' (V2.ABT.Term f' v' a')) -> (v -> v') -> (a -> a') -> V1.ABT.Term f v a -> V2.ABT.Term f' v' a' +convertABT ff fv fa = goTerm where + goTerm :: V1.ABT.Term f v a -> V2.ABT.Term f' v' a' + goTerm (V1.ABT.Term vs a out) = V2.ABT.Term (Set.map fv vs) (fa a) (goABT out) + goABT :: V1.ABT.ABT f v (V1.ABT.Term f v a) -> V2.ABT.ABT f' v' (V2.ABT.Term f' v' a') + goABT = \case + V1.ABT.Var v -> V2.ABT.Var (fv v) + V1.ABT.Cycle t -> V2.ABT.Cycle (goTerm t) + V1.ABT.Abs v t -> V2.ABT.Abs (fv v) (goTerm t) + V1.ABT.Tm ft -> V2.ABT.Tm (ff ft) + +convertSymbol :: V1.Symbol.Symbol -> V2.Symbol.Symbol +convertSymbol (V1.Symbol.Symbol id name) = V2.Symbol.Symbol id name + +convertKind :: V1.Kind.Kind -> V2.Kind.Kind +convertKind = \case + V1.Kind.Star -> V2.Kind.Star + V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) + +-- | Given a V1 term component, convert and save it to the V2 codebase +-- Pre-requisite: all hash-identified entities in the V1 component have +-- already been converted and added to the V2 codebase, apart from self- +-- references. 
+convertTerm1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> (Text -> Db.TextId) -> V1 Hash -> [(V1Term, V1Type)] -> m () +convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do + + -- construct v2 term component for hashing + let + buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2TypeT + buildTermType2H lookup = goType where + goType :: V1Type -> V2TypeT + goType = convertABT goABT convertSymbol (const ()) + goABT :: V1.Type.F V1Type -> V2.Type.FT V2TypeT + goABT = \case + V1.Type.Ref r -> V2.Type.Ref case r of + V1.Reference.Builtin t -> + V2.Reference.ReferenceBuiltin t + V1.Reference.Derived h i _n -> + V2.Reference.ReferenceDerived + (V2.Reference.Id (runV2 . lookup $ V1 h) i) + V1.Type.Arrow i o -> V2.Type.Arrow (goType i) (goType o) + V1.Type.Ann a k -> V2.Type.Ann (goType a) (convertKind k) + V1.Type.App f x -> V2.Type.App (goType f) (goType x) + V1.Type.Effect e b -> V2.Type.Effect (goType e) (goType b) + V1.Type.Effects as -> V2.Type.Effects (goType <$> as) + V1.Type.Forall a -> V2.Type.Forall (goType a) + V1.Type.IntroOuter a -> V2.Type.IntroOuter (goType a) + buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> V1Term -> V2TermH + buildTerm2H lookup self = goTerm where + goTerm = convertABT goABT convertSymbol (const ()) + goABT :: V1.Term.F V1.Symbol.Symbol () V1Term -> V2.Term.F V2.Symbol.Symbol V2TermH + lookupTermLink = \case + V1.Referent.Ref r -> V2.Referent.Ref (lookupTerm r) + V1.Referent.Con r i _ct -> V2.Referent.Con (lookupType r) (fromIntegral i) + lookupTerm = \case + V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t + V1.Reference.Derived h i _n -> + V2.Reference.ReferenceDerived + (V2.Reference.Id + (if V1 h == self then Nothing + else (Just . runV2.lookup $ V1 h)) i) + lookupType = \case + V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t + V1.Reference.Derived h i _n -> + V2.Reference.ReferenceDerived + (V2.Reference.Id (runV2 . 
lookup $ V1 h) i) + goABT = \case + V1.Term.Int i -> V2.Term.Int i + V1.Term.Nat n -> V2.Term.Nat n + V1.Term.Float f -> V2.Term.Float f + V1.Term.Boolean b -> V2.Term.Boolean b + V1.Term.Text t -> V2.Term.Text t + V1.Term.Char c -> V2.Term.Char c + V1.Term.Ref r -> V2.Term.Ref (lookupTerm r) + V1.Term.Constructor r i -> + V2.Term.Constructor (lookupType r) (fromIntegral i) + V1.Term.Request r i -> + V2.Term.Constructor (lookupType r) (fromIntegral i) + V1.Term.Handle b h -> V2.Term.Handle (goTerm b) (goTerm h) + V1.Term.App f a -> V2.Term.App (goTerm f) (goTerm a) + V1.Term.Ann e t -> V2.Term.Ann (goTerm e) (buildTermType2H lookup t) + V1.Term.Sequence as -> V2.Term.Sequence (goTerm <$> as) + V1.Term.If c t f -> V2.Term.If (goTerm c) (goTerm t) (goTerm f) + V1.Term.And a b -> V2.Term.And (goTerm a) (goTerm b) + V1.Term.Or a b -> V2.Term.Or (goTerm a) (goTerm b) + V1.Term.Lam a -> V2.Term.Lam (goTerm a) + V1.Term.LetRec _ bs body -> V2.Term.LetRec (goTerm <$> bs) (goTerm body) + V1.Term.Let _ b body -> V2.Term.Let (goTerm b) (goTerm body) + V1.Term.Match e cases -> V2.Term.Match (goTerm e) (goCase <$> cases) + V1.Term.TermLink r -> V2.Term.TermLink (lookupTermLink r) + V1.Term.TypeLink r -> V2.Term.TypeLink (lookupType r) + goCase (V1.Term.MatchCase p g b) = + V2.Term.MatchCase (goPat p) (goTerm <$> g) (goTerm b) + goPat = \case + V1.Pattern.Unbound -> V2.Term.PUnbound + V1.Pattern.Var -> V2.Term.PVar + V1.Pattern.Boolean b -> V2.Term.PBoolean b + V1.Pattern.Int i -> V2.Term.PInt i + V1.Pattern.Nat n -> V2.Term.PNat n + V1.Pattern.Float d -> V2.Term.PFloat d + V1.Pattern.Text t -> V2.Term.PText t + V1.Pattern.Char c -> V2.Term.PChar c + V1.Pattern.Constructor r i ps -> + V2.Term.PConstructor (lookupType r) i (goPat <$> ps) + V1.Pattern.As p -> V2.Term.PAs (goPat p) + V1.Pattern.EffectPure p -> V2.Term.PEffectPure (goPat p) + V1.Pattern.EffectBind r i ps k -> + V2.Term.PEffectBind (lookupType r) i (goPat <$> ps) (goPat k) + V1.Pattern.SequenceLiteral ps -> 
V2.Term.PSequenceLiteral (goPat <$> ps) + V1.Pattern.SequenceOp p op p2 -> + V2.Term.PSequenceOp (goPat p) (goSeqOp op) (goPat p2) + goSeqOp = \case + V1.Pattern.Cons -> V2.Term.PCons + V1.Pattern.Snoc -> V2.Term.PSnoc + V1.Pattern.Concat -> V2.Term.PConcat + + -- |this function assumes that the terms are already in their canonical order + buildTermComponent2S :: (V2 Hash -> Db.ObjectId) -> V2 Hash -> V2TermComponentH -> V2TermComponentS + buildTermComponent2S getId h terms = + -- collect the local id values + error "not implemented" + + v2types :: [V2TypeT] = + map (buildTermType2H lookup1 . snd) v1component + + -- |may need an extra pass to put them into their canonical order + -- or a proof that none is needed + v2componentH :: V2TermComponentH = error "todo" $ + map (buildTerm2H lookup1 hash1 . fst) v1component + + -- note: we'd need some special care here if we want to make sure that this + -- hash function is identity for simple references + hash2 = error "todo" -- V2 (H.accumulate' v2componentH) + + -- construct v2 term component for serializing + v2componentS :: V2TermComponentS = + buildTermComponent2S lookup2 hash2 v2componentH + + -- -- serialize the v2 term component + -- componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2componentS + + -- -- construct v2 types for each component element, and save the types to the + -- -- to the indices + -- for_ (zip3 [0 ..] v1component v2componentS) $ \(i, (_term1, typ1), term2) -> do + -- let r = V2.Reference.Id componentObjectId i + -- let rt = V2.Referent.RefId r + + -- saveTypeBlobForReferent rt (buildTermType2S (snd . 
lookup) typ1) + -- createTypeSearchIndicesForReferent rt typ1 + -- createDependencyIndexForTerm r term2 + error "todo" -- convertDecl1 :: DB m => (V1 Hash -> (V2 Hash, Db.ObjectId)) -> V1 Hash -> [Decl] -> m () -- convertDecl1 lookup hash1 v1component = do diff --git a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal index f590bddc02..0aaba54159 100644 --- a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal +++ b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal @@ -55,6 +55,8 @@ library unliftio, unison-core, unison-codebase1, + unison-codebase, unison-codebase-sqlite, - unison-util + unison-util, + unison-util-serialization default-language: Haskell2010 diff --git a/codebase1/codebase/Unison/Codebase/V1/ABT.hs b/codebase1/codebase/Unison/Codebase/V1/ABT.hs index 7b51fe8741..deaffaa51a 100644 --- a/codebase1/codebase/Unison/Codebase/V1/ABT.hs +++ b/codebase1/codebase/Unison/Codebase/V1/ABT.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE LambdaCase #-} -- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html {-# LANGUAGE DeriveFoldable #-} {-# LANGUAGE DeriveTraversable #-} @@ -13,6 +14,10 @@ import qualified Data.Foldable as Foldable import qualified Data.Set as Set import Data.Set (Set) import Prelude hiding (abs, cycle) +-- import U.Util.Hashable (Accumulate, Hashable1) +-- import qualified Data.Map as Map +-- import qualified U.Util.Hashable as Hashable +-- import Data.Functor (void) data ABT f v r = Var v @@ -144,14 +149,14 @@ class Ord v => Var v where -- instance Functor f => Functor (Term f v) where -- fmap f (Term fvs a sub) = Term fvs (f a) (fmap (fmap f) sub) --- extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a --- extraMap p (Term fvs a sub) = Term fvs a (go p sub) where --- go :: Functor g => (forall k . 
f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) --- go p = \case --- Var v -> Var v --- Cycle r -> Cycle (extraMap p r) --- Abs v r -> Abs v (extraMap p r) --- Tm x -> Tm (fmap (extraMap p) (p x)) +extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a +extraMap p (Term fvs a sub) = Term fvs a (go p sub) where + go :: Functor g => (forall k . f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) + go p = \case + Var v -> Var v + Cycle r -> Cycle (extraMap p r) + Abs v r -> Abs v (extraMap p r) + Tm x -> Tm (fmap (extraMap p) (p x)) -- pattern Var' v <- Term _ _ (Var v) -- pattern Cycle' vs t <- Term _ _ (Cycle (AbsN' vs t)) @@ -179,11 +184,8 @@ class Ord v => Var v where annotatedVar :: a -> v -> Term f v a annotatedVar a v = Term (Set.singleton v) a (Var v) --- abs :: Ord v => v -> Term f v () -> Term f v () --- abs = abs' () - -abs' :: Ord v => a -> v -> Term f v a -> Term f v a -abs' a v body = Term (Set.delete v (freeVars body)) a (Abs v body) +abs :: Ord v => a -> v -> Term f v a -> Term f v a +abs a v body = Term (Set.delete v (freeVars body)) a (Abs v body) -- absr :: (Functor f, Foldable f, Var v) => v -> Term f (V v) () -> Term f (V v) () -- absr = absr' () @@ -201,17 +203,11 @@ abs' a v body = Term (Set.delete v (freeVars body)) a (Abs v body) -- absChain' :: Ord v => [(a, v)] -> Term f v a -> Term f v a -- absChain' vs t = foldr (\(a,v) t -> abs' a v t) t vs --- tm :: (Foldable f, Ord v) => f (Term f v ()) -> Term f v () --- tm = tm' () - -tm' :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a -tm' a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) +tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a +tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) --- cycle :: Term f v () -> Term f v () --- cycle = cycle' () - -cycle' :: a -> Term f v a -> Term f v a -cycle' a t = Term (freeVars t) a (Cycle t) +cycle :: a -> Term f v a -> Term f v a +cycle a t = Term 
(freeVars t) a (Cycle t) -- cycler' :: (Functor f, Foldable f, Var v) => a -> [v] -> Term f (V v) a -> Term f (V v) a -- cycler' a vs t = cycle' a $ foldr (absr' a) t vs @@ -423,9 +419,9 @@ visit :: g (Term f v a) visit f t = flip fromMaybe (f t) $ case out t of Var _ -> pure t - Cycle body -> cycle' (annotation t) <$> visit f body - Abs x e -> abs' (annotation t) x <$> visit f e - Tm body -> tm' (annotation t) <$> traverse (visit f) body + Cycle body -> cycle (annotation t) <$> visit f body + Abs x e -> abs (annotation t) x <$> visit f e + Tm body -> tm (annotation t) <$> traverse (visit f) body -- | Apply an effectful function to an ABT tree top down, sequencing the results. visit' :: @@ -435,9 +431,9 @@ visit' :: g (Term f v a) visit' f t = case out t of Var _ -> pure t - Cycle body -> cycle' (annotation t) <$> visit' f body - Abs x e -> abs' (annotation t) x <$> visit' f e - Tm body -> f body >>= (fmap (tm' (annotation t)) . traverse (visit' f)) + Cycle body -> cycle (annotation t) <$> visit' f body + Abs x e -> abs (annotation t) x <$> visit' f e + Tm body -> f body >>= (fmap (tm (annotation t)) . traverse (visit' f)) -- -- | `visit` specialized to the `Identity` effect. -- visitPure :: (Traversable f, Ord v) @@ -596,6 +592,7 @@ visit' f t = case out t of -- isCyclic [(v,b)] = Set.member v (freeVars b) -- isCyclic bs = length bs > 1 +-- -- todo: -- -- Hash a strongly connected component and sort its definitions into a canonical order. -- hashComponent :: -- (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) @@ -604,12 +601,19 @@ visit' f t = case out t of -- ts = Map.toList byName -- embeds = [ (v, void (transform Embed t)) | (v,t) <- ts ] -- vs = fst <$> ts +-- -- make closed terms for each element of the component +-- -- [ let x = ..., y = ..., in x +-- -- , let x = ..., y = ..., in y ] +-- -- so that we can then hash them (closed terms can be hashed) +-- -- so that we can sort them by hash. 
this is the "canonical, name-agnostic" +-- -- hash that yields the canonical ordering of the component. -- tms = [ (v, absCycle vs (tm $ Component (snd <$> embeds) (var v))) | v <- vs ] -- hashed = [ ((v,t), hash t) | (v,t) <- tms ] -- sortedHashed = sortOn snd hashed -- overallHash = Hashable.accumulate (Hashable.Hashed . snd <$> sortedHashed) -- in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) + -- -- Group the definitions into strongly connected components and hash -- -- each component. Substitute the hash of each component into subsequent -- -- components (using the `termFromHash` function). Requires that the diff --git a/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs b/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs index d2282ce602..4430943869 100644 --- a/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs +++ b/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs @@ -5,19 +5,7 @@ {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} -module Unison.Codebase.V1.FileCodebase - ( getRootBranch, -- used by Git module - codebaseExists, -- used by Main - getCodebaseDir, - termsDir, - reflogPath, - getTerm, - getTypeOfTerm, - getDecl, - getWatch, - deserializeEdits, - ) -where +module Unison.Codebase.V1.FileCodebase where import Control.Error (ExceptT (..), runExceptT) import Control.Monad.Catch (catch) @@ -39,14 +27,20 @@ import Unison.Codebase.V1.Reference (Reference) import qualified Unison.Codebase.V1.Reference as Reference import qualified Unison.Codebase.V1.Serialization.Serialization as S import qualified Unison.Codebase.V1.Serialization.V1 as V1 -import Unison.Codebase.V1.Term (Term) -import Unison.Codebase.V1.Type (Type) +import qualified Unison.Codebase.V1.Term (Term) +import qualified Unison.Codebase.V1.Type (Type) import UnliftIO (MonadIO) import UnliftIO (IOException) import UnliftIO (MonadIO (liftIO)) import UnliftIO.Directory (createDirectoryIfMissing, doesDirectoryExist, 
doesFileExist) +import Data.Text (Text) +import Data.Maybe (fromMaybe) +import Data.Char (isDigit) +import Unison.Codebase.V1.Symbol (Symbol) -newtype CodebasePath = CodebasePath FilePath +type CodebasePath = FilePath +type Term = Unison.Codebase.V1.Term.Term Symbol () +type Type = Unison.Codebase.V1.Type.Type Symbol () data WatchKind = RegularWatch | TestWatch deriving (Eq, Ord, Show) @@ -66,18 +60,18 @@ codebasePath :: FilePath codebasePath = ".unison" "v1" termsDir, typesDir, branchesDir, branchHeadDir, editsDir :: CodebasePath -> FilePath -termsDir (CodebasePath root) = root codebasePath "terms" -typesDir (CodebasePath root) = root codebasePath "types" -branchesDir (CodebasePath root) = root codebasePath "paths" +termsDir root = root codebasePath "terms" +typesDir root = root codebasePath "types" +branchesDir root = root codebasePath "paths" branchHeadDir root = branchesDir root "_head" -editsDir (CodebasePath root) = root codebasePath "patches" +editsDir root = root codebasePath "patches" termDir, declDir :: CodebasePath -> Reference.Id -> FilePath termDir root r = termsDir root componentIdToString r declDir root r = typesDir root componentIdToString r watchesDir :: CodebasePath -> WatchKind -> FilePath -watchesDir (CodebasePath root) k = +watchesDir root k = root codebasePath "watches" case k of RegularWatch -> "_cache" TestWatch -> "test" @@ -98,7 +92,7 @@ editsPath :: CodebasePath -> EditHash -> FilePath editsPath root (EditHash h) = editsDir root hashToString h ++ ".up" reflogPath :: CodebasePath -> FilePath -reflogPath (CodebasePath root) = root codebasePath "reflog" +reflogPath root = root codebasePath "reflog" -- checks if `path` looks like a unison codebase minimalCodebaseStructure :: CodebasePath -> [FilePath] @@ -176,8 +170,40 @@ componentIdToString :: Reference.Id -> String componentIdToString = Text.unpack . Reference.toText . 
Reference.DerivedId -- here --- componentIdFromString :: String -> Maybe Reference.Id --- componentIdFromString = Reference.idFromText . Text.pack +componentIdFromString :: String -> Maybe Reference.Id +componentIdFromString = idFromText . Text.pack where + idFromText :: Text.Text -> Maybe Reference.Id + idFromText s = case fromText s of + Left _ -> Nothing + Right (Reference.Builtin _) -> Nothing + Right (Reference.DerivedId id) -> pure id + +-- examples: +-- `##Text.take` — builtins don’t have cycles +-- `#2tWjVAuc7` — derived, no cycle +-- `#y9ycWkiC1.y9` — derived, part of cycle +-- todo: take a (Reference -> CycleSize) so that `readSuffix` doesn't have to parse the size from the text. +fromText :: Text -> Either String Reference +fromText t = case Text.split (=='#') t of + [_, "", b] -> Right (Reference.Builtin b) + [_, h] -> case Text.split (=='.') h of + [hash] -> Right (derivedBase32Hex hash 0 1) + [hash, suffix] -> uncurry (derivedBase32Hex hash) <$> readSuffix suffix + _ -> bail + _ -> bail + where + bail = Left $ "couldn't parse a Reference from " <> Text.unpack t + derivedBase32Hex :: Text -> Reference.Pos -> Reference.Size -> Reference + derivedBase32Hex b32Hex i n = Reference.DerivedId (Reference.Id (fromMaybe msg h) i n) + where + msg = error $ "Reference.derivedBase32Hex " <> show h + h = Hash.fromBase32Hex <$> Base32Hex.fromText b32Hex + readSuffix :: Text -> Either String (Reference.Pos, Reference.Size) + readSuffix t = case Text.breakOn "c" t of + (pos, Text.drop 1 -> size) | Text.all isDigit pos && Text.all isDigit size -> + Right (read (Text.unpack pos), read (Text.unpack size)) + _ -> Left "suffix decoding error" + -- here -- referentFromString :: String -> Maybe Referent @@ -193,39 +219,32 @@ componentIdToString = Text.unpack . Reference.toText . Reference.DerivedId -- referentToString :: Referent -> String -- referentToString = Text.unpack . 
Referent.toText -getTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Term v a)) -getTerm getV getA path h = S.getFromFile (V1.getTerm getV getA) (termPath path h) +getTerm :: MonadIO m => CodebasePath -> Reference.Id -> m (Maybe Term) +getTerm path h = S.getFromFile V1.getTerm (termPath path h) -getTypeOfTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Type v a)) -getTypeOfTerm getV getA path h = S.getFromFile (V1.getType getV getA) (typePath path h) +getTypeOfTerm :: MonadIO m => CodebasePath -> Reference.Id -> m (Maybe Type) +getTypeOfTerm path h = S.getFromFile V1.getType (typePath path h) getDecl :: - (MonadIO m, Ord v) => - S.Get v -> - S.Get a -> + MonadIO m => CodebasePath -> Reference.Id -> - m (Maybe (DD.Decl v a)) -getDecl getV getA root h = + m (Maybe (DD.Decl Symbol ())) +getDecl root h = S.getFromFile - ( V1.getEither - (V1.getEffectDeclaration getV getA) - (V1.getDataDeclaration getV getA) - ) + (V1.getEither V1.getEffectDeclaration V1.getDataDeclaration) (declPath root h) getWatch :: - (MonadIO m, Ord v) => - S.Get v -> - S.Get a -> + MonadIO m => CodebasePath -> WatchKind -> Reference.Id -> - m (Maybe (Term v a)) -getWatch getV getA path k id = do + m (Maybe Term) +getWatch path k id = do let wp = watchesDir path k createDirectoryIfMissing True wp - S.getFromFile (V1.getTerm getV getA) (watchPath path k id) + S.getFromFile V1.getTerm (watchPath path k id) failWith :: MonadIO m => Err -> m a failWith = liftIO . fail . 
show diff --git a/codebase1/codebase/Unison/Codebase/V1/Reference.hs b/codebase1/codebase/Unison/Codebase/V1/Reference.hs index 6303d2fd76..500d69e93f 100644 --- a/codebase1/codebase/Unison/Codebase/V1/Reference.hs +++ b/codebase1/codebase/Unison/Codebase/V1/Reference.hs @@ -27,6 +27,8 @@ data Reference pattern Derived :: H.Hash -> Pos -> Size -> Reference pattern Derived h i n = DerivedId (Id h i n) +{-# COMPLETE Builtin, Derived #-} + type Pos = Word64 type Size = Word64 diff --git a/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs b/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs index 5e467b6033..f14304255b 100644 --- a/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs +++ b/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs @@ -171,12 +171,12 @@ getABT getVar getA getF = getList getVar >>= go [] 0 -> ABT.annotatedVar a . (env !!) <$> getLength 1 -> ABT.annotatedVar a . (fvs !!) <$> getLength _ -> unknownTag "getABT.Var" tag - 1 -> ABT.tm' a <$> getF (go env fvs) + 1 -> ABT.tm a <$> getF (go env fvs) 2 -> do v <- getVar body <- go (v : env) fvs - pure $ ABT.abs' a v body - 3 -> ABT.cycle' a <$> go env fvs + pure $ ABT.abs a v body + 3 -> ABT.cycle a <$> go env fvs _ -> unknownTag "getABT" tag getKind :: MonadGet m => m Kind @@ -185,12 +185,15 @@ getKind = getWord8 >>= \tag -> case tag of 1 -> Kind.Arrow <$> getKind <*> getKind _ -> unknownTag "getKind" tag -getType :: +getType :: MonadGet m => m (Type Symbol ()) +getType = getType' getSymbol (pure ()) + +getType' :: (MonadGet m, Ord v) => m v -> m a -> m (Type v a) -getType getVar getA = getABT getVar getA go +getType' getVar getA = getABT getVar getA go where go getChild = getWord8 >>= \tag -> case tag of 0 -> Type.Ref <$> getReference @@ -245,12 +248,15 @@ getPattern getA = getWord8 >>= \tag -> case tag of 13 -> Pattern.Char <$ getA <*> getChar _ -> unknownTag "Pattern" tag -getTerm :: +getTerm :: MonadGet m => m (Term Symbol ()) +getTerm = getTerm' getSymbol (pure ()) + 
+getTerm' :: (MonadGet m, Ord v) => m v -> m a -> m (Term v a) -getTerm getVar getA = getABT getVar getA go +getTerm' getVar getA = getABT getVar getA go where go getChild = getWord8 >>= \tag -> case tag of 0 -> Term.Int <$> getInt @@ -263,7 +269,7 @@ getTerm getVar getA = getABT getVar getA go 7 -> Term.Request <$> getReference <*> getLength 8 -> Term.Handle <$> getChild <*> getChild 9 -> Term.App <$> getChild <*> getChild - 10 -> Term.Ann <$> getChild <*> getType getVar getA + 10 -> Term.Ann <$> getChild <*> getType' getVar getA 11 -> Term.Sequence . Sequence.fromList <$> getList getChild 12 -> Term.If <$> getChild <*> getChild <*> getChild 13 -> Term.And <$> getChild <*> getChild @@ -347,13 +353,16 @@ getRawBranch = <*> getMap getNameSegment (Branch.BranchHash <$> getHash) <*> getMap getNameSegment (Branch.EditHash <$> getHash) -getDataDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (DataDeclaration v a) -getDataDeclaration getV getA = +getDataDeclaration :: MonadGet m => m (DataDeclaration Symbol ()) +getDataDeclaration = getDataDeclaration' getSymbol (pure ()) + +getDataDeclaration' :: (MonadGet m, Ord v) => m v -> m a -> m (DataDeclaration v a) +getDataDeclaration' getV getA = DataDeclaration.DataDeclaration <$> getModifier <*> getA <*> getList getV - <*> getList (getTuple3 getA getV (getType getV getA)) + <*> getList (getTuple3 getA getV (getType' getV getA)) getModifier :: MonadGet m => m DataDeclaration.Modifier getModifier = getWord8 >>= \case @@ -361,9 +370,13 @@ getModifier = getWord8 >>= \case 1 -> DataDeclaration.Unique <$> getText tag -> unknownTag "DataDeclaration.Modifier" tag -getEffectDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (EffectDeclaration v a) -getEffectDeclaration getV getA = - DataDeclaration.EffectDeclaration <$> getDataDeclaration getV getA +getEffectDeclaration :: MonadGet m => m (EffectDeclaration Symbol ()) +getEffectDeclaration = + DataDeclaration.EffectDeclaration <$> getDataDeclaration + +getEffectDeclaration' :: 
(MonadGet m, Ord v) => m v -> m a -> m (EffectDeclaration v a) +getEffectDeclaration' getV getA = + DataDeclaration.EffectDeclaration <$> getDataDeclaration' getV getA getEither :: MonadGet m => m a -> m b -> m (Either a b) getEither getL getR = getWord8 >>= \case diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 236d338ba7..d4560e0d6a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -30,6 +30,8 @@ import qualified U.Codebase.Referent as Referent import U.Codebase.Sqlite.ObjectType import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hashable (Hashable) +import U.Util.Hash (Hash) +import qualified U.Util.Hash as Hash -- * types type DB m = (MonadIO m, MonadReader Connection m) @@ -53,6 +55,9 @@ saveHash :: DB m => Base32Hex -> m HashId saveHash base32 = execute sql (Only base32) >> queryOne (loadHash base32) where sql = [here| INSERT OR IGNORE INTO hash (base32) VALUES (?) |] +saveHashHash :: DB m => Hash -> m HashId +saveHashHash = saveHash . Hash.toBase32Hex + loadHash :: DB m => Base32Hex -> m (Maybe HashId) loadHash base32 = queryOnly sql (Only base32) where sql = [here| SELECT id FROM hash WHERE base32 = ? |] @@ -99,6 +104,23 @@ objectByPrimaryHashId h = queryOnly sql (Only h) where sql = [here| SELECT id FROM object WHERE primary_hash_id = ? |] +objectAndPrimaryHashByAnyHash :: DB m => Base32Hex -> m (Maybe (Base32Hex, ObjectId)) +objectAndPrimaryHashByAnyHash h = queryMaybe sql (Only h) where sql = [here| + SELECT object.id + FROM hash + INNER JOIN hash_object ON hash_object.hash_id = hash.id + INNER JOIN object ON hash_object.objectId = object.id + WHERE hash.base32 = ? +|] + +objectExistsWithHash :: DB m => Base32Hex -> m Bool +objectExistsWithHash h = queryExists sql (Only h) where + sql = [here| + SELECT 1 + FROM hash INNER JOIN hash_object ON hash.id = hash_object.hash_id + WHERE base32 = ? 
+ |] + updateObjectBlob :: DB m => ObjectId -> ByteString -> m () updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| UPDATE object SET bytes = ? WHERE id = ? diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs index cbd708433c..c1ad4987fd 100644 --- a/codebase2/codebase/U/Codebase/Referent.hs +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -15,7 +15,6 @@ import qualified U.Util.Hashable as Hashable type Referent = Referent' (Reference' Text Hash) (Reference' Text Hash) type ConstructorIndex = Word64 - data Referent' rTm rTp = Ref rTm | Con rTp ConstructorIndex diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index 7e7cc5d3e2..7d7d5ad9af 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -7,6 +7,8 @@ {-# LANGUAGE Rank2Types #-} {-# LANGUAGE UnicodeSyntax #-} {-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE ScopedTypeVariables #-} + module U.Codebase.Term where @@ -21,6 +23,7 @@ import U.Codebase.Type (TypeR) import U.Util.Hash (Hash) import qualified U.Core.ABT as ABT import qualified U.Util.Hashable as H +import qualified U.Codebase.Type as Type type ConstructorId = Word64 @@ -105,6 +108,8 @@ data SeqOp | PConcat deriving (Eq, Show) +-- getHashesAndTextF :: + -- rmap :: -- (termRef -> termRef') -> -- (typeRef -> typeRef') -> @@ -114,16 +119,59 @@ data SeqOp -- rmap fTermRef fTypeRef fTermLink t = -- extraMap fTermRef fTypeRef fTermLink fTypeRef (Type.rmap fTypeRef) undefined id t --- rmapPattern :: (r -> r') -> Pattern r loc -> Pattern r' loc --- rmapPattern f = \case --- PConstructor loc r i ps -> PConstructor loc (f r) i (rmap f <$> ps) --- PAs loc p -> PAs loc (rmap f p) --- PEffectPure loc p -> PEffectPure loc (rmap f p) --- PEffectBind loc r i ps p -> PEffectBind loc (f r) i (rmap f <$> ps) (rmap f p) --- PSequenceLiteral loc ps -> PSequenceLiteral loc (rmap f <$> ps) --- PSequenceOp loc p1 op p2 -> PSequenceOp 
loc (rmap f p1) op (rmap f p2) --- -- cover all cases having references or subpatterns above; the rest are fine --- x -> unsafeCoerce x +extraMap :: forall text termRef typeRef termLink typeLink vt + text' termRef' typeRef' termLink' typeLink' vt' v a. Ord vt' + => (text -> text') -> (termRef -> termRef') -> (typeRef -> typeRef') + -> (termLink -> termLink') -> (typeLink -> typeLink') -> (vt -> vt') + -> ABT.Term (F' text termRef typeRef termLink typeLink vt) v a + -> ABT.Term (F' text' termRef' typeRef' termLink' typeLink' vt') v a +extraMap ftext ftermRef ftypeRef ftermLink ftypeLink fvt = go' where + go' = ABT.extraMap go + go :: forall x. F' text termRef typeRef termLink typeLink vt x -> F' text' termRef' typeRef' termLink' typeLink' vt' x + go = \case + Int i -> Int i + Nat n -> Nat n + Float d -> Float d + Boolean b -> Boolean b + Text t -> Text (ftext t) + Char c -> Char c + Ref r -> Ref (ftermRef r) + Constructor r cid -> Constructor (ftypeRef r) cid + Request r cid -> Request (ftypeRef r) cid + Handle e h -> Handle e h + App f a -> App f a + Ann a typ -> Ann a (Type.rmap ftypeRef $ ABT.vmap fvt typ) + Sequence s -> Sequence s + If c t f -> If c t f + And p q -> And p q + Or p q -> Or p q + Lam b -> Lam b + LetRec bs b -> LetRec bs b + Let a b -> Let a b + Match s cs -> Match s (goCase <$> cs) + TermLink r -> TermLink (ftermLink r) + TypeLink r -> TypeLink (ftypeLink r) + goCase :: MatchCase text typeRef x -> MatchCase text' typeRef' x + goCase (MatchCase p g b) = MatchCase (goPat p) g b + goPat = rmapPattern ftext ftypeRef + +rmapPattern :: (t -> t') -> (r -> r') -> Pattern t r -> Pattern t' r' +rmapPattern ft fr = go where + go = \case + PUnbound -> PUnbound + PVar -> PVar + PBoolean b -> PBoolean b + PInt i -> PInt i + PNat n -> PNat n + PFloat d -> PFloat d + PText t -> PText (ft t) + PChar c -> PChar c + PConstructor r i ps -> PConstructor (fr r) i (go <$> ps) + PAs p -> PAs (go p) + PEffectPure p -> PEffectPure (go p) + PEffectBind r i ps p -> 
PEffectBind (fr r) i (go <$> ps) (go p) + PSequenceLiteral ps -> PSequenceLiteral (go <$> ps) + PSequenceOp p1 op p2 -> PSequenceOp (go p1) op (go p2) instance H.Hashable SeqOp where tokens PCons = [H.Tag 0] diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index 5651ca5c12..177ce16f41 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -36,9 +36,10 @@ data F' r a -- variables deriving (Foldable, Functor, Eq, Ord, Traversable) --- | Types are represented as ABTs over the base functor F, with variables in `v` +-- | Non-recursive type type TypeT v = ABT.Term FT v () +-- | Potentially-recursive type type TypeD v = ABT.Term FD v () type TypeR r v = ABT.Term (F' r) v () diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index 58de733038..1e0400aa49 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -12,6 +12,7 @@ module U.Core.ABT where import Data.Set (Set) import qualified Data.Set as Set import qualified Data.Foldable as Foldable +import Prelude hiding (abs,cycle) data ABT f v r = Var v @@ -21,7 +22,9 @@ data ABT f v r -- | At each level in the tree, we store the set of free variables and -- a value of type `a`. Variables are of type `v`. + data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a) } + deriving (Functor, Foldable, Traversable) -- instance (Show1 f, Show v) => Show (Term f v a) where -- -- annotations not shown @@ -31,6 +34,13 @@ data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Ter -- Abs v body -> showParen True $ (show v ++) . showString ". " . 
showsPrec p body -- Tm f -> showsPrec1 p f +vmap :: (Functor f, Foldable f, Ord v') => (v -> v') -> Term f v a -> Term f v' a +vmap f (Term _ a out) = case out of + Var v -> annotatedVar a (f v) + Tm fa -> tm a (fmap (vmap f) fa) + Cycle r -> cycle a (vmap f r) + Abs v body -> abs a (f v) (vmap f body) + extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a extraMap p (Term fvs a sub) = Term fvs a (go p sub) where go :: Functor g => (forall k . f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) diff --git a/codebase2/util/U/Util/Base32Hex.hs b/codebase2/util/U/Util/Base32Hex.hs index f5c091bf4b..6a973e418a 100644 --- a/codebase2/util/U/Util/Base32Hex.hs +++ b/codebase2/util/U/Util/Base32Hex.hs @@ -2,16 +2,14 @@ {-# LANGUAGE OverloadedStrings #-} -module U.Util.Base32Hex - (Base32Hex(UnsafeBase32Hex), fromByteString, toByteString, toText, textToByteString) -where +module U.Util.Base32Hex where import Data.Text (Text) import qualified Codec.Binary.Base32Hex import Data.ByteString (ByteString) import qualified Data.Text as Text import Data.Text.Encoding (encodeUtf8, decodeUtf8) -import Data.Maybe (fromJust) +import Data.Maybe (fromMaybe) newtype Base32Hex = UnsafeBase32Hex { toText :: Text } deriving (Eq, Ord, Show) @@ -25,13 +23,19 @@ fromByteString bs = UnsafeBase32Hex . Text.toLower . Text.dropWhileEnd (== '=') . decodeUtf8 $ Codec.Binary.Base32Hex.encode bs --- by not exporting the Base32Hex constructor, we can trust that it's valid toByteString :: Base32Hex -> ByteString -toByteString = fromJust . textToByteString . toText +toByteString = fromMaybe err . textToByteString . toText + where err = "invalid base32Hex presumably created via \"unsafe\" constructors" + +fromText :: Text -> Maybe Base32Hex +fromText = fmap fromByteString . 
textToByteString + +unsafeFromText :: Text -> Base32Hex +unsafeFromText = UnsafeBase32Hex -- | Produce a 'Hash' from a base32hex-encoded version of its binary representation textToByteString :: Text -> Maybe ByteString -textToByteString txt = +textToByteString txt = case Codec.Binary.Base32Hex.decode (encodeUtf8 $ Text.toUpper txt <> paddingChars) of Left (_, _rem) -> Nothing Right h -> pure h diff --git a/codebase2/util/U/Util/Components.hs b/codebase2/util/U/Util/Components.hs new file mode 100644 index 0000000000..8ee40dc99a --- /dev/null +++ b/codebase2/util/U/Util/Components.hs @@ -0,0 +1,48 @@ +module U.Util.Components where + +import qualified Data.Graph as Graph +import qualified Data.Map as Map +import qualified Data.Set as Set +import Data.Set (Set) +import Data.Maybe (fromMaybe) + +-- | Order bindings by dependencies and group into components. +-- Each component consists of > 1 bindings, each of which depends +-- transitively on all other bindings in the component. +-- +-- 1-element components may or may not depend on themselves. +-- +-- The order is such that a component at index i will not depend +-- on components and indexes > i. But a component at index i does not +-- _necessarily_ depend on any components at earlier indices. +-- +-- Example: +-- +-- let rec +-- ping n = pong (n + 1); +-- pong n = ping (n + 1); +-- g = id 42; +-- y = id "hi" +-- id x = x; +-- in ping g +-- +-- `components` would produce `[[ping,pong], [id], [g], [y]]` +-- Notice that `id` comes before `g` and `y` in the output, since +-- both `g` and `y` depend on `id`. +-- +-- Uses Tarjan's algorithm: +-- https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm +components :: Ord v => (t -> Set v) -> [(v, t)] -> [[(v, t)]] +components freeVars bs = + let varIds = + Map.fromList (map fst bs `zip` reverse [(1 :: Int) .. 
length bs]) + -- something horribly wrong if this bombs + varId v = fromMaybe msg $ Map.lookup v varIds + where msg = error "Components.components bug" + + -- use ints as keys for graph to preserve original source order as much as + -- possible + graph = [ ((v, b), varId v, deps b) | (v, b) <- bs ] + vars = Set.fromList (map fst bs) + deps b = varId <$> Set.toList (Set.intersection vars (freeVars b)) + in Graph.flattenSCC <$> Graph.stronglyConnComp graph diff --git a/codebase2/util/U/Util/Monoid.hs b/codebase2/util/U/Util/Monoid.hs new file mode 100644 index 0000000000..ebdfbf4a84 --- /dev/null +++ b/codebase2/util/U/Util/Monoid.hs @@ -0,0 +1,27 @@ +module U.Util.Monoid where + +import Data.Foldable (toList) +import Data.List (intersperse) +import Control.Monad (foldM) + +-- List.intercalate extended to any monoid +-- "The type that intercalate should have had to begin with." +intercalateMap :: (Foldable t, Monoid a) => a -> (b -> a) -> t b -> a +intercalateMap separator renderer elements = + mconcat $ intersperse separator (renderer <$> toList elements) + +fromMaybe :: Monoid a => Maybe a -> a +fromMaybe Nothing = mempty +fromMaybe (Just a) = a + +whenM, unlessM :: Monoid a => Bool -> a -> a +whenM True a = a +whenM False _ = mempty +unlessM = whenM . not + +isEmpty, nonEmpty :: (Eq a, Monoid a) => a -> Bool +isEmpty a = a == mempty +nonEmpty = not . 
isEmpty + +foldMapM :: (Monad m, Foldable f, Monoid b) => (a -> m b) -> f a -> m b +foldMapM f as = foldM (\b a -> fmap (b <>) (f a)) mempty as diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal index c872883dc2..594f7dbd9c 100644 --- a/codebase2/util/unison-util.cabal +++ b/codebase2/util/unison-util.cabal @@ -16,12 +16,14 @@ category: Development library exposed-modules: U.Util.Base32Hex + U.Util.Components U.Util.Hash U.Util.Hashable + U.Util.Monoid U.Util.Relation -- other-modules: -- other-extensions: - build-depends: + build-depends: base, bytestring, containers, From 2a7eebf3af713b3999eac91b60c3263978a76b0a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 7 Oct 2020 15:05:54 -0400 Subject: [PATCH 011/225] work on rewriting terms renamed ABT.annotatedVar to ABT.var renamed ABT.extraMap/M to ABT.transform/M --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 116 ++++++++++++++++-- .../unison-codebase-convert-1to2.cabal | 1 + .../U/Codebase/Sqlite/Queries.hs | 3 +- .../U/Codebase/Sqlite/Serialization.hs | 6 +- .../U/Codebase/Sqlite/Term/Format.hs | 15 +-- codebase2/codebase/U/Codebase/Term.hs | 5 +- codebase2/codebase/U/Codebase/Type.hs | 5 +- codebase2/core/U/Core/ABT.hs | 116 +++++++++++++++--- codebase2/core/unison-core.cabal | 1 + 9 files changed, 222 insertions(+), 46 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index 64607b5a69..96d380a7a9 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -1,3 +1,5 @@ +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE FlexibleContexts #-} @@ -40,9 +42,10 @@ import qualified U.Codebase.Sqlite.Reference as V2S.Reference import Unison.Codebase.V1.FileCodebase (CodebasePath) import U.Codebase.Sqlite.Queries (DB) import 
Data.Map (Map) -import qualified Data.Map as Map +import qualified Data.Map.Strict as Map import Control.Monad.Extra (ifM) import Data.String.Here.Uninterpolated (here) +import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Queries as Db import qualified Unison.Codebase.V1.Symbol as V1.Symbol import qualified Unison.Codebase.V1.Term as V1.Term @@ -64,7 +67,7 @@ import qualified Unison.Codebase.V1.LabeledDependency as V1.LD import Data.List.Extra (nubOrd) import Data.Either (partitionEithers) import U.Util.Base32Hex (Base32Hex) -import Data.Bifunctor (Bifunctor(first)) +import Data.Bifunctor (second, Bifunctor(first)) import qualified U.Codebase.Referent as V2.Referent import qualified U.Codebase.Sqlite.ObjectType as V2.OT import qualified U.Util.Serialization as S @@ -78,6 +81,13 @@ import qualified U.Codebase.Kind as V2.Kind import qualified U.Core.ABT as V2.ABT import qualified Unison.Codebase.V1.Term.Pattern as V1.Pattern import qualified Unison.Codebase.V1.Referent as V1.Referent +import qualified Control.Monad.State as State +import Control.Monad.State (State) +import qualified U.Codebase.Sqlite.LocalIds as V2.LocalIds +import Data.Vector (Vector) +import qualified Data.Vector as Vector +import qualified Data.List as List +import Data.Tuple (swap) newtype V1 a = V1 {runV1 :: a} deriving (Eq, Ord, Show) @@ -744,13 +754,30 @@ convertKind = \case V1.Kind.Star -> V2.Kind.Star V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) +type LocalIdState = + (Map Text V2.TermFormat.LocalTextId, Map (V2 Hash) V2.TermFormat.LocalDefnId) + +rewriteType :: + (V2.Reference.Reference -> + State.State LocalIdState V2.TermFormat.TypeRef) -> + V2TypeT -> State LocalIdState V2.TermFormat.Type +rewriteType doRef = V2.ABT.transformM go where + go :: V2.Type.FT k -> State LocalIdState (V2.TermFormat.FT k) + go = \case + V2.Type.Ref r -> (V2.Type.Ref <$> doRef r) + V2.Type.Arrow l r -> pure $ V2.Type.Arrow l r + V2.Type.Ann a kind -> pure $ 
V2.Type.Ann a kind + V2.Type.Effect e b -> pure $ V2.Type.Effect e b + V2.Type.Effects es -> pure $ V2.Type.Effects es + V2.Type.Forall a -> pure $ V2.Type.Forall a + V2.Type.IntroOuter a -> pure $ V2.Type.IntroOuter a + -- | Given a V1 term component, convert and save it to the V2 codebase -- Pre-requisite: all hash-identified entities in the V1 component have -- already been converted and added to the V2 codebase, apart from self- -- references. convertTerm1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> (Text -> Db.TextId) -> V1 Hash -> [(V1Term, V1Type)] -> m () convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do - -- construct v2 term component for hashing let buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2TypeT @@ -840,13 +867,82 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do V1.Pattern.Cons -> V2.Term.PCons V1.Pattern.Snoc -> V2.Term.PSnoc V1.Pattern.Concat -> V2.Term.PConcat - - -- |this function assumes that the terms are already in their canonical order - buildTermComponent2S :: (V2 Hash -> Db.ObjectId) -> V2 Hash -> V2TermComponentH -> V2TermComponentS - buildTermComponent2S getId h terms = - -- collect the local id values - error "not implemented" - + buildTermComponent2S :: (V2 Hash -> Db.ObjectId) + -> V2 Hash + -> V2TermComponentH + -> V2TermComponentS + buildTermComponent2S getId h0 terms = let + rewrittenTerms :: + [(V2.TermFormat.Term, LocalIdState)] = + + map (flip State.runState mempty . rewriteTerm) terms + rewriteTerm :: V2TermH -> State.State LocalIdState V2.TermFormat.Term + rewriteTerm = V2.ABT.transformM go where + doText :: Text -> State.State LocalIdState V2.TermFormat.LocalTextId + doText t = do + (textMap, objectMap) <- State.get + case Map.lookup t textMap of + Nothing -> do + let id = V2.TermFormat.LocalTextId + . 
fromIntegral + $ Map.size textMap + State.put (Map.insert t id textMap, objectMap) + pure id + Just id -> pure id + doHash :: Hash -> State.State LocalIdState V2.TermFormat.LocalDefnId + doHash (V2 -> h) = do + (textMap, objectMap) <- State.get + case Map.lookup h objectMap of + Nothing -> do + let id = V2.TermFormat.LocalDefnId + . fromIntegral + $ Map.size objectMap + State.put (textMap, Map.insert h id objectMap) + pure id + Just id -> pure id + doRecRef :: V2.Reference.Reference' Text (Maybe Hash) -> State.State LocalIdState V2.TermFormat.TermRef + doRecRef = \case + V2.Reference.ReferenceBuiltin t -> + V2.Reference.ReferenceBuiltin <$> doText t + V2.Reference.ReferenceDerived r -> + V2.Reference.ReferenceDerived <$> case r of + V2.Reference.Id h i -> V2.Reference.Id <$> traverse doHash h <*> pure i + doRef :: V2.Reference.Reference -> State.State LocalIdState V2.TermFormat.TypeRef + doRef = \case + V2.Reference.ReferenceBuiltin t -> + V2.Reference.ReferenceBuiltin <$> doText t + V2.Reference.ReferenceDerived (V2.Reference.Id h i) -> + V2.Reference.ReferenceDerived <$> + (V2.Reference.Id <$> doHash h <*> pure i) + go :: V2.Term.F V2.Symbol.Symbol k -> State LocalIdState (V2.TermFormat.F k) + go = \case + V2.Term.Int i -> pure $ V2.Term.Int i + V2.Term.Nat n -> pure $ V2.Term.Nat n + V2.Term.Float d -> pure $ V2.Term.Float d + V2.Term.Boolean b -> pure $ V2.Term.Boolean b + V2.Term.Text t -> V2.Term.Text <$> doText t + V2.Term.Char c -> pure $ V2.Term.Char c + V2.Term.Ref r -> V2.Term.Ref <$> doRecRef r + V2.Term.Constructor r cid -> + V2.Term.Constructor <$> doRef r <*> pure cid + V2.Term.Request r cid -> V2.Term.Request <$> doRef r <*> pure cid + V2.Term.Handle e h -> pure $ V2.Term.Handle e h + V2.Term.App f a -> pure $ V2.Term.App f a + V2.Term.Ann e typ -> V2.Term.Ann e <$> rewriteType doRef typ + mapToVec :: Ord i => (a -> b) -> Map a i -> Vector b + mapToVec f = Vector.fromList . map (f . fst) . List.sortOn snd . 
Map.toList + stateToIds :: LocalIdState -> V2.LocalIds.LocalIds + stateToIds (t, o) = + V2.LocalIds.LocalIds (mapToVec lookupText t) (mapToVec lookup2 o) + -- state : (Map Text Int, Map Hash Int) + -- Term.app Nat.+ 7 #8sf73g + -- ["Nat.+"] [#8sf73g] + -- [lookupText "Nat.+"] [lookup #8sf73g] + -- Term.app (Builtin 0) 7 (Hash 0) + in V2.TermFormat.LocallyIndexedComponent + . Vector.fromList + . fmap swap + . fmap (second stateToIds) $ rewrittenTerms v2types :: [V2TypeT] = map (buildTermType2H lookup1 . snd) v1component diff --git a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal index 0aaba54159..c2d5a2050d 100644 --- a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal +++ b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal @@ -53,6 +53,7 @@ library text, sqlite-simple, unliftio, + vector, unison-core, unison-codebase1, unison-codebase, diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index d4560e0d6a..f532657c67 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -32,14 +32,13 @@ import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hashable (Hashable) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash +import U.Codebase.Sqlite.DbId -- * types type DB m = (MonadIO m, MonadReader Connection m) newtype HashId = HashId Word64 deriving (Eq, Ord) deriving (Hashable, FromField, ToField) via Word64 -newtype TextId = TextId Word64 deriving (Eq, Ord) deriving (Hashable, FromField, ToField) via Word64 -newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Hashable, FromField, ToField) via Word64 newtype TypeId = TypeId ObjectId deriving (FromField, ToField) via ObjectId newtype TermId = TermCycleId ObjectId deriving (FromField, ToField) via ObjectId newtype DeclId = DeclCycleId ObjectId deriving (FromField, ToField) via 
ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 42427de109..c33b7c9fd8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -64,7 +64,7 @@ putABT putVar putA putF abt = ABT.Abs v body -> ABT.abs a v (go (v : env) body) ABT.Cycle body -> ABT.cycle a (go env body) ABT.Tm f -> ABT.tm a (go env <$> f) - ABT.Var v -> ABT.annotatedVar a v + ABT.Var v -> ABT.var a v putVarRef env v = case v `elemIndex` env of Just i -> putWord8 0 *> putVarInt i Nothing -> case v `elemIndex` fvs of @@ -86,8 +86,8 @@ getABT getVar getA getF = getList getVar >>= go [] 0 -> do tag <- getWord8 case tag of - 0 -> ABT.annotatedVar a . (env !!) <$> getVarInt - 1 -> ABT.annotatedVar a . (fvs !!) <$> getVarInt + 0 -> ABT.var a . (env !!) <$> getVarInt + 1 -> ABT.var a . (fvs !!) <$> getVarInt _ -> unknownTag "getABT.Var" tag 1 -> ABT.tm a <$> getF (go env fvs) 2 -> do diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 64952f2635..6be42ed20b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -2,8 +2,6 @@ module U.Codebase.Sqlite.Term.Format where --- import U.Codebase.Sqlite.DbId - import Data.Bits (Bits) import Data.Vector (Vector) import Data.Word (Word64) @@ -13,22 +11,25 @@ import U.Codebase.Sqlite.LocalIds import U.Codebase.Sqlite.Symbol import qualified U.Codebase.Term as Term import qualified U.Core.ABT as ABT +import qualified U.Codebase.Type as Type -newtype LocalTermId = LocalTermId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 -newtype LocalTypeId = LocalTypeId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, 
Enum, Integral, Bits) via Word64 newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 -type TermRef = Reference' LocalTextId (Maybe LocalTermId) +type TermRef = Reference' LocalTextId (Maybe LocalDefnId) -type TypeRef = Reference' LocalTextId LocalTypeId +type TypeRef = Reference' LocalTextId LocalDefnId -data LocallyIndexedComponent = +data LocallyIndexedComponent = LocallyIndexedComponent (Vector (LocalIds, Term)) type F = Term.F' LocalTextId TermRef TypeRef (Referent' TermRef TypeRef) TypeRef Symbol +type FT = Type.F' TypeRef + type Term = ABT.Term F Symbol () +type Type = ABT.Term FT Symbol () data TermFormat = Term LocallyIndexedComponent diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index 7d7d5ad9af..bf57e6f765 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -120,13 +120,14 @@ data SeqOp -- extraMap fTermRef fTypeRef fTermLink fTypeRef (Type.rmap fTypeRef) undefined id t extraMap :: forall text termRef typeRef termLink typeLink vt - text' termRef' typeRef' termLink' typeLink' vt' v a. Ord vt' + text' termRef' typeRef' termLink' typeLink' vt' v a + . (Ord v, Ord vt') => (text -> text') -> (termRef -> termRef') -> (typeRef -> typeRef') -> (termLink -> termLink') -> (typeLink -> typeLink') -> (vt -> vt') -> ABT.Term (F' text termRef typeRef termLink typeLink vt) v a -> ABT.Term (F' text' termRef' typeRef' termLink' typeLink' vt') v a extraMap ftext ftermRef ftypeRef ftermLink ftypeLink fvt = go' where - go' = ABT.extraMap go + go' = ABT.transform go go :: forall x. 
F' text termRef typeRef termLink typeLink vt x -> F' text' termRef' typeRef' termLink' typeLink' vt' x go = \case Int i -> Int i diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index 177ce16f41..f2440d9e93 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -7,6 +7,7 @@ {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE Rank2Types #-} {-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE BlockArguments #-} module U.Codebase.Type where @@ -44,7 +45,7 @@ type TypeD v = ABT.Term FD v () type TypeR r v = ABT.Term (F' r) v () -rmap :: (r -> r') -> ABT.Term (F' r) v a -> ABT.Term (F' r') v a -rmap f = ABT.extraMap $ \case +rmap :: Ord v => (r -> r') -> ABT.Term (F' r) v a -> ABT.Term (F' r') v a +rmap f = ABT.transform \case Ref r -> Ref (f r) x -> unsafeCoerce x \ No newline at end of file diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index 1e0400aa49..05407359b0 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -6,6 +6,8 @@ {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE Rank2Types #-} {-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE ScopedTypeVariables #-} + module U.Core.ABT where @@ -13,6 +15,13 @@ import Data.Set (Set) import qualified Data.Set as Set import qualified Data.Foldable as Foldable import Prelude hiding (abs,cycle) +import U.Util.Hashable (Accumulate, Hashable1) +import Data.Map (Map) +import qualified Data.Map as Map +import qualified U.Util.Hashable as Hashable +import Data.Functor (void) +import qualified Data.List as List +import qualified Data.Vector as Vector data ABT f v r = Var v @@ -22,42 +31,109 @@ data ABT f v r -- | At each level in the tree, we store the set of free variables and -- a value of type `a`. Variables are of type `v`. 
- data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a) } deriving (Functor, Foldable, Traversable) --- instance (Show1 f, Show v) => Show (Term f v a) where --- -- annotations not shown --- showsPrec p (Term _ _ out) = case out of --- Var v -> \x -> "Var " ++ show v ++ x --- Cycle body -> ("Cycle " ++) . showsPrec p body --- Abs v body -> showParen True $ (show v ++) . showString ". " . showsPrec p body --- Tm f -> showsPrec1 p f - vmap :: (Functor f, Foldable f, Ord v') => (v -> v') -> Term f v a -> Term f v' a vmap f (Term _ a out) = case out of - Var v -> annotatedVar a (f v) + Var v -> var a (f v) Tm fa -> tm a (fmap (vmap f) fa) Cycle r -> cycle a (vmap f r) Abs v body -> abs a (f v) (vmap f body) -extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a -extraMap p (Term fvs a sub) = Term fvs a (go p sub) where - go :: Functor g => (forall k . f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) - go p = \case - Var v -> Var v - Cycle r -> Cycle (extraMap p r) - Abs v r -> Abs v (extraMap p r) - Tm x -> Tm (fmap (extraMap p) (p x)) +transform :: (Ord v, Foldable g, Functor g) + => (forall a. f a -> g a) -> Term f v a -> Term g v a +transform f t = case out t of + Var v -> var (annotation t) v + Abs v body -> abs (annotation t) v (transform f body) + Tm subterms -> tm (annotation t) (fmap (transform f) (f subterms)) + Cycle body -> cycle (annotation t) (transform f body) + +transformM :: (Ord v, Monad m, Traversable g) + => (forall a. 
f a -> m (g a)) -> Term f v a -> m (Term g v a) +transformM f t = case out t of + Var v -> pure $ var (annotation t) v + Abs v body -> abs (annotation t) v <$> (transformM f body) + Tm subterms -> tm (annotation t) <$> (traverse (transformM f) =<< f subterms) + Cycle body -> cycle (annotation t) <$> (transformM f body) abs :: Ord v => a -> v -> Term f v a -> Term f v a abs a v body = Term (Set.delete v (freeVars body)) a (Abs v body) -annotatedVar :: a -> v -> Term f v a -annotatedVar a v = Term (Set.singleton v) a (Var v) +var :: a -> v -> Term f v a +var a v = Term (Set.singleton v) a (Var v) cycle :: a -> Term f v a -> Term f v a cycle a t = Term (freeVars t) a (Cycle t) tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) + +-- Hash a strongly connected component and sort its definitions into a canonical order. +hashComponent :: + (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Ord v, Ord h, Accumulate h) + => Map v (Term f v a) -> (h, [(v, Term f v a)]) +hashComponent byName = let + ts = Map.toList byName + embeds = [ (v, void (transform Embed t)) | (v,t) <- ts ] + vs = fst <$> ts + -- make closed terms for each element of the component + -- [ let x = ..., y = ..., in x + -- , let x = ..., y = ..., in y ] + -- so that we can then hash them (closed terms can be hashed) + -- so that we can sort them by hash. this is the "canonical, name-agnostic" + -- hash that yields the canonical ordering of the component. + tms = [ (v, absCycle vs (tm () $ Component (snd <$> embeds) (var () v))) | v <- vs ] + hashed = [ ((v,t), hash t) | (v,t) <- tms ] + sortedHashed = List.sortOn snd hashed + overallHash = Hashable.accumulate (Hashable.Hashed . 
snd <$> sortedHashed) + in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) + where + absChain :: Ord v => [v] -> Term f v () -> Term f v () + absChain vs t = foldr (abs ()) t vs + absCycle :: Ord v => [v] -> Term f v () -> Term f v () + absCycle vs t = cycle () $ absChain vs t + -- | We ignore annotations in the `Term`, as these should never affect the + -- meaning of the term. + hash :: forall f v a h . (Functor f, Hashable1 f, Eq v, Show v, Ord h, Accumulate h) + => Term f v a -> h + hash = hash' [] where + hash' :: [Either [v] v] -> Term f v a -> h + hash' env (Term _ _ t) = case t of + Var v -> maybe die hashInt ind + where lookup (Left cycle) = v `elem` cycle + lookup (Right v') = v == v' + ind = List.findIndex lookup env + hashInt :: Int -> h + hashInt i = Hashable.accumulate [Hashable.Nat $ fromIntegral i] + die = error $ "unknown var in environment: " ++ show v + ++ " environment = " ++ show env + Cycle (unabs -> (vs, t)) -> hash' (Left vs : env) t + Abs v t -> hash' (Right v : env) t + Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t + unabs :: Term f v a -> ([v], Term f v a) + unabs = \case + Term _ _ (Abs hd body) -> + let (tl, body') = unabs body in (hd : tl, body') + t -> ([], t) + hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) + hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = + let + permute p xs = case Vector.fromList xs of xs -> map (xs Vector.!) p + hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] 
ts) + pt = fst <$> List.sortOn snd hashed + (p,ts') = unzip pt + in case map Right (permute p cycle) ++ envTl of + env -> (map (hash' env) ts', hash' env) + hashCycle env ts = (map (hash' env) ts, hash' env) + +-- Implementation detail of hashComponent +data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) +instance (Hashable1 f, Functor f) => Hashable1 (Component f) where + hash1 hashCycle hash c = case c of + Component as a -> let + (hs, hash) = hashCycle as + toks = Hashable.Hashed <$> hs + in Hashable.accumulate $ (Hashable.Tag 1 : toks) ++ [Hashable.Hashed (hash a)] + Embed fa -> Hashable.hash1 hashCycle hash fa + diff --git a/codebase2/core/unison-core.cabal b/codebase2/core/unison-core.cabal index 49e5d4660f..a3b818bd63 100644 --- a/codebase2/core/unison-core.cabal +++ b/codebase2/core/unison-core.cabal @@ -26,6 +26,7 @@ library base, containers, text, + vector, unison-util hs-source-dirs: . default-language: Haskell2010 From 2972bcc7260dfad566915bba0c7b965d644e1255 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Oct 2020 01:15:51 -0400 Subject: [PATCH 012/225] converting and rehashing v1 term components --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 79 ++++++++++++++-- codebase2/codebase/U/Codebase/Reference.hs | 12 +++ codebase2/codebase/U/Codebase/Referent.hs | 13 +-- codebase2/codebase/U/Codebase/Term.hs | 94 ++++++++++++++++++- codebase2/codebase/U/Codebase/Type.hs | 26 ++++- codebase2/core/U/Core/ABT.hs | 68 +++++++------- codebase2/util/U/Util/Hashable.hs | 9 +- 7 files changed, 250 insertions(+), 51 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index 96d380a7a9..f8d39230c0 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -88,6 +88,7 @@ import Data.Vector (Vector) import qualified Data.Vector as Vector import qualified 
Data.List as List import Data.Tuple (swap) +import Data.Either.Extra (mapLeft) newtype V1 a = V1 {runV1 :: a} deriving (Eq, Ord, Show) @@ -772,6 +773,34 @@ rewriteType doRef = V2.ABT.transformM go where V2.Type.Forall a -> pure $ V2.Type.Forall a V2.Type.IntroOuter a -> pure $ V2.Type.IntroOuter a +-- | rewrite Vars and Tms 🙃 +mapTermToVar :: (Foldable f, Functor f, Ord v2) + => (v -> v2) + -> (a -> f (V2.ABT.Term f v a) -> Maybe (V2.ABT.Term f v2 a)) + -> V2.ABT.Term f v a + -> V2.ABT.Term f v2 a +mapTermToVar fv ft t@(V2.ABT.Term _ a abt) = case abt of + V2.ABT.Var v -> V2.ABT.var a (fv v) + V2.ABT.Cycle body -> V2.ABT.cycle a (mapTermToVar fv ft body) + V2.ABT.Abs x e -> V2.ABT.abs a (fv x) (mapTermToVar fv ft e) + V2.ABT.Tm body -> + case ft a body of + Nothing -> V2.ABT.tm a (mapTermToVar fv ft `fmap` body) + Just t' -> t' + +mapVarToTerm :: (Foldable f, Functor f, Ord v2) => + (v -> v2) -> + (v -> Either (f (V2.ABT.Term f v2 a)) v2) -> + V2.ABT.Term f v a -> + V2.ABT.Term f v2 a +mapVarToTerm fAbs fVar t@(V2.ABT.Term _ a abt) = case abt of + V2.ABT.Var v -> case fVar v of + Left tm -> V2.ABT.tm a tm + Right v2 -> V2.ABT.var a v2 + V2.ABT.Cycle body -> V2.ABT.cycle a (mapVarToTerm fAbs fVar body) + V2.ABT.Abs x e -> V2.ABT.abs a (fAbs x) (mapVarToTerm fAbs fVar e) + V2.ABT.Tm body -> V2.ABT.tm a (mapVarToTerm fAbs fVar <$> body) + -- | Given a V1 term component, convert and save it to the V2 codebase -- Pre-requisite: all hash-identified entities in the V1 component have -- already been converted and added to the V2 codebase, apart from self- @@ -943,21 +972,59 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do . Vector.fromList . fmap swap . 
fmap (second stateToIds) $ rewrittenTerms + -- | converts v to (Right v) and converts (Ref Nothing i) to (Left i) + refToVarTerm :: Ord v => + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a -> + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a + refToVarTerm = mapTermToVar Right \a body -> case body of + V2.Term.Ref (V2.Reference.ReferenceDerived (V2.Reference.Id Nothing i)) -> + Just $ V2.ABT.var a (Left (V1 (fromIntegral i))) + _ -> Nothing + varToRefTerm :: (Show v, Ord v) => Map (V1 Int) (V2 Int) -> + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a -> + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a + varToRefTerm lookup = let + fromLeft :: Show a => Either a b -> b + fromLeft = flip either id \r -> + error ("encountered a reference pseudovar " ++ show r ++ " in ABT.Abs") + in mapVarToTerm fromLeft $ mapLeft \(V1 i) -> + V2.Term.Ref (V2.Reference.Derived Nothing (fromIntegral i)) + v2types :: [V2TypeT] = map (buildTermType2H lookup1 . snd) v1component + -- foo :: Map (Either Int V2.Symbol.Symbol) V2TermH = + -- |may need an extra pass to put them into their canonical order - -- or a proof that none is needed - v2componentH :: V2TermComponentH = error "todo" $ - map (buildTerm2H lookup1 hash1 . fst) v1component + (hash2 :: V2 Hash, v2component0) = let + v1terms :: [V1Term] = map fst v1component + indexVars = Left . V1 <$> [0..] 
+ namedTerms1 :: [(Either (V1 Int) V2.Symbol.Symbol, V1Term)] + namedTerms1 = zip indexVars v1terms + namedTerms2 :: [(Either (V1 Int) V2.Symbol.Symbol, V2TermH)] + namedTerms2 = fmap (second (buildTerm2H lookup1 hash1)) namedTerms1 + namedTermMap :: Map (Either (V1 Int) V2.Symbol.Symbol) V2TermH + namedTermMap = Map.fromList namedTerms2 + bar :: Show a => Either (V1 Int) a -> V1 Int + bar = either id (\x -> error $ "impossibly " ++ show x) + hash2 :: V2 Hash + v1Index :: [V1 Int] + (hash2, unzip -> (fmap (either id (\x -> error $ "impossibly " ++ show x)) -> v1Index, v2Terms)) = + V2.ABT.hashComponent (refToVarTerm <$> namedTermMap) + -- (h, ([2, 0, 1], [t2, t0, t1]) + indexMap :: Map (V1 Int) (V2 Int) + indexMap = Map.fromList (zip v1Index (V2 <$> [0 :: Int ..])) + in (hash2, varToRefTerm indexMap <$> v2Terms) + + v2ComponentH :: V2TermComponentH = error "todo" -- note: we'd need some special care here if we want to make sure that this -- hash function is identity for simple references - hash2 = error "todo" -- V2 (H.accumulate' v2componentH) + -- hash2 = V2.ABT.hashComponent error "todo" -- V2 (H.accumulate' v2componentH) -- construct v2 term component for serializing - v2componentS :: V2TermComponentS = - buildTermComponent2S lookup2 hash2 v2componentH + -- v2componentS :: V2TermComponentS = + -- buildTermComponent2S lookup2 hash2 v2componentH -- -- serialize the v2 term component -- componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2componentS diff --git a/codebase2/codebase/U/Codebase/Reference.hs b/codebase2/codebase/U/Codebase/Reference.hs index 461c420e0e..80ca893db5 100644 --- a/codebase2/codebase/U/Codebase/Reference.hs +++ b/codebase2/codebase/U/Codebase/Reference.hs @@ -1,6 +1,7 @@ {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeSynonymInstances #-} +{-# LANGUAGE PatternSynonyms #-} module U.Codebase.Reference where @@ -20,6 +21,11 @@ data Reference' t h | ReferenceDerived (Id' h) deriving (Eq, Ord, 
Show, Functor) +pattern Derived :: h -> ComponentIndex -> Reference' t h +pattern Derived h i = ReferenceDerived (Id h i) + +{-# COMPLETE ReferenceBuiltin, Derived #-} + type ComponentIndex = Word64 data Id' h = Id h ComponentIndex deriving (Eq, Ord, Show, Functor) @@ -29,3 +35,9 @@ instance Hashable Reference where [Hashable.Tag 0, Hashable.Text txt] tokens (ReferenceDerived (Id h i)) = [Hashable.Tag 1, Hashable.Bytes (Hash.toBytes h), Hashable.Nat i] + +instance Hashable (Reference' Text (Maybe Hash)) where + tokens (ReferenceBuiltin txt) = + [Hashable.Tag 0, Hashable.Text txt] + tokens (ReferenceDerived (Id h i)) = + [Hashable.Tag 1, Hashable.accumulateToken h, Hashable.Nat i] diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs index c1ad4987fd..84381096c0 100644 --- a/codebase2/codebase/U/Codebase/Referent.hs +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -5,14 +5,16 @@ module U.Codebase.Referent where import Data.Text (Text) -import U.Codebase.Reference (Reference') +import U.Codebase.Reference (Reference, Reference') import qualified U.Codebase.Reference as Reference import U.Util.Hash (Hash) import U.Util.Hashable (Hashable (..)) import Data.Word (Word64) import qualified U.Util.Hashable as Hashable -type Referent = Referent' (Reference' Text Hash) (Reference' Text Hash) +type Referent = Referent' Reference Reference +type ReferentH = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) + type ConstructorIndex = Word64 data Referent' rTm rTp @@ -20,13 +22,12 @@ data Referent' rTm rTp | Con rTp ConstructorIndex deriving (Eq, Ord, Show) -instance Hashable Referent where - tokens (Ref r) = Hashable.Tag 0 : Hashable.tokens r - tokens (Con r i) = [Hashable.Tag 1] ++ Hashable.tokens r ++ [Hashable.Nat (fromIntegral i)] - type Id = Id' Hash Hash data Id' hTm hTp = RefId (Reference.Id' hTm) | ConId (Reference.Id' hTp) ConstructorIndex deriving (Eq, Ord, Show, Functor) +instance (Hashable rTm, Hashable 
rTp) => Hashable (Referent' rTm rTp) where + tokens (Ref r) = Hashable.Tag 0 : Hashable.tokens r + tokens (Con r i) = [Hashable.Tag 1] ++ Hashable.tokens r ++ [Hashable.Nat (fromIntegral i)] diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index bf57e6f765..75e4575d7f 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -1,3 +1,6 @@ +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE DeriveFoldable #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE LambdaCase #-} @@ -17,13 +20,16 @@ import Data.Sequence (Seq) import Data.Text (Text) import Data.Word (Word64) import GHC.Generics (Generic, Generic1) -import U.Codebase.Reference (Reference, Reference') +import U.Codebase.Reference (Reference, Reference'(ReferenceBuiltin, ReferenceDerived)) +import qualified U.Codebase.Reference as Reference import U.Codebase.Referent (Referent') import U.Codebase.Type (TypeR) import U.Util.Hash (Hash) import qualified U.Core.ABT as ABT import qualified U.Util.Hashable as H import qualified U.Codebase.Type as Type +import qualified U.Util.Hash as Hash +import qualified Data.Foldable as Foldable type ConstructorId = Word64 @@ -174,7 +180,91 @@ rmapPattern ft fr = go where PSequenceLiteral ps -> PSequenceLiteral (go <$> ps) PSequenceOp p1 op p2 -> PSequenceOp (go p1) op (go p2) +-- * Instances + instance H.Hashable SeqOp where tokens PCons = [H.Tag 0] tokens PSnoc = [H.Tag 1] - tokens PConcat = [H.Tag 2] \ No newline at end of file + tokens PConcat = [H.Tag 2] + +instance H.Hashable (Pattern Text Reference) where + tokens (PUnbound) = [H.Tag 0] + tokens (PVar) = [H.Tag 1] + tokens (PBoolean b) = H.Tag 2 : [H.Tag $ if b then 1 else 0] + tokens (PInt n) = H.Tag 3 : [H.Int n] + tokens (PNat n) = H.Tag 4 : [H.Nat n] + tokens (PFloat f) = H.Tag 5 : H.tokens f + tokens (PConstructor r n args) = + [H.Tag 6, H.accumulateToken r, H.Nat $ 
fromIntegral n, H.accumulateToken args] + tokens (PEffectPure p) = H.Tag 7 : H.tokens p + tokens (PEffectBind r n args k) = + [H.Tag 8, H.accumulateToken r, H.Nat $ fromIntegral n, H.accumulateToken args, H.accumulateToken k] + tokens (PAs p) = H.Tag 9 : H.tokens p + tokens (PText t) = H.Tag 10 : H.tokens t + tokens (PSequenceLiteral ps) = H.Tag 11 : concatMap H.tokens ps + tokens (PSequenceOp l op r) = H.Tag 12 : H.tokens op ++ H.tokens l ++ H.tokens r + tokens (PChar c) = H.Tag 13 : H.tokens c + +instance (Eq v, Show v) => H.Hashable1 (F v) where + hash1 hashCycle hash e + = let (tag, hashed, varint) = + (H.Tag, H.Hashed, H.Nat . fromIntegral) + in + case e of + -- So long as `Reference.Derived` ctors are created using the same + -- hashing function as is used here, this case ensures that references + -- are 'transparent' wrt hash and hashing is unaffected by whether + -- expressions are linked. So for example `x = 1 + 1` and `y = x` hash + -- the same. + Ref (Reference.Derived (Just h) 0) -> H.fromBytes (Hash.toBytes h) + Ref (Reference.Derived h i) -> H.accumulate + [ tag 1 -- it's a term + , tag 1 -- it's a derived reference + , H.accumulateToken (Hash.toBytes <$> h) + , H.Nat i + ] + -- Note: start each layer with leading `1` byte, to avoid collisions + -- with types, which start each layer with leading `0`. + -- See `Hashable1 Type.F` + _ -> + H.accumulate + $ tag 1 -- it's a term + : case e of + Nat n -> tag 64 : H.tokens n + Int i -> tag 65 : H.tokens i + Float d -> tag 66 : H.tokens d + Boolean b -> tag 67 : H.tokens b + Text t -> tag 68 : H.tokens t + Char c -> tag 69 : H.tokens c + Ref (ReferenceBuiltin name) -> [tag 2, H.accumulateToken name] + Ref ReferenceDerived {} -> + error "handled above, but GHC can't figure this out" + App a a2 -> [tag 3, hashed (hash a), hashed (hash a2)] + Ann a t -> [tag 4, hashed (hash a), hashed (ABT.hash t)] + Sequence as -> tag 5 : varint (fromIntegral (length as)) : map + (hashed . 
hash) + (Foldable.toList as) + Lam a -> [tag 6, hashed (hash a)] + -- note: we use `hashCycle` to ensure result is independent of + -- let binding order + LetRec as a -> case hashCycle as of + (hs, hash) -> tag 7 : hashed (hash a) : map hashed hs + -- here, order is significant, so don't use hashCycle + Let b a -> [tag 8, hashed $ hash b, hashed $ hash a] + If b t f -> + [tag 9, hashed $ hash b, hashed $ hash t, hashed $ hash f] + Request r n -> [tag 10, H.accumulateToken r, varint n] + Constructor r n -> [tag 12, H.accumulateToken r, varint n] + Match e branches -> + tag 13 : hashed (hash e) : concatMap h branches + where + h (MatchCase pat guard branch) = concat + [ [H.accumulateToken pat] + , Foldable.toList @Maybe (hashed . hash <$> guard) + , [hashed (hash branch)] + ] + Handle h b -> [tag 15, hashed $ hash h, hashed $ hash b] + And x y -> [tag 16, hashed $ hash x, hashed $ hash y] + Or x y -> [tag 17, hashed $ hash x, hashed $ hash y] + TermLink r -> [tag 18, H.accumulateToken r] + TypeLink r -> [tag 19, H.accumulateToken r] diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index f2440d9e93..bcd3777dd0 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -17,6 +17,8 @@ import Data.Text (Text) import U.Util.Hash (Hash) import U.Codebase.Kind (Kind) import Unsafe.Coerce (unsafeCoerce) +import U.Util.Hashable (Hashable, Hashable1) +import qualified U.Util.Hashable as Hashable -- | For standalone types, like those in Term.Ann type FT = F' Reference @@ -48,4 +50,26 @@ type TypeR r v = ABT.Term (F' r) v () rmap :: Ord v => (r -> r') -> ABT.Term (F' r) v a -> ABT.Term (F' r') v a rmap f = ABT.transform \case Ref r -> Ref (f r) - x -> unsafeCoerce x \ No newline at end of file + x -> unsafeCoerce x + +instance Hashable r => Hashable1 (F' r) where + hash1 hashCycle hash e = + let + (tag, hashed) = (Hashable.Tag, Hashable.Hashed) + -- Note: start each layer with leading `0` byte, to 
avoid collisions with + -- terms, which start each layer with leading `1`. See `Hashable1 Term.F` + in Hashable.accumulate $ tag 0 : case e of + Ref r -> [tag 0, Hashable.accumulateToken r] + Arrow a b -> [tag 1, hashed (hash a), hashed (hash b) ] + App a b -> [tag 2, hashed (hash a), hashed (hash b) ] + Ann a k -> [tag 3, hashed (hash a), Hashable.accumulateToken k ] + -- Example: + -- a) {Remote, Abort} (() -> {Remote} ()) should hash the same as + -- b) {Abort, Remote} (() -> {Remote} ()) but should hash differently from + -- c) {Remote, Abort} (() -> {Abort} ()) + Effects es -> let + (hs, _) = hashCycle es + in tag 4 : map hashed hs + Effect e t -> [tag 5, hashed (hash e), hashed (hash t)] + Forall a -> [tag 6, hashed (hash a)] + IntroOuter a -> [tag 7, hashed (hash a)] diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index 05407359b0..35ca032ae2 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -69,6 +69,40 @@ cycle a t = Term (freeVars t) a (Cycle t) tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) +-- | We ignore annotations in the `Term`, as these should never affect the +-- meaning of the term. +hash :: forall f v a h . 
(Functor f, Hashable1 f, Eq v, Show v, Ord h, Accumulate h) + => Term f v a -> h +hash = hash' [] where + hash' :: [Either [v] v] -> Term f v a -> h + hash' env (Term _ _ t) = case t of + Var v -> maybe die hashInt ind + where lookup (Left cycle) = v `elem` cycle + lookup (Right v') = v == v' + ind = List.findIndex lookup env + hashInt :: Int -> h + hashInt i = Hashable.accumulate [Hashable.Nat $ fromIntegral i] + die = error $ "unknown var in environment: " ++ show v + ++ " environment = " ++ show env + Cycle (unabs -> (vs, t)) -> hash' (Left vs : env) t + Abs v t -> hash' (Right v : env) t + Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t + unabs :: Term f v a -> ([v], Term f v a) + unabs = \case + Term _ _ (Abs hd body) -> + let (tl, body') = unabs body in (hd : tl, body') + t -> ([], t) + hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) + hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = + let + permute p xs = case Vector.fromList xs of xs -> map (xs Vector.!) p + hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] ts) + pt = fst <$> List.sortOn snd hashed + (p,ts') = unzip pt + in case map Right (permute p cycle) ++ envTl of + env -> (map (hash' env) ts', hash' env) + hashCycle env ts = (map (hash' env) ts, hash' env) + -- Hash a strongly connected component and sort its definitions into a canonical order. hashComponent :: (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Ord v, Ord h, Accumulate h) @@ -93,39 +127,6 @@ hashComponent byName = let absChain vs t = foldr (abs ()) t vs absCycle :: Ord v => [v] -> Term f v () -> Term f v () absCycle vs t = cycle () $ absChain vs t - -- | We ignore annotations in the `Term`, as these should never affect the - -- meaning of the term. - hash :: forall f v a h . 
(Functor f, Hashable1 f, Eq v, Show v, Ord h, Accumulate h) - => Term f v a -> h - hash = hash' [] where - hash' :: [Either [v] v] -> Term f v a -> h - hash' env (Term _ _ t) = case t of - Var v -> maybe die hashInt ind - where lookup (Left cycle) = v `elem` cycle - lookup (Right v') = v == v' - ind = List.findIndex lookup env - hashInt :: Int -> h - hashInt i = Hashable.accumulate [Hashable.Nat $ fromIntegral i] - die = error $ "unknown var in environment: " ++ show v - ++ " environment = " ++ show env - Cycle (unabs -> (vs, t)) -> hash' (Left vs : env) t - Abs v t -> hash' (Right v : env) t - Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t - unabs :: Term f v a -> ([v], Term f v a) - unabs = \case - Term _ _ (Abs hd body) -> - let (tl, body') = unabs body in (hd : tl, body') - t -> ([], t) - hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) - hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = - let - permute p xs = case Vector.fromList xs of xs -> map (xs Vector.!) p - hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] 
ts) - pt = fst <$> List.sortOn snd hashed - (p,ts') = unzip pt - in case map Right (permute p cycle) ++ envTl of - env -> (map (hash' env) ts', hash' env) - hashCycle env ts = (map (hash' env) ts, hash' env) -- Implementation detail of hashComponent data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) @@ -136,4 +137,3 @@ instance (Hashable1 f, Functor f) => Hashable1 (Component f) where toks = Hashable.Hashed <$> hs in Hashable.accumulate $ (Hashable.Tag 1 : toks) ++ [Hashable.Hashed (hash a)] Embed fa -> Hashable.hash1 hashCycle hash fa - diff --git a/codebase2/util/U/Util/Hashable.hs b/codebase2/util/U/Util/Hashable.hs index 165933a866..ace3d36066 100644 --- a/codebase2/util/U/Util/Hashable.hs +++ b/codebase2/util/U/Util/Hashable.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE FlexibleInstances #-} module U.Util.Hashable where import qualified Data.Map as Map @@ -31,12 +32,16 @@ class Hashable t where tokens :: Accumulate h => t -> [Token h] instance Hashable a => Hashable [a] where - tokens = map accumulateToken + tokens as = Nat (fromIntegral . length $ as) : map accumulateToken as + +instance Hashable a => Hashable (Maybe a) where + tokens Nothing = [Tag 0] + tokens (Just n) = Tag 1 : tokens n instance (Hashable a, Hashable b) => Hashable (a,b) where tokens (a,b) = [accumulateToken a, accumulateToken b] -instance (Hashable a) => Hashable (Set.Set a) where +instance Hashable a => Hashable (Set.Set a) where tokens = tokens . 
Set.toList instance (Hashable k, Hashable v) => Hashable (Map.Map k v) where From ba48608d4eac94448df9ce521fac47794044adb7 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Oct 2020 12:38:52 -0400 Subject: [PATCH 013/225] save converted term to codebase still need to save type and create type index --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 98 +++++++++---------- 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index f8d39230c0..67780b6667 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -171,14 +171,14 @@ type V1Type = V1.Type.Type V1.Symbol.Symbol () type V1Term = V1.Term.Term V1.Symbol.Symbol () type V1Decl = V1.DD.Decl V1.Symbol.Symbol () -type V2TermH = V2.Term.Term V2.Symbol.Symbol -type V2TypeT = V2.Type.TypeT V2.Symbol.Symbol -type V2TermComponentH = [V2TermH] -type V2TermComponentS = V2.TermFormat.LocallyIndexedComponent +type V2HashTerm = V2.Term.Term V2.Symbol.Symbol +type V2TypeOfTerm = V2.Type.TypeT V2.Symbol.Symbol +type V2HashTermComponent = [V2HashTerm] +type V2DiskTermComponent = V2.TermFormat.LocallyIndexedComponent -type V2DeclH = V2.Decl.Decl V2.Symbol.Symbol -type V2DeclComponentH = [V2DeclH] -type V2DeclComponentS = V2.DeclFormat.LocallyIndexedComponent +type V2HashDecl = V2.Decl.Decl V2.Symbol.Symbol +type V2HashDeclComponent = [V2HashDecl] +type V2DiskDeclComponent = V2.DeclFormat.LocallyIndexedComponent -- type Patch = Patch.Patch V1.Reference -- -- the H stands for "for hashing" @@ -697,7 +697,7 @@ makeLookup l lookupDescription a = -- . 
DD.declDependencies -- $ DD.rmapDecl (fmap $ fromMaybe selfId) decl -saveTermComponent :: DB m => V1 Hash -> V2 Hash -> V2TermComponentS -> m Db.ObjectId +saveTermComponent :: DB m => V1 Hash -> V2 Hash -> V2DiskTermComponent -> m Db.ObjectId saveTermComponent h1 h2 component = do h1Id <- Db.saveHashHash (runV1 h1) h2Id <- Db.saveHashHash (runV2 h2) @@ -761,7 +761,7 @@ type LocalIdState = rewriteType :: (V2.Reference.Reference -> State.State LocalIdState V2.TermFormat.TypeRef) -> - V2TypeT -> State LocalIdState V2.TermFormat.Type + V2TypeOfTerm -> State LocalIdState V2.TermFormat.Type rewriteType doRef = V2.ABT.transformM go where go :: V2.Type.FT k -> State LocalIdState (V2.TermFormat.FT k) go = \case @@ -809,11 +809,11 @@ convertTerm1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> (Tex convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do -- construct v2 term component for hashing let - buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2TypeT + buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2TypeOfTerm buildTermType2H lookup = goType where - goType :: V1Type -> V2TypeT + goType :: V1Type -> V2TypeOfTerm goType = convertABT goABT convertSymbol (const ()) - goABT :: V1.Type.F V1Type -> V2.Type.FT V2TypeT + goABT :: V1.Type.F V1Type -> V2.Type.FT V2TypeOfTerm goABT = \case V1.Type.Ref r -> V2.Type.Ref case r of V1.Reference.Builtin t -> @@ -828,10 +828,10 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do V1.Type.Effects as -> V2.Type.Effects (goType <$> as) V1.Type.Forall a -> V2.Type.Forall (goType a) V1.Type.IntroOuter a -> V2.Type.IntroOuter (goType a) - buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> V1Term -> V2TermH + buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> V1Term -> V2HashTerm buildTerm2H lookup self = goTerm where goTerm = convertABT goABT convertSymbol (const ()) - goABT :: V1.Term.F V1.Symbol.Symbol () V1Term -> V2.Term.F V2.Symbol.Symbol V2TermH + goABT :: V1.Term.F V1.Symbol.Symbol () V1Term 
-> V2.Term.F V2.Symbol.Symbol V2HashTerm lookupTermLink = \case V1.Referent.Ref r -> V2.Referent.Ref (lookupTerm r) V1.Referent.Con r i _ct -> V2.Referent.Con (lookupType r) (fromIntegral i) @@ -896,16 +896,12 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do V1.Pattern.Cons -> V2.Term.PCons V1.Pattern.Snoc -> V2.Term.PSnoc V1.Pattern.Concat -> V2.Term.PConcat - buildTermComponent2S :: (V2 Hash -> Db.ObjectId) - -> V2 Hash - -> V2TermComponentH - -> V2TermComponentS + buildTermComponent2S :: + (V2 Hash -> Db.ObjectId) -> V2 Hash -> V2HashTermComponent -> V2DiskTermComponent buildTermComponent2S getId h0 terms = let - rewrittenTerms :: - [(V2.TermFormat.Term, LocalIdState)] = - - map (flip State.runState mempty . rewriteTerm) terms - rewriteTerm :: V2TermH -> State.State LocalIdState V2.TermFormat.Term + rewrittenTerms :: [(V2.TermFormat.Term, LocalIdState)] = + map (flip State.runState mempty . rewriteTerm) terms + rewriteTerm :: V2HashTerm -> State.State LocalIdState V2.TermFormat.Term rewriteTerm = V2.ABT.transformM go where doText :: Text -> State.State LocalIdState V2.TermFormat.LocalTextId doText t = do @@ -983,69 +979,65 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do varToRefTerm :: (Show v, Ord v) => Map (V1 Int) (V2 Int) -> V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a -> V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a - varToRefTerm lookup = let + varToRefTerm lookup = mapVarToTerm fromLeft $ mapLeft \(V1 i) -> + V2.Term.Ref (V2.Reference.Derived Nothing (fromIntegral i)) + where fromLeft :: Show a => Either a b -> b fromLeft = flip either id \r -> error ("encountered a reference pseudovar " ++ show r ++ " in ABT.Abs") - in mapVarToTerm fromLeft $ mapLeft \(V1 i) -> - V2.Term.Ref (V2.Reference.Derived Nothing (fromIntegral i)) - - v2types :: [V2TypeT] = - map (buildTermType2H lookup1 . 
snd) v1component - -- foo :: Map (Either Int V2.Symbol.Symbol) V2TermH = - - -- |may need an extra pass to put them into their canonical order - (hash2 :: V2 Hash, v2component0) = let - v1terms :: [V1Term] = map fst v1component + rehashComponent :: (V1 Hash -> V2 Hash) -> V1 Hash -> [V1Term] -> (V2 Hash, V2HashTermComponent) + rehashComponent lookup1 hash1 v1terms = + let fromLeft = either id (\x -> error $ "impossibly " ++ show x) + in let indexVars = Left . V1 <$> [0..] namedTerms1 :: [(Either (V1 Int) V2.Symbol.Symbol, V1Term)] namedTerms1 = zip indexVars v1terms - namedTerms2 :: [(Either (V1 Int) V2.Symbol.Symbol, V2TermH)] + namedTerms2 :: [(Either (V1 Int) V2.Symbol.Symbol, V2HashTerm)] namedTerms2 = fmap (second (buildTerm2H lookup1 hash1)) namedTerms1 - namedTermMap :: Map (Either (V1 Int) V2.Symbol.Symbol) V2TermH + namedTermMap :: Map (Either (V1 Int) V2.Symbol.Symbol) V2HashTerm namedTermMap = Map.fromList namedTerms2 - bar :: Show a => Either (V1 Int) a -> V1 Int - bar = either id (\x -> error $ "impossibly " ++ show x) hash2 :: V2 Hash v1Index :: [V1 Int] - (hash2, unzip -> (fmap (either id (\x -> error $ "impossibly " ++ show x)) -> v1Index, v2Terms)) = + -- (h, ([2, 0, 1], [t2, t0, t1]) + (hash2, unzip -> (fmap fromLeft -> v1Index, v2Terms)) = V2.ABT.hashComponent (refToVarTerm <$> namedTermMap) - -- (h, ([2, 0, 1], [t2, t0, t1]) indexMap :: Map (V1 Int) (V2 Int) indexMap = Map.fromList (zip v1Index (V2 <$> [0 :: Int ..])) in (hash2, varToRefTerm indexMap <$> v2Terms) - v2ComponentH :: V2TermComponentH = error "todo" + v2types :: [V2TypeOfTerm] = + map (buildTermType2H lookup1 . 
snd) v1component - -- note: we'd need some special care here if we want to make sure that this - -- hash function is identity for simple references - -- hash2 = V2.ABT.hashComponent error "todo" -- V2 (H.accumulate' v2componentH) + -- |rehash and reorder component + hash2 :: V2 Hash + v2hashComponent :: V2HashTermComponent + (hash2, v2hashComponent) = rehashComponent lookup1 hash1 (map fst v1component) -- construct v2 term component for serializing - -- v2componentS :: V2TermComponentS = - -- buildTermComponent2S lookup2 hash2 v2componentH + v2componentS :: V2DiskTermComponent = + buildTermComponent2S lookup2 hash2 v2hashComponent - -- -- serialize the v2 term component - -- componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2componentS + -- serialize the v2 term component + componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2componentS -- -- construct v2 types for each component element, and save the types to the -- -- to the indices - -- for_ (zip3 [0 ..] v1component v2componentS) $ \(i, (_term1, typ1), term2) -> do + -- for_ (zip [0 ..] v2types) $ \(i, type2) -> do -- let r = V2.Reference.Id componentObjectId i -- let rt = V2.Referent.RefId r -- saveTypeBlobForReferent rt (buildTermType2S (snd . lookup) typ1) -- createTypeSearchIndicesForReferent rt typ1 -- createDependencyIndexForTerm r term2 - error "todo" + error "todo: save types and create type indices for component" -- convertDecl1 :: DB m => (V1 Hash -> (V2 Hash, Db.ObjectId)) -> V1 Hash -> [Decl] -> m () -- convertDecl1 lookup hash1 v1component = do -- -- construct v2 decl component for hashing --- let v2componentH :: Decl2ComponentH = +-- let v2hashComponent :: Decl2ComponentH = -- map (buildDecl2H (fst . lookup) hash1) v1component --- let hash2 = V2 (H.hash v2componentH) +-- let hash2 = V2 (H.hash v2hashComponent) -- let v2componentS :: Decl2ComponentS = -- map (buildDecl2S (snd . 
lookup) hash1) v1component @@ -1053,7 +1045,7 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do -- componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2ComponentS -- let v2componentI :: [Decl2I] = --- map (buildDecl2I hash2) v2componentH +-- map (buildDecl2I hash2) v2hashComponent -- for_ (zip v2componentI [0..]) $ \(decl2, i) -> do -- let r = V2.ReferenceId componentObjectId i From 5f31975d0955f37980434d9c4fcdb5904b452357 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Oct 2020 21:39:12 -0400 Subject: [PATCH 014/225] starting on convertDecl1 also renamed accumulate' to hash --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 33 ++++++++++--------- codebase2/util/U/Util/Hashable.hs | 6 ++-- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index 67780b6667..7d008e7767 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -177,6 +177,7 @@ type V2HashTermComponent = [V2HashTerm] type V2DiskTermComponent = V2.TermFormat.LocallyIndexedComponent type V2HashDecl = V2.Decl.Decl V2.Symbol.Symbol +type V2TypeOfConstructor = V2.Type.TypeD V2.Symbol.Symbol type V2HashDeclComponent = [V2HashDecl] type V2DiskDeclComponent = V2.DeclFormat.LocallyIndexedComponent @@ -708,8 +709,8 @@ saveTermComponent h1 h2 component = do where blob = S.putBytes S.V2.putTermFormat (V2.TermFormat.Term component) --- saveDeclComponent :: DB m => Db.HashId -> [Decl2S] -> m Db.ObjectId --- saveDeclComponent h component = error "todo" -- do +saveDeclComponent :: DB m => V1 Hash -> V2 Hash -> V2DiskDeclComponent -> m Db.ObjectId +saveDeclComponent h component = error "todo" -- do -- -- o <- Db.saveObject h V2.DeclComponent blob -- -- Db.saveHashObject h o 2 -- -- pure o @@ -1015,11 +1016,11 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do (hash2, 
v2hashComponent) = rehashComponent lookup1 hash1 (map fst v1component) -- construct v2 term component for serializing - v2componentS :: V2DiskTermComponent = + v2diskComponent :: V2DiskTermComponent = buildTermComponent2S lookup2 hash2 v2hashComponent -- serialize the v2 term component - componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2componentS + componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2diskComponent -- -- construct v2 types for each component element, and save the types to the -- -- to the indices @@ -1032,17 +1033,19 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do -- createDependencyIndexForTerm r term2 error "todo: save types and create type indices for component" --- convertDecl1 :: DB m => (V1 Hash -> (V2 Hash, Db.ObjectId)) -> V1 Hash -> [Decl] -> m () --- convertDecl1 lookup hash1 v1component = do --- -- construct v2 decl component for hashing --- let v2hashComponent :: Decl2ComponentH = --- map (buildDecl2H (fst . lookup) hash1) v1component --- let hash2 = V2 (H.hash v2hashComponent) - --- let v2componentS :: Decl2ComponentS = --- map (buildDecl2S (snd . 
lookup) hash1) v1component - --- componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2ComponentS +convertDecl1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> V1 Hash -> [V1Decl] -> m () +convertDecl1 lookup1 lookup2 hash1 v1component = do + let + -- convert constructor type (similar to buildTermType2H) + v2ctorTypes :: [V2TypeOfConstructor] = error "todo" + -- rehash and reorder component + hash2 :: V2 Hash + v2hashComponent :: V2HashDeclComponent + (hash2, v2hashComponent) = error "todo: rehashComponent lookup1 hash1 v1component" + -- convert decl component + v2diskComponent :: V2DiskDeclComponent = error "todo" + componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2diskComponent + error "todo: create type indices for each decl in the component" -- let v2componentI :: [Decl2I] = -- map (buildDecl2I hash2) v2hashComponent diff --git a/codebase2/util/U/Util/Hashable.hs b/codebase2/util/U/Util/Hashable.hs index ace3d36066..f8d4147521 100644 --- a/codebase2/util/U/Util/Hashable.hs +++ b/codebase2/util/U/Util/Hashable.hs @@ -23,10 +23,10 @@ class Accumulate h where toBytes :: h -> ByteString accumulateToken :: (Accumulate h, Hashable t) => t -> Token h -accumulateToken = Hashed . accumulate' +accumulateToken = Hashed . hash -accumulate' :: (Accumulate h, Hashable t) => t -> h -accumulate' = accumulate . tokens +hash :: (Accumulate h, Hashable t) => t -> h +hash = accumulate . 
tokens class Hashable t where tokens :: Accumulate h => t -> [Token h] From f59d9cd0fa12f238f07884aa7c57873a4688ea46 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 13 Oct 2020 16:54:08 -0400 Subject: [PATCH 015/225] wip; next is add term to index --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 812 ++--- .../lib/U/Codebase/Convert/TypeUtil.hs | 103 + .../unison-codebase-convert-1to2.cabal | 2 + .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 1 + .../U/Codebase/Sqlite/ObjectType.hs | 10 +- .../U/Codebase/Sqlite/Queries.hs | 56 +- .../U/Codebase/Sqlite/Reference.hs | 27 +- .../U/Codebase/Sqlite/Referent.hs | 19 +- .../U/Codebase/Sqlite/Symbol.hs | 13 +- codebase2/codebase-sqlite/sql/create.sql | 32 +- codebase2/codebase/U/Codebase/Reference.hs | 12 + codebase2/codebase/unison-codebase.cabal | 3 +- codebase2/core/U/Core/ABT.hs | 69 + codebase2/core/U/Core/ABT/Var.hs | 69 + codebase2/core/unison-core.cabal | 1 + parser-typechecker/LICENSE | 19 + parser-typechecker/benchmarks/runtime/Main.hs | 286 ++ parser-typechecker/prettyprintdemo/Main.hs | 68 + parser-typechecker/src/Unison/Builtin.hs | 517 +++ .../src/Unison/Builtin/Decls.hs | 310 ++ parser-typechecker/src/Unison/Codebase.hs | 294 ++ .../src/Unison/Codebase/Branch.hs | 900 +++++ .../Unison/Codebase/Branch/Dependencies.hs | 89 + .../src/Unison/Codebase/BranchDiff.hs | 166 + .../src/Unison/Codebase/BranchUtil.hs | 135 + .../src/Unison/Codebase/Causal.hs | 373 +++ .../src/Unison/Codebase/Classes.hs | 40 + .../src/Unison/Codebase/CodeLookup.hs | 57 + .../src/Unison/Codebase/Editor/AuthorInfo.hs | 59 + .../src/Unison/Codebase/Editor/Command.hs | 191 ++ .../Unison/Codebase/Editor/DisplayThing.hs | 12 + .../src/Unison/Codebase/Editor/Git.hs | 249 ++ .../Unison/Codebase/Editor/HandleCommand.hs | 272 ++ .../src/Unison/Codebase/Editor/HandleInput.hs | 2898 +++++++++++++++++ .../src/Unison/Codebase/Editor/Input.hs | 144 + .../src/Unison/Codebase/Editor/Output.hs | 359 ++ .../Codebase/Editor/Output/BranchDiff.hs | 338 ++ 
.../src/Unison/Codebase/Editor/Propagate.hs | 522 +++ .../src/Unison/Codebase/Editor/RemoteRepo.hs | 32 + .../Unison/Codebase/Editor/SearchResult'.hs | 52 + .../Unison/Codebase/Editor/SlurpComponent.hs | 87 + .../src/Unison/Codebase/Editor/SlurpResult.hs | 391 +++ .../src/Unison/Codebase/Editor/TodoOutput.hs | 63 + .../src/Unison/Codebase/Editor/UriParser.hs | 166 + .../Unison/Codebase/Editor/VersionParser.hs | 27 + .../src/Unison/Codebase/Execute.hs | 56 + .../src/Unison/Codebase/FileCodebase.hs | 282 ++ .../Unison/Codebase/FileCodebase/Common.hs | 590 ++++ .../FileCodebase/SlimCopyRegenerateIndex.hs | 321 ++ .../src/Unison/Codebase/GitError.hs | 23 + .../src/Unison/Codebase/MainTerm.hs | 72 + .../src/Unison/Codebase/Metadata.hs | 72 + .../src/Unison/Codebase/NameEdit.hs | 15 + .../src/Unison/Codebase/Patch.hs | 136 + .../src/Unison/Codebase/Path.hs | 440 +++ .../src/Unison/Codebase/Reflog.hs | 30 + .../src/Unison/Codebase/Runtime.hs | 132 + .../src/Unison/Codebase/SearchResult.hs | 83 + .../src/Unison/Codebase/Serialization.hs | 43 + .../src/Unison/Codebase/Serialization/PutT.hs | 57 + .../src/Unison/Codebase/Serialization/V1.hs | 811 +++++ .../src/Unison/Codebase/ShortBranchHash.hs | 35 + .../src/Unison/Codebase/SyncMode.hs | 3 + .../src/Unison/Codebase/TermEdit.hs | 51 + .../src/Unison/Codebase/TranscriptParser.hs | 427 +++ .../src/Unison/Codebase/TypeEdit.hs | 20 + .../src/Unison/Codebase/Watch.hs | 151 + parser-typechecker/src/Unison/Codecs.hs | 340 ++ parser-typechecker/src/Unison/CommandLine.hs | 221 ++ .../src/Unison/CommandLine/DisplayValues.hs | 96 + .../src/Unison/CommandLine/InputPattern.hs | 92 + .../src/Unison/CommandLine/InputPatterns.hs | 1549 +++++++++ .../src/Unison/CommandLine/Main.hs | 258 ++ .../src/Unison/CommandLine/OutputMessages.hs | 1977 +++++++++++ parser-typechecker/src/Unison/DeclPrinter.hs | 187 ++ parser-typechecker/src/Unison/FileParser.hs | 289 ++ parser-typechecker/src/Unison/FileParsers.hs | 196 ++ 
parser-typechecker/src/Unison/Lexer.hs | 777 +++++ parser-typechecker/src/Unison/NamePrinter.hs | 81 + parser-typechecker/src/Unison/Parser.hs | 443 +++ parser-typechecker/src/Unison/Parsers.hs | 88 + parser-typechecker/src/Unison/Path.hs | 54 + .../src/Unison/PrettyPrintEnv.hs | 142 + .../src/Unison/PrettyTerminal.hs | 51 + parser-typechecker/src/Unison/PrintError.hs | 1243 +++++++ parser-typechecker/src/Unison/Result.hs | 90 + parser-typechecker/src/Unison/Runtime/ANF.hs | 1432 ++++++++ .../src/Unison/Runtime/Builtin.hs | 1465 +++++++++ .../src/Unison/Runtime/Debug.hs | 52 + .../src/Unison/Runtime/Decompile.hs | 120 + .../src/Unison/Runtime/Exception.hs | 18 + .../src/Unison/Runtime/Foreign.hs | 93 + .../src/Unison/Runtime/Foreign/Function.hs | 311 ++ .../src/Unison/Runtime/IOSource.hs | 559 ++++ parser-typechecker/src/Unison/Runtime/IR.hs | 1196 +++++++ .../src/Unison/Runtime/Interface.hs | 225 ++ .../src/Unison/Runtime/MCode.hs | 1229 +++++++ .../src/Unison/Runtime/Machine.hs | 1257 +++++++ .../src/Unison/Runtime/Pattern.hs | 720 ++++ parser-typechecker/src/Unison/Runtime/Rt1.hs | 868 +++++ .../src/Unison/Runtime/Rt1IO.hs | 529 +++ .../src/Unison/Runtime/SparseVector.hs | 128 + .../src/Unison/Runtime/Stack.hs | 640 ++++ .../src/Unison/Runtime/Vector.hs | 54 + .../src/Unison/Runtime/docs.markdown | 240 ++ parser-typechecker/src/Unison/TermParser.hs | 901 +++++ parser-typechecker/src/Unison/TermPrinter.hs | 1029 ++++++ parser-typechecker/src/Unison/TypeParser.hs | 115 + parser-typechecker/src/Unison/TypePrinter.hs | 185 ++ parser-typechecker/src/Unison/Typechecker.hs | 351 ++ .../src/Unison/Typechecker/Components.hs | 88 + .../src/Unison/Typechecker/Context.hs | 1801 ++++++++++ .../src/Unison/Typechecker/Extractor.hs | 343 ++ .../src/Unison/Typechecker/TypeError.hs | 298 ++ .../src/Unison/Typechecker/TypeLookup.hs | 66 + .../src/Unison/Typechecker/TypeVar.hs | 53 + parser-typechecker/src/Unison/UnisonFile.hs | 369 +++ .../src/Unison/Util/AnnotatedText.hs | 199 ++ 
parser-typechecker/src/Unison/Util/Bytes.hs | 96 + parser-typechecker/src/Unison/Util/Cache.hs | 90 + .../src/Unison/Util/ColorText.hs | 129 + .../src/Unison/Util/CycleTable.hs | 39 + .../src/Unison/Util/CyclicEq.hs | 60 + .../src/Unison/Util/CyclicOrd.hs | 54 + .../src/Unison/Util/EnumContainers.hs | 115 + .../src/Unison/Util/Exception.hs | 16 + parser-typechecker/src/Unison/Util/Find.hs | 188 ++ parser-typechecker/src/Unison/Util/Free.hs | 68 + parser-typechecker/src/Unison/Util/Less.hs | 25 + parser-typechecker/src/Unison/Util/Logger.hs | 109 + parser-typechecker/src/Unison/Util/Map.hs | 16 + parser-typechecker/src/Unison/Util/Menu.hs | 286 ++ .../src/Unison/Util/PinBoard.hs | 143 + parser-typechecker/src/Unison/Util/Pretty.hs | 903 +++++ parser-typechecker/src/Unison/Util/Range.hs | 27 + parser-typechecker/src/Unison/Util/Star3.hs | 214 ++ .../src/Unison/Util/SyntaxText.hs | 62 + parser-typechecker/src/Unison/Util/TQueue.hs | 89 + parser-typechecker/src/Unison/Util/Timing.hs | 41 + .../src/Unison/Util/TransitiveClosure.hs | 31 + parser-typechecker/tests/Suite.hs | 87 + .../tests/Unison/Core/Test/Name.hs | 29 + parser-typechecker/tests/Unison/Test/ABT.hs | 44 + parser-typechecker/tests/Unison/Test/ANF.hs | 199 ++ parser-typechecker/tests/Unison/Test/Cache.hs | 80 + .../tests/Unison/Test/Codebase.hs | 40 + .../tests/Unison/Test/Codebase/Causal.hs | 318 ++ .../Unison/Test/Codebase/FileCodebase.hs | 48 + .../tests/Unison/Test/Codebase/Path.hs | 66 + .../tests/Unison/Test/ColorText.hs | 84 + .../tests/Unison/Test/Common.hs | 75 + .../tests/Unison/Test/DataDeclaration.hs | 121 + .../tests/Unison/Test/FileParser.hs | 136 + parser-typechecker/tests/Unison/Test/Git.hs | 523 +++ parser-typechecker/tests/Unison/Test/IO.hs | 114 + parser-typechecker/tests/Unison/Test/Lexer.hs | 207 ++ parser-typechecker/tests/Unison/Test/MCode.hs | 105 + parser-typechecker/tests/Unison/Test/Range.hs | 33 + .../tests/Unison/Test/Referent.hs | 82 + 
parser-typechecker/tests/Unison/Test/Term.hs | 53 + .../tests/Unison/Test/TermParser.hs | 227 ++ .../tests/Unison/Test/TermPrinter.hs | 586 ++++ parser-typechecker/tests/Unison/Test/Type.hs | 33 + .../tests/Unison/Test/TypePrinter.hs | 170 + .../tests/Unison/Test/Typechecker.hs | 33 + .../Unison/Test/Typechecker/Components.hs | 35 + .../tests/Unison/Test/Typechecker/Context.hs | 41 + .../Unison/Test/Typechecker/TypeError.hs | 57 + .../tests/Unison/Test/UnisonSources.hs | 195 ++ .../tests/Unison/Test/UriParser.hs | 84 + .../tests/Unison/Test/Util/Bytes.hs | 62 + .../tests/Unison/Test/Util/PinBoard.hs | 52 + .../tests/Unison/Test/Util/Pretty.hs | 33 + parser-typechecker/tests/Unison/Test/Var.hs | 23 + .../tests/Unison/Test/VersionParser.hs | 26 + parser-typechecker/transcripts/Transcripts.hs | 87 + .../unison-parser-typechecker.cabal | 385 +++ parser-typechecker/unison/Main.hs | 317 ++ parser-typechecker/unison/System/Path.hs | 106 + parser-typechecker/unison/Version.hs | 16 + unison-core/LICENSE | 19 + unison-core/src/Unison/ABT.hs | 715 ++++ unison-core/src/Unison/ABT/Normalized.hs | 134 + unison-core/src/Unison/Blank.hs | 22 + unison-core/src/Unison/ConstructorType.hs | 8 + unison-core/src/Unison/DataDeclaration.hs | 413 +++ unison-core/src/Unison/Hash.hs | 109 + unison-core/src/Unison/HashQualified'.hs | 126 + unison-core/src/Unison/HashQualified.hs | 169 + unison-core/src/Unison/Hashable.hs | 94 + unison-core/src/Unison/Kind.hs | 15 + unison-core/src/Unison/LabeledDependency.hs | 56 + unison-core/src/Unison/Name.hs | 173 + unison-core/src/Unison/NameSegment.hs | 41 + unison-core/src/Unison/Names2.hs | 334 ++ unison-core/src/Unison/Names3.hs | 240 ++ unison-core/src/Unison/Paths.hs | 204 ++ unison-core/src/Unison/Pattern.hs | 165 + unison-core/src/Unison/PatternCompat.hs | 30 + unison-core/src/Unison/Prelude.hs | 62 + unison-core/src/Unison/Reference.hs | 179 + unison-core/src/Unison/Reference/Util.hs | 22 + unison-core/src/Unison/Referent.hs | 128 + 
unison-core/src/Unison/Settings.hs | 18 + unison-core/src/Unison/ShortHash.hs | 92 + unison-core/src/Unison/Symbol.hs | 35 + unison-core/src/Unison/Term.hs | 1123 +++++++ unison-core/src/Unison/Type.hs | 648 ++++ unison-core/src/Unison/Util/Components.hs | 48 + unison-core/src/Unison/Util/List.hs | 65 + unison-core/src/Unison/Util/Monoid.hs | 27 + unison-core/src/Unison/Util/Relation.hs | 496 +++ unison-core/src/Unison/Util/Relation3.hs | 120 + unison-core/src/Unison/Util/Relation4.hs | 122 + unison-core/src/Unison/Util/Set.hs | 10 + unison-core/src/Unison/Var.hs | 168 + unison-core/unison-core.cabal | 117 + yaks/easytest/LICENSE | 19 + yaks/easytest/README.markdown | 264 ++ yaks/easytest/easytest.cabal | 95 + yaks/easytest/src/EasyTest.hs | 458 +++ yaks/easytest/tests/Suite.hs | 34 + 222 files changed, 54074 insertions(+), 459 deletions(-) create mode 100644 codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs create mode 100644 codebase2/core/U/Core/ABT/Var.hs create mode 100644 parser-typechecker/LICENSE create mode 100644 parser-typechecker/benchmarks/runtime/Main.hs create mode 100644 parser-typechecker/prettyprintdemo/Main.hs create mode 100644 parser-typechecker/src/Unison/Builtin.hs create mode 100644 parser-typechecker/src/Unison/Builtin/Decls.hs create mode 100644 parser-typechecker/src/Unison/Codebase.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Branch.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs create mode 100644 parser-typechecker/src/Unison/Codebase/BranchDiff.hs create mode 100644 parser-typechecker/src/Unison/Codebase/BranchUtil.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Causal.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Classes.hs create mode 100644 parser-typechecker/src/Unison/Codebase/CodeLookup.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Command.hs 
create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Git.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Input.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Output.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Execute.hs create mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase.hs create mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs create mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs create mode 100644 parser-typechecker/src/Unison/Codebase/GitError.hs create mode 100644 parser-typechecker/src/Unison/Codebase/MainTerm.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Metadata.hs create mode 100644 parser-typechecker/src/Unison/Codebase/NameEdit.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Patch.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Path.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Reflog.hs 
create mode 100644 parser-typechecker/src/Unison/Codebase/Runtime.hs create mode 100644 parser-typechecker/src/Unison/Codebase/SearchResult.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Serialization.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Serialization/V1.hs create mode 100644 parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs create mode 100644 parser-typechecker/src/Unison/Codebase/SyncMode.hs create mode 100644 parser-typechecker/src/Unison/Codebase/TermEdit.hs create mode 100644 parser-typechecker/src/Unison/Codebase/TranscriptParser.hs create mode 100644 parser-typechecker/src/Unison/Codebase/TypeEdit.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Watch.hs create mode 100644 parser-typechecker/src/Unison/Codecs.hs create mode 100644 parser-typechecker/src/Unison/CommandLine.hs create mode 100644 parser-typechecker/src/Unison/CommandLine/DisplayValues.hs create mode 100644 parser-typechecker/src/Unison/CommandLine/InputPattern.hs create mode 100644 parser-typechecker/src/Unison/CommandLine/InputPatterns.hs create mode 100644 parser-typechecker/src/Unison/CommandLine/Main.hs create mode 100644 parser-typechecker/src/Unison/CommandLine/OutputMessages.hs create mode 100644 parser-typechecker/src/Unison/DeclPrinter.hs create mode 100644 parser-typechecker/src/Unison/FileParser.hs create mode 100644 parser-typechecker/src/Unison/FileParsers.hs create mode 100644 parser-typechecker/src/Unison/Lexer.hs create mode 100644 parser-typechecker/src/Unison/NamePrinter.hs create mode 100644 parser-typechecker/src/Unison/Parser.hs create mode 100644 parser-typechecker/src/Unison/Parsers.hs create mode 100644 parser-typechecker/src/Unison/Path.hs create mode 100644 parser-typechecker/src/Unison/PrettyPrintEnv.hs create mode 100644 parser-typechecker/src/Unison/PrettyTerminal.hs create mode 100644 parser-typechecker/src/Unison/PrintError.hs 
create mode 100644 parser-typechecker/src/Unison/Result.hs create mode 100644 parser-typechecker/src/Unison/Runtime/ANF.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Builtin.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Debug.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Decompile.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Exception.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Foreign.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Foreign/Function.hs create mode 100644 parser-typechecker/src/Unison/Runtime/IOSource.hs create mode 100644 parser-typechecker/src/Unison/Runtime/IR.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Interface.hs create mode 100644 parser-typechecker/src/Unison/Runtime/MCode.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Machine.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Pattern.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Rt1.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Rt1IO.hs create mode 100644 parser-typechecker/src/Unison/Runtime/SparseVector.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Stack.hs create mode 100644 parser-typechecker/src/Unison/Runtime/Vector.hs create mode 100644 parser-typechecker/src/Unison/Runtime/docs.markdown create mode 100644 parser-typechecker/src/Unison/TermParser.hs create mode 100644 parser-typechecker/src/Unison/TermPrinter.hs create mode 100644 parser-typechecker/src/Unison/TypeParser.hs create mode 100644 parser-typechecker/src/Unison/TypePrinter.hs create mode 100644 parser-typechecker/src/Unison/Typechecker.hs create mode 100644 parser-typechecker/src/Unison/Typechecker/Components.hs create mode 100644 parser-typechecker/src/Unison/Typechecker/Context.hs create mode 100644 parser-typechecker/src/Unison/Typechecker/Extractor.hs create mode 100644 parser-typechecker/src/Unison/Typechecker/TypeError.hs create mode 100644 
parser-typechecker/src/Unison/Typechecker/TypeLookup.hs create mode 100644 parser-typechecker/src/Unison/Typechecker/TypeVar.hs create mode 100644 parser-typechecker/src/Unison/UnisonFile.hs create mode 100644 parser-typechecker/src/Unison/Util/AnnotatedText.hs create mode 100644 parser-typechecker/src/Unison/Util/Bytes.hs create mode 100644 parser-typechecker/src/Unison/Util/Cache.hs create mode 100644 parser-typechecker/src/Unison/Util/ColorText.hs create mode 100644 parser-typechecker/src/Unison/Util/CycleTable.hs create mode 100644 parser-typechecker/src/Unison/Util/CyclicEq.hs create mode 100644 parser-typechecker/src/Unison/Util/CyclicOrd.hs create mode 100644 parser-typechecker/src/Unison/Util/EnumContainers.hs create mode 100644 parser-typechecker/src/Unison/Util/Exception.hs create mode 100644 parser-typechecker/src/Unison/Util/Find.hs create mode 100644 parser-typechecker/src/Unison/Util/Free.hs create mode 100644 parser-typechecker/src/Unison/Util/Less.hs create mode 100644 parser-typechecker/src/Unison/Util/Logger.hs create mode 100644 parser-typechecker/src/Unison/Util/Map.hs create mode 100644 parser-typechecker/src/Unison/Util/Menu.hs create mode 100644 parser-typechecker/src/Unison/Util/PinBoard.hs create mode 100644 parser-typechecker/src/Unison/Util/Pretty.hs create mode 100644 parser-typechecker/src/Unison/Util/Range.hs create mode 100644 parser-typechecker/src/Unison/Util/Star3.hs create mode 100644 parser-typechecker/src/Unison/Util/SyntaxText.hs create mode 100644 parser-typechecker/src/Unison/Util/TQueue.hs create mode 100644 parser-typechecker/src/Unison/Util/Timing.hs create mode 100644 parser-typechecker/src/Unison/Util/TransitiveClosure.hs create mode 100644 parser-typechecker/tests/Suite.hs create mode 100644 parser-typechecker/tests/Unison/Core/Test/Name.hs create mode 100644 parser-typechecker/tests/Unison/Test/ABT.hs create mode 100644 parser-typechecker/tests/Unison/Test/ANF.hs create mode 100644 
parser-typechecker/tests/Unison/Test/Cache.hs create mode 100644 parser-typechecker/tests/Unison/Test/Codebase.hs create mode 100644 parser-typechecker/tests/Unison/Test/Codebase/Causal.hs create mode 100644 parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs create mode 100644 parser-typechecker/tests/Unison/Test/Codebase/Path.hs create mode 100644 parser-typechecker/tests/Unison/Test/ColorText.hs create mode 100644 parser-typechecker/tests/Unison/Test/Common.hs create mode 100644 parser-typechecker/tests/Unison/Test/DataDeclaration.hs create mode 100644 parser-typechecker/tests/Unison/Test/FileParser.hs create mode 100644 parser-typechecker/tests/Unison/Test/Git.hs create mode 100644 parser-typechecker/tests/Unison/Test/IO.hs create mode 100644 parser-typechecker/tests/Unison/Test/Lexer.hs create mode 100644 parser-typechecker/tests/Unison/Test/MCode.hs create mode 100644 parser-typechecker/tests/Unison/Test/Range.hs create mode 100644 parser-typechecker/tests/Unison/Test/Referent.hs create mode 100644 parser-typechecker/tests/Unison/Test/Term.hs create mode 100644 parser-typechecker/tests/Unison/Test/TermParser.hs create mode 100755 parser-typechecker/tests/Unison/Test/TermPrinter.hs create mode 100644 parser-typechecker/tests/Unison/Test/Type.hs create mode 100755 parser-typechecker/tests/Unison/Test/TypePrinter.hs create mode 100644 parser-typechecker/tests/Unison/Test/Typechecker.hs create mode 100644 parser-typechecker/tests/Unison/Test/Typechecker/Components.hs create mode 100644 parser-typechecker/tests/Unison/Test/Typechecker/Context.hs create mode 100644 parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs create mode 100644 parser-typechecker/tests/Unison/Test/UnisonSources.hs create mode 100644 parser-typechecker/tests/Unison/Test/UriParser.hs create mode 100644 parser-typechecker/tests/Unison/Test/Util/Bytes.hs create mode 100644 parser-typechecker/tests/Unison/Test/Util/PinBoard.hs create mode 100644 
parser-typechecker/tests/Unison/Test/Util/Pretty.hs create mode 100644 parser-typechecker/tests/Unison/Test/Var.hs create mode 100644 parser-typechecker/tests/Unison/Test/VersionParser.hs create mode 100644 parser-typechecker/transcripts/Transcripts.hs create mode 100644 parser-typechecker/unison-parser-typechecker.cabal create mode 100644 parser-typechecker/unison/Main.hs create mode 100644 parser-typechecker/unison/System/Path.hs create mode 100644 parser-typechecker/unison/Version.hs create mode 100644 unison-core/LICENSE create mode 100644 unison-core/src/Unison/ABT.hs create mode 100644 unison-core/src/Unison/ABT/Normalized.hs create mode 100644 unison-core/src/Unison/Blank.hs create mode 100644 unison-core/src/Unison/ConstructorType.hs create mode 100644 unison-core/src/Unison/DataDeclaration.hs create mode 100644 unison-core/src/Unison/Hash.hs create mode 100644 unison-core/src/Unison/HashQualified'.hs create mode 100644 unison-core/src/Unison/HashQualified.hs create mode 100644 unison-core/src/Unison/Hashable.hs create mode 100644 unison-core/src/Unison/Kind.hs create mode 100644 unison-core/src/Unison/LabeledDependency.hs create mode 100644 unison-core/src/Unison/Name.hs create mode 100644 unison-core/src/Unison/NameSegment.hs create mode 100644 unison-core/src/Unison/Names2.hs create mode 100644 unison-core/src/Unison/Names3.hs create mode 100644 unison-core/src/Unison/Paths.hs create mode 100644 unison-core/src/Unison/Pattern.hs create mode 100644 unison-core/src/Unison/PatternCompat.hs create mode 100644 unison-core/src/Unison/Prelude.hs create mode 100644 unison-core/src/Unison/Reference.hs create mode 100644 unison-core/src/Unison/Reference/Util.hs create mode 100644 unison-core/src/Unison/Referent.hs create mode 100644 unison-core/src/Unison/Settings.hs create mode 100644 unison-core/src/Unison/ShortHash.hs create mode 100644 unison-core/src/Unison/Symbol.hs create mode 100644 unison-core/src/Unison/Term.hs create mode 100644 
unison-core/src/Unison/Type.hs create mode 100644 unison-core/src/Unison/Util/Components.hs create mode 100644 unison-core/src/Unison/Util/List.hs create mode 100644 unison-core/src/Unison/Util/Monoid.hs create mode 100644 unison-core/src/Unison/Util/Relation.hs create mode 100644 unison-core/src/Unison/Util/Relation3.hs create mode 100644 unison-core/src/Unison/Util/Relation4.hs create mode 100644 unison-core/src/Unison/Util/Set.hs create mode 100644 unison-core/src/Unison/Var.hs create mode 100644 unison-core/unison-core.cabal create mode 100644 yaks/easytest/LICENSE create mode 100644 yaks/easytest/README.markdown create mode 100644 yaks/easytest/easytest.cabal create mode 100644 yaks/easytest/src/EasyTest.hs create mode 100644 yaks/easytest/tests/Suite.hs diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index 7d008e7767..ae8e83ac00 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -1,16 +1,15 @@ -{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE DerivingVia #-} {-# LANGUAGE EmptyCase #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE ViewPatterns #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} -{-# LANGUAGE DeriveFunctor #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE BlockArguments #-} - +{-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-incomplete-patterns #-} {-# OPTIONS_GHC -Wno-unused-imports #-} {-# OPTIONS_GHC -Wno-unused-do-bind #-} @@ -19,76 +18,80 @@ module U.Codebase.Convert.SyncV1V2 where -import qualified U.Util.Hashable as H -import Database.SQLite.Simple.FromField (FromField) -import 
U.Util.Hash (Hash) -import qualified Unison.Codebase.V1.FileCodebase as V1 -import qualified Unison.Codebase.V1.Branch.Raw as V1 +import Control.Lens (mapMOf, over) +import Control.Monad.Except (MonadError, runExceptT, throwError) +import Control.Monad.Extra ((>=>), ifM) +import Control.Monad.Reader (ReaderT (runReaderT)) +import qualified Control.Monad.State as State +import Control.Monad.State (State) +import Data.Bifunctor (Bifunctor (first), second) +import Data.Bytes.Get (MonadGet) +import Data.Either (partitionEithers) +import Data.Either.Extra (mapLeft) +import Data.Foldable (Foldable (toList), for_) +import Data.Foldable (Foldable (foldl')) +import Data.Functor ((<&>)) +import qualified Data.List as List +import Data.List.Extra (nubOrd) +import Data.Map (Map) +import qualified Data.Map.Strict as Map +import qualified Data.Set as Set import Data.Text (Text) -import Database.SQLite.Simple.ToField (ToField) -import qualified Unison.Codebase.V1.Reference as V1.Reference -import UnliftIO (liftIO, MonadIO) +import qualified Data.Text as Text +import Data.Traversable (for) +import Data.Tuple (swap) +import Data.Vector (Vector) +import qualified Data.Vector as Vector import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as SQLite -import Control.Monad.Except (MonadError, throwError, runExceptT) -import Control.Monad.Reader (ReaderT(runReaderT)) -import qualified U.Util.Base32Hex as Base32Hex -import qualified U.Util.Hash as Hash -import qualified Data.Text as Text -import UnliftIO.Directory (listDirectory) -import Data.Functor ((<&>)) +import Database.SQLite.Simple.FromField (FromField) +import Database.SQLite.Simple.ToField (ToField) +import qualified U.Codebase.Convert.TypeUtil as TypeUtil +import qualified U.Codebase.Decl as V2.Decl +import qualified U.Codebase.Kind as V2.Kind import qualified U.Codebase.Reference as V2.Reference -import qualified U.Codebase.Sqlite.Reference as V2S.Reference -import Unison.Codebase.V1.FileCodebase 
(CodebasePath) -import U.Codebase.Sqlite.Queries (DB) -import Data.Map (Map) -import qualified Data.Map.Strict as Map -import Control.Monad.Extra (ifM) +import qualified U.Codebase.Referent as V2.Referent +import qualified U.Codebase.Referent as V2.Sqlite.Referent import Data.String.Here.Uninterpolated (here) import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Queries as Db -import qualified Unison.Codebase.V1.Symbol as V1.Symbol -import qualified Unison.Codebase.V1.Term as V1.Term -import qualified Unison.Codebase.V1.Type as V1.Type -import qualified Unison.Codebase.V1.DataDeclaration as V1.DD +import qualified U.Codebase.Sqlite.Reference as V2S.Reference +import qualified U.Codebase.Sqlite.Reference as V2.Sqlite.Reference +import qualified U.Codebase.Sqlite.Serialization as S.V2 +import qualified U.Codebase.Sqlite.Symbol as V2.Symbol import qualified U.Codebase.Sqlite.Term.Format as V2.TermFormat import qualified U.Codebase.Term as V2.Term -import qualified U.Codebase.Sqlite.Symbol as V2.Symbol -import qualified U.Codebase.Type as V2.Type -import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Sqlite.Decl.Format as V2.DeclFormat -import qualified Unison.Codebase.V1.FileCodebase as V1.FC -import qualified Unison.Codebase.V1.Serialization.Serialization as V1.S -import qualified Unison.Codebase.V1.Serialization.V1 as V1.S -import Data.Bytes.Get (MonadGet) -import Data.Foldable (for_, Foldable(toList)) -import Data.Traversable (for) -import qualified Unison.Codebase.V1.LabeledDependency as V1.LD -import Data.List.Extra (nubOrd) -import Data.Either (partitionEithers) -import U.Util.Base32Hex (Base32Hex) -import Data.Bifunctor (second, Bifunctor(first)) -import qualified U.Codebase.Referent as V2.Referent +import qualified U.Codebase.Sqlite.LocalIds as V2.LocalIds import qualified U.Codebase.Sqlite.ObjectType as V2.OT -import qualified U.Util.Serialization as S -import qualified U.Codebase.Sqlite.Serialization as S.V2 
+import U.Codebase.Sqlite.Queries (DB) +import qualified U.Codebase.Type as V2.Type +import qualified U.Core.ABT as V2.ABT +import qualified U.Util.Base32Hex as Base32Hex +import U.Util.Base32Hex (Base32Hex) +import U.Util.Hash (Hash) +import qualified U.Util.Hash as Hash +import qualified U.Util.Hashable as H import U.Util.Monoid (foldMapM) -import Data.Foldable (Foldable(foldl')) +import qualified U.Util.Serialization as S import qualified Unison.Codebase.V1.ABT as V1.ABT -import qualified Data.Set as Set -import qualified Unison.Codebase.V1.Type.Kind as V1.Kind -import qualified U.Codebase.Kind as V2.Kind -import qualified U.Core.ABT as V2.ABT -import qualified Unison.Codebase.V1.Term.Pattern as V1.Pattern +import qualified Unison.Codebase.V1.Branch.Raw as V1 +import qualified Unison.Codebase.V1.DataDeclaration as V1.DD +import qualified Unison.Codebase.V1.FileCodebase as V1 +import Unison.Codebase.V1.FileCodebase (CodebasePath) +import qualified Unison.Codebase.V1.FileCodebase as V1.FC +import qualified Unison.Codebase.V1.LabeledDependency as V1.LD +import qualified Unison.Codebase.V1.Reference as V1.Reference import qualified Unison.Codebase.V1.Referent as V1.Referent -import qualified Control.Monad.State as State -import Control.Monad.State (State) -import qualified U.Codebase.Sqlite.LocalIds as V2.LocalIds -import Data.Vector (Vector) -import qualified Data.Vector as Vector -import qualified Data.List as List -import Data.Tuple (swap) -import Data.Either.Extra (mapLeft) +import qualified Unison.Codebase.V1.Serialization.Serialization as V1.S +import qualified Unison.Codebase.V1.Serialization.V1 as V1.S +import qualified Unison.Codebase.V1.Symbol as V1.Symbol +import qualified Unison.Codebase.V1.Term as V1.Term +import qualified Unison.Codebase.V1.Term.Pattern as V1.Pattern +import qualified Unison.Codebase.V1.Type as V1.Type +import qualified Unison.Codebase.V1.Type.Kind as V1.Kind +import UnliftIO (MonadIO, liftIO) +import UnliftIO.Directory (listDirectory) 
newtype V1 a = V1 {runV1 :: a} deriving (Eq, Ord, Show) @@ -134,13 +137,13 @@ newtype CausalHash h = CausalHash h -- (NamespaceHash Hash) -- (CausalHash Hash) --- |things that appear in a serialized RawBranch --- type V2EntityRefS = --- V2EntityRef --- Db.ObjectId --- (PatchHash Db.ObjectId) --- (NamespaceHash Db.NamespaceHashId) --- (CausalHash Db.CausalHashId) +-- | things that appear in a serialized RawBranch +-- type V2EntityRefS = +-- V2EntityRef +-- Db.ObjectId +-- (PatchHash Db.ObjectId) +-- (NamespaceHash Db.NamespaceHashId) +-- (CausalHash Db.CausalHashId) -- data V2EntityRef hr hp hn hc -- = Decl2 V2.Reference.Id @@ -167,30 +170,39 @@ data FatalError | InvalidTypeOfTerm V1.Reference.Id | InvalidDecl V1.Reference.Id + type V1Type = V1.Type.Type V1.Symbol.Symbol () type V1Term = V1.Term.Term V1.Symbol.Symbol () + type V1Decl = V1.DD.Decl V1.Symbol.Symbol () type V2HashTerm = V2.Term.Term V2.Symbol.Symbol -type V2TypeOfTerm = V2.Type.TypeT V2.Symbol.Symbol +type V2HashTypeOfTerm = V2.Type.TypeT V2.Symbol.Symbol + +type V2DiskTypeOfTerm = V2.Type.TypeR V2.Sqlite.Reference.Reference V2.Symbol.Symbol type V2HashTermComponent = [V2HashTerm] + type V2DiskTermComponent = V2.TermFormat.LocallyIndexedComponent + type V2HashDecl = V2.Decl.Decl V2.Symbol.Symbol type V2TypeOfConstructor = V2.Type.TypeD V2.Symbol.Symbol + type V2HashDeclComponent = [V2HashDecl] type V2DiskDeclComponent = V2.DeclFormat.LocallyIndexedComponent -- type Patch = Patch.Patch V1.Reference + -- -- the H stands for "for hashing" -- -- the S stands for "for serialization" + -- type Term2ComponentH = [Term2 Hash] -- type Term2ComponentS = [Term2 Db.ObjectId] + -- type Decl2ComponentH = [Decl2 (Maybe Hash)] -- type Decl2S = Decl2 Db.ObjectId -- type Decl2ComponentS = [Decl2S] - -- -- these have maybes in them to indicate a self-component reference -- type Term2 h = V2.Term h -- type Decl2 h = DD.DeclR (V2.Reference h) Symbol () @@ -271,32 +283,33 @@ syncV1V2 c rootDir = liftIO $ 
SQLite.withTransaction c . runExceptT . flip runRe Right (getHash, getObjId, getTextId) -> do convertTerm1 getHash getObjId getTextId h e convertEntities rest - -- Decl1 h -> - -- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do - -- d <- loadDecl1 rootDir declsDirComponents h - -- matchDecl1Dependencies h d >>= \case - -- Left missing -> convertEntities (missing ++ all) - -- Right lookup -> do - -- convertDecl1 (error "todo: lookup") h d - -- convertEntities rest - -- Patch1 h -> - -- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do - -- p <- loadPatch1 rootDir h - -- matchPatch1Dependencies ("patch " ++ show h) p >>= \case - -- Left missing -> convertEntities (missing ++ all) - -- Right lookup -> do - -- -- hashId <- Db.saveHashByteString (runV1 h) - -- -- savePatch hashId (Patch.hmap (lookup . V1) p) - -- error "todo" - -- convertEntities rest - -- Branch1 (V1.BranchHash h) -> - -- ifM (existsObjectWithHash h) (convertEntities rest) $ do - -- cb <- loadCausalBranch1 rootDir (V1 h) - -- matchCausalBranch1Dependencies ("branch " ++ show h) cb >>= \case - -- Left missing -> convertEntities (missing ++ all) - -- Right (lookupObject, lookupCausal) -> do - -- convertCausalBranch1 lookupObject lookupCausal cb - -- convertEntities rest +-- Decl1 h -> +-- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do +-- d <- loadDecl1 rootDir declsDirComponents h +-- matchDecl1Dependencies h d >>= \case +-- Left missing -> convertEntities (missing ++ all) +-- Right lookup -> do +-- convertDecl1 (error "todo: lookup") h d +-- convertEntities rest +-- Patch1 h -> +-- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do +-- p <- loadPatch1 rootDir h +-- matchPatch1Dependencies ("patch " ++ show h) p >>= \case +-- Left missing -> convertEntities (missing ++ all) +-- Right lookup -> do +-- -- hashId <- Db.saveHashByteString (runV1 h) +-- -- savePatch hashId (Patch.hmap (lookup . 
V1) p) +-- error "todo" +-- convertEntities rest +-- Branch1 (V1.BranchHash h) -> +-- ifM (existsObjectWithHash h) (convertEntities rest) $ do +-- cb <- loadCausalBranch1 rootDir (V1 h) +-- matchCausalBranch1Dependencies ("branch " ++ show h) cb >>= \case +-- Left missing -> convertEntities (missing ++ all) +-- Right (lookupObject, lookupCausal) -> do +-- convertCausalBranch1 lookupObject lookupCausal cb +-- convertEntities rest + -- -- | load a causal branch raw thingo -- loadCausalBranch1 :: @@ -435,6 +448,11 @@ lookupObject r@(runV1 . v1EntityRefToHash -> h) = -- WHERE old_hash.base32 = ? -- |] +saveTypeBlobForTerm :: DB m => V2.Sqlite.Reference.Id -> V2DiskTypeOfTerm -> m () +saveTypeBlobForTerm r typ = Db.saveTypeOfTerm r blob + where + blob = S.putBytes (S.V2.putType S.V2.putReference S.V2.putSymbol) typ + -- -- | no Maybes here, as all relevant ObjectId can be known in advance -- saveTypeBlobForReferent :: DB m => V2.ReferentId Db.ObjectId -> Type2S -> m () -- saveTypeBlobForReferent r type2s = @@ -449,8 +467,11 @@ getObjectIdByBase32Hex h = -- augmentLookup :: Ord a => (a -> b) -> Map a b -> a -> b -- augmentLookup f m a = fromMaybe (f a) (Map.lookup a m) --- saveReferenceAsReference2 :: DB m => Reference -> m (V2.Reference Db.HashId) --- saveReferenceAsReference2 = mapMOf Db.referenceTraversal Db.saveHashByteString +-- Control.Monad (>=>) :: Monad m => (a -> m b) -> (b -> m c) -> a -> m c +saveReferenceAsReference2 :: DB m => V2.Reference.Reference -> m V2.Sqlite.Reference.ReferenceH +saveReferenceAsReference2 = + mapMOf V2.Reference.h Db.saveHashHash >=> + mapMOf V2.Reference.t Db.saveText -- | load a term component by its hash. -- A v1 term component is split across an arbitrary number of files. 
@@ -647,36 +668,36 @@ makeLookup l lookupDescription a = ++ " in the map for " ++ lookupDescription --- -- --- createTypeSearchIndicesForReferent :: DB m => (V2.ReferentId Db.ObjectId) -> Type -> m () --- createTypeSearchIndicesForReferent r typ = do --- let typeForIndexing = Type.removeAllEffectVars typ - --- -- add the term to the type index --- typeReferenceForIndexing :: (V2.Reference Db.HashId) <- --- saveReferenceAsReference2 (Type.toReference typeForIndexing) - --- Db.addToFindByTypeIndex r typeReferenceForIndexing - --- -- add the term to the type mentions index --- typeMentionsForIndexing :: [V2.Reference Db.HashId] <- --- traverse --- saveReferenceAsReference2 --- (toList $ Type.toReferenceMentions typeForIndexing) - --- traverse_ (Db.addToFindByTypeMentionsIndex r) typeMentionsForIndexing --- where --- addTermToFindByTypeIndex :: DB m => (V2.ReferentId Db.ObjectId) -> Reference -> m () --- addTermToFindByTypeIndex termRef typeRef = do --- typeRef2 :: (V2.Reference Db.HashId) <- --- saveReferenceAsReference2 typeRef --- Db.addToFindByTypeIndex termRef typeRef2 --- addTermToTypeMentionsIndex :: --- (DB m, Foldable f) => (V2.ReferentId Db.ObjectId) -> f Reference -> m () --- addTermToTypeMentionsIndex termRef typeRefs = do --- typeRefs2 :: [V2.Reference Db.HashId] <- --- traverse saveReferenceAsReference2 (toList typeRefs) --- traverse_ (Db.addToFindByTypeMentionsIndex termRef) typeRefs2 +createTypeSearchIndicesForReferent :: DB m => V2.Sqlite.Referent.Id -> V2HashTypeOfTerm -> m () +createTypeSearchIndicesForReferent r typ = do + let typeForIndexing = TypeUtil.removeAllEffectVars typ + + -- add the term to the type index + typeReferenceForIndexing :: V2.Sqlite.Reference.ReferenceH <- + saveReferenceAsReference2 (TypeUtil.toReference typeForIndexing) + + -- Db.addToFindByTypeIndex r typeReferenceForIndexing + + -- -- add the term to the type mentions index + -- typeMentionsForIndexing :: [V2.Sqlite.Reference.ReferenceH] <- + -- traverse + -- 
saveReferenceAsReference2 + -- (toList $ Type.toReferenceMentions typeForIndexing) + + -- traverse_ (Db.addToFindByTypeMentionsIndex r) typeMentionsForIndexing + error "todo" + -- where + -- addTermToFindByTypeIndex :: DB m => (V2.ReferentId Db.ObjectId) -> Reference -> m () + -- addTermToFindByTypeIndex termRef typeRef = do + -- typeRef2 :: (V2.Reference Db.HashId) <- + -- saveReferenceAsReference2 typeRef + -- Db.addToFindByTypeIndex termRef typeRef2 + -- addTermToTypeMentionsIndex :: + -- (DB m, Foldable f) => (V2.ReferentId Db.ObjectId) -> f Reference -> m () + -- addTermToTypeMentionsIndex termRef typeRefs = do + -- typeRefs2 :: [V2.Reference Db.HashId] <- + -- traverse saveReferenceAsReference2 (toList typeRefs) + -- traverse_ (Db.addToFindByTypeMentionsIndex termRef) typeRefs2 -- createDependencyIndexForTerm :: DB m => V2.ReferenceId Db.ObjectId -> Term2 Db.ObjectId-> m () -- createDependencyIndexForTerm tmRef@(V2.ReferenceId selfId _i) tm = error "todo" @@ -711,11 +732,11 @@ saveTermComponent h1 h2 component = do saveDeclComponent :: DB m => V1 Hash -> V2 Hash -> V2DiskDeclComponent -> m Db.ObjectId saveDeclComponent h component = error "todo" -- do --- -- o <- Db.saveObject h V2.DeclComponent blob --- -- Db.saveHashObject h o 2 --- -- pure o --- -- where --- -- blob = S.putBytes (S.V1.putFoldable (V2.putDecl putObjectId putV putA)) component + -- -- o <- Db.saveObject h V2.DeclComponent blob + -- -- Db.saveHashObject h o 2 + -- -- pure o + -- -- where + -- -- blob = S.putBytes (S.V1.putFoldable (V2.putDecl putObjectId putV putA)) component -- savePatch :: DB m => Db.HashId -> Patch2S -> m () -- savePatch h p = do @@ -737,16 +758,17 @@ componentMapForDir root = listDirectory root <&> foldl' insert mempty existsObjectWithHash :: DB m => Hash -> m Bool existsObjectWithHash = Db.objectExistsWithHash . Hash.toBase32Hex -convertABT :: forall f v a f' v' a' . 
Ord v' => (f (V1.ABT.Term f v a) -> f' (V2.ABT.Term f' v' a')) -> (v -> v') -> (a -> a') -> V1.ABT.Term f v a -> V2.ABT.Term f' v' a' -convertABT ff fv fa = goTerm where - goTerm :: V1.ABT.Term f v a -> V2.ABT.Term f' v' a' - goTerm (V1.ABT.Term vs a out) = V2.ABT.Term (Set.map fv vs) (fa a) (goABT out) - goABT :: V1.ABT.ABT f v (V1.ABT.Term f v a) -> V2.ABT.ABT f' v' (V2.ABT.Term f' v' a') - goABT = \case - V1.ABT.Var v -> V2.ABT.Var (fv v) - V1.ABT.Cycle t -> V2.ABT.Cycle (goTerm t) - V1.ABT.Abs v t -> V2.ABT.Abs (fv v) (goTerm t) - V1.ABT.Tm ft -> V2.ABT.Tm (ff ft) +convertABT :: forall f v a f' v' a'. Ord v' => (f (V1.ABT.Term f v a) -> f' (V2.ABT.Term f' v' a')) -> (v -> v') -> (a -> a') -> V1.ABT.Term f v a -> V2.ABT.Term f' v' a' +convertABT ff fv fa = goTerm + where + goTerm :: V1.ABT.Term f v a -> V2.ABT.Term f' v' a' + goTerm (V1.ABT.Term vs a out) = V2.ABT.Term (Set.map fv vs) (fa a) (goABT out) + goABT :: V1.ABT.ABT f v (V1.ABT.Term f v a) -> V2.ABT.ABT f' v' (V2.ABT.Term f' v' a') + goABT = \case + V1.ABT.Var v -> V2.ABT.Var (fv v) + V1.ABT.Cycle t -> V2.ABT.Cycle (goTerm t) + V1.ABT.Abs v t -> V2.ABT.Abs (fv v) (goTerm t) + V1.ABT.Tm ft -> V2.ABT.Tm (ff ft) convertSymbol :: V1.Symbol.Symbol -> V2.Symbol.Symbol convertSymbol (V1.Symbol.Symbol id name) = V2.Symbol.Symbol id name @@ -760,26 +782,28 @@ type LocalIdState = (Map Text V2.TermFormat.LocalTextId, Map (V2 Hash) V2.TermFormat.LocalDefnId) rewriteType :: - (V2.Reference.Reference -> - State.State LocalIdState V2.TermFormat.TypeRef) -> - V2TypeOfTerm -> State LocalIdState V2.TermFormat.Type -rewriteType doRef = V2.ABT.transformM go where - go :: V2.Type.FT k -> State LocalIdState (V2.TermFormat.FT k) - go = \case - V2.Type.Ref r -> (V2.Type.Ref <$> doRef r) - V2.Type.Arrow l r -> pure $ V2.Type.Arrow l r - V2.Type.Ann a kind -> pure $ V2.Type.Ann a kind - V2.Type.Effect e b -> pure $ V2.Type.Effect e b - V2.Type.Effects es -> pure $ V2.Type.Effects es - V2.Type.Forall a -> pure $ V2.Type.Forall a 
- V2.Type.IntroOuter a -> pure $ V2.Type.IntroOuter a + (V2.Reference.Reference -> State.State LocalIdState V2.TermFormat.TypeRef) -> + V2HashTypeOfTerm -> + State LocalIdState V2.TermFormat.Type +rewriteType doRef = V2.ABT.transformM go + where + go :: V2.Type.FT k -> State LocalIdState (V2.TermFormat.FT k) + go = \case + V2.Type.Ref r -> (V2.Type.Ref <$> doRef r) + V2.Type.Arrow l r -> pure $ V2.Type.Arrow l r + V2.Type.Ann a kind -> pure $ V2.Type.Ann a kind + V2.Type.Effect e b -> pure $ V2.Type.Effect e b + V2.Type.Effects es -> pure $ V2.Type.Effects es + V2.Type.Forall a -> pure $ V2.Type.Forall a + V2.Type.IntroOuter a -> pure $ V2.Type.IntroOuter a -- | rewrite Vars and Tms 🙃 -mapTermToVar :: (Foldable f, Functor f, Ord v2) - => (v -> v2) - -> (a -> f (V2.ABT.Term f v a) -> Maybe (V2.ABT.Term f v2 a)) - -> V2.ABT.Term f v a - -> V2.ABT.Term f v2 a +mapTermToVar :: + (Foldable f, Functor f, Ord v2) => + (v -> v2) -> + (a -> f (V2.ABT.Term f v a) -> Maybe (V2.ABT.Term f v2 a)) -> + V2.ABT.Term f v a -> + V2.ABT.Term f v2 a mapTermToVar fv ft t@(V2.ABT.Term _ a abt) = case abt of V2.ABT.Var v -> V2.ABT.var a (fv v) V2.ABT.Cycle body -> V2.ABT.cycle a (mapTermToVar fv ft body) @@ -789,7 +813,8 @@ mapTermToVar fv ft t@(V2.ABT.Term _ a abt) = case abt of Nothing -> V2.ABT.tm a (mapTermToVar fv ft `fmap` body) Just t' -> t' -mapVarToTerm :: (Foldable f, Functor f, Ord v2) => +mapVarToTerm :: + (Foldable f, Functor f, Ord v2) => (v -> v2) -> (v -> Either (f (V2.ABT.Term f v2 a)) v2) -> V2.ABT.Term f v a -> @@ -809,241 +834,244 @@ mapVarToTerm fAbs fVar t@(V2.ABT.Term _ a abt) = case abt of convertTerm1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> (Text -> Db.TextId) -> V1 Hash -> [(V1Term, V1Type)] -> m () convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do -- construct v2 term component for hashing - let - buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2TypeOfTerm - buildTermType2H lookup = goType where - goType :: V1Type -> 
V2TypeOfTerm - goType = convertABT goABT convertSymbol (const ()) - goABT :: V1.Type.F V1Type -> V2.Type.FT V2TypeOfTerm - goABT = \case - V1.Type.Ref r -> V2.Type.Ref case r of - V1.Reference.Builtin t -> - V2.Reference.ReferenceBuiltin t - V1.Reference.Derived h i _n -> - V2.Reference.ReferenceDerived - (V2.Reference.Id (runV2 . lookup $ V1 h) i) - V1.Type.Arrow i o -> V2.Type.Arrow (goType i) (goType o) - V1.Type.Ann a k -> V2.Type.Ann (goType a) (convertKind k) - V1.Type.App f x -> V2.Type.App (goType f) (goType x) - V1.Type.Effect e b -> V2.Type.Effect (goType e) (goType b) - V1.Type.Effects as -> V2.Type.Effects (goType <$> as) - V1.Type.Forall a -> V2.Type.Forall (goType a) - V1.Type.IntroOuter a -> V2.Type.IntroOuter (goType a) - buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> V1Term -> V2HashTerm - buildTerm2H lookup self = goTerm where - goTerm = convertABT goABT convertSymbol (const ()) - goABT :: V1.Term.F V1.Symbol.Symbol () V1Term -> V2.Term.F V2.Symbol.Symbol V2HashTerm - lookupTermLink = \case - V1.Referent.Ref r -> V2.Referent.Ref (lookupTerm r) - V1.Referent.Con r i _ct -> V2.Referent.Con (lookupType r) (fromIntegral i) - lookupTerm = \case - V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t - V1.Reference.Derived h i _n -> - V2.Reference.ReferenceDerived - (V2.Reference.Id - (if V1 h == self then Nothing - else (Just . runV2.lookup $ V1 h)) i) - lookupType = \case - V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t - V1.Reference.Derived h i _n -> - V2.Reference.ReferenceDerived - (V2.Reference.Id (runV2 . 
lookup $ V1 h) i) - goABT = \case - V1.Term.Int i -> V2.Term.Int i - V1.Term.Nat n -> V2.Term.Nat n - V1.Term.Float f -> V2.Term.Float f - V1.Term.Boolean b -> V2.Term.Boolean b - V1.Term.Text t -> V2.Term.Text t - V1.Term.Char c -> V2.Term.Char c - V1.Term.Ref r -> V2.Term.Ref (lookupTerm r) - V1.Term.Constructor r i -> - V2.Term.Constructor (lookupType r) (fromIntegral i) - V1.Term.Request r i -> - V2.Term.Constructor (lookupType r) (fromIntegral i) - V1.Term.Handle b h -> V2.Term.Handle (goTerm b) (goTerm h) - V1.Term.App f a -> V2.Term.App (goTerm f) (goTerm a) - V1.Term.Ann e t -> V2.Term.Ann (goTerm e) (buildTermType2H lookup t) - V1.Term.Sequence as -> V2.Term.Sequence (goTerm <$> as) - V1.Term.If c t f -> V2.Term.If (goTerm c) (goTerm t) (goTerm f) - V1.Term.And a b -> V2.Term.And (goTerm a) (goTerm b) - V1.Term.Or a b -> V2.Term.Or (goTerm a) (goTerm b) - V1.Term.Lam a -> V2.Term.Lam (goTerm a) - V1.Term.LetRec _ bs body -> V2.Term.LetRec (goTerm <$> bs) (goTerm body) - V1.Term.Let _ b body -> V2.Term.Let (goTerm b) (goTerm body) - V1.Term.Match e cases -> V2.Term.Match (goTerm e) (goCase <$> cases) - V1.Term.TermLink r -> V2.Term.TermLink (lookupTermLink r) - V1.Term.TypeLink r -> V2.Term.TypeLink (lookupType r) - goCase (V1.Term.MatchCase p g b) = - V2.Term.MatchCase (goPat p) (goTerm <$> g) (goTerm b) - goPat = \case - V1.Pattern.Unbound -> V2.Term.PUnbound - V1.Pattern.Var -> V2.Term.PVar - V1.Pattern.Boolean b -> V2.Term.PBoolean b - V1.Pattern.Int i -> V2.Term.PInt i - V1.Pattern.Nat n -> V2.Term.PNat n - V1.Pattern.Float d -> V2.Term.PFloat d - V1.Pattern.Text t -> V2.Term.PText t - V1.Pattern.Char c -> V2.Term.PChar c - V1.Pattern.Constructor r i ps -> - V2.Term.PConstructor (lookupType r) i (goPat <$> ps) - V1.Pattern.As p -> V2.Term.PAs (goPat p) - V1.Pattern.EffectPure p -> V2.Term.PEffectPure (goPat p) - V1.Pattern.EffectBind r i ps k -> - V2.Term.PEffectBind (lookupType r) i (goPat <$> ps) (goPat k) - V1.Pattern.SequenceLiteral ps -> 
V2.Term.PSequenceLiteral (goPat <$> ps) - V1.Pattern.SequenceOp p op p2 -> - V2.Term.PSequenceOp (goPat p) (goSeqOp op) (goPat p2) - goSeqOp = \case - V1.Pattern.Cons -> V2.Term.PCons - V1.Pattern.Snoc -> V2.Term.PSnoc - V1.Pattern.Concat -> V2.Term.PConcat - buildTermComponent2S :: - (V2 Hash -> Db.ObjectId) -> V2 Hash -> V2HashTermComponent -> V2DiskTermComponent - buildTermComponent2S getId h0 terms = let - rewrittenTerms :: [(V2.TermFormat.Term, LocalIdState)] = - map (flip State.runState mempty . rewriteTerm) terms - rewriteTerm :: V2HashTerm -> State.State LocalIdState V2.TermFormat.Term - rewriteTerm = V2.ABT.transformM go where - doText :: Text -> State.State LocalIdState V2.TermFormat.LocalTextId - doText t = do - (textMap, objectMap) <- State.get - case Map.lookup t textMap of - Nothing -> do - let id = V2.TermFormat.LocalTextId - . fromIntegral - $ Map.size textMap - State.put (Map.insert t id textMap, objectMap) - pure id - Just id -> pure id - doHash :: Hash -> State.State LocalIdState V2.TermFormat.LocalDefnId - doHash (V2 -> h) = do - (textMap, objectMap) <- State.get - case Map.lookup h objectMap of - Nothing -> do - let id = V2.TermFormat.LocalDefnId - . 
fromIntegral - $ Map.size objectMap - State.put (textMap, Map.insert h id objectMap) - pure id - Just id -> pure id - doRecRef :: V2.Reference.Reference' Text (Maybe Hash) -> State.State LocalIdState V2.TermFormat.TermRef - doRecRef = \case - V2.Reference.ReferenceBuiltin t -> - V2.Reference.ReferenceBuiltin <$> doText t - V2.Reference.ReferenceDerived r -> - V2.Reference.ReferenceDerived <$> case r of - V2.Reference.Id h i -> V2.Reference.Id <$> traverse doHash h <*> pure i - doRef :: V2.Reference.Reference -> State.State LocalIdState V2.TermFormat.TypeRef - doRef = \case - V2.Reference.ReferenceBuiltin t -> - V2.Reference.ReferenceBuiltin <$> doText t - V2.Reference.ReferenceDerived (V2.Reference.Id h i) -> - V2.Reference.ReferenceDerived <$> - (V2.Reference.Id <$> doHash h <*> pure i) - go :: V2.Term.F V2.Symbol.Symbol k -> State LocalIdState (V2.TermFormat.F k) - go = \case - V2.Term.Int i -> pure $ V2.Term.Int i - V2.Term.Nat n -> pure $ V2.Term.Nat n - V2.Term.Float d -> pure $ V2.Term.Float d - V2.Term.Boolean b -> pure $ V2.Term.Boolean b - V2.Term.Text t -> V2.Term.Text <$> doText t - V2.Term.Char c -> pure $ V2.Term.Char c - V2.Term.Ref r -> V2.Term.Ref <$> doRecRef r - V2.Term.Constructor r cid -> - V2.Term.Constructor <$> doRef r <*> pure cid - V2.Term.Request r cid -> V2.Term.Request <$> doRef r <*> pure cid - V2.Term.Handle e h -> pure $ V2.Term.Handle e h - V2.Term.App f a -> pure $ V2.Term.App f a - V2.Term.Ann e typ -> V2.Term.Ann e <$> rewriteType doRef typ - mapToVec :: Ord i => (a -> b) -> Map a i -> Vector b - mapToVec f = Vector.fromList . map (f . fst) . List.sortOn snd . Map.toList - stateToIds :: LocalIdState -> V2.LocalIds.LocalIds - stateToIds (t, o) = - V2.LocalIds.LocalIds (mapToVec lookupText t) (mapToVec lookup2 o) - -- state : (Map Text Int, Map Hash Int) - -- Term.app Nat.+ 7 #8sf73g - -- ["Nat.+"] [#8sf73g] - -- [lookupText "Nat.+"] [lookup #8sf73g] - -- Term.app (Builtin 0) 7 (Hash 0) - in V2.TermFormat.LocallyIndexedComponent - . 
Vector.fromList - . fmap swap - . fmap (second stateToIds) $ rewrittenTerms - -- | converts v to (Right v) and converts (Ref Nothing i) to (Left i) - refToVarTerm :: Ord v => - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a -> - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a - refToVarTerm = mapTermToVar Right \a body -> case body of - V2.Term.Ref (V2.Reference.ReferenceDerived (V2.Reference.Id Nothing i)) -> - Just $ V2.ABT.var a (Left (V1 (fromIntegral i))) - _ -> Nothing - varToRefTerm :: (Show v, Ord v) => Map (V1 Int) (V2 Int) -> - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a -> - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a - varToRefTerm lookup = mapVarToTerm fromLeft $ mapLeft \(V1 i) -> + let buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2HashTypeOfTerm + buildTermType2H lookup = goType + where + goType :: V1Type -> V2HashTypeOfTerm + goType = convertABT goABT convertSymbol (const ()) + goABT :: V1.Type.F V1Type -> V2.Type.FT V2HashTypeOfTerm + goABT = \case + V1.Type.Ref r -> V2.Type.Ref case r of + V1.Reference.Builtin t -> + V2.Reference.ReferenceBuiltin t + V1.Reference.Derived h i _n -> + V2.Reference.ReferenceDerived + (V2.Reference.Id (runV2 . 
lookup $ V1 h) i) + V1.Type.Arrow i o -> V2.Type.Arrow (goType i) (goType o) + V1.Type.Ann a k -> V2.Type.Ann (goType a) (convertKind k) + V1.Type.App f x -> V2.Type.App (goType f) (goType x) + V1.Type.Effect e b -> V2.Type.Effect (goType e) (goType b) + V1.Type.Effects as -> V2.Type.Effects (goType <$> as) + V1.Type.Forall a -> V2.Type.Forall (goType a) + V1.Type.IntroOuter a -> V2.Type.IntroOuter (goType a) + buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> V1Term -> V2HashTerm + buildTerm2H lookup self = goTerm + where + goTerm = convertABT goABT convertSymbol (const ()) + goABT :: V1.Term.F V1.Symbol.Symbol () V1Term -> V2.Term.F V2.Symbol.Symbol V2HashTerm + lookupTermLink = \case + V1.Referent.Ref r -> V2.Referent.Ref (lookupTerm r) + V1.Referent.Con r i _ct -> V2.Referent.Con (lookupType r) (fromIntegral i) + lookupTerm = \case + V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t + V1.Reference.Derived h i _n -> + let h' = if V1 h == self then + Nothing + else Just . runV2 . lookup $ V1 h + in V2.Reference.ReferenceDerived (V2.Reference.Id h' i) + lookupType = \case + V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t + V1.Reference.Derived h i _n -> + V2.Reference.ReferenceDerived + (V2.Reference.Id (runV2 . 
lookup $ V1 h) i) + goABT = \case + V1.Term.Int i -> V2.Term.Int i + V1.Term.Nat n -> V2.Term.Nat n + V1.Term.Float f -> V2.Term.Float f + V1.Term.Boolean b -> V2.Term.Boolean b + V1.Term.Text t -> V2.Term.Text t + V1.Term.Char c -> V2.Term.Char c + V1.Term.Ref r -> V2.Term.Ref (lookupTerm r) + V1.Term.Constructor r i -> + V2.Term.Constructor (lookupType r) (fromIntegral i) + V1.Term.Request r i -> + V2.Term.Constructor (lookupType r) (fromIntegral i) + V1.Term.Handle b h -> V2.Term.Handle (goTerm b) (goTerm h) + V1.Term.App f a -> V2.Term.App (goTerm f) (goTerm a) + V1.Term.Ann e t -> V2.Term.Ann (goTerm e) (buildTermType2H lookup t) + V1.Term.Sequence as -> V2.Term.Sequence (goTerm <$> as) + V1.Term.If c t f -> V2.Term.If (goTerm c) (goTerm t) (goTerm f) + V1.Term.And a b -> V2.Term.And (goTerm a) (goTerm b) + V1.Term.Or a b -> V2.Term.Or (goTerm a) (goTerm b) + V1.Term.Lam a -> V2.Term.Lam (goTerm a) + V1.Term.LetRec _ bs body -> V2.Term.LetRec (goTerm <$> bs) (goTerm body) + V1.Term.Let _ b body -> V2.Term.Let (goTerm b) (goTerm body) + V1.Term.Match e cases -> V2.Term.Match (goTerm e) (goCase <$> cases) + V1.Term.TermLink r -> V2.Term.TermLink (lookupTermLink r) + V1.Term.TypeLink r -> V2.Term.TypeLink (lookupType r) + goCase (V1.Term.MatchCase p g b) = + V2.Term.MatchCase (goPat p) (goTerm <$> g) (goTerm b) + goPat = \case + V1.Pattern.Unbound -> V2.Term.PUnbound + V1.Pattern.Var -> V2.Term.PVar + V1.Pattern.Boolean b -> V2.Term.PBoolean b + V1.Pattern.Int i -> V2.Term.PInt i + V1.Pattern.Nat n -> V2.Term.PNat n + V1.Pattern.Float d -> V2.Term.PFloat d + V1.Pattern.Text t -> V2.Term.PText t + V1.Pattern.Char c -> V2.Term.PChar c + V1.Pattern.Constructor r i ps -> + V2.Term.PConstructor (lookupType r) i (goPat <$> ps) + V1.Pattern.As p -> V2.Term.PAs (goPat p) + V1.Pattern.EffectPure p -> V2.Term.PEffectPure (goPat p) + V1.Pattern.EffectBind r i ps k -> + V2.Term.PEffectBind (lookupType r) i (goPat <$> ps) (goPat k) + V1.Pattern.SequenceLiteral ps -> 
V2.Term.PSequenceLiteral (goPat <$> ps) + V1.Pattern.SequenceOp p op p2 -> + V2.Term.PSequenceOp (goPat p) (goSeqOp op) (goPat p2) + goSeqOp = \case + V1.Pattern.Cons -> V2.Term.PCons + V1.Pattern.Snoc -> V2.Term.PSnoc + V1.Pattern.Concat -> V2.Term.PConcat + buildTermComponent2S :: + (V2 Hash -> Db.ObjectId) -> V2 Hash -> V2HashTermComponent -> V2DiskTermComponent + buildTermComponent2S getId h0 terms = + let rewrittenTerms :: [(V2.TermFormat.Term, LocalIdState)] = + map (flip State.runState mempty . rewriteTerm) terms + rewriteTerm :: V2HashTerm -> State.State LocalIdState V2.TermFormat.Term + rewriteTerm = V2.ABT.transformM go + where + doText :: Text -> State.State LocalIdState V2.TermFormat.LocalTextId + doText t = do + (textMap, objectMap) <- State.get + case Map.lookup t textMap of + Nothing -> do + let id = + V2.TermFormat.LocalTextId + . fromIntegral + $ Map.size textMap + State.put (Map.insert t id textMap, objectMap) + pure id + Just id -> pure id + doHash :: Hash -> State.State LocalIdState V2.TermFormat.LocalDefnId + doHash (V2 -> h) = do + (textMap, objectMap) <- State.get + case Map.lookup h objectMap of + Nothing -> do + let id = + V2.TermFormat.LocalDefnId + . 
fromIntegral + $ Map.size objectMap + State.put (textMap, Map.insert h id objectMap) + pure id + Just id -> pure id + doRecRef :: V2.Reference.Reference' Text (Maybe Hash) -> State.State LocalIdState V2.TermFormat.TermRef + doRecRef = \case + V2.Reference.ReferenceBuiltin t -> + V2.Reference.ReferenceBuiltin <$> doText t + V2.Reference.ReferenceDerived r -> + V2.Reference.ReferenceDerived <$> case r of + V2.Reference.Id h i -> V2.Reference.Id <$> traverse doHash h <*> pure i + doRef :: V2.Reference.Reference -> State.State LocalIdState V2.TermFormat.TypeRef + doRef = \case + V2.Reference.ReferenceBuiltin t -> + V2.Reference.ReferenceBuiltin <$> doText t + V2.Reference.ReferenceDerived (V2.Reference.Id h i) -> + V2.Reference.ReferenceDerived + <$> (V2.Reference.Id <$> doHash h <*> pure i) + go :: V2.Term.F V2.Symbol.Symbol k -> State LocalIdState (V2.TermFormat.F k) + go = \case + V2.Term.Int i -> pure $ V2.Term.Int i + V2.Term.Nat n -> pure $ V2.Term.Nat n + V2.Term.Float d -> pure $ V2.Term.Float d + V2.Term.Boolean b -> pure $ V2.Term.Boolean b + V2.Term.Text t -> V2.Term.Text <$> doText t + V2.Term.Char c -> pure $ V2.Term.Char c + V2.Term.Ref r -> V2.Term.Ref <$> doRecRef r + V2.Term.Constructor r cid -> + V2.Term.Constructor <$> doRef r <*> pure cid + V2.Term.Request r cid -> V2.Term.Request <$> doRef r <*> pure cid + V2.Term.Handle e h -> pure $ V2.Term.Handle e h + V2.Term.App f a -> pure $ V2.Term.App f a + V2.Term.Ann e typ -> V2.Term.Ann e <$> rewriteType doRef typ + mapToVec :: Ord i => (a -> b) -> Map a i -> Vector b + mapToVec f = Vector.fromList . map (f . fst) . List.sortOn snd . Map.toList + stateToIds :: LocalIdState -> V2.LocalIds.LocalIds + stateToIds (t, o) = + V2.LocalIds.LocalIds (mapToVec lookupText t) (mapToVec lookup2 o) + -- state : (Map Text Int, Map Hash Int) + -- Term.app Nat.+ 7 #8sf73g + -- ["Nat.+"] [#8sf73g] + -- [lookupText "Nat.+"] [lookup #8sf73g] + -- Term.app (Builtin 0) 7 (Hash 0) + in V2.TermFormat.LocallyIndexedComponent + . 
Vector.fromList + . fmap swap + . fmap (second stateToIds) + $ rewrittenTerms + refToVarTerm :: + Ord v => + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a -> + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a + refToVarTerm = mapTermToVar Right \a body -> case body of + V2.Term.Ref (V2.Reference.ReferenceDerived (V2.Reference.Id Nothing i)) -> + Just $ V2.ABT.var a (Left (V1 (fromIntegral i))) + _ -> Nothing + varToRefTerm :: + (Show v, Ord v) => + Map (V1 Int) (V2 Int) -> + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a -> + V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a + varToRefTerm lookup = mapVarToTerm fromLeft $ mapLeft \(V1 i) -> V2.Term.Ref (V2.Reference.Derived Nothing (fromIntegral i)) - where - fromLeft :: Show a => Either a b -> b - fromLeft = flip either id \r -> - error ("encountered a reference pseudovar " ++ show r ++ " in ABT.Abs") - - rehashComponent :: (V1 Hash -> V2 Hash) -> V1 Hash -> [V1Term] -> (V2 Hash, V2HashTermComponent) - rehashComponent lookup1 hash1 v1terms = - let fromLeft = either id (\x -> error $ "impossibly " ++ show x) - in let - indexVars = Left . V1 <$> [0..] 
- namedTerms1 :: [(Either (V1 Int) V2.Symbol.Symbol, V1Term)] - namedTerms1 = zip indexVars v1terms - namedTerms2 :: [(Either (V1 Int) V2.Symbol.Symbol, V2HashTerm)] - namedTerms2 = fmap (second (buildTerm2H lookup1 hash1)) namedTerms1 - namedTermMap :: Map (Either (V1 Int) V2.Symbol.Symbol) V2HashTerm - namedTermMap = Map.fromList namedTerms2 + where + fromLeft :: Show a => Either a b -> b + fromLeft = flip either id \r -> + error ("encountered a reference pseudovar " ++ show r ++ " in ABT.Abs") + rehashComponent :: (V1 Hash -> V2 Hash) -> V1 Hash -> [(V1Term, V1Type)] -> (V2 Hash, [V2HashTypeOfTerm], V2HashTermComponent) + rehashComponent lookup1 hash1 (unzip -> (v1terms, v1types)) = + let fromLeft = either id (\x -> error $ "impossibly " ++ show x) + in let indexVars = Left . V1 <$> [0 ..] + namedTerms1 :: [(Either (V1 Int) V2.Symbol.Symbol, V1Term)] + namedTerms1 = zip indexVars v1terms + namedTerms2 :: [(Either (V1 Int) V2.Symbol.Symbol, V2HashTerm)] + namedTerms2 = fmap (second (buildTerm2H lookup1 hash1)) namedTerms1 + namedTermMap :: Map (Either (V1 Int) V2.Symbol.Symbol) V2HashTerm + namedTermMap = Map.fromList namedTerms2 + hash2 :: V2 Hash + v1Index :: [V1 Int] + -- (h, ([2, 0, 1], [t2, t0, t1]) + (hash2, unzip -> (fmap fromLeft -> v1Index, v2Terms)) = + V2.ABT.hashComponent (refToVarTerm <$> namedTermMap) + indexMap :: Map (V1 Int) (V2 Int) + indexMap = Map.fromList (zip v1Index (V2 <$> [0 :: Int ..])) + convertedTypes, permutedTypes :: [V2HashTypeOfTerm] + convertedTypes = map (buildTermType2H lookup1) v1types + -- the first element of v1Index is the V1 index of the first V2 element + permutedTypes = map (((!!) convertedTypes) . 
runV1) v1Index + in (hash2, permutedTypes, varToRefTerm indexMap <$> v2Terms) hash2 :: V2 Hash - v1Index :: [V1 Int] - -- (h, ([2, 0, 1], [t2, t0, t1]) - (hash2, unzip -> (fmap fromLeft -> v1Index, v2Terms)) = - V2.ABT.hashComponent (refToVarTerm <$> namedTermMap) - indexMap :: Map (V1 Int) (V2 Int) - indexMap = Map.fromList (zip v1Index (V2 <$> [0 :: Int ..])) - in (hash2, varToRefTerm indexMap <$> v2Terms) - - v2types :: [V2TypeOfTerm] = - map (buildTermType2H lookup1 . snd) v1component - - -- |rehash and reorder component - hash2 :: V2 Hash - v2hashComponent :: V2HashTermComponent - (hash2, v2hashComponent) = rehashComponent lookup1 hash1 (map fst v1component) - - -- construct v2 term component for serializing - v2diskComponent :: V2DiskTermComponent = - buildTermComponent2S lookup2 hash2 v2hashComponent + v2types :: [V2HashTypeOfTerm] + v2hashComponent :: V2HashTermComponent + (hash2, v2types, v2hashComponent) = rehashComponent lookup1 hash1 v1component + -- construct v2 term component for serializing + v2diskComponent :: V2DiskTermComponent = + buildTermComponent2S lookup2 hash2 v2hashComponent -- serialize the v2 term component componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2diskComponent - -- -- construct v2 types for each component element, and save the types to the - -- -- to the indices - -- for_ (zip [0 ..] v2types) $ \(i, type2) -> do - -- let r = V2.Reference.Id componentObjectId i - -- let rt = V2.Referent.RefId r + -- construct v2 types for each component element, and save the types to the + -- to the indices + for_ (zip [0 ..] v2types) $ \(i, type2) -> do + let r = V2.Reference.Id componentObjectId i + let rt = V2.Referent.RefId r - -- saveTypeBlobForReferent rt (buildTermType2S (snd . 
lookup) typ1) - -- createTypeSearchIndicesForReferent rt typ1 - -- createDependencyIndexForTerm r term2 + saveTypeBlobForTerm r (buildTermType2S lookupText lookup2 type2) + -- createTypeSearchIndicesForReferent rt type2 + -- createDependencyIndexForTerm r term2 error "todo: save types and create type indices for component" convertDecl1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> V1 Hash -> [V1Decl] -> m () convertDecl1 lookup1 lookup2 hash1 v1component = do - let - -- convert constructor type (similar to buildTermType2H) - v2ctorTypes :: [V2TypeOfConstructor] = error "todo" - -- rehash and reorder component - hash2 :: V2 Hash - v2hashComponent :: V2HashDeclComponent - (hash2, v2hashComponent) = error "todo: rehashComponent lookup1 hash1 v1component" - -- convert decl component - v2diskComponent :: V2DiskDeclComponent = error "todo" + let -- convert constructor type (similar to buildTermType2H) + v2ctorTypes :: [V2TypeOfConstructor] = error "todo" + -- rehash and reorder component + hash2 :: V2 Hash + v2hashComponent :: V2HashDeclComponent + (hash2, v2hashComponent) = error "todo: rehashComponent lookup1 hash1 v1component" + -- convert decl component + v2diskComponent :: V2DiskDeclComponent = error "todo" componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2diskComponent error "todo: create type indices for each decl in the component" @@ -1253,9 +1281,9 @@ convertDecl1 lookup1 lookup2 hash1 v1component = do -- lookupType :: V1 Hash -> Db.ObjectId -- lookupType = lookup --- buildTermType2S :: (V1 Hash -> Db.ObjectId) -> Type -> Type2S --- buildTermType2S lookup = --- void . Type.rmap (over Db.referenceTraversal (lookup . V1)) +buildTermType2S :: (Text -> Db.TextId) -> (V2 Hash -> Db.ObjectId) -> V2HashTypeOfTerm -> V2DiskTypeOfTerm +buildTermType2S lookupText lookup2 = V2.Type.rmap + (over V2.Reference.t lookupText . over V2.Reference.h (lookup2 . 
V2)) -- buildDecl2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> Decl -> Decl2 Hash -- buildDecl2H lookup = diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs new file mode 100644 index 0000000000..90efafe24e --- /dev/null +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs @@ -0,0 +1,103 @@ +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE PatternSynonyms #-} + +module U.Codebase.Convert.TypeUtil where + +import U.Codebase.Type (TypeT, F'(..), TypeR) +import qualified U.Core.ABT.Var as ABT +import qualified U.Core.ABT as ABT +import qualified Data.Set as Set +import U.Core.ABT (pattern Var') +import Data.Set (Set) +import U.Codebase.Reference (Reference) +import qualified U.Codebase.Reference as Reference + +-- * Constructors +effect :: Ord v => [TypeR r v] -> TypeR r v -> TypeR r v +effect es (Effect1' fs t) = + let es' = (es >>= flattenEffects) ++ flattenEffects fs + in ABT.tm () (Effect (ABT.tm () (Effects es')) t) +effect es t = ABT.tm () (Effect (ABT.tm () (Effects es)) t) + +effects :: Ord v => [TypeR r v] -> TypeR r v +effects es = ABT.tm () (Effects $ es >>= flattenEffects) + +-- * Modification + +-- Remove all effect variables from the type. +-- Used for type-based search, we apply this transformation to both the +-- indexed type and the query type, so the user can supply `a -> b` that will +-- match `a ->{e} b` (but not `a ->{IO} b`). 
+removeAllEffectVars :: ABT.Var v => TypeR r v -> TypeR r v +removeAllEffectVars t = let + allEffectVars = foldMap go (ABT.subterms t) + go (Effects' vs) = Set.fromList [ v | Var' v <- vs] + go (Effect1' (Var' v) _) = Set.singleton v + go _ = mempty + (vs, tu) = unforall' t + in generalize vs (removeEffectVars allEffectVars tu) + +-- Remove free effect variables from the type that are in the set +removeEffectVars :: ABT.Var v => Set v -> TypeR r v -> TypeR r v +removeEffectVars removals t = + let z = effects [] + t' = ABT.substsInheritAnnotation ((,z) <$> Set.toList removals) t + -- leave explicitly empty `{}` alone + removeEmpty (Effect1' (Effects' []) v) = Just (ABT.visitPure removeEmpty v) + removeEmpty (Effect1' e v) = + case flattenEffects e of + [] -> Just (ABT.visitPure removeEmpty v) + es -> Just (effect es $ ABT.visitPure removeEmpty v) + removeEmpty (Effects' es) = + Just $ effects (es >>= flattenEffects) + removeEmpty _ = Nothing + in ABT.visitPure removeEmpty t' + +flattenEffects :: TypeR r v -> [TypeR r v] +flattenEffects (Effects' es) = es >>= flattenEffects +flattenEffects es = [es] + +-- | Bind the given variables with an outer `forall`, if they are used in `t`. +generalize :: Ord v => [v] -> TypeR r v -> TypeR r v +generalize vs t = foldr f t vs where + f v t = if Set.member v (ABT.freeVars t) then forall v t else t + + +-- * Utility +toReference :: (Ord v, Show v) => TypeT v -> Reference +toReference (Ref' r) = r +-- a bit of normalization - any unused type parameters aren't part of the hash +toReference (ForallNamed' v body) | not (Set.member v (ABT.freeVars body)) = toReference body +toReference t = Reference.Derived (ABT.hash t) 0 + +toReferenceMentions :: (ABT.Var v, Show v) => TypeT v -> Set Reference +toReferenceMentions ty = + let (vs, _) = unforall' ty + gen ty = generalize (Set.toList (ABT.freeVars ty)) $ generalize vs ty + in Set.fromList $ toReference . 
gen <$> ABT.subterms ty + +-- * Patterns +pattern ForallsNamed' :: [v] -> TypeR r v -> TypeR r v +pattern ForallsNamed' vs body <- (unForalls -> Just (vs, body)) +pattern ForallNamed' :: v -> TypeR r v -> TypeR r v +pattern ForallNamed' v body <- ABT.Tm' (Forall (ABT.out -> ABT.Abs v body)) +pattern Effects' :: [TypeR r v] -> TypeR r v +pattern Effects' es <- ABT.Tm' (Effects es) +pattern Effect1' :: TypeR r v -> TypeR r v -> TypeR r v +pattern Effect1' e t <- ABT.Tm' (Effect e t) +pattern Ref' :: r -> TypeR r v +pattern Ref' r <- ABT.Tm' (Ref r) + +forall :: Ord v => v -> TypeR r v -> TypeR r v +forall v body = ABT.tm () (Forall (ABT.abs () v body)) + +unforall' :: TypeR r v -> ([v], TypeR r v) +unforall' (ForallsNamed' vs t) = (vs, t) +unforall' t = ([], t) + +unForalls :: TypeR r v -> Maybe ([v], TypeR r v) +unForalls t = go t [] + where go (ForallNamed' v body) vs = go body (v:vs) + go _body [] = Nothing + go body vs = Just(reverse vs, body) diff --git a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal index c2d5a2050d..b66e65820e 100644 --- a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal +++ b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal @@ -26,6 +26,7 @@ executable uconvert12 -- directory, -- errors, -- filepath, + lens, -- megaparsec, -- safe, -- shellmet, @@ -38,6 +39,7 @@ library hs-source-dirs: lib exposed-modules: U.Codebase.Convert.SyncV1V2 + U.Codebase.Convert.TypeUtil -- other-modules: -- other-extensions: build-depends: diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 4bcedabdb0..cc13d8f148 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -15,6 +15,7 @@ import Data.Bits (Bits) newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via 
Word64 newtype TextId = TextId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 +newtype HashId = HashId Word64 deriving (Eq, Ord) deriving (Hashable, FromField, ToField) via Word64 newtype TermId = TermId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId newtype DeclId = DeclId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs index ba8f730a4c..bd050c495e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs @@ -7,15 +7,13 @@ import Database.SQLite.Simple (SQLData(SQLInteger)) -- |Don't reorder these, they are part of the database data ObjectType = TermComponent -- 0 - | TermComponentTypes -- 1 - | DeclComponent -- 2 - | Namespace -- 3 - | Patch -- 4 - -- -- | LocalIds -- 5 -- future? + | DeclComponent -- 1 + | Namespace -- 2 + | Patch -- 3 deriving (Eq, Ord, Show, Enum) instance ToField ObjectType where toField = SQLInteger . fromIntegral . fromEnum instance FromField ObjectType where - fromField = fmap toEnum . fromField \ No newline at end of file + fromField = fmap toEnum . fromField diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index f532657c67..3277b4c67b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -20,24 +20,22 @@ import Data.Maybe (fromJust) import Data.String.Here.Uninterpolated (here) import Data.Text (Text) import qualified Database.SQLite.Simple as SQLite -import Database.SQLite.Simple ((:.) (..), Connection, FromRow, Only (..), SQLData (SQLNull), ToRow (..)) -import Data.Word (Word64) +import Database.SQLite.Simple (SQLData, (:.) 
(..), Connection, FromRow, Only (..), ToRow (..)) import Database.SQLite.Simple.FromField import Database.SQLite.Simple.ToField -import U.Codebase.Reference (Reference' (ReferenceBuiltin, ReferenceDerived)) -import qualified U.Codebase.Reference as Reference -import qualified U.Codebase.Referent as Referent +import qualified U.Codebase.Sqlite.Reference as Reference +import qualified U.Codebase.Sqlite.Referent as Referent import U.Codebase.Sqlite.ObjectType import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hashable (Hashable) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash import U.Codebase.Sqlite.DbId +import U.Codebase.Reference (Reference') -- * types type DB m = (MonadIO m, MonadReader Connection m) -newtype HashId = HashId Word64 deriving (Eq, Ord) deriving (Hashable, FromField, ToField) via Word64 newtype TypeId = TypeId ObjectId deriving (FromField, ToField) via ObjectId newtype TermId = TermCycleId ObjectId deriving (FromField, ToField) via ObjectId @@ -46,8 +44,9 @@ newtype CausalHashId = CausalHashId HashId deriving (Hashable, FromField, ToFiel newtype CausalOldHashId = CausalOldHashId HashId deriving (Hashable, FromField, ToField) via HashId newtype NamespaceHashId = NamespaceHashId ObjectId deriving (Hashable, FromField, ToField) via ObjectId -type DerivedReferent = Referent.Id' ObjectId ObjectId -type DerivedReference = Reference.Id' ObjectId +-- type DerivedReferent = Referent.Id ObjectId ObjectId +-- type DerivedReference = Reference.Id ObjectId +type TypeHashReference = Reference' TextId HashId -- * main squeeze saveHash :: DB m => Base32Hex -> m HashId @@ -165,8 +164,20 @@ loadCausalParents h = queryList sql (Only h) where sql = [here| SELECT parent_id FROM causal_parent WHERE causal_id = ? |] +saveTypeOfTerm :: DB m => Reference.Id -> ByteString -> m () +saveTypeOfTerm r blob = execute sql (r :. Only blob) where sql = [here| + INSERT OR IGNORE INTO type_of_term + VALUES (?, ?, ?) 
+ |] + +loadTypeOfTerm :: DB m => Reference.Id -> m (Maybe ByteString) +loadTypeOfTerm r = queryOnly sql r where sql = [here| + SELECT bytes FROM type_of_term + WHERE object_id = ? AND component_index = ? +|] + -- * Index-building -addToTypeIndex :: DB m => Reference' TextId HashId -> DerivedReferent -> m () +addToTypeIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeIndex tp tm = execute sql (tp :. tm) where sql = [here| INSERT OR IGNORE INTO find_type_index ( type_reference_builtin, @@ -178,7 +189,7 @@ addToTypeIndex tp tm = execute sql (tp :. tm) where sql = [here| ) VALUES (?, ?, ?, ?, ?, ?) |] -addToTypeMentionsIndex :: DB m => Reference' TextId HashId -> DerivedReferent -> m () +addToTypeMentionsIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| INSERT OR IGNORE INTO find_type_mentions_index ( type_reference_builtin, @@ -190,7 +201,7 @@ addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| ) VALUES (?, ?, ?, ?, ?, ?) |] -addToDependentsIndex :: DB m => Reference' TextId ObjectId -> DerivedReference -> m () +addToDependentsIndex :: DB m => Reference' TextId ObjectId -> Reference.Id -> m () addToDependentsIndex dependency dependent = execute sql (dependency :. 
dependent) where sql = [here| INSERT OR IGNORE INTO dependents_index ( @@ -230,26 +241,3 @@ headMay (a:_) = Just a -- * orphan instances deriving via Text instance ToField Base32Hex deriving via Text instance FromField Base32Hex - -instance ToRow (Reference' TextId HashId) where - -- | builtinId, hashId, componentIndex - toRow = \case - ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] - ReferenceDerived (Reference.Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) - -instance ToRow (Reference' TextId ObjectId) where - -- | builtinId, hashId, componentIndex - toRow = \case - ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] - ReferenceDerived (Reference.Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) - -instance ToRow (Reference.Id' ObjectId) where - -- | builtinId, hashId, componentIndex - toRow = \case - Reference.Id h i -> toRow (Only h) ++ toRow (Only i) - -instance ToRow DerivedReferent where - -- | objectId, componentIndex, constructorIndex - toRow = \case - Referent.RefId (Reference.Id h i) -> toRow (Only h) ++ toRow (Only i) ++ [SQLNull] - Referent.ConId (Reference.Id h i) cid -> toRow (Only h) ++ toRow (Only i) ++ toRow (Only cid) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index 882fa93b74..894794599b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -1,7 +1,32 @@ +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE LambdaCase #-} +{-# OPTIONS_GHC -Wno-orphans #-} + module U.Codebase.Sqlite.Reference where import U.Codebase.Sqlite.DbId -import U.Codebase.Reference (Reference', Id') +import U.Codebase.Reference (Reference'(ReferenceBuiltin, ReferenceDerived), Id'(Id)) +import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(toRow)) type Reference = Reference' TextId ObjectId type Id = Id' ObjectId + +type ReferenceH = Reference' TextId HashId + +-- * Orphan 
instances +instance ToRow (Reference' TextId HashId) where + -- | builtinId, hashId, componentIndex + toRow = \case + ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] + ReferenceDerived (Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) + +instance ToRow (Reference' TextId ObjectId) where + -- | builtinId, hashId, componentIndex + toRow = \case + ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] + ReferenceDerived (Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) + +instance ToRow Id where + -- | builtinId, hashId, componentIndex + toRow = \case + Id h i -> toRow (Only h) ++ toRow (Only i) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs index 3abecc4aa2..32640600cf 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -1,6 +1,23 @@ +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeSynonymInstances #-} +{-# LANGUAGE LambdaCase #-} +{-# OPTIONS_GHC -Wno-orphans #-} + module U.Codebase.Sqlite.Referent where +import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(..)) + +import U.Codebase.Referent (Id', Referent') import U.Codebase.Sqlite.Reference (Reference) -import U.Codebase.Referent (Referent') +import U.Codebase.Sqlite.DbId (ObjectId) +import qualified U.Codebase.Referent as Referent +import qualified U.Codebase.Reference as Reference type Referent = Referent' Reference Reference +type Id = Id' ObjectId ObjectId + +instance ToRow Id where + -- | objectId, componentIndex, constructorIndex + toRow = \case + Referent.RefId (Reference.Id h i) -> toRow (Only h) ++ toRow (Only i) ++ [SQLNull] + Referent.ConId (Reference.Id h i) cid -> toRow (Only h) ++ toRow (Only i) ++ toRow (Only cid) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs index bc3ae38f80..80f9ec4415 100644 --- 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Symbol.hs @@ -2,4 +2,15 @@ module U.Codebase.Sqlite.Symbol where import Data.Word (Word64) import Data.Text (Text) -data Symbol = Symbol !Word64 !Text deriving (Eq, Ord, Show) \ No newline at end of file +import qualified U.Core.ABT.Var as ABT +import qualified Data.Set as Set + +data Symbol = Symbol !Word64 !Text deriving (Eq, Ord, Show) + +-- |This clever instance relies on Ord to synthesize a new id. +-- If i > i2, then s > vs; otherwise increment the max i2: +-- freshIn [(0,"foo"), (1,"bar")] (0,"cat") = (3, "cat") +instance ABT.Var Symbol where + freshIn vs s | Set.null vs || Set.notMember s vs = s -- already fresh! + freshIn vs s@(Symbol i n) = case Set.elemAt (Set.size vs - 1) vs of + Symbol i2 _ -> if i > i2 then s else Symbol (i2+1) n diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 5ba8e0004f..00810ee628 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -34,23 +34,11 @@ CREATE TABLE object_type_description ( ); INSERT INTO object_type_description (id, description) VALUES (0, "Term Component"), -- foo x = x + 1 - (1, "Types of Term Component"), -- [Nat -> Nat] - (2, "Decl Component"), -- unique type Animal = Cat | Dog | Mouse - (3, "Namespace"), -- a one-level slice - (4, "Patch") -- replace term #abc with term #def - -- (5, "Local Text/Object Lookup") -- future + (1, "Decl Component"), -- unique type Animal = Cat | Dog | Mouse + (2, "Namespace"), -- a one-level slice + (3, "Patch") -- replace term #abc with term #def ; --- How should objects be linked to hashes? (and old hashes) --- And which id should be linked into blobs? 
--- a) object.id -- no: I ran into an issue in serializing a type annotation --- within a term; there wasn't enough info here to properly --- ser/des a type annotation that includes a self-ref, and I --- couldn't convince myself that the situation wouldn't come up --- b) hash.id -- ~~no: multiple hashes may refer to one object~~ --- -- though, I guess that's true even when they are represented as --- -- inline bytestrings. so I'm going with this option. --- CREATE TABLE object ( id INTEGER PRIMARY KEY, primary_hash_id UNIQUE INTEGER NOT NULL REFERENCES hash(id), @@ -92,14 +80,12 @@ CREATE TABLE causal_old ( new_hash_id INTEGER NOT NULL REFERENCES hash(id) ); --- -- |Links a referent to its type's object --- CREATE TABLE type_of_referent ( --- object_id INTEGER NOT NULL REFERENCES object(id), --- component_index INTEGER NOT NULL, --- constructor_index INTEGER NULL, --- bytes BLOB NOT NULL, --- PRIMARY KEY (object_id, component_index, constructor_index) --- ); +CREATE TABLE type_of_term ( + object_id INTEGER NOT NULL REFERENCES object(id), + component_index INTEGER NOT NULL, + bytes BLOB NOT NULL, + PRIMARY KEY (object_id, component_index) +); -- --CREATE TABLE type_of_referent ( -- -- referent_derived_id INTEGER NOT NULL PRIMARY KEY REFERENCES referent_derived(id), diff --git a/codebase2/codebase/U/Codebase/Reference.hs b/codebase2/codebase/U/Codebase/Reference.hs index 80ca893db5..0782fc93da 100644 --- a/codebase2/codebase/U/Codebase/Reference.hs +++ b/codebase2/codebase/U/Codebase/Reference.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeSynonymInstances #-} @@ -11,6 +12,7 @@ import qualified U.Util.Hash as Hash import U.Util.Hash (Hash) import U.Util.Hashable (Hashable (..)) import qualified U.Util.Hashable as Hashable +import Control.Lens (Traversal) -- |This is the canonical representation of Reference type Reference = Reference' Text Hash @@ -30,6 +32,16 @@ type ComponentIndex = 
Word64 data Id' h = Id h ComponentIndex deriving (Eq, Ord, Show, Functor) +t :: Traversal (Reference' t h) (Reference' t' h) t t' +t f = \case + ReferenceBuiltin t -> ReferenceBuiltin <$> f t + ReferenceDerived id -> pure (ReferenceDerived id) + +h :: Traversal (Reference' t h) (Reference' t h') h h' +h f = \case + ReferenceBuiltin t -> pure (ReferenceBuiltin t) + Derived h i -> Derived <$> f h <*> pure i + instance Hashable Reference where tokens (ReferenceBuiltin txt) = [Hashable.Tag 0, Hashable.Text txt] diff --git a/codebase2/codebase/unison-codebase.cabal b/codebase2/codebase/unison-codebase.cabal index c13c3a09fe..672d9ee8e7 100644 --- a/codebase2/codebase/unison-codebase.cabal +++ b/codebase2/codebase/unison-codebase.cabal @@ -16,7 +16,7 @@ category: Development library exposed-modules: U.Codebase.Branch - U.Codebase.Causal + U.Codebase.Causal U.Codebase.Codebase U.Codebase.Decl U.Codebase.Kind @@ -34,6 +34,7 @@ library build-depends: base, containers, + lens, text, unison-core, unison-util diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index 35ca032ae2..7e0a88175d 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -22,6 +22,9 @@ import qualified U.Util.Hashable as Hashable import Data.Functor (void) import qualified Data.List as List import qualified Data.Vector as Vector +import Control.Monad (join) +import Data.Functor.Identity (Identity(runIdentity)) +import Data.Maybe (fromMaybe) data ABT f v r = Var v @@ -137,3 +140,69 @@ instance (Hashable1 f, Functor f) => Hashable1 (Component f) where toks = Hashable.Hashed <$> hs in Hashable.accumulate $ (Hashable.Tag 1 : toks) ++ [Hashable.Hashed (hash a)] Embed fa -> Hashable.hash1 hashCycle hash fa + +-- * Traversals +-- | `visit f t` applies an effectful function to each subtree of +-- `t` and sequences the results. When `f` returns `Nothing`, `visit` +-- descends into the children of the current subtree. 
When `f` returns +-- `Just t2`, `visit` replaces the current subtree with `t2`. Thus: +-- `visit (const Nothing) t == pure t` and +-- `visit (const (Just (pure t2))) t == pure t2` +visit + :: (Traversable f, Applicative g, Ord v) + => (Term f v a -> Maybe (g (Term f v a))) + -> Term f v a + -> g (Term f v a) +visit f t = flip fromMaybe (f t) $ case out t of + Var _ -> pure t + Cycle body -> cycle (annotation t) <$> visit f body + Abs x e -> abs (annotation t) x <$> visit f e + Tm body -> tm (annotation t) <$> traverse (visit f) body + +-- | Apply an effectful function to an ABT tree top down, sequencing the results. +visit' :: (Traversable f, Applicative g, Monad g, Ord v) + => (f (Term f v a) -> g (f (Term f v a))) + -> Term f v a + -> g (Term f v a) +visit' f t = case out t of + Var _ -> pure t + Cycle body -> cycle (annotation t) <$> visit' f body + Abs x e -> abs (annotation t) x <$> visit' f e + Tm body -> f body >>= (fmap (tm (annotation t)) . traverse (visit' f)) + +-- | `visit` specialized to the `Identity` effect. +visitPure :: (Traversable f, Ord v) + => (Term f v a -> Maybe (Term f v a)) -> Term f v a -> Term f v a +visitPure f = runIdentity . visit (fmap pure . f) + +foreachSubterm + :: (Traversable f, Applicative g, Ord v) + => (Term f v a -> g b) + -> Term f v a + -> g [b] +foreachSubterm f e = case out e of + Var _ -> pure <$> f e + Cycle body -> (:) <$> f e <*> foreachSubterm f body + Abs _ body -> (:) <$> f e <*> foreachSubterm f body + Tm body -> + (:) + <$> f e + <*> (join . 
Foldable.toList <$> traverse (foreachSubterm f) body) + +subterms :: (Ord v, Traversable f) => Term f v a -> [Term f v a] +subterms t = runIdentity $ foreachSubterm pure t + +-- * Patterns +pattern Var' :: v -> Term f v a +pattern Var' v <- Term _ _ (Var v) +pattern Cycle' :: [v] -> Term f v a -> Term f v a +pattern Cycle' vs t <- Term _ _ (Cycle (AbsN' vs t)) +pattern AbsN' :: [v] -> Term f v a -> Term f v a +pattern AbsN' vs body <- (unabs -> (vs, body)) +pattern Tm' :: f (Term f v a) -> Term f v a +pattern Tm' f <- Term _ _ (Tm f) + +unabs :: Term f v a -> ([v], Term f v a) +unabs (Term _ _ (Abs hd body)) = + let (tl, body') = unabs body in (hd : tl, body') +unabs t = ([], t) diff --git a/codebase2/core/U/Core/ABT/Var.hs b/codebase2/core/U/Core/ABT/Var.hs new file mode 100644 index 0000000000..6bb4b0fbe7 --- /dev/null +++ b/codebase2/core/U/Core/ABT/Var.hs @@ -0,0 +1,69 @@ +module U.Core.ABT.Var where + +import Data.Set (Set) +import Prelude hiding (abs, cycle) +import U.Core.ABT +import qualified Data.Set as Set + +-- | A class for avoiding accidental variable capture +-- +-- * `Set.notMember (freshIn vs v) vs`: +-- `freshIn` returns a variable not used in the `Set` +class Ord v => Var v where + freshIn :: Set v -> v -> v + +substsInheritAnnotation + :: (Foldable f, Functor f, Var v) + => [(v, Term f v b)] + -> Term f v a + -> Term f v a +substsInheritAnnotation replacements body = + foldr (uncurry substInheritAnnotation) body (reverse replacements) + +-- Like `subst`, but the annotation of the replacement is inherited from +-- the previous annotation at each replacement point. +substInheritAnnotation :: (Foldable f, Functor f, Var v) + => v -> Term f v b -> Term f v a -> Term f v a +substInheritAnnotation v r = + subst' (\ann -> const ann <$> r) v (freeVars r) + +-- Slightly generalized version of `subst`, the replacement action is handled +-- by the function `replace`, which is given the annotation `a` at the point +-- of replacement. 
`r` should be the set of free variables contained in the +-- term returned by `replace`. See `substInheritAnnotation` for an example usage. +subst' :: (Foldable f, Functor f, Var v) => (a -> Term f v a) -> v -> Set v -> Term f v a -> Term f v a +subst' replace v r t2@(Term fvs ann body) + | Set.notMember v fvs = t2 -- subtrees not containing the var can be skipped + | otherwise = case body of + Var v' | v == v' -> replace ann -- var match; perform replacement + | otherwise -> t2 -- var did not match one being substituted; ignore + Cycle body -> cycle ann (subst' replace v r body) + Abs x _ | x == v -> t2 -- x shadows v; ignore subtree + Abs x e -> abs ann x' e' + where x' = freshIn (fvs `Set.union` r) x + -- rename x to something that cannot be captured by `r` + e' = if x /= x' then subst' replace v r (rename x x' e) + else subst' replace v r e + Tm body -> tm ann (fmap (subst' replace v r) body) + +-- | renames `old` to `new` in the given term, ignoring subtrees that bind `old` +rename :: (Foldable f, Functor f, Var v) => v -> v -> Term f v a -> Term f v a +rename old new t0@(Term fvs ann t) = + if Set.notMember old fvs then t0 + else case t of + Var v -> if v == old then var ann new else t0 + Cycle body -> cycle ann (rename old new body) + Abs v body -> + -- v shadows old, so skip this subtree + if v == old then abs ann v body + + -- the rename would capture new, freshen this Abs + -- to make that no longer true, then proceed with + -- renaming `old` to `new` + else if v == new then + let v' = freshIn (Set.fromList [new,old] <> freeVars body) v + in abs ann v' (rename old new (rename v v' body)) + + -- nothing special, just rename inside body of Abs + else abs ann v (rename old new body) + Tm v -> tm ann (fmap (rename old new) v) diff --git a/codebase2/core/unison-core.cabal b/codebase2/core/unison-core.cabal index a3b818bd63..b15a478987 100644 --- a/codebase2/core/unison-core.cabal +++ b/codebase2/core/unison-core.cabal @@ -16,6 +16,7 @@ category: Development 
library exposed-modules: U.Core.ABT + U.Core.ABT.Var -- U.Core.Reference -- U.Core.Referent -- U.Core.Term diff --git a/parser-typechecker/LICENSE b/parser-typechecker/LICENSE new file mode 100644 index 0000000000..cca9c4376c --- /dev/null +++ b/parser-typechecker/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013, Paul Chiusano and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/parser-typechecker/benchmarks/runtime/Main.hs b/parser-typechecker/benchmarks/runtime/Main.hs new file mode 100644 index 0000000000..3970fa518d --- /dev/null +++ b/parser-typechecker/benchmarks/runtime/Main.hs @@ -0,0 +1,286 @@ +{-# language PatternSynonyms #-} + +module Main(main) where + +import Criterion.Main + +import Data.Word + +import Unison.Runtime.MCode +import Unison.Runtime.Machine + +import Unison.Util.EnumContainers + +infixr 0 $$ +($$) :: Instr -> Section -> Section +($$) = Ins + +loop :: Section +loop = Match 0 $ Test1 0 (Yield $ UArg1 1) rec + where + rec = Prim2 ADDI 0 1 + $$ Prim1 DECI 1 + $$ App False (Env 0) (UArg2 0 1) + +-- Boxed version of loop to see how fast we are without +-- worker/wrapper. +sloop :: Section +sloop = Unpack 1 $$ Unpack 0 $$ body + where + body = Match 1 $ Test1 + 0 (Pack 0 (UArg1 3) $$ Yield (BArg1 0)) + {-else-} rec + rec = Prim2 ADDI 1 3 + $$ Prim1 DECI 2 + $$ Pack 0 (UArg1 1) + $$ Pack 0 (UArg1 0) + $$ App False (Env 1) (BArg2 0 1) + +-- loop with fast path optimization +oloop :: Section +oloop = Match 0 $ Test1 0 (Yield $ UArg1 1) rec + where + rec = Prim2 ADDI 0 1 + $$ Prim1 DECI 1 + $$ Call False 7 (UArg2 0 1) + +-- sloop with fast path optimization +soloop :: Section +soloop = Unpack 1 $$ Unpack 0 $$ body + where + body = Match 1 $ Test1 + 0 (Pack 0 (UArg1 3) $$ Yield (BArg1 0)) + {-else-} rec + rec = Prim2 ADDI 1 3 + $$ Prim1 DECI 2 + $$ Pack 0 (UArg1 1) + $$ Pack 0 (UArg1 0) + $$ Call False 8 (BArg2 0 1) + +konst :: Section +konst = Yield (BArg1 0) + +add :: Section +add = Unpack 1 + $$ Unpack 0 + $$ Prim2 ADDI 1 3 + $$ Pack 0 (UArg1 0) + $$ Yield (BArg1 0) + +-- get = shift $ \k s -> k s s +-- put s = shift $ \k _ -> k () s +-- loop :: Int -> Int -> Int +-- loop n s0 = reset (body n) s0 +-- where +-- body m | m == 0 = x = get ; f _ = x ; f +-- | otherwise = x = get ; put (x+m) ; body (m-1) + +-- k s => (k s) s -- k continuation +diag :: Section +diag = Let (Reset (setSingleton 0) $$ Jump 0 (BArg1 
1)) + $ App False (Stk 0) (BArg1 2) + +-- => shift k. diag k +get :: Section +get = Capture 0 + $$ App False (Env 12) (BArg1 0) + +-- k s _ => (k) s +kid :: Section +kid = Let (Reset (setSingleton 0) $$ Jump 0 ZArgs) + $ App False (Stk 0) (BArg1 2) + +-- s => shift k. kid k s +put :: Section +put = Capture 0 + $$ App False (Env 15) (BArg2 0 1) + +-- m => ... +kloopb :: Section +kloopb = + Match 0 $ Test1 + 0 (Let (App False (Env 13) ZArgs) $ App False (Env 10) (BArg1 0)) + {-else-} $ rec + where + rec = Let (App False (Env 13) ZArgs) -- get + $ Pack 0 (UArg1 0) + $$ Let (App False (Env 11) (BArg2 0 1)) -- add + $ Let (App False (Env 14) (BArg1 0)) -- put + $ Prim1 DECI 0 + $$ App False (Env 5) (UArg1 0) + +-- m a => f = reset (kloopb m) ; y = f (I# a) ; print y +kloop :: Section +kloop = Let (Reset (setSingleton 0) $$ App False (Env 5) (UArg1 0)) + $ Pack 0 (UArg1 1) + $$ App False (Stk 1) (BArg1 0) + +-- s0 0 => s0 +-- s0 1 s => tinst s setDyn 0 (teff s) +teff :: Section +teff + = Match 0 $ Test1 + 0 (Yield $ BArg1 0) + $ {-else-} Call True 21 ZArgs + +-- s => setDyn 0 (teff s) +tinst :: Section +tinst + = Name 20 (BArg1 0) + $$ SetDyn 0 0 + $$ Yield ZArgs + +-- m => ... 
+tloopb :: Section +tloopb = + Match 0 $ Test1 + 0 (Lit 0 $$ App True (Dyn 0) (UArg1 0)) -- get + {-else-} rec + where + rec = Let (Lit 0 $$ App False (Dyn 0) (UArg1 0)) -- get + $ Pack 0 (UArg1 0) -- I# m + $$ Let (App False (Env 11) (BArg2 0 1)) -- add + $ Let (Lit 1 $$ App False (Dyn 0) (UArg1 0)) -- put + $ Prim1 DECI 0 + $$ Call False 25 (UArg1 0) + +-- m s => reset (tinst (I# s) ; tloopb m) +tloop :: Section +tloop = Reset (setSingleton 0) + $$ Pack 0 (UArg1 1) + $$ Let (Call True 21 $ BArg1 0) + $ Call True 25 $ UArg1 0 + +fib :: Section +fib = Match 0 $ Test2 + 0 (Lit 0 $$ Yield $ UArg1 0) + 1 (Lit 1 $$ Yield $ UArg1 0) + {-else-} rec + where + rec = Prim1 DECI 0 + $$ Prim1 DECI 0 + $$ Let (App False (Env 2) (UArg1 1)) + $ Let (App False (Env 2) (UArg1 1)) + $ Prim2 ADDI 0 1 $$ Yield (UArg1 0) + +ofib :: Section +ofib = Match 0 $ Test2 + 0 (Lit 0 $$ Yield $ UArg1 0) + 1 (Lit 1 $$ Yield $ UArg1 0) + {-else-} rec + where + rec = Prim1 DECI 0 + $$ Prim1 DECI 0 + $$ Let (Call True 9 (UArg1 1)) + $ Let (Call True 9 (UArg1 1)) + $ Prim2 ADDI 0 1 $$ Yield (UArg1 0) + +stackEater :: Section +stackEater + = Match 0 $ Test1 + 0 (Yield ZArgs) + $ Prim1 DECI 0 + $$ Let (App False (Env 4) (UArg1 0)) + $ Yield ZArgs + +testEnv :: Word64 -> Comb +testEnv 0 = Lam 2 0 4 0 loop +testEnv 1 = Lam 0 2 6 4 sloop +testEnv 2 = Lam 1 0 6 0 fib +testEnv 4 = Lam 1 0 1 0 stackEater +testEnv 5 = Lam 1 0 2 3 kloopb +testEnv 6 = Lam 2 0 2 2 kloop +testEnv 7 = Lam 2 0 4 0 oloop +testEnv 8 = Lam 0 2 6 4 soloop +testEnv 9 = Lam 1 0 6 0 ofib +testEnv 10 = Lam 0 2 0 2 konst +testEnv 11 = Lam 0 2 5 3 add +testEnv 12 = Lam 0 2 0 2 diag +testEnv 13 = Lam 0 0 0 1 get +testEnv 14 = Lam 0 1 0 2 put +testEnv 15 = Lam 0 3 0 3 kid +testEnv 20 = Lam 1 1 1 2 teff +testEnv 21 = Lam 0 1 0 2 tinst +testEnv 25 = Lam 1 0 4 3 tloopb +testEnv 26 = Lam 1 0 4 3 tloop +testEnv _ = error "testEnv" + +setupu1 :: Word64 -> Int -> Section +setupu1 f n = Lit n $$ App False (Env f) (UArg1 0) + +setupu2 :: Word64 -> Int 
-> Int -> Section +setupu2 f m n = Lit m $$ Lit n $$ App False (Env f) (UArg2 0 1) + +setupb2 :: Word64 -> Int -> Int -> Section +setupb2 f m n + = Lit m $$ Pack 0 (UArg1 0) + $$ Lit n $$ Pack 0 (UArg1 0) + $$ App False (Env f) (BArgR 0 2) + +benchEv :: String -> Section -> Benchmark +benchEv str code = bench str . whnfIO . eval0 testEnv $ code + +main = defaultMain + [ bgroup "loop" + [ benchEv "2500" $ setupu2 0 0 2500 + , benchEv "5000" $ setupu2 0 0 5000 + , benchEv "10000" $ setupu2 0 0 10000 + , benchEv "100000" $ setupu2 0 0 100000 + , benchEv "1000000" $ setupu2 0 0 1000000 + ] + , bgroup "oloop" + [ benchEv "2500" $ setupu2 7 0 2500 + , benchEv "5000" $ setupu2 7 0 5000 + , benchEv "10000" $ setupu2 7 0 10000 + , benchEv "100000" $ setupu2 7 0 100000 + , benchEv "1000000" $ setupu2 7 0 1000000 + ] + , bgroup "sloop" + [ benchEv "2500" $ setupb2 1 0 2500 + , benchEv "5000" $ setupb2 1 0 5000 + , benchEv "10000" $ setupb2 1 0 10000 + , benchEv "100000" $ setupb2 1 0 100000 + , benchEv "1000000" $ setupb2 1 0 1000000 + ] + , bgroup "soloop" + [ benchEv "2500" $ setupb2 8 0 2500 + , benchEv "5000" $ setupb2 8 0 5000 + , benchEv "10000" $ setupb2 8 0 10000 + , benchEv "100000" $ setupb2 8 0 100000 + , benchEv "1000000" $ setupb2 8 0 1000000 + ] + , bgroup "kloop" + [ benchEv "2500" $ setupu2 6 0 2500 + , benchEv "5000" $ setupu2 6 0 5000 + , benchEv "10000" $ setupu2 6 0 10000 + , benchEv "100000" $ setupu2 6 0 100000 + , benchEv "1000000" $ setupu2 6 0 1000000 + ] + , bgroup "tloop" + [ benchEv "2500" $ setupu2 26 0 2500 + , benchEv "5000" $ setupu2 26 0 5000 + , benchEv "10000" $ setupu2 26 0 10000 + , benchEv "100000" $ setupu2 26 0 100000 + , benchEv "1000000" $ setupu2 26 0 1000000 + ] + , bgroup "fib" + [ benchEv "10" $ setupu1 2 10 + , benchEv "15" $ setupu1 2 15 + , benchEv "20" $ setupu1 2 20 + , benchEv "25" $ setupu1 2 25 + , benchEv "30" $ setupu1 2 30 + ] + , bgroup "ofib" + [ benchEv "10" $ setupu1 9 10 + , benchEv "15" $ setupu1 9 15 + , benchEv 
"20" $ setupu1 9 20 + , benchEv "25" $ setupu1 9 25 + , benchEv "30" $ setupu1 9 30 + ] + , bgroup "stackEater" + [ benchEv "100" $ setupu1 4 100 + , benchEv "1000" $ setupu1 4 1000 + , benchEv "10000" $ setupu1 4 10000 + , benchEv "100000" $ setupu1 4 100000 + ] + ] diff --git a/parser-typechecker/prettyprintdemo/Main.hs b/parser-typechecker/prettyprintdemo/Main.hs new file mode 100644 index 0000000000..9e3402cb0e --- /dev/null +++ b/parser-typechecker/prettyprintdemo/Main.hs @@ -0,0 +1,68 @@ +{-# Language OverloadedStrings #-} + +module Main where + +import Data.String (fromString) +import Unison.Util.Pretty as PP +import Data.Text (Text) + +main :: IO () +main = do + -- putStrLn . PP.toANSI 60 $ ex1 + -- print $ examples + putStrLn . PP.toANSI 25 $ examples + where + -- ex1 = PP.linesSpaced [PP.red "hi", PP.blue "blue"] + examples = PP.linesSpaced [ + PP.bold "Creating `Pretty`s", + + "Use `OverloadedStrings`, `lit`, and `text` to get values into `Pretty`", + "Here's an overloaded string", + PP.lit "Here's a call to `lit`", -- works for any `IsString` + PP.text ("No need to Text.unpack, just `PP.text` directly" :: Text), + + PP.bold "Use the `Monoid` and/or `Semigroup` to combine strings", + "Hello, " <> PP.red "world!", + + PP.yellow "`wrap` does automatic line wrapping", + PP.wrap $ loremIpsum, + PP.wrapString "Can also call `wrapString` directly if you have a String value.", + + PP.bold "Indentation: can indent by n spaces, or by another `Pretty`", + PP.indentN 2 (PP.wrap loremIpsum), + PP.indent (PP.red ">> ") (PP.wrap loremIpsum), + + PP.bold "Other handy functions", + + PP.bulleted [ + PP.sep ", " (replicate 10 "a"), + PP.lines ["Alice", PP.hiBlue "Bob", "Carol"], + PP.blue "foo bar baz" + ], + + PP.indentN 4 $ PP.bulleted ["Alice", "Bob", "Carol"], + PP.dashed ["Alice", PP.red "Bob", "Carol"], + PP.column2 [ + (PP.bold "Name", PP.bold "Favorite color"), + ("Alice" , PP.red "Red"), + ("Bob" , PP.blue "Blue"), + ("Carolina" , PP.green "Green"), + ("Dave" , 
PP.black "Black") + ], + PP.numbered (fromString . show) [ + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], + -- Feel free to start the numbering wherever you like + PP.numbered (fromString . show . (10 +)) ["uno", "dos", "tres"], + + PP.bold "Grouping and breaking", + PP.wrap "The orElse function chooses between two `Pretty`, preferring the first if it fits, and using the second otherwise.", + + PP.wrap "The `group` function introduces a level of breaking. The renderer will try to avoid breaking up a `group` unless it's needed. Groups are broken \"outside in\".", + + -- question - I think this group shouldn't be needed + PP.group (PP.orElse "This fits." "So this won't be used."), + "This is a very long string which won't fit." + `PP.orElse` "This is a very...(truncated)" + ] + loremIpsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit." + -- loremIpsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas sem nisi, venenatis viverra ex eu, tristique dapibus justo. Ut lobortis mattis rutrum. Vivamus mattis eros diam, a egestas mi venenatis vel. Nunc felis dui, consectetur ac volutpat vitae, molestie in augue. Cras nec aliquet ex. In et sem vel sapien auctor euismod. Pellentesque eu aliquam dolor. Cras porttitor mi velit, dapibus vulputate odio pharetra non. Etiam iaculis nulla eu nisl euismod ultricies." 
diff --git a/parser-typechecker/src/Unison/Builtin.hs b/parser-typechecker/src/Unison/Builtin.hs new file mode 100644 index 0000000000..1eb35544bb --- /dev/null +++ b/parser-typechecker/src/Unison/Builtin.hs @@ -0,0 +1,517 @@ +{-# LANGUAGE ExplicitForAll #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE Rank2Types #-} + +module Unison.Builtin + (codeLookup + ,constructorType + ,names + ,names0 + ,builtinDataDecls + ,builtinEffectDecls + ,builtinConstructorType + ,builtinTypeDependents + ,builtinTermsByType + ,builtinTermsByTypeMention + ,intrinsicTermReferences + ,intrinsicTypeReferences + ,isBuiltinType + ,typeLookup + ,termRefTypes + ) where + +import Unison.Prelude + +import Data.Bifunctor ( second, first ) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Unison.ConstructorType as CT +import Unison.Codebase.CodeLookup ( CodeLookup(..) ) +import qualified Unison.Builtin.Decls as DD +import qualified Unison.DataDeclaration as DD +import Unison.Parser ( Ann(..) 
) +import qualified Unison.Reference as R +import qualified Unison.Referent as Referent +import Unison.Symbol ( Symbol ) +import qualified Unison.Type as Type +import Unison.Var ( Var ) +import qualified Unison.Var as Var +import Unison.Name ( Name ) +import qualified Unison.Name as Name +import Unison.Names3 (Names(Names), Names0) +import qualified Unison.Names3 as Names3 +import qualified Unison.Typechecker.TypeLookup as TL +import qualified Unison.Util.Relation as Rel + +type DataDeclaration v = DD.DataDeclaration v Ann +type EffectDeclaration v = DD.EffectDeclaration v Ann +type Type v = Type.Type v () + +names :: Names +names = Names names0 mempty + +names0 :: Names0 +names0 = Names3.names0 terms types where + terms = Rel.mapRan Referent.Ref (Rel.fromMap termNameRefs) <> + Rel.fromList [ (Name.fromVar vc, Referent.Con (R.DerivedId r) cid ct) + | (ct, (_,(r,decl))) <- ((CT.Data,) <$> builtinDataDecls @Symbol) <> + ((CT.Effect,) . (second . second) DD.toDataDecl <$> builtinEffectDecls) + , ((_,vc,_), cid) <- DD.constructors' decl `zip` [0..]] + types = Rel.fromList builtinTypes <> + Rel.fromList [ (Name.fromVar v, R.DerivedId r) + | (v,(r,_)) <- builtinDataDecls @Symbol ] <> + Rel.fromList [ (Name.fromVar v, R.DerivedId r) + | (v,(r,_)) <- builtinEffectDecls @Symbol ] + +-- note: this function is really for deciding whether `r` is a term or type, +-- but it can only answer correctly for Builtins. +isBuiltinType :: R.Reference -> Bool +isBuiltinType r = elem r . fmap snd $ builtinTypes + +typeLookup :: Var v => TL.TypeLookup v Ann +typeLookup = + TL.TypeLookup + (fmap (const Intrinsic) <$> termRefTypes) + (Map.fromList . map (first R.DerivedId) $ map snd builtinDataDecls) + (Map.fromList . 
map (first R.DerivedId) $ map snd builtinEffectDecls) + +constructorType :: R.Reference -> Maybe CT.ConstructorType +constructorType r = TL.constructorType (typeLookup @Symbol) r + <|> Map.lookup r builtinConstructorType + +builtinDataDecls :: Var v => [(v, (R.Id, DataDeclaration v))] +builtinDataDecls = + [ (v, (r, Intrinsic <$ d)) | (v, r, d) <- DD.builtinDataDecls ] + +builtinEffectDecls :: Var v => [(v, (R.Id, EffectDeclaration v))] +builtinEffectDecls = [ (v, (r, Intrinsic <$ d)) | (v, r, d) <- DD.builtinEffectDecls ] + +codeLookup :: (Applicative m, Var v) => CodeLookup v m Ann +codeLookup = CodeLookup (const $ pure Nothing) $ \r -> + pure + $ lookup r [ (r, Right x) | (r, x) <- snd <$> builtinDataDecls ] + <|> lookup r [ (r, Left x) | (r, x) <- snd <$> builtinEffectDecls ] + +-- Relation predicate: Domain depends on range. +builtinDependencies :: Rel.Relation R.Reference R.Reference +builtinDependencies = + Rel.fromMultimap (Type.dependencies <$> termRefTypes @Symbol) + +-- a relation whose domain is types and whose range is builtin terms with that type +builtinTermsByType :: Rel.Relation R.Reference Referent.Referent +builtinTermsByType = + Rel.fromList [ (Type.toReference ty, Referent.Ref r) + | (r, ty) <- Map.toList (termRefTypes @Symbol) ] + +-- a relation whose domain is types and whose range is builtin terms that mention that type +-- example: Nat.+ mentions the type `Nat` +builtinTermsByTypeMention :: Rel.Relation R.Reference Referent.Referent +builtinTermsByTypeMention = + Rel.fromList [ (m, Referent.Ref r) | (r, ty) <- Map.toList (termRefTypes @Symbol) + , m <- toList $ Type.toReferenceMentions ty ] + +-- The dependents of a builtin type is the set of builtin terms which +-- mention that type. +builtinTypeDependents :: R.Reference -> Set R.Reference +builtinTypeDependents r = Rel.lookupRan r builtinDependencies + +-- WARNING: +-- As with the terms, we should avoid changing these references, even +-- if we decide to change their names. 
+builtinTypes :: [(Name, R.Reference)] +builtinTypes = Map.toList . Map.mapKeys Name.unsafeFromText + $ foldl' go mempty builtinTypesSrc where + go m = \case + B' r _ -> Map.insert r (R.Builtin r) m + D' r -> Map.insert r (R.Builtin r) m + Rename' r name -> case Map.lookup name m of + Just _ -> error . Text.unpack $ + "tried to rename `" <> r <> "` to `" <> name <> "`, " <> + "which already exists." + Nothing -> case Map.lookup r m of + Nothing -> error . Text.unpack $ + "tried to rename `" <> r <> "` before it was declared." + Just t -> Map.insert name t . Map.delete r $ m + Alias' r name -> case Map.lookup name m of + Just _ -> error . Text.unpack $ + "tried to alias `" <> r <> "` to `" <> name <> "`, " <> + "which already exists." + Nothing -> case Map.lookup r m of + Nothing -> error . Text.unpack $ + "tried to alias `" <> r <> "` before it was declared." + Just t -> Map.insert name t m + +-- WARNING: Don't delete any of these lines, only add corrections. +builtinTypesSrc :: [BuiltinTypeDSL] +builtinTypesSrc = + [ B' "Int" CT.Data + , B' "Nat" CT.Data + , B' "Float" CT.Data + , B' "Boolean" CT.Data + , B' "Sequence" CT.Data, Rename' "Sequence" "List" + , B' "Text" CT.Data + , B' "Char" CT.Data + , B' "Effect" CT.Data, Rename' "Effect" "Request" + , B' "Bytes" CT.Data + , B' "Link.Term" CT.Data + , B' "Link.Type" CT.Data + , B' "IO" CT.Effect, Rename' "IO" "io2.IO" + , B' "Handle" CT.Data, Rename' "Handle" "io2.Handle" + , B' "Socket" CT.Data, Rename' "Socket" "io2.Socket" + , B' "ThreadId" CT.Data, Rename' "ThreadId" "io2.ThreadId" + , B' "MVar" CT.Data, Rename' "MVar" "io2.MVar" + ] + +-- rename these to "builtin" later, when builtin means intrinsic as opposed to +-- stuff that intrinsics depend on. 
+intrinsicTypeReferences :: Set R.Reference +intrinsicTypeReferences = foldl' go mempty builtinTypesSrc where + go acc = \case + B' r _ -> Set.insert (R.Builtin r) acc + D' r -> Set.insert (R.Builtin r) acc + _ -> acc + +intrinsicTermReferences :: Set R.Reference +intrinsicTermReferences = Map.keysSet (termRefTypes @Symbol) + +builtinConstructorType :: Map R.Reference CT.ConstructorType +builtinConstructorType = Map.fromList [ (R.Builtin r, ct) | B' r ct <- builtinTypesSrc ] + +data BuiltinTypeDSL = B' Text CT.ConstructorType | D' Text | Rename' Text Text | Alias' Text Text + + +data BuiltinDSL v + -- simple builtin: name=ref, type + = B Text (Type v) + -- deprecated builtin: name=ref, type (TBD) + | D Text (Type v) + -- rename builtin: refname, newname + -- must not appear before corresponding B/D + -- will overwrite newname + | Rename Text Text + -- alias builtin: refname, newname + -- must not appear before corresponding B/D + -- will overwrite newname + | Alias Text Text + +termNameRefs :: Map Name R.Reference +termNameRefs = Map.mapKeys Name.unsafeFromText $ foldl' go mempty (builtinsSrc @Symbol) where + go m = \case + B r _tp -> Map.insert r (R.Builtin r) m + D r _tp -> Map.insert r (R.Builtin r) m + Rename r name -> case Map.lookup name m of + Just _ -> error . Text.unpack $ + "tried to rename `" <> r <> "` to `" <> name <> "`, " <> + "which already exists." + Nothing -> case Map.lookup r m of + Nothing -> error . Text.unpack $ + "tried to rename `" <> r <> "` before it was declared." + Just t -> Map.insert name t . Map.delete r $ m + Alias r name -> case Map.lookup name m of + Just _ -> error . Text.unpack $ + "tried to alias `" <> r <> "` to `" <> name <> "`, " <> + "which already exists." + Nothing -> case Map.lookup r m of + Nothing -> error . Text.unpack $ + "tried to alias `" <> r <> "` before it was declared." 
+ Just t -> Map.insert name t m + +termRefTypes :: Var v => Map R.Reference (Type v) +termRefTypes = foldl' go mempty builtinsSrc where + go m = \case + B r t -> Map.insert (R.Builtin r) t m + D r t -> Map.insert (R.Builtin r) t m + _ -> m + +builtinsSrc :: Var v => [BuiltinDSL v] +builtinsSrc = + [ B "Int.+" $ int --> int --> int + , B "Int.-" $ int --> int --> int + , B "Int.*" $ int --> int --> int + , B "Int./" $ int --> int --> int + , B "Int.<" $ int --> int --> boolean + , B "Int.>" $ int --> int --> boolean + , B "Int.<=" $ int --> int --> boolean + , B "Int.>=" $ int --> int --> boolean + , B "Int.==" $ int --> int --> boolean + , B "Int.and" $ int --> int --> int + , B "Int.or" $ int --> int --> int + , B "Int.xor" $ int --> int --> int + , B "Int.complement" $ int --> int + , B "Int.increment" $ int --> int + , B "Int.isEven" $ int --> boolean + , B "Int.isOdd" $ int --> boolean + , B "Int.signum" $ int --> int + , B "Int.leadingZeros" $ int --> nat + , B "Int.negate" $ int --> int + , B "Int.negate" $ int --> int + , B "Int.mod" $ int --> int --> int + , B "Int.pow" $ int --> nat --> int + , B "Int.shiftLeft" $ int --> nat --> int + , B "Int.shiftRight" $ int --> nat --> int + , B "Int.truncate0" $ int --> nat + , B "Int.toText" $ int --> text + , B "Int.fromText" $ text --> optionalt int + , B "Int.toFloat" $ int --> float + , B "Int.trailingZeros" $ int --> nat + + , B "Nat.*" $ nat --> nat --> nat + , B "Nat.+" $ nat --> nat --> nat + , B "Nat./" $ nat --> nat --> nat + , B "Nat.<" $ nat --> nat --> boolean + , B "Nat.<=" $ nat --> nat --> boolean + , B "Nat.==" $ nat --> nat --> boolean + , B "Nat.>" $ nat --> nat --> boolean + , B "Nat.>=" $ nat --> nat --> boolean + , B "Nat.and" $ nat --> nat --> nat + , B "Nat.or" $ nat --> nat --> nat + , B "Nat.xor" $ nat --> nat --> nat + , B "Nat.complement" $ nat --> nat + , B "Nat.drop" $ nat --> nat --> nat + , B "Nat.fromText" $ text --> optionalt nat + , B "Nat.increment" $ nat --> nat + , B 
"Nat.isEven" $ nat --> boolean + , B "Nat.isOdd" $ nat --> boolean + , B "Nat.leadingZeros" $ nat --> nat + , B "Nat.mod" $ nat --> nat --> nat + , B "Nat.pow" $ nat --> nat --> nat + , B "Nat.shiftLeft" $ nat --> nat --> nat + , B "Nat.shiftRight" $ nat --> nat --> nat + , B "Nat.sub" $ nat --> nat --> int + , B "Nat.toFloat" $ nat --> float + , B "Nat.toInt" $ nat --> int + , B "Nat.toText" $ nat --> text + , B "Nat.trailingZeros" $ nat --> nat + + , B "Float.+" $ float --> float --> float + , B "Float.-" $ float --> float --> float + , B "Float.*" $ float --> float --> float + , B "Float./" $ float --> float --> float + , B "Float.<" $ float --> float --> boolean + , B "Float.>" $ float --> float --> boolean + , B "Float.<=" $ float --> float --> boolean + , B "Float.>=" $ float --> float --> boolean + , B "Float.==" $ float --> float --> boolean + + -- Trigonmetric Functions + , B "Float.acos" $ float --> float + , B "Float.asin" $ float --> float + , B "Float.atan" $ float --> float + , B "Float.atan2" $ float --> float --> float + , B "Float.cos" $ float --> float + , B "Float.sin" $ float --> float + , B "Float.tan" $ float --> float + + -- Hyperbolic Functions + , B "Float.acosh" $ float --> float + , B "Float.asinh" $ float --> float + , B "Float.atanh" $ float --> float + , B "Float.cosh" $ float --> float + , B "Float.sinh" $ float --> float + , B "Float.tanh" $ float --> float + + -- Exponential Functions + , B "Float.exp" $ float --> float + , B "Float.log" $ float --> float + , B "Float.logBase" $ float --> float --> float + + -- Power Functions + , B "Float.pow" $ float --> float --> float + , B "Float.sqrt" $ float --> float + + -- Rounding and Remainder Functions + , B "Float.ceiling" $ float --> int + , B "Float.floor" $ float --> int + , B "Float.round" $ float --> int + , B "Float.truncate" $ float --> int + + -- Float Utils + , B "Float.abs" $ float --> float + , B "Float.max" $ float --> float --> float + , B "Float.min" $ float --> float --> 
float + , B "Float.toText" $ float --> text + , B "Float.fromText" $ text --> optionalt float + + , B "Universal.==" $ forall1 "a" (\a -> a --> a --> boolean) + -- Don't we want a Universal.!= ? + + -- Universal.compare intended as a low level function that just returns + -- `Int` rather than some Ordering data type. If we want, later, + -- could provide a pure Unison wrapper for Universal.compare that + -- returns a proper data type. + -- + -- 0 is equal, < 0 is less than, > 0 is greater than + , B "Universal.compare" $ forall1 "a" (\a -> a --> a --> int) + , B "Universal.>" $ forall1 "a" (\a -> a --> a --> boolean) + , B "Universal.<" $ forall1 "a" (\a -> a --> a --> boolean) + , B "Universal.>=" $ forall1 "a" (\a -> a --> a --> boolean) + , B "Universal.<=" $ forall1 "a" (\a -> a --> a --> boolean) + + , B "bug" $ forall1 "a" (\a -> forall1 "b" (\b -> a --> b)) + , B "todo" $ forall1 "a" (\a -> forall1 "b" (\b -> a --> b)) + + , B "Boolean.not" $ boolean --> boolean + + , B "Text.empty" text + , B "Text.++" $ text --> text --> text + , B "Text.take" $ nat --> text --> text + , B "Text.drop" $ nat --> text --> text + , B "Text.size" $ text --> nat + , B "Text.==" $ text --> text --> boolean + , D "Text.!=" $ text --> text --> boolean + , B "Text.<=" $ text --> text --> boolean + , B "Text.>=" $ text --> text --> boolean + , B "Text.<" $ text --> text --> boolean + , B "Text.>" $ text --> text --> boolean + , B "Text.uncons" $ text --> optionalt (tuple [char, text]) + , B "Text.unsnoc" $ text --> optionalt (tuple [text, char]) + + , B "Text.toCharList" $ text --> list char + , B "Text.fromCharList" $ list char --> text + + , B "Char.toNat" $ char --> nat + , B "Char.fromNat" $ nat --> char + + , B "Bytes.empty" bytes + , B "Bytes.fromList" $ list nat --> bytes + , B "Bytes.++" $ bytes --> bytes --> bytes + , B "Bytes.take" $ nat --> bytes --> bytes + , B "Bytes.drop" $ nat --> bytes --> bytes + , B "Bytes.at" $ nat --> bytes --> optionalt nat + , B "Bytes.toList" 
$ bytes --> list nat + , B "Bytes.size" $ bytes --> nat + , B "Bytes.flatten" $ bytes --> bytes + + , B "List.empty" $ forall1 "a" list + , B "List.cons" $ forall1 "a" (\a -> a --> list a --> list a) + , Alias "List.cons" "List.+:" + , B "List.snoc" $ forall1 "a" (\a -> list a --> a --> list a) + , Alias "List.snoc" "List.:+" + , B "List.take" $ forall1 "a" (\a -> nat --> list a --> list a) + , B "List.drop" $ forall1 "a" (\a -> nat --> list a --> list a) + , B "List.++" $ forall1 "a" (\a -> list a --> list a --> list a) + , B "List.size" $ forall1 "a" (\a -> list a --> nat) + , B "List.at" $ forall1 "a" (\a -> nat --> list a --> optionalt a) + + , B "Debug.watch" $ forall1 "a" (\a -> text --> a --> a) + ] ++ + -- avoid name conflicts with Universal == < > <= >= + [ Rename (t <> "." <> old) (t <> "." <> new) + | t <- ["Int", "Nat", "Float", "Text"] + , (old, new) <- [("==", "eq") + ,("<" , "lt") + ,("<=", "lteq") + ,(">" , "gt") + ,(">=", "gteq")] + ] ++ io2List ioBuiltins ++ io2List mvarBuiltins + +io2List :: [(Text, Type v)] -> [BuiltinDSL v] +io2List bs = bs >>= \(n,ty) -> [B n ty, Rename n ("io2." 
<> n)] + +ioBuiltins :: Var v => [(Text, Type v)] +ioBuiltins = + [ ("IO.openFile", text --> ioe handle) + , ("IO.closeFile", handle --> ioe unit) + , ("IO.isFileEOF", handle --> ioe boolean) + , ("IO.isFileOpen", handle --> ioe boolean) + , ("IO.isSeekable", handle --> ioe boolean) + , ("IO.seekHandle", handle --> fmode --> int --> ioe unit) + , ("IO.handlePosition", handle --> ioe int) + , ("IO.getBuffering", handle --> ioe bmode) + , ("IO.setBuffering", handle --> bmode --> ioe unit) + , ("IO.getLine", handle --> ioe text) + , ("IO.getText", handle --> ioe text) + , ("IO.putText", handle --> text --> ioe unit) + , ("IO.systemTime", unit --> ioe nat) + , ("IO.getTempDirectory", unit --> ioe text) + , ("IO.getCurrentDirectory", unit --> ioe text) + , ("IO.setCurrentDirectory", text --> ioe unit) + , ("IO.fileExists", text --> ioe boolean) + , ("IO.isDirectory", text --> ioe boolean) + , ("IO.createDirectory", text --> ioe unit) + , ("IO.removeDirectory", text --> ioe unit) + , ("IO.renameDirectory", text --> text --> ioe unit) + , ("IO.removeFile", text --> ioe unit) + , ("IO.renameFile", text --> text --> ioe unit) + , ("IO.getFileTimestamp", text --> ioe nat) + , ("IO.getFileSize", text --> ioe nat) + , ("IO.serverSocket", text --> text --> ioe socket) + , ("IO.listen", socket --> ioe unit) + , ("IO.clientSocket", text --> text --> ioe socket) + , ("IO.closeSocket", socket --> ioe unit) + , ("IO.socketAccept", socket --> ioe socket) + , ("IO.socketSend", socket --> bytes --> ioe unit) + , ("IO.socketReceive", socket --> nat --> ioe bytes) + , ("IO.forkComp" + , forall1 "a" $ \a -> (unit --> ioe a) --> ioe threadId) + , ("IO.stdHandle", nat --> optionalt handle) + ] + +mvarBuiltins :: forall v. 
Var v => [(Text, Type v)] +mvarBuiltins = + [ ("MVar.new", forall1 "a" $ \a -> a --> io (mvar a)) + , ("MVar.newEmpty", forall1 "a" $ \a -> io (mvar a)) + , ("MVar.take", forall1 "a" $ \a -> mvar a --> ioe a) + , ("MVar.tryTake", forall1 "a" $ \a -> mvar a --> io (optionalt a)) + , ("MVar.put", forall1 "a" $ \a -> mvar a --> a --> ioe unit) + , ("MVar.tryPut", forall1 "a" $ \a -> mvar a --> a --> io boolean) + , ("MVar.swap", forall1 "a" $ \a -> mvar a --> a --> ioe a) + , ("MVar.isEmpty", forall1 "a" $ \a -> mvar a --> io boolean) + , ("MVar.read", forall1 "a" $ \a -> mvar a --> ioe a) + , ("MVar.tryRead", forall1 "a" $ \a -> mvar a --> io (optionalt a)) + ] + where + mvar :: Type v -> Type v + mvar a = Type.ref () Type.mvarRef `app` a + +forall1 :: Var v => Text -> (Type v -> Type v) -> Type v +forall1 name body = + let + a = Var.named name + in Type.forall () a (body $ Type.var () a) + +app :: Ord v => Type v -> Type v -> Type v +app = Type.app () + +list :: Ord v => Type v -> Type v +list arg = Type.vector () `app` arg + +optionalt :: Ord v => Type v -> Type v +optionalt arg = DD.optionalType () `app` arg + +tuple :: Ord v => [Type v] -> Type v +tuple [t] = t +tuple ts = foldr pair (DD.unitType ()) ts + +pair :: Ord v => Type v -> Type v -> Type v +pair l r = DD.pairType () `app` l `app` r + +(-->) :: Ord v => Type v -> Type v -> Type v +a --> b = Type.arrow () a b +infixr --> + +io, ioe :: Var v => Type v -> Type v +io = Type.effect1 () (Type.builtinIO ()) +ioe = io . 
either (DD.ioErrorType ()) + where + either l r = DD.eitherType () `app` l `app` r + +socket, threadId, handle, unit :: Var v => Type v +socket = Type.socket () +threadId = Type.threadId () +handle = Type.fileHandle () +unit = DD.unitType () + +fmode, bmode :: Var v => Type v +fmode = DD.fileModeType () +bmode = DD.bufferModeType () + +int, nat, bytes, text, boolean, float, char :: Var v => Type v +int = Type.int () +nat = Type.nat () +bytes = Type.bytes () +text = Type.text () +boolean = Type.boolean () +float = Type.float () +char = Type.char () diff --git a/parser-typechecker/src/Unison/Builtin/Decls.hs b/parser-typechecker/src/Unison/Builtin/Decls.hs new file mode 100644 index 0000000000..6ce8c3f25a --- /dev/null +++ b/parser-typechecker/src/Unison/Builtin/Decls.hs @@ -0,0 +1,310 @@ +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Builtin.Decls where + +import Data.List ( elemIndex, find ) +import qualified Data.Map as Map +import Data.Text (Text) +import qualified Unison.ABT as ABT +import qualified Unison.ConstructorType as CT +import qualified Unison.DataDeclaration as DD +import Unison.DataDeclaration ( DataDeclaration(..) 
+ , Modifier(Structural, Unique) + , hashDecls ) +import qualified Unison.Pattern as Pattern +import Unison.Reference (Reference) +import qualified Unison.Reference as Reference +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import Unison.Symbol (Symbol) +import Unison.Term (ConstructorId, Term, Term2) +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import Unison.Type (Type) +import qualified Unison.Var as Var +import Unison.Var (Var) + + +unitRef, pairRef, optionalRef, eitherRef :: Reference +testResultRef, linkRef, docRef, ioErrorRef :: Reference +fileModeRef, bufferModeRef, seqViewRef :: Reference +(unitRef, pairRef, optionalRef, testResultRef, linkRef, docRef, eitherRef, ioErrorRef, fileModeRef, bufferModeRef, seqViewRef) = + let decls = builtinDataDecls @Symbol + [(_, unit, _)] = filter (\(v, _, _) -> v == Var.named "Unit") decls + [(_, pair, _)] = filter (\(v, _, _) -> v == Var.named "Tuple") decls + [(_, opt , _)] = filter (\(v, _, _) -> v == Var.named "Optional") decls + [(_, testResult, _)] = + filter (\(v, _, _) -> v == Var.named "Test.Result") decls + [(_, link , _)] = filter (\(v, _, _) -> v == Var.named "Link") decls + [(_, doc , _)] = filter (\(v, _, _) -> v == Var.named "Doc") decls + + [(_,ethr,_)] = filter (\(v,_,_) -> v == Var.named "Either") decls + [(_,ioerr,_)] = filter (\(v,_,_) -> v == Var.named "io2.IOError") decls + [(_,fmode,_)] = filter (\(v,_,_) -> v == Var.named "io2.FileMode") decls + [(_,bmode,_)] = filter (\(v,_,_) -> v == Var.named "io2.BufferMode") decls + [(_,seqv,_)] = filter (\(v,_,_) -> v == Var.named "SeqView") decls + r = Reference.DerivedId + in (r unit, r pair, r opt, r testResult, r link, r doc, r ethr, r ioerr, r fmode, r bmode, r seqv) + +pairCtorRef, unitCtorRef :: Referent +pairCtorRef = Referent.Con pairRef 0 CT.Data +unitCtorRef = Referent.Con unitRef 0 CT.Data + +constructorId :: Reference -> Text -> Maybe Int +constructorId ref name = do + (_,_,dd) <- 
find (\(_,r,_) -> Reference.DerivedId r == ref) (builtinDataDecls @Symbol) + elemIndex name $ DD.constructorNames dd + +okConstructorId, failConstructorId, docBlobId, docLinkId, docSignatureId, docSourceId, docEvaluateId, docJoinId, linkTermId, linkTypeId :: ConstructorId +Just okConstructorId = constructorId testResultRef "Test.Result.Ok" +Just failConstructorId = constructorId testResultRef "Test.Result.Fail" +Just docBlobId = constructorId docRef "Doc.Blob" +Just docLinkId = constructorId docRef "Doc.Link" +Just docSignatureId = constructorId docRef "Doc.Signature" +Just docSourceId = constructorId docRef "Doc.Source" +Just docEvaluateId = constructorId docRef "Doc.Evaluate" +Just docJoinId = constructorId docRef "Doc.Join" +Just linkTermId = constructorId linkRef "Link.Term" +Just linkTypeId = constructorId linkRef "Link.Type" + +okConstructorReferent, failConstructorReferent :: Referent.Referent +okConstructorReferent = Referent.Con testResultRef okConstructorId CT.Data +failConstructorReferent = Referent.Con testResultRef failConstructorId CT.Data + +-- | parse some builtin data types, and resolve their free variables using +-- | builtinTypes' and those types defined herein +builtinDataDecls :: Var v => [(v, Reference.Id, DataDeclaration v ())] +builtinDataDecls = rs1 ++ rs + where + rs1 = case hashDecls $ Map.fromList + [ (v "Link" , link) + ] of Right a -> a; Left e -> error $ "builtinDataDecls: " <> show e + rs = case hashDecls $ Map.fromList + [ (v "Unit" , unit) + , (v "Tuple" , tuple) + , (v "Optional" , opt) + , (v "Either" , eith) + , (v "Test.Result" , tr) + , (v "Doc" , doc) + , (v "io2.FileMode" , fmode) + , (v "io2.BufferMode" , bmode) + , (v "SeqView" , seqview) + + , (v "io2.IOError" , ioerr) + ] of Right a -> a; Left e -> error $ "builtinDataDecls: " <> show e + [(_, linkRef, _)] = rs1 + v = Var.named + var name = Type.var () (v name) + arr = Type.arrow' + -- see note on `hashDecls` above for why ctor must be called `Unit.Unit`. 
+ unit = DataDeclaration Structural () [] [((), v "Unit.Unit", var "Unit")] + tuple = DataDeclaration + Structural + () + [v "a", v "b"] + [ ( () + , v "Tuple.Cons" + , Type.foralls + () + [v "a", v "b"] + ( var "a" + `arr` (var "b" `arr` Type.apps' (var "Tuple") [var "a", var "b"]) + ) + ) + ] + opt = DataDeclaration + Structural + () + [v "a"] + [ ( () + , v "Optional.None" + , Type.foralls () [v "a"] (Type.app' (var "Optional") (var "a")) + ) + , ( () + , v "Optional.Some" + , Type.foralls () + [v "a"] + (var "a" `arr` Type.app' (var "Optional") (var "a")) + ) + ] + eith = DataDeclaration + Structural + () + [v "a", v "b"] + [ ( () + , v "Either.Left" + , Type.foralls () [v "a", v "b"] + (var "a" `arr` Type.apps' (var "Either") [var "a", var "b"]) + ) + , ( () + , v "Either.Right" + , Type.foralls () [v "a", v "b"] + (var "b" `arr` Type.apps' (var "Either") [var "a", var "b"]) + ) + ] + fmode = DataDeclaration + (Unique "3c11ba4f0a5d8fedd427b476cdd2d7673197d11e") + () + [] + [ ((), v "io2.FileMode.Read", var "io2.FileMode") + , ((), v "io2.FileMode.Write", var "io2.FileMode") + , ((), v "io2.FileMode.Append", var "io2.FileMode") + , ((), v "io2.FileMode.ReadWrite", var "io2.FileMode") + ] + bmode = DataDeclaration + (Unique "7dd9560d3826c21e5e6a7e08f575b61adcddf849") + () + [] + [ ((), v "io2.BufferMode.NoBuffering", var "io2.BufferMode") + , ((), v "io2.BufferMode.LineBuffering", var "io2.BufferMode") + , ((), v "io2.BufferMode.BlockBuffering", var "io2.BufferMode") + , ((), v "io2.BufferMode.SizedBlockBuffering" + , Type.nat () `arr` var "io2.BufferMode") + ] + ioerr = DataDeclaration + (Unique "5915e25ac83205f7885395cc6c6c988bc5ec69a1") + () + [] + [ ((), v "io2.IOError.AlreadyExists", var "io2.IOError") + , ((), v "io2.IOError.NoSuchThing", var "io2.IOError") + , ((), v "io2.IOError.ResourceBusy", var "io2.IOError") + , ((), v "io2.IOError.ResourceExhausted", var "io2.IOError") + , ((), v "io2.IOError.EOF", var "io2.IOError") + , ((), v 
"io2.IOError.IllegalOperation", var "io2.IOError") + , ((), v "io2.IOError.PermissionDenied", var "io2.IOError") + , ((), v "io2.IOError.UserError", var "io2.IOError") + ] + seqview = DataDeclaration + Structural + () + [v "a", v "b"] + [ ( () + , v "SeqView.VEmpty" + , Type.foralls () [v "a", v "b"] + (Type.apps' (var "SeqView") [var "a", var "b"]) + ) + , ( () + , v "SeqView.VElem" + , let sv = Type.apps' (var "SeqView") [var "a", var "b"] + in Type.foralls () [v "a", v "b"] + (var "a" `arr` (var "b" `arr` sv)) + ) + ] + tr = DataDeclaration + (Unique "70621e539cd802b2ad53105697800930411a3ebc") + () + [] + [ ((), v "Test.Result.Fail", Type.text () `arr` var "Test.Result") + , ((), v "Test.Result.Ok" , Type.text () `arr` var "Test.Result") + ] + doc = DataDeclaration + (Unique "c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004") + () + [] + [ ((), v "Doc.Blob", Type.text () `arr` var "Doc") + , ((), v "Doc.Link", Type.refId () linkRef `arr` var "Doc") + , ((), v "Doc.Signature", Type.termLink () `arr` var "Doc") + , ((), v "Doc.Source", Type.refId () linkRef `arr` var "Doc") + , ((), v "Doc.Evaluate", Type.termLink () `arr` var "Doc") + , ((), v "Doc.Join", Type.app () (Type.vector()) (var "Doc") `arr` var "Doc") + ] + link = DataDeclaration + (Unique "a5803524366ead2d7f3780871d48771e8142a3b48802f34a96120e230939c46bd5e182fcbe1fa64e9bff9bf741f3c04") + () + [] + [ ((), v "Link.Term", Type.termLink () `arr` var "Link") + , ((), v "Link.Type", Type.typeLink () `arr` var "Link") + ] + +builtinEffectDecls :: [(v, Reference.Id, DD.EffectDeclaration v ())] +builtinEffectDecls = [] + +pattern UnitRef <- (unUnitRef -> True) +pattern PairRef <- (unPairRef -> True) +pattern OptionalRef <- (unOptionalRef -> True) +pattern TupleType' ts <- (unTupleType -> Just ts) +pattern TupleTerm' xs <- (unTupleTerm -> Just xs) +pattern TuplePattern ps <- (unTuplePattern -> Just ps) + +-- some pattern synonyms to make pattern matching on some of 
these constants more pleasant +pattern DocRef <- ((== docRef) -> True) +pattern DocJoin segs <- Term.App' (Term.Constructor' DocRef DocJoinId) (Term.Sequence' segs) +pattern DocBlob txt <- Term.App' (Term.Constructor' DocRef DocBlobId) (Term.Text' txt) +pattern DocLink link <- Term.App' (Term.Constructor' DocRef DocLinkId) link +pattern DocSource link <- Term.App' (Term.Constructor' DocRef DocSourceId) link +pattern DocSignature link <- Term.App' (Term.Constructor' DocRef DocSignatureId) link +pattern DocEvaluate link <- Term.App' (Term.Constructor' DocRef DocEvaluateId) link +pattern Doc <- Term.App' (Term.Constructor' DocRef _) _ +pattern DocSignatureId <- ((== docSignatureId) -> True) +pattern DocBlobId <- ((== docBlobId) -> True) +pattern DocLinkId <- ((== docLinkId) -> True) +pattern DocSourceId <- ((== docSourceId) -> True) +pattern DocEvaluateId <- ((== docEvaluateId) -> True) +pattern DocJoinId <- ((== docJoinId) -> True) +pattern LinkTermId <- ((== linkTermId) -> True) +pattern LinkTypeId <- ((== linkTypeId) -> True) +pattern LinkRef <- ((== linkRef) -> True) +pattern LinkTerm tm <- Term.App' (Term.Constructor' LinkRef LinkTermId) tm +pattern LinkType ty <- Term.App' (Term.Constructor' LinkRef LinkTypeId) ty + +unitType, pairType, optionalType, testResultType, + eitherType, ioErrorType, fileModeType, bufferModeType + :: Ord v => a -> Type v a +unitType a = Type.ref a unitRef +pairType a = Type.ref a pairRef +testResultType a = Type.app a (Type.vector a) (Type.ref a testResultRef) +optionalType a = Type.ref a optionalRef +eitherType a = Type.ref a eitherRef +ioErrorType a = Type.ref a ioErrorRef +fileModeType a = Type.ref a fileModeRef +bufferModeType a = Type.ref a bufferModeRef + +unitTerm :: Var v => a -> Term v a +unitTerm ann = Term.constructor ann unitRef 0 + +tupleConsTerm :: (Ord v, Semigroup a) + => Term2 vt at ap v a + -> Term2 vt at ap v a + -> Term2 vt at ap v a +tupleConsTerm hd tl = + Term.apps' (Term.constructor (ABT.annotation hd) pairRef 0) 
[hd, tl] + +tupleTerm :: (Var v, Monoid a) => [Term v a] -> Term v a +tupleTerm = foldr tupleConsTerm (unitTerm mempty) + +-- delayed terms are just lambdas that take a single `()` arg +-- `force` calls the function +forceTerm :: Var v => a -> a -> Term v a -> Term v a +forceTerm a au e = Term.app a e (unitTerm au) + +delayTerm :: Var v => a -> Term v a -> Term v a +delayTerm a = Term.lam a $ Var.named "()" + +unTupleTerm + :: Term.Term2 vt at ap v a + -> Maybe [Term.Term2 vt at ap v a] +unTupleTerm t = case t of + Term.Apps' (Term.Constructor' PairRef 0) [fst, snd] -> + (fst :) <$> unTupleTerm snd + Term.Constructor' UnitRef 0 -> Just [] + _ -> Nothing + +unTupleType :: Var v => Type v a -> Maybe [Type v a] +unTupleType t = case t of + Type.Apps' (Type.Ref' PairRef) [fst, snd] -> (fst :) <$> unTupleType snd + Type.Ref' UnitRef -> Just [] + _ -> Nothing + +unTuplePattern :: Pattern.Pattern loc -> Maybe [Pattern.Pattern loc] +unTuplePattern p = case p of + Pattern.Constructor _ PairRef 0 [fst, snd] -> (fst : ) <$> unTuplePattern snd + Pattern.Constructor _ UnitRef 0 [] -> Just [] + _ -> Nothing + +unUnitRef,unPairRef,unOptionalRef:: Reference -> Bool +unUnitRef = (== unitRef) +unPairRef = (== pairRef) +unOptionalRef = (== optionalRef) + diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs new file mode 100644 index 0000000000..b815b8f9a7 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -0,0 +1,294 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase where + +import Unison.Prelude + +import Control.Lens ( _1, _2, (%=) ) +import Control.Monad.State ( State, evalState, get ) +import Data.Bifunctor ( bimap ) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Unison.ABT as ABT +import qualified Unison.Builtin as Builtin +import Unison.Codebase.Branch ( Branch ) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.CodeLookup as CL 
+import qualified Unison.Codebase.Reflog as Reflog +import Unison.Codebase.SyncMode ( SyncMode ) +import qualified Unison.DataDeclaration as DD +import qualified Unison.Names2 as Names +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import Unison.Typechecker.TypeLookup (TypeLookup(TypeLookup)) +import qualified Unison.Typechecker.TypeLookup as TL +import qualified Unison.Parser as Parser +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Relation as Rel +import qualified Unison.Util.Set as Set +import qualified Unison.Var as Var +import Unison.Var ( Var ) +import qualified Unison.Runtime.IOSource as IOSource +import Unison.Symbol ( Symbol ) +import Unison.DataDeclaration (Decl) +import Unison.Term (Term) +import Unison.Type (Type) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import Unison.ShortHash (ShortHash) + +type DataDeclaration v a = DD.DataDeclaration v a +type EffectDeclaration v a = DD.EffectDeclaration v a + +-- | this FileCodebase detail lives here, because the interface depends on it 🙃 +type CodebasePath = FilePath + +data Codebase m v a = + Codebase { getTerm :: Reference.Id -> m (Maybe (Term v a)) + , getTypeOfTermImpl :: Reference.Id -> m (Maybe (Type v a)) + , getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v a)) + + , putTerm :: Reference.Id -> Term v a -> Type v a -> m () + , putTypeDeclaration :: Reference.Id -> Decl v a -> m () + + , getRootBranch :: m (Either GetRootBranchError (Branch m)) + , putRootBranch :: Branch m -> m () + , rootBranchUpdates :: m (m (), m (Set Branch.Hash)) + , getBranchForHash :: Branch.Hash -> m (Maybe (Branch m)) + + , dependentsImpl :: Reference -> m (Set Reference.Id) + -- This copies all the dependencies of `b` from the specified + -- FileCodebase into this Codebase, and sets our root branch to `b` + , 
syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m () + -- This copies all the dependencies of `b` from the this Codebase + -- into the specified FileCodebase, and sets its _head to `b` + , syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m () + + -- Watch expressions are part of the codebase, the `Reference.Id` is + -- the hash of the source of the watch expression, and the `Term v a` + -- is the evaluated result of the expression, decompiled to a term. + , watches :: UF.WatchKind -> m [Reference.Id] + , getWatch :: UF.WatchKind -> Reference.Id -> m (Maybe (Term v a)) + , putWatch :: UF.WatchKind -> Reference.Id -> Term v a -> m () + + , getReflog :: m [Reflog.Entry] + , appendReflog :: Text -> Branch m -> Branch m -> m () + + -- list of terms of the given type + , termsOfTypeImpl :: Reference -> m (Set Referent.Id) + -- list of terms that mention the given type anywhere in their signature + , termsMentioningTypeImpl :: Reference -> m (Set Referent.Id) + -- number of base58 characters needed to distinguish any two references in the codebase + , hashLength :: m Int + , termReferencesByPrefix :: ShortHash -> m (Set Reference.Id) + , typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id) + , termReferentsByPrefix :: ShortHash -> m (Set Referent.Id) + + , branchHashLength :: m Int + , branchHashesByPrefix :: ShortBranchHash -> m (Set Branch.Hash) + } + +data GetRootBranchError + = NoRootBranch + | CouldntParseRootBranch String + | CouldntLoadRootBranch Branch.Hash + deriving Show + +data SyncFileCodebaseResult = SyncOk | UnknownDestinationRootBranch Branch.Hash | NotFastForward + +bootstrapNames :: Names.Names0 +bootstrapNames = + Builtin.names0 <> UF.typecheckedToNames0 IOSource.typecheckedFile + +-- | Write all of the builtins types into the codebase and create empty namespace +initializeCodebase :: forall m. 
Monad m => Codebase m Symbol Parser.Ann -> m () +initializeCodebase c = do + let uf = (UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) + (Map.fromList Builtin.builtinEffectDecls) + mempty mempty) + addDefsToCodebase c uf + putRootBranch c (Branch.one Branch.empty0) + +-- Feel free to refactor this to use some other type than TypecheckedUnisonFile +-- if it makes sense to later. +addDefsToCodebase :: forall m v a. (Monad m, Var v) + => Codebase m v a -> UF.TypecheckedUnisonFile v a -> m () +addDefsToCodebase c uf = do + traverse_ (goType Right) (UF.dataDeclarationsId' uf) + traverse_ (goType Left) (UF.effectDeclarationsId' uf) + -- put terms + traverse_ goTerm (UF.hashTermsId uf) + where + goTerm (r, tm, tp) = putTerm c r tm tp + goType :: (t -> Decl v a) -> (Reference.Id, t) -> m () + goType f (ref, decl) = putTypeDeclaration c ref (f decl) + +getTypeOfConstructor :: + (Monad m, Ord v) => Codebase m v a -> Reference -> Int -> m (Maybe (Type v a)) +getTypeOfConstructor codebase (Reference.DerivedId r) cid = do + maybeDecl <- getTypeDeclaration codebase r + pure $ case maybeDecl of + Nothing -> Nothing + Just decl -> DD.typeOfConstructor (either DD.toDataDecl id decl) cid +getTypeOfConstructor _ r cid = + error $ "Don't know how to getTypeOfConstructor " ++ show r ++ " " ++ show cid + +typeLookupForDependencies + :: (Monad m, Var v, BuiltinAnnotation a) + => Codebase m v a -> Set Reference -> m (TL.TypeLookup v a) +typeLookupForDependencies codebase = foldM go mempty + where + go tl ref@(Reference.DerivedId id) = fmap (tl <>) $ + getTypeOfTerm codebase ref >>= \case + Just typ -> pure $ TypeLookup (Map.singleton ref typ) mempty mempty + Nothing -> getTypeDeclaration codebase id >>= \case + Just (Left ed) -> + pure $ TypeLookup mempty mempty (Map.singleton ref ed) + Just (Right dd) -> + pure $ TypeLookup mempty (Map.singleton ref dd) mempty + Nothing -> pure mempty + go tl Reference.Builtin{} = pure tl -- codebase isn't consulted for builtins + +-- 
todo: can this be implemented in terms of TransitiveClosure.transitiveClosure? +-- todo: add some tests on this guy? +transitiveDependencies + :: (Monad m, Var v) + => CL.CodeLookup v m a + -> Set Reference.Id + -> Reference.Id + -> m (Set Reference.Id) +transitiveDependencies code seen0 rid = if Set.member rid seen0 + then pure seen0 + else + let seen = Set.insert rid seen0 + getIds = Set.mapMaybe Reference.toId + in CL.getTerm code rid >>= \case + Just t -> + foldM (transitiveDependencies code) seen (getIds $ Term.dependencies t) + Nothing -> + CL.getTypeDeclaration code rid >>= \case + Nothing -> pure seen + Just (Left ed) -> foldM (transitiveDependencies code) + seen + (getIds $ DD.dependencies (DD.toDataDecl ed)) + Just (Right dd) -> foldM (transitiveDependencies code) + seen + (getIds $ DD.dependencies dd) + +toCodeLookup :: Codebase m v a -> CL.CodeLookup v m a +toCodeLookup c = CL.CodeLookup (getTerm c) (getTypeDeclaration c) + +-- Like the other `makeSelfContained`, but takes and returns a `UnisonFile`. +-- Any watches in the input `UnisonFile` will be watches in the returned +-- `UnisonFile`. +makeSelfContained' + :: forall m v a . (Monad m, Monoid a, Var v) + => CL.CodeLookup v m a + -> UF.UnisonFile v a + -> m (UF.UnisonFile v a) +makeSelfContained' code uf = do + let UF.UnisonFileId ds0 es0 bs0 ws0 = uf + deps0 = getIds . Term.dependencies . snd <$> (UF.allWatches uf <> bs0) + where getIds = Set.mapMaybe Reference.toId + -- transitive dependencies (from codebase) of all terms (including watches) in the UF + deps <- foldM (transitiveDependencies code) Set.empty (Set.unions deps0) + -- load all decls from deps list + decls <- fmap catMaybes + . 
forM (toList deps) + $ \rid -> fmap (rid, ) <$> CL.getTypeDeclaration code rid + -- partition the decls into effects and data + let es1 :: [(Reference.Id, DD.EffectDeclaration v a)] + ds1 :: [(Reference.Id, DD.DataDeclaration v a)] + (es1, ds1) = partitionEithers [ bimap (r,) (r,) d | (r, d) <- decls ] + -- load all terms from deps list + bs1 <- fmap catMaybes + . forM (toList deps) + $ \rid -> fmap (rid, ) <$> CL.getTerm code rid + let + allVars :: Set v + allVars = Set.unions + [ UF.allVars uf + , Set.unions [ DD.allVars dd | (_, dd) <- ds1 ] + , Set.unions [ DD.allVars (DD.toDataDecl ed) | (_, ed) <- es1 ] + , Set.unions [ Term.allVars tm | (_, tm) <- bs1 ] + ] + refVar :: Reference.Id -> State (Set v, Map Reference.Id v) v + refVar r = do + m <- snd <$> get + case Map.lookup r m of + Just v -> pure v + Nothing -> do + v <- ABT.freshenS' _1 (Var.refNamed (Reference.DerivedId r)) + _2 %= Map.insert r v + pure v + assignVars :: [(Reference.Id, b)] -> State (Set v, Map Reference.Id v) [(v, (Reference.Id, b))] + assignVars = traverse (\e@(r, _) -> (,e) <$> refVar r) + unref :: Term v a -> State (Set v, Map Reference.Id v) (Term v a) + unref = ABT.visit go where + go t@(Term.Ref' (Reference.DerivedId r)) = + Just (Term.var (ABT.annotation t) <$> refVar r) + go _ = Nothing + unrefb = traverse (\(v, tm) -> (v,) <$> unref tm) + pair :: forall f a b. Applicative f => f a -> f b -> f (a,b) + pair = liftA2 (,) + uf' = flip evalState (allVars, Map.empty) $ do + datas' <- Map.union ds0 . Map.fromList <$> assignVars ds1 + effects' <- Map.union es0 . 
Map.fromList <$> assignVars es1 + -- bs0 is terms from the input file + bs0' <- unrefb bs0 + ws0' <- traverse unrefb ws0 + -- bs1 is dependency terms + bs1' <- traverse (\(r, tm) -> refVar r `pair` unref tm) bs1 + pure $ UF.UnisonFileId datas' effects' (bs1' ++ bs0') ws0' + pure uf' + +getTypeOfTerm :: (Applicative m, Var v, BuiltinAnnotation a) => + Codebase m v a -> Reference -> m (Maybe (Type v a)) +getTypeOfTerm c = \case + Reference.DerivedId h -> getTypeOfTermImpl c h + r@Reference.Builtin{} -> + pure $ fmap (const builtinAnnotation) + <$> Map.lookup r Builtin.termRefTypes + + +-- The dependents of a builtin type is the set of builtin terms which +-- mention that type. +dependents :: Functor m => Codebase m v a -> Reference -> m (Set Reference) +dependents c r + = Set.union (Builtin.builtinTypeDependents r) + . Set.map Reference.DerivedId + <$> dependentsImpl c r + +termsOfType :: (Var v, Functor m) => Codebase m v a -> Type v a -> m (Set Referent.Referent) +termsOfType c ty = + Set.union (Rel.lookupDom r Builtin.builtinTermsByType) + . Set.map (fmap Reference.DerivedId) + <$> termsOfTypeImpl c r + where + r = Type.toReference ty + +termsMentioningType :: (Var v, Functor m) => Codebase m v a -> Type v a -> m (Set Referent.Referent) +termsMentioningType c ty = + Set.union (Rel.lookupDom r Builtin.builtinTermsByTypeMention) + . Set.map (fmap Reference.DerivedId) + <$> termsMentioningTypeImpl c r + where + r = Type.toReference ty + +-- todo: could have a way to look this up just by checking for a file rather than loading it +isTerm :: (Applicative m, Var v, BuiltinAnnotation a) + => Codebase m v a -> Reference -> m Bool +isTerm code = fmap isJust . 
getTypeOfTerm code + +isType :: Applicative m => Codebase m v a -> Reference -> m Bool +isType c r = case r of + Reference.Builtin{} -> pure $ Builtin.isBuiltinType r + Reference.DerivedId r -> isJust <$> getTypeDeclaration c r + +class BuiltinAnnotation a where + builtinAnnotation :: a + +instance BuiltinAnnotation Parser.Ann where + builtinAnnotation = Parser.Intrinsic diff --git a/parser-typechecker/src/Unison/Codebase/Branch.hs b/parser-typechecker/src/Unison/Codebase/Branch.hs new file mode 100644 index 0000000000..0568038acd --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Branch.hs @@ -0,0 +1,900 @@ +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE RankNTypes #-} + +module Unison.Codebase.Branch + ( -- * Branch types + Branch(..) + , Branch0(..) + , MergeMode(..) + , Raw(..) + , Star + , Hash + , EditHash + , pattern Hash + + -- * Branch construction + , empty + , empty0 + , branch0 + , one + , toCausalRaw + , transform + + -- * Branch history + -- ** History queries + , isEmpty + , isEmpty0 + , isOne + , head + , headHash + , before + , findHistoricalHQs + , findHistoricalRefs + , findHistoricalRefs' + , namesDiff + -- ** History updates + , step + , stepEverywhere + , uncons + , merge + , merge' + + -- * Branch children + -- ** Children lenses + , children + -- ** Children queries + , toList0 + , getAt + , getAt' + , getAt0 + -- ** Children updates + , setChildBranch + , stepManyAt + , stepManyAt0 + , stepManyAtM + , modifyAtM + + -- * Branch terms/types + -- ** Term/type lenses + , terms + , types + -- ** Term/type queries + , deepReferents + , deepTypeReferences + , toNames0 + -- ** Term/type updates + , addTermName + , addTypeName + , deleteTermName + , deleteTypeName + + + -- * Branch patches + -- ** Patch queries + , deepEdits' + , getPatch + , getMaybePatch + -- ** Patch updates + , replacePatch + , deletePatch + , modifyPatches + + -- * Branch 
serialization + , cachedRead + , boundedCache + , Cache + , sync + + -- * Unused + , childrenR + , debugPaths + , editedPatchRemoved + , editsR + , findHistoricalSHs + , fork + , lca + , move + , numHashChars + , printDebugPaths + , removedPatchEdited + , stepAt + , stepAtM + , termsR + , typesR + ) where + +import Unison.Prelude hiding (empty) + +import Prelude hiding (head,read,subtract) + +import Control.Lens hiding ( children, cons, transform, uncons ) +import qualified Control.Monad.State as State +import Control.Monad.State ( StateT ) +import Data.Bifunctor ( second ) +import qualified Data.Map as Map +import qualified Data.Map.Merge.Lazy as Map +import qualified Data.Set as Set +import qualified Unison.Codebase.Patch as Patch +import Unison.Codebase.Patch ( Patch ) +import qualified Unison.Codebase.Causal as Causal +import Unison.Codebase.Causal ( Causal + , pattern RawOne + , pattern RawCons + , pattern RawMerge + ) +import Unison.Codebase.Path ( Path(..) ) +import qualified Unison.Codebase.Path as Path +import Unison.NameSegment ( NameSegment ) +import qualified Unison.NameSegment as NameSegment +import qualified Unison.Codebase.Metadata as Metadata +import qualified Unison.Hash as Hash +import Unison.Hashable ( Hashable ) +import qualified Unison.Hashable as H +import Unison.Name ( Name(..) 
) +import qualified Unison.Name as Name +import qualified Unison.Names2 as Names +import qualified Unison.Names3 as Names +import Unison.Names2 ( Names'(Names), Names0 ) +import Unison.Reference ( Reference ) +import Unison.Referent ( Referent ) +import qualified Unison.Referent as Referent +import qualified Unison.Reference as Reference + +import qualified Unison.Util.Cache as Cache +import qualified Unison.Util.Relation as R +import Unison.Util.Relation ( Relation ) +import qualified Unison.Util.Relation4 as R4 +import qualified Unison.Util.List as List +import Unison.Util.Map ( unionWithM ) +import qualified Unison.Util.Star3 as Star3 +import Unison.ShortHash (ShortHash) +import qualified Unison.ShortHash as SH +import qualified Unison.HashQualified as HQ +import Unison.HashQualified (HashQualified) +import qualified Unison.LabeledDependency as LD +import Unison.LabeledDependency (LabeledDependency) + +newtype Branch m = Branch { _history :: Causal m Raw (Branch0 m) } + deriving (Eq, Ord) + +type Hash = Causal.RawHash Raw +type EditHash = Hash.Hash + +-- Star3 r n Metadata.Type (Metadata.Type, Metadata.Value) +type Star r n = Metadata.Star r n + +data Branch0 m = Branch0 + { _terms :: Star Referent NameSegment + , _types :: Star Reference NameSegment + , _children :: Map NameSegment (Branch m) + , _edits :: Map NameSegment (EditHash, m Patch) + -- names and metadata for this branch and its children + -- (ref, (name, value)) iff ref has metadata `value` at name `name` + , deepTerms :: Relation Referent Name + , deepTypes :: Relation Reference Name + , deepTermMetadata :: Metadata.R4 Referent Name + , deepTypeMetadata :: Metadata.R4 Reference Name + , deepPaths :: Set Path + , deepEdits :: Map Name EditHash + } + +-- Represents a shallow diff of a Branch0. +-- Each of these `Star`s contain metadata as well, so an entry in +-- `added` or `removed` could be an update to the metadata. 
+data BranchDiff = BranchDiff + { addedTerms :: Star Referent NameSegment + , removedTerms :: Star Referent NameSegment + , addedTypes :: Star Reference NameSegment + , removedTypes :: Star Reference NameSegment + , changedPatches :: Map NameSegment Patch.PatchDiff + } deriving (Eq, Ord, Show) + +instance Semigroup BranchDiff where + left <> right = BranchDiff + { addedTerms = addedTerms left <> addedTerms right + , removedTerms = removedTerms left <> removedTerms right + , addedTypes = addedTypes left <> addedTypes right + , removedTypes = removedTypes left <> removedTypes right + , changedPatches = + Map.unionWith (<>) (changedPatches left) (changedPatches right) + } + +instance Monoid BranchDiff where + mappend = (<>) + mempty = BranchDiff mempty mempty mempty mempty mempty + +-- The raw Branch +data Raw = Raw + { _termsR :: Star Referent NameSegment + , _typesR :: Star Reference NameSegment + , _childrenR :: Map NameSegment Hash + , _editsR :: Map NameSegment EditHash + } + +makeLenses ''Branch +makeLensesFor [("_edits", "edits")] ''Branch0 +makeLenses ''Raw + +toNames0 :: Branch0 m -> Names0 +toNames0 b = Names (R.swap . deepTerms $ b) + (R.swap . deepTypes $ b) + +-- This stops searching for a given ShortHash once it encounters +-- any term or type in any Branch0 that satisfies that ShortHash. +findHistoricalSHs + :: Monad m => Set ShortHash -> Branch m -> m (Set ShortHash, Names0) +findHistoricalSHs = findInHistory + (\sh r _n -> sh `SH.isPrefixOf` Referent.toShortHash r) + (\sh r _n -> sh `SH.isPrefixOf` Reference.toShortHash r) + +-- This stops searching for a given HashQualified once it encounters +-- any term or type in any Branch0 that satisfies that HashQualified. 
+findHistoricalHQs :: Monad m + => Set HashQualified + -> Branch m + -> m (Set HashQualified, Names0) +findHistoricalHQs = findInHistory + (\hq r n -> HQ.matchesNamedReferent n r hq) + (\hq r n -> HQ.matchesNamedReference n r hq) + +findHistoricalRefs :: Monad m => Set LabeledDependency -> Branch m + -> m (Set LabeledDependency, Names0) +findHistoricalRefs = findInHistory + (\query r _n -> LD.fold (const False) (==r) query) + (\query r _n -> LD.fold (==r) (const False) query) + +findHistoricalRefs' :: Monad m => Set Reference -> Branch m + -> m (Set Reference, Names0) +findHistoricalRefs' = findInHistory + (\queryRef r _n -> r == Referent.Ref queryRef) + (\queryRef r _n -> r == queryRef) + +findInHistory :: forall m q. (Monad m, Ord q) + => (q -> Referent -> Name -> Bool) + -> (q -> Reference -> Name -> Bool) + -> Set q -> Branch m -> m (Set q, Names0) +findInHistory termMatches typeMatches queries b = + (Causal.foldHistoryUntil f (queries, mempty) . _history) b <&> \case + -- could do something more sophisticated here later to report that some SH + -- couldn't be found anywhere in the history. but for now, I assume that + -- the normal thing will happen when it doesn't show up in the namespace. + Causal.Satisfied (_, names) -> (mempty, names) + Causal.Unsatisfied (missing, names) -> (missing, names) + where + -- in order to not favor terms over types, we iterate through the ShortHashes, + -- for each `remainingQueries`, if we find a matching Referent or Reference, + -- we remove `q` from the accumulated `remainingQueries`, and add the Ref* to + -- the accumulated `names0`. 
+ f acc@(remainingQueries, _) b0 = (acc', null remainingQueries') + where + acc'@(remainingQueries', _) = foldl' findQ acc remainingQueries + findQ :: (Set q, Names0) -> q -> (Set q, Names0) + findQ acc sh = + foldl' (doType sh) (foldl' (doTerm sh) acc + (R.toList $ deepTerms b0)) + (R.toList $ deepTypes b0) + doTerm q acc@(remainingSHs, names0) (r, n) = if termMatches q r n + then (Set.delete q remainingSHs, Names.addTerm n r names0) else acc + doType q acc@(remainingSHs, names0) (r, n) = if typeMatches q r n + then (Set.delete q remainingSHs, Names.addType n r names0) else acc + +deepReferents :: Branch0 m -> Set Referent +deepReferents = R.dom . deepTerms + +deepTypeReferences :: Branch0 m -> Set Reference +deepTypeReferences = R.dom . deepTypes + +terms :: Lens' (Branch0 m) (Star Referent NameSegment) +terms = lens _terms (\Branch0{..} x -> branch0 x _types _children _edits) + +types :: Lens' (Branch0 m) (Star Reference NameSegment) +types = lens _types (\Branch0{..} x -> branch0 _terms x _children _edits) + +children :: Lens' (Branch0 m) (Map NameSegment (Branch m)) +children = lens _children (\Branch0{..} x -> branch0 _terms _types x _edits) + +-- creates a Branch0 from the primary fields and derives the others. +branch0 :: Metadata.Star Referent NameSegment + -> Metadata.Star Reference NameSegment + -> Map NameSegment (Branch m) + -> Map NameSegment (EditHash, m Patch) + -> Branch0 m +branch0 terms types children edits = + Branch0 terms types children edits + deepTerms' deepTypes' + deepTermMetadata' deepTypeMetadata' + deepPaths' deepEdits' + where + nameSegToName = Name.unsafeFromText . NameSegment.toText + deepTerms' = (R.mapRan nameSegToName . Star3.d1) terms + <> foldMap go (Map.toList children) + where + go (nameSegToName -> n, b) = + R.mapRan (Name.joinDot n) (deepTerms $ head b) -- could use mapKeysMonotonic + deepTypes' = (R.mapRan nameSegToName . 
Star3.d1) types + <> foldMap go (Map.toList children) + where + go (nameSegToName -> n, b) = + R.mapRan (Name.joinDot n) (deepTypes $ head b) -- could use mapKeysMonotonic + deepTermMetadata' = R4.mapD2 nameSegToName (Metadata.starToR4 terms) + <> foldMap go (Map.toList children) + where + go (nameSegToName -> n, b) = + R4.mapD2 (Name.joinDot n) (deepTermMetadata $ head b) + deepTypeMetadata' = R4.mapD2 nameSegToName (Metadata.starToR4 types) + <> foldMap go (Map.toList children) + where + go (nameSegToName -> n, b) = + R4.mapD2 (Name.joinDot n) (deepTypeMetadata $ head b) + deepPaths' = Set.map Path.singleton (Map.keysSet children) + <> foldMap go (Map.toList children) + where go (nameSeg, b) = Set.map (Path.cons nameSeg) (deepPaths $ head b) + deepEdits' = Map.mapKeys nameSegToName (Map.map fst edits) + <> foldMap go (Map.toList children) + where + go (nameSeg, b) = + Map.mapKeys (nameSegToName nameSeg `Name.joinDot`) . deepEdits $ head b + +head :: Branch m -> Branch0 m +head (Branch c) = Causal.head c + +headHash :: Branch m -> Hash +headHash (Branch c) = Causal.currentHash c + +deepEdits' :: Branch0 m -> Map Name (EditHash, m Patch) +deepEdits' b = go id b where + -- can change this to an actual prefix once Name is a [NameSegment] + go :: (Name -> Name) -> Branch0 m -> Map Name (EditHash, m Patch) + go addPrefix Branch0{..} = + Map.mapKeysMonotonic (addPrefix . Name.fromSegment) _edits + <> foldMap f (Map.toList _children) + where + f :: (NameSegment, Branch m) -> Map Name (EditHash, m Patch) + f (c, b) = go (addPrefix . Name.joinDot (Name.fromSegment c)) (head b) + +data MergeMode = RegularMerge | SquashMerge deriving (Eq,Ord,Show) + +merge :: forall m . 
Monad m => Branch m -> Branch m -> m (Branch m) +merge = merge' RegularMerge + +-- Discards the history of a Branch0's children, recursively +discardHistory0 :: Applicative m => Branch0 m -> Branch0 m +discardHistory0 = over children (fmap tweak) where + tweak b = cons (discardHistory0 (head b)) empty + +merge' :: forall m . Monad m => MergeMode -> Branch m -> Branch m -> m (Branch m) +merge' _ b1 b2 | isEmpty b1 = pure b2 +merge' mode b1 b2 | isEmpty b2 = case mode of + RegularMerge -> pure b1 + SquashMerge -> pure $ cons (discardHistory0 (head b1)) b2 +merge' mode (Branch x) (Branch y) = + Branch <$> case mode of + RegularMerge -> Causal.threeWayMerge combine x y + SquashMerge -> Causal.squashMerge combine x y + where + combine :: Maybe (Branch0 m) -> Branch0 m -> Branch0 m -> m (Branch0 m) + combine Nothing l r = merge0 mode l r + combine (Just ca) l r = do + dl <- diff0 ca l + dr <- diff0 ca r + head0 <- apply ca (dl <> dr) + children <- Map.mergeA + (Map.traverseMaybeMissing $ combineMissing ca) + (Map.traverseMaybeMissing $ combineMissing ca) + (Map.zipWithAMatched $ const (merge' mode)) + (_children l) (_children r) + pure $ branch0 (_terms head0) (_types head0) children (_edits head0) + + combineMissing ca k cur = + case Map.lookup k (_children ca) of + Nothing -> pure $ Just cur + Just old -> do + nw <- merge' mode (cons empty0 old) cur + if isEmpty0 $ head nw + then pure Nothing + else pure $ Just nw + + apply :: Branch0 m -> BranchDiff -> m (Branch0 m) + apply b0 BranchDiff {..} = do + patches <- sequenceA + $ Map.differenceWith patchMerge (pure @m <$> _edits b0) changedPatches + let newPatches = makePatch <$> Map.difference changedPatches (_edits b0) + makePatch Patch.PatchDiff {..} = + let p = Patch.Patch _addedTermEdits _addedTypeEdits + in (H.accumulate' p, pure p) + pure $ branch0 (Star3.difference (_terms b0) removedTerms <> addedTerms) + (Star3.difference (_types b0) removedTypes <> addedTypes) + (_children b0) + (patches <> newPatches) + 
patchMerge mhp Patch.PatchDiff {..} = Just $ do + (_, mp) <- mhp + p <- mp + let np = Patch.Patch + { _termEdits = R.difference (Patch._termEdits p) _removedTermEdits + <> _addedTermEdits + , _typeEdits = R.difference (Patch._typeEdits p) _removedTypeEdits + <> _addedTypeEdits + } + pure (H.accumulate' np, pure np) + +-- `before b1 b2` is true if `b2` incorporates all of `b1` +before :: Monad m => Branch m -> Branch m -> m Bool +before (Branch x) (Branch y) = Causal.before x y + +merge0 :: forall m. Monad m => MergeMode -> Branch0 m -> Branch0 m -> m (Branch0 m) +merge0 mode b1 b2 = do + c3 <- unionWithM (merge' mode) (_children b1) (_children b2) + e3 <- unionWithM g (_edits b1) (_edits b2) + pure $ branch0 (_terms b1 <> _terms b2) + (_types b1 <> _types b2) + c3 + e3 + where + g :: (EditHash, m Patch) -> (EditHash, m Patch) -> m (EditHash, m Patch) + g (h1, m1) (h2, _) | h1 == h2 = pure (h1, m1) + g (_, m1) (_, m2) = do + e1 <- m1 + e2 <- m2 + let e3 = e1 <> e2 + pure (H.accumulate' e3, pure e3) + +pattern Hash h = Causal.RawHash h + +toList0 :: Branch0 m -> [(Path, Branch0 m)] +toList0 = go Path.empty where + go p b = (p, b) : (Map.toList (_children b) >>= (\(seg, cb) -> + go (Path.snoc p seg) (head cb) )) + +printDebugPaths :: Branch m -> String +printDebugPaths = unlines . map show . Set.toList . debugPaths + +debugPaths :: Branch m -> Set (Path, Hash) +debugPaths = go Path.empty where + go p b = Set.insert (p, headHash b) . Set.unions $ + [ go (Path.snoc p seg) b | (seg, b) <- Map.toList $ _children (head b) ] + +data Target = TargetType | TargetTerm | TargetBranch + deriving (Eq, Ord, Show) + +instance Eq (Branch0 m) where + a == b = view terms a == view terms b + && view types a == view types b + && view children a == view children b + && (fmap fst . view edits) a == (fmap fst . view edits) b + +data ForkFailure = SrcNotFound | DestExists + +-- consider delegating to Names.numHashChars when ready to implement? +-- are those enough? 
+-- could move this to a read-only field in Branch0 +-- could move a Names0 to a read-only field in Branch0 until it gets too big +numHashChars :: Branch m -> Int +numHashChars _b = 3 + +-- This type is a little ugly, so we wrap it up with a nice type alias for +-- use outside this module. +type Cache m = Cache.Cache m (Causal.RawHash Raw) (Causal m Raw (Branch0 m)) + +boundedCache :: MonadIO m => Word -> m (Cache m) +boundedCache = Cache.semispaceCache + +-- Can use `Cache.nullCache` to disable caching if needed +cachedRead :: forall m . Monad m + => Cache m + -> Causal.Deserialize m Raw Raw + -> (EditHash -> m Patch) + -> Hash + -> m (Branch m) +cachedRead cache deserializeRaw deserializeEdits h = + Branch <$> Causal.cachedRead cache d h + where + fromRaw :: Raw -> m (Branch0 m) + fromRaw Raw {..} = do + children <- traverse go _childrenR + edits <- for _editsR $ \hash -> (hash,) . pure <$> deserializeEdits hash + pure $ branch0 _termsR _typesR children edits + go = cachedRead cache deserializeRaw deserializeEdits + d :: Causal.Deserialize m Raw (Branch0 m) + d h = deserializeRaw h >>= \case + RawOne raw -> RawOne <$> fromRaw raw + RawCons raw h -> flip RawCons h <$> fromRaw raw + RawMerge raw hs -> flip RawMerge hs <$> fromRaw raw + +sync + :: Monad m + => (Hash -> m Bool) + -> Causal.Serialize m Raw Raw + -> (EditHash -> m Patch -> m ()) + -> Branch m + -> m () +sync exists serializeRaw serializeEdits b = do + _written <- State.execStateT (sync' exists serializeRaw serializeEdits b) mempty + -- traceM $ "Branch.sync wrote " <> show (Set.size written) <> " namespace files." + pure () + +-- serialize a `Branch m` indexed by the hash of its corresponding Raw +sync' + :: forall m + . 
Monad m + => (Hash -> m Bool) + -> Causal.Serialize m Raw Raw + -> (EditHash -> m Patch -> m ()) + -> Branch m + -> StateT (Set Hash) m () +sync' exists serializeRaw serializeEdits b = Causal.sync exists + serialize0 + (view history b) + where + serialize0 :: Causal.Serialize (StateT (Set Hash) m) Raw (Branch0 m) + serialize0 h b0 = case b0 of + RawOne b0 -> do + writeB0 b0 + lift $ serializeRaw h $ RawOne (toRaw b0) + RawCons b0 ht -> do + writeB0 b0 + lift $ serializeRaw h $ RawCons (toRaw b0) ht + RawMerge b0 hs -> do + writeB0 b0 + lift $ serializeRaw h $ RawMerge (toRaw b0) hs + where + writeB0 :: Branch0 m -> StateT (Set Hash) m () + writeB0 b0 = do + for_ (view children b0) $ \c -> do + queued <- State.get + when (Set.notMember (headHash c) queued) $ + sync' exists serializeRaw serializeEdits c + for_ (view edits b0) (lift . uncurry serializeEdits) + + -- this has to serialize the branch0 and its descendants in the tree, + -- and then serialize the rest of the history of the branch as well + +toRaw :: Branch0 m -> Raw +toRaw Branch0 {..} = + Raw _terms _types (headHash <$> _children) (fst <$> _edits) + +toCausalRaw :: Branch m -> Causal.Raw Raw Raw +toCausalRaw = \case + Branch (Causal.One _h e) -> RawOne (toRaw e) + Branch (Causal.Cons _h e (ht, _m)) -> RawCons (toRaw e) ht + Branch (Causal.Merge _h e tls) -> RawMerge (toRaw e) (Map.keysSet tls) + +-- copy a path to another path +fork + :: Applicative m + => Path + -> Path + -> Branch m + -> Either ForkFailure (Branch m) +fork src dest root = case getAt src root of + Nothing -> Left SrcNotFound + Just src' -> case setIfNotExists dest src' root of + Nothing -> Left DestExists + Just root' -> Right root' + +-- Move the node at src to dest. +-- It's okay if `dest` is inside `src`, just create empty levels. +-- Try not to `step` more than once at each node. 
+move :: Applicative m + => Path + -> Path + -> Branch m + -> Either ForkFailure (Branch m) +move src dest root = case getAt src root of + Nothing -> Left SrcNotFound + Just src' -> + -- make sure dest doesn't already exist + case getAt dest root of + Just _destExists -> Left DestExists + Nothing -> + -- find and update common ancestor of `src` and `dest`: + Right $ modifyAt ancestor go root + where + (ancestor, relSrc, relDest) = Path.relativeToAncestor src dest + go = deleteAt relSrc . setAt relDest src' + +setIfNotExists + :: Applicative m => Path -> Branch m -> Branch m -> Maybe (Branch m) +setIfNotExists dest b root = case getAt dest root of + Just _destExists -> Nothing + Nothing -> Just $ setAt dest b root + +setAt :: Applicative m => Path -> Branch m -> Branch m -> Branch m +setAt path b = modifyAt path (const b) + +deleteAt :: Applicative m => Path -> Branch m -> Branch m +deleteAt path = setAt path empty + +-- returns `Nothing` if no Branch at `path` or if Branch is empty at `path` +getAt :: Path + -> Branch m + -> Maybe (Branch m) +getAt path root = case Path.uncons path of + Nothing -> if isEmpty root then Nothing else Just root + Just (seg, path) -> case Map.lookup seg (_children $ head root) of + Just b -> getAt path b + Nothing -> Nothing + +getAt' :: Path -> Branch m -> Branch m +getAt' p b = fromMaybe empty $ getAt p b + +getAt0 :: Path -> Branch0 m -> Branch0 m +getAt0 p b = case Path.uncons p of + Nothing -> b + Just (seg, path) -> case Map.lookup seg (_children b) of + Just c -> getAt0 path (head c) + Nothing -> empty0 + +empty :: Branch m +empty = Branch $ Causal.one empty0 + +one :: Branch0 m -> Branch m +one = Branch . 
Causal.one + +empty0 :: Branch0 m +empty0 = + Branch0 mempty mempty mempty mempty mempty mempty mempty mempty mempty mempty + +isEmpty0 :: Branch0 m -> Bool +isEmpty0 = (== empty0) + +isEmpty :: Branch m -> Bool +isEmpty = (== empty) + +step :: Applicative m => (Branch0 m -> Branch0 m) -> Branch m -> Branch m +step f = over history (Causal.stepDistinct f) + +stepM :: (Monad m, Monad n) => (Branch0 m -> n (Branch0 m)) -> Branch m -> n (Branch m) +stepM f = mapMOf history (Causal.stepDistinctM f) + +cons :: Applicative m => Branch0 m -> Branch m -> Branch m +cons = step . const + +isOne :: Branch m -> Bool +isOne (Branch Causal.One{}) = True +isOne _ = False + +uncons :: Applicative m => Branch m -> m (Maybe (Branch0 m, Branch m)) +uncons (Branch b) = go <$> Causal.uncons b where + go = over (_Just . _2) Branch + +-- Modify the branch0 at the head of at `path` with `f`, +-- after creating it if necessary. Preserves history. +stepAt :: forall m. Applicative m + => Path + -> (Branch0 m -> Branch0 m) + -> Branch m -> Branch m +stepAt p f = modifyAt p g where + g :: Branch m -> Branch m + g (Branch b) = Branch . Causal.consDistinct (f (Causal.head b)) $ b + +stepManyAt :: (Monad m, Foldable f) + => f (Path, Branch0 m -> Branch0 m) -> Branch m -> Branch m +stepManyAt actions = step (stepManyAt0 actions) + +-- Modify the branch0 at the head of at `path` with `f`, +-- after creating it if necessary. Preserves history. +stepAtM :: forall n m. (Functor n, Applicative m) + => Path -> (Branch0 m -> n (Branch0 m)) -> Branch m -> n (Branch m) +stepAtM p f = modifyAtM p g where + g :: Branch m -> n (Branch m) + g (Branch b) = do + b0' <- f (Causal.head b) + pure $ Branch . Causal.consDistinct b0' $ b + +stepManyAtM :: (Monad m, Monad n, Foldable f) + => f (Path, Branch0 m -> n (Branch0 m)) -> Branch m -> n (Branch m) +stepManyAtM actions = stepM (stepManyAt0M actions) + +-- starting at the leaves, apply `f` to every level of the branch. 
+stepEverywhere + :: Applicative m => (Branch0 m -> Branch0 m) -> (Branch0 m -> Branch0 m) +stepEverywhere f Branch0 {..} = f (branch0 _terms _types children _edits) + where children = fmap (step $ stepEverywhere f) _children + +-- Creates a function to fix up the children field._1 +-- If the action emptied a child, then remove the mapping, +-- otherwise update it. +-- Todo: Fix this in hashing & serialization instead of here? +getChildBranch :: NameSegment -> Branch0 m -> Branch m +getChildBranch seg b = fromMaybe empty $ Map.lookup seg (_children b) + +setChildBranch :: NameSegment -> Branch m -> Branch0 m -> Branch0 m +setChildBranch seg b = over children (updateChildren seg b) + +getPatch :: Applicative m => NameSegment -> Branch0 m -> m Patch +getPatch seg b = case Map.lookup seg (_edits b) of + Nothing -> pure Patch.empty + Just (_, p) -> p + +getMaybePatch :: Applicative m => NameSegment -> Branch0 m -> m (Maybe Patch) +getMaybePatch seg b = case Map.lookup seg (_edits b) of + Nothing -> pure Nothing + Just (_, p) -> Just <$> p + +modifyPatches + :: Monad m => NameSegment -> (Patch -> Patch) -> Branch0 m -> m (Branch0 m) +modifyPatches seg f = mapMOf edits update + where + update m = do + p' <- case Map.lookup seg m of + Nothing -> pure $ f Patch.empty + Just (_, p) -> f <$> p + let h = H.accumulate' p' + pure $ Map.insert seg (h, pure p') m + +replacePatch :: Applicative m => NameSegment -> Patch -> Branch0 m -> Branch0 m +replacePatch n p = over edits (Map.insert n (H.accumulate' p, pure p)) + +deletePatch :: NameSegment -> Branch0 m -> Branch0 m +deletePatch n = over edits (Map.delete n) + +updateChildren ::NameSegment + -> Branch m + -> Map NameSegment (Branch m) + -> Map NameSegment (Branch m) +updateChildren seg updatedChild = + if isEmpty updatedChild + then Map.delete seg + else Map.insert seg updatedChild + +-- Modify the Branch at `path` with `f`, after creating it if necessary. +-- Because it's a `Branch`, it overwrites the history at `path`. 
+modifyAt :: Applicative m + => Path -> (Branch m -> Branch m) -> Branch m -> Branch m +modifyAt path f = runIdentity . modifyAtM path (pure . f) + +-- Modify the Branch at `path` with `f`, after creating it if necessary. +-- Because it's a `Branch`, it overwrites the history at `path`. +modifyAtM + :: forall n m + . Functor n + => Applicative m -- because `Causal.cons` uses `pure` + => Path + -> (Branch m -> n (Branch m)) + -> Branch m + -> n (Branch m) +modifyAtM path f b = case Path.uncons path of + Nothing -> f b + Just (seg, path) -> do -- Functor + let child = getChildBranch seg (head b) + child' <- modifyAtM path f child + -- step the branch by updating its children according to fixup + pure $ step (setChildBranch seg child') b + +-- stepManyAt0 consolidates several changes into a single step +stepManyAt0 :: forall f m . (Monad m, Foldable f) + => f (Path, Branch0 m -> Branch0 m) + -> Branch0 m -> Branch0 m +stepManyAt0 actions = + runIdentity . stepManyAt0M [ (p, pure . f) | (p,f) <- toList actions ] + +stepManyAt0M :: forall m n f . 
(Monad m, Monad n, Foldable f) + => f (Path, Branch0 m -> n (Branch0 m)) + -> Branch0 m -> n (Branch0 m) +stepManyAt0M actions b = go (toList actions) b where + go :: [(Path, Branch0 m -> n (Branch0 m))] -> Branch0 m -> n (Branch0 m) + go actions b = let + -- combines the functions that apply to this level of the tree + currentAction b = foldM (\b f -> f b) b [ f | (Path.Empty, f) <- actions ] + + -- groups the actions based on the child they apply to + childActions :: Map NameSegment [(Path, Branch0 m -> n (Branch0 m))] + childActions = + List.multimap [ (seg, (rest,f)) | (seg :< rest, f) <- actions ] + + -- alters the children of `b` based on the `childActions` map + stepChildren :: Map NameSegment (Branch m) -> n (Map NameSegment (Branch m)) + stepChildren children0 = foldM g children0 $ Map.toList childActions + where + g children (seg, actions) = do + -- Recursively applies the relevant actions to the child branch + -- The `findWithDefault` is important - it allows the stepManyAt + -- to create new children at paths that don't previously exist. + child <- stepM (go actions) (Map.findWithDefault empty seg children0) + pure $ updateChildren seg child children + in do + c2 <- stepChildren (view children b) + currentAction (set children c2 b) + +instance Hashable (Branch0 m) where + tokens b = + [ H.accumulateToken (_terms b) + , H.accumulateToken (_types b) + , H.accumulateToken (headHash <$> _children b) + ] + +-- getLocalBranch :: Hash -> IO Branch +-- getGithubBranch :: RemotePath -> IO Branch +-- getLocalEdit :: GUID -> IO Patch + +-- todo: consider inlining these into Actions2 +addTermName + :: Referent -> NameSegment -> Metadata.Metadata -> Branch0 m -> Branch0 m +addTermName r new md = + over terms (Metadata.insertWithMetadata (r, md) . Star3.insertD1 (r, new)) + +addTypeName + :: Reference -> NameSegment -> Metadata.Metadata -> Branch0 m -> Branch0 m +addTypeName r new md = + over types (Metadata.insertWithMetadata (r, md) . 
Star3.insertD1 (r, new)) + +-- addTermNameAt :: Path.Split -> Referent -> Branch0 m -> Branch0 m +-- addTypeNameAt :: Path.Split -> Reference -> Branch0 m -> Branch0 m + +deleteTermName :: Referent -> NameSegment -> Branch0 m -> Branch0 m +deleteTermName r n b | Star3.memberD1 (r,n) (view terms b) + = over terms (Star3.deletePrimaryD1 (r,n)) b +deleteTermName _ _ b = b + +deleteTypeName :: Reference -> NameSegment -> Branch0 m -> Branch0 m +deleteTypeName r n b | Star3.memberD1 (r,n) (view types b) + = over types (Star3.deletePrimaryD1 (r,n)) b +deleteTypeName _ _ b = b + +namesDiff :: Branch m -> Branch m -> Names.Diff +namesDiff b1 b2 = Names.diff0 (toNames0 (head b1)) (toNames0 (head b2)) + +lca :: Monad m => Branch m -> Branch m -> m (Maybe (Branch m)) +lca (Branch a) (Branch b) = fmap Branch <$> Causal.lca a b + +diff0 :: Monad m => Branch0 m -> Branch0 m -> m BranchDiff +diff0 old new = do + newEdits <- sequenceA $ snd <$> _edits new + oldEdits <- sequenceA $ snd <$> _edits old + let diffEdits = Map.merge (Map.mapMissing $ \_ p -> Patch.diff p mempty) + (Map.mapMissing $ \_ p -> Patch.diff mempty p) + (Map.zipWithMatched (const Patch.diff)) + newEdits + oldEdits + pure $ BranchDiff + { addedTerms = Star3.difference (_terms new) (_terms old) + , removedTerms = Star3.difference (_terms old) (_terms new) + , addedTypes = Star3.difference (_types new) (_types old) + , removedTypes = Star3.difference (_types old) (_types new) + , changedPatches = diffEdits + } + +transform :: Functor m => (forall a . m a -> n a) -> Branch m -> Branch n +transform f b = case _history b of + causal -> Branch . Causal.transform f $ transformB0s f causal + where + transformB0 :: Functor m => (forall a . m a -> n a) -> Branch0 m -> Branch0 n + transformB0 f b = + b { _children = transform f <$> _children b + , _edits = second f <$> _edits b + } + + transformB0s :: Functor m => (forall a . 
m a -> n a) + -> Causal m Raw (Branch0 m) + -> Causal m Raw (Branch0 n) + transformB0s f = Causal.unsafeMapHashPreserving (transformB0 f) + +data BranchAttentions = BranchAttentions + { -- Patches that were edited on the right but entirely removed on the left. + removedPatchEdited :: [Name] + -- Patches that were edited on the left but entirely removed on the right. + , editedPatchRemoved :: [Name] + } + +instance Semigroup BranchAttentions where + BranchAttentions edited1 removed1 <> BranchAttentions edited2 removed2 + = BranchAttentions (edited1 <> edited2) (removed1 <> removed2) + +instance Monoid BranchAttentions where + mempty = BranchAttentions [] [] + mappend = (<>) + +data RefCollisions = + RefCollisions { termCollisions :: Relation Name Name + , typeCollisions :: Relation Name Name + } deriving (Eq, Show) + +instance Semigroup RefCollisions where + (<>) = mappend +instance Monoid RefCollisions where + mempty = RefCollisions mempty mempty + mappend r1 r2 = RefCollisions (termCollisions r1 <> termCollisions r2) + (typeCollisions r1 <> typeCollisions r2) diff --git a/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs b/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs new file mode 100644 index 0000000000..d54e2ace49 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs @@ -0,0 +1,89 @@ +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Codebase.Branch.Dependencies where + +import Data.Set (Set) +import Data.Foldable (toList) +import qualified Data.Set as Set +import qualified Data.Map as Map +import Unison.Codebase.Branch (Branch(Branch), Branch0, EditHash) +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import GHC.Generics (Generic) +import Data.Monoid.Generic +import Data.Map (Map) +import 
Unison.NameSegment (NameSegment) +import Unison.Referent (Referent) +import Unison.Codebase.Patch (Patch) +import qualified Unison.Util.Star3 as Star3 +import qualified Unison.Util.Relation as R +import Unison.Reference (Reference(DerivedId)) + +type Branches m = [(Branch.Hash, Maybe (m (Branch m)))] + +data Dependencies = Dependencies + { patches :: Set EditHash + , terms :: Set Reference.Id + , decls :: Set Reference.Id + } + deriving Show + deriving Generic + deriving Semigroup via GenericSemigroup Dependencies + deriving Monoid via GenericMonoid Dependencies + +data Dependencies' = Dependencies' + { patches' :: [EditHash] + , terms' :: [Reference.Id] + , decls' :: [Reference.Id] + } deriving Show + +to' :: Dependencies -> Dependencies' +to' Dependencies{..} = Dependencies' (toList patches) (toList terms) (toList decls) + +fromBranch :: Applicative m => Branch m -> (Branches m, Dependencies) +fromBranch (Branch c) = case c of + Causal.One _hh e -> fromBranch0 e + Causal.Cons _hh e (h, m) -> fromBranch0 e <> fromTails (Map.singleton h m) + Causal.Merge _hh e tails -> fromBranch0 e <> fromTails tails + where + fromTails m = ([(h, Just (Branch <$> mc)) | (h, mc) <- Map.toList m], mempty) + +fromRawCausal :: Causal.Raw Branch.Raw (Branches m, Dependencies) + -> (Branches m, Dependencies) +fromRawCausal = \case + Causal.RawOne e -> e + Causal.RawCons e h -> e <> fromTails [h] + Causal.RawMerge e hs -> e <> fromTails (toList hs) + where + fromTails ts = (fmap (,Nothing) ts, mempty) + +fromBranch0 :: Applicative m => Branch0 m -> (Branches m, Dependencies) +fromBranch0 b = + ( fromChildren (Branch._children b) + , fromTermsStar (Branch._terms b) + <> fromTypesStar (Branch._types b) + <> fromEdits (Branch._edits b) ) + where + fromChildren :: Applicative m => Map NameSegment (Branch m) -> Branches m + fromChildren m = [ (Branch.headHash b, Just (pure b)) | b <- toList m ] + references :: Branch.Star r NameSegment -> [r] + references = toList . R.dom . 
Star3.d1 + mdValues :: Branch.Star r NameSegment -> [Reference] + mdValues = fmap snd . toList . R.ran . Star3.d3 + fromTermsStar :: Branch.Star Referent NameSegment -> Dependencies + fromTermsStar s = Dependencies mempty terms decls where + terms = Set.fromList $ + [ i | Referent.Ref (DerivedId i) <- references s] ++ + [ i | DerivedId i <- mdValues s] + decls = Set.fromList $ + [ i | Referent.Con (DerivedId i) _ _ <- references s ] + fromTypesStar :: Branch.Star Reference NameSegment -> Dependencies + fromTypesStar s = Dependencies mempty terms decls where + terms = Set.fromList [ i | DerivedId i <- mdValues s ] + decls = Set.fromList [ i | DerivedId i <- references s ] + fromEdits :: Map NameSegment (EditHash, m Patch) -> Dependencies + fromEdits m = Dependencies (Set.fromList . fmap fst $ toList m) mempty mempty diff --git a/parser-typechecker/src/Unison/Codebase/BranchDiff.hs b/parser-typechecker/src/Unison/Codebase/BranchDiff.hs new file mode 100644 index 0000000000..980ee3da2c --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/BranchDiff.hs @@ -0,0 +1,166 @@ +module Unison.Codebase.BranchDiff where + +import Unison.Prelude +import qualified Data.Set as Set +import qualified Data.Map as Map +import Unison.Codebase.Branch (Branch0(..)) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Metadata as Metadata +import qualified Unison.Codebase.Patch as Patch +import Unison.Codebase.Patch (Patch, PatchDiff) +import Unison.Name (Name) +import Unison.Reference (Reference) +import Unison.Referent (Referent) +import qualified Unison.Util.Relation as R +import qualified Unison.Util.Relation3 as R3 +import qualified Unison.Util.Relation4 as R4 +import Unison.Util.Relation (Relation) +import Unison.Util.Relation3 (Relation3) +import Unison.Runtime.IOSource (isPropagatedValue) + +data DiffType a = Create a | Delete a | Modify a deriving Show + +-- todo: maybe simplify this file using Relation3? 
+data NamespaceSlice r = NamespaceSlice { + names :: Relation r Name, + metadata :: Relation3 r Name Metadata.Value +} deriving Show + +data DiffSlice r = DiffSlice { +-- tpatchUpdates :: Relation r r, -- old new + tallnamespaceUpdates :: Map Name (Set r, Set r), + talladds :: Relation r Name, + tallremoves :: Relation r Name, + trenames :: Map r (Set Name, Set Name), -- ref (old, new) + taddedMetadata :: Relation3 r Name Metadata.Value, + tremovedMetadata :: Relation3 r Name Metadata.Value +} deriving Show + +data BranchDiff = BranchDiff + { termsDiff :: DiffSlice Referent + , typesDiff :: DiffSlice Reference + , patchesDiff :: Map Name (DiffType PatchDiff) + } deriving Show + +diff0 :: forall m. Monad m => Branch0 m -> Branch0 m -> m BranchDiff +diff0 old new = BranchDiff terms types <$> patchDiff old new where + (terms, types) = + computeSlices + (deepr4ToSlice (Branch.deepTerms old) (Branch.deepTermMetadata old)) + (deepr4ToSlice (Branch.deepTerms new) (Branch.deepTermMetadata new)) + (deepr4ToSlice (Branch.deepTypes old) (Branch.deepTypeMetadata old)) + (deepr4ToSlice (Branch.deepTypes new) (Branch.deepTypeMetadata new)) + +patchDiff :: forall m. Monad m => Branch0 m -> Branch0 m -> m (Map Name (DiffType PatchDiff)) +patchDiff old new = do + let oldDeepEdits, newDeepEdits :: Map Name (Branch.EditHash, m Patch) + oldDeepEdits = Branch.deepEdits' old + newDeepEdits = Branch.deepEdits' new + added <- do + addedPatches :: Map Name Patch <- + traverse snd $ Map.difference newDeepEdits oldDeepEdits + pure $ fmap (\p -> Create (Patch.diff p mempty)) addedPatches + removed <- do + removedPatches :: Map Name Patch <- + traverse snd $ Map.difference oldDeepEdits newDeepEdits + pure $ fmap (\p -> Delete (Patch.diff mempty p)) removedPatches + + let f acc k = case (Map.lookup k oldDeepEdits, Map.lookup k newDeepEdits) of + (Just (h1,p1), Just (h2,p2)) -> + if h1 == h2 then pure acc + else Map.singleton k . 
Modify <$> (Patch.diff <$> p2 <*> p1) + _ -> error "we've done something very wrong" + modified <- foldM f mempty (Set.intersection (Map.keysSet oldDeepEdits) (Map.keysSet newDeepEdits)) + pure $ added <> removed <> modified + +deepr4ToSlice :: Ord r + => R.Relation r Name + -> Metadata.R4 r Name + -> NamespaceSlice r +deepr4ToSlice deepNames deepMetadata = + NamespaceSlice deepNames (unpackMetadata deepMetadata) + where + unpackMetadata = R3.fromList . fmap (\(r,n,_t,v) -> (r,n,v)) . R4.toList + +computeSlices :: NamespaceSlice Referent + -> NamespaceSlice Referent + -> NamespaceSlice Reference + -> NamespaceSlice Reference + -> (DiffSlice Referent, DiffSlice Reference) +computeSlices oldTerms newTerms oldTypes newTypes = (termsOut, typesOut) where + termsOut = + let nc = allNames oldTerms newTerms + nu = allNamespaceUpdates oldTerms newTerms in + DiffSlice + nu + (allAdds nc nu) + (allRemoves nc nu) + (remainingNameChanges nc) + (addedMetadata oldTerms newTerms) + (removedMetadata oldTerms newTerms) + typesOut = + let nc = allNames oldTypes newTypes + nu = allNamespaceUpdates oldTypes newTypes in + DiffSlice + nu + (allAdds nc nu) + (allRemoves nc nu) + (remainingNameChanges nc) + (addedMetadata oldTypes newTypes) + (removedMetadata oldTypes newTypes) + + allNames :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Map r (Set Name, Set Name) + allNames old new = R.outerJoinDomMultimaps (names old) (names new) + + allAdds, allRemoves :: forall r. Ord r + => Map r (Set Name, Set Name) + -> Map Name (Set r, Set r) + -> Relation r Name + allAdds nc nu = R.fromMultimap . fmap snd . Map.filterWithKey f $ nc where + f r (oldNames, newNames) = null oldNames && any (notInUpdates r) newNames + -- if an add matches RHS of an update, we exclude it from "Adds" + notInUpdates r name = case Map.lookup name nu of + Nothing -> True + Just (_, rs_new) -> Set.notMember r rs_new + + allRemoves nc nu = R.fromMultimap . fmap fst . 
Map.filterWithKey f $ nc where + f r (oldNames, newNames) = null newNames && any (notInUpdates r) oldNames + -- if a remove matches LHS of an update, we exclude it from "Removes" + notInUpdates r name = case Map.lookup name nu of + Nothing -> True + Just (rs_old, _) -> Set.notMember r rs_old + + -- renames and stuff, name changes without a reference change + remainingNameChanges :: forall r. Ord r + => Map r (Set Name, Set Name) -> Map r (Set Name, Set Name) + remainingNameChanges = + Map.filter (\(old, new) -> not (null old) && not (null new) && old /= new) + + allNamespaceUpdates :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Map Name (Set r, Set r) + allNamespaceUpdates old new = + Map.filter f $ R.innerJoinRanMultimaps (names old) (names new) + where f (old, new) = old /= new + + addedMetadata :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Relation3 r Name Metadata.Value + addedMetadata old new = metadata new `R3.difference` metadata old + + removedMetadata :: Ord r => NamespaceSlice r -> NamespaceSlice r -> Relation3 r Name Metadata.Value + removedMetadata old new = metadata old `R3.difference` metadata new + +-- the namespace updates that aren't propagated +namespaceUpdates :: Ord r => DiffSlice r -> Map Name (Set r, Set r) +namespaceUpdates s = Map.mapMaybeWithKey f (tallnamespaceUpdates s) + where + f name (olds, news) = let + news' = Set.difference news (Map.findWithDefault mempty name propagated) + in if null news' then Nothing else Just (olds, news') + propagated = propagatedUpdates s + +propagatedUpdates :: Ord r => DiffSlice r -> Map Name (Set r) +propagatedUpdates s = Map.fromList + [ (name, news) + | (name, (_olds0, news0)) <- Map.toList $ tallnamespaceUpdates s + , let news = Set.filter propagated news0 + propagated rnew = R3.member rnew name isPropagatedValue (taddedMetadata s) + , not (null news) + ] diff --git a/parser-typechecker/src/Unison/Codebase/BranchUtil.hs b/parser-typechecker/src/Unison/Codebase/BranchUtil.hs new file mode 
100644 index 0000000000..749f2e75c0 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/BranchUtil.hs @@ -0,0 +1,135 @@ +module Unison.Codebase.BranchUtil where + +import Unison.Prelude + +import qualified Data.Set as Set +import qualified Data.Map as Map +import Unison.Codebase.Path (Path) +import qualified Unison.Codebase.Path as Path +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.Branch (Branch, Branch0) +import qualified Unison.Names2 as Names +import Unison.Names2 (Names0) +import qualified Unison.Referent as Referent +import qualified Unison.Reference as Reference +import Unison.Referent (Referent) +import Unison.Reference (Reference) +import Unison.HashQualified' (HashQualified'(NameOnly, HashQualified)) +import qualified Unison.HashQualified' as HQ' +import qualified Unison.ShortHash as SH +import qualified Unison.Util.Relation as R +import qualified Unison.Util.Relation4 as R4 +import qualified Unison.Util.Star3 as Star3 +import Unison.Codebase.Metadata (Metadata) +import qualified Unison.Codebase.Metadata as Metadata +import qualified Unison.Util.List as List +import Unison.Codebase.Patch (Patch) +import Unison.NameSegment (NameSegment) +import Control.Lens (view) + +fromNames0 :: Monad m => Names0 -> Branch m +fromNames0 names0 = Branch.one $ addFromNames0 names0 Branch.empty0 + +-- can produce a pure value because there's no history to traverse +hashesFromNames0 :: Monad m => Names0 -> Map Branch.Hash (Branch m) +hashesFromNames0 = deepHashes . fromNames0 where + deepHashes :: Branch m -> Map Branch.Hash (Branch m) + deepHashes b = Map.singleton (Branch.headHash b) b + <> (foldMap deepHashes . view Branch.children . Branch.head) b + +addFromNames0 :: Monad m => Names0 -> Branch0 m -> Branch0 m +addFromNames0 names0 = Branch.stepManyAt0 (typeActions <> termActions) + where + typeActions = map doType . R.toList $ Names.types names0 + termActions = map doTerm . 
R.toList $ Names.terms names0 +-- doTerm :: (Name, Referent) -> (Path, Branch0 m -> Branch0 m) + doTerm (n, r) = case Path.splitFromName n of + Nothing -> errorEmptyName + Just split -> makeAddTermName split r mempty -- no metadata +-- doType :: (Name, Reference) -> (Path, Branch0 m -> Branch0 m) + doType (n, r) = case Path.splitFromName n of + Nothing -> errorEmptyName + Just split -> makeAddTypeName split r mempty -- no metadata + errorEmptyName = error "encountered an empty name" + +getTerm :: Path.HQSplit -> Branch0 m -> Set Referent +getTerm (p, hq) b = case hq of + NameOnly n -> Star3.lookupD1 n terms + HashQualified n sh -> filter sh $ Star3.lookupD1 n terms + where + filter sh = Set.filter (SH.isPrefixOf sh . Referent.toShortHash) + terms = Branch._terms (Branch.getAt0 p b) + +getTermMetadataHQNamed + :: (Path.Path, HQ'.HQSegment) -> Branch0 m -> Metadata.R4 Referent NameSegment +getTermMetadataHQNamed (path, hqseg) b = + R4.filter (\(r,n,_t,_v) -> HQ'.matchesNamedReferent n r hqseg) terms + where terms = Metadata.starToR4 . Branch._terms $ Branch.getAt0 path b + +getTypeMetadataHQNamed + :: (Path.Path, HQ'.HQSegment) + -> Branch0 m + -> Metadata.R4 Reference NameSegment +getTypeMetadataHQNamed (path, hqseg) b = + R4.filter (\(r,n,_t,_v) -> HQ'.matchesNamedReference n r hqseg) types + where types = Metadata.starToR4 . Branch._types $ Branch.getAt0 path b + +-- todo: audit usages and maybe eliminate! +-- Only returns metadata for the term at the exact level given +getTermMetadataAt :: (Path.Path, a) -> Referent -> Branch0 m -> Metadata +getTermMetadataAt (path,_) r b = Set.fromList <$> List.multimap mdList + where + mdList :: [(Metadata.Type, Metadata.Value)] + mdList = Set.toList . R.ran . Star3.d3 . 
Star3.selectFact (Set.singleton r) $ terms + terms = Branch._terms $ Branch.getAt0 path b + +getType :: Path.HQSplit -> Branch0 m -> Set Reference +getType (p, hq) b = case hq of + NameOnly n -> Star3.lookupD1 n types + HashQualified n sh -> filter sh $ Star3.lookupD1 n types + where + filter sh = Set.filter (SH.isPrefixOf sh . Reference.toShortHash) + types = Branch._types (Branch.getAt0 p b) + +getTypeByShortHash :: SH.ShortHash -> Branch0 m -> Set Reference +getTypeByShortHash sh b = filter sh $ Branch.deepTypeReferences b + where + filter sh = Set.filter (SH.isPrefixOf sh . Reference.toShortHash) + +getTypeMetadataAt :: (Path.Path, a) -> Reference -> Branch0 m -> Metadata +getTypeMetadataAt (path,_) r b = Set.fromList <$> List.multimap mdList + where + mdList :: [(Metadata.Type, Metadata.Value)] + mdList = Set.toList . R.ran . Star3.d3 . Star3.selectFact (Set.singleton r) $ types + types = Branch._types $ Branch.getAt0 path b + +getBranch :: Path.Split -> Branch0 m -> Maybe (Branch m) +getBranch (p, seg) b = case Path.toList p of + [] -> Map.lookup seg (Branch._children b) + h : p -> + (Branch.head <$> Map.lookup h (Branch._children b)) >>= + getBranch (Path.fromList p, seg) + + +makeAddTermName :: Path.Split -> Referent -> Metadata -> (Path, Branch0 m -> Branch0 m) +makeAddTermName (p, name) r md = (p, Branch.addTermName r name md) + +makeDeleteTermName :: Path.Split -> Referent -> (Path, Branch0 m -> Branch0 m) +makeDeleteTermName (p, name) r = (p, Branch.deleteTermName r name) + +makeReplacePatch :: Applicative m => Path.Split -> Patch -> (Path, Branch0 m -> Branch0 m) +makeReplacePatch (p, name) patch = (p, Branch.replacePatch name patch) + +makeDeletePatch :: Path.Split -> (Path, Branch0 m -> Branch0 m) +makeDeletePatch (p, name) = (p, Branch.deletePatch name) + +makeAddTypeName :: Path.Split -> Reference -> Metadata -> (Path, Branch0 m -> Branch0 m) +makeAddTypeName (p, name) r md = (p, Branch.addTypeName r name md) + +makeDeleteTypeName :: Path.Split -> 
Reference -> (Path, Branch0 m -> Branch0 m) +makeDeleteTypeName (p, name) r = (p, Branch.deleteTypeName r name) + +-- to delete, just set with Branch.empty +makeSetBranch :: + Path.Split -> Branch m -> (Path, Branch0 m -> Branch0 m) +makeSetBranch (p, name) b = (p, Branch.setChildBranch name b) diff --git a/parser-typechecker/src/Unison/Codebase/Causal.hs b/parser-typechecker/src/Unison/Codebase/Causal.hs new file mode 100644 index 0000000000..46164d168f --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Causal.hs @@ -0,0 +1,373 @@ +{-# LANGUAGE RankNTypes #-} +module Unison.Codebase.Causal where + +import Unison.Prelude + +import Prelude hiding ( head + , tail + , read + ) +import qualified Control.Monad.State as State +import Control.Monad.State ( StateT ) +import Data.Sequence ( ViewL(..) ) +import qualified Data.Sequence as Seq +import Unison.Hash ( Hash ) +import qualified Unison.Hashable as Hashable +import Unison.Hashable ( Hashable ) +import qualified Unison.Util.Cache as Cache +import qualified Data.Map as Map +import qualified Data.Set as Set + +{- +`Causal a` has 5 operations, specified algebraically here: + +* `before : Causal m a -> Causal m a -> m Bool` defines a partial order on + `Causal`. +* `head : Causal m a -> a`, which represents the "latest" `a` value in a causal + chain. +* `one : a -> Causal m a`, satisfying `head (one hd) == hd` +* `cons : a -> Causal a -> Causal a`, satisfying `head (cons hd tl) == hd` and + also `before tl (cons hd tl)`. +* `merge : CommutativeSemigroup a => Causal a -> Causal a -> Causal a`, which is + commutative (but not associative) and satisfies: + * `before c1 (merge c1 c2)` + * `before c2 (merge c1 c2)` +* `sequence : Causal a -> Causal a -> Causal a`, which is defined as + `sequence c1 c2 = cons (head c2) (merge c1 c2)`. 
+ * `before c1 (sequence c1 c2)` + * `head (sequence c1 c2) == head c2` +-} + +newtype RawHash a = RawHash { unRawHash :: Hash } + deriving (Eq, Ord) + +instance Show (RawHash a) where + show = show . unRawHash + +instance Show e => Show (Causal m h e) where + show = \case + One h e -> "One " ++ (take 3 . show) h ++ " " ++ show e + Cons h e t -> "Cons " ++ (take 3 . show) h ++ " " ++ show e ++ " " ++ (take 3 . show) (fst t) + Merge h e ts -> "Merge " ++ (take 3 . show) h ++ " " ++ show e ++ " " ++ (show . fmap (take 3 . show) . toList) (Map.keysSet ts) + +-- h is the type of the pure data structure that will be hashed and used as +-- an index; e.g. h = Branch00, e = Branch0 m +data Causal m h e + = One { currentHash :: RawHash h + , head :: e + } + | Cons { currentHash :: RawHash h + , head :: e + , tail :: (RawHash h, m (Causal m h e)) + } + -- The merge operation `<>` flattens and normalizes for order + | Merge { currentHash :: RawHash h + , head :: e + , tails :: Map (RawHash h) (m (Causal m h e)) + } + +-- Convert the Causal to an adjacency matrix for debugging purposes. +toGraph + :: Monad m + => Set (RawHash h) + -> Causal m h e + -> m (Seq (RawHash h, RawHash h)) +toGraph seen c = case c of + One _ _ -> pure Seq.empty + Cons h1 _ (h2, m) -> if Set.notMember h1 seen + then do + tail <- m + g <- toGraph (Set.insert h1 seen) tail + pure $ (h1, h2) Seq.<| g + else pure Seq.empty + Merge h _ ts -> if Set.notMember h seen + then do + tails <- sequence $ Map.elems ts + gs <- Seq.fromList <$> traverse (toGraph (Set.insert h seen)) tails + pure $ Seq.fromList ((h, ) <$> Set.toList (Map.keysSet ts)) <> join gs + else pure Seq.empty + +-- A serializer `Causal m h e`. Nonrecursive -- only responsible for +-- writing a single node of the causal structure. 
+data Raw h e + = RawOne e + | RawCons e (RawHash h) + | RawMerge e (Set (RawHash h)) + +rawHead :: Raw h e -> e +rawHead (RawOne e ) = e +rawHead (RawCons e _) = e +rawHead (RawMerge e _) = e + +-- Don't need to deserialize the `e` to calculate `before`. +data Tails h + = TailsOne + | TailsCons (RawHash h) + | TailsMerge (Set (RawHash h)) + +type Deserialize m h e = RawHash h -> m (Raw h e) + +cachedRead :: Monad m + => Cache.Cache m (RawHash h) (Causal m h e) + -> Deserialize m h e + -> RawHash h -> m (Causal m h e) +cachedRead cache deserializeRaw h = Cache.lookup cache h >>= \case + Nothing -> do + raw <- deserializeRaw h + causal <- pure $ case raw of + RawOne e -> One h e + RawCons e tailHash -> Cons h e (tailHash, read tailHash) + RawMerge e tailHashes -> Merge h e $ + Map.fromList [(h, read h) | h <- toList tailHashes ] + Cache.insert cache h causal + pure causal + Just causal -> pure causal + where + read = cachedRead cache deserializeRaw + +type Serialize m h e = RawHash h -> Raw h e -> m () + +-- Sync a causal to some persistent store, stopping when hitting a Hash which +-- has already been written, according to the `exists` function provided. +sync + :: forall m h e + . Monad m + => (RawHash h -> m Bool) + -> Serialize (StateT (Set (RawHash h)) m) h e + -> Causal m h e + -> StateT (Set (RawHash h)) m () +sync exists serialize c = do + queued <- State.get + itExists <- if Set.member (currentHash c) queued then pure True + else lift . 
exists $ currentHash c + unless itExists $ go c + where + go :: Causal m h e -> StateT (Set (RawHash h)) m () + go c = do + queued <- State.get + when (Set.notMember (currentHash c) queued) $ do + State.modify (Set.insert $ currentHash c) + case c of + One currentHash head -> serialize currentHash $ RawOne head + Cons currentHash head (tailHash, tailm) -> do + -- write out the tail first, so what's on disk is always valid + b <- lift $ exists tailHash + unless b $ go =<< lift tailm + serialize currentHash (RawCons head tailHash) + Merge currentHash head tails -> do + for_ (Map.toList tails) $ \(hash, cm) -> do + b <- lift $ exists hash + unless b $ go =<< lift cm + serialize currentHash (RawMerge head (Map.keysSet tails)) + +instance Eq (Causal m h a) where + a == b = currentHash a == currentHash b + +instance Ord (Causal m h a) where + a <= b = currentHash a <= currentHash b + +instance Hashable (RawHash h) where + tokens (RawHash h) = Hashable.tokens h + +-- Find the lowest common ancestor of two causals. +lca :: Monad m => Causal m h e -> Causal m h e -> m (Maybe (Causal m h e)) +lca a b = + lca' (Seq.singleton $ pure a) (Seq.singleton $ pure b) + +-- `lca' xs ys` finds the lowest common ancestor of any element of `xs` and any +-- element of `ys`. +-- This is a breadth-first search used in the implementation of `lca a b`. +lca' + :: Monad m + => Seq (m (Causal m h e)) + -> Seq (m (Causal m h e)) + -> m (Maybe (Causal m h e)) +lca' = go Set.empty Set.empty where + go seenLeft seenRight remainingLeft remainingRight = + case Seq.viewl remainingLeft of + Seq.EmptyL -> search seenLeft remainingRight + a :< as -> do + left <- a + if Set.member (currentHash left) seenRight + then pure $ Just left + -- Note: swapping position of left and right when we recurse so that + -- we search each side equally. This avoids having to case on both + -- arguments, and the order shouldn't really matter. 
+ else go seenRight + (Set.insert (currentHash left) seenLeft) + remainingRight + (as <> children left) + search seen remaining = case Seq.viewl remaining of + Seq.EmptyL -> pure Nothing + a :< as -> do + current <- a + if Set.member (currentHash current) seen + then pure $ Just current + else search seen (as <> children current) + +children :: Causal m h e -> Seq (m (Causal m h e)) +children (One _ _ ) = Seq.empty +children (Cons _ _ (_, t)) = Seq.singleton t +children (Merge _ _ ts ) = Seq.fromList $ Map.elems ts + +-- A `squashMerge combine c1 c2` gives the same resulting `e` +-- as a `threeWayMerge`, but doesn't introduce a merge node for the +-- result. Instead, the resulting causal is a simple `Cons` onto `c2` +-- (or is equal to `c2` if `c1` changes nothing). +squashMerge + :: forall m h e + . (Monad m, Hashable e, Eq e) + => (Maybe e -> e -> e -> m e) + -> Causal m h e + -> Causal m h e + -> m (Causal m h e) +squashMerge combine c1 c2 = do + theLCA <- lca c1 c2 + let done newHead = consDistinct newHead c2 + case theLCA of + Nothing -> done <$> combine Nothing (head c1) (head c2) + Just lca + | lca == c1 -> pure c2 + + -- Pretty subtle: if we were to add this short circuit, then + -- the history of c1's children would still make it into the result + -- Calling `combine` will recursively call into `squashMerge` + -- for the children, discarding their history before calling `done` + -- on the parent. + -- | lca == c2 -> pure $ done c1 + + | otherwise -> done <$> combine (Just $ head lca) (head c1) (head c2) + +threeWayMerge + :: forall m h e + . 
(Monad m, Hashable e) + => (Maybe e -> e -> e -> m e) + -> Causal m h e + -> Causal m h e + -> m (Causal m h e) +threeWayMerge combine c1 c2 = do + theLCA <- lca c1 c2 + case theLCA of + Nothing -> done <$> combine Nothing (head c1) (head c2) + Just lca + | lca == c1 -> pure c2 + | lca == c2 -> pure c1 + | otherwise -> done <$> combine (Just $ head lca) (head c1) (head c2) + where + children = + Map.fromList [(currentHash c1, pure c1), (currentHash c2, pure c2)] + done :: e -> Causal m h e + done newHead = + Merge (RawHash (hash (newHead, Map.keys children))) newHead children + +before :: Monad m => Causal m h e -> Causal m h e -> m Bool +before a b = (== Just a) <$> lca a b + +hash :: Hashable e => e -> Hash +hash = Hashable.accumulate' + +step :: (Applicative m, Hashable e) => (e -> e) -> Causal m h e -> Causal m h e +step f c = f (head c) `cons` c + +stepDistinct :: (Applicative m, Eq e, Hashable e) => (e -> e) -> Causal m h e -> Causal m h e +stepDistinct f c = f (head c) `consDistinct` c + +stepIf + :: (Applicative m, Hashable e) + => (e -> Bool) + -> (e -> e) + -> Causal m h e + -> Causal m h e +stepIf cond f c = if cond (head c) then step f c else c + +stepM + :: (Applicative m, Hashable e) => (e -> m e) -> Causal m h e -> m (Causal m h e) +stepM f c = (`cons` c) <$> f (head c) + +stepDistinctM + :: (Applicative m, Functor n, Eq e, Hashable e) + => (e -> n e) -> Causal m h e -> n (Causal m h e) +stepDistinctM f c = (`consDistinct` c) <$> f (head c) + +one :: Hashable e => e -> Causal m h e +one e = One (RawHash $ hash e) e + +cons :: (Applicative m, Hashable e) => e -> Causal m h e -> Causal m h e +cons e tl = + Cons (RawHash $ hash [hash e, unRawHash . 
currentHash $ tl]) e (currentHash tl, pure tl) + +consDistinct :: (Applicative m, Eq e, Hashable e) => e -> Causal m h e -> Causal m h e +consDistinct e tl = + if head tl == e then tl + else cons e tl + +uncons :: Applicative m => Causal m h e -> m (Maybe (e, Causal m h e)) +uncons c = case c of + Cons _ e (_,tl) -> fmap (e,) . Just <$> tl + _ -> pure Nothing + +transform :: Functor m => (forall a . m a -> n a) -> Causal m h e -> Causal n h e +transform nt c = case c of + One h e -> One h e + Cons h e (ht, tl) -> Cons h e (ht, nt (transform nt <$> tl)) + Merge h e tls -> Merge h e $ Map.map (\mc -> nt (transform nt <$> mc)) tls + +unsafeMapHashPreserving :: Functor m => (e -> e2) -> Causal m h e -> Causal m h e2 +unsafeMapHashPreserving f c = case c of + One h e -> One h (f e) + Cons h e (ht, tl) -> Cons h (f e) (ht, unsafeMapHashPreserving f <$> tl) + Merge h e tls -> Merge h (f e) $ Map.map (fmap $ unsafeMapHashPreserving f) tls + +data FoldHistoryResult a = Satisfied a | Unsatisfied a deriving (Eq,Ord,Show) + +-- foldHistoryUntil some condition on the accumulator is met, +-- attempting to work backwards fairly through merge nodes +-- (rather than following one back all the way to its root before working +-- through others). Returns Unsatisfied if the condition was never satisfied, +-- otherwise Satisfied. +-- +-- NOTE by RÓB: this short-circuits immediately and only looks at the first +-- entry in the history, since this operation is far too slow to be practical. +foldHistoryUntil + :: forall m h e a + . 
(Monad m) + => (a -> e -> (a, Bool)) + -> a + -> Causal m h e + -> m (FoldHistoryResult a) +foldHistoryUntil f a c = step a mempty (pure c) where + step :: a -> Set (RawHash h) -> Seq (Causal m h e) -> m (FoldHistoryResult a) + step a _seen Seq.Empty = pure (Unsatisfied a) + step a seen (c Seq.:<| rest) | currentHash c `Set.member` seen = + step a seen rest + step a seen (c Seq.:<| rest) = case f a (head c) of + (a, True ) -> pure (Satisfied a) + (a, False) -> do + tails <- case c of + One{} -> pure mempty + Cons{} -> + let (_, t) = tail c + in --if h `Set.member` seen + if not (Set.null seen) then pure mempty else Seq.singleton <$> t + Merge{} -> + fmap Seq.fromList + . traverse snd + . filter (\(_, _) -> not (Set.null seen)) + . Map.toList + $ tails c + step a (Set.insert (currentHash c) seen) (rest <> tails) + +hashToRaw :: + forall m h e. Monad m => Causal m h e -> m (Map (RawHash h) [RawHash h]) +hashToRaw c = go mempty [c] where + go :: Map (RawHash h) [RawHash h] -> [Causal m h e] + -> m (Map (RawHash h) [RawHash h]) + go output [] = pure output + go output (c : queue) = case c of + One h _ -> go (Map.insert h [] output) queue + Cons h _ (htail, mctail) -> do + ctail <- mctail + go (Map.insert h [htail] output) (ctail : queue) + Merge h _ mtails -> do + tails <- sequence mtails + go (Map.insert h (Map.keys tails) output) (toList tails ++ queue) diff --git a/parser-typechecker/src/Unison/Codebase/Classes.hs b/parser-typechecker/src/Unison/Codebase/Classes.hs new file mode 100644 index 0000000000..afc6108da0 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Classes.hs @@ -0,0 +1,40 @@ + +module Unison.Codebase.Classes where +-- ( GetDecls(..) +-- , PutDecls(..) +-- , GetBranch(..) +-- , PutBranch(..) +-- , GetDependents(..) 
+-- ) where +-- +--import Data.Set ( Set ) +--import Unison.Codebase.Branch ( Branch ) +--import Unison.DataDeclaration ( Decl ) +--import qualified Unison.Reference as Reference +--import Unison.Reference ( Reference ) +--import qualified Unison.Term as Term +--import qualified Unison.Type as Type +--import qualified Unison.Typechecker.TypeLookup as TL +-- +--type Term v a = Term.AnnotatedTerm v a +--type Type v a = Type.AnnotatedType v a +-- +--class GetDecls d m v a | d -> m v a where +-- getTerm :: d -> Reference.Id -> m (Maybe (Term v a)) +-- getTypeOfTerm :: d -> Reference -> m (Maybe (Type v a)) +-- getTypeDeclaration :: d -> Reference.Id -> m (Maybe (Decl v a)) +-- hasTerm :: d -> Reference.Id -> m Bool +-- hasType :: d -> Reference.Id -> m Bool +-- +--class PutDecls d m v a | d -> m v a where +-- putTerm :: d -> Reference.Id -> Term v a -> Type v a -> m () +-- putTypeDeclarationImpl :: d -> Reference.Id -> Decl v a -> m () +-- +--class GetBranch b m | b -> m where +-- getRootBranch :: b -> m (Branch m) +-- +--class PutBranch b m | b -> m where +-- putRootBranch :: b -> Branch m -> m () +-- +--class GetDependents d m | d -> m where +-- dependentsImpl :: d -> Reference -> m (Set Reference.Id) diff --git a/parser-typechecker/src/Unison/Codebase/CodeLookup.hs b/parser-typechecker/src/Unison/Codebase/CodeLookup.hs new file mode 100644 index 0000000000..e283adbe71 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/CodeLookup.hs @@ -0,0 +1,57 @@ +module Unison.Codebase.CodeLookup where + +import Unison.Prelude + +import Control.Monad.Morph +import qualified Data.Map as Map +import Unison.UnisonFile ( UnisonFile ) +import qualified Unison.UnisonFile as UF +import qualified Unison.Term as Term +import Unison.Term ( Term ) +import Unison.Var ( Var ) +import qualified Unison.Reference as Reference +import Unison.DataDeclaration (Decl) + +fromUnisonFile :: (Var v, Monad m) => UnisonFile v a -> CodeLookup v m a +fromUnisonFile uf = CodeLookup tm ty where + tm 
id = pure $ Map.lookup id termMap + ty id = pure $ Map.lookup id typeMap1 <|> Map.lookup id typeMap2 + typeMap1 = Map.fromList [ (id, Right dd) | + (_, (Reference.DerivedId id, dd)) <- + Map.toList (UF.dataDeclarations uf) ] + typeMap2 = Map.fromList [ (id, Left ad) | + (_, (Reference.DerivedId id, ad)) <- + Map.toList (UF.effectDeclarations uf) ] + tmm = Map.fromList (UF.terms uf) + termMap = Map.fromList [ (id, e) | + (_, (id, e)) <- + Map.toList (Term.hashComponents tmm) ] + +data CodeLookup v m a + = CodeLookup { + getTerm :: Reference.Id -> m (Maybe (Term v a)), + getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v a)) + } + +instance MFunctor (CodeLookup v) where + hoist f (CodeLookup tm tp) = CodeLookup (f . tm) (f . tp) + +instance (Ord v, Functor m) => Functor (CodeLookup v m) where + fmap f cl = CodeLookup tm ty where + tm id = fmap (Term.amap f) <$> getTerm cl id + ty id = fmap md <$> getTypeDeclaration cl id + md (Left e) = Left (f <$> e) + md (Right d) = Right (f <$> d) + +instance Monad m => Semigroup (CodeLookup v m a) where + (<>) = mappend + +instance Monad m => Monoid (CodeLookup v m a) where + mempty = CodeLookup (const $ pure Nothing) (const $ pure Nothing) + c1 `mappend` c2 = CodeLookup tm ty where + tm id = do + o <- getTerm c1 id + case o of Nothing -> getTerm c2 id; Just _ -> pure o + ty id = do + o <- getTypeDeclaration c1 id + case o of Nothing -> getTypeDeclaration c2 id; Just _ -> pure o diff --git a/parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs b/parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs new file mode 100644 index 0000000000..264a491ad2 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/AuthorInfo.hs @@ -0,0 +1,59 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.Editor.AuthorInfo where + +import Unison.Term (Term, hashComponents) + +import qualified Unison.Reference as Reference +import Unison.Prelude (MonadIO, Word8) +import Unison.Var (Var) +import Data.ByteString 
(unpack) +import Crypto.Random (getRandomBytes) +import qualified Data.Map as Map +import qualified Unison.Var as Var +import Data.Foldable (toList) +import UnliftIO (liftIO) +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import Unison.Type (Type) +import Data.Text (Text) + +data AuthorInfo v a = AuthorInfo + { guid, author, copyrightHolder :: (Reference.Id, Term v a, Type v a) } + +createAuthorInfo :: forall m v a. MonadIO m => Var v => a -> Text -> m (AuthorInfo v a) +createAuthorInfo a t = createAuthorInfo' . unpack <$> liftIO (getRandomBytes 32) + where + createAuthorInfo' :: [Word8] -> AuthorInfo v a + createAuthorInfo' bytes = let + [(guidRef, guidTerm)] = hashAndWrangle "guid" $ + Term.app a + (Term.constructor a guidTypeRef 0) + (Term.app a + (Term.builtin a "Bytes.fromList") + (Term.seq a (map (Term.nat a . fromIntegral) bytes))) + + [(authorRef, authorTerm)] = hashAndWrangle "author" $ + Term.apps + (Term.constructor a authorTypeRef 0) + [(a, Term.ref a (Reference.DerivedId guidRef)) + ,(a, Term.text a t)] + + [(chRef, chTerm)] = hashAndWrangle "copyrightHolder" $ + Term.apps + (Term.constructor a chTypeRef 0) + [(a, Term.ref a (Reference.DerivedId guidRef)) + ,(a, Term.text a t)] + + in AuthorInfo + (guidRef, guidTerm, guidType) + (authorRef, authorTerm, authorType) + (chRef, chTerm, chType) + hashAndWrangle v tm = toList . hashComponents $ Map.fromList [(Var.named v, tm)] + (chType, chTypeRef) = (Type.ref a chTypeRef, unsafeParse copyrightHolderHash) + (authorType, authorTypeRef) = (Type.ref a authorTypeRef, unsafeParse authorHash) + (guidType, guidTypeRef) = (Type.ref a guidTypeRef, unsafeParse guidHash) + unsafeParse = either error id . 
Reference.fromText + guidHash = "#rc29vdqe019p56kupcgkg07fkib86r3oooatbmsgfbdsgpmjhsh00l307iuts3r973q5etb61vbjkes42b6adb3mkorusvmudiuorno" + copyrightHolderHash = "#aohndsu9bl844vspujp142j5aijv86rifmnrbnjvpv3h3f3aekn45rj5s1uf1ucrrtm5urbc5d1ajtm7lqq1tr8lkgv5fathp6arqug" + authorHash = "#5hi1vvs5t1gmu6vn1kpqmgksou8ie872j31gc294lgqks71di6gm3d4ugnrr4mq8ov0ap1e20lq099d5g6jjf9c6cbp361m9r9n5g50" diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Command.hs b/parser-typechecker/src/Unison/Codebase/Editor/Command.hs new file mode 100644 index 0000000000..4cab12ebb0 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/Command.hs @@ -0,0 +1,191 @@ +{-# LANGUAGE GADTs #-} + +module Unison.Codebase.Editor.Command ( + Command(..), + AmbientAbilities, + LexedSource, + Source, + SourceName, + TypecheckingResult, + LoadSourceResult(..) + ) where + +import Unison.Prelude + +import Data.Configurator.Types ( Configured ) + +import Unison.Codebase.Editor.Output +import Unison.Codebase.Editor.RemoteRepo + +import Unison.Codebase.Branch ( Branch ) +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.GitError +import qualified Unison.Codebase.Reflog as Reflog +import Unison.Codebase.SyncMode ( SyncMode ) +import Unison.Names3 ( Names, Names0 ) +import Unison.Parser ( Ann ) +import Unison.Referent ( Referent ) +import Unison.Reference ( Reference ) +import Unison.Result ( Note + , Result) +import Unison.DataDeclaration ( Decl ) +import qualified Unison.Codebase.Runtime as Runtime +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Reference as Reference +import Unison.Term ( Term ) +import qualified Unison.UnisonFile as UF +import qualified Unison.Lexer as L +import qualified Unison.Parser as Parser +import Unison.ShortHash ( ShortHash ) +import Unison.Type ( Type ) +import Unison.Codebase.ShortBranchHash + ( ShortBranchHash ) +import Unison.Codebase.Editor.AuthorInfo (AuthorInfo) + + +type AmbientAbilities v = [Type v Ann] 
+type SourceName = Text +type Source = Text +type LexedSource = (Text, [L.Token L.Lexeme]) + +data LoadSourceResult = InvalidSourceNameError + | LoadError + | LoadSuccess Text + +type TypecheckingResult v = + Result (Seq (Note v Ann)) + (Either Names0 (UF.TypecheckedUnisonFile v Ann)) + +data Command m i v a where + Eval :: m a -> Command m i v a + + ConfigLookup :: Configured a => Text -> Command m i v (Maybe a) + + Input :: Command m i v i + + -- Presents some output to the user + Notify :: Output v -> Command m i v () + NotifyNumbered :: NumberedOutput v -> Command m i v NumberedArgs + + -- literally just write some terms and types .unison/{terms,types} + AddDefsToCodebase :: UF.TypecheckedUnisonFile v Ann -> Command m i v () + + -- the hash length needed to disambiguate any definition in the codebase + CodebaseHashLength :: Command m i v Int + + TypeReferencesByShortHash :: ShortHash -> Command m i v (Set Reference) + TermReferencesByShortHash :: ShortHash -> Command m i v (Set Reference) + TermReferentsByShortHash :: ShortHash -> Command m i v (Set Referent) + + -- the hash length needed to disambiguate any branch in the codebase + BranchHashLength :: Command m i v Int + + BranchHashesByPrefix :: ShortBranchHash -> Command m i v (Set Branch.Hash) + + ParseType :: Names -> LexedSource + -> Command m i v (Either (Parser.Err v) (Type v Ann)) + + LoadSource :: SourceName -> Command m i v LoadSourceResult + + Typecheck :: AmbientAbilities v + -> Names + -> SourceName + -> LexedSource + -> Command m i v (TypecheckingResult v) + + TypecheckFile :: UF.UnisonFile v Ann + -> [Type v Ann] + -> Command m i v (TypecheckingResult v) + + -- Evaluate all watched expressions in a UnisonFile and return + -- their results, keyed by the name of the watch variable. 
The tuple returned + -- has the form: + -- (hash, (ann, sourceTerm, evaluatedTerm, isCacheHit)) + -- + -- where + -- `hash` is the hash of the original watch expression definition + -- `ann` gives the location of the watch expression + -- `sourceTerm` is a closed term (no free vars) for the watch expression + -- `evaluatedTerm` is the result of evaluating that `sourceTerm` + -- `isCacheHit` is True if the result was computed by just looking up + -- in a cache + -- + -- It's expected that the user of this action might add the + -- `(hash, evaluatedTerm)` mapping to a cache to make future evaluations + -- of the same watches instantaneous. + + Evaluate :: PPE.PrettyPrintEnv + -> UF.TypecheckedUnisonFile v Ann + -> Command m i v (Either Runtime.Error + ([(v, Term v ())], Map v + (Ann, UF.WatchKind, Reference, Term v (), Term v (), Runtime.IsCacheHit))) + + -- Evaluate a single closed definition + Evaluate1 :: PPE.PrettyPrintEnv -> Term v Ann -> Command m i v (Either Runtime.Error (Term v Ann)) + + -- Add a cached watch to the codebase + PutWatch :: UF.WatchKind -> Reference.Id -> Term v Ann -> Command m i v () + + -- Loads any cached watches of the given kind + LoadWatches :: UF.WatchKind -> Set Reference -> Command m i v [(Reference, Term v Ann)] + + -- Loads a root branch from some codebase, returning `Nothing` if not found. + -- Any definitions in the head of the requested root that aren't in the local + -- codebase are copied there. + LoadLocalRootBranch :: Command m i v (Branch m) + + -- Like `LoadLocalRootBranch`. + LoadLocalBranch :: Branch.Hash -> Command m i v (Branch m) + + ViewRemoteBranch :: + RemoteNamespace -> Command m i v (Either GitError (Branch m)) + + -- we want to import as little as possible, so we pass the SBH/path as part + -- of the `RemoteNamespace`. + ImportRemoteBranch :: + RemoteNamespace -> SyncMode -> Command m i v (Either GitError (Branch m)) + + -- Syncs the Branch to some codebase and updates the head to the head of this causal. 
+ -- Any definitions in the head of the supplied branch that aren't in the target + -- codebase are copied there. + SyncLocalRootBranch :: Branch m -> Command m i v () + + SyncRemoteRootBranch :: + RemoteRepo -> Branch m -> SyncMode -> Command m i v (Either GitError ()) + + AppendToReflog :: Text -> Branch m -> Branch m -> Command m i v () + + -- load the reflog in file (chronological) order + LoadReflog :: Command m i v [Reflog.Entry] + + LoadTerm :: Reference.Id -> Command m i v (Maybe (Term v Ann)) + + -- todo: change this to take Reference and return DeclOrBuiltin + LoadType :: Reference.Id -> Command m i v (Maybe (Decl v Ann)) + + LoadTypeOfTerm :: Reference -> Command m i v (Maybe (Type v Ann)) + + PutTerm :: Reference.Id -> Term v Ann -> Type v Ann -> Command m i v () + + PutDecl :: Reference.Id -> Decl v Ann -> Command m i v () + + -- todo: eliminate these hopefully + -- (why, again? because we can know from the Reference?) + IsTerm :: Reference -> Command m i v Bool + IsType :: Reference -> Command m i v Bool + + -- Get the immediate (not transitive) dependents of the given reference + -- This might include historical definitions not in any current path; these + -- should be filtered by the caller of this command if that's not desired. + GetDependents :: Reference -> Command m i v (Set Reference) + + GetTermsOfType :: Type v Ann -> Command m i v (Set Referent) + GetTermsMentioningType :: Type v Ann -> Command m i v (Set Referent) + + -- Execute a UnisonFile for its IO effects + -- todo: Execute should do some evaluation? 
+ Execute :: PPE.PrettyPrintEnv -> UF.TypecheckedUnisonFile v Ann -> Command m i v () + + CreateAuthorInfo :: Text -> Command m i v (AuthorInfo v Ann) + + RuntimeMain :: Command m i v (Type v Ann) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs b/parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs new file mode 100644 index 0000000000..7f47e07797 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/DisplayThing.hs @@ -0,0 +1,12 @@ +module Unison.Codebase.Editor.DisplayThing where + +import Unison.Reference as Reference + +data DisplayThing a = BuiltinThing | MissingThing Reference.Id | RegularThing a + deriving (Eq, Ord, Show) + +toMaybe :: DisplayThing a -> Maybe a +toMaybe = \case + RegularThing a -> Just a + _ -> Nothing + diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs new file mode 100644 index 0000000000..2e66e2122d --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs @@ -0,0 +1,249 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.Editor.Git + ( importRemoteBranch + , pushGitRootBranch + , viewRemoteBranch + ) where + +import Unison.Prelude + +import Control.Monad.Except ( MonadError + , throwError + , ExceptT + ) +import Control.Monad.Extra ((||^)) +import qualified Control.Exception +import qualified Data.Text as Text +import Shellmet ( ($?), ($|), ($^)) +import System.FilePath ( () ) +import qualified Unison.Codebase.GitError as GitError +import Unison.Codebase.GitError (GitError) +import qualified Unison.Codebase as Codebase +import Unison.Codebase (Codebase, CodebasePath) +import Unison.Codebase.Editor.RemoteRepo ( RemoteRepo(GitRepo) + , RemoteNamespace + , printRepo + ) +import Unison.Codebase.FileCodebase as FC +import Unison.Codebase.Branch ( Branch + , headHash + ) +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.SyncMode ( SyncMode ) 
+import qualified Unison.Util.Exception as Ex +import Unison.Util.Timing (time) +import qualified Unison.Codebase.Branch as Branch +import UnliftIO.IO (hFlush, stdout) +import UnliftIO.Directory (getXdgDirectory, XdgDirectory(XdgCache), doesDirectoryExist, findExecutable, removeDirectoryRecursive) +import Unison.Codebase.FileCodebase.Common (encodeFileName, updateCausalHead, branchHeadDir) + +tempGitDir :: MonadIO m => Text -> m FilePath +tempGitDir url = + getXdgDirectory XdgCache + $ "unisonlanguage" + "gitfiles" + encodeFileName (Text.unpack url) + +withStatus :: MonadIO m => String -> m a -> m a +withStatus str ma = do + flushStr str + a <- ma + flushStr (const ' ' <$> str) + pure a + where + flushStr str = do + liftIO . putStr $ " " ++ str ++ "\r" + hFlush stdout + +-- | Given a remote git repo url, and branch/commit hash (currently +-- not allowed): checks for git, clones or updates a cached copy of the repo +pullBranch :: (MonadIO m, MonadError GitError m) => RemoteRepo -> m CodebasePath +pullBranch (GitRepo _uri (Just t)) = error $ + "Pulling a specific commit isn't fully implemented or tested yet.\n" ++ + "InputPatterns.parseUri was expected to have prevented you " ++ + "from supplying the git treeish `" ++ Text.unpack t ++ "`!" +pullBranch repo@(GitRepo uri Nothing) = do + checkForGit + localPath <- tempGitDir uri + ifM (doesDirectoryExist localPath) + -- try to update existing directory + (ifM (isGitRepo localPath) + (checkoutExisting localPath) + (throwError (GitError.UnrecognizableCacheDir uri localPath))) + -- directory doesn't exist, so clone anew + (checkOutNew localPath Nothing) + pure localPath + + where + -- | Do a `git clone` (for a not-previously-cached repo). 
+ checkOutNew :: (MonadIO m, MonadError GitError m) => CodebasePath -> Maybe Text -> m () + checkOutNew localPath branch = do + withStatus ("Downloading from " ++ Text.unpack uri ++ " ...") $ + (liftIO $ + "git" $^ (["clone", "--quiet"] ++ ["--depth", "1"] + ++ maybe [] (\t -> ["--branch", t]) branch + ++ [uri, Text.pack localPath])) + `withIOError` (throwError . GitError.CloneException repo . show) + isGitDir <- liftIO $ isGitRepo localPath + unless isGitDir . throwError $ GitError.UnrecognizableCheckoutDir uri localPath + + -- | Do a `git pull` on a cached repo. + checkoutExisting :: (MonadIO m, MonadError GitError m) => FilePath -> m () + checkoutExisting localPath = + ifM (isEmptyGitRepo localPath) + -- I don't know how to properly update from an empty remote repo. + -- As a heuristic, if this cached copy is empty, then the remote might + -- be too, so this impl. just wipes the cached copy and starts from scratch. + (do wipeDir localPath; checkOutNew localPath Nothing) + -- Otherwise proceed! + (withStatus ("Updating cached copy of " ++ Text.unpack uri ++ " ...") $ do + gitIn localPath ["reset", "--hard", "--quiet", "HEAD"] + gitIn localPath ["clean", "-d", "--force", "--quiet"] + gitIn localPath ["pull", "--force", "--quiet"]) + + isEmptyGitRepo :: MonadIO m => FilePath -> m Bool + isEmptyGitRepo localPath = liftIO $ + -- if rev-parse succeeds, the repo is _not_ empty, so return False; else True + (gitTextIn localPath ["rev-parse", "--verify", "--quiet", "HEAD"] $> False) + $? pure True + + -- | try removing a cached copy + wipeDir localPath = do + e <- Ex.tryAny . whenM (doesDirectoryExist localPath) $ + removeDirectoryRecursive localPath + case e of + Left e -> throwError (GitError.SomeOtherError (show e)) + Right _ -> pure () + +-- | Sync elements as needed from a remote codebase into the local one. +-- If `sbh` is supplied, we try to load the specified branch hash; +-- otherwise we try to load the root branch. +importRemoteBranch + :: forall m v a + . 
MonadIO m + => Codebase m v a + -> Branch.Cache m + -> RemoteNamespace + -> SyncMode + -> ExceptT GitError m (Branch m) +importRemoteBranch codebase cache ns mode = do + (branch, cacheDir) <- viewRemoteBranch' cache ns + withStatus "Importing downloaded files into local codebase..." $ + time "SyncFromDirectory" $ + lift $ Codebase.syncFromDirectory codebase cacheDir mode branch + pure branch + +-- | Pull a git branch and view it from the cache, without syncing into the +-- local codebase. +viewRemoteBranch :: forall m. MonadIO m + => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m) +viewRemoteBranch cache = fmap fst . viewRemoteBranch' cache + +viewRemoteBranch' :: forall m. MonadIO m + => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) +viewRemoteBranch' cache (repo, sbh, path) = do + -- set up the cache dir + remotePath <- time "Git fetch" $ pullBranch repo + -- try to load the requested branch from it + branch <- time "Git fetch (sbh)" $ case sbh of + -- load the root branch + Nothing -> lift (FC.getRootBranch cache remotePath) >>= \case + Left Codebase.NoRootBranch -> pure Branch.empty + Left (Codebase.CouldntLoadRootBranch h) -> + throwError $ GitError.CouldntLoadRootBranch repo h + Left (Codebase.CouldntParseRootBranch s) -> + throwError $ GitError.CouldntParseRootBranch repo s + Right b -> pure b + -- load from a specific `ShortBranchHash` + Just sbh -> do + branchCompletions <- lift $ FC.branchHashesByPrefix remotePath sbh + case toList branchCompletions of + [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + [h] -> (lift $ FC.branchFromFiles cache remotePath h) >>= \case + Just b -> pure b + Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions + pure (Branch.getAt' path branch, remotePath) + +-- | See if `git` is on the system path. 
+checkForGit :: MonadIO m => MonadError GitError m => m () +checkForGit = do + gitPath <- liftIO $ findExecutable "git" + when (isNothing gitPath) $ throwError GitError.NoGit + +-- | Does `git` recognize this directory as being managed by git? +isGitRepo :: MonadIO m => FilePath -> m Bool +isGitRepo dir = liftIO $ + (True <$ gitIn dir ["rev-parse"]) $? pure False + +-- | Perform an IO action, passing any IO exception to `handler` +withIOError :: MonadIO m => IO a -> (IOException -> m a) -> m a +withIOError action handler = + liftIO (fmap Right action `Control.Exception.catch` (pure . Left)) >>= + either handler pure + +-- | Generate some `git` flags for operating on some arbitary checked out copy +setupGitDir :: FilePath -> [Text] +setupGitDir localPath = + ["--git-dir", Text.pack $ localPath ".git" + ,"--work-tree", Text.pack localPath] + +gitIn :: MonadIO m => FilePath -> [Text] -> m () +gitIn localPath args = liftIO $ "git" $^ (setupGitDir localPath <> args) + +gitTextIn :: MonadIO m => FilePath -> [Text] -> m Text +gitTextIn localPath args = liftIO $ "git" $| setupGitDir localPath <> args + +-- Given a branch that is "after" the existing root of a given git repo, +-- stage and push the branch (as the new root) + dependencies to the repo. 
+pushGitRootBranch + :: MonadIO m + => Codebase m v a + -> Branch.Cache m + -> Branch m + -> RemoteRepo + -> SyncMode + -> ExceptT GitError m () +pushGitRootBranch codebase cache branch repo syncMode = do + -- Pull the remote repo into a staging directory + (remoteRoot, remotePath) <- viewRemoteBranch' cache (repo, Nothing, Path.empty) + ifM (pure (remoteRoot == Branch.empty) + ||^ lift (remoteRoot `Branch.before` branch)) + -- ours is newer 👍, meaning this is a fast-forward push, + -- so sync branch to staging area + (stageAndPush remotePath) + (throwError $ GitError.PushDestinationHasNewStuff repo) + where + stageAndPush remotePath = do + let repoString = Text.unpack $ printRepo repo + withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ + lift (Codebase.syncToDirectory codebase remotePath syncMode branch) + updateCausalHead (branchHeadDir remotePath) (Branch._history branch) + -- push staging area to remote + withStatus ("Uploading to " ++ repoString ++ " ...") $ + unlessM + (push remotePath repo + `withIOError` (throwError . GitError.PushException repo . show)) + (throwError $ GitError.PushNoOp repo) + -- Commit our changes + push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO + push remotePath (GitRepo url gitbranch) = do + -- has anything changed? + status <- gitTextIn remotePath ["status", "--short"] + if Text.null status then + pure False + else do + gitIn remotePath ["add", "--all", "."] + gitIn remotePath + ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] + -- Push our changes to the repo + case gitbranch of + Nothing -> gitIn remotePath ["push", "--quiet", url] + Just gitbranch -> error $ + "Pushing to a specific branch isn't fully implemented or tested yet.\n" + ++ "InputPatterns.parseUri was expected to have prevented you " + ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
+ -- gitIn remotePath ["push", "--quiet", url, gitbranch] + pure True diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs new file mode 100644 index 0000000000..a96d1b824d --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs @@ -0,0 +1,272 @@ +{-# OPTIONS_GHC -Wno-partial-type-signatures #-} + +{-# LANGUAGE GADTs #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.Editor.HandleCommand where + +import Unison.Prelude + +import Unison.Codebase.Editor.Output +import Unison.Codebase.Editor.Command + +import qualified Unison.Builtin as B + +import qualified Crypto.Random as Random +import Control.Monad.Except ( runExceptT ) +import qualified Data.Configurator as Config +import Data.Configurator.Types ( Config ) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Text as Text +import Unison.Codebase ( Codebase ) +import qualified Unison.Codebase as Codebase +import Unison.Codebase.Branch ( Branch ) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Editor.Git as Git +import Unison.Parser ( Ann ) +import qualified Unison.Parser as Parser +import qualified Unison.Parsers as Parsers +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import qualified Unison.Codebase.Runtime as Runtime +import Unison.Codebase.Runtime (Runtime) +import qualified Unison.Term as Term +import qualified Unison.UnisonFile as UF +import Unison.Util.Free ( Free ) +import qualified Unison.Util.Free as Free +import Unison.Var ( Var ) +import qualified Unison.Result as Result +import Unison.FileParsers ( parseAndSynthesizeFile + , synthesizeFile' + ) +import qualified Unison.PrettyPrintEnv as PPE +import Unison.Term (Term) 
+import Unison.Type (Type) +import qualified Unison.Codebase.Editor.AuthorInfo as AuthorInfo + +typecheck + :: (Monad m, Var v) + => [Type v Ann] + -> Codebase m v Ann + -> Parser.ParsingEnv + -> SourceName + -> LexedSource + -> m (TypecheckingResult v) +typecheck ambient codebase parsingEnv sourceName src = + Result.getResult $ parseAndSynthesizeFile ambient + (((<> B.typeLookup) <$>) . Codebase.typeLookupForDependencies codebase) + parsingEnv + (Text.unpack sourceName) + (fst src) + +typecheck' + :: Monad m + => Var v + => [Type v Ann] + -> Codebase m v Ann + -> UF.UnisonFile v Ann + -> m (TypecheckingResult v) +typecheck' ambient codebase file = do + typeLookup <- (<> B.typeLookup) + <$> Codebase.typeLookupForDependencies codebase (UF.dependencies file) + pure . fmap Right $ synthesizeFile' ambient typeLookup file + +commandLine + :: forall i v a gen + . (Var v, Random.DRG gen) + => Config + -> IO i + -> (Branch IO -> IO ()) + -> Runtime v + -> (Output v -> IO ()) + -> (NumberedOutput v -> IO NumberedArgs) + -> (SourceName -> IO LoadSourceResult) + -> Codebase IO v Ann + -> (Int -> IO gen) + -> Branch.Cache IO + -> Free (Command IO i v) a + -> IO a +commandLine config awaitInput setBranchRef rt notifyUser notifyNumbered loadSource codebase rngGen branchCache = + Free.foldWithIndex go + where + go :: forall x . 
Int -> Command IO i v x -> IO x + go i x = case x of + -- Wait until we get either user input or a unison file update + Eval m -> m + Input -> awaitInput + Notify output -> notifyUser output + NotifyNumbered output -> notifyNumbered output + ConfigLookup name -> + Config.lookup config name + LoadSource sourcePath -> loadSource sourcePath + + Typecheck ambient names sourceName source -> do + -- todo: if guids are being shown to users, + -- not ideal to generate new guid every time + rng <- rngGen i + let namegen = Parser.uniqueBase32Namegen rng + env = Parser.ParsingEnv namegen names + typecheck ambient codebase env sourceName source + TypecheckFile file ambient -> typecheck' ambient codebase file + Evaluate ppe unisonFile -> evalUnisonFile ppe unisonFile + Evaluate1 ppe term -> eval1 ppe term + LoadLocalRootBranch -> either (const Branch.empty) id <$> Codebase.getRootBranch codebase + LoadLocalBranch h -> fromMaybe Branch.empty <$> Codebase.getBranchForHash codebase h + SyncLocalRootBranch branch -> do + setBranchRef branch + Codebase.putRootBranch codebase branch + ViewRemoteBranch ns -> + runExceptT $ Git.viewRemoteBranch branchCache ns + ImportRemoteBranch ns syncMode -> + runExceptT $ Git.importRemoteBranch codebase branchCache ns syncMode + SyncRemoteRootBranch repo branch syncMode -> + runExceptT $ Git.pushGitRootBranch codebase branchCache branch repo syncMode + LoadTerm r -> Codebase.getTerm codebase r + LoadType r -> Codebase.getTypeDeclaration codebase r + LoadTypeOfTerm r -> Codebase.getTypeOfTerm codebase r + PutTerm r tm tp -> Codebase.putTerm codebase r tm tp + PutDecl r decl -> Codebase.putTypeDeclaration codebase r decl + PutWatch kind r e -> Codebase.putWatch codebase kind r e + LoadWatches kind rs -> catMaybes <$> traverse go (toList rs) where + go (Reference.Builtin _) = pure Nothing + go r@(Reference.DerivedId rid) = + fmap (r,) <$> Codebase.getWatch codebase kind rid + IsTerm r -> Codebase.isTerm codebase r + IsType r -> Codebase.isType 
codebase r + GetDependents r -> Codebase.dependents codebase r + AddDefsToCodebase unisonFile -> Codebase.addDefsToCodebase codebase unisonFile + GetTermsOfType ty -> Codebase.termsOfType codebase ty + GetTermsMentioningType ty -> Codebase.termsMentioningType codebase ty + CodebaseHashLength -> Codebase.hashLength codebase + -- all builtin and derived type references + TypeReferencesByShortHash sh -> do + fromCodebase <- Codebase.typeReferencesByPrefix codebase sh + let fromBuiltins = Set.filter (\r -> sh == Reference.toShortHash r) + $ B.intrinsicTypeReferences + pure (fromBuiltins <> Set.map Reference.DerivedId fromCodebase) + -- all builtin and derived term references + TermReferencesByShortHash sh -> do + fromCodebase <- Codebase.termReferencesByPrefix codebase sh + let fromBuiltins = Set.filter (\r -> sh == Reference.toShortHash r) + $ B.intrinsicTermReferences + pure (fromBuiltins <> Set.map Reference.DerivedId fromCodebase) + -- all builtin and derived term references & type constructors + TermReferentsByShortHash sh -> do + fromCodebase <- Codebase.termReferentsByPrefix codebase sh + let fromBuiltins = Set.map Referent.Ref + . 
Set.filter (\r -> sh == Reference.toShortHash r) + $ B.intrinsicTermReferences + pure (fromBuiltins <> Set.map (fmap Reference.DerivedId) fromCodebase) + BranchHashLength -> Codebase.branchHashLength codebase + BranchHashesByPrefix h -> Codebase.branchHashesByPrefix codebase h + ParseType names (src, _) -> pure $ + Parsers.parseType (Text.unpack src) (Parser.ParsingEnv mempty names) + RuntimeMain -> pure $ Runtime.mainType rt + +-- Todo b -> doTodo codebase (Branch.head b) +-- Propagate b -> do +-- b0 <- Codebase.propagate codebase (Branch.head b) +-- pure $ Branch.append b0 b + Execute ppe uf -> void $ evalUnisonFile ppe uf + AppendToReflog reason old new -> Codebase.appendReflog codebase reason old new + LoadReflog -> Codebase.getReflog codebase + CreateAuthorInfo t -> AuthorInfo.createAuthorInfo Parser.External t + + eval1 :: PPE.PrettyPrintEnv -> Term v Ann -> _ + eval1 ppe tm = do + let codeLookup = Codebase.toCodeLookup codebase + r <- Runtime.evaluateTerm codeLookup ppe rt tm + pure $ r <&> Term.amap (const Parser.External) + + evalUnisonFile :: PPE.PrettyPrintEnv -> UF.TypecheckedUnisonFile v Ann -> _ + evalUnisonFile ppe (UF.discardTypes -> unisonFile) = do + let codeLookup = Codebase.toCodeLookup codebase + selfContained <- Codebase.makeSelfContained' codeLookup unisonFile + let watchCache (Reference.DerivedId h) = do + m1 <- Codebase.getWatch codebase UF.RegularWatch h + m2 <- maybe (Codebase.getWatch codebase UF.TestWatch h) (pure . 
Just) m1 + pure $ Term.amap (const ()) <$> m2 + watchCache Reference.Builtin{} = pure Nothing + r <- Runtime.evaluateWatches codeLookup ppe watchCache rt selfContained + case r of + Left e -> pure (Left e) + Right rs@(_,map) -> do + forM_ (Map.elems map) $ \(_loc, kind, hash, _src, value, isHit) -> + if isHit then pure () + else case hash of + Reference.DerivedId h -> do + let value' = Term.amap (const Parser.External) value + Codebase.putWatch codebase kind h value' + Reference.Builtin{} -> pure () + pure $ Right rs + +-- doTodo :: Monad m => Codebase m v a -> Branch0 -> m (TodoOutput v a) +-- doTodo code b = do +-- -- traceM $ "edited terms: " ++ show (Branch.editedTerms b) +-- f <- Codebase.frontier code b +-- let dirty = R.dom f +-- frontier = R.ran f +-- ppe = Branch.prettyPrintEnv b +-- (frontierTerms, frontierTypes) <- loadDefinitions code frontier +-- (dirtyTerms, dirtyTypes) <- loadDefinitions code dirty +-- -- todo: something more intelligent here? +-- scoreFn <- pure $ const 1 +-- remainingTransitive <- Codebase.frontierTransitiveDependents code b frontier +-- let +-- addTermNames terms = [(PPE.termName ppe (Referent.Ref r), r, t) | (r,t) <- terms ] +-- addTypeNames types = [(PPE.typeName ppe r, r, d) | (r,d) <- types ] +-- frontierTermsNamed = addTermNames frontierTerms +-- frontierTypesNamed = addTypeNames frontierTypes +-- dirtyTermsNamed = sortOn (\(s,_,_,_) -> s) $ +-- [ (scoreFn r, n, r, t) | (n,r,t) <- addTermNames dirtyTerms ] +-- dirtyTypesNamed = sortOn (\(s,_,_,_) -> s) $ +-- [ (scoreFn r, n, r, t) | (n,r,t) <- addTypeNames dirtyTypes ] +-- pure $ +-- TodoOutput_ +-- (Set.size remainingTransitive) +-- (frontierTermsNamed, frontierTypesNamed) +-- (dirtyTermsNamed, dirtyTypesNamed) +-- (Branch.conflicts' b) + +-- loadDefinitions :: Monad m => Codebase m v a -> Set Reference +-- -> m ( [(Reference, Maybe (Type v a))], +-- [(Reference, DisplayThing (Decl v a))] ) +-- loadDefinitions code refs = do +-- termRefs <- filterM (Codebase.isTerm code) 
(toList refs) +-- terms <- forM termRefs $ \r -> (r,) <$> Codebase.getTypeOfTerm code r +-- typeRefs <- filterM (Codebase.isType code) (toList refs) +-- types <- forM typeRefs $ \r -> do +-- case r of +-- Reference.Builtin _ -> pure (r, BuiltinThing) +-- Reference.DerivedId id -> do +-- decl <- Codebase.getTypeDeclaration code id +-- case decl of +-- Nothing -> pure (r, MissingThing id) +-- Just d -> pure (r, RegularThing d) +-- pure (terms, types) +-- +-- -- | Write all of the builtins into the codebase +-- initializeCodebase :: forall m . Monad m => Codebase m Symbol Ann -> m () +-- initializeCodebase c = do +-- traverse_ (go Right) B.builtinDataDecls +-- traverse_ (go Left) B.builtinEffectDecls +-- void $ fileToBranch updateCollisionHandler c mempty IOSource.typecheckedFile +-- where +-- go :: (t -> Decl Symbol Ann) -> (a, (Reference.Reference, t)) -> m () +-- go f (_, (ref, decl)) = case ref of +-- Reference.DerivedId id -> Codebase.putTypeDeclaration c id (f decl) +-- _ -> pure () +-- +-- -- todo: probably don't use this anywhere +-- nameDistance :: Name -> Name -> Maybe Int +-- nameDistance (Name.toString -> q) (Name.toString -> n) = +-- if q == n then Just 0-- exact match is top choice +-- else if map toLower q == map toLower n then Just 1-- ignore case +-- else if q `isSuffixOf` n then Just 2-- matching suffix is p.good +-- else if q `isPrefixOf` n then Just 3-- matching prefix +-- else Nothing diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs new file mode 100644 index 0000000000..5ebcfd754c --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs @@ -0,0 +1,2898 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ApplicativeDo #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE 
RecordWildCards #-} +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE EmptyCase #-} + +module Unison.Codebase.Editor.HandleInput + ( loop + , loopState0 + , LoopState(..) + , currentPath + , parseSearchType + ) +where + +import Unison.Prelude + +import Unison.Codebase.MainTerm ( getMainTerm ) +import qualified Unison.Codebase.MainTerm as MainTerm +import Unison.Codebase.Editor.Command +import Unison.Codebase.Editor.Input +import Unison.Codebase.Editor.Output +import Unison.Codebase.Editor.DisplayThing +import qualified Unison.Codebase.Editor.Output as Output +import Unison.Codebase.Editor.SlurpResult (SlurpResult(..)) +import qualified Unison.Codebase.Editor.SlurpResult as Slurp +import Unison.Codebase.Editor.SlurpComponent (SlurpComponent(..)) +import qualified Unison.Codebase.Editor.SlurpComponent as SC +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, printNamespace) +import qualified Unison.CommandLine.InputPattern as InputPattern +import qualified Unison.CommandLine.InputPatterns as InputPatterns + +import Control.Lens +import Control.Monad.State ( StateT ) +import Control.Monad.Except ( ExceptT(..), runExceptT, withExceptT) +import Data.Bifunctor ( second, first ) +import Data.Configurator () +import qualified Data.List as List +import Data.List ( partition ) +import Data.List.Extra ( nubOrd, sort ) +import qualified Data.Map as Map +import qualified Data.Text as Text +import qualified Text.Megaparsec as P +import qualified Data.Set as Set +import Data.Sequence ( Seq(..) ) +import qualified Unison.ABT as ABT +import qualified Unison.Codebase.BranchDiff as BranchDiff +import qualified Unison.Codebase.Editor.Output.BranchDiff as OBranchDiff +import Unison.Codebase.Branch ( Branch(..) + , Branch0(..) + ) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.BranchUtil as BranchUtil +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.Metadata as Metadata +import Unison.Codebase.Patch ( Patch(..) 
) +import qualified Unison.Codebase.Patch as Patch +import Unison.Codebase.Path ( Path + , Path'(..) ) +import qualified Unison.Codebase.Path as Path +import qualified Unison.Codebase.Reflog as Reflog +import Unison.Codebase.SearchResult ( SearchResult ) +import qualified Unison.Codebase.SearchResult as SR +import qualified Unison.Codebase.ShortBranchHash as SBH +import qualified Unison.Codebase.SyncMode as SyncMode +import qualified Unison.Builtin.Decls as DD +import qualified Unison.DataDeclaration as DD +import qualified Unison.HashQualified as HQ +import qualified Unison.HashQualified' as HQ' +import qualified Unison.Name as Name +import Unison.Name ( Name ) +import Unison.Names3 ( Names(..), Names0 + , pattern Names0 ) +import qualified Unison.Names2 as Names +import qualified Unison.Names3 as Names3 +import Unison.Parser ( Ann(..) ) +import Unison.Reference ( Reference(..) ) +import qualified Unison.Reference as Reference +import Unison.Referent ( Referent ) +import qualified Unison.Referent as Referent +import Unison.Result ( pattern Result ) +import qualified Unison.ShortHash as SH +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import qualified Unison.Result as Result +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Find as Find +import Unison.Util.Free ( Free ) +import qualified Unison.Util.Free as Free +import Unison.Util.List ( uniqueBy ) +import qualified Unison.Util.Relation as R +import qualified Unison.Util.Relation4 as R4 +import Unison.Util.Timing (unsafeTime) +import Unison.Util.TransitiveClosure (transitiveClosure) +import Unison.Var ( Var ) +import qualified Unison.Var as Var +import qualified Unison.Codebase.TypeEdit as TypeEdit +import Unison.Codebase.TermEdit (TermEdit(..)) +import qualified Unison.Codebase.TermEdit as TermEdit +import qualified Unison.Typechecker as Typechecker +import qualified Unison.PrettyPrintEnv as PPE +import Unison.Runtime.IOSource ( isTest ) +import qualified 
Unison.Runtime.IOSource as IOSource +import qualified Unison.Util.Star3 as Star3 +import qualified Unison.Util.Monoid as Monoid +import Unison.UnisonFile (TypecheckedUnisonFile) +import qualified Unison.Codebase.Editor.TodoOutput as TO +import qualified Unison.Lexer as L +import Unison.Codebase.Editor.SearchResult' (SearchResult') +import qualified Unison.Codebase.Editor.SearchResult' as SR' +import qualified Unison.LabeledDependency as LD +import Unison.LabeledDependency (LabeledDependency) +import Unison.Term (Term) +import Unison.Type (Type) +import qualified Unison.Builtin as Builtin +import Unison.NameSegment (NameSegment(..)) +import qualified Unison.NameSegment as NameSegment +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import qualified Unison.Codebase.Editor.Propagate as Propagate +import qualified Unison.Codebase.Editor.UriParser as UriParser +import Data.Tuple.Extra (uncurry3) +import qualified Unison.CommandLine.DisplayValues as DisplayValues +import qualified Control.Error.Util as ErrorUtil +import Unison.Util.Monoid (intercalateMap) +import Data.List.NonEmpty (NonEmpty) +import qualified Data.List.NonEmpty as Nel +import Unison.Codebase.Editor.AuthorInfo (AuthorInfo(..)) + +type F m i v = Free (Command m i v) + +-- type (Action m i v) a +type Action m i v = MaybeT (StateT (LoopState m v) (F m i v)) + +_liftToAction :: m a -> Action m i v a +_liftToAction = lift . lift . Free.eval . 
Eval + +data LoopState m v + = LoopState + { _root :: Branch m + , _lastSavedRoot :: Branch m + -- the current position in the namespace + , _currentPathStack :: NonEmpty Path.Absolute + + -- TBD + -- , _activeEdits :: Set Branch.EditGuid + + -- The file name last modified, and whether to skip the next file + -- change event for that path (we skip file changes if the file has + -- just been modified programmatically) + , _latestFile :: Maybe (FilePath, SkipNextUpdate) + , _latestTypecheckedFile :: Maybe (UF.TypecheckedUnisonFile v Ann) + + -- The previous user input. Used to request confirmation of + -- questionable user commands. + , _lastInput :: Maybe Input + + -- A 1-indexed list of strings that can be referenced by index at the + -- CLI prompt. e.g. Given ["Foo.bat", "Foo.cat"], + -- `rename 2 Foo.foo` will rename `Foo.cat` to `Foo.foo`. + , _numberedArgs :: NumberedArgs + } + +type SkipNextUpdate = Bool +type InputDescription = Text + +makeLenses ''LoopState + +-- replacing the old read/write scalar Lens with "peek" Getter for the NonEmpty +currentPath :: Getter (LoopState m v) Path.Absolute +currentPath = currentPathStack . to Nel.head + +loopState0 :: Branch m -> Path.Absolute -> LoopState m v +loopState0 b p = LoopState b b (pure p) Nothing Nothing Nothing [] + +type Action' m v = Action m (Either Event Input) v + +defaultPatchNameSegment :: NameSegment +defaultPatchNameSegment = "patch" + +loop :: forall m v . 
(Monad m, Var v) => Action m (Either Event Input) v () +loop = do + uf <- use latestTypecheckedFile + root' <- use root + currentPath' <- use currentPath + latestFile' <- use latestFile + currentBranch' <- getAt currentPath' + e <- eval Input + hqLength <- eval CodebaseHashLength + sbhLength <- eval BranchHashLength + let + sbh = SBH.fromHash sbhLength + root0 = Branch.head root' + currentBranch0 = Branch.head currentBranch' + defaultPatchPath :: PatchPath + defaultPatchPath = (Path' $ Left currentPath', defaultPatchNameSegment) + resolveSplit' :: (Path', a) -> (Path, a) + resolveSplit' = Path.fromAbsoluteSplit . Path.toAbsoluteSplit currentPath' + resolveToAbsolute :: Path' -> Path.Absolute + resolveToAbsolute = Path.resolve currentPath' + getAtSplit :: Path.Split -> Maybe (Branch m) + getAtSplit p = BranchUtil.getBranch p root0 + getAtSplit' :: Path.Split' -> Maybe (Branch m) + getAtSplit' = getAtSplit . resolveSplit' + getPatchAtSplit' :: Path.Split' -> Action' m v (Maybe Patch) + getPatchAtSplit' s = do + let (p, seg) = Path.toAbsoluteSplit currentPath' s + b <- getAt p + eval . 
Eval $ Branch.getMaybePatch seg (Branch.head b) + getHQ'TermsIncludingHistorical p = + getTermsIncludingHistorical (resolveSplit' p) root0 + + getHQ'Terms :: Path.HQSplit' -> Set Referent + getHQ'Terms p = BranchUtil.getTerm (resolveSplit' p) root0 + getHQ'Types :: Path.HQSplit' -> Set Reference + getHQ'Types p = BranchUtil.getType (resolveSplit' p) root0 + getHQTerms :: HQ.HashQualified -> Action' m v (Set Referent) + getHQTerms hq = case hq of + HQ.NameOnly n -> let + -- absolute-ify the name, then lookup in deepTerms of root + path :: Path.Path' + path = Path.fromName' n + Path.Absolute absPath = resolveToAbsolute path + in pure $ R.lookupRan (Path.toName absPath) (Branch.deepTerms root0) + HQ.HashOnly sh -> hashOnly sh + HQ.HashQualified _ sh -> hashOnly sh + where + hashOnly sh = eval $ TermReferentsByShortHash sh + + resolveHHQS'Types :: HashOrHQSplit' -> Action' m v (Set Reference) + resolveHHQS'Types = either + (eval . TypeReferencesByShortHash) + (pure . getHQ'Types) + -- Term Refs and Cons + resolveHHQS'Referents = either + (eval . TermReferentsByShortHash) + (pure . getHQ'Terms) + getTypes :: Path.Split' -> Set Reference + getTypes = getHQ'Types . fmap HQ'.NameOnly + getTerms :: Path.Split' -> Set Referent + getTerms = getHQ'Terms . fmap HQ'.NameOnly + getPatchAt :: Path.Split' -> Action' m v Patch + getPatchAt patchPath' = do + let (p, seg) = Path.toAbsoluteSplit currentPath' patchPath' + b <- getAt p + eval . Eval $ Branch.getPatch seg (Branch.head b) + withFile ambient sourceName lexed@(text, tokens) k = do + let + getHQ = \case + L.Backticks s (Just sh) -> + Just (HQ.HashQualified (Name.unsafeFromString s) sh) + L.WordyId s (Just sh) -> + Just (HQ.HashQualified (Name.unsafeFromString s) sh) + L.SymbolyId s (Just sh) -> + Just (HQ.HashQualified (Name.unsafeFromString s) sh) + L.Hash sh -> Just (HQ.HashOnly sh) + _ -> Nothing + hqs = Set.fromList . mapMaybe (getHQ . 
L.payload) $ tokens + parseNames :: Names <- makeHistoricalParsingNames hqs + latestFile .= Just (Text.unpack sourceName, False) + latestTypecheckedFile .= Nothing + Result notes r <- eval $ Typecheck ambient parseNames sourceName lexed + case r of + -- Parsing failed + Nothing -> respond $ + ParseErrors text [ err | Result.Parsing err <- toList notes ] + Just (Left errNames) -> do + ppe <- prettyPrintEnv =<< makeShadowedPrintNamesFromHQ hqs errNames + respond $ + TypeErrors text ppe [ err | Result.TypeError err <- toList notes ] + Just (Right uf) -> k uf + loadUnisonFile sourceName text = do + let lexed = L.lexer (Text.unpack sourceName) (Text.unpack text) + withFile [] sourceName (text, lexed) $ \unisonFile -> do + sr <- toSlurpResult currentPath' unisonFile <$> slurpResultNames0 + names <- makeShadowedPrintNamesFromLabeled + (UF.termSignatureExternalLabeledDependencies unisonFile) + (UF.typecheckedToNames0 unisonFile) + ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl names + eval . Notify $ Typechecked sourceName ppe sr unisonFile + unlessError' EvaluationFailure do + (bindings, e) <- ExceptT . eval . Evaluate ppe $ unisonFile + lift do + let e' = Map.map go e + go (ann, kind, _hash, _uneval, eval, isHit) = (ann, kind, eval, isHit) + unless (null e') $ + eval . Notify $ Evaluated text ppe bindings e' + latestTypecheckedFile .= Just unisonFile + + case e of + Left (IncomingRootBranch hashes) -> + eval . Notify $ WarnIncomingRootBranch + (SBH.fromHash sbhLength $ Branch.headHash root') + (Set.map (SBH.fromHash sbhLength) hashes) + Left (UnisonFileChanged sourceName text) -> + -- We skip this update if it was programmatically generated + if maybe False snd latestFile' + then modifying latestFile (fmap (const False) <$>) + else loadUnisonFile sourceName text + Right input -> + let + ifConfirmed = ifM (confirmedCommand input) + branchNotFound = respond . BranchNotFound + branchNotFound' = respond . BranchNotFound . 
Path.unsplit' + patchNotFound :: Path.Split' -> Action' m v () + patchNotFound s = respond $ PatchNotFound s + patchExists :: Path.Split' -> Action' m v () + patchExists s = respond $ PatchAlreadyExists s + typeNotFound = respond . TypeNotFound + typeNotFound' = respond . TypeNotFound' + termNotFound = respond . TermNotFound + termNotFound' = respond . TermNotFound' + nameConflicted src tms tys = respond (DeleteNameAmbiguous hqLength src tms tys) + typeConflicted src = nameConflicted src Set.empty + termConflicted src tms = nameConflicted src tms Set.empty + hashConflicted src = respond . HashAmbiguous src + hqNameQuery' doSuffixify hqs = do + let (hqnames, hashes) = partition (isJust . HQ.toName) hqs + termRefs <- filter (not . Set.null . snd) . zip hashes <$> traverse + (eval . TermReferentsByShortHash) + (catMaybes (HQ.toHash <$> hashes)) + typeRefs <- filter (not . Set.null . snd) . zip hashes <$> traverse + (eval . TypeReferencesByShortHash) + (catMaybes (HQ.toHash <$> hashes)) + parseNames0 <- makeHistoricalParsingNames $ Set.fromList hqnames + let + mkTermResult n r = SR.termResult (HQ'.fromHQ' n) r Set.empty + mkTypeResult n r = SR.typeResult (HQ'.fromHQ' n) r Set.empty + termResults = + (\(n, tms) -> (n, toList $ mkTermResult n <$> toList tms)) <$> termRefs + typeResults = + (\(n, tps) -> (n, toList $ mkTypeResult n <$> toList tps)) <$> typeRefs + parseNames = (if doSuffixify then Names3.suffixify else id) parseNames0 + resultss = searchBranchExact hqLength parseNames hqnames + missingRefs = + [ x + | x <- hashes + , isNothing (lookup x termRefs) && isNothing (lookup x typeRefs) + ] + (misses, hits) = + partition (\(_, results) -> null results) (zip hqs resultss) + results = + List.sort + . 
uniqueBy SR.toReferent + $ (hits ++ termResults ++ typeResults) + >>= snd + pure (missingRefs ++ (fst <$> misses), results) + hqNameQuery = hqNameQuery' False + hqNameQuerySuffixify = hqNameQuery' True + typeReferences :: [SearchResult] -> [Reference] + typeReferences rs + = [ r | SR.Tp (SR.TypeResult _ r _) <- rs ] + termReferences :: [SearchResult] -> [Reference] + termReferences rs = + [ r | SR.Tm (SR.TermResult _ (Referent.Ref r) _) <- rs ] + termResults rs = [ r | SR.Tm r <- rs ] + typeResults rs = [ r | SR.Tp r <- rs ] + doRemoveReplacement from patchPath isTerm = do + let patchPath' = fromMaybe defaultPatchPath patchPath + patch <- getPatchAt patchPath' + (misses', hits) <- hqNameQuery [from] + let tpRefs = Set.fromList $ typeReferences hits + tmRefs = Set.fromList $ termReferences hits + tmMisses = misses' + <> (HQ'.toHQ . SR.termName <$> termResults hits) + tpMisses = misses' + <> (HQ'.toHQ . SR.typeName <$> typeResults hits) + misses = if isTerm then tpMisses else tmMisses + go :: Reference -> Action m (Either Event Input) v () + go fr = do + let termPatch = + over Patch.termEdits (R.deleteDom fr) patch + typePatch = + over Patch.typeEdits (R.deleteDom fr) patch + (patchPath'', patchName) = resolveSplit' patchPath' + -- Save the modified patch + stepAtM inputDescription + (patchPath'', + Branch.modifyPatches + patchName + (const (if isTerm then termPatch else typePatch))) + -- Say something + success + unless (null misses) $ + respond $ SearchTermsNotFound misses + traverse_ go (if isTerm then tmRefs else tpRefs) + branchExists dest _x = respond $ BranchAlreadyExists dest + branchExistsSplit = branchExists . Path.unsplit' + typeExists dest = respond . TypeAlreadyExists dest + termExists dest = respond . 
TermAlreadyExists dest + -- | try to get these as close as possible to the command that caused the change + inputDescription :: InputDescription + inputDescription = case input of + ForkLocalBranchI src dest -> "fork " <> hp' src <> " " <> p' dest + MergeLocalBranchI src dest mode -> case mode of + Branch.RegularMerge -> "merge " <> p' src <> " " <> p' dest + Branch.SquashMerge -> "merge.squash " <> p' src <> " " <> p' dest + ResetRootI src -> "reset-root " <> hp' src + AliasTermI src dest -> "alias.term " <> hhqs' src <> " " <> ps' dest + AliasTypeI src dest -> "alias.type " <> hhqs' src <> " " <> ps' dest + AliasManyI srcs dest -> + "alias.many " <> intercalateMap " " hqs srcs <> " " <> p' dest + MoveTermI src dest -> "move.term " <> hqs' src <> " " <> ps' dest + MoveTypeI src dest -> "move.type " <> hqs' src <> " " <> ps' dest + MoveBranchI src dest -> "move.namespace " <> ops' src <> " " <> ps' dest + MovePatchI src dest -> "move.patch " <> ps' src <> " " <> ps' dest + CopyPatchI src dest -> "copy.patch " <> ps' src <> " " <> ps' dest + DeleteI thing -> "delete " <> hqs' thing + DeleteTermI def -> "delete.term " <> hqs' def + DeleteTypeI def -> "delete.type " <> hqs' def + DeleteBranchI opath -> "delete.namespace " <> ops' opath + DeletePatchI path -> "delete.patch " <> ps' path + ReplaceTermI src target p -> + "replace.term " <> HQ.toText src <> " " + <> HQ.toText target <> " " + <> opatch p + ReplaceTypeI src target p -> + "replace.type " <> HQ.toText src <> " " + <> HQ.toText target <> " " + <> opatch p + ResolveTermNameI path -> "resolve.termName " <> hqs' path + ResolveTypeNameI path -> "resolve.typeName " <> hqs' path + AddI _selection -> "add" + UpdateI p _selection -> "update " <> opatch p + PropagatePatchI p scope -> "patch " <> ps' p <> " " <> p' scope + UndoI{} -> "undo" + ExecuteI s -> "execute " <> Text.pack s + LinkI md defs -> + "link " <> HQ.toText md <> " " <> intercalateMap " " hqs' defs + UnlinkI md defs -> + "unlink " <> HQ.toText md <> " " 
<> intercalateMap " " hqs' defs + UpdateBuiltinsI -> "builtins.update" + MergeBuiltinsI -> "builtins.merge" + MergeIOBuiltinsI -> "builtins.mergeio" + PullRemoteBranchI orepo dest _syncMode -> + (Text.pack . InputPattern.patternName + $ InputPatterns.patternFromInput input) + <> " " + -- todo: show the actual config-loaded namespace + <> maybe "(remote namespace from .unisonConfig)" + (uncurry3 printNamespace) orepo + <> " " + <> p' dest + LoadI{} -> wat + PreviewAddI{} -> wat + PreviewUpdateI{} -> wat + CreateAuthorI (NameSegment id) name -> "create.author " <> id <> " " <> name + CreatePullRequestI{} -> wat + LoadPullRequestI base head dest -> + "pr.load " + <> uncurry3 printNamespace base + <> " " + <> uncurry3 printNamespace head + <> " " + <> p' dest + PushRemoteBranchI{} -> wat + PreviewMergeLocalBranchI{} -> wat + DiffNamespaceI{} -> wat + SwitchBranchI{} -> wat + PopBranchI{} -> wat + NamesI{} -> wat + TodoI{} -> wat + ListEditsI{} -> wat + ListDependenciesI{} -> wat + ListDependentsI{} -> wat + HistoryI{} -> wat + TestI{} -> wat + LinksI{} -> wat + SearchByNameI{} -> wat + FindShallowI{} -> wat + FindPatchI{} -> wat + ShowDefinitionI{} -> wat + DisplayI{} -> wat + DocsI{} -> wat + ShowDefinitionByPrefixI{} -> wat + ShowReflogI{} -> wat + DebugNumberedArgsI{} -> wat + DebugBranchHistoryI{} -> wat + DebugTypecheckedUnisonFileI{} -> wat + QuitI{} -> wat + DeprecateTermI{} -> undefined + DeprecateTypeI{} -> undefined + RemoveTermReplacementI src p -> + "delete.term-replacement" <> HQ.toText src <> " " <> opatch p + RemoveTypeReplacementI src p -> + "delete.type-replacement" <> HQ.toText src <> " " <> opatch p + where + hp' = either (Text.pack . show) p' + p' = Text.pack . show . resolveToAbsolute + ops' = maybe "." ps' + opatch = ps' . fromMaybe defaultPatchPath + wat = error $ show input ++ " is not expected to alter the branch" + hhqs' (Left sh) = SH.toText sh + hhqs' (Right x) = hqs' x + hqs' (p, hq) = + Monoid.unlessM (Path.isRoot' p) (p' p) <> "." 
<> Text.pack (show hq) + hqs (p, hq) = hqs' (Path' . Right . Path.Relative $ p, hq) + ps' = p' . Path.unsplit' + stepAt = Unison.Codebase.Editor.HandleInput.stepAt inputDescription + stepManyAt = Unison.Codebase.Editor.HandleInput.stepManyAt inputDescription + stepManyAtNoSync = + Unison.Codebase.Editor.HandleInput.stepManyAtNoSync + updateRoot = flip Unison.Codebase.Editor.HandleInput.updateRoot inputDescription + syncRoot = use root >>= updateRoot + updateAtM = Unison.Codebase.Editor.HandleInput.updateAtM inputDescription + unlessGitError = unlessError' (Output.GitError input) + importRemoteBranch ns mode = ExceptT . eval $ ImportRemoteBranch ns mode + viewRemoteBranch ns = ExceptT . eval $ ViewRemoteBranch ns + syncRemoteRootBranch repo b mode = + ExceptT . eval $ SyncRemoteRootBranch repo b mode + handleFailedDelete failed failedDependents = do + failed <- loadSearchResults $ SR.fromNames failed + failedDependents <- loadSearchResults $ SR.fromNames failedDependents + ppe <- prettyPrintEnv =<< makePrintNamesFromLabeled' + (foldMap SR'.labeledDependencies $ failed <> failedDependents) + respond $ CantDelete ppe failed failedDependents + saveAndApplyPatch patchPath'' patchName patch' = do + stepAtM (inputDescription <> " (1/2)") + (patchPath'', + Branch.modifyPatches patchName (const patch')) + -- Apply the modified patch to the current path + -- since we might be able to propagate further. 
+ void $ propagatePatch inputDescription patch' currentPath' + -- Say something + success + previewResponse sourceName sr uf = do + names <- makeShadowedPrintNamesFromLabeled + (UF.termSignatureExternalLabeledDependencies uf) + (UF.typecheckedToNames0 uf) + ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl names + respond $ Typechecked (Text.pack sourceName) ppe sr uf + + addDefaultMetadata + :: SlurpComponent v + -> Action m (Either Event Input) v () + addDefaultMetadata adds = do + let addedVs = Set.toList $ SC.types adds <> SC.terms adds + addedNs = traverse (Path.hqSplitFromName' . Name.fromVar) addedVs + case addedNs of + Nothing -> + error $ "I couldn't parse a name I just added to the codebase! " + <> "-- Added names: " <> show addedVs + Just addedNames -> do + dm <- resolveDefaultMetadata currentPath' + case toList dm of + [] -> pure () + dm' -> do + let hqs = traverse InputPatterns.parseHashQualifiedName dm' + case hqs of + Left e -> respond $ ConfiguredMetadataParseError + (Path.absoluteToPath' currentPath') + (show dm') + e + Right defaultMeta -> + manageLinks True addedNames defaultMeta Metadata.insert + + -- Add/remove links between definitions and metadata. + -- `silent` controls whether this produces any output to the user. + -- `srcs` is (names of the) definitions to pass to `op` + -- `mdValues` is (names of the) metadata to pass to `op` + -- `op` is the operation to add/remove/alter metadata mappings. + -- e.g. `Metadata.insert` is passed to add metadata links. + manageLinks :: Bool + -> [(Path', HQ'.HQSegment)] + -> [HQ.HashQualified] + -> (forall r. 
Ord r + => (r, Metadata.Type, Metadata.Value) + -> Branch.Star r NameSegment + -> Branch.Star r NameSegment) + -> Action m (Either Event Input) v () + manageLinks silent srcs mdValues op = do + mdValuels <- fmap (first toList) <$> + traverse (\x -> fmap (,x) (getHQTerms x)) mdValues + before <- Branch.head <$> use root + traverse_ go mdValuels + after <- Branch.head <$> use root + (ppe, outputDiff) <- diffHelper before after + if not silent then + if OBranchDiff.isEmpty outputDiff + then respond NoOp + else respondNumbered $ ShowDiffNamespace Path.absoluteEmpty + Path.absoluteEmpty + ppe + outputDiff + else unless (OBranchDiff.isEmpty outputDiff) $ + respond DefaultMetadataNotification + where + go (mdl, hqn) = do + newRoot <- use root + let r0 = Branch.head newRoot + getTerms p = BranchUtil.getTerm (resolveSplit' p) r0 + getTypes p = BranchUtil.getType (resolveSplit' p) r0 + !srcle = toList . getTerms =<< srcs + !srclt = toList . getTypes =<< srcs + names0 <- basicPrettyPrintNames0 + ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl (Names names0 mempty) + case mdl of + [r@(Referent.Ref mdValue)] -> do + mdType <- eval $ LoadTypeOfTerm mdValue + case mdType of + Nothing -> respond $ MetadataMissingType ppe r + Just ty -> do + let steps = + bimap (Path.unabsolute . resolveToAbsolute) + (const . step $ Type.toReference ty) + <$> srcs + stepManyAtNoSync steps + where + step mdType b0 = + let tmUpdates terms = foldl' go terms srcle + where go terms src = op (src, mdType, mdValue) terms + tyUpdates types = foldl' go types srclt + where go types src = op (src, mdType, mdValue) types + in over Branch.terms tmUpdates . 
over Branch.types tyUpdates $ b0 + mdValues -> respond $ MetadataAmbiguous hqn ppe mdValues + delete + :: (Path.HQSplit' -> Set Referent) -- compute matching terms + -> (Path.HQSplit' -> Set Reference) -- compute matching types + -> Path.HQSplit' + -> Action' m v () + delete getHQ'Terms getHQ'Types hq = do + let matchingTerms = toList (getHQ'Terms hq) + let matchingTypes = toList (getHQ'Types hq) + case (matchingTerms, matchingTypes) of + ([], []) -> respond (NameNotFound hq) + (Set.fromList -> tms, Set.fromList -> tys) -> goMany tms tys + where + resolvedPath = resolveSplit' (HQ'.toName <$> hq) + goMany tms tys = do + let rootNames = Branch.toNames0 root0 + name = Path.toName (Path.unsplit resolvedPath) + toRel :: Ord ref => Set ref -> R.Relation Name ref + toRel = R.fromList . fmap (name,) . toList + -- these names are relative to the root + toDelete = Names0 (toRel tms) (toRel tys) + (failed, failedDependents) <- + getEndangeredDependents (eval . GetDependents) toDelete rootNames + if failed == mempty then do + let makeDeleteTermNames = fmap (BranchUtil.makeDeleteTermName resolvedPath) . toList $ tms + let makeDeleteTypeNames = fmap (BranchUtil.makeDeleteTypeName resolvedPath) . toList $ tys + stepManyAt (makeDeleteTermNames ++ makeDeleteTypeNames) + root'' <- use root + diffHelper (Branch.head root') (Branch.head root'') >>= + respondNumbered . uncurry ShowDiffAfterDeleteDefinitions + else handleFailedDelete failed failedDependents + in case input of + ShowReflogI -> do + entries <- convertEntries Nothing [] <$> eval LoadReflog + numberedArgs .= + fmap (('#':) . SBH.toString . Output.hash) entries + respond $ ShowReflog entries + where + -- reverses & formats entries, adds synthetic entries when there is a + -- discontinuity in the reflog. 
+ convertEntries :: Maybe Branch.Hash + -> [Output.ReflogEntry] + -> [Reflog.Entry] + -> [Output.ReflogEntry] + convertEntries _ acc [] = acc + convertEntries Nothing acc entries@(Reflog.Entry old _ _ : _) = + convertEntries + (Just old) + (Output.ReflogEntry (SBH.fromHash sbhLength old) "(initial reflogged namespace)" : acc) + entries + convertEntries (Just lastHash) acc entries@(Reflog.Entry old new reason : rest) = + if lastHash /= old then + convertEntries + (Just old) + (Output.ReflogEntry (SBH.fromHash sbhLength old) "(external change)" : acc) + entries + else + convertEntries + (Just new) + (Output.ReflogEntry (SBH.fromHash sbhLength new) reason : acc) + rest + + ResetRootI src0 -> + case src0 of + Left hash -> unlessError do + newRoot <- resolveShortBranchHash hash + lift do + updateRoot newRoot + success + Right path' -> do + newRoot <- getAt $ resolveToAbsolute path' + if Branch.isEmpty newRoot then respond $ BranchNotFound path' + else do + updateRoot newRoot + success + ForkLocalBranchI src0 dest0 -> do + let tryUpdateDest srcb dest0 = do + let dest = resolveToAbsolute dest0 + -- if dest isn't empty: leave dest unchanged, and complain. 
+ destb <- getAt dest + if Branch.isEmpty destb then do + ok <- updateAtM dest (const $ pure srcb) + if ok then success else respond $ BranchEmpty src0 + else respond $ BranchAlreadyExists dest0 + case src0 of + Left hash -> unlessError do + srcb <- resolveShortBranchHash hash + lift $ tryUpdateDest srcb dest0 + Right path' -> do + srcb <- getAt $ resolveToAbsolute path' + if Branch.isEmpty srcb then respond $ BranchNotFound path' + else tryUpdateDest srcb dest0 + MergeLocalBranchI src0 dest0 mergeMode -> do + let [src, dest] = resolveToAbsolute <$> [src0, dest0] + srcb <- getAt src + if Branch.isEmpty srcb then branchNotFound src0 + else do + let err = Just $ MergeAlreadyUpToDate src0 dest0 + mergeBranchAndPropagateDefaultPatch mergeMode inputDescription err srcb (Just dest0) dest + + PreviewMergeLocalBranchI src0 dest0 -> do + let [src, dest] = resolveToAbsolute <$> [src0, dest0] + srcb <- getAt src + if Branch.isEmpty srcb then branchNotFound src0 + else do + destb <- getAt dest + merged <- eval . Eval $ Branch.merge srcb destb + if merged == destb + then respond (PreviewMergeAlreadyUpToDate src0 dest0) + else + diffHelper (Branch.head destb) (Branch.head merged) >>= + respondNumbered . uncurry (ShowDiffAfterMergePreview dest0 dest) + + DiffNamespaceI before0 after0 -> do + let [beforep, afterp] = + resolveToAbsolute <$> [before0, after0] + before <- Branch.head <$> getAt beforep + after <- Branch.head <$> getAt afterp + (ppe, outputDiff) <- diffHelper before after + respondNumbered $ ShowDiffNamespace beforep afterp ppe outputDiff + + CreatePullRequestI baseRepo headRepo -> unlessGitError do + baseBranch <- viewRemoteBranch baseRepo + headBranch <- viewRemoteBranch headRepo + lift do + merged <- eval . 
Eval $ Branch.merge baseBranch headBranch + (ppe, diff) <- diffHelper (Branch.head baseBranch) (Branch.head merged) + respondNumbered $ ShowDiffAfterCreatePR baseRepo headRepo ppe diff + + LoadPullRequestI baseRepo headRepo dest0 -> do + let desta = resolveToAbsolute dest0 + let dest = Path.unabsolute desta + destb <- getAt desta + if Branch.isEmpty0 (Branch.head destb) then unlessGitError do + baseb <- importRemoteBranch baseRepo SyncMode.ShortCircuit + headb <- importRemoteBranch headRepo SyncMode.ShortCircuit + lift $ do + mergedb <- eval . Eval $ Branch.merge baseb headb + squashedb <- eval . Eval $ Branch.merge' Branch.SquashMerge headb baseb + stepManyAt + [BranchUtil.makeSetBranch (dest, "base") baseb + ,BranchUtil.makeSetBranch (dest, "head") headb + ,BranchUtil.makeSetBranch (dest, "merged") mergedb + ,BranchUtil.makeSetBranch (dest, "squashed") squashedb] + let base = snoc dest0 "base" + head = snoc dest0 "head" + merged = snoc dest0 "merged" + squashed = snoc dest0 "squashed" + respond $ LoadPullRequest baseRepo headRepo base head merged squashed + loadPropagateDiffDefaultPatch + inputDescription + (Just merged) + (snoc desta "merged") + else + respond . BranchNotEmpty . Path.Path' . 
Left $ currentPath' + + + -- move the root to a sub-branch + MoveBranchI Nothing dest -> do + b <- use root + stepManyAt [ (Path.empty, const Branch.empty0) + , BranchUtil.makeSetBranch (resolveSplit' dest) b ] + success + + MoveBranchI (Just src) dest -> + maybe (branchNotFound' src) srcOk (getAtSplit' src) + where + srcOk b = maybe (destOk b) (branchExistsSplit dest) (getAtSplit' dest) + destOk b = do + stepManyAt + [ BranchUtil.makeSetBranch (resolveSplit' src) Branch.empty + , BranchUtil.makeSetBranch (resolveSplit' dest) b ] + success -- could give rando stats about new defns + + MovePatchI src dest -> do + psrc <- getPatchAtSplit' src + pdest <- getPatchAtSplit' dest + case (psrc, pdest) of + (Nothing, _) -> patchNotFound src + (_, Just _) -> patchExists dest + (Just p, Nothing) -> do + stepManyAt [ + BranchUtil.makeDeletePatch (resolveSplit' src), + BranchUtil.makeReplacePatch (resolveSplit' dest) p ] + success + + CopyPatchI src dest -> do + psrc <- getPatchAtSplit' src + pdest <- getPatchAtSplit' dest + case (psrc, pdest) of + (Nothing, _) -> patchNotFound src + (_, Just _) -> patchExists dest + (Just p, Nothing) -> do + stepAt (BranchUtil.makeReplacePatch (resolveSplit' dest) p) + success + + DeletePatchI src -> do + psrc <- getPatchAtSplit' src + case psrc of + Nothing -> patchNotFound src + Just _ -> do + stepAt (BranchUtil.makeDeletePatch (resolveSplit' src)) + success + + DeleteBranchI Nothing -> + ifConfirmed + (do + stepAt (Path.empty, const Branch.empty0) + respond DeletedEverything) + (respond DeleteEverythingConfirmation) + + DeleteBranchI (Just p) -> + maybe (branchNotFound' p) go $ getAtSplit' p + where + go (Branch.head -> b) = do + (failed, failedDependents) <- + let rootNames = Branch.toNames0 root0 + toDelete = Names.prefix0 + (Path.toName . Path.unsplit . resolveSplit' $ p) -- resolveSplit' incorporates currentPath + (Branch.toNames0 b) + in getEndangeredDependents (eval . 
GetDependents) toDelete rootNames + if failed == mempty then do + stepAt $ BranchUtil.makeSetBranch (resolveSplit' p) Branch.empty + -- Looks similar to the 'toDelete' above... investigate me! ;) + diffHelper b Branch.empty0 >>= + respondNumbered + . uncurry (ShowDiffAfterDeleteBranch + $ resolveToAbsolute (Path.unsplit' p)) + else handleFailedDelete failed failedDependents + SwitchBranchI path' -> do + let path = resolveToAbsolute path' + currentPathStack %= Nel.cons path + branch' <- getAt path + when (Branch.isEmpty branch') (respond $ CreatedNewBranch path) + + PopBranchI -> use (currentPathStack . to Nel.uncons) >>= \case + (_, Nothing) -> respond StartOfCurrentPathHistory + (_, Just t) -> currentPathStack .= t + + HistoryI resultsCap diffCap from -> case from of + Left hash -> unlessError do + b <- resolveShortBranchHash hash + lift $ doHistory 0 b [] + Right path' -> do + let path = resolveToAbsolute path' + branch' <- getAt path + if Branch.isEmpty branch' then respond $ CreatedNewBranch path + else doHistory 0 branch' [] + where + doHistory !n b acc = + if maybe False (n >=) resultsCap then + respond $ History diffCap acc (PageEnd (sbh $ Branch.headHash b) n) + else case Branch._history b of + Causal.One{} -> + respond $ History diffCap acc (EndOfLog . sbh $ Branch.headHash b) + Causal.Merge{..} -> + respond $ History diffCap acc (MergeTail (sbh $ Branch.headHash b) . map sbh $ Map.keys tails) + Causal.Cons{..} -> do + b' <- fmap Branch.Branch . eval . Eval $ snd tail + let elem = (sbh $ Branch.headHash b, Branch.namesDiff b' b) + doHistory (n+1) b' (elem : acc) + + UndoI -> do + prev <- eval . Eval $ Branch.uncons root' + case prev of + Nothing -> + respond . CantUndo $ if Branch.isOne root' then CantUndoPastStart + else CantUndoPastMerge + Just (_, prev) -> do + updateRoot prev + diffHelper (Branch.head prev) (Branch.head root') >>= + respondNumbered . 
uncurry Output.ShowDiffAfterUndo + + AliasTermI src dest -> do + referents <- resolveHHQS'Referents src + case (toList referents, toList (getTerms dest)) of + ([r], []) -> do + stepAt (BranchUtil.makeAddTermName (resolveSplit' dest) r (oldMD r)) + success + ([_], rs@(_:_)) -> termExists dest (Set.fromList rs) + ([], _) -> either termNotFound' termNotFound src + (rs, _) -> + either hashConflicted termConflicted src (Set.fromList rs) + where + oldMD r = either (const mempty) + (\src -> + let p = resolveSplit' src in + BranchUtil.getTermMetadataAt p r root0) + src + + AliasTypeI src dest -> do + refs <- resolveHHQS'Types src + case (toList refs, toList (getTypes dest)) of + ([r], []) -> do + stepAt (BranchUtil.makeAddTypeName (resolveSplit' dest) r (oldMD r)) + success + ([_], rs@(_:_)) -> typeExists dest (Set.fromList rs) + ([], _) -> either typeNotFound' typeNotFound src + (rs, _) -> + either + (\src -> hashConflicted src . Set.map Referent.Ref) + typeConflicted + src + (Set.fromList rs) + + + where + oldMD r = + either (const mempty) + (\src -> + let p = resolveSplit' src in + BranchUtil.getTypeMetadataAt p r root0) + src + + -- this implementation will happily produce name conflicts, + -- but will surface them in a normal diff at the end of the operation. + AliasManyI srcs dest' -> do + let destAbs = resolveToAbsolute dest' + old <- getAt destAbs + let (unknown, actions) = foldl' go mempty srcs + stepManyAt actions + new <- getAt destAbs + diffHelper (Branch.head old) (Branch.head new) >>= + respondNumbered . uncurry (ShowDiffAfterModifyBranch dest' destAbs) + unless (null unknown) $ + respond . SearchTermsNotFound . 
fmap fixupOutput $ unknown + where + -- a list of missing sources (if any) and the actions that do the work + go :: ([Path.HQSplit], [(Path, Branch0 m -> Branch0 m)]) + -> Path.HQSplit + -> ([Path.HQSplit], [(Path, Branch0 m -> Branch0 m)]) + go (missingSrcs, actions) hqsrc = + let + src :: Path.Split + src = second HQ'.toName hqsrc + proposedDest :: Path.Split + proposedDest = second HQ'.toName hqProposedDest + hqProposedDest :: Path.HQSplit + hqProposedDest = first Path.unabsolute $ + Path.resolve (resolveToAbsolute dest') hqsrc + -- `Nothing` if src doesn't exist + doType :: Maybe [(Path, Branch0 m -> Branch0 m)] + doType = case ( BranchUtil.getType hqsrc currentBranch0 + , BranchUtil.getType hqProposedDest root0 + ) of + (null -> True, _) -> Nothing -- missing src + (rsrcs, existing) -> -- happy path + Just . map addAlias . toList $ Set.difference rsrcs existing + where + addAlias r = BranchUtil.makeAddTypeName proposedDest r (oldMD r) + oldMD r = BranchUtil.getTypeMetadataAt src r currentBranch0 + doTerm :: Maybe [(Path, Branch0 m -> Branch0 m)] + doTerm = case ( BranchUtil.getTerm hqsrc currentBranch0 + , BranchUtil.getTerm hqProposedDest root0 + ) of + (null -> True, _) -> Nothing -- missing src + (rsrcs, existing) -> + Just . map addAlias . toList $ Set.difference rsrcs existing + where + addAlias r = BranchUtil.makeAddTermName proposedDest r (oldMD r) + oldMD r = BranchUtil.getTermMetadataAt src r currentBranch0 + in case (doType, doTerm) of + (Nothing, Nothing) -> (missingSrcs :> hqsrc, actions) + (Just as, Nothing) -> (missingSrcs, actions ++ as) + (Nothing, Just as) -> (missingSrcs, actions ++ as) + (Just as1, Just as2) -> (missingSrcs, actions ++ as1 ++ as2) + + fixupOutput :: Path.HQSplit -> HQ.HashQualified + fixupOutput = fmap Path.toName . HQ'.toHQ . 
Path.unsplitHQ + + NamesI thing -> do + parseNames0 <- Names3.suffixify0 <$> basicParseNames0 + let filtered = case thing of + HQ.HashOnly shortHash -> + Names.filterBySHs (Set.singleton shortHash) parseNames0 + HQ.HashQualified n sh -> + Names.filterByHQs (Set.singleton $ HQ'.HashQualified n sh) parseNames0 + HQ.NameOnly n -> + Names.filterByHQs (Set.singleton $ HQ'.NameOnly n) parseNames0 + printNames0 <- basicPrettyPrintNames0 + let printNames = Names printNames0 mempty + let terms' ::Set (Referent, Set HQ'.HashQualified) + terms' = (`Set.map` Names.termReferents filtered) $ + \r -> (r, Names3.termName hqLength r printNames) + types' :: Set (Reference, Set HQ'.HashQualified) + types' = (`Set.map` Names.typeReferences filtered) $ + \r -> (r, Names3.typeName hqLength r printNames) + respond $ ListNames hqLength (toList types') (toList terms') +-- let (p, hq) = p0 +-- namePortion = HQ'.toName hq +-- case hq of +-- HQ'.NameOnly _ -> +-- respond $ uncurry ListNames (results p namePortion) +-- HQ'.HashQualified _ sh -> let +-- (terms, types) = results p namePortion +-- -- filter terms and types based on `sh : ShortHash` +-- terms' = filter (Reference.isPrefixOf sh . Referent.toReference . fst) terms +-- types' = filter (Reference.isPrefixOf sh . fst) types +-- in respond $ ListNames terms' types' +-- where +-- results p namePortion = let +-- name = Path.toName . Path.unprefix currentPath' . 
Path.snoc' p +-- $ namePortion +-- ns = prettyPrintNames0 +-- terms = [ (r, Names.namesForReferent ns r) +-- | r <- toList $ Names.termsNamed ns name ] +-- types = [ (r, Names.namesForReference ns r) +-- | r <- toList $ Names.typesNamed ns name ] +-- in (terms, types) + + LinkI mdValue srcs -> do + manageLinks False srcs [mdValue] Metadata.insert + syncRoot + + UnlinkI mdValue srcs -> do + manageLinks False srcs [mdValue] Metadata.delete + syncRoot + + -- > links List.map (.Docs .English) + -- > links List.map -- give me all the + -- > links Optional License + LinksI src mdTypeStr -> unlessError do + (ppe, out) <- getLinks input src (Right mdTypeStr) + lift do + numberedArgs .= fmap (HQ.toString . view _1) out + respond $ ListOfLinks ppe out + + DocsI src -> unlessError do + (ppe, out) <- getLinks input src (Left $ Set.singleton DD.docRef) + lift case out of + [(_name, ref, _tm)] -> do + names <- basicPrettyPrintNames0 + doDisplay ConsoleLocation (Names3.Names names mempty) (Referent.Ref ref) + out -> do + numberedArgs .= fmap (HQ.toString . view _1) out + respond $ ListOfLinks ppe out + + CreateAuthorI authorNameSegment authorFullName -> do + initialBranch <- getAt currentPath' + AuthorInfo + guid@(guidRef, _, _) + author@(authorRef, _, _) + copyrightHolder@(copyrightHolderRef, _, _) <- + eval $ CreateAuthorInfo authorFullName + -- add the new definitions to the codebase and to the namespace + traverse_ (eval . uncurry3 PutTerm) [guid, author, copyrightHolder] + stepManyAt + [ BranchUtil.makeAddTermName (resolveSplit' authorPath) (d authorRef) mempty + , BranchUtil.makeAddTermName (resolveSplit' copyrightHolderPath) (d copyrightHolderRef) mempty + , BranchUtil.makeAddTermName (resolveSplit' guidPath) (d guidRef) mempty + ] + finalBranch <- getAt currentPath' + -- print some output + diffHelper (Branch.head initialBranch) (Branch.head finalBranch) >>= + respondNumbered + . 
uncurry (ShowDiffAfterCreateAuthor + authorNameSegment + (Path.unsplit' base) + currentPath') + where + d :: Reference.Id -> Referent + d = Referent.Ref . Reference.DerivedId + base :: Path.Split' = (Path.relativeEmpty', "metadata") + authorPath = base |> "authors" |> authorNameSegment + copyrightHolderPath = base |> "copyrightHolders" |> authorNameSegment + guidPath = authorPath |> "guid" + + MoveTermI src dest -> + case (toList (getHQ'Terms src), toList (getTerms dest)) of + ([r], []) -> do + stepManyAt + [ BranchUtil.makeDeleteTermName p r + , BranchUtil.makeAddTermName (resolveSplit' dest) r (mdSrc r)] + success + ([_], rs) -> termExists dest (Set.fromList rs) + ([], _) -> termNotFound src + (rs, _) -> termConflicted src (Set.fromList rs) + where p = resolveSplit' (HQ'.toName <$> src) + mdSrc r = BranchUtil.getTermMetadataAt p r root0 + + MoveTypeI src dest -> + case (toList (getHQ'Types src), toList (getTypes dest)) of + ([r], []) -> do + stepManyAt + [ BranchUtil.makeDeleteTypeName p r + , BranchUtil.makeAddTypeName (resolveSplit' dest) r (mdSrc r) ] + success + ([_], rs) -> typeExists dest (Set.fromList rs) + ([], _) -> typeNotFound src + (rs, _) -> typeConflicted src (Set.fromList rs) + where + p = resolveSplit' (HQ'.toName <$> src) + mdSrc r = BranchUtil.getTypeMetadataAt p r root0 + + DeleteI hq -> delete getHQ'Terms getHQ'Types hq + DeleteTypeI hq -> delete (const Set.empty) getHQ'Types hq + DeleteTermI hq -> delete getHQ'Terms (const Set.empty) hq + + DisplayI outputLoc hq -> do + parseNames0 <- (`Names3.Names` mempty) <$> basicPrettyPrintNames0 + -- use suffixed names for resolving the argument to display + let parseNames = Names3.suffixify parseNames0 + let results = Names3.lookupHQTerm hq parseNames + if Set.null results then + respond $ SearchTermsNotFound [hq] + else if Set.size results > 1 then + respond $ TermAmbiguous hq results + -- ... 
but use the unsuffixed names for display + else doDisplay outputLoc parseNames0 (Set.findMin results) + + ShowDefinitionI outputLoc query -> do + (misses, results) <- hqNameQuerySuffixify query + results' <- loadSearchResults results + let termTypes :: Map.Map Reference (Type v Ann) + termTypes = + Map.fromList + [ (r, t) | SR'.Tm _ (Just t) (Referent.Ref r) _ <- results' ] + (collatedTypes, collatedTerms) = collateReferences + (mapMaybe SR'.tpReference results') + (mapMaybe SR'.tmReferent results') + -- load the `collatedTerms` and types into a Map Reference.Id Term/Type + -- for later + loadedDerivedTerms <- + fmap (Map.fromList . catMaybes) . for (toList collatedTerms) $ \case + Reference.DerivedId i -> fmap (i,) <$> eval (LoadTerm i) + Reference.Builtin{} -> pure Nothing + loadedDerivedTypes <- + fmap (Map.fromList . catMaybes) . for (toList collatedTypes) $ \case + Reference.DerivedId i -> fmap (i,) <$> eval (LoadType i) + Reference.Builtin{} -> pure Nothing + -- Populate DisplayThings for the search results, in anticipation of + -- displaying the definitions. + loadedDisplayTerms :: Map Reference (DisplayThing (Term v Ann)) <- + fmap Map.fromList . for (toList collatedTerms) $ \case + r@(Reference.DerivedId i) -> do + let tm = Map.lookup i loadedDerivedTerms + -- We add a type annotation to the term using if it doesn't + -- already have one that the user provided + pure . (r, ) $ case liftA2 (,) tm (Map.lookup r termTypes) of + Nothing -> MissingThing i + Just (tm, typ) -> case tm of + Term.Ann' _ _ -> RegularThing tm + _ -> RegularThing (Term.ann (ABT.annotation tm) tm typ) + r@(Reference.Builtin _) -> pure (r, BuiltinThing) + let loadedDisplayTypes :: Map Reference (DisplayThing (DD.Decl v Ann)) + loadedDisplayTypes = + Map.fromList . (`fmap` toList collatedTypes) $ \case + r@(Reference.DerivedId i) -> + (r,) . 
maybe (MissingThing i) RegularThing + $ Map.lookup i loadedDerivedTypes + r@(Reference.Builtin _) -> (r, BuiltinThing) + -- the SR' deps include the result term/type names, and the + let deps = foldMap SR'.labeledDependencies results' + <> foldMap Term.labeledDependencies loadedDerivedTerms + printNames <- makePrintNamesFromLabeled' deps + + -- We might like to make sure that the user search terms get used as + -- the names in the pretty-printer, but the current implementation + -- doesn't. + ppe <- prettyPrintEnvDecl printNames + let loc = case outputLoc of + ConsoleLocation -> Nothing + FileLocation path -> Just path + LatestFileLocation -> fmap fst latestFile' <|> Just "scratch.u" + do + unless (null loadedDisplayTypes && null loadedDisplayTerms) $ + eval . Notify $ + DisplayDefinitions loc ppe loadedDisplayTypes loadedDisplayTerms + unless (null misses) $ + eval . Notify $ SearchTermsNotFound misses + -- We set latestFile to be programmatically generated, if we + -- are viewing these definitions to a file - this will skip the + -- next update for that file (which will happen immediately) + latestFile .= ((, True) <$> loc) + + FindPatchI -> do + let patches = + [ Path.toName $ Path.snoc p seg + | (p, b) <- Branch.toList0 currentBranch0 + , (seg, _) <- Map.toList (Branch._edits b) ] + respond $ ListOfPatches $ Set.fromList patches + numberedArgs .= fmap Name.toString patches + + FindShallowI pathArg -> do + prettyPrintNames0 <- basicPrettyPrintNames0 + ppe <- fmap PPE.suffixifiedPPE . prettyPrintEnvDecl $ Names prettyPrintNames0 mempty + let pathArgAbs = resolveToAbsolute pathArg + b0 <- Branch.head <$> getAt pathArgAbs + let + hqTerm b0 ns r = + let refs = Star3.lookupD1 ns . _terms $ b0 + in case length refs of + 1 -> HQ'.fromName ns + _ -> HQ'.take hqLength $ HQ'.fromNamedReferent ns r + hqType b0 ns r = + let refs = Star3.lookupD1 ns . 
_types $ b0 + in case length refs of + 1 -> HQ'.fromName ns + _ -> HQ'.take hqLength $ HQ'.fromNamedReference ns r + defnCount b = + (R.size . deepTerms $ Branch.head b) + + (R.size . deepTypes $ Branch.head b) + + termEntries <- for (R.toList . Star3.d1 $ _terms b0) $ + \(r, ns) -> do + ot <- loadReferentType r + pure $ ShallowTermEntry r (hqTerm b0 ns r) ot + let + typeEntries = + [ ShallowTypeEntry r (hqType b0 ns r) + | (r, ns) <- R.toList . Star3.d1 $ _types b0 ] + branchEntries = + [ ShallowBranchEntry ns (defnCount b) + | (ns, b) <- Map.toList $ _children b0 ] + patchEntries = + [ ShallowPatchEntry ns + | (ns, (_h, _mp)) <- Map.toList $ _edits b0 ] + let + entries :: [ShallowListEntry v Ann] + entries = sort $ termEntries ++ typeEntries ++ branchEntries ++ patchEntries + entryToHQString :: ShallowListEntry v Ann -> String + -- caching the result as an absolute path, for easier jumping around + entryToHQString e = fixup $ case e of + ShallowTypeEntry _ hq -> HQ'.toString hq + ShallowTermEntry _ hq _ -> HQ'.toString hq + ShallowBranchEntry ns _ -> NameSegment.toString ns + ShallowPatchEntry ns -> NameSegment.toString ns + where + fixup s = + if last pathArgStr == '.' + then pathArgStr ++ s + else pathArgStr ++ "." ++ s + pathArgStr = show pathArgAbs + numberedArgs .= fmap entryToHQString entries + respond $ ListShallow ppe entries + where + + SearchByNameI isVerbose _showAll ws -> do + prettyPrintNames0 <- basicPrettyPrintNames0 + unlessError do + results <- case ws of + -- no query, list everything + [] -> pure . listBranch $ Branch.head currentBranch' + + -- type query + ":" : ws -> ExceptT (parseSearchType input (unwords ws)) >>= \typ -> ExceptT $ do + let named = Branch.deepReferents root0 + matches <- fmap toList . eval $ GetTermsOfType typ + matches <- filter (`Set.member` named) <$> + if null matches then do + respond NoExactTypeMatches + fmap toList . 
eval $ GetTermsMentioningType typ + else pure matches + let results = + -- in verbose mode, aliases are shown, so we collapse all + -- aliases to a single search result; in non-verbose mode, + -- a separate result may be shown for each alias + (if isVerbose then uniqueBy SR.toReferent else id) $ + searchResultsFor prettyPrintNames0 matches [] + pure . pure $ results + + -- name query + (map HQ.unsafeFromString -> qs) -> do + ns <- lift basicPrettyPrintNames0 + let srs = searchBranchScored ns fuzzyNameDistance qs + pure $ uniqueBy SR.toReferent srs + lift do + numberedArgs .= fmap searchResultToHQString results + results' <- loadSearchResults results + ppe <- prettyPrintEnv . Names3.suffixify =<< + makePrintNamesFromLabeled' + (foldMap SR'.labeledDependencies results') + respond $ ListOfDefinitions ppe isVerbose results' + + ResolveTypeNameI hq -> + zeroOneOrMore (getHQ'Types hq) (typeNotFound hq) go (typeConflicted hq) + where + conflicted = getHQ'Types (fmap HQ'.toNameOnly hq) + makeDelete = + BranchUtil.makeDeleteTypeName (resolveSplit' (HQ'.toName <$> hq)) + go r = stepManyAt . fmap makeDelete . toList . Set.delete r $ conflicted + + ResolveTermNameI hq -> do + refs <- getHQ'TermsIncludingHistorical hq + zeroOneOrMore refs (termNotFound hq) go (termConflicted hq) + where + conflicted = getHQ'Terms (fmap HQ'.toNameOnly hq) + makeDelete = + BranchUtil.makeDeleteTermName (resolveSplit' (HQ'.toName <$> hq)) + go r = stepManyAt . fmap makeDelete . toList . Set.delete r $ conflicted + + ReplaceTermI from to patchPath -> do + let patchPath' = fromMaybe defaultPatchPath patchPath + patch <- getPatchAt patchPath' + (fromMisses', fromHits) <- hqNameQuery [from] + (toMisses', toHits) <- hqNameQuery [to] + let fromRefs = termReferences fromHits + toRefs = termReferences toHits + -- Type hits are term misses + fromMisses = fromMisses' + <> (HQ'.toHQ . SR.typeName <$> typeResults fromHits) + toMisses = toMisses' + <> (HQ'.toHQ . 
SR.typeName <$> typeResults fromHits) + go :: Reference + -> Reference + -> Action m (Either Event Input) v () + go fr tr = do + mft <- eval $ LoadTypeOfTerm fr + mtt <- eval $ LoadTypeOfTerm tr + let termNotFound = respond . TermNotFound' + . SH.take hqLength + . Reference.toShortHash + case (mft, mtt) of + (Nothing, _) -> termNotFound fr + (_, Nothing) -> termNotFound tr + (Just ft, Just tt) -> do + let + patch' = + -- The modified patch + over Patch.termEdits + (R.insert fr (Replace tr (TermEdit.typing tt ft)) + . R.deleteDom fr) + patch + (patchPath'', patchName) = resolveSplit' patchPath' + saveAndApplyPatch patchPath'' patchName patch' + misses = fromMisses <> toMisses + ambiguous t rs = + let rs' = Set.map Referent.Ref $ Set.fromList rs + in case t of + HQ.HashOnly h -> + hashConflicted h rs' + (Path.parseHQSplit' . HQ.toString -> Right n) -> + termConflicted n rs' + _ -> respond . BadName $ HQ.toString t + unless (null misses) $ + respond $ SearchTermsNotFound misses + case (fromRefs, toRefs) of + ([fr], [tr]) -> go fr tr + ([_], tos) -> ambiguous to tos + (frs, _) -> ambiguous from frs + ReplaceTypeI from to patchPath -> do + let patchPath' = fromMaybe defaultPatchPath patchPath + (fromMisses', fromHits) <- hqNameQuery [from] + (toMisses', toHits) <- hqNameQuery [to] + patch <- getPatchAt patchPath' + let fromRefs = typeReferences fromHits + toRefs = typeReferences toHits + -- Term hits are type misses + fromMisses = fromMisses' + <> (HQ'.toHQ . SR.termName <$> termResults fromHits) + toMisses = toMisses' + <> (HQ'.toHQ . SR.termName <$> termResults fromHits) + go :: Reference + -> Reference + -> Action m (Either Event Input) v () + go fr tr = do + let patch' = + -- The modified patch + over Patch.typeEdits + (R.insert fr (TypeEdit.Replace tr) . 
R.deleteDom fr) patch + (patchPath'', patchName) = resolveSplit' patchPath' + saveAndApplyPatch patchPath'' patchName patch' + misses = fromMisses <> toMisses + ambiguous t rs = + let rs' = Set.map Referent.Ref $ Set.fromList rs + in case t of + HQ.HashOnly h -> + hashConflicted h rs' + (Path.parseHQSplit' . HQ.toString -> Right n) -> + typeConflicted n $ Set.fromList rs + -- This is unlikely to happen, as t has to be a parsed + -- hash-qualified name already. + -- Still, the types say we need to handle this case. + _ -> respond . BadName $ HQ.toString t + unless (null misses) $ + respond $ SearchTermsNotFound misses + case (fromRefs, toRefs) of + ([fr], [tr]) -> go fr tr + ([_], tos) -> ambiguous to tos + (frs, _) -> ambiguous from frs + LoadI maybePath -> + case maybePath <|> (fst <$> latestFile') of + Nothing -> respond NoUnisonFile + Just path -> do + res <- eval . LoadSource . Text.pack $ path + case res of + InvalidSourceNameError -> respond $ InvalidSourceName path + LoadError -> respond $ SourceLoadFailed path + LoadSuccess contents -> loadUnisonFile (Text.pack path) contents + + AddI hqs -> case uf of + Nothing -> respond NoUnisonFile + Just uf -> do + sr <- Slurp.disallowUpdates + . applySelection hqs uf + . toSlurpResult currentPath' uf + <$> slurpResultNames0 + let adds = Slurp.adds sr + when (Slurp.isNonempty sr) $ do + stepAtNoSync ( Path.unabsolute currentPath' + , doSlurpAdds adds uf) + eval . AddDefsToCodebase . filterBySlurpResult sr $ uf + ppe <- prettyPrintEnvDecl =<< + makeShadowedPrintNamesFromLabeled + (UF.termSignatureExternalLabeledDependencies uf) + (UF.typecheckedToNames0 uf) + respond $ SlurpOutput input (PPE.suffixifiedPPE ppe) sr + addDefaultMetadata adds + syncRoot + + PreviewAddI hqs -> case (latestFile', uf) of + (Just (sourceName, _), Just uf) -> do + sr <- Slurp.disallowUpdates + . applySelection hqs uf + . 
toSlurpResult currentPath' uf + <$> slurpResultNames0 + previewResponse sourceName sr uf + _ -> respond NoUnisonFile + + UpdateI maybePatchPath hqs -> case uf of + Nothing -> respond NoUnisonFile + Just uf -> do + let patchPath = fromMaybe defaultPatchPath maybePatchPath + slurpCheckNames0 <- slurpResultNames0 + currentPathNames0 <- currentPathNames0 + let sr = applySelection hqs uf + . toSlurpResult currentPath' uf + $ slurpCheckNames0 + addsAndUpdates = Slurp.updates sr <> Slurp.adds sr + fileNames0 = UF.typecheckedToNames0 uf + -- todo: display some error if typeEdits or termEdits itself contains a loop + typeEdits :: Map Name (Reference, Reference) + typeEdits = Map.fromList $ map f (toList $ SC.types (updates sr)) where + f v = case (toList (Names.typesNamed slurpCheckNames0 n) + ,toList (Names.typesNamed fileNames0 n)) of + ([old],[new]) -> (n, (old, new)) + _ -> error $ "Expected unique matches for " + ++ Var.nameStr v ++ " but got: " + ++ show otherwise + where n = Name.fromVar v + hashTerms :: Map Reference (Type v Ann) + hashTerms = Map.fromList (toList hashTerms0) where + hashTerms0 = (\(r, _, typ) -> (r, typ)) <$> UF.hashTerms uf + termEdits :: Map Name (Reference, Reference) + termEdits = Map.fromList $ map g (toList $ SC.terms (updates sr)) where + g v = case ( toList (Names.refTermsNamed slurpCheckNames0 n) + , toList (Names.refTermsNamed fileNames0 n)) of + ([old], [new]) -> (n, (old, new)) + _ -> error $ "Expected unique matches for " + ++ Var.nameStr v ++ " but got: " + ++ show otherwise + where n = Name.fromVar v + termDeprecations :: [(Name, Referent)] + termDeprecations = + [ (n, r) | (oldTypeRef,_) <- Map.elems typeEdits + , (n, r) <- Names3.constructorsForType0 oldTypeRef currentPathNames0 ] + + ye'ol'Patch <- getPatchAt patchPath + -- If `uf` updates a -> a', we want to replace all (a0 -> a) in patch + -- with (a0 -> a') in patch'. + -- So for all (a0 -> a) in patch, for all (a -> a') in `uf`, + -- we must know the type of a0, a, a'. 
+ let + -- we need: + -- all of the `old` references from the `new` edits, + -- plus all of the `old` references for edits from patch we're replacing + collectOldForTyping :: [(Reference, Reference)] -> Patch -> Set Reference + collectOldForTyping new old = foldl' f mempty (new ++ fromOld) where + f acc (r, _r') = Set.insert r acc + newLHS = Set.fromList . fmap fst $ new + fromOld :: [(Reference, Reference)] + fromOld = [ (r,r') | (r, TermEdit.Replace r' _) <- R.toList . Patch._termEdits $ old + , Set.member r' newLHS ] + neededTypes = collectOldForTyping (toList termEdits) ye'ol'Patch + + allTypes :: Map Reference (Type v Ann) <- + fmap Map.fromList . for (toList neededTypes) $ \r -> + (r,) . fromMaybe (Type.builtin External "unknown type") + <$> (eval . LoadTypeOfTerm) r + + let typing r1 r2 = case (Map.lookup r1 allTypes, Map.lookup r2 hashTerms) of + (Just t1, Just t2) + | Typechecker.isEqual t1 t2 -> TermEdit.Same + | Typechecker.isSubtype t1 t2 -> TermEdit.Subtype + | otherwise -> TermEdit.Different + e -> error $ "compiler bug: typing map not constructed properly\n" <> + "typing " <> show r1 <> " " <> show r2 <> " : " <> show e + + let updatePatch :: Patch -> Patch + updatePatch p = foldl' step2 p' termEdits + where + p' = foldl' step1 p typeEdits + step1 p (r,r') = Patch.updateType r (TypeEdit.Replace r') p + step2 p (r,r') = Patch.updateTerm typing r (TermEdit.Replace r' (typing r r')) p + (p, seg) = Path.toAbsoluteSplit currentPath' patchPath + updatePatches :: Branch0 m -> m (Branch0 m) + updatePatches = Branch.modifyPatches seg updatePatch + + when (Slurp.isNonempty sr) $ do + -- take a look at the `updates` from the SlurpResult + -- and make a patch diff to record a replacement from the old to new references + stepManyAtMNoSync + [( Path.unabsolute currentPath' + , pure . doSlurpUpdates typeEdits termEdits termDeprecations) + ,( Path.unabsolute currentPath' + , pure . doSlurpAdds addsAndUpdates uf) + ,( Path.unabsolute p, updatePatches )] + eval . 
AddDefsToCodebase . filterBySlurpResult sr $ uf + ppe <- prettyPrintEnvDecl =<< + makeShadowedPrintNamesFromLabeled + (UF.termSignatureExternalLabeledDependencies uf) + (UF.typecheckedToNames0 uf) + respond $ SlurpOutput input (PPE.suffixifiedPPE ppe) sr + -- propagatePatch prints TodoOutput + void $ propagatePatchNoSync (updatePatch ye'ol'Patch) currentPath' + addDefaultMetadata addsAndUpdates + syncRoot + + PreviewUpdateI hqs -> case (latestFile', uf) of + (Just (sourceName, _), Just uf) -> do + sr <- applySelection hqs uf + . toSlurpResult currentPath' uf + <$> slurpResultNames0 + previewResponse sourceName sr uf + _ -> respond NoUnisonFile + + TodoI patchPath branchPath' -> do + patch <- getPatchAt (fromMaybe defaultPatchPath patchPath) + doShowTodoOutput patch $ resolveToAbsolute branchPath' + + TestI showOk showFail -> do + let + testTerms = Map.keys . R4.d1 . uncurry R4.selectD34 isTest + . Branch.deepTermMetadata $ currentBranch0 + testRefs = Set.fromList [ r | Referent.Ref r <- toList testTerms ] + oks results = + [ (r, msg) + | (r, Term.Sequence' ts) <- Map.toList results + , Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts + , cid == DD.okConstructorId && ref == DD.testResultRef ] + fails results = + [ (r, msg) + | (r, Term.Sequence' ts) <- Map.toList results + , Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts + , cid == DD.failConstructorId && ref == DD.testResultRef ] + cachedTests <- fmap Map.fromList . 
eval $ LoadWatches UF.TestWatch testRefs + let stats = Output.CachedTests (Set.size testRefs) (Map.size cachedTests) + names <- makePrintNamesFromLabeled' $ + LD.referents testTerms <> + LD.referents [ DD.okConstructorReferent, DD.failConstructorReferent ] + ppe <- prettyPrintEnv names + respond $ TestResults stats ppe showOk showFail + (oks cachedTests) (fails cachedTests) + let toCompute = Set.difference testRefs (Map.keysSet cachedTests) + unless (Set.null toCompute) $ do + let total = Set.size toCompute + computedTests <- fmap join . for (toList toCompute `zip` [1..]) $ \(r,n) -> + case r of + Reference.DerivedId rid -> do + tm <- eval $ LoadTerm rid + case tm of + Nothing -> [] <$ respond (TermNotFound' . SH.take hqLength . Reference.toShortHash $ Reference.DerivedId rid) + Just tm -> do + respond $ TestIncrementalOutputStart ppe (n,total) r tm + tm' <- eval $ Evaluate1 ppe tm + case tm' of + Left e -> respond (EvaluationFailure e) $> [] + Right tm' -> do + eval $ PutWatch UF.TestWatch rid tm' + respond $ TestIncrementalOutputEnd ppe (n,total) r tm' + pure [(r, tm')] + r -> error $ "unpossible, tests can't be builtins: " <> show r + let m = Map.fromList computedTests + respond $ TestResults Output.NewlyComputed ppe showOk showFail (oks m) (fails m) + + -- ListBranchesI -> + -- eval ListBranches >>= respond . ListOfBranches currentBranchName' + -- DeleteBranchI branchNames -> withBranches branchNames $ \bnbs -> do + -- uniqueToDelete <- prettyUniqueDefinitions bnbs + -- let deleteBranches b = + -- traverse (eval . DeleteBranch) b >> respond (Success input) + -- if (currentBranchName' `elem` branchNames) + -- then respond DeletingCurrentBranch + -- else if null uniqueToDelete + -- then deleteBranches branchNames + -- else ifM (confirmedCommand input) + -- (deleteBranches branchNames) + -- (respond . 
DeleteBranchConfirmation $ uniqueToDelete) + + PropagatePatchI patchPath scopePath -> do + patch <- getPatchAt patchPath + updated <- propagatePatch inputDescription patch (resolveToAbsolute scopePath) + unless updated (respond $ NothingToPatch patchPath scopePath) + + ExecuteI main -> addRunMain main uf >>= \case + Nothing -> do + names0 <- basicPrettyPrintNames0 + ppe <- prettyPrintEnv (Names3.Names names0 mempty) + mainType <- eval RuntimeMain + respond $ NoMainFunction main ppe [mainType] + Just unisonFile -> do + ppe <- executePPE unisonFile + eval $ Execute ppe unisonFile + + -- UpdateBuiltinsI -> do + -- stepAt updateBuiltins + -- checkTodo + + MergeBuiltinsI -> do + -- these were added once, but maybe they've changed and need to be + -- added again. + let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) + (Map.fromList Builtin.builtinEffectDecls) + mempty mempty + eval $ AddDefsToCodebase uf + -- add the names; note, there are more names than definitions + -- due to builtin terms; so we don't just reuse `uf` above. + let srcb = BranchUtil.fromNames0 Builtin.names0 + _ <- updateAtM (currentPath' `snoc` "builtin") $ \destb -> + eval . Eval $ Branch.merge srcb destb + success + + MergeIOBuiltinsI -> do + -- these were added once, but maybe they've changed and need to be + -- added again. + let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) + (Map.fromList Builtin.builtinEffectDecls) + mempty mempty + eval $ AddDefsToCodebase uf + -- these have not neceesarily been added yet + eval $ AddDefsToCodebase IOSource.typecheckedFile' + + -- add the names; note, there are more names than definitions + -- due to builtin terms; so we don't just reuse `uf` above. + let names0 = Builtin.names0 + <> UF.typecheckedToNames0 @v IOSource.typecheckedFile' + let srcb = BranchUtil.fromNames0 names0 + _ <- updateAtM (currentPath' `snoc` "builtin") $ \destb -> + eval . 
Eval $ Branch.merge srcb destb + + success + + ListEditsI maybePath -> do + let (p, seg) = + maybe (Path.toAbsoluteSplit currentPath' defaultPatchPath) + (Path.toAbsoluteSplit currentPath') + maybePath + patch <- eval . Eval . Branch.getPatch seg . Branch.head =<< getAt p + ppe <- prettyPrintEnv =<< + makePrintNamesFromLabeled' (Patch.labeledDependencies patch) + respond $ ListEdits patch ppe + + PullRemoteBranchI mayRepo path syncMode -> unlessError do + ns <- resolveConfiguredGitUrl Pull path mayRepo + lift $ unlessGitError do + b <- importRemoteBranch ns syncMode + let msg = Just $ PullAlreadyUpToDate ns path + let destAbs = resolveToAbsolute path + lift $ mergeBranchAndPropagateDefaultPatch Branch.RegularMerge inputDescription msg b (Just path) destAbs + + PushRemoteBranchI mayRepo path syncMode -> do + let srcAbs = resolveToAbsolute path + srcb <- getAt srcAbs + let expandRepo (r, rp) = (r, Nothing, rp) + unlessError do + (repo, sbh, remotePath) <- + resolveConfiguredGitUrl Push path (fmap expandRepo mayRepo) + case sbh of + Nothing -> lift $ unlessGitError do + remoteRoot <- viewRemoteBranch (repo, Nothing, Path.empty) + newRemoteRoot <- lift . eval . Eval $ + Branch.modifyAtM remotePath (Branch.merge srcb) remoteRoot + syncRemoteRootBranch repo newRemoteRoot syncMode + lift $ respond Success + Just{} -> + error $ "impossible match, resolveConfiguredGitUrl shouldn't return" + <> " `Just` unless it was passed `Just`; and here it is passed" + <> " `Nothing` by `expandRepo`." + ListDependentsI hq -> -- todo: add flag to handle transitive efficiently + resolveHQToLabeledDependencies hq >>= \lds -> + if null lds + then respond $ LabeledReferenceNotFound hq + else for_ lds $ \ld -> do + dependents <- let + tp r = eval $ GetDependents r + tm (Referent.Ref r) = eval $ GetDependents r + tm (Referent.Con r _i _ct) = eval $ GetDependents r + in LD.fold tp tm ld + (missing, names0) <- eval . 
Eval $ Branch.findHistoricalRefs' dependents root' + let types = R.toList $ Names3.types0 names0 + let terms = fmap (second Referent.toReference) $ R.toList $ Names.terms names0 + let names = types <> terms + numberedArgs .= fmap (Text.unpack . Reference.toText) ((fmap snd names) <> toList missing) + respond $ ListDependents hqLength ld names missing + ListDependenciesI hq -> -- todo: add flag to handle transitive efficiently + resolveHQToLabeledDependencies hq >>= \lds -> + if null lds + then respond $ LabeledReferenceNotFound hq + else for_ lds $ \ld -> do + dependencies :: Set Reference <- let + tp r@(Reference.DerivedId i) = eval (LoadType i) <&> \case + Nothing -> error $ "What happened to " ++ show i ++ "?" + Just decl -> Set.delete r . DD.dependencies $ DD.asDataDecl decl + tp _ = pure mempty + tm (Referent.Ref r@(Reference.DerivedId i)) = eval (LoadTerm i) <&> \case + Nothing -> error $ "What happened to " ++ show i ++ "?" + Just tm -> Set.delete r $ Term.dependencies tm + tm con@(Referent.Con (Reference.DerivedId i) cid _ct) = eval (LoadType i) <&> \case + Nothing -> error $ "What happened to " ++ show i ++ "?" + Just decl -> case DD.typeOfConstructor (DD.asDataDecl decl) cid of + Nothing -> error $ "What happened to " ++ show con ++ "?" + Just tp -> Type.dependencies tp + tm _ = pure mempty + in LD.fold tp tm ld + (missing, names0) <- eval . Eval $ Branch.findHistoricalRefs' dependencies root' + let types = R.toList $ Names3.types0 names0 + let terms = fmap (second Referent.toReference) $ R.toList $ Names.terms names0 + let names = types <> terms + numberedArgs .= fmap (Text.unpack . Reference.toText) ((fmap snd names) <> toList missing) + respond $ ListDependencies hqLength ld names missing + DebugNumberedArgsI -> use numberedArgs >>= respond . DumpNumberedArgs + DebugBranchHistoryI -> + eval . Notify . DumpBitBooster (Branch.headHash currentBranch') =<< + (eval . 
Eval $ Causal.hashToRaw (Branch._history currentBranch')) + DebugTypecheckedUnisonFileI -> case uf of + Nothing -> respond NoUnisonFile + Just uf -> let + datas, effects, terms :: [(Name, Reference.Id)] + datas = [ (Name.fromVar v, r) | (v, (r, _d)) <- Map.toList $ UF.dataDeclarationsId' uf ] + effects = [ (Name.fromVar v, r) | (v, (r, _e)) <- Map.toList $ UF.effectDeclarationsId' uf ] + terms = [ (Name.fromVar v, r) | (v, (r, _tm, _tp)) <- Map.toList $ UF.hashTermsId uf ] + in eval . Notify $ DumpUnisonFileHashes hqLength datas effects terms + + DeprecateTermI {} -> notImplemented + DeprecateTypeI {} -> notImplemented + RemoveTermReplacementI from patchPath -> + doRemoveReplacement from patchPath True + RemoveTypeReplacementI from patchPath -> + doRemoveReplacement from patchPath False + ShowDefinitionByPrefixI {} -> notImplemented + UpdateBuiltinsI -> notImplemented + QuitI -> MaybeT $ pure Nothing + where + notImplemented = eval $ Notify NotImplemented + success = respond Success + + resolveDefaultMetadata :: Path.Absolute -> Action' m v [String] + resolveDefaultMetadata path = do + let superpaths = Path.ancestors path + xs <- for + superpaths + (\path -> do + mayNames <- + eval . ConfigLookup @[String] $ configKey "DefaultMetadata" path + pure . join $ toList mayNames + ) + pure . join $ toList xs + + configKey k p = + Text.intercalate "." . toList $ k :<| fmap + NameSegment.toText + (Path.toSeq $ Path.unabsolute p) + + -- Takes a maybe (namespace address triple); returns it as-is if `Just`; + -- otherwise, tries to load a value from .unisonConfig, and complains + -- if needed. + resolveConfiguredGitUrl + :: PushPull + -> Path' + -> Maybe RemoteNamespace + -> ExceptT (Output v) (Action' m v) RemoteNamespace + resolveConfiguredGitUrl pushPull destPath' = \case + Just ns -> pure ns + Nothing -> ExceptT do + let destPath = resolveToAbsolute destPath' + let configKey = gitUrlKey destPath + (eval . 
ConfigLookup) configKey >>= \case + Just url -> + case P.parse UriParser.repoPath (Text.unpack configKey) url of + Left e -> + pure . Left $ + ConfiguredGitUrlParseError pushPull destPath' url (show e) + Right (repo, Just sbh, remotePath) -> + pure . Left $ + ConfiguredGitUrlIncludesShortBranchHash pushPull repo sbh remotePath + Right ns -> + pure . Right $ ns + Nothing -> + pure . Left $ NoConfiguredGitUrl pushPull destPath' + + gitUrlKey = configKey "GitUrl" + + case e of + Right input -> lastInput .= Just input + _ -> pure () + +-- todo: compare to `getHQTerms` / `getHQTypes`. Is one universally better? +resolveHQToLabeledDependencies :: Functor m => HQ.HashQualified -> Action' m v (Set LabeledDependency) +resolveHQToLabeledDependencies = \case + HQ.NameOnly n -> do + parseNames <- Names3.suffixify0 <$> basicParseNames0 + let terms, types :: Set LabeledDependency + terms = Set.map LD.referent . R.lookupDom n $ Names3.terms0 parseNames + types = Set.map LD.typeRef . R.lookupDom n $ Names3.types0 parseNames + pure $ terms <> types + -- rationale: the hash should be unique enough that the name never helps + HQ.HashQualified _n sh -> resolveHashOnly sh + HQ.HashOnly sh -> resolveHashOnly sh + where + resolveHashOnly sh = do + terms <- eval $ TermReferentsByShortHash sh + types <- eval $ TypeReferencesByShortHash sh + pure $ Set.map LD.referent terms <> Set.map LD.typeRef types + +doDisplay :: Var v => OutputLocation -> Names -> Referent -> Action' m v () +doDisplay outputLoc names r = do + let tm = Term.fromReferent External r + ppe <- prettyPrintEnvDecl names + latestFile' <- use latestFile + let + loc = case outputLoc of + ConsoleLocation -> Nothing + FileLocation path -> Just path + LatestFileLocation -> fmap fst latestFile' <|> Just "scratch.u" + evalTerm r = fmap ErrorUtil.hush . 
eval $ + Evaluate1 (PPE.suffixifiedPPE ppe) (Term.ref External r) + loadTerm (Reference.DerivedId r) = eval $ LoadTerm r + loadTerm _ = pure Nothing + loadDecl (Reference.DerivedId r) = eval $ LoadType r + loadDecl _ = pure Nothing + rendered <- DisplayValues.displayTerm ppe loadTerm loadTypeOfTerm evalTerm loadDecl tm + respond $ DisplayRendered loc rendered + +getLinks :: (Var v, Monad m) + => Input + -> Path.HQSplit' + -> Either (Set Reference) (Maybe String) + -> ExceptT (Output v) + (Action' m v) + (PPE.PrettyPrintEnv, + -- e.g. ("Foo.doc", #foodoc, Just (#builtin.Doc) + [(HQ.HashQualified, Reference, Maybe (Type v Ann))]) +getLinks input src mdTypeStr = ExceptT $ do + let go = fmap Right . getLinks' src + case mdTypeStr of + Left s -> go (Just s) + Right Nothing -> go Nothing + Right (Just mdTypeStr) -> parseType input mdTypeStr >>= \case + Left e -> pure $ Left e + Right typ -> go . Just . Set.singleton $ Type.toReference typ + +getLinks' :: (Var v, Monad m) + => Path.HQSplit' -- definition to print metadata of + -> Maybe (Set Reference) -- return all metadata if empty + -> Action' m v (PPE.PrettyPrintEnv, + -- e.g. ("Foo.doc", #foodoc, Just (#builtin.Doc) + [(HQ.HashQualified, Reference, Maybe (Type v Ann))]) +getLinks' src selection0 = do + root0 <- Branch.head <$> use root + currentPath' <- use currentPath + let resolveSplit' = Path.fromAbsoluteSplit . Path.toAbsoluteSplit currentPath' + p = resolveSplit' src -- ex: the (parent,hqsegment) of `List.map` - `List` + -- all metadata (type+value) associated with name `src` + allMd = R4.d34 (BranchUtil.getTermMetadataHQNamed p root0) + <> R4.d34 (BranchUtil.getTypeMetadataHQNamed p root0) + allMd' = maybe allMd (`R.restrictDom` allMd) selection0 + -- then list the values after filtering by type + allRefs :: Set Reference = R.ran allMd' + sigs <- for (toList allRefs) (loadTypeOfTerm . Referent.Ref) + let deps = Set.map LD.termRef allRefs <> + Set.unions [ Set.map LD.typeRef . 
Type.dependencies $ t | Just t <- sigs ] + ppe <- prettyPrintEnvDecl =<< makePrintNamesFromLabeled' deps + let ppeDecl = PPE.unsuffixifiedPPE ppe + let sortedSigs = sortOn snd (toList allRefs `zip` sigs) + let out = [(PPE.termName ppeDecl (Referent.Ref r), r, t) | (r, t) <- sortedSigs ] + pure (PPE.suffixifiedPPE ppe, out) + +resolveShortBranchHash :: + ShortBranchHash -> ExceptT (Output v) (Action' m v) (Branch m) +resolveShortBranchHash hash = ExceptT do + hashSet <- eval $ BranchHashesByPrefix hash + len <- eval BranchHashLength + case Set.toList hashSet of + [] -> pure . Left $ NoBranchWithHash hash + [h] -> fmap Right . eval $ LoadLocalBranch h + _ -> pure . Left $ BranchHashAmbiguous hash (Set.map (SBH.fromHash len) hashSet) + +-- Returns True if the operation changed the namespace, False otherwise. +propagatePatchNoSync + :: (Monad m, Var v) + => Patch + -> Path.Absolute + -> Action' m v Bool +propagatePatchNoSync patch scopePath = stepAtMNoSync' + (Path.unabsolute scopePath, lift . lift . Propagate.propagateAndApply patch) + +-- Returns True if the operation changed the namespace, False otherwise. +propagatePatch :: (Monad m, Var v) => + InputDescription -> Patch -> Path.Absolute -> Action' m v Bool +propagatePatch inputDescription patch scopePath = + stepAtM' (inputDescription <> " (applying patch)") + (Path.unabsolute scopePath, + lift . lift . Propagate.propagateAndApply patch) + +-- | Create the args needed for showTodoOutput and call it +doShowTodoOutput :: Monad m => Patch -> Path.Absolute -> Action' m v () +doShowTodoOutput patch scopePath = do + scope <- getAt scopePath + let names0 = Branch.toNames0 (Branch.head scope) + -- only needs the local references to check for obsolete defs + let getPpe = do + names <- makePrintNamesFromLabeled' (Patch.labeledDependencies patch) + prettyPrintEnvDecl names + showTodoOutput getPpe patch names0 + +-- | Show todo output if there are any conflicts or edits. 
+showTodoOutput + :: Action' m v PPE.PrettyPrintEnvDecl + -- ^ Action that fetches the pretty print env. It's expensive because it + -- involves looking up historical names, so only call it if necessary. + -> Patch + -> Names0 + -> Action' m v () +showTodoOutput getPpe patch names0 = do + todo <- checkTodo patch names0 + if TO.noConflicts todo && TO.noEdits todo + then respond NoConflictsOrEdits + else do + numberedArgs .= + (Text.unpack . Reference.toText . view _2 <$> + fst (TO.todoFrontierDependents todo)) + ppe <- getPpe + respond $ TodoOutput ppe todo + +checkTodo :: Patch -> Names0 -> Action m i v (TO.TodoOutput v Ann) +checkTodo patch names0 = do + f <- computeFrontier (eval . GetDependents) patch names0 + let dirty = R.dom f + frontier = R.ran f + (frontierTerms, frontierTypes) <- loadDisplayInfo frontier + (dirtyTerms, dirtyTypes) <- loadDisplayInfo dirty + -- todo: something more intelligent here? + let scoreFn = const 1 + remainingTransitive <- + frontierTransitiveDependents (eval . 
GetDependents) names0 frontier + let + scoredDirtyTerms = + List.sortOn (view _1) [ (scoreFn r, r, t) | (r,t) <- dirtyTerms ] + scoredDirtyTypes = + List.sortOn (view _1) [ (scoreFn r, r, t) | (r,t) <- dirtyTypes ] + pure $ + TO.TodoOutput + (Set.size remainingTransitive) + (frontierTerms, frontierTypes) + (scoredDirtyTerms, scoredDirtyTypes) + (Names.conflicts names0) + (Patch.conflicts patch) + where + frontierTransitiveDependents :: + Monad m => (Reference -> m (Set Reference)) -> Names0 -> Set Reference -> m (Set Reference) + frontierTransitiveDependents dependents names0 rs = do + let branchDependents r = Set.filter (Names.contains names0) <$> dependents r + tdeps <- transitiveClosure branchDependents rs + -- we don't want the frontier in the result + pure $ tdeps `Set.difference` rs + +-- (d, f) when d is "dirty" (needs update), +-- f is in the frontier (an edited dependency of d), +-- and d depends on f +-- a ⋖ b = a depends directly on b +-- dirty(d) ∧ frontier(f) <=> not(edited(d)) ∧ edited(f) ∧ d ⋖ f +-- +-- The range of this relation is the frontier, and the domain is +-- the set of dirty references. +computeFrontier :: forall m . Monad m + => (Reference -> m (Set Reference)) -- eg Codebase.dependents codebase + -> Patch + -> Names0 + -> m (R.Relation Reference Reference) +computeFrontier getDependents patch names = let + edited :: Set Reference + edited = R.dom (Patch._termEdits patch) <> R.dom (Patch._typeEdits patch) + addDependents :: R.Relation Reference Reference -> Reference -> m (R.Relation Reference Reference) + addDependents dependents ref = + (\ds -> R.insertManyDom ds ref dependents) . Set.filter (Names.contains names) + <$> getDependents ref + in do + -- (r,r2) ∈ dependsOn if r depends on r2 + dependsOn <- foldM addDependents R.empty edited + -- Dirty is everything that `dependsOn` Frontier, minus already edited defns + pure $ R.filterDom (not . flip Set.member edited) dependsOn + +eval :: Command m i v a -> Action m i v a +eval = lift . 
lift . Free.eval + +confirmedCommand :: Input -> Action m i v Bool +confirmedCommand i = do + i0 <- use lastInput + pure $ Just i == i0 + +listBranch :: Branch0 m -> [SearchResult] +listBranch (Branch.toNames0 -> b) = + List.sortOn (\s -> (SR.name s, s)) (SR.fromNames b) + +-- | restores the full hash to these search results, for _numberedArgs purposes +searchResultToHQString :: SearchResult -> String +searchResultToHQString = \case + SR.Tm' n r _ -> HQ'.toString $ HQ'.requalify n r + SR.Tp' n r _ -> HQ'.toString $ HQ'.requalify n (Referent.Ref r) + _ -> error "unpossible match failure" + +-- Return a list of definitions whose names fuzzy match the given queries. +fuzzyNameDistance :: Name -> Name -> Maybe Int +fuzzyNameDistance (Name.toString -> q) (Name.toString -> n) = + Find.simpleFuzzyScore q n + +-- return `name` and `name....` +_searchBranchPrefix :: Branch m -> Name -> [SearchResult] +_searchBranchPrefix b n = case Path.unsnoc (Path.fromName n) of + Nothing -> [] + Just (init, last) -> case Branch.getAt init b of + Nothing -> [] + Just b -> SR.fromNames . Names.prefix0 n $ names0 + where + lastName = Path.toName (Path.singleton last) + subnames = Branch.toNames0 . Branch.head $ + Branch.getAt' (Path.singleton last) b + rootnames = + Names.filter (== lastName) . + Branch.toNames0 . set Branch.children mempty $ Branch.head b + names0 = rootnames <> Names.prefix0 lastName subnames + +searchResultsFor :: Names0 -> [Referent] -> [Reference] -> [SearchResult] +searchResultsFor ns terms types = + [ SR.termSearchResult ns name ref + | ref <- terms + , name <- toList (Names.namesForReferent ns ref) + ] <> + [ SR.typeSearchResult ns name ref + | ref <- types + , name <- toList (Names.namesForReference ns ref) + ] + +searchBranchScored :: forall score. (Ord score) + => Names0 + -> (Name -> Name -> Maybe score) + -> [HQ.HashQualified] + -> [SearchResult] +searchBranchScored names0 score queries = + nubOrd . fmap snd . 
toList $ searchTermNamespace <> searchTypeNamespace + where + searchTermNamespace = foldMap do1query queries + where + do1query :: HQ.HashQualified -> Set (Maybe score, SearchResult) + do1query q = foldMap (score1hq q) (R.toList . Names.terms $ names0) + score1hq :: HQ.HashQualified -> (Name, Referent) -> Set (Maybe score, SearchResult) + score1hq query (name, ref) = case query of + HQ.NameOnly qn -> + pair qn + HQ.HashQualified qn h | h `SH.isPrefixOf` Referent.toShortHash ref -> + pair qn + HQ.HashOnly h | h `SH.isPrefixOf` Referent.toShortHash ref -> + Set.singleton (Nothing, result) + _ -> mempty + where + result = SR.termSearchResult names0 name ref + pair qn = case score qn name of + Just score -> Set.singleton (Just score, result) + Nothing -> mempty + searchTypeNamespace = foldMap do1query queries + where + do1query :: HQ.HashQualified -> Set (Maybe score, SearchResult) + do1query q = foldMap (score1hq q) (R.toList . Names.types $ names0) + score1hq :: HQ.HashQualified -> (Name, Reference) -> Set (Maybe score, SearchResult) + score1hq query (name, ref) = case query of + HQ.NameOnly qn -> + pair qn + HQ.HashQualified qn h | h `SH.isPrefixOf` Reference.toShortHash ref -> + pair qn + HQ.HashOnly h | h `SH.isPrefixOf` Reference.toShortHash ref -> + Set.singleton (Nothing, result) + _ -> mempty + where + result = SR.typeSearchResult names0 name ref + pair qn = case score qn name of + Just score -> Set.singleton (Just score, result) + Nothing -> mempty + +-- Separates type references from term references and returns types and terms, +-- respectively. For terms that are constructors, turns them into their data +-- types. 
+collateReferences + :: Foldable f + => Foldable g + => f Reference -- types requested + -> g Referent -- terms requested, including ctors + -> (Set Reference, Set Reference) +collateReferences (toList -> types) (toList -> terms) = + let terms' = [ r | Referent.Ref r <- terms ] + types' = [ r | Referent.Con r _ _ <- terms ] + in (Set.fromList types' <> Set.fromList types, Set.fromList terms') + +-- | The output list (of lists) corresponds to the query list. +searchBranchExact :: Int -> Names -> [HQ.HashQualified] -> [[SearchResult]] +searchBranchExact len names queries = let + searchTypes :: HQ.HashQualified -> [SearchResult] + searchTypes query = + -- a bunch of references will match a HQ ref. + let refs = toList $ Names3.lookupHQType query names in + refs <&> \r -> + let hqNames = Names3.typeName len r names in + let primaryName = + last . sortOn (\n -> HQ.matchesNamedReference (HQ'.toName n) r query) + $ toList hqNames in + let aliases = Set.delete primaryName hqNames in + SR.typeResult primaryName r aliases + searchTerms :: HQ.HashQualified -> [SearchResult] + searchTerms query = + -- a bunch of references will match a HQ ref. + let refs = toList $ Names3.lookupHQTerm query names in + refs <&> \r -> + let hqNames = Names3.termName len r names in + let primaryName = + last . sortOn (\n -> HQ.matchesNamedReferent (HQ'.toName n) r query) + $ toList hqNames in + let aliases = Set.delete primaryName hqNames in + SR.termResult primaryName r aliases + in [ searchTypes q <> searchTerms q | q <- queries ] + +respond :: Output v -> Action m i v () +respond output = eval $ Notify output + +respondNumbered :: NumberedOutput v -> Action m i v () +respondNumbered output = do + args <- eval $ NotifyNumbered output + unless (null args) $ + numberedArgs .= toList args + +unlessError :: ExceptT (Output v) (Action' m v) () -> Action' m v () +unlessError ma = runExceptT ma >>= either (eval . 
Notify) pure + +unlessError' :: (e -> Output v) -> ExceptT e (Action' m v) () -> Action' m v () +unlessError' f ma = unlessError $ withExceptT f ma + +-- | supply `dest0` if you want to print diff messages +-- supply unchangedMessage if you want to display it if merge had no effect +mergeBranchAndPropagateDefaultPatch :: (Monad m, Var v) => Branch.MergeMode -> + InputDescription -> Maybe (Output v) -> Branch m -> Maybe Path.Path' -> Path.Absolute -> Action' m v () +mergeBranchAndPropagateDefaultPatch mode inputDescription unchangedMessage srcb dest0 dest = + ifM (mergeBranch mode inputDescription srcb dest0 dest) + (loadPropagateDiffDefaultPatch inputDescription dest0 dest) + (for_ unchangedMessage respond) + where + mergeBranch :: (Monad m, Var v) => + Branch.MergeMode -> InputDescription -> Branch m -> Maybe Path.Path' -> Path.Absolute -> Action' m v Bool + mergeBranch mode inputDescription srcb dest0 dest = unsafeTime "Merge Branch" $ do + destb <- getAt dest + merged <- eval . Eval $ Branch.merge' mode srcb destb + b <- updateAtM inputDescription dest (const $ pure merged) + for_ dest0 $ \dest0 -> + diffHelper (Branch.head destb) (Branch.head merged) >>= + respondNumbered . uncurry (ShowDiffAfterMerge dest0 dest) + pure b + +loadPropagateDiffDefaultPatch :: (Monad m, Var v) => + InputDescription -> Maybe Path.Path' -> Path.Absolute -> Action' m v () +loadPropagateDiffDefaultPatch inputDescription dest0 dest = unsafeTime "Propagate Default Patch" $ do + original <- getAt dest + patch <- eval . Eval $ Branch.getPatch defaultPatchNameSegment (Branch.head original) + patchDidChange <- propagatePatch inputDescription patch dest + when patchDidChange . for_ dest0 $ \dest0 -> do + patched <- getAt dest + let patchPath = snoc dest0 defaultPatchNameSegment + diffHelper (Branch.head original) (Branch.head patched) >>= + respondNumbered . 
uncurry (ShowDiffAfterMergePropagate dest0 dest patchPath) + +getAt :: Functor m => Path.Absolute -> Action m i v (Branch m) +getAt (Path.Absolute p) = + use root <&> fromMaybe Branch.empty . Branch.getAt p + +-- Update a branch at the given path, returning `True` if +-- an update occurred and false otherwise +updateAtM :: Applicative m + => InputDescription + -> Path.Absolute + -> (Branch m -> Action m i v (Branch m)) + -> Action m i v Bool +updateAtM reason (Path.Absolute p) f = do + b <- use lastSavedRoot + b' <- Branch.modifyAtM p f b + updateRoot b' reason + pure $ b /= b' + +stepAt + :: forall m i v + . Monad m + => InputDescription + -> (Path, Branch0 m -> Branch0 m) + -> Action m i v () +stepAt cause = stepManyAt @m @[] cause . pure + +stepAtNoSync :: forall m i v. Monad m + => (Path, Branch0 m -> Branch0 m) + -> Action m i v () +stepAtNoSync = stepManyAtNoSync @m @[] . pure + +stepAtM :: forall m i v. Monad m + => InputDescription + -> (Path, Branch0 m -> m (Branch0 m)) + -> Action m i v () +stepAtM cause = stepManyAtM @m @[] cause . pure + +stepAtM' + :: forall m i v + . Monad m + => InputDescription + -> (Path, Branch0 m -> Action m i v (Branch0 m)) + -> Action m i v Bool +stepAtM' cause = stepManyAtM' @m @[] cause . pure + +stepAtMNoSync' + :: forall m i v + . Monad m + => (Path, Branch0 m -> Action m i v (Branch0 m)) + -> Action m i v Bool +stepAtMNoSync' = stepManyAtMNoSync' @m @[] . 
pure + +stepManyAt + :: (Monad m, Foldable f) + => InputDescription + -> f (Path, Branch0 m -> Branch0 m) + -> Action m i v () +stepManyAt reason actions = do + stepManyAtNoSync actions + b <- use root + updateRoot b reason + +-- Like stepManyAt, but doesn't update the root +stepManyAtNoSync + :: (Monad m, Foldable f) + => f (Path, Branch0 m -> Branch0 m) + -> Action m i v () +stepManyAtNoSync actions = do + b <- use root + let new = Branch.stepManyAt actions b + root .= new + +stepManyAtM :: (Monad m, Foldable f) + => InputDescription + -> f (Path, Branch0 m -> m (Branch0 m)) + -> Action m i v () +stepManyAtM reason actions = do + stepManyAtMNoSync actions + b <- use root + updateRoot b reason + +stepManyAtMNoSync :: (Monad m, Foldable f) + => f (Path, Branch0 m -> m (Branch0 m)) + -> Action m i v () +stepManyAtMNoSync actions = do + b <- use root + b' <- eval . Eval $ Branch.stepManyAtM actions b + root .= b' + +stepManyAtM' :: (Monad m, Foldable f) + => InputDescription + -> f (Path, Branch0 m -> Action m i v (Branch0 m)) + -> Action m i v Bool +stepManyAtM' reason actions = do + b <- use root + b' <- Branch.stepManyAtM actions b + updateRoot b' reason + pure (b /= b') + +stepManyAtMNoSync' :: (Monad m, Foldable f) + => f (Path, Branch0 m -> Action m i v (Branch0 m)) + -> Action m i v Bool +stepManyAtMNoSync' actions = do + b <- use root + b' <- Branch.stepManyAtM actions b + root .= b' + pure (b /= b') + +updateRoot :: Branch m -> InputDescription -> Action m i v () +updateRoot new reason = do + old <- use lastSavedRoot + when (old /= new) $ do + root .= new + eval $ SyncLocalRootBranch new + eval $ AppendToReflog reason old new + lastSavedRoot .= new + +-- cata for 0, 1, or more elements of a Foldable +-- tries to match as lazily as possible +zeroOneOrMore :: Foldable f => f a -> b -> (a -> b) -> (f a -> b) -> b +zeroOneOrMore f zero one more = case toList f of + _ : _ : _ -> more f + a : _ -> one a + _ -> zero + +-- Goal: If `remaining = root - toBeDeleted` 
contains definitions X which +-- depend on definitions Y not in `remaining` (which should also be in +-- `toBeDeleted`), then complain by returning (Y, X). +getEndangeredDependents :: forall m. Monad m + => (Reference -> m (Set Reference)) + -> Names0 + -> Names0 + -> m (Names0, Names0) +getEndangeredDependents getDependents toDelete root = do + let remaining = root `Names.difference` toDelete + toDelete', remaining', extinct :: Set Reference + toDelete' = Names.allReferences toDelete + remaining' = Names.allReferences remaining -- left over after delete + extinct = toDelete' `Set.difference` remaining' -- deleting and not left over + accumulateDependents m r = getDependents r <&> \ds -> Map.insert r ds m + dependentsOfExtinct :: Map Reference (Set Reference) <- + foldM accumulateDependents mempty extinct + let orphaned, endangered, failed :: Set Reference + orphaned = fold dependentsOfExtinct + endangered = orphaned `Set.intersection` remaining' + failed = Set.filter hasEndangeredDependent extinct + hasEndangeredDependent r = any (`Set.member` endangered) + (dependentsOfExtinct Map.! r) + pure ( Names.restrictReferences failed toDelete + , Names.restrictReferences endangered root `Names.difference` toDelete) + +-- Applies the selection filter to the adds/updates of a slurp result, +-- meaning that adds/updates should only contain the selection or its transitive +-- dependencies, any unselected transitive dependencies of the selection will +-- be added to `extraDefinitions`. +applySelection :: forall v a. 
Var v => + [HQ'.HashQualified] -> UF.TypecheckedUnisonFile v a -> SlurpResult v -> SlurpResult v +applySelection [] _ = id +applySelection hqs file = \sr@SlurpResult{..} -> + sr { adds = adds `SC.intersection` closed + , updates = updates `SC.intersection` closed + , extraDefinitions = closed `SC.difference` selection + } + where + selectedNames0 = + Names.filterByHQs (Set.fromList hqs) (UF.typecheckedToNames0 file) + selection, closed :: SlurpComponent v + selection = SlurpComponent selectedTypes selectedTerms + closed = SC.closeWithDependencies file selection + selectedTypes, selectedTerms :: Set v + selectedTypes = Set.map var $ R.dom (Names.types selectedNames0) + selectedTerms = Set.map var $ R.dom (Names.terms selectedNames0) + +var :: Var v => Name -> v +var name = Var.named (Name.toText name) + +toSlurpResult + :: forall v + . Var v + => Path.Absolute + -> UF.TypecheckedUnisonFile v Ann + -> Names0 + -> SlurpResult v +toSlurpResult currentPath uf existingNames = + Slurp.subtractComponent (conflicts <> ctorCollisions) $ SlurpResult + uf + mempty + adds + dups + mempty + conflicts + updates + termCtorCollisions + ctorTermCollisions + termAliases + typeAliases + mempty + where + fileNames0 = UF.typecheckedToNames0 uf + + sc :: R.Relation Name Referent -> R.Relation Name Reference -> SlurpComponent v + sc terms types = SlurpComponent { terms = Set.map var (R.dom terms) + , types = Set.map var (R.dom types) } + + -- conflict (n,r) if n is conflicted in names0 + conflicts :: SlurpComponent v + conflicts = sc terms types where + terms = R.filterDom (conflicted . Names.termsNamed existingNames) + (Names.terms fileNames0) + types = R.filterDom (conflicted . 
Names.typesNamed existingNames) + (Names.types fileNames0) + conflicted s = Set.size s > 1 + + ctorCollisions :: SlurpComponent v + ctorCollisions = + mempty { SC.terms = termCtorCollisions <> ctorTermCollisions } + + -- termCtorCollision (n,r) if (n, r' /= r) exists in existingNames and + -- r is Ref and r' is Con + termCtorCollisions :: Set v + termCtorCollisions = Set.fromList + [ var n + | (n, Referent.Ref{}) <- R.toList (Names.terms fileNames0) + , [r@Referent.Con{}] <- [toList $ Names.termsNamed existingNames n] + -- ignore collisions w/ ctors of types being updated + , Set.notMember (Referent.toReference r) typesToUpdate + ] + + -- the set of typerefs that are being updated by this file + typesToUpdate :: Set Reference + typesToUpdate = Set.fromList + [ r + | (n, r') <- R.toList (Names.types fileNames0) + , r <- toList (Names.typesNamed existingNames n) + , r /= r' + ] + + -- ctorTermCollisions (n,r) if (n, r' /= r) exists in names0 and r is Con + -- and r' is Ref except we relaxed it to where r' can be Con or Ref + -- what if (n,r) and (n,r' /= r) exists in names and r, r' are Con + ctorTermCollisions :: Set v + ctorTermCollisions = Set.fromList + [ var n + | (n, Referent.Con{}) <- R.toList (Names.terms fileNames0) + , r <- toList $ Names.termsNamed existingNames n + -- ignore collisions w/ ctors of types being updated + , Set.notMember (Referent.toReference r) typesToUpdate + , Set.notMember (var n) (terms dups) + ] + + -- duplicate (n,r) if (n,r) exists in names0 + dups :: SlurpComponent v + dups = sc terms types where + terms = R.intersection (Names.terms existingNames) (Names.terms fileNames0) + types = R.intersection (Names.types existingNames) (Names.types fileNames0) + + -- update (n,r) if (n,r' /= r) exists in existingNames and r, r' are Ref + updates :: SlurpComponent v + updates = SlurpComponent (Set.fromList types) (Set.fromList terms) where + terms = + [ var n + | (n, r'@Referent.Ref{}) <- R.toList (Names.terms fileNames0) + , [r@Referent.Ref{}] 
<- [toList $ Names.termsNamed existingNames n] + , r' /= r + ] + types = + [ var n + | (n, r') <- R.toList (Names.types fileNames0) + , [r] <- [toList $ Names.typesNamed existingNames n] + , r' /= r + ] + + buildAliases + :: R.Relation Name Referent + -> R.Relation Name Referent + -> Set v + -> Map v Slurp.Aliases + buildAliases existingNames namesFromFile duplicates = Map.fromList + [ ( var n + , if null aliasesOfOld + then Slurp.AddAliases aliasesOfNew + else Slurp.UpdateAliases aliasesOfOld aliasesOfNew + ) + | (n, r@Referent.Ref{}) <- R.toList namesFromFile + -- All the refs whose names include `n`, and are not `r` + , let + refs = Set.delete r $ R.lookupDom n existingNames + aliasesOfNew = + Set.map (Path.unprefixName currentPath) . Set.delete n $ + R.lookupRan r existingNames + aliasesOfOld = + Set.map (Path.unprefixName currentPath) . Set.delete n . R.dom $ + R.restrictRan existingNames refs + , not (null aliasesOfNew && null aliasesOfOld) + , Set.notMember (var n) duplicates + ] + + termAliases :: Map v Slurp.Aliases + termAliases = buildAliases (Names.terms existingNames) + (Names.terms fileNames0) + (SC.terms dups) + + typeAliases :: Map v Slurp.Aliases + typeAliases = buildAliases (R.mapRan Referent.Ref $ Names.types existingNames) + (R.mapRan Referent.Ref $ Names.types fileNames0) + (SC.types dups) + + -- (n,r) is in `adds` if n isn't in existingNames + adds = sc terms types where + terms = addTerms (Names.terms existingNames) (Names.terms fileNames0) + types = addTypes (Names.types existingNames) (Names.types fileNames0) + addTerms existingNames = R.filter go where + go (n, Referent.Ref{}) = (not . R.memberDom n) existingNames + go _ = False + addTypes existingNames = R.filter go where + go (n, _) = (not . 
R.memberDom n) existingNames + +filterBySlurpResult :: Ord v + => SlurpResult v + -> UF.TypecheckedUnisonFile v Ann + -> UF.TypecheckedUnisonFile v Ann +filterBySlurpResult SlurpResult{..} + (UF.TypecheckedUnisonFileId + dataDeclarations' + effectDeclarations' + topLevelComponents' + watchComponents + hashTerms) = + UF.TypecheckedUnisonFileId datas effects tlcs watches hashTerms' + where + keep = updates <> adds + keepTerms = SC.terms keep + keepTypes = SC.types keep + hashTerms' = Map.restrictKeys hashTerms keepTerms + datas = Map.restrictKeys dataDeclarations' keepTypes + effects = Map.restrictKeys effectDeclarations' keepTypes + tlcs = filter (not.null) $ fmap (List.filter filterTLC) topLevelComponents' + watches = filter (not.null.snd) $ fmap (second (List.filter filterTLC)) watchComponents + filterTLC (v,_,_) = Set.member v keepTerms + +-- updates the namespace for adding `slurp` +doSlurpAdds :: forall m v. (Monad m, Var v) + => SlurpComponent v + -> UF.TypecheckedUnisonFile v Ann + -> (Branch0 m -> Branch0 m) +doSlurpAdds slurp uf = Branch.stepManyAt0 (typeActions <> termActions) + where + typeActions = map doType . toList $ SC.types slurp + termActions = map doTerm . 
toList $ + SC.terms slurp <> Slurp.constructorsFor (SC.types slurp) uf + names = UF.typecheckedToNames0 uf + tests = Set.fromList $ fst <$> UF.watchesOfKind UF.TestWatch (UF.discardTypes uf) + (isTestType, isTestValue) = isTest + md v = + if Set.member v tests then Metadata.singleton isTestType isTestValue + else Metadata.empty + doTerm :: v -> (Path, Branch0 m -> Branch0 m) + doTerm v = case toList (Names.termsNamed names (Name.fromVar v)) of + [] -> errorMissingVar v + [r] -> case Path.splitFromName (Name.fromVar v) of + Nothing -> errorEmptyVar + Just split -> BranchUtil.makeAddTermName split r (md v) + wha -> error $ "Unison bug, typechecked file w/ multiple terms named " + <> Var.nameStr v <> ": " <> show wha + doType :: v -> (Path, Branch0 m -> Branch0 m) + doType v = case toList (Names.typesNamed names (Name.fromVar v)) of + [] -> errorMissingVar v + [r] -> case Path.splitFromName (Name.fromVar v) of + Nothing -> errorEmptyVar + Just split -> BranchUtil.makeAddTypeName split r Metadata.empty + wha -> error $ "Unison bug, typechecked file w/ multiple types named " + <> Var.nameStr v <> ": " <> show wha + errorEmptyVar = error "encountered an empty var name" + errorMissingVar v = error $ "expected to find " ++ show v ++ " in " ++ show uf + +doSlurpUpdates :: Monad m + => Map Name (Reference, Reference) + -> Map Name (Reference, Reference) + -> [(Name, Referent)] + -> (Branch0 m -> Branch0 m) +doSlurpUpdates typeEdits termEdits deprecated b0 = + Branch.stepManyAt0 (typeActions <> termActions <> deprecateActions) b0 + where + typeActions = join . map doType . Map.toList $ typeEdits + termActions = join . map doTerm . Map.toList $ termEdits + deprecateActions = join . 
map doDeprecate $ deprecated where + doDeprecate (n, r) = case Path.splitFromName n of + Nothing -> errorEmptyVar + Just split -> [BranchUtil.makeDeleteTermName split r] + + -- we copy over the metadata on the old thing + -- todo: if the thing being updated, m, is metadata for something x in b0 + -- update x's md to reference `m` + doType, doTerm :: + (Name, (Reference, Reference)) -> [(Path, Branch0 m -> Branch0 m)] + doType (n, (old, new)) = case Path.splitFromName n of + Nothing -> errorEmptyVar + Just split -> [ BranchUtil.makeDeleteTypeName split old + , BranchUtil.makeAddTypeName split new oldMd ] + where + oldMd = BranchUtil.getTypeMetadataAt split old b0 + doTerm (n, (old, new)) = case Path.splitFromName n of + Nothing -> errorEmptyVar + Just split -> [ BranchUtil.makeDeleteTermName split (Referent.Ref old) + , BranchUtil.makeAddTermName split (Referent.Ref new) oldMd ] + where + -- oldMd is the metadata linked to the old definition + -- we relink it to the new definition + oldMd = BranchUtil.getTermMetadataAt split (Referent.Ref old) b0 + errorEmptyVar = error "encountered an empty var name" + +loadSearchResults :: Ord v => [SR.SearchResult] -> Action m i v [SearchResult' v Ann] +loadSearchResults = traverse loadSearchResult + where + loadSearchResult = \case + SR.Tm (SR.TermResult name r aliases) -> do + typ <- loadReferentType r + pure $ SR'.Tm name typ r aliases + SR.Tp (SR.TypeResult name r aliases) -> do + dt <- loadTypeDisplayThing r + pure $ SR'.Tp name dt r aliases + +loadDisplayInfo :: + Set Reference -> Action m i v ([(Reference, Maybe (Type v Ann))] + ,[(Reference, DisplayThing (DD.Decl v Ann))]) +loadDisplayInfo refs = do + termRefs <- filterM (eval . IsTerm) (toList refs) + typeRefs <- filterM (eval . 
IsType) (toList refs) + terms <- forM termRefs $ \r -> (r,) <$> eval (LoadTypeOfTerm r) + types <- forM typeRefs $ \r -> (r,) <$> loadTypeDisplayThing r + pure (terms, types) + +loadReferentType :: Referent -> Action m i v (Maybe (Type v Ann)) +loadReferentType = \case + Referent.Ref r -> eval $ LoadTypeOfTerm r + Referent.Con r cid _ -> getTypeOfConstructor r cid + where + getTypeOfConstructor :: Reference -> Int -> Action m i v (Maybe (Type v Ann)) + getTypeOfConstructor (Reference.DerivedId r) cid = do + maybeDecl <- eval $ LoadType r + pure $ case maybeDecl of + Nothing -> Nothing + Just decl -> DD.typeOfConstructor (either DD.toDataDecl id decl) cid + getTypeOfConstructor r cid = + error $ "Don't know how to getTypeOfConstructor " ++ show r ++ " " ++ show cid + +loadTypeDisplayThing :: Reference -> Action m i v (DisplayThing (DD.Decl v Ann)) +loadTypeDisplayThing = \case + Reference.Builtin _ -> pure BuiltinThing + Reference.DerivedId id -> + maybe (MissingThing id) RegularThing <$> eval (LoadType id) + +lexedSource :: Monad m => SourceName -> Source -> Action' m v (Names, LexedSource) +lexedSource name src = do + let tokens = L.lexer (Text.unpack name) (Text.unpack src) + getHQ = \case + L.Backticks s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh) + L.WordyId s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh) + L.SymbolyId s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh) + L.Hash sh -> Just (HQ.HashOnly sh) + _ -> Nothing + hqs = Set.fromList . mapMaybe (getHQ . 
L.payload) $ tokens + parseNames <- makeHistoricalParsingNames hqs + pure (parseNames, (src, tokens)) + +prettyPrintEnv :: Names -> Action' m v PPE.PrettyPrintEnv +prettyPrintEnv ns = eval CodebaseHashLength <&> (`PPE.fromNames` ns) + +prettyPrintEnvDecl :: Names -> Action' m v PPE.PrettyPrintEnvDecl +prettyPrintEnvDecl ns = eval CodebaseHashLength <&> (`PPE.fromNamesDecl` ns) + +parseSearchType :: (Monad m, Var v) + => Input -> String -> Action' m v (Either (Output v) (Type v Ann)) +parseSearchType input typ = fmap Type.removeAllEffectVars <$> parseType input typ + +parseType :: (Monad m, Var v) + => Input -> String -> Action' m v (Either (Output v) (Type v Ann)) +parseType input src = do + -- `show Input` is the name of the "file" being lexed + (names0, lexed) <- lexedSource (Text.pack $ show input) (Text.pack src) + parseNames <- Names3.suffixify0 <$> basicParseNames0 + let names = Names3.push (Names3.currentNames names0) + (Names3.Names parseNames (Names3.oldNames names0)) + e <- eval $ ParseType names lexed + pure $ case e of + Left err -> Left $ TypeParseError src err + Right typ -> case Type.bindNames mempty (Names3.currentNames names) + $ Type.generalizeLowercase mempty typ of + Left es -> Left $ ParseResolutionFailures src (toList es) + Right typ -> Right typ + +makeShadowedPrintNamesFromLabeled + :: Monad m => Set LabeledDependency -> Names0 -> Action' m v Names +makeShadowedPrintNamesFromLabeled deps shadowing = + Names3.shadowing shadowing <$> makePrintNamesFromLabeled' deps + +getTermsIncludingHistorical + :: Monad m => Path.HQSplit -> Branch0 m -> Action' m v (Set Referent) +getTermsIncludingHistorical (p, hq) b = case Set.toList refs of + [] -> case hq of + HQ'.HashQualified n hs -> do + names <- findHistoricalHQs + $ Set.fromList [HQ.HashQualified (Name.unsafeFromText (NameSegment.toText n)) hs] + pure . 
R.ran $ Names.terms names + _ -> pure Set.empty + _ -> pure refs + where refs = BranchUtil.getTerm (p, hq) b + +-- discards inputs that aren't hashqualified; +-- I'd enforce it with finer-grained types if we had them. +findHistoricalHQs :: Monad m => Set HQ.HashQualified -> Action' m v Names0 +findHistoricalHQs lexedHQs0 = do + root <- use root + currentPath <- use currentPath + let + -- omg this nightmare name-to-path parsing code is littered everywhere. + -- We need to refactor so that the absolute-ness of a name isn't represented + -- by magical text combinations. + -- Anyway, this function takes a name, tries to determine whether it is + -- relative or absolute, and tries to return the corresponding name that is + -- /relative/ to the root. + preprocess n = case Name.toString n of + -- some absolute name that isn't just "." + '.' : t@(_:_) -> Name.unsafeFromString t + -- something in current path + _ -> if Path.isRoot currentPath then n + else Name.joinDot (Path.toName . Path.unabsolute $ currentPath) n + + lexedHQs = Set.map (fmap preprocess) . Set.filter HQ.hasHash $ lexedHQs0 + (_missing, rawHistoricalNames) <- eval . Eval $ Branch.findHistoricalHQs lexedHQs root + pure rawHistoricalNames + +makeShadowedPrintNamesFromHQ :: Monad m => Set HQ.HashQualified -> Names0 -> Action' m v Names +makeShadowedPrintNamesFromHQ lexedHQs shadowing = do + rawHistoricalNames <- findHistoricalHQs lexedHQs + basicNames0 <- basicPrettyPrintNames0 + currentPath <- use currentPath + -- The basic names go into "current", but are shadowed by "shadowing". + -- They go again into "historical" as a hack that makes them available HQ-ed. + pure $ + Names3.shadowing + shadowing + (Names basicNames0 (fixupNamesRelative currentPath rawHistoricalNames)) + +makePrintNamesFromLabeled' + :: Monad m => Set LabeledDependency -> Action' m v Names +makePrintNamesFromLabeled' deps = do + root <- use root + currentPath <- use currentPath + (_missing, rawHistoricalNames) <- eval . 
Eval $ Branch.findHistoricalRefs + deps + root + basicNames0 <- basicPrettyPrintNames0 + pure $ Names basicNames0 (fixupNamesRelative currentPath rawHistoricalNames) + +-- Any absolute names in the input which have `currentPath` as a prefix +-- are converted to names relative to current path. All other names are +-- converted to absolute names. For example: +-- +-- e.g. if currentPath = .foo.bar +-- then name foo.bar.baz becomes baz +-- name cat.dog becomes .cat.dog +fixupNamesRelative :: Path.Absolute -> Names0 -> Names0 +fixupNamesRelative currentPath' = Names3.map0 fixName where + prefix = Path.toName (Path.unabsolute currentPath') + fixName n = if currentPath' == Path.absoluteEmpty then n else + fromMaybe (Name.makeAbsolute n) (Name.stripNamePrefix prefix n) + +makeHistoricalParsingNames :: + Monad m => Set HQ.HashQualified -> Action' m v Names +makeHistoricalParsingNames lexedHQs = do + rawHistoricalNames <- findHistoricalHQs lexedHQs + basicNames0 <- basicParseNames0 + currentPath <- use currentPath + pure $ Names basicNames0 + (Names3.makeAbsolute0 rawHistoricalNames <> + fixupNamesRelative currentPath rawHistoricalNames) + +basicParseNames0, basicPrettyPrintNames0, slurpResultNames0 :: Functor m => Action' m v Names0 +basicParseNames0 = fst <$> basicNames0' +basicPrettyPrintNames0 = snd <$> basicNames0' +-- we check the file against everything in the current path +slurpResultNames0 = currentPathNames0 + +currentPathNames0 :: Functor m => Action' m v Names0 +currentPathNames0 = do + currentPath' <- use currentPath + currentBranch' <- getAt currentPath' + pure $ Branch.toNames0 (Branch.head currentBranch') + +-- implementation detail of baseicParseNames0 and basicPrettyPrintNames0 +basicNames0' :: Functor m => Action' m v (Names0, Names0) +basicNames0' = do + root' <- use root + currentPath' <- use currentPath + currentBranch' <- getAt currentPath' + let root0 = Branch.head root' + absoluteRootNames0 = Names3.makeAbsolute0 (Branch.toNames0 root0) + 
currentBranch0 = Branch.head currentBranch' + currentPathNames0 = Branch.toNames0 currentBranch0 + -- all names, but with local names in their relative form only, rather + -- than absolute; external names appear as absolute + currentAndExternalNames0 = currentPathNames0 `Names3.unionLeft0` absDot externalNames where + absDot = Names.prefix0 (Name.unsafeFromText "") + externalNames = rootNames `Names.difference` pathPrefixed currentPathNames0 + rootNames = Branch.toNames0 root0 + pathPrefixed = case Path.unabsolute currentPath' of + Path.Path (toList -> []) -> id + p -> Names.prefix0 (Path.toName p) + -- parsing should respond to local and absolute names + parseNames00 = currentPathNames0 <> absoluteRootNames0 + -- pretty-printing should use local names where available + prettyPrintNames00 = currentAndExternalNames0 + pure (parseNames00, prettyPrintNames00) + +-- Given a typechecked file with a main function called `mainName` +-- of the type `'{IO} ()`, adds an extra binding which +-- forces the `main` function. +-- +-- If that function doesn't exist in the typechecked file, the +-- codebase is consulted. +addRunMain + :: (Monad m, Var v) + => String + -> Maybe (TypecheckedUnisonFile v Ann) + -> Action' m v (Maybe (TypecheckedUnisonFile v Ann)) +addRunMain mainName Nothing = do + parseNames0 <- basicParseNames0 + let loadTypeOfTerm ref = eval $ LoadTypeOfTerm ref + mainType <- eval RuntimeMain + mainToFile <$> + getMainTerm loadTypeOfTerm parseNames0 mainName mainType + where + mainToFile (MainTerm.NotAFunctionName _) = Nothing + mainToFile (MainTerm.NotFound _) = Nothing + mainToFile (MainTerm.BadType _) = Nothing + mainToFile (MainTerm.Success hq tm typ) = Just $ + let v = Var.named (HQ.toText hq) in + UF.typecheckedUnisonFile mempty mempty mempty [("main",[(v, tm, typ)])] -- mempty +addRunMain mainName (Just uf) = do + let components = join $ UF.topLevelComponents uf + let mainComponent = filter ((\v -> Var.nameStr v == mainName) . 
view _1) components + mainType <- eval RuntimeMain + case mainComponent of + [(v, tm, ty)] -> pure $ let + v2 = Var.freshIn (Set.fromList [v]) v + a = ABT.annotation tm + in + if Typechecker.isSubtype ty mainType then Just $ let + runMain = DD.forceTerm a a (Term.var a v) + in UF.typecheckedUnisonFile + (UF.dataDeclarationsId' uf) + (UF.effectDeclarationsId' uf) + (UF.topLevelComponents' uf) + (UF.watchComponents uf <> [("main", [(v2, runMain, mainType)])]) + else Nothing + _ -> addRunMain mainName Nothing + +executePPE + :: (Var v, Monad m) + => TypecheckedUnisonFile v a + -> Action' m v PPE.PrettyPrintEnv +executePPE unisonFile = + -- voodoo + prettyPrintEnv =<< + makeShadowedPrintNamesFromLabeled + (UF.termSignatureExternalLabeledDependencies unisonFile) + (UF.typecheckedToNames0 unisonFile) + +diffHelper :: Monad m + => Branch0 m + -> Branch0 m + -> Action' m v (PPE.PrettyPrintEnv, OBranchDiff.BranchDiffOutput v Ann) +diffHelper before after = do + hqLength <- eval CodebaseHashLength + diff <- eval . Eval $ BranchDiff.diff0 before after + names0 <- basicPrettyPrintNames0 + ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl (Names names0 mempty) + (ppe,) <$> + OBranchDiff.toOutput + loadTypeOfTerm + declOrBuiltin + hqLength + (Branch.toNames0 before) + (Branch.toNames0 after) + ppe + diff + +loadTypeOfTerm :: Referent -> Action m i v (Maybe (Type v Ann)) +loadTypeOfTerm (Referent.Ref r) = eval $ LoadTypeOfTerm r +loadTypeOfTerm (Referent.Con (Reference.DerivedId r) cid _) = do + decl <- eval $ LoadType r + case decl of + Just (either DD.toDataDecl id -> dd) -> pure $ DD.typeOfConstructor dd cid + Nothing -> pure Nothing +loadTypeOfTerm Referent.Con{} = error $ + reportBug "924628772" "Attempt to load a type declaration which is a builtin!" + +declOrBuiltin :: Reference -> Action m i v (Maybe (DD.DeclOrBuiltin v Ann)) +declOrBuiltin r = case r of + Reference.Builtin{} -> + pure . 
fmap DD.Builtin $ Map.lookup r Builtin.builtinConstructorType + Reference.DerivedId id -> + fmap DD.Decl <$> eval (LoadType id) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Input.hs b/parser-typechecker/src/Unison/Codebase/Editor/Input.hs new file mode 100644 index 0000000000..f25665d400 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/Input.hs @@ -0,0 +1,144 @@ +module Unison.Codebase.Editor.Input + ( Input(..) + , Event(..) + , OutputLocation(..) + , PatchPath + , BranchId, parseBranchId + , HashOrHQSplit' + ) where + +import Unison.Prelude + +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.HashQualified as HQ +import qualified Unison.HashQualified' as HQ' +import Unison.Codebase.Path ( Path' ) +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.Editor.RemoteRepo +import Unison.ShortHash (ShortHash) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import qualified Unison.Codebase.ShortBranchHash as SBH +import Unison.Codebase.SyncMode ( SyncMode ) +import qualified Data.Text as Text +import Unison.NameSegment ( NameSegment ) + +data Event + = UnisonFileChanged SourceName Source + | IncomingRootBranch (Set Branch.Hash) + +type Source = Text -- "id x = x\nconst a b = a" +type SourceName = Text -- "foo.u" or "buffer 7" +type PatchPath = Path.Split' +type BranchId = Either ShortBranchHash Path' +type HashOrHQSplit' = Either ShortHash Path.HQSplit' + +parseBranchId :: String -> Either String BranchId +parseBranchId ('#':s) = case SBH.fromText (Text.pack s) of + Nothing -> Left "Invalid hash, expected a base32hex string." + Just h -> pure $ Left h +parseBranchId s = Right <$> Path.parsePath' s + +data Input + -- names stuff: + -- directory ops + -- `Link` must describe a repo and a source path within that repo. 
+ -- clone w/o merge, error if would clobber + = ForkLocalBranchI (Either ShortBranchHash Path') Path' + -- merge first causal into destination + | MergeLocalBranchI Path' Path' Branch.MergeMode + | PreviewMergeLocalBranchI Path' Path' + | DiffNamespaceI Path' Path' -- old new + | PullRemoteBranchI (Maybe RemoteNamespace) Path' SyncMode + | PushRemoteBranchI (Maybe RemoteHead) Path' SyncMode + | CreatePullRequestI RemoteNamespace RemoteNamespace + | LoadPullRequestI RemoteNamespace RemoteNamespace Path' + | ResetRootI (Either ShortBranchHash Path') + -- todo: Q: Does it make sense to publish to not-the-root of a Github repo? + -- Does it make sense to fork from not-the-root of a Github repo? + -- change directory + | SwitchBranchI Path' + | PopBranchI + -- > names foo + -- > names foo.bar + -- > names .foo.bar + -- > names .foo.bar#asdflkjsdf + -- > names #sdflkjsdfhsdf + | NamesI HQ.HashQualified + | AliasTermI HashOrHQSplit' Path.Split' + | AliasTypeI HashOrHQSplit' Path.Split' + | AliasManyI [Path.HQSplit] Path' + -- Move = Rename; It's an HQSplit' not an HQSplit', meaning the arg has to have a name. + | MoveTermI Path.HQSplit' Path.Split' + | MoveTypeI Path.HQSplit' Path.Split' + | MoveBranchI (Maybe Path.Split') Path.Split' + | MovePatchI Path.Split' Path.Split' + | CopyPatchI Path.Split' Path.Split' + -- delete = unname + | DeleteI Path.HQSplit' + | DeleteTermI Path.HQSplit' + | DeleteTypeI Path.HQSplit' + | DeleteBranchI (Maybe Path.Split') + | DeletePatchI Path.Split' + -- resolving naming conflicts within `branchpath` + -- Add the specified name after deleting all others for a given reference + -- within a given branch. 
+ | ResolveTermNameI Path.HQSplit' + | ResolveTypeNameI Path.HQSplit' + -- edits stuff: + | LoadI (Maybe FilePath) + | AddI [HQ'.HashQualified] + | PreviewAddI [HQ'.HashQualified] + | UpdateI (Maybe PatchPath) [HQ'.HashQualified] + | PreviewUpdateI [HQ'.HashQualified] + | TodoI (Maybe PatchPath) Path' + | PropagatePatchI PatchPath Path' + | ListEditsI (Maybe PatchPath) + -- -- create and remove update directives + | DeprecateTermI PatchPath Path.HQSplit' + | DeprecateTypeI PatchPath Path.HQSplit' + | ReplaceTermI HQ.HashQualified HQ.HashQualified (Maybe PatchPath) + | ReplaceTypeI HQ.HashQualified HQ.HashQualified (Maybe PatchPath) + | RemoveTermReplacementI HQ.HashQualified (Maybe PatchPath) + | RemoveTypeReplacementI HQ.HashQualified (Maybe PatchPath) + | UndoI + -- First `Maybe Int` is cap on number of results, if any + -- Second `Maybe Int` is cap on diff elements shown, if any + | HistoryI (Maybe Int) (Maybe Int) BranchId + -- execute an IO thunk + | ExecuteI String + | TestI Bool Bool -- TestI showSuccesses showFailures + -- metadata + -- `link metadata definitions` (adds metadata to all of `definitions`) + | LinkI HQ.HashQualified [Path.HQSplit'] + -- `unlink metadata definitions` (removes metadata from all of `definitions`) + | UnlinkI HQ.HashQualified [Path.HQSplit'] + -- links from + | LinksI Path.HQSplit' (Maybe String) + | CreateAuthorI NameSegment {- identifier -} Text {- name -} + | DisplayI OutputLocation HQ.HashQualified + | DocsI Path.HQSplit' + -- other + | SearchByNameI Bool Bool [String] -- SearchByName isVerbose showAll query + | FindShallowI Path' + | FindPatchI + | ShowDefinitionI OutputLocation [HQ.HashQualified] + | ShowDefinitionByPrefixI OutputLocation [HQ.HashQualified] + | ShowReflogI + | UpdateBuiltinsI + | MergeBuiltinsI + | MergeIOBuiltinsI + | ListDependenciesI HQ.HashQualified + | ListDependentsI HQ.HashQualified + | DebugNumberedArgsI + | DebugBranchHistoryI + | DebugTypecheckedUnisonFileI + | QuitI + deriving (Eq, Show) + +-- 
Some commands, like `view`, can dump output to either console or a file. +data OutputLocation + = ConsoleLocation + | LatestFileLocation + | FileLocation FilePath + -- ClipboardLocation + deriving (Eq, Show) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Output.hs b/parser-typechecker/src/Unison/Codebase/Editor/Output.hs new file mode 100644 index 0000000000..abc51a34cb --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/Output.hs @@ -0,0 +1,359 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codebase.Editor.Output + ( Output(..) + , NumberedOutput(..) + , NumberedArgs + , ListDetailed + , ShallowListEntry(..) + , HistoryTail(..) + , TestReportStats(..) + , UndoFailureReason(..) + , PushPull(..) + , ReflogEntry(..) + , pushPull + , isFailure + , isNumberedFailure + ) where + +import Unison.Prelude + +import Unison.Codebase.Editor.Input +import Unison.Codebase.Editor.SlurpResult (SlurpResult(..)) +import Unison.Codebase.GitError +import Unison.Codebase.Path (Path', Path) +import Unison.Codebase.Patch (Patch) +import Unison.Name ( Name ) +import Unison.Names2 ( Names ) +import Unison.Parser ( Ann ) +import qualified Unison.Reference as Reference +import Unison.Reference ( Reference ) +import Unison.Referent ( Referent ) +import Unison.DataDeclaration ( Decl ) +import Unison.Util.Relation (Relation) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Editor.SlurpResult as SR +import qualified Unison.Codebase.Metadata as Metadata +import qualified Unison.Codebase.Path as Path +import qualified Unison.Codebase.Runtime as Runtime +import qualified Unison.HashQualified as HQ +import qualified Unison.HashQualified' as HQ' +import qualified Unison.Parser as Parser +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Typechecker.Context as Context +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Pretty as P +import Unison.Codebase.Editor.DisplayThing (DisplayThing) 
+import qualified Unison.Codebase.Editor.TodoOutput as TO +import Unison.Codebase.Editor.SearchResult' (SearchResult') +import Unison.Term (Term) +import Unison.Type (Type) +import qualified Unison.Names3 as Names +import qualified Data.Set as Set +import Unison.NameSegment (NameSegment) +import Unison.ShortHash (ShortHash) +import Unison.Var (Var) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import Unison.Codebase.Editor.RemoteRepo as RemoteRepo +import Unison.Codebase.Editor.Output.BranchDiff (BranchDiffOutput) +import Unison.LabeledDependency (LabeledDependency) + +type ListDetailed = Bool +type SourceName = Text +type NumberedArgs = [String] + +data PushPull = Push | Pull deriving (Eq, Ord, Show) + +pushPull :: a -> a -> PushPull -> a +pushPull push pull p = case p of + Push -> push + Pull -> pull + +data NumberedOutput v + = ShowDiffNamespace Path.Absolute Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterUndo PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterDeleteDefinitions PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterDeleteBranch Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterModifyBranch Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterMerge Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterMergePropagate Path.Path' Path.Absolute Path.Path' PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterMergePreview Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterPull Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + | ShowDiffAfterCreatePR RemoteNamespace RemoteNamespace PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + -- + | ShowDiffAfterCreateAuthor NameSegment Path.Path' Path.Absolute PPE.PrettyPrintEnv (BranchDiffOutput v Ann) + +-- | ShowDiff + +data Output v + -- Generic Success response; we might consider deleting this. 
+ = Success + -- User did `add` or `update` before typechecking a file? + | NoUnisonFile + | InvalidSourceName String + | SourceLoadFailed String + -- No main function, the [Type v Ann] are the allowed types + | NoMainFunction String PPE.PrettyPrintEnv [Type v Ann] + | BranchEmpty (Either ShortBranchHash Path') + | BranchNotEmpty Path' + | LoadPullRequest RemoteNamespace RemoteNamespace Path' Path' Path' Path' + | CreatedNewBranch Path.Absolute + | BranchAlreadyExists Path' + | PatchAlreadyExists Path.Split' + | NoExactTypeMatches + | TypeAlreadyExists Path.Split' (Set Reference) + | TypeParseError String (Parser.Err v) + | ParseResolutionFailures String [Names.ResolutionFailure v Ann] + | TypeHasFreeVars (Type v Ann) + | TermAlreadyExists Path.Split' (Set Referent) + | LabeledReferenceAmbiguous Int HQ.HashQualified (Set LabeledDependency) + | LabeledReferenceNotFound HQ.HashQualified + | DeleteNameAmbiguous Int Path.HQSplit' (Set Referent) (Set Reference) + | TermAmbiguous HQ.HashQualified (Set Referent) + | HashAmbiguous ShortHash (Set Referent) + | BranchHashAmbiguous ShortBranchHash (Set ShortBranchHash) + | BranchNotFound Path' + | NameNotFound Path.HQSplit' + | PatchNotFound Path.Split' + | TypeNotFound Path.HQSplit' + | TermNotFound Path.HQSplit' + | TypeNotFound' ShortHash + | TermNotFound' ShortHash + | SearchTermsNotFound [HQ.HashQualified] + -- ask confirmation before deleting the last branch that contains some defns + -- `Path` is one of the paths the user has requested to delete, and is paired + -- with whatever named definitions would not have any remaining names if + -- the path is deleted. 
+ | DeleteBranchConfirmation + [(Path', (Names, [SearchResult' v Ann]))] + -- CantDelete input couldntDelete becauseTheseStillReferenceThem + | CantDelete PPE.PrettyPrintEnv [SearchResult' v Ann] [SearchResult' v Ann] + | DeleteEverythingConfirmation + | DeletedEverything + | ListNames Int -- hq length to print References + [(Reference, Set HQ'.HashQualified)] -- type match, type names + [(Referent, Set HQ'.HashQualified)] -- term match, term names + -- list of all the definitions within this branch + | ListOfDefinitions PPE.PrettyPrintEnv ListDetailed [SearchResult' v Ann] + | ListOfLinks PPE.PrettyPrintEnv [(HQ.HashQualified, Reference, Maybe (Type v Ann))] + | ListShallow PPE.PrettyPrintEnv [ShallowListEntry v Ann] + | ListOfPatches (Set Name) + -- show the result of add/update + | SlurpOutput Input PPE.PrettyPrintEnv (SlurpResult v) + -- Original source, followed by the errors: + | ParseErrors Text [Parser.Err v] + | TypeErrors Text PPE.PrettyPrintEnv [Context.ErrorNote v Ann] + | DisplayConflicts (Relation Name Referent) (Relation Name Reference) + | EvaluationFailure Runtime.Error + | Evaluated SourceFileContents + PPE.PrettyPrintEnv + [(v, Term v ())] + (Map v (Ann, UF.WatchKind, Term v (), Runtime.IsCacheHit)) + | Typechecked SourceName PPE.PrettyPrintEnv (SlurpResult v) (UF.TypecheckedUnisonFile v Ann) + | DisplayRendered (Maybe FilePath) (P.Pretty P.ColorText) + -- "display" definitions, possibly to a FilePath on disk (e.g. editing) + | DisplayDefinitions (Maybe FilePath) + PPE.PrettyPrintEnvDecl + (Map Reference (DisplayThing (Decl v Ann))) + (Map Reference (DisplayThing (Term v Ann))) + -- | Invariant: there's at least one conflict or edit in the TodoOutput. 
+ | TodoOutput PPE.PrettyPrintEnvDecl (TO.TodoOutput v Ann) + | TestIncrementalOutputStart PPE.PrettyPrintEnv (Int,Int) Reference (Term v Ann) + | TestIncrementalOutputEnd PPE.PrettyPrintEnv (Int,Int) Reference (Term v Ann) + | TestResults TestReportStats + PPE.PrettyPrintEnv ShowSuccesses ShowFailures + [(Reference, Text)] -- oks + [(Reference, Text)] -- fails + | CantUndo UndoFailureReason + | ListEdits Patch PPE.PrettyPrintEnv + + -- new/unrepresented references followed by old/removed + -- todo: eventually replace these sets with [SearchResult' v Ann] + -- and a nicer render. + | BustedBuiltins (Set Reference) (Set Reference) + | GitError Input GitError + | ConfiguredMetadataParseError Path' String (P.Pretty P.ColorText) + | NoConfiguredGitUrl PushPull Path' + | ConfiguredGitUrlParseError PushPull Path' Text String + | ConfiguredGitUrlIncludesShortBranchHash PushPull RemoteRepo ShortBranchHash Path + | DisplayLinks PPE.PrettyPrintEnvDecl Metadata.Metadata + (Map Reference (DisplayThing (Decl v Ann))) + (Map Reference (DisplayThing (Term v Ann))) + | MetadataMissingType PPE.PrettyPrintEnv Referent + | MetadataAmbiguous HQ.HashQualified PPE.PrettyPrintEnv [Referent] + -- todo: tell the user to run `todo` on the same patch they just used + | NothingToPatch PatchPath Path' + | PatchNeedsToBeConflictFree + | PatchInvolvesExternalDependents PPE.PrettyPrintEnv (Set Reference) + | WarnIncomingRootBranch ShortBranchHash (Set ShortBranchHash) + | StartOfCurrentPathHistory + | History (Maybe Int) [(ShortBranchHash, Names.Diff)] HistoryTail + | ShowReflog [ReflogEntry] + | PullAlreadyUpToDate RemoteNamespace Path' + | MergeAlreadyUpToDate Path' Path' + | PreviewMergeAlreadyUpToDate Path' Path' + -- | No conflicts or edits remain for the current patch. 
+ | NoConflictsOrEdits + | NotImplemented + | NoBranchWithHash ShortBranchHash + | ListDependencies Int LabeledDependency [(Name, Reference)] (Set Reference) + | ListDependents Int LabeledDependency [(Name, Reference)] (Set Reference) + | DumpNumberedArgs NumberedArgs + | DumpBitBooster Branch.Hash (Map Branch.Hash [Branch.Hash]) + | DumpUnisonFileHashes Int [(Name, Reference.Id)] [(Name, Reference.Id)] [(Name, Reference.Id)] + | BadName String + | DefaultMetadataNotification + | NoOp + deriving (Show) + +data ReflogEntry = + ReflogEntry { hash :: ShortBranchHash, reason :: Text } + deriving (Show) + +data ShallowListEntry v a + = ShallowTermEntry Referent HQ'.HQSegment (Maybe (Type v a)) + | ShallowTypeEntry Reference HQ'.HQSegment + | ShallowBranchEntry NameSegment Int -- number of child definitions + | ShallowPatchEntry NameSegment + deriving (Eq, Show) + +-- requires Var v to derive Eq, which is required by Ord though not by `compare` +instance Var v => Ord (ShallowListEntry v a) where + compare x y = case compare (toNS x) (toNS y) of + EQ -> compare (toHash x) (toHash y) + c -> c + where + toNS = \case + ShallowTermEntry _ hq _ -> HQ'.toName hq + ShallowTypeEntry _ hq -> HQ'.toName hq + ShallowBranchEntry ns _ -> ns + ShallowPatchEntry ns -> ns + toHash :: ShallowListEntry v a -> Maybe ShortHash + toHash = \case + ShallowTermEntry _ hq _ -> HQ'.toHash hq + ShallowTypeEntry _ hq -> HQ'.toHash hq + ShallowBranchEntry _ _ -> Nothing + ShallowPatchEntry _ -> Nothing + +data HistoryTail = + EndOfLog ShortBranchHash | + MergeTail ShortBranchHash [ShortBranchHash] | + PageEnd ShortBranchHash Int -- PageEnd nextHash nextIndex + deriving (Show) + +data TestReportStats + = CachedTests TotalCount CachedCount + | NewlyComputed deriving Show + +type TotalCount = Int -- total number of tests +type CachedCount = Int -- number of tests found in the cache +type ShowSuccesses = Bool -- whether to list results or just summarize +type ShowFailures = Bool -- whether to list 
results or just summarize + +data UndoFailureReason = CantUndoPastStart | CantUndoPastMerge deriving Show + +type SourceFileContents = Text + +isFailure :: Ord v => Output v -> Bool +isFailure o = case o of + Success{} -> False + NoUnisonFile{} -> True + InvalidSourceName{} -> True + SourceLoadFailed{} -> True + NoMainFunction{} -> True + CreatedNewBranch{} -> False + BranchAlreadyExists{} -> True + PatchAlreadyExists{} -> True + NoExactTypeMatches -> True + BranchEmpty{} -> True + BranchNotEmpty{} -> True + TypeAlreadyExists{} -> True + TypeParseError{} -> True + ParseResolutionFailures{} -> True + TypeHasFreeVars{} -> True + TermAlreadyExists{} -> True + LabeledReferenceAmbiguous{} -> True + LabeledReferenceNotFound{} -> True + DeleteNameAmbiguous{} -> True + TermAmbiguous{} -> True + BranchHashAmbiguous{} -> True + BadName{} -> True + BranchNotFound{} -> True + NameNotFound{} -> True + PatchNotFound{} -> True + TypeNotFound{} -> True + TypeNotFound'{} -> True + TermNotFound{} -> True + TermNotFound'{} -> True + SearchTermsNotFound ts -> not (null ts) + DeleteBranchConfirmation{} -> False + CantDelete{} -> True + DeleteEverythingConfirmation -> False + DeletedEverything -> False + ListNames _ tys tms -> null tms && null tys + ListOfLinks _ ds -> null ds + ListOfDefinitions _ _ ds -> null ds + ListOfPatches s -> Set.null s + SlurpOutput _ _ sr -> not $ SR.isOk sr + ParseErrors{} -> True + TypeErrors{} -> True + DisplayConflicts{} -> False + EvaluationFailure{} -> True + Evaluated{} -> False + Typechecked{} -> False + DisplayDefinitions _ _ m1 m2 -> null m1 && null m2 + DisplayRendered{} -> False + TodoOutput _ todo -> TO.todoScore todo /= 0 + TestIncrementalOutputStart{} -> False + TestIncrementalOutputEnd{} -> False + TestResults _ _ _ _ _ fails -> not (null fails) + CantUndo{} -> True + ListEdits{} -> False + GitError{} -> True + BustedBuiltins{} -> True + ConfiguredMetadataParseError{} -> True + NoConfiguredGitUrl{} -> True + ConfiguredGitUrlParseError{} -> 
True + ConfiguredGitUrlIncludesShortBranchHash{} -> True + DisplayLinks{} -> False + MetadataMissingType{} -> True + MetadataAmbiguous{} -> True + PatchNeedsToBeConflictFree{} -> True + PatchInvolvesExternalDependents{} -> True + NothingToPatch{} -> False + WarnIncomingRootBranch{} -> False + History{} -> False + StartOfCurrentPathHistory -> True + NotImplemented -> True + DumpNumberedArgs{} -> False + DumpBitBooster{} -> False + NoBranchWithHash{} -> True + PullAlreadyUpToDate{} -> False + MergeAlreadyUpToDate{} -> False + PreviewMergeAlreadyUpToDate{} -> False + NoConflictsOrEdits{} -> False + ListShallow _ es -> null es + HashAmbiguous{} -> True + ShowReflog{} -> False + LoadPullRequest{} -> False + DefaultMetadataNotification -> False + NoOp -> False + ListDependencies{} -> False + ListDependents{} -> False + DumpUnisonFileHashes _ x y z -> x == mempty && y == mempty && z == mempty + +isNumberedFailure :: NumberedOutput v -> Bool +isNumberedFailure = \case + ShowDiffNamespace{} -> False + ShowDiffAfterDeleteDefinitions{} -> False + ShowDiffAfterDeleteBranch{} -> False + ShowDiffAfterModifyBranch{} -> False + ShowDiffAfterMerge{} -> False + ShowDiffAfterMergePropagate{} -> False + ShowDiffAfterMergePreview{} -> False + ShowDiffAfterUndo{} -> False + ShowDiffAfterPull{} -> False + ShowDiffAfterCreatePR{} -> False + ShowDiffAfterCreateAuthor{} -> False + + diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs b/parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs new file mode 100644 index 0000000000..d9d213f63b --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/Output/BranchDiff.hs @@ -0,0 +1,338 @@ +{-# Language DeriveFoldable, DeriveTraversable #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Codebase.Editor.Output.BranchDiff where + +import Control.Lens (_1,view) +import Unison.Prelude +import Unison.Name (Name) +import qualified 
Unison.Codebase.Patch as P +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Codebase.BranchDiff as BranchDiff +import Unison.Codebase.BranchDiff (BranchDiff(BranchDiff), DiffSlice) +import qualified Unison.Util.Relation as R +import qualified Unison.Util.Relation3 as R3 +import qualified Unison.Codebase.Metadata as Metadata +import qualified Data.Set as Set +import qualified Data.Map as Map +import Unison.Util.Set (symmetricDifference) + +import Unison.Reference (Reference) +import Unison.Type (Type) +import Unison.HashQualified' (HashQualified) +import qualified Unison.HashQualified as HQ +import qualified Unison.Referent as Referent +import Unison.Referent (Referent) +import qualified Unison.Names2 as Names2 +import Unison.Names3 (Names0) +import Unison.DataDeclaration (DeclOrBuiltin) +import Unison.Runtime.IOSource (isPropagatedValue) + +data MetadataDiff tm = + MetadataDiff { addedMetadata :: [tm] + , removedMetadata :: [tm] } + deriving (Ord,Eq,Functor,Foldable,Traversable,Show) + +instance Semigroup (MetadataDiff tm) where + a <> b = MetadataDiff (addedMetadata a <> addedMetadata b) + (removedMetadata a <> removedMetadata b) + +instance Monoid (MetadataDiff tm) where + mempty = MetadataDiff mempty mempty + mappend = (<>) + +data BranchDiffOutput v a = BranchDiffOutput { + updatedTypes :: [UpdateTypeDisplay v a], + updatedTerms :: [UpdateTermDisplay v a], + newTypeConflicts :: [UpdateTypeDisplay v a], + newTermConflicts :: [UpdateTermDisplay v a], + resolvedTypeConflicts :: [UpdateTypeDisplay v a], + resolvedTermConflicts :: [UpdateTermDisplay v a], + propagatedUpdates :: Int, + updatedPatches :: [PatchDisplay], + addedTypes :: [AddedTypeDisplay v a], + addedTerms :: [AddedTermDisplay v a], + addedPatches :: [PatchDisplay], + removedTypes :: [RemovedTypeDisplay v a], + removedTerms :: [RemovedTermDisplay v a], + removedPatches :: [PatchDisplay], + renamedTypes :: [RenameTypeDisplay v a], + renamedTerms :: [RenameTermDisplay v a] + } 
deriving Show + +isEmpty :: BranchDiffOutput v a -> Bool +isEmpty BranchDiffOutput{..} = + null updatedTypes && null updatedTerms && + null newTypeConflicts && null newTermConflicts && + null resolvedTypeConflicts && null resolvedTermConflicts && + null addedTypes && null addedTerms && null addedPatches && + null removedTypes && null removedTerms && null removedPatches && + null renamedTypes && null renamedTerms && null updatedPatches && + propagatedUpdates == 0 + +-- Need to be able to turn a (Name,Reference) into a HashQualified relative to... what. +-- the new namespace? + +type TermDisplay v a = (HashQualified, Referent, Maybe (Type v a), MetadataDiff (MetadataDisplay v a)) +type TypeDisplay v a = (HashQualified, Reference, Maybe (DeclOrBuiltin v a), MetadataDiff (MetadataDisplay v a)) + +type AddedTermDisplay v a = ([(HashQualified, [MetadataDisplay v a])], Referent, Maybe (Type v a)) +type AddedTypeDisplay v a = ([(HashQualified, [MetadataDisplay v a])], Reference, Maybe (DeclOrBuiltin v a)) + +type RemovedTermDisplay v a = ([HashQualified], Referent, Maybe (Type v a)) +type RemovedTypeDisplay v a = ([HashQualified], Reference, Maybe (DeclOrBuiltin v a)) + +type SimpleTermDisplay v a = (HashQualified, Referent, Maybe (Type v a)) +type SimpleTypeDisplay v a = (HashQualified, Reference, Maybe (DeclOrBuiltin v a)) + +type UpdateTermDisplay v a = (Maybe [SimpleTermDisplay v a], [TermDisplay v a]) +type UpdateTypeDisplay v a = (Maybe [SimpleTypeDisplay v a], [TypeDisplay v a]) + +type MetadataDisplay v a = (HQ.HashQualified, Referent, Maybe (Type v a)) +type RenameTermDisplay v a = (Referent, Maybe (Type v a), Set HashQualified, Set HashQualified) +type RenameTypeDisplay v a = (Reference, Maybe (DeclOrBuiltin v a), Set HashQualified, Set HashQualified) +type PatchDisplay = (Name, P.PatchDiff) + +toOutput :: forall m v a + . 
Monad m + => (Referent -> m (Maybe (Type v a))) + -> (Reference -> m (Maybe (DeclOrBuiltin v a))) + -> Int + -> Names0 + -> Names0 + -> PPE.PrettyPrintEnv + -> BranchDiff.BranchDiff + -> m (BranchDiffOutput v a) +toOutput typeOf declOrBuiltin hqLen names1 names2 ppe + (BranchDiff termsDiff typesDiff patchesDiff) = do + let + -- | This calculates the new reference's metadata as: + -- adds: now-attached metadata that was missing from + -- any of the old references associated with the name + -- removes: not-attached metadata that had been attached to any of + -- the old references associated with the name + getNewMetadataDiff :: Ord r => Bool -> DiffSlice r -> Name -> Set r -> r -> MetadataDiff Metadata.Value + getNewMetadataDiff hidePropagatedMd s n rs_old r_new = let + old_metadatas :: [Set Metadata.Value] = + toList . R.toMultimap . R.restrictDom rs_old . R3.lookupD2 n $ + BranchDiff.tremovedMetadata s + old_intersection :: Set Metadata.Value = + foldl' Set.intersection mempty old_metadatas + old_union :: Set Metadata.Value = + foldl' Set.union mempty old_metadatas + new_metadata :: Set Metadata.Value = + R.lookupDom n . 
R3.lookupD1 r_new $ BranchDiff.taddedMetadata s + toDelete = if hidePropagatedMd then Set.singleton isPropagatedValue else mempty + in MetadataDiff + { addedMetadata = toList $ new_metadata `Set.difference` old_intersection `Set.difference` toDelete + , removedMetadata = toList $ old_union `Set.difference` new_metadata `Set.difference` toDelete + } + -- For the metadata on a definition to have changed, the name + -- and the reference must have existed before and the reference + -- must not have been removed and the name must not have been removed or added + -- or updated 😅 + -- "getMetadataUpdates" = a defn has been updated via change of metadata + getMetadataUpdates :: Ord r => DiffSlice r -> Map Name (Set r, Set r) + getMetadataUpdates s = Map.fromList + [ (n, (Set.singleton r, Set.singleton r)) -- the reference is unchanged + | (r,n,v) <- R3.toList $ BranchDiff.taddedMetadata s <> + BranchDiff.tremovedMetadata s + , R.notMember r n (BranchDiff.talladds s) + , R.notMember r n (BranchDiff.tallremoves s) + -- don't count it as a metadata update if it already's already a regular update + , let (oldRefs, newRefs) = + Map.findWithDefault mempty n (BranchDiff.tallnamespaceUpdates s) + in Set.notMember r oldRefs && Set.notMember r newRefs +-- trenames :: Map r (Set Name, Set Name), -- ref (old, new) + , case Map.lookup r (BranchDiff.trenames s) of + Nothing -> True + Just (olds, news) -> + Set.notMember n (symmetricDifference olds news) + , v /= isPropagatedValue ] + + let isSimpleUpdate, isNewConflict, isResolvedConflict :: Eq r => (Set r, Set r) -> Bool + isSimpleUpdate (old, new) = Set.size old == 1 && Set.size new == 1 + isNewConflict (_old, new) = Set.size new > 1 -- should already be the case that old /= new + isResolvedConflict (old, new) = Set.size old > 1 && Set.size new == 1 + + (updatedTypes :: [UpdateTypeDisplay v a], + newTypeConflicts :: [UpdateTypeDisplay v a], + resolvedTypeConflicts :: [UpdateTypeDisplay v a]) <- let + -- things where what the name 
pointed to changed + nsUpdates :: Map Name (Set Reference, Set Reference) = + BranchDiff.namespaceUpdates typesDiff + -- things where the metadata changed (`uniqueBy` below removes these + -- if they were already included in `nsUpdates) + metadataUpdates = getMetadataUpdates typesDiff + loadOld :: Bool -> Name -> Reference -> m (SimpleTypeDisplay v a) + loadOld forceHQ n r_old = + (,,) <$> pure (if forceHQ + then Names2.hqTypeName' hqLen n r_old + else Names2.hqTypeName hqLen names1 n r_old) + <*> pure r_old + <*> declOrBuiltin r_old + loadNew :: Bool -> Bool -> Name -> Set Reference -> Reference -> m (TypeDisplay v a) + loadNew hidePropagatedMd forceHQ n rs_old r_new = + (,,,) <$> pure (if forceHQ + then Names2.hqTypeName' hqLen n r_new + else Names2.hqTypeName hqLen names2 n r_new) + <*> pure r_new + <*> declOrBuiltin r_new + <*> fillMetadata ppe (getNewMetadataDiff hidePropagatedMd typesDiff n rs_old r_new) + loadEntry :: Bool -> (Name, (Set Reference, Set Reference)) -> m (UpdateTypeDisplay v a) + loadEntry hidePropagatedMd (n, (Set.toList -> [rold], Set.toList -> [rnew])) | rold == rnew = + (Nothing,) <$> for [rnew] (loadNew hidePropagatedMd False n (Set.singleton rold)) + loadEntry hidePropagatedMd (n, (rs_old, rs_new)) = + let forceHQ = Set.size rs_old > 1 || Set.size rs_new > 1 in + (,) <$> (Just <$> for (toList rs_old) (loadOld forceHQ n)) + <*> for (toList rs_new) (loadNew hidePropagatedMd forceHQ n rs_old) + in liftA3 (,,) + (sortOn (view _1 . head . 
snd) <$> liftA2 (<>) + (for (Map.toList $ Map.filter isSimpleUpdate nsUpdates) (loadEntry True)) + (for (Map.toList metadataUpdates) (loadEntry False))) + (for (Map.toList $ Map.filter isNewConflict nsUpdates) (loadEntry True)) + (for (Map.toList $ Map.filter isResolvedConflict nsUpdates) (loadEntry True)) + + (updatedTerms :: [UpdateTermDisplay v a], + newTermConflicts :: [UpdateTermDisplay v a], + resolvedTermConflicts :: [UpdateTermDisplay v a]) <- let + -- things where what the name pointed to changed + nsUpdates = BranchDiff.namespaceUpdates termsDiff + -- things where the metadata changed (`uniqueBy` below removes these + -- if they were already included in `nsUpdates) + metadataUpdates = getMetadataUpdates termsDiff + loadOld forceHQ n r_old = + (,,) <$> pure (if forceHQ then Names2.hqTermName' hqLen n r_old + else Names2.hqTermName hqLen names1 n r_old) + <*> pure r_old + <*> typeOf r_old + loadNew hidePropagatedMd forceHQ n rs_old r_new = + (,,,) <$> pure (if forceHQ then Names2.hqTermName' hqLen n r_new + else Names2.hqTermName hqLen names2 n r_new) + <*> pure r_new + <*> typeOf r_new + <*> fillMetadata ppe (getNewMetadataDiff hidePropagatedMd termsDiff n rs_old r_new) + loadEntry hidePropagatedMd (n, (rs_old, rs_new)) + -- if the references haven't changed, it's code for: only the metadata has changed + -- and we can ignore the old references in the output. + | rs_old == rs_new = (Nothing,) <$> for (toList rs_new) (loadNew hidePropagatedMd False n rs_old) + | otherwise = let forceHQ = Set.size rs_old > 1 || Set.size rs_new > 1 in + (,) <$> (Just <$> for (toList rs_old) (loadOld forceHQ n)) + <*> for (toList rs_new) (loadNew hidePropagatedMd forceHQ n rs_old) + in liftA3 (,,) + -- this is sorting the Update section back into alphabetical Name order + -- after calling loadEntry on the two halves. + (sortOn (view _1 . head . 
snd) <$> liftA2 (<>) + (for (Map.toList $ Map.filter isSimpleUpdate nsUpdates) (loadEntry True)) + (for (Map.toList metadataUpdates) (loadEntry False))) + (for (Map.toList $ Map.filter isNewConflict nsUpdates) (loadEntry True)) + (for (Map.toList $ Map.filter isResolvedConflict nsUpdates) (loadEntry True)) + + let propagatedUpdates :: Int = + -- counting the number of named auto-propagated definitions + (Set.size . Set.unions . toList . BranchDiff.propagatedUpdates) typesDiff + + (Set.size . Set.unions . toList . BranchDiff.propagatedUpdates) termsDiff + + let updatedPatches :: [PatchDisplay] = + [(name, diff) | (name, BranchDiff.Modify diff) <- Map.toList patchesDiff] + + addedTypes :: [AddedTypeDisplay v a] <- do + let typeAdds :: [(Reference, [(Name, [Metadata.Value])])] = sortOn snd + [ (r, nsmd) + | (r, ns) <- Map.toList . R.toMultimap . BranchDiff.talladds $ typesDiff + , let nsmd = [ (n, toList $ getAddedMetadata r n typesDiff) + | n <- toList ns ] + ] + for typeAdds $ \(r, nsmd) -> do + hqmds :: [(HashQualified, [MetadataDisplay v a])] <- + for nsmd $ \(n, mdRefs) -> + (,) <$> pure (Names2.hqTypeName hqLen names2 n r) + <*> fillMetadata ppe mdRefs + (hqmds, r, ) <$> declOrBuiltin r + + addedTerms :: [AddedTermDisplay v a] <- do + let termAdds :: [(Referent, [(Name, [Metadata.Value])])] = sortOn snd + [ (r, nsmd) + | (r, ns) <- Map.toList . R.toMultimap . BranchDiff.talladds $ termsDiff + , let nsmd = [ (n, toList $ getAddedMetadata r n termsDiff) + | n <- toList ns ] + ] + for termAdds $ \(r, nsmd) -> do + hqmds <- for nsmd $ \(n, mdRefs) -> + (,) <$> pure (Names2.hqTermName hqLen names2 n r) + <*> fillMetadata ppe mdRefs + (hqmds, r, ) <$> typeOf r + + let addedPatches :: [PatchDisplay] = + [ (name, diff) + | (name, BranchDiff.Create diff) <- Map.toList patchesDiff ] + + removedTypes :: [RemovedTypeDisplay v a] <- let + typeRemoves :: [(Reference, [Name])] = sortOn snd $ + Map.toList . fmap toList . R.toMultimap . 
BranchDiff.tallremoves $ typesDiff + in for typeRemoves $ \(r, ns) -> + (,,) <$> pure ((\n -> Names2.hqTypeName hqLen names1 n r) <$> ns) + <*> pure r + <*> declOrBuiltin r + + removedTerms :: [RemovedTermDisplay v a] <- let + termRemoves :: [(Referent, [Name])] = sortOn snd $ + Map.toList . fmap toList . R.toMultimap . BranchDiff.tallremoves $ termsDiff + in for termRemoves $ \(r, ns) -> + (,,) <$> pure ((\n -> Names2.hqTermName hqLen names1 n r) <$> ns) + <*> pure r + <*> typeOf r + + let removedPatches :: [PatchDisplay] = + [ (name, diff) + | (name, BranchDiff.Delete diff) <- Map.toList patchesDiff ] + + let renamedTerm :: Map Referent (Set Name, Set Name) -> m [RenameTermDisplay v a] + renamedTerm renames = + for (sortOn snd $ Map.toList renames) $ \(r, (ol'names, new'names)) -> + (,,,) <$> pure r + <*> typeOf r + <*> pure (Set.map (\n -> Names2.hqTermName hqLen names1 n r) ol'names) + <*> pure (Set.map (\n -> Names2.hqTermName hqLen names2 n r) new'names) + + let renamedType :: Map Reference (Set Name, Set Name) -> m [RenameTypeDisplay v a] + renamedType renames = + for (sortOn snd $ Map.toList renames) $ \(r, (ol'names, new'names)) -> + (,,,) <$> pure r + <*> declOrBuiltin r + <*> pure (Set.map (\n -> Names2.hqTypeName hqLen names1 n r) ol'names) + <*> pure (Set.map (\n -> Names2.hqTypeName hqLen names2 n r) new'names) + + renamedTypes :: [RenameTypeDisplay v a] <- renamedType (BranchDiff.trenames typesDiff) + renamedTerms :: [RenameTermDisplay v a] <- renamedTerm (BranchDiff.trenames termsDiff) + + pure $ BranchDiffOutput + updatedTypes + updatedTerms + newTypeConflicts + newTermConflicts + resolvedTypeConflicts + resolvedTermConflicts + propagatedUpdates + updatedPatches + addedTypes + addedTerms + addedPatches + removedTypes + removedTerms + removedPatches + renamedTypes + renamedTerms + where + fillMetadata :: Traversable t => PPE.PrettyPrintEnv -> t Metadata.Value -> m (t (MetadataDisplay v a)) + fillMetadata ppe = traverse $ -- metadata values are all 
terms + \(Referent.Ref -> mdRef) -> + let name = PPE.termName ppe mdRef + in (name, mdRef, ) <$> typeOf mdRef + getMetadata :: Ord r => r -> Name -> R3.Relation3 r Name Metadata.Value -> Set Metadata.Value + getMetadata r n = R.lookupDom n . R3.lookupD1 r + + getAddedMetadata :: Ord r => r -> Name -> BranchDiff.DiffSlice r -> Set Metadata.Value + getAddedMetadata r n slice = getMetadata r n $ BranchDiff.taddedMetadata slice diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs b/parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs new file mode 100644 index 0000000000..9ec10150f8 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/Propagate.hs @@ -0,0 +1,522 @@ +{-# OPTIONS_GHC -Wno-partial-type-signatures #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Codebase.Editor.Propagate where + +import Control.Error.Util ( hush ) +import Control.Lens +import Data.Configurator ( ) +import qualified Data.Graph as Graph +import qualified Data.Map as Map +import qualified Data.Set as Set +import Unison.Codebase.Branch ( Branch0(..) ) +import Unison.Prelude +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.Editor.Command +import Unison.Codebase.Editor.Output +import Unison.Codebase.Patch ( Patch(..) ) +import qualified Unison.Codebase.Patch as Patch +import Unison.DataDeclaration ( Decl ) +import qualified Unison.DataDeclaration as Decl +import Unison.Names3 ( Names0 ) +import qualified Unison.Names2 as Names +import Unison.Parser ( Ann(..) ) +import Unison.Reference ( Reference(..) 
) +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import qualified Unison.Result as Result +import qualified Unison.Term as Term +import Unison.Term ( Term ) +import Unison.Util.Free ( Free + , eval + ) +import qualified Unison.Util.Relation as R +import Unison.Util.TransitiveClosure ( transitiveClosure ) +import Unison.Var ( Var ) +import qualified Unison.Codebase.Metadata as Metadata +import qualified Unison.Codebase.TypeEdit as TypeEdit +import Unison.Codebase.TermEdit ( TermEdit(..) ) +import qualified Unison.Codebase.TermEdit as TermEdit +import Unison.Codebase.TypeEdit ( TypeEdit(..) ) +import Unison.UnisonFile ( UnisonFile(..) ) +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Star3 as Star3 +import Unison.Type ( Type ) +import qualified Unison.Type as Type +import qualified Unison.Typechecker as Typechecker +import Unison.ConstructorType ( ConstructorType ) +import qualified Unison.Runtime.IOSource as IOSource + +type F m i v = Free (Command m i v) + +data Edits v = Edits + { termEdits :: Map Reference TermEdit + -- same info as `termEdits` but in more efficient form for calling `Term.updateDependencies` + , termReplacements :: Map Reference Reference + , newTerms :: Map Reference (Term v Ann, Type v Ann) + , typeEdits :: Map Reference TypeEdit + , typeReplacements :: Map Reference Reference + , newTypes :: Map Reference (Decl v Ann) + , constructorReplacements :: Map (Reference, Int, ConstructorType) + (Reference, Int, ConstructorType) + } deriving (Eq, Show) + +noEdits :: Edits v +noEdits = Edits mempty mempty mempty mempty mempty mempty mempty + +propagateAndApply + :: forall m i v + . (Applicative m, Var v) + => Patch + -> Branch0 m + -> F m i v (Branch0 m) +propagateAndApply patch branch = do + edits <- propagate patch branch + f <- applyPropagate patch edits + (pure . f . 
applyDeprecations patch) branch + + +-- Creates a mapping from old data constructors to new data constructors +-- by looking at the original names for the data constructors which are +-- embedded in the Decl object because we carefully planned that. +generateConstructorMapping + :: Eq v + => Map v (Reference, Decl v _) + -> Map v (Reference, Decl.DataDeclaration v _) + -> Map + (Reference, Int, ConstructorType) + (Reference, Int, ConstructorType) +generateConstructorMapping oldComponent newComponent = Map.fromList + [ let t = Decl.constructorType oldDecl in ((oldR, oldC, t), (newR, newC, t)) + | (v1, (oldR, oldDecl)) <- Map.toList oldComponent + , (v2, (newR, newDecl)) <- Map.toList newComponent + , v1 == v2 + , (oldC, (_, oldName, _)) <- zip [0 ..] + $ Decl.constructors' (Decl.asDataDecl oldDecl) + , (newC, (_, newName, _)) <- zip [0 ..] $ Decl.constructors' newDecl + , oldName == newName + ] + +-- Note: this function adds definitions to the codebase as it propagates. +-- Description: +------------------ +-- For any `Reference` in the frontier which has an unconflicted +-- term edit, `old -> new`, replace `old` with `new` in dependents of the +-- frontier, and call `propagate'` recursively on the new frontier if +-- the dependents still typecheck. +-- +-- If the term is `Typing.Same`, the dependents don't need to be typechecked. +-- If the term is `Typing.Subtype`, and the dependent only has inferred type, +-- it should be re-typechecked, and the new inferred type should be used. +-- +-- This will create a whole bunch of new terms and types in the codebase and +-- move the names onto those new terms. Uses `updateDependencies` to perform +-- the substitutions. 
+-- +-- Algorithm: +---------------- +-- compute the frontier relation (dependencies of updated terms and types) +-- for each dirty definition d: +-- for each member c of cycle(d): +-- construct c', an updated c incorporating all edits +-- Add an edit c -> c' +-- and save c' to a `Map Reference Term` or `Map Reference Type` +-- as appropriate +-- Collect all c' into a new cycle and typecheck (TODO: kindcheck) that cycle. +-- If the cycle doesn't check, discard edits to that cycle. +-- +-- "dirty" means in need of update +-- "frontier" means updated definitions responsible for the "dirty" +propagate + :: forall m i v + . (Applicative m, Var v) + => Patch + -> Branch0 m + -> F m i v (Edits v) +propagate patch b = case validatePatch patch of + Nothing -> do + eval $ Notify PatchNeedsToBeConflictFree + pure noEdits + Just (initialTermEdits, initialTypeEdits) -> do + let + entireBranch = Set.union + (Branch.deepTypeReferences b) + (Set.fromList + [ r | Referent.Ref r <- Set.toList $ Branch.deepReferents b ] + ) + initialDirty <- + R.dom <$> computeFrontier (eval . 
GetDependents) patch names0 + order <- sortDependentsGraph initialDirty entireBranch + let + + getOrdered :: Set Reference -> Map Int Reference + getOrdered rs = + Map.fromList [ (i, r) | r <- toList rs, Just i <- [Map.lookup r order] ] + collectEdits + :: (Applicative m, Var v) + => Edits v + -> Set Reference + -> Map Int Reference + -> F m i v (Edits v) + collectEdits es@Edits {..} seen todo = case Map.minView todo of + Nothing -> pure es + Just (r, todo) -> case r of + Reference.Builtin _ -> collectEdits es seen todo + Reference.DerivedId _ -> go r todo + where + go r todo = + if Map.member r termEdits + || Map.member r typeEdits + || Set.member r seen + then + collectEdits es seen todo + else + do + haveType <- eval $ IsType r + haveTerm <- eval $ IsTerm r + let message = + "This reference is not a term nor a type " <> show r + mmayEdits | haveTerm = doTerm r + | haveType = doType r + | otherwise = error message + mayEdits <- mmayEdits + case mayEdits of + (Nothing , seen') -> collectEdits es seen' todo + (Just edits', seen') -> do + -- plan to update the dependents of this component too + dependents <- + fmap Set.unions + . traverse (eval . GetDependents) + . toList + . Reference.members + $ Reference.componentFor r + let todo' = todo <> getOrdered dependents + collectEdits edits' seen' todo' + doType :: Reference -> F m i v (Maybe (Edits v), Set Reference) + doType r = do + componentMap <- unhashTypeComponent r + let componentMap' = + over _2 (Decl.updateDependencies typeReplacements) + <$> componentMap + declMap = over _2 (either Decl.toDataDecl id) <$> componentMap' + -- TODO: kind-check the new components + hashedDecls = (fmap . fmap) (over _2 DerivedId) + . Decl.hashDecls + $ view _2 <$> declMap + hashedComponents' <- case hashedDecls of + Left _ -> + error + $ "Edit propagation failed because some of the dependencies of " + <> show r + <> " could not be resolved." + Right c -> pure . 
Map.fromList $ (\(v, r, d) -> (v, (r, d))) <$> c + let + -- Relation: (nameOfType, oldRef, newRef, newType) + joinedStuff + :: [(v, (Reference, Reference, Decl.DataDeclaration v _))] + joinedStuff = + Map.toList (Map.intersectionWith f declMap hashedComponents') + f (oldRef, _) (newRef, newType) = (oldRef, newRef, newType) + typeEdits' = typeEdits <> (Map.fromList . fmap toEdit) joinedStuff + toEdit (_, (r, r', _)) = (r, TypeEdit.Replace r') + typeReplacements' = typeReplacements + <> (Map.fromList . fmap toReplacement) joinedStuff + toReplacement (_, (r, r', _)) = (r, r') + -- New types this iteration + newNewTypes = (Map.fromList . fmap toNewType) joinedStuff + -- Accumulated new types + newTypes' = newTypes <> newNewTypes + toNewType (v, (_, r', tp)) = + ( r' + , case Map.lookup v componentMap of + Just (_, Left _ ) -> Left (Decl.EffectDeclaration tp) + Just (_, Right _) -> Right tp + _ -> error "It's not gone well!" + ) + seen' = seen <> Set.fromList (view _1 . view _2 <$> joinedStuff) + writeTypes = + traverse_ (\(Reference.DerivedId id, tp) -> eval $ PutDecl id tp) + constructorMapping = + constructorReplacements + <> generateConstructorMapping componentMap hashedComponents' + writeTypes $ Map.toList newNewTypes + pure + ( Just $ Edits termEdits + termReplacements + newTerms + typeEdits' + typeReplacements' + newTypes' + constructorMapping + , seen' + ) + doTerm :: Reference -> F m i v (Maybe (Edits v), Set Reference) + doTerm r = do + componentMap <- unhashTermComponent r + let componentMap' = + over + _2 + (Term.updateDependencies termReplacements typeReplacements) + <$> componentMap + seen' = seen <> Set.fromList (view _1 <$> Map.elems componentMap) + mayComponent <- verifyTermComponent componentMap' es + case mayComponent of + Nothing -> pure (Nothing, seen') + Just componentMap'' -> do + let + joinedStuff = + toList (Map.intersectionWith f componentMap componentMap'') + f (oldRef, _oldTerm, oldType) (newRef, newTerm, newType) = + (oldRef, newRef, 
newTerm, oldType, newType') + -- Don't replace the type if it hasn't changed. + + where + newType' | Typechecker.isEqual oldType newType = oldType + | otherwise = newType + -- collect the hashedComponents into edits/replacements/newterms/seen + termEdits' = + termEdits <> (Map.fromList . fmap toEdit) joinedStuff + toEdit (r, r', _newTerm, oldType, newType) = + (r, TermEdit.Replace r' $ TermEdit.typing newType oldType) + termReplacements' = termReplacements + <> (Map.fromList . fmap toReplacement) joinedStuff + toReplacement (r, r', _, _, _) = (r, r') + newTerms' = + newTerms <> (Map.fromList . fmap toNewTerm) joinedStuff + toNewTerm (_, r', tm, _, tp) = (r', (tm, tp)) + writeTerms = + traverse_ + (\(Reference.DerivedId id, (tm, tp)) -> + eval $ PutTerm id tm tp + ) + writeTerms + [ (r, (tm, ty)) | (_old, r, tm, _oldTy, ty) <- joinedStuff ] + pure + ( Just $ Edits termEdits' + termReplacements' + newTerms' + typeEdits + typeReplacements + newTypes + constructorReplacements + , seen' + ) + collectEdits + (Edits initialTermEdits + (Map.mapMaybe TermEdit.toReference initialTermEdits) + mempty + initialTypeEdits + (Map.mapMaybe TypeEdit.toReference initialTypeEdits) + mempty + mempty + ) + mempty -- things to skip + (getOrdered initialDirty) + where + sortDependentsGraph :: Set Reference -> Set Reference -> _ (Map Reference Int) + sortDependentsGraph dependencies restrictTo = do + closure <- transitiveClosure + (fmap (Set.intersection restrictTo) . eval . GetDependents) + dependencies + dependents <- traverse (\r -> (r, ) <$> (eval . GetDependents) r) + (toList closure) + let graphEdges = [ (r, r, toList deps) | (r, deps) <- toList dependents ] + (graph, getReference, _) = Graph.graphFromEdges graphEdges + pure $ Map.fromList + (zip (view _1 . getReference <$> Graph.topSort graph) [0 ..]) + -- vertex i precedes j whenever i has an edge to j and not vice versa. + -- vertex i precedes j when j is a dependent of i. 
+ names0 = Branch.toNames0 b + validatePatch + :: Patch -> Maybe (Map Reference TermEdit, Map Reference TypeEdit) + validatePatch p = + (,) <$> R.toMap (Patch._termEdits p) <*> R.toMap (Patch._typeEdits p) + -- Turns a cycle of references into a term with free vars that we can edit + -- and hash again. + -- todo: Maybe this an others can be moved to HandleCommand, in the + -- Free (Command m i v) monad, passing in the actions that are needed. + -- However, if we want this to be parametric in the annotation type, then + -- Command would have to be made parametric in the annotation type too. + unhashTermComponent + :: forall m v + . (Applicative m, Var v) + => Reference + -> F m i v (Map v (Reference, Term v _, Type v _)) + unhashTermComponent ref = do + let component = Reference.members $ Reference.componentFor ref + termInfo + :: Reference -> F m i v (Maybe (Reference, (Term v Ann, Type v Ann))) + termInfo termRef = do + tpm <- eval $ LoadTypeOfTerm termRef + tp <- maybe (error $ "Missing type for term " <> show termRef) + pure + tpm + case termRef of + Reference.DerivedId id -> do + mtm <- eval $ LoadTerm id + tm <- maybe (error $ "Missing term with id " <> show id) pure mtm + pure $ Just (termRef, (tm, tp)) + Reference.Builtin{} -> pure Nothing + unhash m = + let f (_oldTm, oldTyp) (v, newTm) = (v, newTm, oldTyp) + m' = Map.intersectionWith f m (Term.unhashComponent (fst <$> m)) + in Map.fromList + [ (v, (r, tm, tp)) | (r, (v, tm, tp)) <- Map.toList m' ] + unhash . Map.fromList . catMaybes <$> traverse termInfo (toList component) + unhashTypeComponent + :: forall m v + . 
(Applicative m, Var v) + => Reference + -> F m i v (Map v (Reference, Decl v _)) + unhashTypeComponent ref = do + let + component = Reference.members $ Reference.componentFor ref + typeInfo :: Reference -> F m i v (Maybe (Reference, Decl v Ann)) + typeInfo typeRef = case typeRef of + Reference.DerivedId id -> do + declm <- eval $ LoadType id + decl <- maybe (error $ "Missing type declaration " <> show typeRef) + pure + declm + pure $ Just (typeRef, decl) + Reference.Builtin{} -> pure Nothing + unhash = + Map.fromList . map reshuffle . Map.toList . Decl.unhashComponent + where reshuffle (r, (v, decl)) = (v, (r, decl)) + unhash . Map.fromList . catMaybes <$> traverse typeInfo (toList component) + verifyTermComponent + :: Map v (Reference, Term v _, a) + -> Edits v + -> F m i v (Maybe (Map v (Reference, Term v _, Type v _))) + verifyTermComponent componentMap Edits {..} = do + -- If the term contains references to old patterns, we can't update it. + -- If the term had a redunant type signature, it's discarded and a new type + -- is inferred. If it wasn't redunant, we have already substituted any updates + -- into it and we're going to check against that signature. + -- + -- Note: This only works if the type update is kind-preserving. + let + -- See if the constructor dependencies of any element of the cycle + -- contains one of the old types. + terms = Map.elems $ view _2 <$> componentMap + oldTypes = Map.keysSet typeEdits + if not . Set.null $ Set.intersection + (foldMap Term.constructorDependencies terms) + oldTypes + then pure Nothing + else do + let file = UnisonFileId + mempty + mempty + (Map.toList $ (\(_, tm, _) -> tm) <$> componentMap) + mempty + typecheckResult <- eval $ TypecheckFile file [] + pure + . fmap UF.hashTerms + $ runIdentity (Result.toMaybe typecheckResult) + >>= hush + +applyDeprecations :: Applicative m => Patch -> Branch0 m -> Branch0 m +applyDeprecations patch = deleteDeprecatedTerms deprecatedTerms + . 
deleteDeprecatedTypes deprecatedTypes + where + deprecatedTerms, deprecatedTypes :: Set Reference + deprecatedTerms = Set.fromList + [ r | (r, TermEdit.Deprecate) <- R.toList (Patch._termEdits patch) ] + deprecatedTypes = Set.fromList + [ r | (r, TypeEdit.Deprecate) <- R.toList (Patch._typeEdits patch) ] + deleteDeprecatedTerms, deleteDeprecatedTypes + :: Set Reference -> Branch0 m -> Branch0 m + deleteDeprecatedTerms rs = + over Branch.terms (Star3.deleteFact (Set.map Referent.Ref rs)) + deleteDeprecatedTypes rs = over Branch.types (Star3.deleteFact rs) + +-- | Things in the patch are not marked as propagated changes, but every other +-- definition that is created by the `Edits` which is passed in is marked as +-- a propagated change. +applyPropagate + :: Var v => Applicative m => Patch -> Edits v -> F m i v (Branch0 m -> Branch0 m) +applyPropagate patch Edits {..} = do + let termRefs = Map.mapMaybe TermEdit.toReference termEdits + typeRefs = Map.mapMaybe TypeEdit.toReference typeEdits + termTypes = Map.map (Type.toReference . 
snd) newTerms + -- recursively update names and delete deprecated definitions + pure $ Branch.stepEverywhere (updateLevel termRefs typeRefs termTypes) + where + updateLevel + :: Map Reference Reference + -> Map Reference Reference + -> Map Reference Reference + -> Branch0 m + -> Branch0 m + updateLevel termEdits typeEdits termTypes Branch0 {..} = + Branch.branch0 termsWithCons types _children _edits + where + isPropagated = (`Set.notMember` allPatchTargets) where + allPatchTargets = Patch.allReferenceTargets patch + + terms = foldl' replaceTerm _terms (Map.toList termEdits) + types = foldl' replaceType _types (Map.toList typeEdits) + + updateMetadata r r' (tp, v) = if v == r then (typeOf r' tp, r') else (tp, v) + where typeOf r t = fromMaybe t $ Map.lookup r termTypes + + propagatedMd :: r -> (r, Metadata.Type, Metadata.Value) + propagatedMd r = (r, IOSource.isPropagatedReference, IOSource.isPropagatedValue) + termsWithCons = + foldl' replaceConstructor terms (Map.toList constructorReplacements) + replaceTerm s (r, r') = + (if isPropagated r' + then Metadata.insert (propagatedMd (Referent.Ref r')) + else Metadata.delete (propagatedMd (Referent.Ref r'))) . + Star3.replaceFact (Referent.Ref r) (Referent.Ref r') $ + Star3.mapD3 (updateMetadata r r') s + + replaceConstructor s ((oldr, oldc, oldt), (newr, newc, newt)) = + -- always insert the metadata since patches can't contain ctor mappings (yet) + Metadata.insert (propagatedMd con') . + Star3.replaceFact (Referent.Con oldr oldc oldt) con' $ s + where + con' = Referent.Con newr newc newt + replaceType s (r, r') = + (if isPropagated r' then Metadata.insert (propagatedMd r') + else Metadata.delete (propagatedMd r')) . 
+ Star3.replaceFact r r' $ s + + -- typePreservingTermEdits :: Patch -> Patch + -- typePreservingTermEdits Patch {..} = Patch termEdits mempty + -- where termEdits = R.filterRan TermEdit.isTypePreserving _termEdits + +-- (d, f) when d is "dirty" (needs update), +-- f is in the frontier (an edited dependency of d), +-- and d depends on f +-- a ⋖ b = a depends directly on b +-- dirty(d) ∧ frontier(f) <=> not(edited(d)) ∧ edited(f) ∧ d ⋖ f +-- +-- The range of this relation is the frontier, and the domain is +-- the set of dirty references. +computeFrontier + :: forall m + . Monad m + => (Reference -> m (Set Reference)) -- eg Codebase.dependents codebase + -> Patch + -> Names0 + -> m (R.Relation Reference Reference) +computeFrontier getDependents patch names = do + -- (r,r2) ∈ dependsOn if r depends on r2 + dependsOn <- foldM addDependents R.empty edited + -- Dirty is everything that `dependsOn` Frontier, minus already edited defns + pure $ R.filterDom (not . flip Set.member edited) dependsOn + where + edited :: Set Reference + edited = R.dom (Patch._termEdits patch) <> R.dom (Patch._typeEdits patch) + addDependents + :: R.Relation Reference Reference + -> Reference + -> m (R.Relation Reference Reference) + addDependents dependents ref = + (\ds -> R.insertManyDom ds ref dependents) + . 
Set.filter (Names.contains names) + <$> getDependents ref diff --git a/parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs b/parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs new file mode 100644 index 0000000000..9648b398f0 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/RemoteRepo.hs @@ -0,0 +1,32 @@ +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE OverloadedStrings #-} +module Unison.Codebase.Editor.RemoteRepo where + +import Unison.Prelude +import Unison.Util.Monoid as Monoid +import Data.Text as Text +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.Path (Path) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import qualified Unison.Codebase.ShortBranchHash as SBH + +data RemoteRepo = GitRepo { url :: Text, commit :: Maybe Text } + deriving (Eq, Ord, Show) + +printRepo :: RemoteRepo -> Text +printRepo GitRepo{..} = url <> Monoid.fromMaybe (Text.cons ':' <$> commit) + +printNamespace :: RemoteRepo -> Maybe ShortBranchHash -> Path -> Text +printNamespace repo sbh path = + printRepo repo <> case sbh of + Nothing -> if path == Path.empty then mempty + else ":." <> Path.toText path + Just sbh -> ":#" <> SBH.toText sbh <> + if path == Path.empty then mempty + else "." 
<> Path.toText path + +printHead :: RemoteRepo -> Path -> Text +printHead repo path = printNamespace repo Nothing path + +type RemoteNamespace = (RemoteRepo, Maybe ShortBranchHash, Path) +type RemoteHead = (RemoteRepo, Path) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs b/parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs new file mode 100644 index 0000000000..ea08604948 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/SearchResult'.hs @@ -0,0 +1,52 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codebase.Editor.SearchResult' where + +import Unison.Prelude + +import Unison.Referent (Referent) +import Unison.Reference (Reference) +import qualified Unison.HashQualified' as HQ' +import qualified Data.Set as Set +import qualified Unison.DataDeclaration as DD +import qualified Unison.Codebase.Editor.DisplayThing as DT +import qualified Unison.Type as Type +import Unison.DataDeclaration (Decl) +import Unison.Codebase.Editor.DisplayThing (DisplayThing) +import Unison.Type (Type) +import qualified Unison.LabeledDependency as LD +import Unison.LabeledDependency (LabeledDependency) + +data SearchResult' v a + = Tm' (TermResult' v a) + | Tp' (TypeResult' v a) + deriving (Eq, Show) +data TermResult' v a = + TermResult' HQ'.HashQualified (Maybe (Type v a)) Referent (Set HQ'.HashQualified) + deriving (Eq, Show) +data TypeResult' v a = + TypeResult' HQ'.HashQualified (DisplayThing (Decl v a)) Reference (Set HQ'.HashQualified) + deriving (Eq, Show) + +pattern Tm n t r as = Tm' (TermResult' n t r as) +pattern Tp n t r as = Tp' (TypeResult' n t r as) + +tmReferent :: SearchResult' v a -> Maybe Referent +tmReferent = \case; Tm _ _ r _ -> Just r; _ -> Nothing +tpReference :: SearchResult' v a -> Maybe Reference +tpReference = \case; Tp _ _ r _ -> Just r; _ -> Nothing + +foldResult' :: (TermResult' v a -> b) -> (TypeResult' v a -> b) -> SearchResult' v a -> b +foldResult' f g = \case + Tm' tm -> f tm + Tp' tp -> g tp + 
+-- todo: comment me out, is this actually useful, given what we saw in ShowDefinitionI? +-- namely, that it doesn't include the Term's deps, just the Decl's and the +-- result Term/Type names. +labeledDependencies :: Ord v => SearchResult' v a -> Set LabeledDependency +labeledDependencies = \case + Tm' (TermResult' _ t r _) -> + Set.insert (LD.referent r) $ maybe mempty (Set.map LD.typeRef . Type.dependencies) t + Tp' (TypeResult' _ d r _) -> + Set.map LD.typeRef . Set.insert r $ maybe mempty DD.declDependencies (DT.toMaybe d) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs b/parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs new file mode 100644 index 0000000000..ff772168a2 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/SlurpComponent.hs @@ -0,0 +1,87 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codebase.Editor.SlurpComponent where + +import Unison.Prelude + +import Data.Tuple (swap) +import Unison.Reference ( Reference ) +import Unison.UnisonFile (TypecheckedUnisonFile) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Unison.DataDeclaration as DD +import qualified Unison.Term as Term +import qualified Unison.UnisonFile as UF + +data SlurpComponent v = + SlurpComponent { types :: Set v, terms :: Set v } + deriving (Eq,Ord,Show) + +isEmpty :: SlurpComponent v -> Bool +isEmpty sc = Set.null (types sc) && Set.null (terms sc) + +empty :: Ord v => SlurpComponent v +empty = SlurpComponent mempty mempty + +difference :: Ord v => SlurpComponent v -> SlurpComponent v -> SlurpComponent v +difference c1 c2 = SlurpComponent types' terms' where + types' = types c1 `Set.difference` types c2 + terms' = terms c1 `Set.difference` terms c2 + +intersection :: Ord v => SlurpComponent v -> SlurpComponent v -> SlurpComponent v +intersection c1 c2 = SlurpComponent types' terms' where + types' = types c1 `Set.intersection` types c2 + terms' = terms c1 `Set.intersection` terms c2 + 
+instance Ord v => Semigroup (SlurpComponent v) where (<>) = mappend +instance Ord v => Monoid (SlurpComponent v) where + mempty = SlurpComponent mempty mempty + c1 `mappend` c2 = SlurpComponent (types c1 <> types c2) + (terms c1 <> terms c2) + + +-- I'm calling this `closeWithDependencies` because it doesn't just compute +-- the dependencies of the inputs, it mixes them together. Make sure this +-- is what you want. +closeWithDependencies :: forall v a. Ord v + => TypecheckedUnisonFile v a -> SlurpComponent v -> SlurpComponent v +closeWithDependencies uf inputs = seenDefns where + seenDefns = foldl' termDeps (SlurpComponent mempty seenTypes) (terms inputs) + seenTypes = foldl' typeDeps mempty (types inputs) + + termDeps :: SlurpComponent v -> v -> SlurpComponent v + termDeps seen v | Set.member v (terms seen) = seen + termDeps seen v = fromMaybe seen $ do + term <- findTerm v + let -- get the `v`s for the transitive dependency types + -- (the ones for terms are just the `freeVars below`) + -- although this isn't how you'd do it for a term that's already in codebase + tdeps :: [v] + tdeps = resolveTypes $ Term.dependencies term + seenTypes :: Set v + seenTypes = foldl' typeDeps (types seen) tdeps + seenTerms = Set.insert v (terms seen) + pure $ foldl' termDeps (seen { types = seenTypes + , terms = seenTerms}) + (Term.freeVars term) + + typeDeps :: Set v -> v -> Set v + typeDeps seen v | Set.member v seen = seen + typeDeps seen v = fromMaybe seen $ do + dd <- fmap snd (Map.lookup v (UF.dataDeclarations' uf)) <|> + fmap (DD.toDataDecl . 
snd) (Map.lookup v (UF.effectDeclarations' uf)) + pure $ foldl' typeDeps (Set.insert v seen) (resolveTypes $ DD.dependencies dd) + + resolveTypes :: Set Reference -> [v] + resolveTypes rs = [ v | r <- Set.toList rs, Just v <- [Map.lookup r typeNames]] + + findTerm :: v -> Maybe (Term.Term v a) + findTerm v = Map.lookup v allTerms + + allTerms = UF.allTerms uf + + typeNames :: Map Reference v + typeNames = invert (fst <$> UF.dataDeclarations' uf) <> invert (fst <$> UF.effectDeclarations' uf) + + invert :: forall k v . Ord k => Ord v => Map k v -> Map v k + invert m = Map.fromList (swap <$> Map.toList m) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs b/parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs new file mode 100644 index 0000000000..a65d80f183 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/SlurpResult.hs @@ -0,0 +1,391 @@ +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.Editor.SlurpResult where + +import Unison.Prelude + +import Unison.Codebase.Editor.SlurpComponent (SlurpComponent(..)) +import Unison.Name ( Name ) +import Unison.Parser ( Ann ) +import Unison.Var (Var) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Unison.Codebase.Editor.SlurpComponent as SC +import qualified Unison.DataDeclaration as DD +import qualified Unison.DeclPrinter as DeclPrinter +import qualified Unison.HashQualified as HQ +import qualified Unison.Name as Name +import qualified Unison.Names2 as Names +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Referent as Referent +import qualified Unison.TypePrinter as TP +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Monoid as Monoid +import qualified Unison.Util.Pretty as P +import qualified Unison.Util.Relation as R +import qualified Unison.Var as Var + +-- `oldRefNames` are the previously 
existing names for the old reference +-- (these names will all be pointed to a new reference) +-- `newRefNames` are the previously existing names for the new reference +-- (the reference that all the old names will point to after the update) +data Aliases + = AddAliases (Set Name) + | UpdateAliases { oldRefNames :: Set Name + , newRefNames :: Set Name } + deriving (Show, Eq, Ord) + +data SlurpResult v = SlurpResult { + -- The file that we tried to add from + originalFile :: UF.TypecheckedUnisonFile v Ann + -- Extra definitions that were added to satisfy transitive closure, + -- beyond what the user specified. + , extraDefinitions :: SlurpComponent v + -- Previously existed only in the file; now added to the codebase. + , adds :: SlurpComponent v + -- Exists in the branch and the file, with the same name and contents. + , duplicates :: SlurpComponent v + -- Not added to codebase due to the name already existing + -- in the branch with a different definition. + , collisions :: SlurpComponent v + -- Not added to codebase due to the name existing + -- in the branch with a conflict (two or more definitions). + , conflicts :: SlurpComponent v + -- Names that already exist in the branch, but whose definitions + -- in `originalFile` are treated as updates. + , updates :: SlurpComponent v + -- Names of terms in `originalFile` that couldn't be updated because + -- they refer to existing constructors. (User should instead do a find/replace, + -- a constructor rename, or refactor the type that the name comes from). + , termExistingConstructorCollisions :: Set v + , constructorExistingTermCollisions :: Set v + -- -- Already defined in the branch, but with a different name. + , termAlias :: Map v Aliases + , typeAlias :: Map v Aliases + , defsWithBlockedDependencies :: SlurpComponent v + } deriving (Show) + +-- Returns the set of constructor names for type names in the given `Set`. 
+constructorsFor :: Var v => Set v -> UF.TypecheckedUnisonFile v Ann -> Set v +constructorsFor types uf = let + names = UF.typecheckedToNames0 uf + typesRefs = Set.unions $ Names.typesNamed names . Name.fromVar <$> toList types + ctorNames = R.filterRan isOkCtor (Names.terms names) + isOkCtor (Referent.Con r _ _) | Set.member r typesRefs = True + isOkCtor _ = False + in Set.map Name.toVar $ R.dom ctorNames + +-- Remove `removed` from the slurp result, and move any defns with transitive +-- dependencies on the removed component into `defsWithBlockedDependencies`. +-- Also removes `removed` from `extraDefinitions`. +subtractComponent :: forall v. Var v => SlurpComponent v -> SlurpResult v -> SlurpResult v +subtractComponent removed sr = + sr { adds = SC.difference (adds sr) (removed <> blocked) + , updates = SC.difference (updates sr) (removed <> blocked) + , defsWithBlockedDependencies = blocked + , extraDefinitions = SC.difference (extraDefinitions sr) blocked + } + where + -- for each v in adds, move to blocked if transitive dependency in removed + blocked = defsWithBlockedDependencies sr <> + SC.difference (blockedTerms <> blockedTypes) removed + + uf = originalFile sr + constructorsFor v = case UF.lookupDecl v uf of + Nothing -> mempty + Just (_, e) -> Set.fromList . DD.constructorVars $ either DD.toDataDecl id e + + blockedTypes = foldMap doType . SC.types $ adds sr <> updates sr where + -- include this type if it or any of its dependencies are removed + doType :: v -> SlurpComponent v + doType v = + if null (Set.intersection (SC.types removed) (SC.types (SC.closeWithDependencies uf vc))) + && null (Set.intersection (SC.terms removed) (constructorsFor v)) + then mempty else vc + where vc = mempty { types = Set.singleton v } + + blockedTerms = foldMap doTerm . 
SC.terms $ adds sr <> updates sr where + doTerm :: v -> SlurpComponent v + doTerm v = + if mempty == SC.intersection removed (SC.closeWithDependencies uf vc) + then mempty else vc + where vc = mempty { terms = Set.singleton v } + +-- Move `updates` to `collisions`, and move any dependents of those updates to `*WithBlockedDependencies`. +-- Subtract stuff from `extraDefinitions` that isn't in `adds` or `updates` +disallowUpdates :: forall v. Var v => SlurpResult v -> SlurpResult v +disallowUpdates sr = + let sr2 = subtractComponent (updates sr) sr + in sr2 { collisions = collisions sr2 <> updates sr } + +isNonempty :: Ord v => SlurpResult v -> Bool +isNonempty s = Monoid.nonEmpty (adds s) || Monoid.nonEmpty (updates s) + +data Status = + Add | Update | Duplicate | Collision | Conflicted | + TermExistingConstructorCollision | ConstructorExistingTermCollision | + ExtraDefinition | BlockedDependency + deriving (Ord,Eq,Show) + +isFailure :: Status -> Bool +isFailure s = case s of + TermExistingConstructorCollision -> True + ConstructorExistingTermCollision -> True + BlockedDependency -> True + Collision -> True + Conflicted -> True + _ -> False + +prettyStatus :: Status -> P.Pretty P.ColorText +prettyStatus s = case s of + Add -> "added" + Update -> "updated" + Collision -> "needs update" + Conflicted -> "conflicted" + Duplicate -> "duplicate" + TermExistingConstructorCollision -> "term/ctor collision" + ConstructorExistingTermCollision -> "ctor/term collision" + BlockedDependency -> "blocked" + ExtraDefinition -> "extra dependency" + +type IsPastTense = Bool + +prettyVar :: Var v => v -> P.Pretty P.ColorText +prettyVar = P.text . Var.name + +aliasesToShow :: Int +aliasesToShow = 5 + +pretty + :: forall v + . 
Var v + => IsPastTense + -> PPE.PrettyPrintEnv + -> SlurpResult v + -> P.Pretty P.ColorText +pretty isPast ppe sr = + let + tms = UF.hashTerms (originalFile sr) + goodIcon = P.green "⍟ " + badIcon = P.red "x " + plus = P.green " " + oxfordAliases shown sz end = + P.oxfordCommasWith end $ (P.shown <$> shown) ++ case sz of + 0 -> [] + n -> [P.shown n <> " more"] + okType v = (plus <>) $ case UF.lookupDecl v (originalFile sr) of + Just (_, dd) -> + P.syntaxToColor (DeclPrinter.prettyDeclHeader (HQ.unsafeFromVar v) dd) + <> if null aliases + then mempty + else P.newline <> P.indentN 2 (P.lines aliases) + where aliases = aliasesMessage . Map.lookup v $ typeAlias sr + Nothing -> P.bold (prettyVar v) <> P.red " (Unison bug, unknown type)" + + aliasesMessage aliases = case aliases of + Nothing -> [] + Just (AddAliases (splitAt aliasesToShow . toList -> (shown, rest))) -> + [ P.indentN 2 . P.wrap $ + P.hiBlack "(also named " <> oxfordAliases + shown + (length rest) + (P.hiBlack ")") + ] + Just (UpdateAliases oldNames newNames) -> + let oldMessage = + let (shown, rest) = splitAt aliasesToShow $ toList oldNames + sz = length oldNames + in P.indentN + 2 + ( P.wrap + $ P.hiBlack + ( "(The old definition " + <> (if isPast then "was" else "is") + <> " also named " + ) + <> oxfordAliases shown (length rest) (P.hiBlack ".") + <> P.hiBlack + (case (sz, isPast) of + (1, True ) -> "I updated this name too.)" + (1, False) -> "I'll update this name too.)" + (_, True ) -> "I updated these names too.)" + (_, False) -> "I'll update these names too.)" + ) + ) + newMessage = + let (shown, rest) = splitAt aliasesToShow $ toList newNames + sz = length rest + in P.indentN + 2 + ( P.wrap + $ P.hiBlack "(The new definition is already named " + <> oxfordAliases shown sz (P.hiBlack " as well.)") + ) + in (if null oldNames then mempty else [oldMessage]) + ++ (if null newNames then mempty else [newMessage]) + + -- The second field in the result is an optional second column. 
+ okTerm :: v -> [(P.Pretty P.ColorText, Maybe (P.Pretty P.ColorText))] + okTerm v = case Map.lookup v tms of + Nothing -> + [(P.bold (prettyVar v), Just $ P.red "(Unison bug, unknown term)")] + Just (_, _, ty) -> + ( plus <> P.bold (prettyVar v) + , Just $ ": " <> P.indentNAfterNewline 2 (TP.pretty ppe ty) + ) + : ((, Nothing) <$> aliases) + where + aliases = fmap (P.indentN 2) . aliasesMessage . Map.lookup v $ termAlias sr + ok _ _ sc | SC.isEmpty sc = mempty + ok past present sc = + let header = goodIcon <> P.indentNAfterNewline + 2 + (P.wrap (if isPast then past else present)) + updatedTypes = P.lines $ okType <$> toList (SC.types sc) + updatedTerms = P.mayColumn2 . (=<<) okTerm . Set.toList $ SC.terms sc + in header <> "\n\n" <> P.linesNonEmpty [updatedTypes, updatedTerms] + okToUpdate = ok + (P.green "I've updated these names to your new definition:") + ( P.green + $ "These names already exist. You can `update` them " + <> "to your new definition:" + ) + okToAdd = ok (P.green "I've added these definitions:") + (P.green "These new definitions are ok to `add`:") + notOks _past _present sr | isOk sr = mempty + notOks past present sr = + let + header = badIcon <> P.indentNAfterNewline + 2 + (P.wrap (if isPast then past else present)) + typeLineFor status v = case UF.lookupDecl v (originalFile sr) of + Just (_, dd) -> + ( prettyStatus status + , P.syntaxToColor + $ DeclPrinter.prettyDeclHeader (HQ.unsafeFromVar v) dd + ) + Nothing -> + ( prettyStatus status + , prettyVar v <> P.red (P.wrap " (Unison bug, unknown type)") + ) + typeMsgs = + P.column2 + $ (typeLineFor Conflicted <$> toList (types (conflicts sr))) + ++ (typeLineFor Collision <$> toList (types (collisions sr))) + ++ ( typeLineFor BlockedDependency + <$> toList (types (defsWithBlockedDependencies sr)) + ) + termLineFor status v = case Map.lookup v tms of + Just (_ref, _tm, ty) -> + ( prettyStatus status + , P.bold (P.text $ Var.name v) + , ": " <> P.indentNAfterNewline 6 (TP.pretty ppe ty) + ) + Nothing 
-> (prettyStatus status, P.text (Var.name v), "") + termMsgs = + P.column3sep " " + $ (termLineFor Conflicted <$> toList (terms (conflicts sr))) + ++ (termLineFor Collision <$> toList (terms (collisions sr))) + ++ ( termLineFor TermExistingConstructorCollision + <$> toList (termExistingConstructorCollisions sr) + ) + ++ ( termLineFor ConstructorExistingTermCollision + <$> toList (constructorExistingTermCollisions sr) + ) + ++ ( termLineFor BlockedDependency + <$> toList (terms (defsWithBlockedDependencies sr)) + ) + in + header + <> "\n\n" + <> P.hiBlack " Reason" + <> "\n" + <> P.indentN 2 (P.linesNonEmpty [typeMsgs, termMsgs]) + <> "\n\n" + <> P.indentN + 2 + (P.column2 [("Tip:", "Use `help filestatus` to learn more.")]) + dups = Set.toList (SC.terms (duplicates sr) <> SC.types (duplicates sr)) + more i = + "... " + <> P.bold (P.shown i) + <> P.hiBlack " more." + <> "Try moving these below the `---` \"fold\" in your file." + in + P.sepNonEmpty + "\n\n" + [ if SC.isEmpty (duplicates sr) + then mempty + else + (if isPast + then "⊡ Ignored previously added definitions: " + else "⊡ Previously added definitions will be ignored: " + ) + <> P.indentNAfterNewline + 2 + (P.wrap $ P.excerptSep' (Just 7) + more + " " + (P.hiBlack . 
prettyVar <$> dups) + ) + , okToAdd (adds sr) + , okToUpdate (updates sr) + , notOks + (P.red "These definitions failed:") + (P.wrap $ P.red "These definitions would fail on `add` or `update`:") + sr + ] + +isOk :: Ord v => SlurpResult v -> Bool +isOk SlurpResult {..} = + SC.isEmpty collisions && + SC.isEmpty conflicts && + Set.null termExistingConstructorCollisions && + Set.null constructorExistingTermCollisions && + SC.isEmpty defsWithBlockedDependencies + +isAllDuplicates :: Ord v => SlurpResult v -> Bool +isAllDuplicates SlurpResult {..} = + SC.isEmpty adds && + SC.isEmpty updates && + SC.isEmpty extraDefinitions && + SC.isEmpty collisions && + SC.isEmpty conflicts && + Map.null typeAlias && + Map.null termAlias && + Set.null termExistingConstructorCollisions && + Set.null constructorExistingTermCollisions && + SC.isEmpty defsWithBlockedDependencies + +-- stack repl +-- +-- λ> import Unison.Util.Pretty +-- λ> import Unison.Codebase.Editor.SlurpResult +-- λ> putStrLn $ toANSI 80 ex +ex :: P.Pretty P.ColorText +ex = P.indentN 2 $ P.lines ["", + P.green "▣ I've added these definitions: ", "", + P.indentN 2 . P.column2 $ [("a", "Nat"), ("map", "(a -> b) -> [a] -> [b]")], + "", + P.green "▣ I've updated these definitions: ", "", + P.indentN 2 . P.column2 $ [("c", "Nat"), ("flatMap", "(a -> [b]) -> [a] -> [b]")], + "", + P.wrap $ P.red "x" <> P.bold "These definitions couldn't be added:", "", + P.indentN 2 $ + P.lines [ + P.column2 [(P.hiBlack + "Reason for failure Symbol ", P.hiBlack "Type"), + ("ctor/term collision foo ", "Nat"), + ("failed dependency zoot ", "[a] -> [a] -> [a]"), + ("term/ctor collision unique type Foo ", "f x")], + "", "Tip: use `help filestatus` to learn more." 
+ ], + "", + "⊡ Ignoring previously added definitions: " <> + P.indentNAfterNewline 2 ( + P.hiBlack (P.wrap $ P.sep " " ["zonk", "anotherOne", "List.wrangle", "oatbag", "blarg", "mcgee", P.group "ability Woot"])), + "" + ] diff --git a/parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs b/parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs new file mode 100644 index 0000000000..f117523a94 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/TodoOutput.hs @@ -0,0 +1,63 @@ +{-# LANGUAGE RecordWildCards #-} +module Unison.Codebase.Editor.TodoOutput where + +import Unison.Prelude + +import qualified Unison.Names3 as Names +import qualified Unison.Type as Type +import qualified Unison.Util.Relation as R +import qualified Unison.Codebase.Patch as Patch +import qualified Data.Set as Set +import qualified Unison.DataDeclaration as DD +import Unison.Reference (Reference) +import Unison.Names3 (Names0) +import Unison.Codebase.Patch (Patch) +import Unison.Codebase.Editor.DisplayThing (DisplayThing(RegularThing)) +import Unison.Type (Type) +import Unison.DataDeclaration (Decl) +import qualified Unison.LabeledDependency as LD +import Unison.LabeledDependency (LabeledDependency) + +type Score = Int + +data TodoOutput v a = TodoOutput + { todoScore :: Score + , todoFrontier :: + ( [(Reference, Maybe (Type v a))] + , [(Reference, DisplayThing (Decl v a))]) + , todoFrontierDependents :: + ( [(Score, Reference, Maybe (Type v a))] + , [(Score, Reference, DisplayThing (Decl v a))]) + , nameConflicts :: Names0 + , editConflicts :: Patch + } deriving (Show) + +labeledDependencies :: Ord v => TodoOutput v a -> Set LabeledDependency +labeledDependencies TodoOutput{..} = Set.fromList ( + -- term refs + [LD.termRef r | (r, _) <- fst todoFrontier] <> + [LD.termRef r | (_, r, _) <- fst todoFrontierDependents] <> + [LD.typeRef r | (r, _) <- snd todoFrontier] <> + [LD.typeRef r | (_, r, _) <- snd todoFrontierDependents] <> + -- types of term refs + [LD.typeRef r | 
(_, Just t) <- fst todoFrontier + , r <- toList (Type.dependencies t)] <> + [LD.typeRef r | (_, _, Just t) <- fst todoFrontierDependents + , r <- toList (Type.dependencies t)] <> + -- and decls of type refs + [LD.typeRef r | (_, RegularThing d) <- snd todoFrontier + , r <- toList (DD.declDependencies d)] <> + [LD.typeRef r | (_, _, RegularThing d) <- snd todoFrontierDependents + , r <- toList (DD.declDependencies d)]) <> + -- name conflicts + Set.map LD.referent (R.ran (Names.terms0 nameConflicts)) <> + Set.map LD.typeRef (R.ran (Names.types0 nameConflicts)) <> + Patch.labeledDependencies editConflicts + +noConflicts :: TodoOutput v a -> Bool +noConflicts todo = + nameConflicts todo == mempty && editConflicts todo == Patch.empty + +noEdits :: TodoOutput v a -> Bool +noEdits todo = + todoScore todo == 0 diff --git a/parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs b/parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs new file mode 100644 index 0000000000..99f4c12642 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/UriParser.hs @@ -0,0 +1,166 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.Editor.UriParser (repoPath) where + +import qualified Text.Megaparsec as P +import qualified Text.Megaparsec.Char.Lexer as L +import qualified Text.Megaparsec.Char as C +import Data.Text as Text + +import Unison.Codebase.Path (Path(..)) +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.Editor.RemoteRepo (RemoteRepo(..), RemoteNamespace) +import Unison.Codebase.ShortBranchHash (ShortBranchHash(..)) +import Unison.Prelude +import qualified Unison.Hash as Hash +import qualified Unison.Lexer +import Unison.NameSegment (NameSegment(..)) +import Data.Sequence as Seq +import Data.Char (isAlphaNum, isSpace, isDigit) + +type P = P.Parsec () Text + +-- Here are the git protocols that we know how to parse +-- Local Protocol +-- $ git clone /srv/git/project.git +-- $ git clone 
/srv/git/project.git[:treeish][:[#hash][.path]] +-- File Protocol +-- $ git clone file:///srv/git/project.git[:treeish][:[#hash][.path]] +-- Smart / Dumb HTTP protocol +-- $ git clone https://example.com/gitproject.git[:treeish][:[#hash][.path]] +-- SSH Protocol +-- $ git clone ssh://[user@]server/project.git[:treeish][:[#hash][.path]] +-- $ git clone [user@]server:project.git[:treeish][:[#hash][.path]] +-- Git Protocol (obsolete) +repoPath :: P RemoteNamespace +repoPath = P.label "generic git repo" $ do + protocol <- parseProtocol + treeish <- P.optional treeishSuffix + let repo = GitRepo (printProtocol protocol) treeish + nshashPath <- P.optional (C.char ':' *> namespaceHashPath) + case nshashPath of + Nothing -> pure (repo, Nothing, Path.empty) + Just (sbh, p) -> pure (repo, sbh, p) + +-- does this not exist somewhere in megaparsec? yes in 7.0 +symbol :: Text -> P Text +symbol = L.symbol (pure ()) + +data GitProtocol + = HttpsProtocol (Maybe User) HostInfo UrlPath + | SshProtocol (Maybe User) HostInfo UrlPath + | ScpProtocol (Maybe User) Host UrlPath + | FileProtocol UrlPath + | LocalProtocol UrlPath + deriving (Eq, Ord, Show) + +printProtocol :: GitProtocol -> Text +--printProtocol x | traceShow x False = undefined +printProtocol x = case x of + HttpsProtocol muser hostInfo path -> "https://" + <> printUser muser + <> printHostInfo hostInfo + <> path + SshProtocol muser hostInfo path -> "ssh://" + <> printUser muser + <> printHostInfo hostInfo + <> path + ScpProtocol muser host path -> printUser muser <> host <> ":" <> path + FileProtocol path -> "file://" <> path + LocalProtocol path -> path + where + printUser = maybe mempty (\(User u) -> u <> "@") + printHostInfo :: HostInfo -> Text + printHostInfo (HostInfo hostname mport) = + hostname <> maybe mempty (Text.cons ':') mport + +data Scheme = Ssh | Https + deriving (Eq, Ord, Show) + +data User = User Text + deriving (Eq, Ord, Show) + +type UrlPath = Text + +data HostInfo = HostInfo Text (Maybe Text) + deriving 
(Eq, Ord, Show) + +type Host = Text -- no port + +-- doesn't yet handle basic authentication like https://user:pass@server.com +-- (does anyone even want that?) +-- or handle ipv6 addresses (https://en.wikipedia.org/wiki/IPv6#Addressing) +parseProtocol :: P GitProtocol +parseProtocol = P.label "parseProtocol" $ + fileRepo <|> httpsRepo <|> sshRepo <|> scpRepo <|> localRepo + where + localRepo, fileRepo, httpsRepo, sshRepo, scpRepo :: P GitProtocol + parsePath = + P.takeWhile1P (Just "repo path character") + (\c -> not (isSpace c || c == ':')) + localRepo = LocalProtocol <$> parsePath + fileRepo = P.label "fileRepo" $ do + void $ symbol "file://" + FileProtocol <$> parsePath + httpsRepo = P.label "httpsRepo" $ do + void $ symbol "https://" + HttpsProtocol <$> P.optional userInfo <*> parseHostInfo <*> parsePath + sshRepo = P.label "sshRepo" $ do + void $ symbol "ssh://" + SshProtocol <$> P.optional userInfo <*> parseHostInfo <*> parsePath + scpRepo = P.label "scpRepo" . P.try $ + ScpProtocol <$> P.optional userInfo <*> parseHost <* symbol ":" <*> parsePath + userInfo :: P User + userInfo = P.label "userInfo" . P.try $ do + username <- P.takeWhile1P (Just "username character") (/= '@') + void $ C.char '@' + pure $ User username + parseHostInfo :: P HostInfo + parseHostInfo = P.label "parseHostInfo" $ + HostInfo <$> parseHost <*> (P.optional $ do + void $ symbol ":" + P.takeWhile1P (Just "digits") isDigit) + + parseHost = P.label "parseHost" $ hostname <|> ipv4 -- <|> ipv6 + where + hostname = + P.takeWhile1P (Just "hostname character") + (\c -> isAlphaNum c || c == '.' || c == '-') + ipv4 = P.label "ipv4 address" $ do + o1 <- decOctet + void $ C.char '.' + o2 <- decOctet + void $ C.char '.' + o3 <- decOctet + void $ C.char '.' + o4 <- decOctet + pure $ Text.pack $ o1 <> "." <> o2 <> "." <> o3 <> "." 
<> o4 + decOctet = P.count' 1 3 C.digitChar + +-- #nshashabc.path.foo.bar or .path.foo.bar +namespaceHashPath :: P (Maybe ShortBranchHash, Path) +namespaceHashPath = do + sbh <- P.optional shortBranchHash + p <- P.optional $ do + void $ C.char '.' + P.sepBy1 + ((:) <$> C.satisfy Unison.Lexer.wordyIdStartChar + <*> P.many (C.satisfy Unison.Lexer.wordyIdChar)) + (C.char '.') + case p of + Nothing -> pure (sbh, Path.empty) + Just p -> pure (sbh, makePath p) + where makePath = Path . Seq.fromList . fmap (NameSegment . Text.pack) + +treeishSuffix :: P Text +treeishSuffix = P.label "git treeish" . P.try $ do + void $ C.char ':' + notdothash <- C.noneOf @[] ".#:" + rest <- P.takeWhileP (Just "not colon") (/= ':') + pure $ Text.cons notdothash rest + +shortBranchHash :: P ShortBranchHash +shortBranchHash = P.label "short branch hash" $ do + void $ C.char '#' + ShortBranchHash <$> + P.takeWhile1P (Just "base32hex chars") (`elem` Hash.validBase32HexChars) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs b/parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs new file mode 100644 index 0000000000..3e638cda85 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Editor/VersionParser.hs @@ -0,0 +1,27 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.Editor.VersionParser where + +import Text.Megaparsec +import Unison.Codebase.Editor.RemoteRepo +import Text.Megaparsec.Char +import Data.Functor (($>)) +import Data.Text (Text) +import qualified Data.Text as Text +import qualified Unison.Codebase.Path as Path +import Data.Void (Void) + +-- |"release/M1j.2" -> "releases._M1j" +-- "devel/*" -> "trunk" +defaultBaseLib :: Parsec Void Text RemoteNamespace +defaultBaseLib = fmap makeNS $ devel <|> release + where + devel, release, version :: Parsec Void Text Text + devel = "devel/" *> many anyChar *> eof $> "trunk" + release = fmap ("releases._" <>) $ "release/" *> version <* eof + version = fmap Text.pack $ + try (someTill anyChar 
"." <* many anyChar) <|> many anyChar + makeNS :: Text -> RemoteNamespace + makeNS t = ( GitRepo "https://github.com/unisonweb/base" Nothing + , Nothing + , Path.fromText t) diff --git a/parser-typechecker/src/Unison/Codebase/Execute.hs b/parser-typechecker/src/Unison/Codebase/Execute.hs new file mode 100644 index 0000000000..97507be75b --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Execute.hs @@ -0,0 +1,56 @@ +{-# LANGUAGE GADTs #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE OverloadedStrings #-} + +-- | Execute a computation of type '{IO} () that has been previously added to +-- the codebase, without setting up an interactive environment. +-- +-- This allows one to run standalone applications implemented in the Unison +-- language. + +module Unison.Codebase.Execute where + +import Unison.Prelude + +import Unison.Codebase.MainTerm ( getMainTerm ) +import qualified Unison.Codebase.MainTerm as MainTerm +import qualified Unison.Codebase as Codebase +import Unison.Parser ( Ann ) +import qualified Unison.Codebase.Runtime as Runtime +import Unison.Codebase.Runtime ( Runtime ) +import Unison.Var ( Var ) +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Names3 as Names3 +import qualified Unison.Codebase.Branch as Branch +import System.Exit (die) +import Control.Exception (finally) + +execute + :: Var v + => Codebase.Codebase IO v Ann + -> Runtime v + -> String + -> IO () +execute codebase runtime mainName = + (`finally` Runtime.terminate runtime) $ do + root <- Codebase.getRootBranch codebase >>= \case + Right r -> pure r + Left Codebase.NoRootBranch -> + die ("Couldn't identify a root namespace.") + Left (Codebase.CouldntLoadRootBranch h) -> + die ("Couldn't load root branch " ++ show h) + Left (Codebase.CouldntParseRootBranch h) -> + die ("Couldn't parse root branch head " ++ show h) + let parseNames0 = Names3.makeAbsolute0 (Branch.toNames0 (Branch.head root)) + loadTypeOfTerm = 
Codebase.getTypeOfTerm codebase + let mainType = Runtime.mainType runtime + mt <- getMainTerm loadTypeOfTerm parseNames0 mainName mainType + case mt of + MainTerm.NotAFunctionName s -> die ("Not a function name: " ++ s) + MainTerm.NotFound s -> die ("Not found: " ++ s) + MainTerm.BadType s -> die (s ++ " is not of type '{IO} ()") + MainTerm.Success _ tm _ -> do + let codeLookup = Codebase.toCodeLookup codebase + ppe = PPE.PrettyPrintEnv (const Nothing) (const Nothing) + void $ Runtime.evaluateTerm codeLookup ppe runtime tm diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs new file mode 100644 index 0000000000..55b4558deb --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -0,0 +1,282 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.FileCodebase +( getRootBranch -- used by Git module +, branchHashesByPrefix -- used by Git module +, branchFromFiles -- used by Git module +, codebase1 -- used by Main +, codebase1' -- used by Test/Git +, codebaseExists -- used by Main +, initCodebaseAndExit +, initCodebase +, getCodebaseOrExit +, getCodebaseDir +) where + +import Unison.Prelude + +import UnliftIO ( MonadUnliftIO ) +import UnliftIO.Exception ( catchIO ) +import UnliftIO.Concurrent ( forkIO + , killThread + ) +import UnliftIO.STM ( atomically ) +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Data.Text.IO as TextIO +import UnliftIO.Directory ( createDirectoryIfMissing + , doesDirectoryExist + ) +import System.FilePath ( takeFileName + ) +import System.Directory ( getHomeDirectory + , canonicalizePath + ) +import System.Environment ( getProgName ) +import System.Exit ( exitFailure, exitSuccess ) +import qualified Unison.Codebase as Codebase +import Unison.Codebase ( Codebase(Codebase) + , BuiltinAnnotation + , CodebasePath + ) 
+import Unison.Codebase.Branch ( Branch ) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Reflog as Reflog +import qualified Unison.Codebase.Serialization as S +import qualified Unison.Codebase.Serialization.V1 + as V1 +import qualified Unison.Codebase.Watch as Watch +import Unison.Parser (Ann() ) +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import qualified Unison.Util.TQueue as TQueue +import Unison.Var ( Var ) +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Cache as Cache +import qualified Unison.Util.Pretty as P +import qualified Unison.PrettyTerminal as PT +import Unison.Symbol ( Symbol ) +import qualified Unison.Codebase.FileCodebase.Common as Common +import Unison.Codebase.FileCodebase.Common + ( Err(CantParseBranchHead) + , codebaseExists + --- + , branchHeadDir + , dependentsDir + , reflogPath + , typeIndexDir + , typeMentionsIndexDir + , watchesDir + --- + , componentIdFromString + , hashFromFilePath + , referentIdFromString + , decodeFileName + , formatAnn + , getRootBranch + , getDecl + , getTerm + , getTypeOfTerm + , getWatch + , putDecl + , putTerm + , putRootBranch + , putWatch + --- + , branchFromFiles + , branchHashesByPrefix + , termReferencesByPrefix + , termReferentsByPrefix + , typeReferencesByPrefix + --- + , failWith + , listDirectory + ) + +import qualified Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex as Sync + +initCodebaseAndExit :: Maybe FilePath -> IO () +initCodebaseAndExit mdir = do + dir <- getCodebaseDir mdir + cache <- Cache.cache + _ <- initCodebase cache dir + exitSuccess + +-- initializes a new codebase here (i.e. 
`ucm -codebase dir init`) +initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) +initCodebase cache path = do + theCodebase <- codebase1 cache V1.formatSymbol Common.formatAnn path + prettyDir <- P.string <$> canonicalizePath path + + whenM (codebaseExists path) $ + do PT.putPrettyLn' + . P.wrap + $ "It looks like there's already a codebase in: " + <> prettyDir + exitFailure + + PT.putPrettyLn' + . P.wrap + $ "Initializing a new codebase in: " + <> prettyDir + Codebase.initializeCodebase theCodebase + pure theCodebase + +-- get the codebase in dir, or in the home directory if not provided. +getCodebaseOrExit :: Branch.Cache IO -> Maybe FilePath -> IO (Codebase IO Symbol Ann) +getCodebaseOrExit cache mdir = do + dir <- getCodebaseDir mdir + progName <- getProgName + prettyDir <- P.string <$> canonicalizePath dir + let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir + let theCodebase = codebase1 cache V1.formatSymbol formatAnn dir + unlessM (codebaseExists dir) $ do + PT.putPrettyLn' errMsg + exitFailure + theCodebase + +getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s +getNoCodebaseErrorMsg executable prettyDir mdir = + let secondLine = + case mdir of + Just dir -> "Run `" <> executable <> " -codebase " <> fromString dir + <> " init` to create one, then try again!" + Nothing -> "Run `" <> executable <> " init` to create one there," + <> " then try again;" + <> " or `" <> executable <> " -codebase ` to load a codebase from someplace else!" + in + P.lines + [ "No codebase exists in " <> prettyDir <> "." + , secondLine ] + +getCodebaseDir :: Maybe FilePath -> IO FilePath +getCodebaseDir = maybe getHomeDirectory pure + +-- builds a `Codebase IO v a`, given serializers for `v` and `a` +codebase1 + :: forall m v a + . 
MonadUnliftIO m + => Var v + => BuiltinAnnotation a + => Branch.Cache m -> S.Format v -> S.Format a -> CodebasePath -> m (Codebase m v a) +codebase1 = codebase1' Sync.syncToDirectory + +codebase1' + :: forall m v a + . MonadUnliftIO m + => Var v + => BuiltinAnnotation a + => Common.SyncToDir m v a -> Branch.Cache m -> S.Format v -> S.Format a -> CodebasePath -> m (Codebase m v a) +codebase1' syncToDirectory branchCache fmtV@(S.Format getV putV) fmtA@(S.Format getA putA) path = do + termCache <- Cache.semispaceCache 8192 + typeOfTermCache <- Cache.semispaceCache 8192 + declCache <- Cache.semispaceCache 1024 + let c = + Codebase + (Cache.applyDefined termCache $ getTerm getV getA path) + (Cache.applyDefined typeOfTermCache $ getTypeOfTerm getV getA path) + (Cache.applyDefined declCache $ getDecl getV getA path) + (putTerm putV putA path) + (putDecl putV putA path) + (getRootBranch branchCache path) + (putRootBranch path) + (branchHeadUpdates path) + (branchFromFiles branchCache path) + dependents + (flip (syncToDirectory fmtV fmtA) path) + (syncToDirectory fmtV fmtA path) + watches + (getWatch getV getA path) + (putWatch putV putA path) + getReflog + appendReflog + getTermsOfType + getTermsMentioningType + -- todo: maintain a trie of references to come up with this number + (pure 10) + -- The same trie can be used to make this lookup fast: + (termReferencesByPrefix path) + (typeReferencesByPrefix path) + (termReferentsByPrefix (getDecl getV getA) path) + (pure 10) + (branchHashesByPrefix path) + in pure c + where + dependents :: Reference -> m (Set Reference.Id) + dependents r = listDirAsIds (dependentsDir path r) + getTermsOfType :: Reference -> m (Set Referent.Id) + getTermsOfType r = listDirAsReferents (typeIndexDir path r) + getTermsMentioningType :: Reference -> m (Set Referent.Id) + getTermsMentioningType r = listDirAsReferents (typeMentionsIndexDir path r) + -- todo: revisit these + listDirAsIds :: FilePath -> m (Set Reference.Id) + listDirAsIds d = do + e <- 
doesDirectoryExist d + if e + then do + ls <- fmap decodeFileName <$> listDirectory d + pure . Set.fromList $ ls >>= (toList . componentIdFromString) + else pure Set.empty + listDirAsReferents :: FilePath -> m (Set Referent.Id) + listDirAsReferents d = do + e <- doesDirectoryExist d + if e + then do + ls <- fmap decodeFileName <$> listDirectory d + pure . Set.fromList $ ls >>= (toList . referentIdFromString) + else pure Set.empty + watches :: UF.WatchKind -> m [Reference.Id] + watches k = + liftIO $ do + let wp = watchesDir path (Text.pack k) + createDirectoryIfMissing True wp + ls <- listDirectory wp + pure $ ls >>= (toList . componentIdFromString . takeFileName) + getReflog :: m [Reflog.Entry] + getReflog = + liftIO + (do contents <- TextIO.readFile (reflogPath path) + let lines = Text.lines contents + let entries = parseEntry <$> lines + pure entries) `catchIO` + const (pure []) + where + parseEntry t = fromMaybe (err t) (Reflog.fromText t) + err t = error $ + "I couldn't understand this line in " ++ reflogPath path ++ "\n\n" ++ + Text.unpack t + appendReflog :: Text -> Branch m -> Branch m -> m () + appendReflog reason old new = + let + t = Reflog.toText $ + Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason + in liftIO $ TextIO.appendFile (reflogPath path) (t <> "\n") + +-- watches in `branchHeadDir root` for externally deposited heads; +-- parse them, and return them +branchHeadUpdates + :: MonadUnliftIO m => CodebasePath -> m (m (), m (Set Branch.Hash)) +branchHeadUpdates root = do + branchHeadChanges <- TQueue.newIO + (cancelWatch, watcher) <- Watch.watchDirectory' (branchHeadDir root) +-- -- add .ubf file changes to intermediate queue + watcher1 <- + forkIO + $ forever + $ do + -- Q: what does watcher return on a file deletion? + -- A: nothing + (filePath, _) <- watcher + case hashFromFilePath filePath of + Nothing -> failWith $ CantParseBranchHead filePath + Just h -> + atomically . 
TQueue.enqueue branchHeadChanges $ Branch.Hash h + -- smooth out intermediate queue + pure + ( cancelWatch >> killThread watcher1 + , Set.fromList <$> Watch.collectUntilPause branchHeadChanges 400000 + ) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs new file mode 100644 index 0000000000..426047a2e5 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs @@ -0,0 +1,590 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.FileCodebase.Common + ( Err(..) + , SyncToDir + , SimpleLens + , codebaseExists + , codebasePath + , hashExists + -- dirs (parent of all the files) + , branchHeadDir + , dependentsDir + , dependentsDir' + , typeIndexDir + , typeIndexDir' + , typeMentionsIndexDir + , typeMentionsIndexDir' + , watchesDir + -- paths (looking up one file) + , branchPath + , declPath + , editsPath + , reflogPath + , termPath + , typePath + , watchPath + -- core stuff + , formatAnn + , getDecl + , putDecl + , putRootBranch + , getTerm + , getTypeOfTerm + , putTerm + , getWatch + , putWatch + , updateCausalHead + , serializeEdits + , deserializeEdits + , serializeRawBranch + , branchFromFiles + , branchHashesByPrefix + , termReferencesByPrefix + , termReferentsByPrefix + , typeReferencesByPrefix + -- stringing + , hashFromFilePath + , componentIdFromString + , componentIdToString + , referentIdFromString + -- touching files + , touchIdFile + , touchReferentFile + , touchReferentIdFile + -- util + , copyFileWithParents + , doFileOnce + , failWith + , listDirectory + -- expose for tests :| + , encodeFileName + , decodeFileName + , getRootBranch + + ) where + +import Unison.Prelude + +import Control.Error (runExceptT, ExceptT(..)) +import Control.Lens (Lens, use, to, (%=)) +import Control.Monad.Catch (catch) +import 
Control.Monad.State (MonadState) +import qualified Data.ByteString.Base16 as ByteString (decodeBase16, encodeBase16) +import qualified Data.Char as Char +import Data.List ( isPrefixOf ) +import qualified Data.Set as Set +import qualified Data.Text as Text +import UnliftIO.Directory ( createDirectoryIfMissing + , doesFileExist + , removeFile + , doesDirectoryExist, copyFile + ) +import UnliftIO.IO.File (writeBinaryFile) +import qualified System.Directory +import System.FilePath ( takeBaseName + , takeDirectory + , () + ) +import qualified Unison.Codebase as Codebase +import Unison.Codebase (CodebasePath) +import Unison.Codebase.Causal ( Causal + , RawHash(..) + ) +import qualified Unison.Codebase.Causal as Causal +import Unison.Codebase.Branch ( Branch ) +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.ShortBranchHash (ShortBranchHash(..)) +import qualified Unison.Codebase.ShortBranchHash as SBH +import qualified Unison.Codebase.Serialization as S +import qualified Unison.Codebase.Serialization.V1 as V1 +import Unison.Codebase.SyncMode ( SyncMode ) +import Unison.Codebase.Patch ( Patch(..) 
) +import qualified Unison.ConstructorType as CT +import qualified Unison.DataDeclaration as DD +import qualified Unison.Hash as Hash +import Unison.Parser ( Ann(External) ) +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import Unison.Referent ( Referent ) +import qualified Unison.Referent as Referent +import Unison.ShortHash (ShortHash) +import qualified Unison.ShortHash as SH +import Unison.Term ( Term ) +import qualified Unison.Term as Term +import Unison.Type ( Type ) +import qualified Unison.Type as Type +import Unison.Var ( Var ) +import qualified Unison.UnisonFile as UF +import Unison.Util.Monoid (foldMapM) +import Unison.Util.Timing (time) +import Data.Either.Extra (maybeToEither) + +data Err + = InvalidBranchFile FilePath String + | InvalidEditsFile FilePath String + | NoBranchHead FilePath + | CantParseBranchHead FilePath + | AmbiguouslyTypeAndTerm Reference.Id + | UnknownTypeOrTerm Reference + deriving Show + +type SimpleLens s a = Lens s s a a + +codebasePath :: FilePath +codebasePath = ".unison" "v1" + +formatAnn :: S.Format Ann +formatAnn = S.Format (pure External) (\_ -> pure ()) + +-- Write Branch and its dependents to the dest codebase, and set it as the root. 
+type SyncToDir m v a + = S.Format v + -> S.Format a + -> CodebasePath -- src codebase + -> CodebasePath -- dest codebase + -> SyncMode + -> Branch m -- new dest root branch + -> m () + +termsDir, typesDir, branchesDir, branchHeadDir, editsDir + :: CodebasePath -> FilePath +termsDir root = root codebasePath "terms" +typesDir root = root codebasePath "types" +branchesDir root = root codebasePath "paths" +branchHeadDir root = branchesDir root "_head" +editsDir root = root codebasePath "patches" + +termDir, declDir :: CodebasePath -> Reference.Id -> FilePath +termDir root r = termsDir root componentIdToString r +declDir root r = typesDir root componentIdToString r + +referenceToDir :: Reference -> FilePath +referenceToDir r = case r of + Reference.Builtin name -> "_builtin" encodeFileName (Text.unpack name) + Reference.DerivedId hash -> componentIdToString hash + +dependentsDir', typeIndexDir', typeMentionsIndexDir' :: FilePath -> FilePath + +dependentsDir :: CodebasePath -> Reference -> FilePath +dependentsDir root r = dependentsDir' root referenceToDir r +dependentsDir' root = root codebasePath "dependents" + +watchesDir :: CodebasePath -> Text -> FilePath +watchesDir root UF.RegularWatch = + root codebasePath "watches" "_cache" +watchesDir root kind = + root codebasePath "watches" encodeFileName (Text.unpack kind) +watchPath :: CodebasePath -> UF.WatchKind -> Reference.Id -> FilePath +watchPath root kind id = + watchesDir root (Text.pack kind) componentIdToString id <> ".ub" + +typeIndexDir :: CodebasePath -> Reference -> FilePath +typeIndexDir root r = typeIndexDir' root referenceToDir r +typeIndexDir' root = root codebasePath "type-index" + +typeMentionsIndexDir :: CodebasePath -> Reference -> FilePath +typeMentionsIndexDir root r = typeMentionsIndexDir' root referenceToDir r +typeMentionsIndexDir' root = root codebasePath "type-mentions-index" + +decodeFileName :: FilePath -> String +decodeFileName = let + go ('$':tl) = case span (/= '$') tl of + 
("forward-slash", _:tl) -> '/' : go tl + ("back-slash", _:tl) -> '\\' : go tl + ("colon", _:tl) -> ':' : go tl + ("star", _:tl) -> '*' : go tl + ("question-mark", _:tl) -> '?' : go tl + ("double-quote", _:tl) -> '\"' : go tl + ("less-than", _:tl) -> '<' : go tl + ("greater-than", _:tl) -> '>' : go tl + ("pipe", _:tl) -> '|' : go tl + ('x':hex, _:tl) -> decodeHex hex ++ go tl + ("",_:tl) -> '$' : go tl + (s,_:tl) -> '$' : s ++ '$' : go tl -- unknown escapes left unchanged + (s,[]) -> s + go (hd:tl) = hd : go tl + go [] = [] + decodeHex :: String -> String + decodeHex s = either (const s) (Text.unpack . decodeUtf8) + . ByteString.decodeBase16 . encodeUtf8 . Text.pack $ s + in \case + "$dot$" -> "." + "$dotdot$" -> ".." + t -> go t + +-- https://superuser.com/questions/358855/what-characters-are-safe-in-cross-platform-file-names-for-linux-windows-and-os +encodeFileName :: String -> FilePath +encodeFileName = let + go ('/' : rem) = "$forward-slash$" <> go rem + go ('\\' : rem) = "$back-slash$" <> go rem + go (':' : rem) = "$colon$" <> go rem + go ('*' : rem) = "$star$" <> go rem + go ('?' : rem) = "$question-mark$" <> go rem + go ('"' : rem) = "$double-quote$" <> go rem + go ('<' : rem) = "$less-than$" <> go rem + go ('>' : rem) = "$greater-than$" <> go rem + go ('|' : rem) = "$pipe$" <> go rem + go ('$' : rem) = "$$" <> go rem + go (c : rem) | not (Char.isPrint c && Char.isAscii c) + = "$x" <> encodeHex [c] <> "$" <> go rem + | otherwise = c : go rem + go [] = [] + encodeHex :: String -> String + encodeHex = Text.unpack . Text.toUpper . ByteString.encodeBase16 . + encodeUtf8 . Text.pack + in \case + "." -> "$dot$" + ".." 
-> "$dotdot$" + t -> go t + +termPath, typePath, declPath :: CodebasePath -> Reference.Id -> FilePath +termPath path r = termDir path r "compiled.ub" +typePath path r = termDir path r "type.ub" +declPath path r = declDir path r "compiled.ub" + +branchPath :: CodebasePath -> Branch.Hash -> FilePath +branchPath root (RawHash h) = branchesDir root hashToString h ++ ".ub" + +editsPath :: CodebasePath -> Branch.EditHash -> FilePath +editsPath root h = editsDir root hashToString h ++ ".up" + +reflogPath :: CodebasePath -> FilePath +reflogPath root = root codebasePath "reflog" + +touchIdFile :: MonadIO m => Reference.Id -> FilePath -> m () +touchIdFile id fp = + touchFile (fp encodeFileName (componentIdToString id)) + +touchReferentFile :: MonadIO m => Referent -> FilePath -> m () +touchReferentFile id fp = + touchFile (fp encodeFileName (referentToString id)) + +touchReferentIdFile :: MonadIO m => Referent.Id -> FilePath -> m () +touchReferentIdFile = touchReferentFile . Referent.fromId + +touchFile :: MonadIO m => FilePath -> m () +touchFile fp = do + createDirectoryIfMissing True (takeDirectory fp) + writeBinaryFile fp mempty + +-- checks if `path` looks like a unison codebase +minimalCodebaseStructure :: CodebasePath -> [FilePath] +minimalCodebaseStructure root = [ branchHeadDir root ] + +-- checks if a minimal codebase structure exists at `path` +codebaseExists :: MonadIO m => CodebasePath -> m Bool +codebaseExists root = + and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) + +-- | load a branch w/ children from a FileCodebase +branchFromFiles :: MonadIO m => Branch.Cache m -> CodebasePath -> Branch.Hash -> m (Maybe (Branch m)) +branchFromFiles cache rootDir h = time "FileCodebase.Common.branchFromFiles" $ do + fileExists <- doesFileExist (branchPath rootDir h) + if fileExists then Just <$> + Branch.cachedRead + cache + (deserializeRawBranch rootDir) + (deserializeEdits rootDir) + h + else + pure Nothing + where + deserializeRawBranch + :: MonadIO m 
=> CodebasePath -> Causal.Deserialize m Branch.Raw Branch.Raw + deserializeRawBranch root h = do + let ubf = branchPath root h + S.getFromFile' (V1.getCausal0 V1.getRawBranch) ubf >>= \case + Left err -> failWith $ InvalidBranchFile ubf err + Right c0 -> pure c0 + +deserializeEdits :: MonadIO m => CodebasePath -> Branch.EditHash -> m Patch +deserializeEdits root h = + let file = editsPath root h + in S.getFromFile' V1.getEdits file >>= \case + Left err -> failWith $ InvalidEditsFile file err + Right edits -> pure edits + +getRootBranch :: forall m. + MonadIO m => Branch.Cache m -> CodebasePath -> m (Either Codebase.GetRootBranchError (Branch m)) +getRootBranch cache root = time "FileCodebase.Common.getRootBranch" $ + ifM (codebaseExists root) + (listDirectory (branchHeadDir root) >>= filesToBranch) + (pure $ Left Codebase.NoRootBranch) + where + filesToBranch :: [FilePath] -> m (Either Codebase.GetRootBranchError (Branch m)) + filesToBranch = \case + [] -> pure $ Left Codebase.NoRootBranch + [single] -> runExceptT $ fileToBranch single + conflict -> runExceptT (traverse fileToBranch conflict) >>= \case + Right (x : xs) -> Right <$> foldM Branch.merge x xs + Right _ -> error "FileCodebase.getRootBranch.conflict can't be empty." + Left e -> Left <$> pure e + + fileToBranch :: String -> ExceptT Codebase.GetRootBranchError m (Branch m) + fileToBranch single = ExceptT $ case hashFromString single of + Nothing -> pure . 
Left $ Codebase.CouldntParseRootBranch single + Just (Branch.Hash -> h) -> branchFromFiles cache root h <&> + maybeToEither (Codebase.CouldntLoadRootBranch h) + +-- |only syncs branches and edits -- no dependencies +putRootBranch :: MonadIO m => CodebasePath -> Branch m -> m () +putRootBranch root b = do + Branch.sync (hashExists root) + (serializeRawBranch root) + (serializeEdits root) + b + updateCausalHead (branchHeadDir root) (Branch._history b) + +hashExists :: MonadIO m => CodebasePath -> Branch.Hash -> m Bool +hashExists root h = doesFileExist (branchPath root h) + +serializeRawBranch + :: (MonadIO m) => CodebasePath -> Causal.Serialize m Branch.Raw Branch.Raw +serializeRawBranch root h = + S.putWithParentDirs (V1.putRawCausal V1.putRawBranch) (branchPath root h) + +serializeEdits + :: MonadIO m => CodebasePath -> Branch.EditHash -> m Patch -> m () +serializeEdits root h medits = + unlessM (doesFileExist (editsPath root h)) $ do + edits <- medits + S.putWithParentDirs V1.putEdits (editsPath root h) edits + +-- `headDir` is like ".unison/branches/head", or ".unison/edits/head"; +-- not ".unison"; a little weird. I guess the reason this doesn't take +-- the codebase root path is because it's applicable to any causal. +-- We just have one though, and I suppose that won't change any time soon. +updateCausalHead :: MonadIO m => FilePath -> Causal n h e -> m () +updateCausalHead headDir c = do + let (RawHash h) = Causal.currentHash c + hs = hashToString h + -- write new head + touchFile (headDir hs) + -- delete existing heads + fmap (filter (/= hs)) (listDirectory headDir) + >>= traverse_ (removeFile . (headDir )) + +-- here +hashFromString :: String -> Maybe Hash.Hash +hashFromString = Hash.fromBase32Hex . Text.pack + +-- here +hashToString :: Hash.Hash -> String +hashToString = Hash.base32Hexs + +hashFromFilePath :: FilePath -> Maybe Hash.Hash +hashFromFilePath = hashFromString . 
takeBaseName + +-- here +componentIdToString :: Reference.Id -> String +componentIdToString = Text.unpack . Reference.toText . Reference.DerivedId + +-- here +componentIdFromString :: String -> Maybe Reference.Id +componentIdFromString = Reference.idFromText . Text.pack + +-- here +referentFromString :: String -> Maybe Referent +referentFromString = Referent.fromText . Text.pack + +referentIdFromString :: String -> Maybe Referent.Id +referentIdFromString s = referentFromString s >>= \case + Referent.Ref (Reference.DerivedId r) -> Just $ Referent.Ref' r + Referent.Con (Reference.DerivedId r) i t -> Just $ Referent.Con' r i t + _ -> Nothing + +-- here +referentToString :: Referent -> String +referentToString = Text.unpack . Referent.toText + +copyFileWithParents :: MonadIO m => FilePath -> FilePath -> m () +copyFileWithParents src dest = + unlessM (doesFileExist dest) $ do + createDirectoryIfMissing True (takeDirectory dest) + copyFile src dest + +-- Use State and Lens to do some specified thing at most once, to create a file. +doFileOnce :: forall m s h. (MonadIO m, MonadState s m, Ord h) + => CodebasePath + -> SimpleLens s (Set h) -- lens to track if `h` is already done + -> (CodebasePath -> h -> FilePath) -- done if this filepath exists + -> (h -> m ()) -- do! + -> h -> m () +doFileOnce destPath l getFilename f h = + unlessM (use (l . 
to (Set.member h))) $ do + l %= Set.insert h + unlessM (doesFileExist (getFilename destPath h)) (f h) + +getTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Term v a)) +getTerm getV getA path h = S.getFromFile (V1.getTerm getV getA) (termPath path h) + +getTypeOfTerm :: (MonadIO m, Ord v) => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (Type v a)) +getTypeOfTerm getV getA path h = S.getFromFile (V1.getType getV getA) (typePath path h) + +putTerm + :: MonadIO m + => Var v + => S.Put v + -> S.Put a + -> CodebasePath + -> Reference.Id + -> Term v a + -> Type v a + -> m () +putTerm putV putA path h e typ = do + let typeForIndexing = Type.removeAllEffectVars typ + rootTypeHash = Type.toReference typeForIndexing + typeMentions = Type.toReferenceMentions typeForIndexing + S.putWithParentDirs (V1.putTerm putV putA) (termPath path h) e + S.putWithParentDirs (V1.putType putV putA) (typePath path h) typ + -- Add the term as a dependent of its dependencies + let r = Referent.Ref (Reference.DerivedId h) + let deps = deleteComponent h $ Term.dependencies e <> Type.dependencies typ + traverse_ (touchIdFile h . dependentsDir path) deps + traverse_ (touchReferentFile r . typeMentionsIndexDir path) typeMentions + touchReferentFile r (typeIndexDir path rootTypeHash) + +getDecl :: (MonadIO m, Ord v) + => S.Get v -> S.Get a -> CodebasePath -> Reference.Id -> m (Maybe (DD.Decl v a)) +getDecl getV getA root h = + S.getFromFile + (V1.getEither + (V1.getEffectDeclaration getV getA) + (V1.getDataDeclaration getV getA)) + (declPath root h) + +putDecl + :: MonadIO m + => Var v + => S.Put v + -> S.Put a + -> CodebasePath + -> Reference.Id + -> DD.Decl v a + -> m () +putDecl putV putA path h decl = do + S.putWithParentDirs + (V1.putEither + (V1.putEffectDeclaration putV putA) + (V1.putDataDeclaration putV putA)) + (declPath path h) + decl + traverse_ (touchIdFile h . 
dependentsDir path) deps + traverse_ addCtorToTypeIndex ctors + where + deps = deleteComponent h . DD.dependencies $ either DD.toDataDecl id decl + r = Reference.DerivedId h + decl' = either DD.toDataDecl id decl + addCtorToTypeIndex (r, typ) = do + let rootHash = Type.toReference typ + typeMentions = Type.toReferenceMentions typ + touchReferentFile r (typeIndexDir path rootHash) + traverse_ (touchReferentFile r . typeMentionsIndexDir path) typeMentions + ct = DD.constructorType decl + ctors = + [ (Referent.Con r i ct, Type.removeAllEffectVars t) + | (t,i) <- DD.constructorTypes decl' `zip` [0..] ] + +getWatch :: (MonadIO m, Ord v) + => S.Get v + -> S.Get a + -> CodebasePath + -> UF.WatchKind + -> Reference.Id + -> m (Maybe (Term v a)) +getWatch getV getA path k id = do + let wp = watchesDir path (Text.pack k) + createDirectoryIfMissing True wp + S.getFromFile (V1.getTerm getV getA) (wp componentIdToString id <> ".ub") + +putWatch + :: MonadIO m + => Var v + => S.Put v + -> S.Put a + -> CodebasePath + -> UF.WatchKind + -> Reference.Id + -> Term v a + -> m () +putWatch putV putA root k id e = + S.putWithParentDirs + (V1.putTerm putV putA) + (watchPath root k id) + e + +loadReferencesByPrefix + :: MonadIO m => FilePath -> ShortHash -> m (Set Reference.Id) +loadReferencesByPrefix dir sh = do + refs <- mapMaybe Reference.fromShortHash + . filter (SH.isPrefixOf sh) + . 
mapMaybe SH.fromString + <$> listDirectory dir + pure $ Set.fromList [ i | Reference.DerivedId i <- refs] + +termReferencesByPrefix, typeReferencesByPrefix + :: MonadIO m => CodebasePath -> ShortHash -> m (Set Reference.Id) +termReferencesByPrefix root = loadReferencesByPrefix (termsDir root) +typeReferencesByPrefix root = loadReferencesByPrefix (typesDir root) + +-- returns all the derived terms and derived constructors +-- that have `sh` as a prefix +termReferentsByPrefix :: MonadIO m + => (CodebasePath -> Reference.Id -> m (Maybe (DD.Decl v a))) + -> CodebasePath + -> ShortHash + -> m (Set Referent.Id) +termReferentsByPrefix _ root sh@SH.Builtin{} = + Set.map Referent.Ref' <$> termReferencesByPrefix root sh + -- builtin types don't provide any referents we could match against, + -- only decl types do. Those get handled in the next case. +termReferentsByPrefix getDecl root sh@SH.ShortHash{} = do + terms <- termReferencesByPrefix root sh + ctors <- do + -- clear out any CID from the SH, so we can use it to find a type decl + types <- typeReferencesByPrefix root sh { SH.cid = Nothing } + foldMapM collectCtors types + pure (Set.map Referent.Ref' terms <> ctors) + where + -- load up the Decl for `ref` to see how many constructors it has, + -- and what constructor type + collectCtors ref = getDecl root ref <&> \case + Nothing -> mempty + Just decl -> + Set.fromList [ con + | i <- [0 .. ctorCount-1] + , let con = Referent.Con' ref i ct + , SH.isPrefixOf sh $ Referent.toShortHashId con] + where ct = either (const CT.Effect) (const CT.Data) decl + ctorCount = length . DD.constructors' $ DD.asDataDecl decl + +branchHashesByPrefix :: MonadIO m => CodebasePath -> ShortBranchHash -> m (Set Branch.Hash) +branchHashesByPrefix codebasePath p = + fmap (Set.fromList . join) . for [branchesDir] $ \f -> do + let dir = f codebasePath + paths <- filter (isPrefixOf . Text.unpack . SBH.toText $ p) <$> listDirectory dir + let refs = paths >>= (toList . 
filenameToHash) + pure refs + where + filenameToHash :: String -> Maybe Branch.Hash + filenameToHash f = case Text.splitOn "." $ Text.pack f of + [h, "ub"] -> Causal.RawHash <$> Hash.fromBase32Hex h + _ -> Nothing + +failWith :: MonadIO m => Err -> m a +failWith = liftIO . fail . show + +-- | A version of listDirectory that returns mempty if the directory doesn't exist +listDirectory :: MonadIO m => FilePath -> m [FilePath] +listDirectory dir = liftIO $ + System.Directory.listDirectory dir `catch` (\(_ :: IOException) -> pure mempty) + +-- | delete all the elements of a given reference component from a set +deleteComponent :: Reference.Id -> Set Reference -> Set Reference +deleteComponent r rs = Set.difference rs + (Reference.members . Reference.componentFor . Reference.DerivedId $ r) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs new file mode 100644 index 0000000000..1fec405f9f --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs @@ -0,0 +1,321 @@ +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE ViewPatterns #-} + + +module Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex (syncToDirectory) where + +import Unison.Prelude + +import qualified Data.Set as Set +import Control.Lens +import Control.Monad.State.Strict ( MonadState, evalStateT ) +import Control.Monad.Writer.Strict ( MonadWriter, execWriterT ) +import qualified Control.Monad.Writer.Strict as Writer +import UnliftIO.Directory ( doesFileExist ) +import Unison.Codebase ( CodebasePath ) +import qualified Unison.Codebase.Causal as Causal +import Unison.Codebase.Branch ( Branch(..) 
) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Branch.Dependencies as BD +import qualified Unison.Codebase.Patch as Patch +import qualified Unison.Codebase.Serialization as S +import qualified Unison.Codebase.Serialization.V1 as V1 +import Unison.Codebase.SyncMode ( SyncMode ) +import qualified Unison.Codebase.SyncMode as SyncMode +import qualified Unison.Codebase.TermEdit as TermEdit +import qualified Unison.Codebase.TypeEdit as TypeEdit +import qualified Unison.DataDeclaration as DD +import qualified Unison.LabeledDependency as LD +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import qualified Unison.Term as Term +import Unison.Type ( Type ) +import qualified Unison.Type as Type +import Unison.Var ( Var ) +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Relation as Relation +import Unison.Util.Relation ( Relation ) +import Unison.Util.Monoid (foldMapM) +import Unison.Util.Timing (time) + +import Data.Monoid.Generic +import Unison.Codebase.FileCodebase.Common + +data SyncedEntities = SyncedEntities + { _syncedTerms :: Set Reference.Id + , _syncedDecls :: Set Reference.Id + , _syncedEdits :: Set Branch.EditHash + , _syncedBranches :: Set Branch.Hash + , _dependentsIndex :: Relation Reference Reference.Id + , _typeIndex :: Relation Reference Referent.Id + , _typeMentionsIndex :: Relation Reference Referent.Id + } deriving Generic + deriving Show + deriving Semigroup via GenericSemigroup SyncedEntities + deriving Monoid via GenericMonoid SyncedEntities + +makeLenses ''SyncedEntities + +syncToDirectory :: forall m v a + . 
MonadIO m + => Var v + => S.Format v + -> S.Format a + -> CodebasePath + -> CodebasePath + -> SyncMode + -> Branch m + -> m () +syncToDirectory fmtV fmtA = syncToDirectory' (S.get fmtV) (S.get fmtA) + +data Error + = MissingBranch Branch.Hash + | MissingPatch Branch.EditHash + | MissingTerm Reference.Id + | MissingTypeOfTerm Reference.Id + | MissingDecl Reference.Id + | InvalidBranch Branch.Hash + | InvalidTerm Reference.Id + | InvalidTypeOfTerm Reference.Id + | InvalidDecl Reference.Id + deriving (Eq, Ord, Show) + +syncToDirectory' :: forall m v a + . MonadIO m + => Var v + => S.Get v + -> S.Get a + -> CodebasePath + -> CodebasePath + -> SyncMode + -> Branch m + -> m () +syncToDirectory' getV getA srcPath destPath mode newRoot = + let warnMissingEntities = False in + flip evalStateT mempty $ do -- MonadState s m + (deps, errors) <- time "Sync Branches" $ execWriterT $ + processBranches [(Branch.headHash newRoot + ,Just . pure . Branch.transform (lift . lift) $ newRoot)] + errors' <- time "Sync Definitions" $ + execWriterT $ processDependencies (BD.to' deps) + time "Write indices" $ do + lift . writeDependentsIndex =<< use dependentsIndex + lift . writeTypeIndex =<< use typeIndex + lift . writeTypeMentionsIndex =<< use typeMentionsIndex + when (warnMissingEntities) $ for_ (errors <> errors') traceShowM + where + writeDependentsIndex :: MonadIO m => Relation Reference Reference.Id -> m () + writeDependentsIndex = writeIndexHelper (\k v -> touchIdFile v (dependentsDir destPath k)) + writeTypeIndex, writeTypeMentionsIndex :: MonadIO m => Relation Reference Referent.Id -> m () + writeTypeIndex = + writeIndexHelper (\k v -> touchReferentIdFile v (typeIndexDir destPath k)) + writeTypeMentionsIndex = + writeIndexHelper (\k v -> touchReferentIdFile v (typeMentionsIndexDir destPath k)) + writeIndexHelper + :: forall m a b. 
MonadIO m => (a -> b -> m ()) -> Relation a b -> m () + writeIndexHelper touchIndexFile index = + traverse_ (uncurry touchIndexFile) (Relation.toList index) + processBranches :: forall m + . MonadIO m + => MonadState SyncedEntities m + => MonadWriter (BD.Dependencies, Set Error) m + => [(Branch.Hash, Maybe (m (Branch m)))] + -> m () + processBranches [] = pure () + -- for each branch, + processBranches ((h, mmb) : rest) = + let tellError = Writer.tell . (mempty,) . Set.singleton + tellDependencies = Writer.tell . (,mempty) in + -- if hash exists at the destination, skip it, mark it done + ifNeedsSyncing h destPath branchPath syncedBranches + (\h -> + -- else if hash exists at the source, enqueue its dependencies, copy it, mark it done + ifM (doesFileExist (branchPath srcPath h)) + (do + (branches, deps) <- BD.fromRawCausal <$> + (deserializeRawBranchDependencies tellError srcPath h) + copyFileWithParents (branchPath srcPath h) (branchPath destPath h) + tellDependencies deps + processBranches (branches ++ rest)) + -- else if it's in memory, enqueue its dependencies, write it, mark it done + case mmb of + Just mb -> do + b <- mb + let (branches, deps) = BD.fromBranch b + let causalRaw = Branch.toCausalRaw b + serializeRawBranch destPath h causalRaw + tellDependencies deps + processBranches (branches ++ rest) + -- else -- error? + Nothing -> do + tellError (MissingBranch h) + processBranches rest + ) + (processBranches rest) + processDependencies :: forall n + . MonadIO n + => MonadState SyncedEntities n + => MonadWriter (Set Error) n + => BD.Dependencies' + -> n () + processDependencies = \case + -- for each patch + -- enqueue its target term and type references + BD.Dependencies' (editHash : editHashes) terms decls -> + -- This code assumes that patches are always available on disk, + -- not ever just held in memory with `pure`. If that's not the case, + -- then we can do something similar to what we did with branches. 
+ ifNeedsSyncing editHash destPath editsPath syncedEdits + (\h -> do + patch <- deserializeEdits srcPath h + -- I'm calling all the replacement terms dependents of the patches. + -- If we're supposed to replace X with Y, we don't necessarily need X, + -- but we do need Y. + let newTerms, newDecls :: [Reference.Id] + newTerms = [ i | TermEdit.Replace (Reference.DerivedId i) _ <- + toList . Relation.ran $ Patch._termEdits patch] + newDecls = [ i | TypeEdit.Replace (Reference.DerivedId i) <- + toList . Relation.ran $ Patch._typeEdits patch] + ifM (doesFileExist (editsPath srcPath h)) + (do + copyFileWithParents (editsPath srcPath h) (editsPath destPath h) + processDependencies $ + BD.Dependencies' editHashes (newTerms ++ terms) (newDecls ++ decls)) + (do + tellError (MissingPatch h) + (processDependencies $ BD.Dependencies' editHashes terms decls))) + (processDependencies $ BD.Dependencies' editHashes terms decls) + + -- for each term id + BD.Dependencies' [] (termHash : termHashes) decls -> + -- if it exists at the destination, skip it, mark it done + ifNeedsSyncing termHash destPath termPath syncedTerms + (\h -> do + -- else if it exists at the source, + ifM (doesFileExist (termPath srcPath h)) + (do + -- copy it, + -- load it, + -- enqueue its dependencies for syncing + -- enqueue its type's type dependencies for syncing + -- enqueue its type's dependencies, type & type mentions into respective indices + -- and continue + (newTerms, newDecls) <- enqueueTermDependencies h + processDependencies $ + BD.Dependencies' [] (newTerms ++ termHashes) (newDecls ++ decls) + ) + -- else -- an error? 
+ (do + tellError (MissingTerm h) + (processDependencies $ BD.Dependencies' [] termHashes decls))) + (processDependencies $ BD.Dependencies' [] termHashes decls) + -- for each decl id + BD.Dependencies' [] [] (declHash : declHashes) -> + -- if it exists at the destination, skip it, mark it done + ifNeedsSyncing declHash destPath declPath syncedDecls + (\h -> do + -- else if it exists at the source, + ifM (doesFileExist (declPath srcPath h)) + -- copy it, + -- load it, + -- enqueue its type dependencies for syncing + -- for each constructor, + -- enqueue its dependencies, type, type mentions into respective indices + (do + newDecls <- copyAndIndexDecls h + processDependencies $ BD.Dependencies' [] [] (newDecls ++ declHashes)) + (do + tellError (MissingDecl h) + (processDependencies $ BD.Dependencies' [] [] declHashes))) + (processDependencies $ BD.Dependencies' [] [] declHashes) + BD.Dependencies' [] [] [] -> pure () + copyAndIndexDecls :: forall m + . MonadIO m + => MonadState SyncedEntities m + => MonadWriter (Set Error) m + => Reference.Id + -> m [Reference.Id] + copyAndIndexDecls h = (getDecl getV getA srcPath h :: m (Maybe (DD.Decl v a))) >>= \case + Just decl -> do + copyFileWithParents (declPath srcPath h) (declPath destPath h) + let referentTypes :: [(Referent.Id, Type v a)] + referentTypes = DD.declConstructorReferents h decl + `zip` (DD.constructorTypes . DD.asDataDecl) decl + flip foldMapM referentTypes \(r, typ) -> do + let dependencies = toList (Type.dependencies typ) + dependentsIndex <>= Relation.fromManyDom dependencies h + let typeForIndexing = Type.removeAllEffectVars typ + let typeReference = Type.toReference typeForIndexing + let typeMentions = Type.toReferenceMentions typeForIndexing + typeIndex <>= Relation.singleton typeReference r + typeMentionsIndex <>= Relation.fromManyDom typeMentions r + pure [ i | Reference.DerivedId i <- dependencies ] + Nothing -> tellError (InvalidDecl h) $> mempty + + enqueueTermDependencies :: forall m + . 
MonadIO m + => MonadState SyncedEntities m + => MonadWriter (Set Error) m + => Reference.Id + -> m ([Reference.Id], [Reference.Id]) + enqueueTermDependencies h = getTerm getV getA srcPath h >>= \case + Just term -> do + let (typeDeps, termDeps) = partitionEithers . fmap LD.toReference . toList + $ Term.labeledDependencies term + ifM (doesFileExist (typePath srcPath h)) + (getTypeOfTerm getV getA srcPath h >>= \case + Just typ -> do + copyFileWithParents (termPath srcPath h) (termPath destPath h) + copyFileWithParents (typePath srcPath h) (typePath destPath h) + whenM (doesFileExist $ watchPath srcPath UF.TestWatch h) $ + copyFileWithParents (watchPath srcPath UF.TestWatch h) + (watchPath destPath UF.TestWatch h) + let typeDeps' = toList (Type.dependencies typ) + let typeForIndexing = Type.removeAllEffectVars typ + let typeReference = Type.toReference typeForIndexing + let typeMentions = Type.toReferenceMentions typeForIndexing + dependentsIndex <>= + Relation.fromManyDom (typeDeps ++ typeDeps' ++ termDeps) h + typeIndex <>= + Relation.singleton typeReference (Referent.Ref' h) + typeMentionsIndex <>= + Relation.fromManyDom typeMentions (Referent.Ref' h) + let newDecls = [ i | Reference.DerivedId i <- typeDeps ++ typeDeps'] + let newTerms = [ i | Reference.DerivedId i <- termDeps ] + pure (newTerms, newDecls) + Nothing -> tellError (InvalidTypeOfTerm h) $> mempty) + (tellError (MissingTypeOfTerm h) $> mempty) + Nothing -> tellError (InvalidTerm h) $> mempty + + deserializeRawBranchDependencies :: forall m + . MonadIO m + => (Error -> m ()) + -> CodebasePath + -> Causal.Deserialize m Branch.Raw (BD.Branches m, BD.Dependencies) + deserializeRawBranchDependencies tellError root h = + S.getFromFile (V1.getCausal0 V1.getBranchDependencies) (branchPath root h) >>= \case + Nothing -> tellError (InvalidBranch h) $> Causal.RawOne mempty + Just results -> pure results + tellError :: forall m a. MonadWriter (Set a) m => a -> m () + tellError = Writer.tell . 
Set.singleton + + -- Use State and Lens to do some specified thing at most once, to create a file. + ifNeedsSyncing :: forall m s h. (MonadIO m, MonadState s m, Ord h) + => h + -> CodebasePath + -> (CodebasePath -> h -> FilePath) -- done if this filepath exists + -> SimpleLens s (Set h) -- lens to track if `h` is already done + -> (h -> m ()) -- do! + -> m () -- don't + -> m () + ifNeedsSyncing h destPath getFilename l doSync dontSync = + ifM (use (l . to (Set.member h))) dontSync $ do + l %= Set.insert h + if mode == SyncMode.Complete then doSync h + else ifM (doesFileExist (getFilename destPath h)) dontSync (doSync h) diff --git a/parser-typechecker/src/Unison/Codebase/GitError.hs b/parser-typechecker/src/Unison/Codebase/GitError.hs new file mode 100644 index 0000000000..082a13b188 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/GitError.hs @@ -0,0 +1,23 @@ +module Unison.Codebase.GitError where + +import Unison.Prelude + +import Unison.Codebase (CodebasePath) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.Editor.RemoteRepo (RemoteRepo) + +data GitError = NoGit + | UnrecognizableCacheDir Text CodebasePath + | UnrecognizableCheckoutDir Text CodebasePath + | CloneException RemoteRepo String + | PushException RemoteRepo String + | PushNoOp RemoteRepo + -- url commit Diff of what would change on merge with remote + | PushDestinationHasNewStuff RemoteRepo + | NoRemoteNamespaceWithHash RemoteRepo ShortBranchHash + | RemoteNamespaceHashAmbiguous RemoteRepo ShortBranchHash (Set Branch.Hash) + | CouldntLoadRootBranch RemoteRepo Branch.Hash + | CouldntParseRootBranch RemoteRepo String + | SomeOtherError String + deriving Show diff --git a/parser-typechecker/src/Unison/Codebase/MainTerm.hs b/parser-typechecker/src/Unison/Codebase/MainTerm.hs new file mode 100644 index 0000000000..ca9dc83ce5 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/MainTerm.hs @@ -0,0 +1,72 @@ 
+{-# LANGUAGE GADTs #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PartialTypeSignatures #-} + +-- | Find a computation of type '{IO} () in the codebase. +module Unison.Codebase.MainTerm where + +import Unison.Prelude + +import Unison.Parser ( Ann ) +import qualified Unison.Parser as Parser +import qualified Unison.Term as Term +import Unison.Term ( Term ) +import Unison.Var ( Var ) +import qualified Unison.Builtin.Decls as DD +import qualified Unison.HashQualified as HQ +import qualified Unison.Referent as Referent +import qualified Unison.Names3 as Names3 +import Unison.Reference ( Reference ) +import qualified Unison.Type as Type +import Unison.Type ( Type ) +import qualified Unison.Typechecker as Typechecker +import Unison.Runtime.IOSource ( ioReference ) + +data MainTerm v + = NotAFunctionName String + | NotFound String + | BadType String + | Success HQ.HashQualified (Term v Ann) (Type v Ann) + +getMainTerm + :: (Monad m, Var v) + => (Reference -> m (Maybe (Type v Ann))) + -> Names3.Names0 + -> String + -> Type.Type v Ann + -> m (MainTerm v) +getMainTerm loadTypeOfTerm parseNames0 mainName mainType = + case HQ.fromString mainName of + Nothing -> pure (NotAFunctionName mainName) + Just hq -> do + let refs = Names3.lookupHQTerm hq (Names3.Names parseNames0 mempty) + let a = Parser.External + case toList refs of + [Referent.Ref ref] -> do + typ <- loadTypeOfTerm ref + case typ of + Just typ | Typechecker.isSubtype typ mainType -> do + let tm = DD.forceTerm a a (Term.ref a ref) + return (Success hq tm typ) + _ -> pure (BadType mainName) + _ -> pure (NotFound mainName) + +-- {IO} () +ioUnit :: Ord v => a -> Type.Type v a +ioUnit a = Type.effect a [Type.ref a ioReference] (Type.ref a DD.unitRef) + +builtinIOUnit :: Ord v => a -> Type.Type v a +builtinIOUnit a + = Type.effect1 a (Type.builtinIO a) (Type.ref a DD.unitRef) + +-- '{IO} () +nullaryMain :: Ord v => a -> Type.Type v a +nullaryMain a + = Type.arrow a (Type.ref a DD.unitRef) (ioUnit a) + +builtinMain :: 
Ord v => a -> Type.Type v a +builtinMain a + = Type.arrow a (Type.ref a DD.unitRef) (builtinIOUnit a) + +mainTypes :: Ord v => a -> [Type v a] +mainTypes a = [nullaryMain a] diff --git a/parser-typechecker/src/Unison/Codebase/Metadata.hs b/parser-typechecker/src/Unison/Codebase/Metadata.hs new file mode 100644 index 0000000000..1df8b070d2 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Metadata.hs @@ -0,0 +1,72 @@ +module Unison.Codebase.Metadata where + +import Unison.Prelude + +import Unison.Reference (Reference) +import Unison.Util.Star3 (Star3) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Unison.Util.Star3 as Star3 +import Unison.Util.Relation (Relation) +import qualified Unison.Util.Relation as R +import Unison.Util.Relation4 (Relation4) +import qualified Unison.Util.Relation4 as R4 +import qualified Unison.Util.List as List + +type Type = Reference +type Value = Reference + +-- keys can be terms or types +type Metadata = Map Type (Set Value) + +-- `a` is generally the type of references or hashes +-- `n` is generally the the type of name associated with the references +-- `Type` is the type of metadata. Duplicate info to speed up certain queries. +-- `(Type, Value)` is the metadata value itself along with its type. +type Star a n = Star3 a n Type (Type, Value) +type R4 a n = R4.Relation4 a n Type Value + +starToR4 :: (Ord r, Ord n) => Star r n -> Relation4 r n Type Value +starToR4 = R4.fromList . fmap (\(r,n,_,(t,v)) -> (r,n,t,v)) . Star3.toList + +hasMetadata :: Ord a => a -> Type -> Value -> Star a n -> Bool +hasMetadata a t v = Set.member (t, v) . R.lookupDom a . 
Star3.d3 + +inserts :: (Ord a, Ord n) => [(a, Type, Value)] -> Star a n -> Star a n +inserts tups s = foldl' (flip insert) s tups + +insertWithMetadata + :: (Ord a, Ord n) => (a, Metadata) -> Star a n -> Star a n +insertWithMetadata (a, md) = + inserts [ (a, ty, v) | (ty, vs) <- Map.toList md, v <- toList vs ] + +insert :: (Ord a, Ord n) => (a, Type, Value) -> Star a n -> Star a n +insert (a, ty, v) = Star3.insertD23 (a, ty, (ty,v)) + +delete :: (Ord a, Ord n) => (a, Type, Value) -> Star a n -> Star a n +delete (a, ty, v) s = let + s' = Star3.deleteD3 (a, (ty,v)) s + -- if (ty,v) is the last metadata of type ty + -- we also delete (a, ty) from the d2 index + metadataByType = List.multimap (toList (R.lookupDom a (Star3.d3 s))) + in + case Map.lookup ty metadataByType of + Just vs | all (== v) vs -> Star3.deleteD2 (a, ty) s' + _ -> s' + +-- parallel composition - commutative and associative +merge :: Metadata -> Metadata -> Metadata +merge = Map.unionWith (<>) + +-- sequential composition, right-biased +append :: Metadata -> Metadata -> Metadata +append = Map.unionWith (flip const) + +empty :: Metadata +empty = mempty + +singleton :: Type -> Value -> Metadata +singleton ty v = Map.singleton ty (Set.singleton v) + +toRelation :: Star3 a n x y -> Relation a n +toRelation = Star3.d1 diff --git a/parser-typechecker/src/Unison/Codebase/NameEdit.hs b/parser-typechecker/src/Unison/Codebase/NameEdit.hs new file mode 100644 index 0000000000..3a872e1b0a --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/NameEdit.hs @@ -0,0 +1,15 @@ +module Unison.Codebase.NameEdit where + +import Unison.Prelude + +import Unison.Reference (Reference) +import Unison.Hashable (Hashable, tokens) + +data NameEdit = + NameEdit { added :: Set Reference, removed :: Set Reference } + +instance Semigroup NameEdit where + NameEdit add1 del1 <> NameEdit add2 del2 = NameEdit (add1 <> add2) (del1 <> del2) + +instance Hashable NameEdit where + tokens (NameEdit added removed) = tokens (toList added, 
toList removed) diff --git a/parser-typechecker/src/Unison/Codebase/Patch.hs b/parser-typechecker/src/Unison/Codebase/Patch.hs new file mode 100644 index 0000000000..a5cbdd5902 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Patch.hs @@ -0,0 +1,136 @@ +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Codebase.Patch where + +import Unison.Prelude hiding (empty) + +import Prelude hiding (head,read,subtract) + +import Control.Lens hiding ( children, cons, transform ) +import qualified Data.Set as Set +import Unison.Codebase.TermEdit ( TermEdit, Typing(Same) ) +import qualified Unison.Codebase.TermEdit as TermEdit +import Unison.Codebase.TypeEdit ( TypeEdit ) +import qualified Unison.Codebase.TypeEdit as TypeEdit +import Unison.Hashable ( Hashable ) +import qualified Unison.Hashable as H +import Unison.Reference ( Reference ) +import qualified Unison.Util.Relation as R +import Unison.Util.Relation ( Relation ) +import qualified Unison.LabeledDependency as LD +import Unison.LabeledDependency ( LabeledDependency ) + +data Patch = Patch + { _termEdits :: Relation Reference TermEdit + , _typeEdits :: Relation Reference TypeEdit + } deriving (Eq, Ord, Show) + +data PatchDiff = PatchDiff + { _addedTermEdits :: Relation Reference TermEdit + , _addedTypeEdits :: Relation Reference TypeEdit + , _removedTermEdits :: Relation Reference TermEdit + , _removedTypeEdits :: Relation Reference TypeEdit + } deriving (Eq, Ord, Show) + +makeLenses ''Patch +makeLenses ''PatchDiff + +diff :: Patch -> Patch -> PatchDiff +diff new old = PatchDiff + { _addedTermEdits = R.difference (view termEdits new) (view termEdits old) + , _addedTypeEdits = R.difference (view typeEdits new) (view typeEdits old) + , _removedTypeEdits = R.difference (view typeEdits old) (view typeEdits new) + , _removedTermEdits = R.difference (view termEdits old) (view termEdits new) + } + +labeledDependencies :: Patch -> Set LabeledDependency 
+labeledDependencies Patch {..} = + Set.map LD.termRef (R.dom _termEdits) + <> Set.fromList + (fmap LD.termRef $ TermEdit.references =<< toList (R.ran _termEdits)) + <> Set.map LD.typeRef (R.dom _typeEdits) + <> Set.fromList + (fmap LD.typeRef $ TypeEdit.references =<< toList (R.ran _typeEdits)) + +empty :: Patch +empty = Patch mempty mempty + +isEmpty :: Patch -> Bool +isEmpty p = p == empty + +allReferences :: Patch -> Set Reference +allReferences p = typeReferences p <> termReferences p where + typeReferences p = Set.fromList + [ r | (old, TypeEdit.Replace new) <- R.toList (_typeEdits p) + , r <- [old, new] ] + termReferences p = Set.fromList + [ r | (old, TermEdit.Replace new _) <- R.toList (_termEdits p) + , r <- [old, new] ] + +-- | Returns the set of references which are the target of an arrow in the patch +allReferenceTargets :: Patch -> Set Reference +allReferenceTargets p = typeReferences p <> termReferences p where + typeReferences p = Set.fromList + [ new | (_, TypeEdit.Replace new) <- R.toList (_typeEdits p) ] + termReferences p = Set.fromList + [ new | (_, TermEdit.Replace new _) <- R.toList (_termEdits p) ] + +updateTerm :: (Reference -> Reference -> Typing) + -> Reference -> TermEdit -> Patch -> Patch +updateTerm typing r edit p = + -- get D ~= lookupRan r + -- for each d ∈ D, remove (d, r) and add (d, r') + -- add (r, r') and remove (r', r') + let deleteCycle = case edit of + TermEdit.Deprecate -> id + TermEdit.Replace r' _ -> R.delete r' (TermEdit.Replace r' Same) + edits' :: Relation Reference TermEdit + edits' = deleteCycle . R.insert r edit . 
R.map f $ _termEdits p + f (x, TermEdit.Replace y _) | y == r = case edit of + TermEdit.Replace r' _ -> (x, TermEdit.Replace r' (typing x r')) + TermEdit.Deprecate -> (x, TermEdit.Deprecate) + f p = p + in p { _termEdits = edits' } + +updateType :: Reference -> TypeEdit -> Patch -> Patch +updateType r edit p = + let deleteCycle = case edit of + TypeEdit.Deprecate -> id + TypeEdit.Replace r' -> R.delete r' (TypeEdit.Replace r') + edits' :: Relation Reference TypeEdit + edits' = deleteCycle . R.insert r edit . R.map f $ _typeEdits p + f (x, TypeEdit.Replace y) | y == r = case edit of + TypeEdit.Replace r' -> (x, TypeEdit.Replace r') + TypeEdit.Deprecate -> (x, TypeEdit.Deprecate) + f p = p + in p { _typeEdits = edits' } + +conflicts :: Patch -> Patch +conflicts Patch{..} = + Patch (R.filterManyDom _termEdits) (R.filterManyDom _typeEdits) + +instance Semigroup Patch where + a <> b = Patch (_termEdits a <> _termEdits b) + (_typeEdits a <> _typeEdits b) + +instance Monoid Patch where + mappend = (<>) + mempty = Patch mempty mempty + +instance Hashable Patch where + tokens e = [ H.Hashed (H.accumulate (H.tokens (_termEdits e))), + H.Hashed (H.accumulate (H.tokens (_typeEdits e))) ] + +instance Semigroup PatchDiff where + a <> b = PatchDiff + { _addedTermEdits = _addedTermEdits a <> _addedTermEdits b + , _addedTypeEdits = _addedTypeEdits a <> _addedTypeEdits b + , _removedTermEdits = _removedTermEdits a <> _removedTermEdits b + , _removedTypeEdits = _removedTypeEdits a <> _removedTypeEdits b + } + +instance Monoid PatchDiff where + mappend = (<>) + mempty = PatchDiff mempty mempty mempty mempty diff --git a/parser-typechecker/src/Unison/Codebase/Path.hs b/parser-typechecker/src/Unison/Codebase/Path.hs new file mode 100644 index 0000000000..ca0df22ea6 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Path.hs @@ -0,0 +1,440 @@ +{-# LANGUAGE OverloadedLists #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} + 
+module Unison.Codebase.Path where + +import Unison.Prelude hiding (empty, toList) + +import Data.Bifunctor ( first ) +import Data.List.Extra ( stripPrefix, dropPrefix ) +import Control.Lens hiding (unsnoc, cons, snoc) +import qualified Control.Lens as Lens +import qualified Data.Foldable as Foldable +import qualified Data.Text as Text +import Data.Sequence (Seq((:<|),(:|>) )) +import qualified Data.Sequence as Seq +import Unison.Name ( Name ) +import qualified Unison.Name as Name +import Unison.Util.Monoid (intercalateMap) +import qualified Unison.Lexer as Lexer +import qualified Unison.HashQualified' as HQ' +import qualified Unison.ShortHash as SH + +import Unison.NameSegment ( NameSegment(NameSegment)) +import qualified Unison.NameSegment as NameSegment + +-- `Foo.Bar.baz` becomes ["Foo", "Bar", "baz"] +newtype Path = Path { toSeq :: Seq NameSegment } deriving (Eq, Ord) + +newtype Absolute = Absolute { unabsolute :: Path } deriving (Eq,Ord) +newtype Relative = Relative { unrelative :: Path } deriving (Eq,Ord) +newtype Path' = Path' { unPath' :: Either Absolute Relative } + deriving (Eq,Ord) + +isCurrentPath :: Path' -> Bool +isCurrentPath p = p == currentPath + +currentPath :: Path' +currentPath = Path' (Right (Relative (Path mempty))) + +isRoot' :: Path' -> Bool +isRoot' = either isRoot (const False) . unPath' + +isRoot :: Absolute -> Bool +isRoot = Seq.null . toSeq . unabsolute + +absoluteToPath' :: Absolute -> Path' +absoluteToPath' abs = Path' (Left abs) + +instance Show Path' where + show (Path' (Left abs)) = show abs + show (Path' (Right rel)) = show rel + +instance Show Absolute where + show s = "." ++ show (unabsolute s) + +instance Show Relative where + show = show . 
unrelative + +unsplit' :: Split' -> Path' +unsplit' (Path' (Left (Absolute p)), seg) = Path' (Left (Absolute (unsplit (p, seg)))) +unsplit' (Path' (Right (Relative p)), seg) = Path' (Right (Relative (unsplit (p, seg)))) + +unsplit :: Split -> Path +unsplit (Path p, a) = Path (p :|> a) + +unsplitHQ :: HQSplit -> HQ'.HashQualified' Path +unsplitHQ (p, a) = fmap (snoc p) a + +unsplitHQ' :: HQSplit' -> HQ'.HashQualified' Path' +unsplitHQ' (p, a) = fmap (snoc' p) a + +type Split = (Path, NameSegment) +type HQSplit = (Path, HQ'.HQSegment) + +type Split' = (Path', NameSegment) +type HQSplit' = (Path', HQ'.HQSegment) + +type SplitAbsolute = (Absolute, NameSegment) +type HQSplitAbsolute = (Absolute, HQ'.HQSegment) + +-- examples: +-- unprefix .foo.bar .blah == .blah (absolute paths left alone) +-- unprefix .foo.bar id == id (relative paths starting w/ nonmatching prefix left alone) +-- unprefix .foo.bar foo.bar.baz == baz (relative paths w/ common prefix get stripped) +unprefix :: Absolute -> Path' -> Path +unprefix (Absolute prefix) (Path' p) = case p of + Left abs -> unabsolute abs + Right (unrelative -> rel) -> fromList $ dropPrefix (toList prefix) (toList rel) + +-- too many types +prefix :: Absolute -> Path' -> Path +prefix (Absolute (Path prefix)) (Path' p) = case p of + Left (unabsolute -> abs) -> abs + Right (unrelative -> rel) -> Path $ prefix <> toSeq rel + +-- .libs.blah.poo is Absolute +-- libs.blah.poo is Relative +-- Left is some parse error tbd +parsePath' :: String -> Either String Path' +parsePath' p = case parsePathImpl' p of + Left e -> Left e + Right (p, "") -> Right p + Right (p, rem) -> + case (first show . (Lexer.wordyId0 <> Lexer.symbolyId0) <> unit') rem of + Right (seg, "") -> Right (unsplit' (p, NameSegment . 
Text.pack $ seg)) + Right (_, rem) -> + Left ("extra characters after " <> show p <> ": " <> show rem) + Left e -> Left e + +-- implementation detail of parsePath' and parseSplit' +-- foo.bar.baz.34 becomes `Right (foo.bar.baz, "34") +-- foo.bar.baz becomes `Right (foo.bar, "baz") +-- baz becomes `Right (, "baz") +-- foo.bar.baz#a8fj becomes `Left`; we don't hash-qualify paths. +-- TODO: Get rid of this thing. +parsePathImpl' :: String -> Either String (Path', String) +parsePathImpl' p = case p of + "." -> Right (Path' . Left $ absoluteEmpty, "") + '.' : p -> over _1 (Path' . Left . Absolute . fromList) <$> segs p + p -> over _1 (Path' . Right . Relative . fromList) <$> segs p + where + go f p = case f p of + Right (a, "") -> case Lens.unsnoc (Name.segments' $ Text.pack a) of + Nothing -> Left "empty path" + Just (segs, last) -> Right (NameSegment <$> segs, Text.unpack last) + Right (segs, '.' : rem) -> + let segs' = Name.segments' (Text.pack segs) + in Right (NameSegment <$> segs', rem) + Right (segs, rem) -> + Left $ "extra characters after " <> segs <> ": " <> show rem + Left e -> Left e + segs p = go (first show . 
(Lexer.symbolyId <> Lexer.wordyId) <> unit') p + +wordyNameSegment, definitionNameSegment :: String -> Either String NameSegment +wordyNameSegment s = case Lexer.wordyId0 s of + Left e -> Left (show e) + Right (a, "") -> Right (NameSegment (Text.pack a)) + Right (a, rem) -> + Left $ "trailing characters after " <> show a <> ": " <> show rem + +optionalWordyNameSegment :: String -> Either String NameSegment +optionalWordyNameSegment "" = Right $ NameSegment "" +optionalWordyNameSegment s = wordyNameSegment s + +-- Parse a name segment like "()" +unit' :: String -> Either String (String, String) +unit' s = case stripPrefix "()" s of + Nothing -> Left $ "Expected () but found: " <> s + Just rem -> Right ("()", rem) + +unit :: String -> Either String NameSegment +unit s = case unit' s of + Right (_, "" ) -> Right $ NameSegment "()" + Right (_, rem) -> Left $ "trailing characters after (): " <> show rem + Left _ -> Left $ "I don't know how to parse " <> s + + +definitionNameSegment s = wordyNameSegment s <> symbolyNameSegment s <> unit s + where + symbolyNameSegment s = case Lexer.symbolyId0 s of + Left e -> Left (show e) + Right (a, "") -> Right (NameSegment (Text.pack a)) + Right (a, rem) -> + Left $ "trailing characters after " <> show a <> ": " <> show rem + +-- parseSplit' wordyNameSegment "foo.bar.baz" returns Right (foo.bar, baz) +-- parseSplit' wordyNameSegment "foo.bar.+" returns Left err +-- parseSplit' definitionNameSegment "foo.bar.+" returns Right (foo.bar, +) +parseSplit' :: (String -> Either String NameSegment) + -> String + -> Either String Split' +parseSplit' lastSegment p = do + (p', rem) <- parsePathImpl' p + seg <- lastSegment rem + pure (p', seg) + +parseShortHashOrHQSplit' :: String -> Either String (Either SH.ShortHash HQSplit') +parseShortHashOrHQSplit' s = + case Text.breakOn "#" $ Text.pack s of + ("","") -> error $ "encountered empty string parsing '" <> s <> "'" + (n,"") -> do + (p, rem) <- parsePathImpl' (Text.unpack n) + seg <- 
definitionNameSegment rem + pure $ Right (p, HQ'.NameOnly seg) + ("", sh) -> do + sh <- maybeToRight (shError s) . SH.fromText $ sh + pure $ Left sh + (n, sh) -> do + (p, rem) <- parsePathImpl' (Text.unpack n) + seg <- definitionNameSegment rem + hq <- maybeToRight (shError s) . + fmap (\sh -> (p, HQ'.HashQualified seg sh)) . + SH.fromText $ sh + pure $ Right hq + where + shError s = "couldn't parse shorthash from " <> s + +parseHQSplit :: String -> Either String HQSplit +parseHQSplit s = case parseHQSplit' s of + Right (Path' (Right (Relative p)), hqseg) -> Right (p, hqseg) + Right (Path' Left{}, _) -> + Left $ "Sorry, you can't use an absolute name like " <> s <> " here." + Left e -> Left e + +parseHQSplit' :: String -> Either String HQSplit' +parseHQSplit' s = case Text.breakOn "#" $ Text.pack s of + ("", "") -> error $ "encountered empty string parsing '" <> s <> "'" + ("", _ ) -> Left "Sorry, you can't use a hash-only reference here." + (n , "") -> do + (p, rem) <- parsePath n + seg <- definitionNameSegment rem + pure (p, HQ'.NameOnly seg) + (n, sh) -> do + (p, rem) <- parsePath n + seg <- definitionNameSegment rem + maybeToRight (shError s) + . fmap (\sh -> (p, HQ'.HashQualified seg sh)) + . 
SH.fromText + $ sh + where + shError s = "couldn't parse shorthash from " <> s + parsePath n = do + x <- parsePathImpl' $ Text.unpack n + pure $ case x of + (Path' (Left e), "") | e == absoluteEmpty -> (relativeEmpty', ".") + x -> x + +toAbsoluteSplit :: Absolute -> (Path', a) -> (Absolute, a) +toAbsoluteSplit a (p, s) = (resolve a p, s) + +fromSplit' :: (Path', a) -> (Path, a) +fromSplit' (Path' (Left (Absolute p)), a) = (p, a) +fromSplit' (Path' (Right (Relative p)), a) = (p, a) + +fromAbsoluteSplit :: (Absolute, a) -> (Path, a) +fromAbsoluteSplit (Absolute p, a) = (p, a) + +absoluteEmpty :: Absolute +absoluteEmpty = Absolute empty + +relativeEmpty' :: Path' +relativeEmpty' = Path' (Right (Relative empty)) + +relativeSingleton :: NameSegment -> Relative +relativeSingleton = Relative . Path . Seq.singleton + +toPath' :: Path -> Path' +toPath' = \case + Path (NameSegment "" :<| tail) -> Path' . Left . Absolute . Path $ tail + p -> Path' . Right . Relative $ p + +toList :: Path -> [NameSegment] +toList = Foldable.toList . toSeq + +fromList :: [NameSegment] -> Path +fromList = Path . Seq.fromList + +ancestors :: Absolute -> Seq Absolute +ancestors (Absolute (Path segments)) = Absolute . Path <$> Seq.inits segments + +hqSplitFromName' :: Name -> Maybe HQSplit' +hqSplitFromName' = fmap (fmap HQ'.fromName) . Lens.unsnoc . fromName' + +splitFromName :: Name -> Maybe Split +splitFromName = unsnoc . fromName + +unprefixName :: Absolute -> Name -> Name +unprefixName prefix = toName . unprefix prefix . fromName' + +prefixName :: Absolute -> Name -> Name +prefixName p = toName . prefix p . 
fromName' + +singleton :: NameSegment -> Path +singleton n = fromList [n] + +cons :: NameSegment -> Path -> Path +cons = Lens.cons + +snoc :: Path -> NameSegment -> Path +snoc = Lens.snoc + +snoc' :: Path' -> NameSegment -> Path' +snoc' = Lens.snoc + +unsnoc :: Path -> Maybe (Path, NameSegment) +unsnoc = Lens.unsnoc + +uncons :: Path -> Maybe (NameSegment, Path) +uncons = Lens.uncons + +--asDirectory :: Path -> Text +--asDirectory p = case toList p of +-- NameSegment "_root_" : (Seq.fromList -> tail) -> +-- "/" <> asDirectory (Path tail) +-- other -> Text.intercalate "/" . fmap NameSegment.toText $ other + +-- > Path.fromName . Name.unsafeFromText $ ".Foo.bar" +-- /Foo/bar +-- Int./ -> "Int"/"/" +-- pkg/Int.. -> "pkg"/"Int"/"." +-- Int./foo -> error because "/foo" is not a valid NameSegment +-- and "Int." is not a valid NameSegment +-- and "Int" / "" / "foo" is not a valid path (internal "") +-- todo: fromName needs to be a little more complicated if we want to allow +-- identifiers called Function.(.) +fromName :: Name -> Path +fromName = fromList . Name.segments + +fromName' :: Name -> Path' +fromName' n = case take 1 (Name.toString n) of + "." -> Path' . Left . Absolute $ Path seq + _ -> Path' . Right $ Relative path + where + path = fromName n + seq = toSeq path + +toName :: Path -> Name +toName = Name.unsafeFromText . toText + +-- | Convert a Path' to a Name +toName' :: Path' -> Name +toName' = Name.unsafeFromText . toText' + +-- Returns the nearest common ancestor, along with the +-- two inputs relativized to that ancestor. 
+relativeToAncestor :: Path -> Path -> (Path, Path, Path) +relativeToAncestor (Path a) (Path b) = case (a, b) of + (ha :<| ta, hb :<| tb) | ha == hb -> + let (ancestor, relA, relB) = relativeToAncestor (Path ta) (Path tb) + in (ha `cons` ancestor, relA, relB) + -- nothing in common + _ -> (empty, Path a, Path b) + +pattern Parent h t = Path (NameSegment h :<| t) +pattern Empty = Path Seq.Empty + +empty :: Path +empty = Path mempty + +instance Show Path where + show = Text.unpack . toText + +toText :: Path -> Text +toText (Path nss) = intercalateMap "." NameSegment.toText nss + +fromText :: Text -> Path +fromText = \case + "" -> empty + t -> fromList $ NameSegment <$> Name.segments' t + +toText' :: Path' -> Text +toText' = \case + Path' (Left (Absolute path)) -> Text.cons '.' (toText path) + Path' (Right (Relative path)) -> toText path + +instance Cons Path Path NameSegment NameSegment where + _Cons = prism (uncurry cons) uncons where + cons :: NameSegment -> Path -> Path + cons ns (Path p) = Path (ns :<| p) + uncons :: Path -> Either Path (NameSegment, Path) + uncons p = case p of + Path (hd :<| tl) -> Right (hd, Path tl) + _ -> Left p + +instance Snoc Relative Relative NameSegment NameSegment where + _Snoc = prism (uncurry snocRelative) $ \case + Relative (Lens.unsnoc -> Just (s,a)) -> Right (Relative s,a) + e -> Left e + where + snocRelative :: Relative -> NameSegment -> Relative + snocRelative r n = Relative . (`Lens.snoc` n) $ unrelative r + +instance Snoc Absolute Absolute NameSegment NameSegment where + _Snoc = prism (uncurry snocAbsolute) $ \case + Absolute (Lens.unsnoc -> Just (s,a)) -> Right (Absolute s, a) + e -> Left e + where + snocAbsolute :: Absolute -> NameSegment -> Absolute + snocAbsolute a n = Absolute . 
(`Lens.snoc` n) $ unabsolute a + +instance Snoc Path Path NameSegment NameSegment where + _Snoc = prism (uncurry snoc) unsnoc + where + unsnoc :: Path -> Either Path (Path, NameSegment) + unsnoc = \case + Path (s Seq.:|> a) -> Right (Path s, a) + e -> Left e + snoc :: Path -> NameSegment -> Path + snoc (Path p) ns = Path (p <> pure ns) + +instance Snoc Path' Path' NameSegment NameSegment where + _Snoc = prism (uncurry snoc') $ \case + Path' (Left (Lens.unsnoc -> Just (s,a))) -> Right (Path' (Left s), a) + Path' (Right (Lens.unsnoc -> Just (s,a))) -> Right (Path' (Right s), a) + e -> Left e + where + snoc' :: Path' -> NameSegment -> Path' + snoc' (Path' e) n = case e of + Left abs -> Path' (Left . Absolute $ Lens.snoc (unabsolute abs) n) + Right rel -> Path' (Right . Relative $ Lens.snoc (unrelative rel) n) + +instance Snoc Split' Split' NameSegment NameSegment where + _Snoc = prism (uncurry snoc') $ \case -- unsnoc + (Lens.unsnoc -> Just (s, a), ns) -> Right ((s, a), ns) + e -> Left e + where + snoc' :: Split' -> NameSegment -> Split' + snoc' (p, a) n = (Lens.snoc p a, n) + +class Resolve l r o where + resolve :: l -> r -> o + +instance Resolve Path Path Path where + resolve (Path l) (Path r) = Path (l <> r) + +instance Resolve Relative Relative Relative where + resolve (Relative (Path l)) (Relative (Path r)) = Relative (Path (l <> r)) + +instance Resolve Absolute Relative Absolute where + resolve (Absolute l) (Relative r) = Absolute (resolve l r) + +instance Resolve Path' Path' Path' where + resolve _ a@(Path' Left{}) = a + resolve (Path' (Left a)) (Path' (Right r)) = Path' (Left (resolve a r)) + resolve (Path' (Right r1)) (Path' (Right r2)) = Path' (Right (resolve r1 r2)) + +instance Resolve Path' Split' Path' where + resolve l r = resolve l (unsplit' r) + +instance Resolve Path' Split' Split' where + resolve l (r, ns) = (resolve l r, ns) + +instance Resolve Absolute HQSplit HQSplitAbsolute where + resolve l (r, hq) = (resolve l (Relative r), hq) + +instance 
Resolve Absolute Path' Absolute where + resolve _ (Path' (Left a)) = a + resolve a (Path' (Right r)) = resolve a r diff --git a/parser-typechecker/src/Unison/Codebase/Reflog.hs b/parser-typechecker/src/Unison/Codebase/Reflog.hs new file mode 100644 index 0000000000..07df0bd380 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Reflog.hs @@ -0,0 +1,30 @@ +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.Reflog where + +import Data.Text (Text) +import qualified Data.Text as Text +import Unison.Codebase.Branch (Hash) +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Hash as Hash + +data Entry = + Entry + { from :: Hash + , to :: Hash + , reason :: Text + } + +fromText :: Text -> Maybe Entry +fromText t = + case Text.words t of + (Hash.fromBase32Hex -> Just old) : (Hash.fromBase32Hex -> Just new) : (Text.unwords -> reason) -> + Just $ Entry (Causal.RawHash old) (Causal.RawHash new) reason + _ -> Nothing + + +toText :: Entry -> Text +toText (Entry old new reason) = + Text.unwords [ Hash.base32Hex . Causal.unRawHash $ old + , Hash.base32Hex . 
Causal.unRawHash $ new + , reason ] diff --git a/parser-typechecker/src/Unison/Codebase/Runtime.hs b/parser-typechecker/src/Unison/Codebase/Runtime.hs new file mode 100644 index 0000000000..08bea36724 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Runtime.hs @@ -0,0 +1,132 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codebase.Runtime where + +import Unison.Prelude + +import qualified Unison.ABT as ABT +import Data.Bifunctor (first) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Unison.Codebase.CodeLookup as CL +import qualified Unison.Codebase as Codebase +import Unison.UnisonFile ( UnisonFile ) +import Unison.Parser ( Ann ) +import qualified Unison.Term as Term +import Unison.Type ( Type ) +import Unison.Var ( Var ) +import qualified Unison.Var as Var +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import qualified Unison.UnisonFile as UF +import Unison.Builtin.Decls (pattern TupleTerm', tupleTerm) +import qualified Unison.Util.Pretty as P +import qualified Unison.PrettyPrintEnv as PPE + +type Error = P.Pretty P.ColorText +type Term v = Term.Term v () + +data Runtime v = Runtime + { terminate :: IO () + , evaluate + :: CL.CodeLookup v IO () + -> PPE.PrettyPrintEnv + -> Term v + -> IO (Either Error (Term v)) + , mainType :: Type v Ann + } + +type IsCacheHit = Bool + +noCache :: Reference -> IO (Maybe (Term v)) +noCache _ = pure Nothing + +-- Evaluates the watch expressions in the file, returning a `Map` of their +-- results. This has to be a bit fancy to handle that the definitions in the +-- file depend on each other and evaluation must proceed in a way that respects +-- these dependencies. +-- +-- Note: The definitions in the file are hashed and looked up in +-- `evaluationCache`. If that returns a result, evaluation of that definition +-- can be skipped. +evaluateWatches + :: forall v a + . 
Var v + => CL.CodeLookup v IO a + -> PPE.PrettyPrintEnv + -> (Reference -> IO (Maybe (Term v))) + -> Runtime v + -> UnisonFile v a + -> IO (Either Error + ( [(v, Term v)] + -- Map watchName (loc, hash, expression, value, isHit) + , Map v (a, UF.WatchKind, Reference, Term v, Term v, IsCacheHit) + )) + -- IO (bindings :: [v,Term v], map :: ^^^) +evaluateWatches code ppe evaluationCache rt uf = do + -- 1. compute hashes for everything in the file + let m :: Map v (Reference, Term.Term v a) + m = first Reference.DerivedId <$> + Term.hashComponents (Map.fromList (UF.terms uf <> UF.allWatches uf)) + watches = Set.fromList (fst <$> UF.allWatches uf) + watchKinds :: Map v UF.WatchKind + watchKinds = Map.fromList [ (v, k) | (k, ws) <- Map.toList (UF.watches uf) + , (v,_) <- ws ] + unann = Term.amap (const ()) + -- 2. use the cache to lookup things already computed + m' <- fmap Map.fromList . for (Map.toList m) $ \(v, (r, t)) -> do + o <- evaluationCache r + case o of + Nothing -> pure (v, (r, ABT.annotation t, unann t, False)) + Just t' -> pure (v, (r, ABT.annotation t, t', True)) + -- 3. create a big ol' let rec whose body is a big tuple of all watches + let rv :: Map Reference v + rv = Map.fromList [ (r, v) | (v, (r, _)) <- Map.toList m ] + bindings :: [(v, Term v)] + bindings = [ (v, unref rv b) | (v, (_, _, b, _)) <- Map.toList m' ] + watchVars = [ Term.var () v | v <- toList watches ] + bigOl'LetRec = Term.letRec' True bindings (tupleTerm watchVars) + cl = void $ CL.fromUnisonFile uf <> code + -- 4. 
evaluate it and get all the results out of the tuple, then + -- create the result Map + out <- evaluate rt cl ppe bigOl'LetRec + case out of + Right out -> do + let + (bindings, results) = case out of + TupleTerm' results -> (mempty, results) + Term.LetRecNamed' bs (TupleTerm' results) -> (bs, results) + _ -> error $ "Evaluation should produce a tuple, but gave: " ++ show out + let go v eval (ref, a, uneval, isHit) = + (a, Map.findWithDefault (die v) v watchKinds, + ref, uneval, Term.etaNormalForm eval, isHit) + watchMap = Map.intersectionWithKey go + (Map.fromList (toList watches `zip` results)) m' + die v = error $ "not sure what kind of watch this is: " <> show v + pure $ Right (bindings, watchMap) + Left e -> pure (Left e) + where + -- unref :: Map Reference v -> Term.Term v a -> Term.Term v a + unref rv t = ABT.visitPure go t + where + go t@(Term.Ref' r@(Reference.DerivedId _)) = case Map.lookup r rv of + Nothing -> Nothing + Just v -> Just (Term.var (ABT.annotation t) v) + go _ = Nothing + +evaluateTerm + :: (Var v, Monoid a) + => CL.CodeLookup v IO a + -> PPE.PrettyPrintEnv + -> Runtime v + -> Term.Term v a + -> IO (Either Error (Term v)) +evaluateTerm codeLookup ppe rt tm = do + let uf = UF.UnisonFileId mempty mempty mempty + (Map.singleton UF.RegularWatch [(Var.nameds "result", tm)]) + selfContained <- Codebase.makeSelfContained' codeLookup uf + r <- evaluateWatches codeLookup ppe noCache rt selfContained + pure $ r <&> \(_,map) -> + let [(_loc, _kind, _hash, _src, value, _isHit)] = Map.elems map + in value diff --git a/parser-typechecker/src/Unison/Codebase/SearchResult.hs b/parser-typechecker/src/Unison/Codebase/SearchResult.hs new file mode 100644 index 0000000000..1c3272f109 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/SearchResult.hs @@ -0,0 +1,83 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codebase.SearchResult where + +import Unison.Prelude + +import qualified Data.Set as Set +import Unison.HashQualified' (HashQualified) 
+import qualified Unison.HashQualified' as HQ +import Unison.Name (Name) +import Unison.Names2 (Names'(Names), Names0) +import qualified Unison.Names2 as Names +import Unison.Reference (Reference) +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import qualified Unison.Util.Relation as R + +-- this Ord instance causes types < terms +data SearchResult = Tp TypeResult | Tm TermResult deriving (Eq, Ord, Show) + +data TermResult = TermResult + { termName :: HashQualified + , referent :: Referent + , termAliases :: Set HashQualified + } deriving (Eq, Ord, Show) + +data TypeResult = TypeResult + { typeName :: HashQualified + , reference :: Reference + , typeAliases :: Set HashQualified + } deriving (Eq, Ord, Show) + +pattern Tm' hq r as = Tm (TermResult hq r as) +pattern Tp' hq r as = Tp (TypeResult hq r as) + +termResult :: HashQualified -> Referent -> Set HashQualified -> SearchResult +termResult hq r as = Tm (TermResult hq r as) + +termSearchResult :: Names0 -> Name -> Referent -> SearchResult +termSearchResult b n r = + termResult (Names._hqTermName b n r) r (Names._hqTermAliases b n r) + +typeResult :: HashQualified -> Reference -> Set HashQualified -> SearchResult +typeResult hq r as = Tp (TypeResult hq r as) + +typeSearchResult :: Names0 -> Name -> Reference -> SearchResult +typeSearchResult b n r = + typeResult (Names._hqTypeName b n r) r (Names._hqTypeAliases b n r) + +name :: SearchResult -> HashQualified +name = \case + Tm t -> termName t + Tp t -> typeName t + +aliases :: SearchResult -> Set HashQualified +aliases = \case + Tm t -> termAliases t + Tp t -> typeAliases t + +-- | TypeResults yield a `Referent.Ref` +toReferent :: SearchResult -> Referent +toReferent (Tm (TermResult _ r _)) = r +toReferent (Tp (TypeResult _ r _)) = Referent.Ref r + +truncateAliases :: Int -> SearchResult -> SearchResult +truncateAliases n = \case + Tm (TermResult hq r as) -> termResult hq r (Set.map (HQ.take n) as) + Tp (TypeResult hq r as) -> 
typeResult hq r (Set.map (HQ.take n) as) + +-- | You may want to sort this list differently afterward. +fromNames :: Names0 -> [SearchResult] +fromNames b = + map (uncurry (typeSearchResult b)) (R.toList . Names.types $ b) <> + map (uncurry (termSearchResult b)) (R.toList . Names.terms $ b) + +_fromNames :: Names0 -> [SearchResult] +_fromNames n0@(Names terms types) = typeResults <> termResults where + typeResults = + [ typeResult (Names._hqTypeName n0 name r) r (Names._hqTypeAliases n0 name r) + | (name, r) <- R.toList types ] + termResults = + [ termResult (Names._hqTermName n0 name r) r (Names._hqTermAliases n0 name r) + | (name, r) <- R.toList terms] diff --git a/parser-typechecker/src/Unison/Codebase/Serialization.hs b/parser-typechecker/src/Unison/Codebase/Serialization.hs new file mode 100644 index 0000000000..edade8cc1c --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Serialization.hs @@ -0,0 +1,43 @@ +{-# LANGUAGE RankNTypes #-} + +module Unison.Codebase.Serialization where + +import Data.Bytes.Get (MonadGet, runGetS) +import Data.Bytes.Put (MonadPut, runPutS) +import Data.ByteString (ByteString, readFile, writeFile) +import UnliftIO.Directory (doesFileExist, createDirectoryIfMissing) +import System.FilePath (takeDirectory) +import Prelude hiding (readFile, writeFile) +import UnliftIO (MonadIO, liftIO) + +type Get a = forall m . MonadGet m => m a +type Put a = forall m . MonadPut m => a -> m () + +-- todo: do we use this? 
+data Format a = Format { + get :: Get a, + put :: Put a +} + +getFromBytes :: Get a -> ByteString -> Maybe a +getFromBytes getA bytes = + case runGetS getA bytes of Left _ -> Nothing; Right a -> Just a + +getFromFile :: MonadIO m => Get a -> FilePath -> m (Maybe a) +getFromFile getA file = do + b <- doesFileExist file + if b then getFromBytes getA <$> liftIO (readFile file) else pure Nothing + +getFromFile' :: MonadIO m => Get a -> FilePath -> m (Either String a) +getFromFile' getA file = do + b <- doesFileExist file + if b then runGetS getA <$> liftIO (readFile file) + else pure . Left $ "No such file: " ++ file + +putBytes :: Put a -> a -> ByteString +putBytes put a = runPutS (put a) + +putWithParentDirs :: MonadIO m => Put a -> FilePath -> a -> m () +putWithParentDirs putA file a = do + createDirectoryIfMissing True (takeDirectory file) + liftIO . writeFile file $ putBytes putA a diff --git a/parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs b/parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs new file mode 100644 index 0000000000..57d2f645c0 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Serialization/PutT.hs @@ -0,0 +1,57 @@ +module Unison.Codebase.Serialization.PutT where + +import Data.Bytes.Put +import qualified Data.Serialize.Put as Ser +import Data.Serialize.Put ( PutM + , runPutM + ) + +newtype PutT m a = PutT { unPutT :: m (PutM a) } + +instance Monad m => MonadPut (PutT m) where + putWord8 = PutT . pure . putWord8 + {-# INLINE putWord8 #-} + putByteString = PutT . pure . putByteString + {-# INLINE putByteString #-} + putLazyByteString = PutT . pure . putLazyByteString + {-# INLINE putLazyByteString #-} + flush = PutT $ pure flush + {-# INLINE flush #-} + putWord16le = PutT . pure . putWord16le + {-# INLINE putWord16le #-} + putWord16be = PutT . pure . putWord16be + {-# INLINE putWord16be #-} + putWord16host = PutT . pure . putWord16host + {-# INLINE putWord16host #-} + putWord32le = PutT . pure . 
putWord32le + {-# INLINE putWord32le #-} + putWord32be = PutT . pure . putWord32be + {-# INLINE putWord32be #-} + putWord32host = PutT . pure . putWord32host + {-# INLINE putWord32host #-} + putWord64le = PutT . pure . putWord64le + {-# INLINE putWord64le #-} + putWord64be = PutT . pure . putWord64be + {-# INLINE putWord64be #-} + putWord64host = PutT . pure . putWord64host + {-# INLINE putWord64host #-} + putWordhost = PutT . pure . putWordhost + {-# INLINE putWordhost #-} + +instance Functor m => Functor (PutT m) where + fmap f (PutT m) = PutT $ fmap (fmap f) m + +instance Applicative m => Applicative (PutT m) where + pure = PutT . pure . pure + (PutT f) <*> (PutT a) = PutT $ (<*>) <$> f <*> a + +instance Monad m => Monad (PutT m) where + (PutT m) >>= f = PutT $ do + putm <- m + let (a, bs) = runPutM putm + putm' <- unPutT $ f a + let (b, bs') = runPutM putm' + pure $ do + Ser.putByteString bs + Ser.putByteString bs' + pure b diff --git a/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs b/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs new file mode 100644 index 0000000000..e22e760983 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs @@ -0,0 +1,811 @@ +{-# LANGUAGE Strict #-} +{-# LANGUAGE RankNTypes #-} + +module Unison.Codebase.Serialization.V1 where + +import Unison.Prelude + +import Prelude hiding (getChar, putChar) + +-- import qualified Data.Text as Text +import qualified Unison.Pattern as Pattern +import Unison.Pattern ( Pattern + , SeqOp + ) +import Data.Bits ( Bits ) +import Data.Bytes.Get +import Data.Bytes.Put +import Data.Bytes.Serial ( serialize + , deserialize + , serializeBE + , deserializeBE + ) +import Data.Bytes.Signed ( Unsigned ) +import Data.Bytes.VarInt ( VarInt(..) ) +import qualified Data.Map as Map +import Data.List ( elemIndex + ) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Branch.Dependencies as BD +import Unison.Codebase.Causal ( Raw(..) 
+ , RawHash(..) + , unRawHash + ) +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.Metadata as Metadata +import Unison.NameSegment as NameSegment +import Unison.Codebase.Patch ( Patch(..) ) +import qualified Unison.Codebase.Patch as Patch +import Unison.Codebase.TermEdit ( TermEdit ) +import Unison.Codebase.TypeEdit ( TypeEdit ) +import Unison.Hash ( Hash ) +import Unison.Kind ( Kind ) +import Unison.Reference ( Reference ) +import Unison.Symbol ( Symbol(..) ) +import Unison.Term ( Term ) +import qualified Data.ByteString as B +import qualified Data.Sequence as Sequence +import qualified Data.Set as Set +import qualified Unison.ABT as ABT +import qualified Unison.Codebase.TermEdit as TermEdit +import qualified Unison.Codebase.TypeEdit as TypeEdit +import qualified Unison.Codebase.Serialization as S +import qualified Unison.Hash as Hash +import qualified Unison.Kind as Kind +import qualified Unison.Reference as Reference +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import Unison.Util.Star3 ( Star3 ) +import qualified Unison.Util.Star3 as Star3 +import Unison.Util.Relation ( Relation ) +import qualified Unison.Util.Relation as Relation +import qualified Unison.DataDeclaration as DataDeclaration +import Unison.DataDeclaration ( DataDeclaration + , EffectDeclaration + ) +import qualified Unison.Var as Var +import qualified Unison.ConstructorType as CT +import Unison.Type (Type) + +-- ABOUT THIS FORMAT: +-- +-- A serialization format for uncompiled Unison syntax trees. +-- +-- Finalized: No +-- +-- If Finalized: Yes, don't modify this file in a way that affects serialized form. +-- Instead, create a new file, V(n + 1). +-- This ensures that we have a well-defined serialized form and can read +-- and write old versions. 
+ +unknownTag :: (MonadGet m, Show a) => String -> a -> m x +unknownTag msg tag = + fail $ "unknown tag " ++ show tag ++ + " while deserializing: " ++ msg + +putRawCausal :: MonadPut m => (a -> m ()) -> Causal.Raw h a -> m () +putRawCausal putA = \case + RawOne a -> putWord8 0 >> putA a + RawCons a t -> putWord8 1 >> (putHash . unRawHash) t >> putA a + RawMerge a ts -> + putWord8 2 >> putFoldable (putHash . unRawHash) ts >> putA a + +getCausal0 :: MonadGet m => m a -> m (Causal.Raw h a) +getCausal0 getA = getWord8 >>= \case + 0 -> RawOne <$> getA + 1 -> flip RawCons <$> (RawHash <$> getHash) <*> getA + 2 -> flip RawMerge . Set.fromList <$> getList (RawHash <$> getHash) <*> getA + x -> unknownTag "Causal0" x + +-- Like getCausal, but doesn't bother to read the actual value in the causal, +-- it just reads the hashes. Useful for more efficient implementation of +-- `Causal.before`. +-- getCausal00 :: MonadGet m => m Causal00 +-- getCausal00 = getWord8 >>= \case +-- 0 -> pure One00 +-- 1 -> Cons00 <$> getHash +-- 2 -> Merge00 . Set.fromList <$> getList getHash + +-- 1. Can no longer read a causal using just MonadGet; +-- need a way to construct the loader that forms its tail. +-- Same problem with loading Branch0 with monadic tails. +-- 2. Without the monadic tail, need external info to know how to +-- load the tail. When modifying a nested structure, we +-- need a way to save the intermediate nodes. (e.g. zipper?) +-- 3. We ran into trouble trying to intermingle the marshalling monad +-- (put/get) with the loading/saving monad (io). +-- 4. PutT was weird because we don't think we want the Codebase monad to +-- randomly be able to accumulate bytestrings (put) that don't even reset. +-- 5. We could specialize `Causal m e` to a particular monad that tries to do +-- the right things wrt caching? +-- putCausal0 :: MonadPut m => Causal a -> (a -> m ()) -> m () +-- putCausal0 = undefined + +-- This loads the tail in order to write it? 
+-- May be crucial to do so, if "loading" tail from `pure`, but +-- otherwise weird. We'd like to skip writing the tail if it already +-- exists, but how can we tell? +-- Also, we're not even supposed to be writing the tail into the same buffer +-- as head. We should be writing the hash of the tail though, so we can +-- know which file we need to load it from; loading another file is also +-- something we can't do in this model. +---- +-- putCausal :: (MonadPut m, Monad n) => Causal n a -> (a -> m ()) -> n (m ()) +-- putCausal (Causal.One hash a) putA = +-- pure $ putWord8 1 *> putHash hash *> putA a +-- putCausal (Causal.ConsN m) putA = do +-- (conss, tail) <- m +-- pure (putWord8 2 *> putFoldable conss (putPair' putHash putA)) +-- *> putCausal tail putA +-- putCausal (Causal.Merge hash a tails) putA = do +-- pure (putWord8 3 *> putHash hash *> putA a) +-- putFoldableN (Map.toList tails) $ putPair'' putHash (>>= (`putCausal` putA)) +-- putCausal (Causal.Cons _ _ _) _ = +-- error "deserializing 'Causal': the ConsN pattern should have matched here!" + + +-- getCausal :: MonadGet m => m a -> m (Causal a) +-- getCausal getA = getWord8 >>= \case +-- 1 -> Causal.One <$> getHash <*> getA +-- 2 -> Causal.consN <$> getList (getPair getHash getA) <*> getCausal getA +-- 3 -> Causal.Merge <$> getHash <*> getA <*> +-- (Map.fromList <$> getList (getPair getHash $ getCausal getA)) +-- x -> unknownTag "causal" x + +-- getCausal :: + +putLength :: + (MonadPut m, Integral n, Integral (Unsigned n), + Bits n, Bits (Unsigned n)) + => n -> m () +putLength = serialize . 
VarInt + +getLength :: + (MonadGet m, Integral n, Integral (Unsigned n), + Bits n, Bits (Unsigned n)) + => m n +getLength = unVarInt <$> deserialize + +putText :: MonadPut m => Text -> m () +putText text = do + let bs = encodeUtf8 text + putLength $ B.length bs + putByteString bs + +getText :: MonadGet m => m Text +getText = do + len <- getLength + bs <- B.copy <$> getBytes len + pure $ decodeUtf8 bs + +skipText :: MonadGet m => m () +skipText = do + len <- getLength + void $ getBytes len + +putFloat :: MonadPut m => Double -> m () +putFloat = serializeBE + +getFloat :: MonadGet m => m Double +getFloat = deserializeBE + +putNat :: MonadPut m => Word64 -> m () +putNat = putWord64be + +getNat :: MonadGet m => m Word64 +getNat = getWord64be + +putInt :: MonadPut m => Int64 -> m () +putInt = serializeBE + +getInt :: MonadGet m => m Int64 +getInt = deserializeBE + +putBoolean :: MonadPut m => Bool -> m () +putBoolean False = putWord8 0 +putBoolean True = putWord8 1 + +getBoolean :: MonadGet m => m Bool +getBoolean = go =<< getWord8 where + go 0 = pure False + go 1 = pure True + go t = unknownTag "Boolean" t + +putHash :: MonadPut m => Hash -> m () +putHash h = do + let bs = Hash.toBytes h + putLength (B.length bs) + putByteString bs + +getHash :: MonadGet m => m Hash +getHash = do + len <- getLength + bs <- B.copy <$> getBytes len + pure $ Hash.fromBytes bs + +putReference :: MonadPut m => Reference -> m () +putReference r = case r of + Reference.Builtin name -> do + putWord8 0 + putText name + Reference.Derived hash i n -> do + putWord8 1 + putHash hash + putLength i + putLength n + _ -> error "unpossible" + +getReference :: MonadGet m => m Reference +getReference = do + tag <- getWord8 + case tag of + 0 -> Reference.Builtin <$> getText + 1 -> Reference.DerivedId <$> (Reference.Id <$> getHash <*> getLength <*> getLength) + _ -> unknownTag "Reference" tag + +putReferent :: MonadPut m => Referent -> m () +putReferent = \case + Referent.Ref r -> do + putWord8 0 + 
putReference r + Referent.Con r i ct -> do + putWord8 1 + putReference r + putLength i + putConstructorType ct + +putConstructorType :: MonadPut m => CT.ConstructorType -> m () +putConstructorType = \case + CT.Data -> putWord8 0 + CT.Effect -> putWord8 1 + +getReferent :: MonadGet m => m Referent +getReferent = do + tag <- getWord8 + case tag of + 0 -> Referent.Ref <$> getReference + 1 -> Referent.Con <$> getReference <*> getLength <*> getConstructorType + _ -> unknownTag "getReferent" tag + +getConstructorType :: MonadGet m => m CT.ConstructorType +getConstructorType = getWord8 >>= \case + 0 -> pure CT.Data + 1 -> pure CT.Effect + t -> unknownTag "getConstructorType" t + +putMaybe :: MonadPut m => Maybe a -> (a -> m ()) -> m () +putMaybe Nothing _ = putWord8 0 +putMaybe (Just a) putA = putWord8 1 *> putA a + +getMaybe :: MonadGet m => m a -> m (Maybe a) +getMaybe getA = getWord8 >>= \tag -> case tag of + 0 -> pure Nothing + 1 -> Just <$> getA + _ -> unknownTag "Maybe" tag + +putFoldable + :: (Foldable f, MonadPut m) => (a -> m ()) -> f a -> m () +putFoldable putA as = do + putLength (length as) + traverse_ putA as + + +-- putFoldableN +-- :: forall f m n a +-- . (Traversable f, MonadPut m, Applicative n) +-- => f a +-- -> (a -> n (m ())) +-- -> n (m ()) +-- putFoldableN as putAn = +-- pure (putLength @m (length as)) *> (fmap sequence_ $ traverse putAn as) + +getFolded :: MonadGet m => (b -> a -> b) -> b -> m a -> m b +getFolded f z a = + foldl' f z <$> getList a + +getList :: MonadGet m => m a -> m [a] +getList a = getLength >>= (`replicateM` a) + +putABT + :: (MonadPut m, Foldable f, Functor f, Ord v) + => (v -> m ()) + -> (a -> m ()) + -> (forall x . 
(x -> m ()) -> f x -> m ()) + -> ABT.Term f v a + -> m () +putABT putVar putA putF abt = + putFoldable putVar fvs *> go (ABT.annotateBound'' abt) + where + fvs = Set.toList $ ABT.freeVars abt + go (ABT.Term _ (a, env) abt) = putA a *> case abt of + ABT.Var v -> putWord8 0 *> putVarRef env v + ABT.Tm f -> putWord8 1 *> putF go f + ABT.Abs v body -> putWord8 2 *> putVar v *> go body + ABT.Cycle body -> putWord8 3 *> go body + + putVarRef env v = case v `elemIndex` env of + Just i -> putWord8 0 *> putLength i + Nothing -> case v `elemIndex` fvs of + Just i -> putWord8 1 *> putLength i + Nothing -> error "impossible: var not free or bound" + +getABT + :: (MonadGet m, Foldable f, Functor f, Ord v) + => m v + -> m a + -> (forall x . m x -> m (f x)) + -> m (ABT.Term f v a) +getABT getVar getA getF = getList getVar >>= go [] where + go env fvs = do + a <- getA + tag <- getWord8 + case tag of + 0 -> do + tag <- getWord8 + case tag of + 0 -> ABT.annotatedVar a . (env !!) <$> getLength + 1 -> ABT.annotatedVar a . (fvs !!) 
<$> getLength + _ -> unknownTag "getABT.Var" tag + 1 -> ABT.tm' a <$> getF (go env fvs) + 2 -> do + v <- getVar + body <- go (v:env) fvs + pure $ ABT.abs' a v body + 3 -> ABT.cycle' a <$> go env fvs + _ -> unknownTag "getABT" tag + +putKind :: MonadPut m => Kind -> m () +putKind k = case k of + Kind.Star -> putWord8 0 + Kind.Arrow i o -> putWord8 1 *> putKind i *> putKind o + +getKind :: MonadGet m => m Kind +getKind = getWord8 >>= \tag -> case tag of + 0 -> pure Kind.Star + 1 -> Kind.Arrow <$> getKind <*> getKind + _ -> unknownTag "getKind" tag + +putType :: (MonadPut m, Ord v) + => (v -> m ()) -> (a -> m ()) + -> Type v a + -> m () +putType putVar putA = putABT putVar putA go where + go putChild t = case t of + Type.Ref r -> putWord8 0 *> putReference r + Type.Arrow i o -> putWord8 1 *> putChild i *> putChild o + Type.Ann t k -> putWord8 2 *> putChild t *> putKind k + Type.App f x -> putWord8 3 *> putChild f *> putChild x + Type.Effect e t -> putWord8 4 *> putChild e *> putChild t + Type.Effects es -> putWord8 5 *> putFoldable putChild es + Type.Forall body -> putWord8 6 *> putChild body + Type.IntroOuter body -> putWord8 7 *> putChild body + +getType :: (MonadGet m, Ord v) + => m v -> m a -> m (Type v a) +getType getVar getA = getABT getVar getA go where + go getChild = getWord8 >>= \tag -> case tag of + 0 -> Type.Ref <$> getReference + 1 -> Type.Arrow <$> getChild <*> getChild + 2 -> Type.Ann <$> getChild <*> getKind + 3 -> Type.App <$> getChild <*> getChild + 4 -> Type.Effect <$> getChild <*> getChild + 5 -> Type.Effects <$> getList getChild + 6 -> Type.Forall <$> getChild + 7 -> Type.IntroOuter <$> getChild + _ -> unknownTag "getType" tag + +putSymbol :: MonadPut m => Symbol -> m () +putSymbol (Symbol id typ) = putLength id *> putText (Var.rawName typ) + +getSymbol :: MonadGet m => m Symbol +getSymbol = Symbol <$> getLength <*> (Var.User <$> getText) + +putPattern :: MonadPut m => (a -> m ()) -> Pattern a -> m () +putPattern putA p = case p of + 
Pattern.Unbound a -> putWord8 0 *> putA a + Pattern.Var a -> putWord8 1 *> putA a + Pattern.Boolean a b -> putWord8 2 *> putA a *> putBoolean b + Pattern.Int a n -> putWord8 3 *> putA a *> putInt n + Pattern.Nat a n -> putWord8 4 *> putA a *> putNat n + Pattern.Float a n -> putWord8 5 *> putA a *> putFloat n + Pattern.Constructor a r cid ps -> + putWord8 6 + *> putA a + *> putReference r + *> putLength cid + *> putFoldable (putPattern putA) ps + Pattern.As a p -> putWord8 7 *> putA a *> putPattern putA p + Pattern.EffectPure a p -> putWord8 8 *> putA a *> putPattern putA p + Pattern.EffectBind a r cid args k -> + putWord8 9 + *> putA a + *> putReference r + *> putLength cid + *> putFoldable (putPattern putA) args + *> putPattern putA k + Pattern.SequenceLiteral a ps -> + putWord8 10 *> putA a *> putFoldable (putPattern putA) ps + Pattern.SequenceOp a l op r -> + putWord8 11 + *> putA a + *> putPattern putA l + *> putSeqOp op + *> putPattern putA r + Pattern.Text a t -> putWord8 12 *> putA a *> putText t + Pattern.Char a c -> putWord8 13 *> putA a *> putChar c + +putSeqOp :: MonadPut m => SeqOp -> m () +putSeqOp Pattern.Cons = putWord8 0 +putSeqOp Pattern.Snoc = putWord8 1 +putSeqOp Pattern.Concat = putWord8 2 + +getSeqOp :: MonadGet m => m SeqOp +getSeqOp = getWord8 >>= \case + 0 -> pure Pattern.Cons + 1 -> pure Pattern.Snoc + 2 -> pure Pattern.Concat + tag -> unknownTag "SeqOp" tag + +getPattern :: MonadGet m => m a -> m (Pattern a) +getPattern getA = getWord8 >>= \tag -> case tag of + 0 -> Pattern.Unbound <$> getA + 1 -> Pattern.Var <$> getA + 2 -> Pattern.Boolean <$> getA <*> getBoolean + 3 -> Pattern.Int <$> getA <*> getInt + 4 -> Pattern.Nat <$> getA <*> getNat + 5 -> Pattern.Float <$> getA <*> getFloat + 6 -> Pattern.Constructor <$> getA <*> getReference <*> getLength <*> getList + (getPattern getA) + 7 -> Pattern.As <$> getA <*> getPattern getA + 8 -> Pattern.EffectPure <$> getA <*> getPattern getA + 9 -> + Pattern.EffectBind + <$> getA + <*> getReference + 
<*> getLength + <*> getList (getPattern getA) + <*> getPattern getA + 10 -> Pattern.SequenceLiteral <$> getA <*> getList (getPattern getA) + 11 -> + Pattern.SequenceOp + <$> getA + <*> getPattern getA + <*> getSeqOp + <*> getPattern getA + 12 -> Pattern.Text <$> getA <*> getText + 13 -> Pattern.Char <$> getA <*> getChar + _ -> unknownTag "Pattern" tag + +putTerm :: (MonadPut m, Ord v) + => (v -> m ()) -> (a -> m ()) + -> Term v a + -> m () +putTerm putVar putA = putABT putVar putA go where + go putChild t = case t of + Term.Int n + -> putWord8 0 *> putInt n + Term.Nat n + -> putWord8 1 *> putNat n + Term.Float n + -> putWord8 2 *> putFloat n + Term.Boolean b + -> putWord8 3 *> putBoolean b + Term.Text t + -> putWord8 4 *> putText t + Term.Blank _ + -> error "can't serialize term with blanks" + Term.Ref r + -> putWord8 5 *> putReference r + Term.Constructor r cid + -> putWord8 6 *> putReference r *> putLength cid + Term.Request r cid + -> putWord8 7 *> putReference r *> putLength cid + Term.Handle h a + -> putWord8 8 *> putChild h *> putChild a + Term.App f arg + -> putWord8 9 *> putChild f *> putChild arg + Term.Ann e t + -> putWord8 10 *> putChild e *> putType putVar putA t + Term.Sequence vs + -> putWord8 11 *> putFoldable putChild vs + Term.If cond t f + -> putWord8 12 *> putChild cond *> putChild t *> putChild f + Term.And x y + -> putWord8 13 *> putChild x *> putChild y + Term.Or x y + -> putWord8 14 *> putChild x *> putChild y + Term.Lam body + -> putWord8 15 *> putChild body + Term.LetRec _ bs body + -> putWord8 16 *> putFoldable putChild bs *> putChild body + Term.Let _ b body + -> putWord8 17 *> putChild b *> putChild body + Term.Match s cases + -> putWord8 18 *> putChild s *> putFoldable (putMatchCase putA putChild) cases + Term.Char c + -> putWord8 19 *> putChar c + Term.TermLink r + -> putWord8 20 *> putReferent r + Term.TypeLink r + -> putWord8 21 *> putReference r + + putMatchCase :: MonadPut m => (a -> m ()) -> (x -> m ()) -> Term.MatchCase a x -> m 
() + putMatchCase putA putChild (Term.MatchCase pat guard body) = + putPattern putA pat *> putMaybe guard putChild *> putChild body + +getTerm :: (MonadGet m, Ord v) + => m v -> m a -> m (Term v a) +getTerm getVar getA = getABT getVar getA go where + go getChild = getWord8 >>= \tag -> case tag of + 0 -> Term.Int <$> getInt + 1 -> Term.Nat <$> getNat + 2 -> Term.Float <$> getFloat + 3 -> Term.Boolean <$> getBoolean + 4 -> Term.Text <$> getText + 5 -> Term.Ref <$> getReference + 6 -> Term.Constructor <$> getReference <*> getLength + 7 -> Term.Request <$> getReference <*> getLength + 8 -> Term.Handle <$> getChild <*> getChild + 9 -> Term.App <$> getChild <*> getChild + 10 -> Term.Ann <$> getChild <*> getType getVar getA + 11 -> Term.Sequence . Sequence.fromList <$> getList getChild + 12 -> Term.If <$> getChild <*> getChild <*> getChild + 13 -> Term.And <$> getChild <*> getChild + 14 -> Term.Or <$> getChild <*> getChild + 15 -> Term.Lam <$> getChild + 16 -> Term.LetRec False <$> getList getChild <*> getChild + 17 -> Term.Let False <$> getChild <*> getChild + 18 -> Term.Match <$> getChild + <*> getList (Term.MatchCase <$> getPattern getA <*> getMaybe getChild <*> getChild) + 19 -> Term.Char <$> getChar + 20 -> Term.TermLink <$> getReferent + 21 -> Term.TypeLink <$> getReference + _ -> unknownTag "getTerm" tag + +putPair :: MonadPut m => (a -> m ()) -> (b -> m ()) -> (a,b) -> m () +putPair putA putB (a,b) = putA a *> putB b + +putPair'' + :: (MonadPut m, Monad n) + => (a -> m ()) + -> (b -> n (m ())) + -> (a, b) + -> n (m ()) +putPair'' putA putBn (a, b) = pure (putA a) *> putBn b + +getPair :: MonadGet m => m a -> m b -> m (a,b) +getPair = liftA2 (,) + +putTuple3' + :: MonadPut m + => (a -> m ()) + -> (b -> m ()) + -> (c -> m ()) + -> (a, b, c) + -> m () +putTuple3' putA putB putC (a, b, c) = putA a *> putB b *> putC c + +getTuple3 :: MonadGet m => m a -> m b -> m c -> m (a,b,c) +getTuple3 = liftA3 (,,) + +putRelation :: MonadPut m => (a -> m ()) -> (b -> m ()) -> 
Relation a b -> m () +putRelation putA putB r = putFoldable (putPair putA putB) (Relation.toList r) + +getRelation :: (MonadGet m, Ord a, Ord b) => m a -> m b -> m (Relation a b) +getRelation getA getB = Relation.fromList <$> getList (getPair getA getB) + +putMap :: MonadPut m => (a -> m ()) -> (b -> m ()) -> Map a b -> m () +putMap putA putB m = putFoldable (putPair putA putB) (Map.toList m) + +getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) +getMap getA getB = Map.fromList <$> getList (getPair getA getB) + +putTermEdit :: MonadPut m => TermEdit -> m () +putTermEdit (TermEdit.Replace r typing) = + putWord8 1 *> putReference r *> case typing of + TermEdit.Same -> putWord8 1 + TermEdit.Subtype -> putWord8 2 + TermEdit.Different -> putWord8 3 +putTermEdit TermEdit.Deprecate = putWord8 2 + +getTermEdit :: MonadGet m => m TermEdit +getTermEdit = getWord8 >>= \case + 1 -> TermEdit.Replace <$> getReference <*> (getWord8 >>= \case + 1 -> pure TermEdit.Same + 2 -> pure TermEdit.Subtype + 3 -> pure TermEdit.Different + t -> unknownTag "TermEdit.Replace" t + ) + 2 -> pure TermEdit.Deprecate + t -> unknownTag "TermEdit" t + +putTypeEdit :: MonadPut m => TypeEdit -> m () +putTypeEdit (TypeEdit.Replace r) = putWord8 1 *> putReference r +putTypeEdit TypeEdit.Deprecate = putWord8 2 + +getTypeEdit :: MonadGet m => m TypeEdit +getTypeEdit = getWord8 >>= \case + 1 -> TypeEdit.Replace <$> getReference + 2 -> pure TypeEdit.Deprecate + t -> unknownTag "TypeEdit" t + +putStar3 + :: MonadPut m + => (f -> m ()) + -> (d1 -> m ()) + -> (d2 -> m ()) + -> (d3 -> m ()) + -> Star3 f d1 d2 d3 + -> m () +putStar3 putF putD1 putD2 putD3 s = do + putFoldable putF (Star3.fact s) + putRelation putF putD1 (Star3.d1 s) + putRelation putF putD2 (Star3.d2 s) + putRelation putF putD3 (Star3.d3 s) + +getStar3 + :: (MonadGet m, Ord fact, Ord d1, Ord d2, Ord d3) + => m fact + -> m d1 + -> m d2 + -> m d3 + -> m (Star3 fact d1 d2 d3) +getStar3 getF getD1 getD2 getD3 = + Star3.Star3 + <$> 
(Set.fromList <$> getList getF) + <*> getRelation getF getD1 + <*> getRelation getF getD2 + <*> getRelation getF getD3 + +putBranchStar :: MonadPut m => (a -> m ()) -> (n -> m ()) -> Branch.Star a n -> m () +putBranchStar putA putN = + putStar3 putA putN putMetadataType (putPair putMetadataType putMetadataValue) + +getBranchStar :: (Ord a, Ord n, MonadGet m) => m a -> m n -> m (Branch.Star a n) +getBranchStar getA getN = getStar3 getA getN getMetadataType (getPair getMetadataType getMetadataValue) + +putLink :: MonadPut m => (Hash, mb) -> m () +putLink (h, _) = do + -- 0 means local; later we may have remote links with other ids + putWord8 0 + putHash h + +putChar :: MonadPut m => Char -> m () +putChar = serialize . VarInt . fromEnum + +getChar :: MonadGet m => m Char +getChar = toEnum . unVarInt <$> deserialize + +putNameSegment :: MonadPut m => NameSegment -> m () +putNameSegment = putText . NameSegment.toText + +getNameSegment :: MonadGet m => m NameSegment +getNameSegment = NameSegment <$> getText + +putRawBranch :: MonadPut m => Branch.Raw -> m () +putRawBranch (Branch.Raw terms types children edits) = do + putBranchStar putReferent putNameSegment terms + putBranchStar putReference putNameSegment types + putMap putNameSegment (putHash . 
unRawHash) children + putMap putNameSegment putHash edits + +getMetadataType :: MonadGet m => m Metadata.Type +getMetadataType = getReference + +putMetadataType :: MonadPut m => Metadata.Type -> m () +putMetadataType = putReference + +getMetadataValue :: MonadGet m => m Metadata.Value +getMetadataValue = getReference + +putMetadataValue :: MonadPut m => Metadata.Value -> m () +putMetadataValue = putReference + +getRawBranch :: MonadGet m => m Branch.Raw +getRawBranch = + Branch.Raw + <$> getBranchStar getReferent getNameSegment + <*> getBranchStar getReference getNameSegment + <*> getMap getNameSegment (RawHash <$> getHash) + <*> getMap getNameSegment getHash + +-- `getBranchDependencies` consumes the same data as `getRawBranch` +getBranchDependencies :: MonadGet m => m (BD.Branches n, BD.Dependencies) +getBranchDependencies = do + (terms1, types1) <- getTermStarDependencies + (terms2, types2) <- getTypeStarDependencies + childHashes <- fmap (RawHash . snd) <$> getList (getPair skipText getHash) + editHashes <- Set.fromList . fmap snd <$> getList (getPair skipText getHash) + pure ( childHashes `zip` repeat Nothing + , BD.Dependencies editHashes (terms1 <> terms2) (types1 <> types2) ) + where + -- returns things, metadata types, metadata values + getStarReferences :: + (MonadGet m, Ord r) => m r -> m ([r], [Metadata.Value]) + getStarReferences getR = do + void $ getList getR -- throw away the `facts` + -- d1: references and namesegments + rs :: [r] <- fmap fst <$> getList (getPair getR skipText) + -- d2: metadata type index + void $ getList (getPair getR getMetadataType) + -- d3: metadata (type, value) index + (_metadataTypes, metadataValues) <- unzip . 
fmap snd <$> + getList (getPair getR (getPair getMetadataType getMetadataValue)) + pure (rs, metadataValues) + + getTermStarDependencies :: MonadGet m => m (Set Reference.Id, Set Reference.Id) + getTermStarDependencies = do + (referents, mdValues) <- getStarReferences getReferent + let termIds = Set.fromList $ + [ i | Referent.Ref (Reference.DerivedId i) <- referents ] ++ + [ i | Reference.DerivedId i <- mdValues ] + declIds = Set.fromList $ + [ i | Referent.Con (Reference.DerivedId i) _cid _ct <- referents ] + pure (termIds, declIds) + + getTypeStarDependencies :: MonadGet m => m (Set Reference.Id, Set Reference.Id) + getTypeStarDependencies = do + (references, mdValues) <- getStarReferences getReference + let termIds = Set.fromList $ [ i | Reference.DerivedId i <- mdValues ] + declIds = Set.fromList $ [ i | Reference.DerivedId i <- references ] + pure (termIds, declIds) + +putDataDeclaration :: (MonadPut m, Ord v) + => (v -> m ()) -> (a -> m ()) + -> DataDeclaration v a + -> m () +putDataDeclaration putV putA decl = do + putModifier $ DataDeclaration.modifier decl + putA $ DataDeclaration.annotation decl + putFoldable putV (DataDeclaration.bound decl) + putFoldable (putTuple3' putA putV (putType putV putA)) (DataDeclaration.constructors' decl) + +getDataDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (DataDeclaration v a) +getDataDeclaration getV getA = DataDeclaration.DataDeclaration <$> + getModifier <*> + getA <*> + getList getV <*> + getList (getTuple3 getA getV (getType getV getA)) + +putModifier :: MonadPut m => DataDeclaration.Modifier -> m () +putModifier DataDeclaration.Structural = putWord8 0 +putModifier (DataDeclaration.Unique txt) = putWord8 1 *> putText txt + +getModifier :: MonadGet m => m DataDeclaration.Modifier +getModifier = getWord8 >>= \case + 0 -> pure DataDeclaration.Structural + 1 -> DataDeclaration.Unique <$> getText + tag -> unknownTag "DataDeclaration.Modifier" tag + +putEffectDeclaration :: + (MonadPut m, Ord v) => (v -> m ()) -> 
(a -> m ()) -> EffectDeclaration v a -> m () +putEffectDeclaration putV putA (DataDeclaration.EffectDeclaration d) = + putDataDeclaration putV putA d + +getEffectDeclaration :: (MonadGet m, Ord v) => m v -> m a -> m (EffectDeclaration v a) +getEffectDeclaration getV getA = + DataDeclaration.EffectDeclaration <$> getDataDeclaration getV getA + +putEither :: (MonadPut m) => (a -> m ()) -> (b -> m ()) -> Either a b -> m () +putEither putL _ (Left a) = putWord8 0 *> putL a +putEither _ putR (Right b) = putWord8 1 *> putR b + +getEither :: MonadGet m => m a -> m b -> m (Either a b) +getEither getL getR = getWord8 >>= \case + 0 -> Left <$> getL + 1 -> Right <$> getR + tag -> unknownTag "Either" tag + +formatSymbol :: S.Format Symbol +formatSymbol = S.Format getSymbol putSymbol + +putEdits :: MonadPut m => Patch -> m () +putEdits edits = + putRelation putReference putTermEdit (Patch._termEdits edits) >> + putRelation putReference putTypeEdit (Patch._typeEdits edits) + +getEdits :: MonadGet m => m Patch +getEdits = Patch <$> getRelation getReference getTermEdit + <*> getRelation getReference getTypeEdit diff --git a/parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs b/parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs new file mode 100644 index 0000000000..5496f5a1fc --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/ShortBranchHash.hs @@ -0,0 +1,35 @@ +module Unison.Codebase.ShortBranchHash where + +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Hash as Hash +import qualified Data.Text as Text +import qualified Data.Set as Set +import Data.Text (Text) + +newtype ShortBranchHash = + ShortBranchHash { toText :: Text } -- base32hex characters + deriving (Eq, Ord) + +toString :: ShortBranchHash -> String +toString = Text.unpack . toText + +toHash :: ShortBranchHash -> Maybe Branch.Hash +toHash = fmap Causal.RawHash . Hash.fromBase32Hex . 
toText + +fromHash :: Int -> Branch.Hash -> ShortBranchHash +fromHash len = + ShortBranchHash . Text.take len . Hash.base32Hex . Causal.unRawHash + +fullFromHash :: Branch.Hash -> ShortBranchHash +fullFromHash = ShortBranchHash . Hash.base32Hex . Causal.unRawHash + +-- abc -> SBH abc +-- #abc -> SBH abc +fromText :: Text -> Maybe ShortBranchHash +fromText t | Text.all (`Set.member` Hash.validBase32HexChars) t = + Just . ShortBranchHash . Text.dropWhile (=='#') $ t +fromText _ = Nothing + +instance Show ShortBranchHash where + show (ShortBranchHash h) = '#' : Text.unpack h diff --git a/parser-typechecker/src/Unison/Codebase/SyncMode.hs b/parser-typechecker/src/Unison/Codebase/SyncMode.hs new file mode 100644 index 0000000000..67f79a6518 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/SyncMode.hs @@ -0,0 +1,3 @@ +module Unison.Codebase.SyncMode where + +data SyncMode = ShortCircuit | Complete deriving (Eq, Show) diff --git a/parser-typechecker/src/Unison/Codebase/TermEdit.hs b/parser-typechecker/src/Unison/Codebase/TermEdit.hs new file mode 100644 index 0000000000..7e2239024f --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/TermEdit.hs @@ -0,0 +1,51 @@ +module Unison.Codebase.TermEdit where + +import Unison.Hashable (Hashable) +import qualified Unison.Hashable as H +import Unison.Reference (Reference) +import qualified Unison.Typechecker as Typechecker +import Unison.Type (Type) +import Unison.Var (Var) + +data TermEdit = Replace Reference Typing | Deprecate + deriving (Eq, Ord, Show) + +references :: TermEdit -> [Reference] +references (Replace r _) = [r] +references Deprecate = [] + +-- Replacements with the Same type can be automatically propagated. +-- Replacements with a Subtype can be automatically propagated but may result in dependents getting more general types, so requires re-inference. +-- Replacements of a Different type need to be manually propagated by the programmer. 
+data Typing = Same | Subtype | Different + deriving (Eq, Ord, Show) + +instance Hashable Typing where + tokens Same = [H.Tag 0] + tokens Subtype = [H.Tag 1] + tokens Different = [H.Tag 2] + +instance Hashable TermEdit where + tokens (Replace r t) = [H.Tag 0] ++ H.tokens r ++ H.tokens t + tokens Deprecate = [H.Tag 1] + +toReference :: TermEdit -> Maybe Reference +toReference (Replace r _) = Just r +toReference Deprecate = Nothing + +isTypePreserving :: TermEdit -> Bool +isTypePreserving e = case e of + Replace _ Same -> True + Replace _ Subtype -> True + _ -> False + +isSame :: TermEdit -> Bool +isSame e = case e of + Replace _ Same -> True + _ -> False + +typing :: Var v => Type v loc -> Type v loc -> Typing +typing newType oldType | Typechecker.isEqual newType oldType = Same + | Typechecker.isSubtype newType oldType = Subtype + | otherwise = Different + diff --git a/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs b/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs new file mode 100644 index 0000000000..8eb2b9e2bb --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs @@ -0,0 +1,427 @@ +{-# Language OverloadedStrings #-} +{-# Language BangPatterns #-} +{-# Language ViewPatterns #-} + +module Unison.Codebase.TranscriptParser ( + Stanza(..), FenceType, ExpectingError, Hidden, Err, UcmCommand(..), + run, parse, parseFile) + where + +-- import qualified Text.Megaparsec.Char as P +import Control.Concurrent.STM (atomically) +import Control.Exception (finally) +import Control.Monad.State (runStateT) +import Data.List (isSubsequenceOf) +import Data.IORef +import Prelude hiding (readFile, writeFile) +import System.Directory ( doesFileExist ) +import System.Exit (die) +import System.IO.Error (catchIOError) +import System.Environment (getProgName) +import Unison.Codebase (Codebase) +import Unison.Codebase.Editor.Command (LoadSourceResult (..)) +import Unison.Codebase.Editor.Input (Input (..), Event(UnisonFileChanged)) +import 
Unison.CommandLine +import Unison.CommandLine.InputPattern (InputPattern (aliases, patternName)) +import Unison.CommandLine.InputPatterns (validInputs) +import Unison.CommandLine.OutputMessages (notifyUser, notifyNumbered) +import Unison.Parser (Ann) +import Unison.Prelude +import Unison.PrettyTerminal +import Unison.Symbol (Symbol) +import Unison.CommandLine.Main (asciiartUnison, expandNumber) +import qualified Data.Char as Char +import qualified Data.Map as Map +import qualified Data.Text as Text +import qualified System.IO as IO +import qualified Data.Configurator as Config +import qualified Crypto.Random as Random +import qualified Text.Megaparsec as P +import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Editor.HandleCommand as HandleCommand +import qualified Unison.Codebase.Editor.HandleInput as HandleInput +import qualified Unison.Codebase.Path as Path +import qualified Unison.Codebase.Runtime as Runtime +import qualified Unison.CommandLine.InputPattern as IP +import qualified Unison.Runtime.Rt1IO as Rt1 +import qualified Unison.Runtime.Interface as RTI +import qualified Unison.Util.Pretty as P +import qualified Unison.Util.TQueue as Q +import qualified Unison.Codebase.Editor.Output as Output +import Control.Lens (view) +import Control.Error (rightMay) + +type ExpectingError = Bool +type Err = String +type ScratchFileName = Text +type FenceType = Text + +data Hidden = Shown | HideOutput | HideAll + deriving (Eq, Show) +data UcmCommand = UcmCommand Path.Absolute Text + +data Stanza + = Ucm Hidden ExpectingError [UcmCommand] + | Unison Hidden ExpectingError (Maybe ScratchFileName) Text + | UnprocessedFence FenceType Text + | Unfenced Text + +instance Show UcmCommand where + show (UcmCommand path txt) = show path <> ">" <> Text.unpack txt + +instance Show Stanza where + show s = case s of + Ucm _ _ cmds -> unlines [ + "```ucm", + foldl (\x y -> x ++ show y) "" cmds, + "```" + ] + 
Unison _hide _ fname txt -> unlines [ + "```unison", + case fname of + Nothing -> Text.unpack txt <> "```\n" + Just fname -> unlines [ + "---", + "title: " <> Text.unpack fname, + "---", + Text.unpack txt, + "```", + "" ] + ] + UnprocessedFence typ txt -> unlines [ + "```" <> Text.unpack typ, + Text.unpack txt, + "```", "" ] + Unfenced txt -> Text.unpack txt + +parseFile :: FilePath -> IO (Either Err [Stanza]) +parseFile filePath = do + exists <- doesFileExist filePath + if exists then do + txt <- readUtf8 filePath + pure $ parse filePath txt + else + pure $ Left $ show filePath ++ " does not exist" + +parse :: String -> Text -> Either Err [Stanza] +parse srcName txt = case P.parse (stanzas <* P.eof) srcName txt of + Right a -> Right a + Left e -> Left (show e) + +run :: Maybe Bool -> FilePath -> FilePath -> [Stanza] -> Codebase IO Symbol Ann -> Branch.Cache IO -> IO Text +run newRt dir configFile stanzas codebase branchCache = do + let initialPath = Path.absoluteEmpty + putPrettyLn $ P.lines [ + asciiartUnison, "", + "Running the provided transcript file...", + "" + ] + root <- fromMaybe Branch.empty . rightMay <$> Codebase.getRootBranch codebase + do + pathRef <- newIORef initialPath + numberedArgsRef <- newIORef [] + inputQueue <- Q.newIO + cmdQueue <- Q.newIO + unisonFiles <- newIORef Map.empty + out <- newIORef mempty + hidden <- newIORef Shown + allowErrors <- newIORef False + hasErrors <- newIORef False + mStanza <- newIORef Nothing + (config, cancelConfig) <- + catchIOError (watchConfig configFile) $ \_ -> + die "Your .unisonConfig could not be loaded. Check that it's correct!" + runtime <- do + b <- maybe (Config.lookupDefault False config "new-runtime") pure newRt + if b then RTI.startRuntime else pure Rt1.runtime + traverse_ (atomically . 
Q.enqueue inputQueue) (stanzas `zip` [1..]) + let patternMap = + Map.fromList + $ validInputs + >>= (\p -> (patternName p, p) : ((, p) <$> aliases p)) + let + output' :: Bool -> String -> IO () + output' inputEcho msg = do + hide <- readIORef hidden + unless (hideOutput inputEcho hide) $ modifyIORef' out (\acc -> acc <> pure msg) + + hideOutput :: Bool -> Hidden -> Bool + hideOutput inputEcho = \case + Shown -> False + HideOutput -> True && (not inputEcho) + HideAll -> True + + output = output' False + outputEcho = output' True + + awaitInput = do + cmd <- atomically (Q.tryDequeue cmdQueue) + case cmd of + -- end of ucm block + Just Nothing -> do + output "\n```\n" + dieUnexpectedSuccess + awaitInput + -- ucm command to run + Just (Just p@(UcmCommand path lineTxt)) -> do + curPath <- readIORef pathRef + numberedArgs <- readIORef numberedArgsRef + if curPath /= path then do + atomically $ Q.undequeue cmdQueue (Just p) + pure $ Right (SwitchBranchI (Path.absoluteToPath' path)) + else case (>>= expandNumber numberedArgs) + . words . Text.unpack $ lineTxt of + [] -> awaitInput + cmd:args -> do + output ("\n" <> show p <> "\n") + case Map.lookup cmd patternMap of + -- invalid command is treated as a failure + Nothing -> + dieWithMsg + Just pat -> case IP.parse pat args of + Left msg -> do + output $ P.toPlain 65 (P.indentN 2 msg <> P.newline <> P.newline) + dieWithMsg + Right input -> pure $ Right input + + Nothing -> do + dieUnexpectedSuccess + writeIORef hidden Shown + writeIORef allowErrors False + maybeStanza <- atomically (Q.tryDequeue inputQueue) + _ <- writeIORef mStanza maybeStanza + case maybeStanza of + Nothing -> do + putStrLn "" + pure $ Right QuitI + Just (s,idx) -> do + putStr $ "\r⚙️ Processing stanza " ++ show idx ++ " of " + ++ show (length stanzas) ++ "." 
+ IO.hFlush IO.stdout + case s of + Unfenced _ -> do + output $ show s + awaitInput + UnprocessedFence _ _ -> do + output $ show s + awaitInput + Unison hide errOk filename txt -> do + writeIORef hidden hide + outputEcho $ show s + writeIORef allowErrors errOk + output "```ucm\n" + atomically . Q.enqueue cmdQueue $ Nothing + modifyIORef' unisonFiles (Map.insert (fromMaybe "scratch.u" filename) txt) + pure $ Left (UnisonFileChanged (fromMaybe "scratch.u" filename) txt) + Ucm hide errOk cmds -> do + writeIORef hidden hide + writeIORef allowErrors errOk + writeIORef hasErrors False + output "```ucm" + traverse_ (atomically . Q.enqueue cmdQueue . Just) cmds + atomically . Q.enqueue cmdQueue $ Nothing + awaitInput + + loadPreviousUnisonBlock name = do + ufs <- readIORef unisonFiles + case Map.lookup name ufs of + Just uf -> + return (LoadSuccess uf) + Nothing -> + return InvalidSourceNameError + + cleanup = do Runtime.terminate runtime; cancelConfig + print o = do + msg <- notifyUser dir o + errOk <- readIORef allowErrors + let rendered = P.toPlain 65 (P.border 2 msg) + output rendered + when (Output.isFailure o) $ + if errOk then writeIORef hasErrors True + else dieWithMsg + + printNumbered o = do + let (msg, numberedArgs) = notifyNumbered o + errOk <- readIORef allowErrors + let rendered = P.toPlain 65 (P.border 2 msg) + output rendered + when (Output.isNumberedFailure o) $ + if errOk then writeIORef hasErrors True + else dieWithMsg + pure numberedArgs + + -- Looks at the current stanza and decides if it is contained in the + -- output so far. Appends it if not. + appendFailingStanza :: IO () + appendFailingStanza = do + stanzaOpt <- readIORef mStanza + currentOut <- readIORef out + let stnz = maybe "" show (fmap fst stanzaOpt :: Maybe Stanza) + unless (stnz `isSubsequenceOf` concat currentOut) $ + modifyIORef' out (\acc -> acc <> pure stnz) + + -- output ``` and new lines then call transcriptFailure + dieWithMsg :: forall a. 
IO a + dieWithMsg = do + executable <- getProgName + output "\n```\n\n" + appendFailingStanza + transcriptFailure out $ Text.unlines [ + "\128721", "", + "The transcript failed due to an error encountered in the stanza above.", "", + "Run `" <> Text.pack executable <> " -codebase " <> Text.pack dir <> "` " <> "to do more work with it."] + + dieUnexpectedSuccess :: IO () + dieUnexpectedSuccess = do + executable <- getProgName + errOk <- readIORef allowErrors + hasErr <- readIORef hasErrors + when (errOk && not hasErr) $ do + output "\n```\n\n" + appendFailingStanza + transcriptFailure out $ Text.unlines [ + "\128721", "", + "The transcript was expecting an error in the stanza above, but did not encounter one.", "", + "Run `" <> Text.pack executable <> " -codebase " <> Text.pack dir <> "` " <> "to do more work with it."] + + loop state = do + writeIORef pathRef (view HandleInput.currentPath state) + let free = runStateT (runMaybeT HandleInput.loop) state + rng i = pure $ Random.drgNewSeed (Random.seedFromInteger (fromIntegral i)) + (o, state') <- HandleCommand.commandLine config awaitInput + (const $ pure ()) + runtime + print + printNumbered + loadPreviousUnisonBlock + codebase + rng + branchCache + free + case o of + Nothing -> do + texts <- readIORef out + pure $ Text.concat (Text.pack <$> toList (texts :: Seq String)) + Just () -> do + writeIORef numberedArgsRef (HandleInput._numberedArgs state') + loop state' + (`finally` cleanup) + $ loop (HandleInput.loopState0 root initialPath) + +transcriptFailure :: IORef (Seq String) -> Text -> IO b +transcriptFailure out msg = do + texts <- readIORef out + die + . 
-- | Parse one transcript command line of the form @.some.path> command args@.
-- The leading 'P.lookAhead' of @"."@ means we only commit to this parser
-- when the line starts with an absolute path.
ucmCommand :: P UcmCommand
ucmCommand = do
  P.lookAhead (word ".")
  -- fix: use distinct names for the raw path text and the parsed path
  -- instead of shadowing `path`, so each binding's role is clear
  pathText <- P.takeWhile1P Nothing (/= '>')
  void $ word ">"
  line <- P.takeWhileP Nothing (/= '\n') <* spaces
  path <- case Path.parsePath' (Text.unpack pathText) of
    Right (Path.unPath' -> Left absPath) -> pure absPath
    Right _ -> fail "expected absolute path"
    Left e -> fail e
  pure $ UcmCommand path line
module Unison.Codebase.TypeEdit where

import Unison.Reference (Reference)
import Unison.Hashable (Hashable)
import qualified Unison.Hashable as H

-- | An edit applied to a type declaration in a patch: either replace it
-- with another type, or deprecate it outright.
data TypeEdit = Replace Reference | Deprecate
  deriving (Eq, Ord, Show)

-- | The replacement reference, if this edit has one.
toReference :: TypeEdit -> Maybe Reference
toReference edit = case edit of
  Replace r -> Just r
  Deprecate -> Nothing

-- | Every reference mentioned by this edit (zero or one).
references :: TypeEdit -> [Reference]
references edit = case edit of
  Replace r -> [r]
  Deprecate -> []

-- Hash tokens: tag 0 + the reference for Replace, tag 1 for Deprecate.
instance Hashable TypeEdit where
  tokens edit = case edit of
    Replace r -> H.Tag 0 : H.tokens r
    Deprecate -> [H.Tag 1]
-- | Run the action repeatedly until it yields a 'Just', returning the
-- contained value. Never terminates if the action always yields 'Nothing'.
untilJust :: Monad m => m (Maybe a) -> m a
untilJust act = loop
  where
    loop = act >>= maybe loop pure
-- | Block until the queue is non-empty, then keep waiting until there has
-- been a pause of at least @minPauseµsec@ microseconds with no new
-- enqueues, and return everything accumulated. Used to debounce bursts of
-- file-change events.
collectUntilPause :: forall m a. MonadIO m => TQueue a -> Int -> m [a]
collectUntilPause queue minPauseµsec = do
-- 1. wait for at least one element in the queue
  void . atomically $ TQueue.peek queue

  let go :: MonadIO m => m [a]
      go = do
        -- Snapshot the enqueue counter, sleep, snapshot again: if the
        -- counter didn't move, nothing arrived during the pause.
        -- (Assumes TQueue.enqueueCount is a monotonically increasing
        -- count of enqueues — TODO confirm against Unison.Util.TQueue.)
        before <- atomically $ TQueue.enqueueCount queue
        threadDelay minPauseµsec
        after <- atomically $ TQueue.enqueueCount queue
        -- if nothing new is on the queue, then return the contents
        if before == after
          then atomically $ TQueue.flush queue
          else go
  go
Nothing <$ handle e) + else return Nothing + queue <- TQueue.newIO + gate <- liftIO newEmptyMVar + ctx <- UnliftIO.askUnliftIO + -- We spawn a separate thread to siphon the file change events + -- into a queue, which can be debounced using `collectUntilPause` + enqueuer <- liftIO . forkIO $ do + takeMVar gate -- wait until gate open before starting + forever $ do + event@(file, _) <- UnliftIO.unliftIO ctx watcher + when (allow file) $ + STM.atomically $ TQueue.enqueue queue event + pending <- newIORef =<< existingFiles + let + await :: MonadIO m => m (FilePath, Text) + await = untilJust $ readIORef pending >>= \case + [] -> do + -- open the gate + tryPutMVar gate () + -- this debounces the events, waits for 50ms pause + -- in file change events + events <- collectUntilPause queue 50000 + -- traceM $ "Collected file change events" <> show events + case events of + [] -> pure Nothing + -- we pick the last of the events within the 50ms window + -- TODO: consider enqueing other events if there are + -- multiple events for different files + _ -> uncurry process $ last events + ((file, t):rest) -> do + writeIORef pending rest + process file t + cancel = cancelWatch >> killThread enqueuer + pure (cancel, await) diff --git a/parser-typechecker/src/Unison/Codecs.hs b/parser-typechecker/src/Unison/Codecs.hs new file mode 100644 index 0000000000..1ed01453aa --- /dev/null +++ b/parser-typechecker/src/Unison/Codecs.hs @@ -0,0 +1,340 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Codecs where + +-- A format for encoding runtime values, with sharing for compiled nodes. 
+ +import Unison.Prelude + +import Control.Arrow (second) +import Control.Monad.State +import Data.Bits (Bits) +import qualified Data.Bytes.Serial as BS +import Data.Bytes.Signed (Unsigned) +import Data.Bytes.VarInt (VarInt(..)) +import qualified Data.ByteString as B +import Data.ByteString.Builder (doubleBE, int64BE, toLazyByteString) +import qualified Data.ByteString.Lazy as BL +import Data.Bytes.Put +import qualified Unison.ABT as ABT +import qualified Unison.Blank as Blank +import qualified Unison.DataDeclaration as DD +import qualified Unison.Hash as Hash +import Unison.Reference (Reference, pattern Builtin, pattern Derived) +import qualified Unison.Referent as Referent +import qualified Unison.ConstructorType as ConstructorType +import Unison.Term +import Unison.UnisonFile (UnisonFile, pattern UnisonFile) +import qualified Unison.UnisonFile as UF +import Unison.Var (Var) +import qualified Unison.Var as Var +import Unison.Pattern (Pattern) +import qualified Unison.Pattern as Pattern + +type Pos = Word64 + +serializeTerm :: (MonadPut m, MonadState Pos m, Var v) + => Term v a + -> m Pos +serializeTerm x = do + let putTag = do putWord8 111; putWord8 0 + let incPosition = do pos <- get; modify' (+1); pure pos + case ABT.out x of + ABT.Var v -> do + putTag + putWord8 0 + lengthEncode $ Var.name v + incPosition + ABT.Abs v body -> do + pbody <- serializeTerm body + putTag + putWord8 1 + lengthEncode $ Var.name v + putBackref pbody + incPosition + ABT.Cycle body -> do + pbody <- serializeTerm body + putTag + putWord8 10 + putBackref pbody + incPosition + ABT.Tm f -> case f of + Ann e _ -> do + serializeTerm e -- ignore types (todo: revisit) + Ref ref -> do + putTag + putWord8 2 + serializeReference ref + incPosition + Constructor ref id -> do + putTag + putWord8 3 + serializeReference ref + putWord32be $ fromIntegral id + incPosition + Request ref id -> do + putTag + putWord8 4 + serializeReference ref + putWord32be $ fromIntegral id + incPosition + Text text -> do + 
putTag + putWord8 5 + lengthEncode text + incPosition + Int n -> do + putTag + putWord8 6 + serializeInt n + incPosition + Nat n -> do + putTag + putWord8 6 + serializeNat n + incPosition + Float n -> do + putTag + putWord8 6 + serializeFloat n + incPosition + Boolean b -> do + putTag + putWord8 6 + serializeBoolean b + incPosition + Sequence v -> do + elementPositions <- traverse serializeTerm v + putTag + putWord8 7 + putLength $ length elementPositions + traverse_ putBackref elementPositions + incPosition + Lam body -> do + pos <- serializeTerm body + putTag + putWord8 8 + putBackref pos + incPosition + App fn arg -> do + posf <- serializeTerm fn + posarg <- serializeTerm arg + putTag + putWord8 9 + putBackref posf + putLength (1 :: Int) + putBackref posarg + incPosition + Let _ binding body -> do + posbind <- serializeTerm binding + posbod <- serializeTerm body + putTag + putWord8 11 + putBackref posbind + putBackref posbod + incPosition + If c t f -> do + posc <- serializeTerm c + post <- serializeTerm t + posf <- serializeTerm f + putTag + putWord8 12 + putBackref posc + putBackref post + putBackref posf + incPosition + And x y -> do + posx <- serializeTerm x + posy <- serializeTerm y + putTag + putWord8 13 + putBackref posx + putBackref posy + incPosition + Or x y -> do + posx <- serializeTerm x + posy <- serializeTerm y + putTag + putWord8 14 + putBackref posx + putBackref posy + incPosition + Match scrutinee cases -> do + poss <- serializeTerm scrutinee + casePositions <- traverse serializeCase1 cases + putTag + putWord8 15 + putBackref poss + putLength $ length casePositions + traverse_ serializeCase2 casePositions + incPosition + Blank b -> error $ "cannot serialize program with blank " ++ + fromMaybe "" (Blank.nameb b) + Handle h body -> do + hpos <- serializeTerm h + bpos <- serializeTerm body + putTag + putWord8 16 + putBackref hpos + putBackref bpos + incPosition + LetRec _ bs body -> do + positions <- traverse serializeTerm bs + pbody <- 
-- | Write the wire encoding of a 'Pattern': a one-byte tag per
-- constructor followed by that constructor's payload. Sub-patterns are
-- serialized inline (no back-references, unlike terms).
serializePattern :: MonadPut m => Pattern a -> m ()
serializePattern p = case p of
  -- note: the putWord8 0 is the tag before any unboxed pattern
  Pattern.Boolean _ b -> putWord8 0 *> serializeBoolean b
  Pattern.Int _ n -> putWord8 0 *> serializeInt n
  Pattern.Nat _ n -> putWord8 0 *> serializeNat n
  Pattern.Float _ n -> putWord8 0 *> serializeFloat n
  Pattern.Var _ -> putWord8 1
  Pattern.Unbound _ -> putWord8 2
  -- payload: reference, constructor id (4 bytes BE), then the
  -- length-prefixed list of argument patterns
  Pattern.Constructor _ r cid ps -> do
    putWord8 3
    serializeReference r
    putWord32be $ fromIntegral cid
    putLength (length ps)
    traverse_ serializePattern ps
  Pattern.As _ p -> do
    putWord8 4
    serializePattern p
  Pattern.EffectPure _ p -> do
    putWord8 5
    serializePattern p
  -- like Constructor, but with a trailing continuation pattern k
  Pattern.EffectBind _ r cid ps k -> do
    putWord8 6
    serializeReference r
    putWord32be $ fromIntegral cid
    putLength (length ps)
    traverse_ serializePattern ps
    serializePattern k
  _ -> error "todo: delete me after deleting PatternP - serializePattern match failure"
-- | Standard option encoding: tag byte 0 for 'Nothing'; tag byte 1
-- followed by the payload (written by @f@) for 'Just'.
serializeMaybe :: (MonadPut m) => (a -> m ()) -> Maybe a -> m ()
serializeMaybe f b = maybe (putWord8 0) (\x -> putWord8 1 *> f x) b
-- | Serialize a 'UnisonFile' together with a term @tm@: first the
-- constructor arities of all data declarations, then of all effect
-- declarations, then the combined term, and finally a 0 tag plus a
-- back-reference to the term's position.
serializeFile
  :: (MonadPut m, MonadState Pos m, Monoid a, Var v)
  => UnisonFile v a -> Term v a -> m ()
serializeFile uf@(UnisonFile dataDecls effectDecls _ _) tm = do
  -- presumably combines the file's bindings with tm into one term —
  -- TODO confirm against Unison.UnisonFile.uberTerm'
  let body = UF.uberTerm' uf tm
  let dataDecls' = second DD.constructorArities <$> toList dataDecls
  let effectDecls' =
        second (DD.constructorArities . DD.toDataDecl) <$> toList effectDecls
  -- traceM $ show effectDecls'
  serializeFoldable (uncurry serializeConstructorArities) dataDecls'
  serializeFoldable (uncurry serializeConstructorArities) effectDecls'
  -- NB: we rewrite the term to minimize away let rec cycles, as let rec
  -- blocks aren't allowed to have effects
  pos <- serializeTerm body
  putWord8 0
  putBackref pos
-- | Decide whether a changed file should be picked up by the watcher:
-- accept @.u@ and @.uu@ scratch files, but skip Emacs @.#@-prefixed lock
-- files (see https://github.com/unisonweb/unison/issues/457).
allow :: FilePath -> Bool
allow path = notEmacsLockFile && isScratchFile
  where
    notEmacsLockFile = not (".#" `isPrefixOf` takeFileName path)
    isScratchFile = ".u" `isSuffixOf` path || ".uu" `isSuffixOf` path
-- `True` if `h` is found in the history of `c` within `maxDepth` path length
-- from the tip of `c`
--
-- Depth-bounded search through the causal graph: the current depth is
-- carried in a Reader and a Set of already-seen causals in State, so
-- shared history isn't traversed more than once.
beforeHash :: forall m h e . Monad m => Word -> Causal.RawHash h -> Causal m h e -> m Bool
beforeHash maxDepth h c =
  Reader.runReaderT (State.evalStateT (go c) Set.empty) (0 :: Word)
  where
  go c | h == Causal.currentHash c = pure True
  go c = do
    currentDepth :: Word <- Reader.ask
    if currentDepth >= maxDepth
      then pure False
      else do
        seen <- State.get
        -- NOTE(review): `sequence` forces every child thunk even if the
        -- first child already contains h — confirm this is acceptable
        cs <- lift . lift $ toList <$> sequence (Causal.children c)
        let unseens = filter (\c -> c `Set.notMember` seen) cs
        State.modify' (<> Set.fromList cs)
        -- anyM short-circuits on the first matching child; the depth is
        -- incremented by 1 for the recursive calls via Reader.local
        Monad.anyM (Reader.local (1+) . go) unseens
-- workaround for https://github.com/judah/haskeline/issues/100
-- | If the common prefix of all completion replacements is shorter than
-- the query, rewrite every replacement to the query itself so Haskeline
-- preserves what the user has typed.
fixupCompletion :: String -> [Line.Completion] -> [Line.Completion]
fixupCompletion _ [] = []
fixupCompletion _ [c] = [c]
fixupCompletion q completions@(c0 : rest)
  | q `isPrefixOf` sharedPrefix = completions
  | otherwise = map (\c -> c { Line.replacement = q }) completions
  where
    sharedPrefix =
      foldl longestCommon (Line.replacement c0) (map Line.replacement rest)
    -- longest common prefix of two strings
    longestCommon xs ys =
      map fst (takeWhile (uncurry (==)) (zip xs ys))
-- | The command-line prompt string.
prompt :: String
prompt = "> "

-- `plural [] "cat" "cats" = "cats"`
-- `plural ["meow"] "cat" "cats" = "cat"`
-- `plural ["meow", "meow"] "cat" "cats" = "cats"`
-- | Choose the singular form exactly when the container holds one element.
plural :: Foldable f => f a -> b -> b -> b
plural items one other
  | [_] <- toList items = one
  | otherwise = other

-- | Like 'plural', but keyed on a count instead of a container.
plural' :: Integral a => a -> b -> b -> b
plural' n one other = if n == 1 then one else other
dispatch on other things with special rendering + Term.Ref' r -> eval r >>= \case + Nothing -> pure $ termName (PPE.suffixifiedPPE pped) (Referent.Ref r) + Just tm -> displayDoc pped terms typeOf eval types tm + _ -> displayDoc pped terms typeOf eval types tm + +displayDoc :: forall v m a. (Var v, Monad m) + => PPE.PrettyPrintEnvDecl + -> (Reference -> m (Maybe (Term v a))) + -> (Referent -> m (Maybe (Type v a))) + -> (Reference -> m (Maybe (Term v a))) + -> (Reference -> m (Maybe (DD.Decl v a))) + -> Term v a + -> m Pretty +displayDoc pped terms typeOf evaluated types = go + where + go (DD.DocJoin docs) = fold <$> traverse go docs + go (DD.DocBlob txt) = pure $ P.paragraphyText txt + go (DD.DocLink (DD.LinkTerm (Term.TermLink' r))) = + pure $ P.underline (termName (PPE.suffixifiedPPE pped) r) + go (DD.DocLink (DD.LinkType (Term.TypeLink' r))) = + pure $ P.underline (typeName (PPE.suffixifiedPPE pped) r) + go (DD.DocSource (DD.LinkTerm (Term.TermLink' r))) = prettyTerm terms r + go (DD.DocSource (DD.LinkType (Term.TypeLink' r))) = prettyType r + go (DD.DocSignature (Term.TermLink' r)) = prettySignature r + go (DD.DocEvaluate (Term.TermLink' r)) = prettyEval evaluated r + go tm = pure $ TP.pretty (PPE.suffixifiedPPE pped) tm + prettySignature r = typeOf r >>= \case + Nothing -> pure $ termName (PPE.unsuffixifiedPPE pped) r + Just typ -> pure . P.group $ + TypePrinter.prettySignatures + (PPE.suffixifiedPPE pped) + [(PPE.termName (PPE.unsuffixifiedPPE pped) r, typ)] + prettyEval terms r = case r of + Referent.Ref (Reference.Builtin n) -> pure . 
P.syntaxToColor $ P.text n + Referent.Ref ref -> + let ppe = PPE.declarationPPE pped ref + in terms ref >>= \case + Nothing -> pure $ "😶 Missing term source for: " <> termName ppe r + Just tm -> pure $ TP.pretty ppe tm + Referent.Con r _ _ -> pure $ typeName (PPE.declarationPPE pped r) r + prettyTerm terms r = case r of + Referent.Ref (Reference.Builtin _) -> prettySignature r + Referent.Ref ref -> let ppe = PPE.declarationPPE pped ref in terms ref >>= \case + Nothing -> pure $ "😶 Missing term source for: " <> termName ppe r + Just tm -> pure . P.syntaxToColor $ P.group $ TP.prettyBinding ppe (PPE.termName ppe r) tm + Referent.Con r _ _ -> prettyType r + prettyType r = let ppe = PPE.declarationPPE pped r in types r >>= \case + Nothing -> pure $ "😶 Missing type source for: " <> typeName ppe r + Just ty -> pure . P.syntaxToColor $ P.group $ DP.prettyDecl ppe r (PPE.typeName ppe r) ty + +termName :: PPE.PrettyPrintEnv -> Referent -> Pretty +termName ppe r = P.syntaxToColor $ + NP.styleHashQualified'' (NP.fmt $ S.Referent r) name + where name = PPE.termName ppe r + +typeName :: PPE.PrettyPrintEnv -> Reference -> Pretty +typeName ppe r = P.syntaxToColor $ + NP.styleHashQualified'' (NP.fmt $ S.Reference r) name + where name = PPE.typeName ppe r diff --git a/parser-typechecker/src/Unison/CommandLine/InputPattern.hs b/parser-typechecker/src/Unison/CommandLine/InputPattern.hs new file mode 100644 index 0000000000..c1a55d6499 --- /dev/null +++ b/parser-typechecker/src/Unison/CommandLine/InputPattern.hs @@ -0,0 +1,92 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ViewPatterns #-} + + +module Unison.CommandLine.InputPattern where + +import qualified System.Console.Haskeline as Line +import Unison.Codebase (Codebase) +import Unison.Codebase.Branch (Branch) +import Unison.Codebase.Editor.Input (Input (..)) +import qualified Unison.Util.ColorText as CT +import qualified Unison.Util.Pretty as P +import Unison.Codebase.Path as Path + +-- 
InputPatterns accept some fixed number of Required arguments of various +-- types, followed by a variable number of a single type of argument. +data IsOptional + = Required -- 1, at the start + | Optional -- 0 or 1, at the end + | ZeroPlus -- 0 or more, at the end + | OnePlus -- 1 or more, at the end + deriving Show + +data InputPattern = InputPattern + { patternName :: String + , aliases :: [String] + , args :: [(IsOptional, ArgumentType)] + , help :: P.Pretty CT.ColorText + , parse :: [String] -> Either (P.Pretty CT.ColorText) Input + } + +data ArgumentType = ArgumentType + { typeName :: String + , suggestions :: forall m v a . Monad m + => String + -> Codebase m v a + -> Branch m + -> Path.Absolute + -> m [Line.Completion] + } +instance Show ArgumentType where + show at = "ArgumentType " <> typeName at + +-- `argType` gets called when the user tries to autocomplete an `i`th argument (zero-indexed). +-- todo: would be nice if we could alert the user if they try to autocomplete +-- past the end. It would also be nice if +argType :: InputPattern -> Int -> Maybe ArgumentType +argType ip i = go (i, args ip) where + -- Strategy: all of these input patterns take some number of arguments. + -- If it takes no arguments, then don't autocomplete. + go (_, []) = Nothing + -- If requesting the 0th of >=1 arguments, return it. + go (0, (_, t) : _) = Just t + -- Vararg parameters should appear at the end of the arg list, and work for + -- any later argument number. + go (_, [(ZeroPlus, t)]) = Just t + go (_, [(OnePlus, t)]) = Just t + -- Optional parameters only work at position 0, under this countdown scheme. + go (_, [(Optional, _)]) = Nothing + -- If requesting a later parameter, decrement and drop one. + go (n, (Required, _) : args) = go (n - 1, args) + -- The argument list spec is invalid if something follows optional or vararg + go _ = error $ "Input pattern " <> show (patternName ip) + <> " has an invalid argument list: " <> (show . 
fmap fst) (args ip) + +minArgs :: InputPattern -> Int +minArgs ip@(fmap fst . args -> args) = go args where + go [] = 0 + go (Required : args) = 1 + go args + go [_] = 0 + go _ = error $ "Invalid args for InputPattern (" + <> show (patternName ip) <> "): " <> show args + +maxArgs :: InputPattern -> Maybe Int +maxArgs ip@(fmap fst . args -> args) = go args where + go [] = Just 0 + go (Required : args) = (1 +) <$> go args + go [Optional] = Just 0 + go [_] = Nothing + go _ = error $ "Invalid args for InputPattern (" + <> show (patternName ip) <> "): " <> show args + +noSuggestions + :: Monad m + => String + -> Codebase m v a + -> Branch m + -> Path.Absolute + -> m [Line.Completion] +noSuggestions _ _ _ _ = pure [] + diff --git a/parser-typechecker/src/Unison/CommandLine/InputPatterns.hs b/parser-typechecker/src/Unison/CommandLine/InputPatterns.hs new file mode 100644 index 0000000000..f6e5a533b0 --- /dev/null +++ b/parser-typechecker/src/Unison/CommandLine/InputPatterns.hs @@ -0,0 +1,1549 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.CommandLine.InputPatterns where + +import Unison.Prelude + +import qualified Control.Lens.Cons as Cons +import qualified Control.Lens as Lens +import Data.Bifunctor (first) +import Data.List (intercalate, isPrefixOf) +import Data.List.Extra (nubOrdOn) +import qualified System.Console.Haskeline.Completion as Completion +import System.Console.Haskeline.Completion (Completion(Completion)) +import Unison.Codebase (Codebase) +import Unison.Codebase.Editor.Input (Input) +import qualified Unison.Codebase.SyncMode as SyncMode +import Unison.CommandLine.InputPattern + ( ArgumentType(..) + , InputPattern(InputPattern) + , IsOptional(..) 
+ ) +import Unison.CommandLine +import Unison.Util.Monoid (intercalateMap) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Text.Megaparsec as P +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Editor.Input as Input +import qualified Unison.Codebase.Path as Path +import qualified Unison.CommandLine.InputPattern as I +import qualified Unison.HashQualified as HQ +import qualified Unison.HashQualified' as HQ' +import qualified Unison.Name as Name +import qualified Unison.Names2 as Names +import qualified Unison.Util.ColorText as CT +import qualified Unison.Util.Pretty as P +import qualified Unison.Util.Relation as R +import qualified Unison.Codebase.Editor.SlurpResult as SR +import qualified Unison.Codebase.Editor.UriParser as UriParser +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) +import qualified Unison.Codebase.Editor.RemoteRepo as RemoteRepo +import Data.Tuple.Extra (uncurry3) + +showPatternHelp :: InputPattern -> P.Pretty CT.ColorText +showPatternHelp i = P.lines [ + P.bold (fromString $ I.patternName i) <> fromString + (if not . null $ I.aliases i + then " (or " <> intercalate ", " (I.aliases i) <> ")" + else ""), + P.wrap $ I.help i ] + +patternName :: InputPattern -> P.Pretty P.ColorText +patternName = fromString . I.patternName + +-- `example list ["foo", "bar"]` (haskell) becomes `list foo bar` (pretty) +makeExample, makeExampleNoBackticks :: InputPattern -> [P.Pretty CT.ColorText] -> P.Pretty CT.ColorText +makeExample p args = P.group . 
backtick $ makeExampleNoBackticks p args + +makeExampleNoBackticks p args = + P.group $ intercalateMap " " id (P.nonEmpty $ fromString (I.patternName p) : args) + +makeExample' :: InputPattern -> P.Pretty CT.ColorText +makeExample' p = makeExample p [] + +makeExampleEOS :: + InputPattern -> [P.Pretty CT.ColorText] -> P.Pretty CT.ColorText +makeExampleEOS p args = P.group $ + backtick (intercalateMap " " id (P.nonEmpty $ fromString (I.patternName p) : args)) <> "." + +helpFor :: InputPattern -> Either (P.Pretty CT.ColorText) Input +helpFor p = I.parse help [I.patternName p] + +mergeBuiltins :: InputPattern +mergeBuiltins = InputPattern "builtins.merge" [] [] + "Adds the builtins to `builtins.` in the current namespace (excluding `io` and misc)." + (const . pure $ Input.MergeBuiltinsI) + +mergeIOBuiltins :: InputPattern +mergeIOBuiltins = InputPattern "builtins.mergeio" [] [] + "Adds all the builtins to `builtins.` in the current namespace, including `io` and misc." + (const . pure $ Input.MergeIOBuiltinsI) + +updateBuiltins :: InputPattern +updateBuiltins = InputPattern + "builtins.update" + [] + [] + ( "Adds all the builtins that are missing from this namespace, " + <> "and deprecate the ones that don't exist in this version of Unison." + ) + (const . pure $ Input.UpdateBuiltinsI) + +todo :: InputPattern +todo = InputPattern + "todo" + [] + [(Optional, patchArg), (Optional, pathArg)] + (P.wrapColumn2 + [ ( makeExample' todo + , "lists the refactor work remaining in the default patch for the current" + <> " namespace." + ) + , ( makeExample todo [""] + , "lists the refactor work remaining in the given patch in the current " + <> "namespace." + ) + , ( makeExample todo ["", "[path]"] + , "lists the refactor work remaining in the given patch in given namespace." + ) + ] + ) + (\case + patchStr : ws -> mapLeft (warn . 
fromString) $ do + patch <- Path.parseSplit' Path.definitionNameSegment patchStr + branch <- case ws of + [] -> pure Path.relativeEmpty' + [pathStr] -> Path.parsePath' pathStr + _ -> Left "`todo` just takes a patch and one optional namespace" + Right $ Input.TodoI (Just patch) branch + [] -> Right $ Input.TodoI Nothing Path.relativeEmpty' + ) + +load :: InputPattern +load = InputPattern + "load" + [] + [(Optional, noCompletions)] + (P.wrapColumn2 + [ ( makeExample' load + , "parses, typechecks, and evaluates the most recent scratch file." + ) + , (makeExample load [""] + , "parses, typechecks, and evaluates the given scratch file." + ) + ] + ) + (\case + [] -> pure $ Input.LoadI Nothing + [file] -> pure $ Input.LoadI . Just $ file + _ -> Left (I.help load)) + + +add :: InputPattern +add = + InputPattern + "add" + [] + [(ZeroPlus, noCompletions)] + ("`add` adds to the codebase all the definitions from the most recently " + <> "typechecked file." + ) + $ \ws -> case traverse HQ'.fromString ws of + Just ws -> pure $ Input.AddI ws + Nothing -> + Left + . warn + . P.lines + . fmap fromString + . ("I don't know what these refer to:\n" :) + $ collectNothings HQ'.fromString ws + +previewAdd :: InputPattern +previewAdd = + InputPattern + "add.preview" + [] + [(ZeroPlus, noCompletions)] + ("`add.preview` previews additions to the codebase from the most recently " + <> "typechecked file. This command only displays cached typechecking " + <> "results. Use `load` to reparse & typecheck the file if the context " + <> "has changed." + ) + $ \ws -> case traverse HQ'.fromString ws of + Just ws -> pure $ Input.PreviewAddI ws + Nothing -> + Left + . warn + . P.lines + . fmap fromString + . 
("I don't know what these refer to:\n" :) + $ collectNothings HQ'.fromString ws + +update :: InputPattern +update = InputPattern "update" + [] + [(Optional, patchArg) + ,(ZeroPlus, noCompletions)] + (P.wrap (makeExample' update <> "works like" + <> P.group (makeExample' add <> ",") + <> "except that if a definition in the file has the same name as an" + <> "existing definition, the name gets updated to point to the new" + <> "definition. If the old definition has any dependents, `update` will" + <> "add those dependents to a refactoring session, specified by an" + <> "optional patch.") + <> P.wrapColumn2 + [ (makeExample' update + , "adds all definitions in the .u file, noting replacements in the" + <> "default patch for the current namespace.") + , (makeExample update [""] + , "adds all definitions in the .u file, noting replacements in the" + <> "specified patch.") + , (makeExample update ["", "foo", "bar"] + , "adds `foo`, `bar`, and their dependents from the .u file, noting" + <> "any replacements into the specified patch.") + ] + ) + (\case + patchStr : ws -> do + patch <- first fromString $ Path.parseSplit' Path.definitionNameSegment patchStr + case traverse HQ'.fromString ws of + Just ws -> Right $ Input.UpdateI (Just patch) ws + Nothing -> + Left . warn . P.lines . fmap fromString . + ("I don't know what these refer to:\n" :) $ + collectNothings HQ'.fromString ws + [] -> Right $ Input.UpdateI Nothing [] ) + +previewUpdate :: InputPattern +previewUpdate = + InputPattern + "update.preview" + [] + [(ZeroPlus, noCompletions)] + ("`update.preview` previews updates to the codebase from the most " + <> "recently typechecked file. This command only displays cached " + <> "typechecking results. Use `load` to reparse & typecheck the file if " + <> "the context has changed." + ) + $ \ws -> case traverse HQ'.fromString ws of + Just ws -> pure $ Input.PreviewUpdateI ws + Nothing -> + Left + . warn + . P.lines + . fmap fromString + . 
("I don't know what these refer to:\n" :) + $ collectNothings HQ'.fromString ws + +patch :: InputPattern +patch = InputPattern + "patch" + [] + [(Required, patchArg), (Optional, pathArg)] + ( P.wrap + $ makeExample' patch + <> "rewrites any definitions that depend on " + <> "definitions with type-preserving edits to use the updated versions of" + <> "these dependencies." + ) + (\case + patchStr : ws -> first fromString $ do + patch <- Path.parseSplit' Path.definitionNameSegment patchStr + branch <- case ws of + [pathStr] -> Path.parsePath' pathStr + _ -> pure Path.relativeEmpty' + pure $ Input.PropagatePatchI patch branch + [] -> + Left + $ warn + $ makeExample' patch + <> "takes a patch and an optional namespace." + ) + +view :: InputPattern +view = InputPattern + "view" + [] + [(OnePlus, definitionQueryArg)] + "`view foo` prints the definition of `foo`." + ( fmap (Input.ShowDefinitionI Input.ConsoleLocation) + . traverse parseHashQualifiedName + ) + +display :: InputPattern +display = InputPattern + "display" + [] + [(Required, definitionQueryArg)] + "`display foo` prints a rendered version of the term `foo`." + (\case + [s] -> Input.DisplayI Input.ConsoleLocation <$> parseHashQualifiedName s + _ -> Left (I.help display) + ) + + +displayTo :: InputPattern +displayTo = InputPattern + "display.to" + [] + [(Required, noCompletions), (Required, definitionQueryArg)] + ( P.wrap + $ makeExample displayTo ["", "foo"] + <> "prints a rendered version of the term `foo` to the given file." + ) + (\case + [file, s] -> + Input.DisplayI (Input.FileLocation file) <$> parseHashQualifiedName s + _ -> Left (I.help displayTo) + ) + +docs :: InputPattern +docs = InputPattern "docs" [] [(Required, definitionQueryArg)] + "`docs foo` shows documentation for the definition `foo`." 
+ (\case + [s] -> first fromString $ Input.DocsI <$> Path.parseHQSplit' s + _ -> Left (I.help docs)) + +undo :: InputPattern +undo = InputPattern "undo" [] [] + "`undo` reverts the most recent change to the codebase." + (const $ pure Input.UndoI) + +viewByPrefix :: InputPattern +viewByPrefix = InputPattern + "view.recursive" + [] + [(OnePlus, definitionQueryArg)] + "`view.recursive Foo` prints the definitions of `Foo` and `Foo.blah`." + ( fmap (Input.ShowDefinitionByPrefixI Input.ConsoleLocation) + . traverse parseHashQualifiedName + ) + +find :: InputPattern +find = InputPattern + "find" + [] + [(ZeroPlus, fuzzyDefinitionQueryArg)] + (P.wrapColumn2 + [ ("`find`", "lists all definitions in the current namespace.") + , ( "`find foo`" + , "lists all definitions with a name similar to 'foo' in the current " + <> "namespace." + ) + , ( "`find foo bar`" + , "lists all definitions with a name similar to 'foo' or 'bar' in the " + <> "current namespace." + ) + ] + ) + (pure . Input.SearchByNameI False False) + +findShallow :: InputPattern +findShallow = InputPattern + "list" + ["ls"] + [(Optional, pathArg)] + (P.wrapColumn2 + [ ("`list`", "lists definitions and namespaces at the current level of the current namespace.") + , ( "`list foo`", "lists the 'foo' namespace." ) + , ( "`list .foo`", "lists the '.foo' namespace." ) + ] + ) + (\case + [] -> pure $ Input.FindShallowI Path.relativeEmpty' + [path] -> first fromString $ do + p <- Path.parsePath' path + pure $ Input.FindShallowI p + _ -> Left (I.help findShallow) + ) + +findVerbose :: InputPattern +findVerbose = InputPattern + "find.verbose" + ["list.verbose", "ls.verbose"] + [(ZeroPlus, fuzzyDefinitionQueryArg)] + ( "`find.verbose` searches for definitions like `find`, but includes hashes " + <> "and aliases in the results." + ) + (pure . 
Input.SearchByNameI True False) + +findPatch :: InputPattern +findPatch = InputPattern + "find.patch" + ["list.patch", "ls.patch"] + [] + (P.wrapColumn2 + [("`find.patch`", "lists all patches in the current namespace.")] + ) + (pure . const Input.FindPatchI) + +renameTerm :: InputPattern +renameTerm = InputPattern "move.term" ["rename.term"] + [(Required, exactDefinitionTermQueryArg) + ,(Required, newNameArg)] + "`move.term foo bar` renames `foo` to `bar`." + (\case + [oldName, newName] -> first fromString $ do + src <- Path.parseHQSplit' oldName + target <- Path.parseSplit' Path.definitionNameSegment newName + pure $ Input.MoveTermI src target + _ -> Left . P.warnCallout $ P.wrap + "`rename.term` takes two arguments, like `rename.term oldname newname`.") + +renameType :: InputPattern +renameType = InputPattern "move.type" ["rename.type"] + [(Required, exactDefinitionTypeQueryArg) + ,(Required, newNameArg)] + "`move.type foo bar` renames `foo` to `bar`." + (\case + [oldName, newName] -> first fromString $ do + src <- Path.parseHQSplit' oldName + target <- Path.parseSplit' Path.definitionNameSegment newName + pure $ Input.MoveTypeI src target + _ -> Left . P.warnCallout $ P.wrap + "`rename.type` takes two arguments, like `rename.type oldname newname`.") + +delete :: InputPattern +delete = InputPattern "delete" [] + [(OnePlus, definitionQueryArg)] + "`delete foo` removes the term or type name `foo` from the namespace." + (\case + [query] -> first fromString $ do + p <- Path.parseHQSplit' query + pure $ Input.DeleteI p + _ -> Left . P.warnCallout $ P.wrap + "`delete` takes an argument, like `delete name`." + ) + +deleteTerm :: InputPattern +deleteTerm = InputPattern "delete.term" [] + [(OnePlus, exactDefinitionTermQueryArg)] + "`delete.term foo` removes the term name `foo` from the namespace." + (\case + [query] -> first fromString $ do + p <- Path.parseHQSplit' query + pure $ Input.DeleteTermI p + _ -> Left . 
P.warnCallout $ P.wrap + "`delete.term` takes an argument, like `delete.term name`." + ) + +deleteType :: InputPattern +deleteType = InputPattern "delete.type" [] + [(OnePlus, exactDefinitionTypeQueryArg)] + "`delete.type foo` removes the type name `foo` from the namespace." + (\case + [query] -> first fromString $ do + p <- Path.parseHQSplit' query + pure $ Input.DeleteTypeI p + _ -> Left . P.warnCallout $ P.wrap + "`delete.type` takes an argument, like `delete.type name`." + ) + +deleteTermReplacementCommand :: String +deleteTermReplacementCommand = "delete.term-replacement" + +deleteTypeReplacementCommand :: String +deleteTypeReplacementCommand = "delete.type-replacement" + +deleteReplacement :: Bool -> InputPattern +deleteReplacement isTerm = InputPattern + commandName + [] + [(Required, if isTerm then exactDefinitionTermQueryArg else exactDefinitionTypeQueryArg), (Optional, patchArg)] + ( P.string + $ commandName + <> " ` removes any edit of the " + <> str + <> " `foo` from the patch `patch`, " + <> "or from the default patch if none is specified. Note that `foo` refers to the " + <> "original name for the " + <> str + <> " - not the one in place after the edit." + ) + (\case + query : patch -> do + patch <- + first fromString + . traverse (Path.parseSplit' Path.definitionNameSegment) + $ listToMaybe patch + q <- parseHashQualifiedName query + pure $ input q patch + _ -> + Left + . P.warnCallout + . P.wrapString + $ commandName + <> " needs arguments. See `help " + <> commandName + <> "`." 
+ ) + where + input = if isTerm + then Input.RemoveTermReplacementI + else Input.RemoveTypeReplacementI + str = if isTerm then "term" else "type" + commandName = if isTerm + then deleteTermReplacementCommand + else deleteTypeReplacementCommand + +deleteTermReplacement :: InputPattern +deleteTermReplacement = deleteReplacement True + +deleteTypeReplacement :: InputPattern +deleteTypeReplacement = deleteReplacement False + +parseHashQualifiedName + :: String -> Either (P.Pretty CT.ColorText) HQ.HashQualified +parseHashQualifiedName s = + maybe + ( Left + . P.warnCallout + . P.wrap + $ P.string s + <> " is not a well-formed name, hash, or hash-qualified name. " + <> "I expected something like `foo`, `#abc123`, or `foo#abc123`." + ) + Right + $ HQ.fromString s + +aliasTerm :: InputPattern +aliasTerm = InputPattern "alias.term" [] + [(Required, exactDefinitionTermQueryArg), (Required, newNameArg)] + "`alias.term foo bar` introduces `bar` with the same definition as `foo`." + (\case + [oldName, newName] -> first fromString $ do + source <- Path.parseShortHashOrHQSplit' oldName + target <- Path.parseSplit' Path.definitionNameSegment newName + pure $ Input.AliasTermI source target + _ -> Left . warn $ P.wrap + "`alias.term` takes two arguments, like `alias.term oldname newname`." + ) + +aliasType :: InputPattern +aliasType = InputPattern "alias.type" [] + [(Required, exactDefinitionTypeQueryArg), (Required, newNameArg)] + "`alias.type Foo Bar` introduces `Bar` with the same definition as `Foo`." + (\case + [oldName, newName] -> first fromString $ do + source <- Path.parseShortHashOrHQSplit' oldName + target <- Path.parseSplit' Path.definitionNameSegment newName + pure $ Input.AliasTypeI source target + _ -> Left . warn $ P.wrap + "`alias.type` takes two arguments, like `alias.type oldname newname`." + ) + +aliasMany :: InputPattern +aliasMany = InputPattern "alias.many" ["copy"] + [(Required, definitionQueryArg), (OnePlus, exactDefinitionOrPathArg)] + (P.group . 
P.lines $ + [ P.wrap $ P.group (makeExample aliasMany ["", "[relative2...]", ""]) + <> "creates aliases `relative1`, `relative2`, ... in the namespace `namespace`." + , P.wrap $ P.group (makeExample aliasMany ["foo.foo", "bar.bar", ".quux"]) + <> "creates aliases `.quux.foo.foo` and `.quux.bar.bar`." + ]) + (\case + srcs@(_:_) Cons.:> dest -> first fromString $ do + sourceDefinitions <- traverse Path.parseHQSplit srcs + destNamespace <- Path.parsePath' dest + pure $ Input.AliasManyI sourceDefinitions destNamespace + _ -> Left (I.help aliasMany) + ) + + +cd :: InputPattern +cd = InputPattern "namespace" ["cd", "j"] [(Required, pathArg)] + (P.wrapColumn2 + [ (makeExample cd ["foo.bar"], + "descends into foo.bar from the current namespace.") + , (makeExample cd [".cat.dog"], + "sets the current namespace to the abolute namespace .cat.dog.") ]) + (\case + [p] -> first fromString $ do + p <- Path.parsePath' p + pure . Input.SwitchBranchI $ p + _ -> Left (I.help cd) + ) + +back :: InputPattern +back = InputPattern "back" ["popd"] [] + (P.wrapColumn2 + [ (makeExample back [], + "undoes the last" <> makeExample' cd <> "command.") + ]) + (\case + [] -> pure Input.PopBranchI + _ -> Left (I.help cd) + ) + +deleteBranch :: InputPattern +deleteBranch = InputPattern "delete.namespace" [] [(Required, pathArg)] + "`delete.namespace ` deletes the namespace `foo`" + (\case + ["."] -> first fromString . + pure $ Input.DeleteBranchI Nothing + [p] -> first fromString $ do + p <- Path.parseSplit' Path.definitionNameSegment p + pure . Input.DeleteBranchI $ Just p + _ -> Left (I.help deleteBranch) + ) + +deletePatch :: InputPattern +deletePatch = InputPattern "delete.patch" [] [(Required, patchArg)] + "`delete.patch ` deletes the patch `foo`" + (\case + [p] -> first fromString $ do + p <- Path.parseSplit' Path.definitionNameSegment p + pure . 
Input.DeletePatchI $ p + _ -> Left (I.help deletePatch) + ) + +movePatch :: String -> String -> Either (P.Pretty CT.ColorText) Input +movePatch src dest = first fromString $ do + src <- Path.parseSplit' Path.definitionNameSegment src + dest <- Path.parseSplit' Path.definitionNameSegment dest + pure $ Input.MovePatchI src dest + +copyPatch' :: String -> String -> Either (P.Pretty CT.ColorText) Input +copyPatch' src dest = first fromString $ do + src <- Path.parseSplit' Path.definitionNameSegment src + dest <- Path.parseSplit' Path.definitionNameSegment dest + pure $ Input.CopyPatchI src dest + +copyPatch :: InputPattern +copyPatch = InputPattern "copy.patch" + [] + [(Required, patchArg), (Required, newNameArg)] + "`copy.patch foo bar` copies the patch `foo` to `bar`." + (\case + [src, dest] -> copyPatch' src dest + _ -> Left (I.help copyPatch) + ) + +renamePatch :: InputPattern +renamePatch = InputPattern "move.patch" + ["rename.patch"] + [(Required, patchArg), (Required, newNameArg)] + "`move.patch foo bar` renames the patch `foo` to `bar`." + (\case + [src, dest] -> movePatch src dest + _ -> Left (I.help renamePatch) + ) + +renameBranch :: InputPattern +renameBranch = InputPattern "move.namespace" + ["rename.namespace"] + [(Required, pathArg), (Required, newNameArg)] + "`move.namespace foo bar` renames the path `bar` to `foo`." 
+ (\case + [".", dest] -> first fromString $ do + dest <- Path.parseSplit' Path.definitionNameSegment dest + pure $ Input.MoveBranchI Nothing dest + [src, dest] -> first fromString $ do + src <- Path.parseSplit' Path.definitionNameSegment src + dest <- Path.parseSplit' Path.definitionNameSegment dest + pure $ Input.MoveBranchI (Just src) dest + _ -> Left (I.help renameBranch) + ) + +history :: InputPattern +history = InputPattern "history" [] + [(Optional, pathArg)] + (P.wrapColumn2 [ + (makeExample history [], "Shows the history of the current path."), + (makeExample history [".foo"], "Shows history of the path .foo."), + (makeExample history ["#9dndk3kbsk13nbpeu"], + "Shows the history of the namespace with the given hash." <> + "The full hash must be provided.") + ]) + (\case + [src] -> first fromString $ do + p <- Input.parseBranchId src + pure $ Input.HistoryI (Just 10) (Just 10) p + [] -> pure $ Input.HistoryI (Just 10) (Just 10) (Right Path.currentPath) + _ -> Left (I.help history) + ) + +forkLocal :: InputPattern +forkLocal = InputPattern "fork" ["copy.namespace"] [(Required, pathArg) + ,(Required, newNameArg)] + (makeExample forkLocal ["src", "dest"] <> "creates the namespace `dest` as a copy of `src`.") + (\case + [src, dest] -> first fromString $ do + src <- Input.parseBranchId src + dest <- Path.parsePath' dest + pure $ Input.ForkLocalBranchI src dest + _ -> Left (I.help forkLocal) + ) + +resetRoot :: InputPattern +resetRoot = InputPattern "reset-root" [] [(Required, pathArg)] + (P.wrapColumn2 [ + (makeExample resetRoot [".foo"], + "Reset the root namespace (along with its history) to that of the `.foo` namespace."), + (makeExample resetRoot ["#9dndk3kbsk13nbpeu"], + "Reset the root namespace (along with its history) to that of the namespace with hash `#9dndk3kbsk13nbpeu`.") + ]) + (\case + [src] -> first fromString $ do + src <- Input.parseBranchId src + pure $ Input.ResetRootI src + _ -> Left (I.help resetRoot)) + +pull :: InputPattern +pull = 
InputPattern + "pull" + [] + [(Optional, gitUrlArg), (Optional, pathArg)] + (P.lines + [ P.wrap + "The `pull` command merges a remote namespace into a local namespace." + , "" + , P.wrapColumn2 + [ ( "`pull remote local`" + , "merges the remote namespace `remote`" + <>"into the local namespace `local`." + ) + , ( "`pull remote`" + , "merges the remote namespace `remote`" + <>"into the current namespace") + , ( "`pull`" + , "merges the remote namespace configured in `.unisonConfig`" + <> "with the key `GitUrl.ns` where `ns` is the current namespace," + <> "into the current namespace") + ] + , "" + , P.wrap "where `remote` is a git repository, optionally followed by `:`" + <> "and an absolute remote path, such as:" + , P.indentN 2 . P.lines $ + [P.backticked "https://github.com/org/repo" + ,P.backticked "https://github.com/org/repo:.some.remote.path" + ] + ] + ) + (\case + [] -> + Right $ Input.PullRemoteBranchI Nothing Path.relativeEmpty' SyncMode.ShortCircuit + [url] -> do + ns <- parseUri "url" url + Right $ Input.PullRemoteBranchI (Just ns) Path.relativeEmpty' SyncMode.ShortCircuit + [url, path] -> do + ns <- parseUri "url" url + p <- first fromString $ Path.parsePath' path + Right $ Input.PullRemoteBranchI (Just ns) p SyncMode.ShortCircuit + _ -> Left (I.help pull) + ) + +pullExhaustive :: InputPattern +pullExhaustive = InputPattern + "debug.pull-exhaustive" + [] + [(Required, gitUrlArg), (Optional, pathArg)] + (P.lines + [ P.wrap $ + "The " <> makeExample' pullExhaustive <> "command can be used in place of" + <> makeExample' pull <> "to complete namespaces" + <> "which were pulled incompletely due to a bug in UCM" + <> "versions M1l and earlier. It may be extra slow!" 
+ ] + ) + (\case + [] -> + Right $ Input.PullRemoteBranchI Nothing Path.relativeEmpty' SyncMode.Complete + [url] -> do + ns <- parseUri "url" url + Right $ Input.PullRemoteBranchI (Just ns) Path.relativeEmpty' SyncMode.Complete + [url, path] -> do + ns <- parseUri "url" url + p <- first fromString $ Path.parsePath' path + Right $ Input.PullRemoteBranchI (Just ns) p SyncMode.Complete + _ -> Left (I.help pull) + ) + +push :: InputPattern +push = InputPattern + "push" + [] + [(Required, gitUrlArg), (Optional, pathArg)] + (P.lines + [ P.wrap + "The `push` command merges a local namespace into a remote namespace." + , "" + , P.wrapColumn2 + [ ( "`push remote local`" + , "merges the contents of the local namespace `local`" + <> "into the remote namespace `remote`." + ) + , ( "`push remote`" + , "publishes the current namespace into the remote namespace `remote`") + , ( "`push`" + , "publishes the current namespace" + <> "into the remote namespace configured in `.unisonConfig`" + <> "with the key `GitUrl.ns` where `ns` is the current namespace") + ] + , "" + , P.wrap "where `remote` is a git repository, optionally followed by `:`" + <> "and an absolute remote path, such as:" + , P.indentN 2 . P.lines $ + [P.backticked "https://github.com/org/repo" + ,P.backticked "https://github.com/org/repo:.some.remote.path" + ] + ] + ) + (\case + [] -> + Right $ Input.PushRemoteBranchI Nothing Path.relativeEmpty' SyncMode.ShortCircuit + url : rest -> do + (repo, sbh, path) <- parseUri "url" url + when (isJust sbh) + $ Left "Can't push to a particular remote namespace hash." 
+ p <- case rest of + [] -> Right Path.relativeEmpty' + [path] -> first fromString $ Path.parsePath' path + _ -> Left (I.help push) + Right $ Input.PushRemoteBranchI (Just (repo, path)) p SyncMode.ShortCircuit + ) + +pushExhaustive :: InputPattern +pushExhaustive = InputPattern + "debug.push-exhaustive" + [] + [(Required, gitUrlArg), (Optional, pathArg)] + (P.lines + [ P.wrap $ + "The " <> makeExample' pushExhaustive <> "command can be used in place of" + <> makeExample' push <> "to repair remote namespaces" + <> "which were pushed incompletely due to a bug in UCM" + <> "versions M1l and earlier. It may be extra slow!" + ] + ) + (\case + [] -> + Right $ Input.PushRemoteBranchI Nothing Path.relativeEmpty' SyncMode.Complete + url : rest -> do + (repo, sbh, path) <- parseUri "url" url + when (isJust sbh) + $ Left "Can't push to a particular remote namespace hash." + p <- case rest of + [] -> Right Path.relativeEmpty' + [path] -> first fromString $ Path.parsePath' path + _ -> Left (I.help push) + Right $ Input.PushRemoteBranchI (Just (repo, path)) p SyncMode.Complete + ) + +createPullRequest :: InputPattern +createPullRequest = InputPattern "pull-request.create" ["pr.create"] + [(Required, gitUrlArg), (Required, gitUrlArg), (Optional, pathArg)] + (P.group $ P.lines + [ P.wrap $ makeExample createPullRequest ["base", "head"] + <> "will generate a request to merge the remote repo `head`" + <> "into the remote repo `base`." 
+ , "" + , "example: " <> + makeExampleNoBackticks createPullRequest ["https://github.com/unisonweb/base:.trunk", + "https://github.com/me/unison:.prs.base._myFeature" ] + ]) + (\case + [baseUrl, headUrl] -> do + baseRepo <- parseUri "baseRepo" baseUrl + headRepo <- parseUri "headRepo" headUrl + pure $ Input.CreatePullRequestI baseRepo headRepo + _ -> Left (I.help createPullRequest) + ) + +loadPullRequest :: InputPattern +loadPullRequest = InputPattern "pull-request.load" ["pr.load"] + [(Required, gitUrlArg), (Required, gitUrlArg), (Optional, pathArg)] + (P.lines + [P.wrap $ makeExample loadPullRequest ["base", "head"] + <> "will load a pull request for merging the remote repo `head` into the" + <> "remote repo `base`, staging each in the current namespace" + <> "(so make yourself a clean spot to work first)." + ,P.wrap $ makeExample loadPullRequest ["base", "head", "dest"] + <> "will load a pull request for merging the remote repo `head` into the" + <> "remote repo `base`, staging each in `dest`, which must be empty." + ]) + (\case + [baseUrl, headUrl] -> do + baseRepo <- parseUri "baseRepo" baseUrl + headRepo <- parseUri "topicRepo" headUrl + pure $ Input.LoadPullRequestI baseRepo headRepo Path.relativeEmpty' + [baseUrl, headUrl, dest] -> do + baseRepo <- parseUri "baseRepo" baseUrl + headRepo <- parseUri "topicRepo" headUrl + destPath <- first fromString $ Path.parsePath' dest + pure $ Input.LoadPullRequestI baseRepo headRepo destPath + _ -> Left (I.help loadPullRequest) + ) +parseUri :: String -> String -> Either (P.Pretty P.ColorText) RemoteNamespace +parseUri label input = do + ns <- first (fromString . show) -- turn any parsing errors into a Pretty. + (P.parse UriParser.repoPath label (Text.pack input)) + case (RemoteRepo.commit . Lens.view Lens._1) ns of + Nothing -> pure ns + Just commit -> Left . P.wrap $ + "I don't totally know how to address specific git commits (e.g. " + <> P.group (P.text commit <> ")") <> " yet." 
+ <> "If you need this, add your 2¢ at" + <> P.backticked "https://github.com/unisonweb/unison/issues/1436" + +squashMerge :: InputPattern +squashMerge = + InputPattern "merge.squash" ["squash"] [(Required, pathArg), (Required, pathArg)] + (P.wrap $ makeExample squashMerge ["src","dest"] + <> "merges `src` namespace into `dest`," + <> "discarding the history of `src` in the process." + <> "The resulting `dest` will have (at most) 1" + <> "additional history entry.") + (\case + [src, dest] -> first fromString $ do + src <- Path.parsePath' src + dest <- Path.parsePath' dest + pure $ Input.MergeLocalBranchI src dest Branch.SquashMerge + _ -> Left (I.help squashMerge) + ) + +mergeLocal :: InputPattern +mergeLocal = InputPattern "merge" [] [(Required, pathArg) + ,(Optional, pathArg)] + (P.column2 [ + ("`merge src`", "merges `src` namespace into the current namespace"), + ("`merge src dest`", "merges `src` namespace into the `dest` namespace")]) + (\case + [src] -> first fromString $ do + src <- Path.parsePath' src + pure $ Input.MergeLocalBranchI src Path.relativeEmpty' Branch.RegularMerge + [src, dest] -> first fromString $ do + src <- Path.parsePath' src + dest <- Path.parsePath' dest + pure $ Input.MergeLocalBranchI src dest Branch.RegularMerge + _ -> Left (I.help mergeLocal) + ) + +diffNamespace :: InputPattern +diffNamespace = InputPattern + "diff.namespace" + [] + [(Required, pathArg), (Required, pathArg)] + (P.column2 + [ ( "`diff.namespace before after`" + , P.wrap + "shows how the namespace `after` differs from the namespace `before`" + ) + ] + ) + (\case + [before, after] -> first fromString $ do + before <- Path.parsePath' before + after <- Path.parsePath' after + pure $ Input.DiffNamespaceI before after + _ -> Left $ I.help diffNamespace + ) + +previewMergeLocal :: InputPattern +previewMergeLocal = InputPattern + "merge.preview" + [] + [(Required, pathArg), (Optional, pathArg)] + (P.column2 + [ ( "`merge.preview src`" + , "shows how the current 
namespace will change after a `merge src`." + ) + , ( "`merge.preview src dest`" + , "shows how `dest` namespace will change after a `merge src dest`." + ) + ] + ) + (\case + [src] -> first fromString $ do + src <- Path.parsePath' src + pure $ Input.PreviewMergeLocalBranchI src Path.relativeEmpty' + [src, dest] -> first fromString $ do + src <- Path.parsePath' src + dest <- Path.parsePath' dest + pure $ Input.PreviewMergeLocalBranchI src dest + _ -> Left (I.help previewMergeLocal) + ) + +replaceEdit + :: (HQ.HashQualified -> HQ.HashQualified -> Maybe Input.PatchPath -> Input) + -> String + -> InputPattern +replaceEdit f s = self + where + self = InputPattern + ("replace." <> s) + [] + [ (Required, definitionQueryArg) + , (Required, definitionQueryArg) + , (Optional, patchArg) + ] + (P.wrapColumn2 + [ ( makeExample self ["", "", ""] + , "Replace the " + <> P.string s + <> " in the given patch " + <> "with the " + <> P.string s + <> " ." + ) + , ( makeExample self ["", ""] + , "Replace the " + <> P.string s + <> " with in the default patch." + ) + ] + ) + (\case + source : target : patch -> do + patch <- + first fromString + <$> traverse (Path.parseSplit' Path.definitionNameSegment) + $ listToMaybe patch + sourcehq <- parseHashQualifiedName source + targethq <- parseHashQualifiedName target + pure $ f sourcehq targethq patch + _ -> Left $ I.help self + ) + +replaceType :: InputPattern +replaceType = replaceEdit Input.ReplaceTypeI "type" + +replaceTerm :: InputPattern +replaceTerm = replaceEdit Input.ReplaceTermI "term" + +viewReflog :: InputPattern +viewReflog = InputPattern + "reflog" + [] + [] + "`reflog` lists the changes that have affected the root namespace" + (\case + [] -> pure Input.ShowReflogI + _ -> Left . warn . 
P.string + $ I.patternName viewReflog ++ " doesn't take any arguments.") + +edit :: InputPattern +edit = InputPattern + "edit" + [] + [(OnePlus, definitionQueryArg)] + ( "`edit foo` prepends the definition of `foo` to the top of the most " + <> "recently saved file." + ) + ( fmap (Input.ShowDefinitionI Input.LatestFileLocation) + . traverse parseHashQualifiedName + ) + +topicNameArg :: ArgumentType +topicNameArg = + ArgumentType "topic" $ \q _ _ _ -> pure (exactComplete q $ Map.keys helpTopicsMap) + +helpTopics :: InputPattern +helpTopics = InputPattern + "help-topics" + ["help-topic"] + [(Optional, topicNameArg)] + ( "`help-topics` lists all topics and `help-topics ` shows an explanation of that topic." ) + (\case + [] -> Left topics + [topic] -> case Map.lookup topic helpTopicsMap of + Nothing -> Left . warn $ "I don't know of that topic. Try `help-topics`." + Just t -> Left t + _ -> Left $ warn "Use `help-topics ` or `help-topics`." + ) + where + topics = P.callout "🌻" $ P.lines [ + "Here's a list of topics I can tell you more about: ", + "", + P.indentN 2 $ P.sep "\n" (P.string <$> Map.keys helpTopicsMap), + "", + aside "Example" "use `help filestatus` to learn more about that topic." + ] + +helpTopicsMap :: Map String (P.Pretty P.ColorText) +helpTopicsMap = Map.fromList [ + ("testcache", testCacheMsg), + ("filestatus", fileStatusMsg), + ("messages.disallowedAbsolute", disallowedAbsoluteMsg), + ("namespaces", pathnamesMsg) + ] + where + blankline = ("","") + fileStatusMsg = P.callout "📓" . P.lines $ [ + P.wrap $ "Here's a list of possible status messages you might see" + <> "for definitions in a .u file.", "", + P.wrapColumn2 [ + (P.bold $ SR.prettyStatus SR.Collision, + "A definition with the same name as an existing definition. Doing" <> + "`update` instead of `add` will turn this failure into a successful" <> + "update."), + blankline, + (P.bold $ SR.prettyStatus SR.Conflicted, + "A definition with the same name as an existing definition." 
<> + "Resolving the conflict and then trying an `update` again will" <> + "turn this into a successful update."), + blankline, + (P.bold $ SR.prettyStatus SR.TermExistingConstructorCollision, + "A definition with the same name as an existing constructor for " <> + "some data type. Rename your definition or the data type before" <> + "trying again to `add` or `update`."), + blankline, + (P.bold $ SR.prettyStatus SR.ConstructorExistingTermCollision, + "A type defined in the file has a constructor that's named the" <> + "same as an existing term. Rename that term or your constructor" <> + "before trying again to `add` or `update`."), + blankline, + (P.bold $ SR.prettyStatus SR.BlockedDependency, + "This definition was blocked because it dependended on " <> + "a definition with a failed status."), + blankline, + (P.bold $ SR.prettyStatus SR.ExtraDefinition, + "This definition was added because it was a dependency of" <> + "a definition explicitly selected.") + ] + ] + testCacheMsg = P.callout "🎈" . P.lines $ [ + P.wrap $ "Unison caches the results of " <> P.blue "test>" + <> "watch expressions. Since these expressions are pure and" + <> "always yield the same result when evaluated, there's no need" + <> "to run them more than once!", + "", + P.wrap $ "A test is rerun only if it has changed, or if one" + <> "of the definitions it depends on has changed." + ] + pathnamesMsg = P.callout "\129488" . P.lines $ [ + P.wrap $ "There are two kinds of namespaces," <> P.group (P.blue "absolute" <> ",") + <> "such as" <> P.group ("(" <> P.blue ".foo.bar") + <> "or" <> P.group (P.blue ".base.math.+" <> ")") + <> "and" <> P.group (P.green "relative" <> ",") + <> "such as" <> P.group ("(" <> P.green "math.sqrt") + <> "or" <> P.group (P.green "util.List.++" <> ")."), + "", + P.wrap $ "Relative names are converted to absolute names by prepending the current namespace." 
+ <> "For example, if your Unison prompt reads:", "", + P.indentN 2 $ P.blue ".foo.bar>", "", + "and your .u file looks like:", "", + P.indentN 2 $ P.green "x" <> " = 41", "", + P.wrap $ + "then doing an" <> P.blue "add" <> + "will create the definition with the absolute name" <> + P.group (P.blue ".foo.bar.x" <> " = 41"), + "", + P.wrap $ + "and you can refer to" <> P.green "x" <> "by its absolute name " <> + P.blue ".foo.bar.x" <> "elsewhere" <> "in your code. For instance:", "", + P.indentN 2 $ + "answerToLifeTheUniverseAndEverything = " <> P.blue ".foo.bar.x" <> " + 1" + ] + + disallowedAbsoluteMsg = P.callout "\129302" . P.lines $ [ + P.wrap $ + "Although I can understand absolute (ex: .foo.bar) or" <> + "relative (ex: util.math.sqrt) references to existing definitions" <> + P.group ("(" <> P.blue "help namespaces") <> "to learn more)," <> + "I can't yet handle giving new definitions with absolute names in a .u file.", + "", + P.wrap $ "As a workaround, you can give definitions with a relative name" + <> "temporarily (like `exports.blah.foo`) and then use `move.*` " + <> "or `merge` commands to move stuff around afterwards." + ] + +help :: InputPattern +help = InputPattern + "help" ["?"] [(Optional, commandNameArg)] + "`help` shows general help and `help ` shows help for one command." + (\case + [] -> Left $ intercalateMap "\n\n" showPatternHelp + (sortOn I.patternName validInputs) + [isHelp -> Just msg] -> Left msg + [cmd] -> case Map.lookup cmd commandsByName of + Nothing -> Left . warn $ "I don't know of that command. Try `help`." + Just pat -> Left $ showPatternHelp pat + _ -> Left $ warn "Use `help ` or `help`.") + where + commandsByName = Map.fromList [ + (n, i) | i <- validInputs, n <- I.patternName i : I.aliases i ] + isHelp s = Map.lookup s helpTopicsMap + +quit :: InputPattern +quit = InputPattern "quit" ["exit", ":q"] [] + "Exits the Unison command line interface." + (\case + [] -> pure Input.QuitI + _ -> Left "Use `quit`, `exit`, or to quit." 
+ ) + +viewPatch :: InputPattern +viewPatch = InputPattern "view.patch" [] [(Required, patchArg)] + (P.wrapColumn2 + [ ( makeExample' viewPatch + , "Lists all the edits in the default patch." + ) + , ( makeExample viewPatch [""] + , "Lists all the edits in the given patch." + ) + ] + ) + (\case + [] -> Right $ Input.ListEditsI Nothing + [patchStr] -> mapLeft fromString $ do + patch <- Path.parseSplit' Path.definitionNameSegment patchStr + Right $ Input.ListEditsI (Just patch) + _ -> Left $ warn "`view.patch` takes a patch and that's it." + ) + +link :: InputPattern +link = InputPattern + "link" + [] + [(Required, definitionQueryArg), (OnePlus, definitionQueryArg)] + (fromString $ concat + [ "`link metadata defn` creates a link to `metadata` from `defn`. " + , "Use `links defn` or `links defn ` to view outgoing links, " + , "and `unlink metadata defn` to remove a link. The `defn` can be either the " + , "name of a term or type, multiple such names, or a range like `1-4` " + , "for a range of definitions listed by a prior `find` command." + ] + ) + (\case + md : defs -> first fromString $ do + md <- case HQ.fromString md of + Nothing -> Left "Invalid hash qualified identifier for metadata." 
+ Just hq -> pure hq + defs <- traverse Path.parseHQSplit' defs + Right $ Input.LinkI md defs + _ -> Left (I.help link) + ) + +links :: InputPattern +links = InputPattern + "links" + [] + [(Required, definitionQueryArg), (Optional, definitionQueryArg)] + (P.column2 [ + (makeExample links ["defn"], "shows all outgoing links from `defn`."), + (makeExample links ["defn", ""], "shows all links of the given type.") ]) + (\case + src : rest -> first fromString $ do + src <- Path.parseHQSplit' src + let ty = case rest of + [] -> Nothing + _ -> Just $ unwords rest + in Right $ Input.LinksI src ty + _ -> Left (I.help links) + ) + +unlink :: InputPattern +unlink = InputPattern + "unlink" + ["delete.link"] + [(Required, definitionQueryArg), (OnePlus, definitionQueryArg)] + (fromString $ concat + [ "`unlink metadata defn` removes a link to `metadata` from `defn`." + , "The `defn` can be either the " + , "name of a term or type, multiple such names, or a range like `1-4` " + , "for a range of definitions listed by a prior `find` command." + ]) + (\case + md : defs -> first fromString $ do + md <- case HQ.fromString md of + Nothing -> Left "Invalid hash qualified identifier for metadata." + Just hq -> pure hq + defs <- traverse Path.parseHQSplit' defs + Right $ Input.UnlinkI md defs + _ -> Left (I.help unlink) + ) + +names :: InputPattern +names = InputPattern "names" [] + [(Required, definitionQueryArg)] + "`names foo` shows the hash and all known names for `foo`." + (\case + [thing] -> case HQ.fromString thing of + Just hq -> Right $ Input.NamesI hq + Nothing -> Left $ "I was looking for one of these forms: " + <> P.blue "foo .foo.bar foo#abc #abcde .foo.bar#asdf" + _ -> Left (I.help names) + ) + +dependents, dependencies :: InputPattern +dependents = InputPattern "dependents" [] [] + "List the dependents of the specified definition." 
+ (\case + [thing] -> fmap Input.ListDependentsI $ parseHashQualifiedName thing + _ -> Left (I.help dependents)) +dependencies = InputPattern "dependencies" [] [] + "List the dependencies of the specified definition." + (\case + [thing] -> fmap Input.ListDependenciesI $ parseHashQualifiedName thing + _ -> Left (I.help dependencies)) + +debugNumberedArgs :: InputPattern +debugNumberedArgs = InputPattern "debug.numberedArgs" [] [] + "Dump the contents of the numbered args state." + (const $ Right Input.DebugNumberedArgsI) + +debugBranchHistory :: InputPattern +debugBranchHistory = InputPattern "debug.history" [] + [(Optional, noCompletions)] + "Dump codebase history, compatible with bit-booster.com/graph.html" + (const $ Right Input.DebugBranchHistoryI) + +debugFileHashes :: InputPattern +debugFileHashes = InputPattern "debug.file" [] [] + "View details about the most recent succesfully typechecked file." + (const $ Right Input.DebugTypecheckedUnisonFileI) + +test :: InputPattern +test = InputPattern "test" [] [] + "`test` runs unit tests for the current branch." + (const $ pure $ Input.TestI True True) + +execute :: InputPattern +execute = InputPattern + "run" + [] + [] + (P.wrapColumn2 + [ ( "`run mymain`" + , "Runs `!mymain`, where `mymain` is searched for in the most recent" + <> "typechecked file, or in the codebase." + ) + ] + ) + (\case + [w] -> pure . 
Input.ExecuteI $ w + _ -> Left $ showPatternHelp execute + ) + +createAuthor :: InputPattern +createAuthor = InputPattern "create.author" [] + [(Required, noCompletions), (Required, noCompletions)] + (makeExample createAuthor ["alicecoder", "\"Alice McGee\""] + <> "creates" <> backtick "alicecoder" <> "values in" + <> backtick "metadata.authors" <> "and" + <> backtickEOS "metadata.copyrightHolders") + (\case + symbolStr : authorStr@(_:_) -> first fromString $ do + symbol <- Path.definitionNameSegment symbolStr + -- let's have a real parser in not too long + let author :: Text + author = Text.pack $ case (unwords authorStr) of + quoted@('"':_) -> (init . tail) quoted + bare -> bare + pure $ Input.CreateAuthorI symbol author + _ -> Left $ showPatternHelp createAuthor + ) +validInputs :: [InputPattern] +validInputs = + [ help + , helpTopics + , load + , add + , previewAdd + , update + , previewUpdate + , delete + , forkLocal + , mergeLocal + , squashMerge + , previewMergeLocal + , diffNamespace + , names + , push + , pull + , pushExhaustive + , pullExhaustive + , createPullRequest + , loadPullRequest + , cd + , back + , deleteBranch + , renameBranch + , deletePatch + , renamePatch + , copyPatch + , find + , findShallow + , findVerbose + , view + , display + , displayTo + , docs + , findPatch + , viewPatch + , undo + , history + , edit + , renameTerm + , deleteTerm + , aliasTerm + , renameType + , deleteType + , aliasType + , aliasMany + , todo + , patch + , link + , unlink + , links + , createAuthor + , replaceTerm + , replaceType + , deleteTermReplacement + , deleteTypeReplacement + , test + , execute + , viewReflog + , resetRoot + , quit + , updateBuiltins + , mergeBuiltins + , mergeIOBuiltins + , dependents, dependencies + , debugNumberedArgs + , debugBranchHistory + , debugFileHashes + ] + +commandNames :: [String] +commandNames = validInputs >>= \i -> I.patternName i : I.aliases i + +commandNameArg :: ArgumentType +commandNameArg = + ArgumentType "command" $ \q _ 
_ _ -> pure (exactComplete q (commandNames <> Map.keys helpTopicsMap)) + +exactDefinitionOrPathArg :: ArgumentType +exactDefinitionOrPathArg = + ArgumentType "definition or path" $ + bothCompletors + (bothCompletors + (termCompletor exactComplete) + (typeCompletor exactComplete)) + (pathCompletor exactComplete (Set.map Path.toText . Branch.deepPaths)) + +fuzzyDefinitionQueryArg :: ArgumentType +fuzzyDefinitionQueryArg = + -- todo: improve this + ArgumentType "fuzzy definition query" $ + bothCompletors (termCompletor fuzzyComplete) + (typeCompletor fuzzyComplete) + +definitionQueryArg :: ArgumentType +definitionQueryArg = fuzzyDefinitionQueryArg { typeName = "definition query" } + +exactDefinitionTypeQueryArg :: ArgumentType +exactDefinitionTypeQueryArg = + ArgumentType "term definition query" $ typeCompletor exactComplete + +exactDefinitionTermQueryArg :: ArgumentType +exactDefinitionTermQueryArg = + ArgumentType "term definition query" $ termCompletor exactComplete + +typeCompletor :: Applicative m + => (String -> [String] -> [Completion]) + -> String + -> Codebase m v a + -> Branch.Branch m + -> Path.Absolute + -> m [Completion] +typeCompletor filterQuery = pathCompletor filterQuery go where + go = Set.map HQ'.toText . R.dom . Names.types . Names.names0ToNames . Branch.toNames0 + +termCompletor :: Applicative m + => (String -> [String] -> [Completion]) + -> String + -> Codebase m v a + -> Branch.Branch m + -> Path.Absolute + -> m [Completion] +termCompletor filterQuery = pathCompletor filterQuery go where + go = Set.map HQ'.toText . R.dom . Names.terms . Names.names0ToNames . Branch.toNames0 + +patchArg :: ArgumentType +patchArg = ArgumentType "patch" $ pathCompletor + exactComplete + (Set.map Name.toText . Map.keysSet . 
Branch.deepEdits) + +bothCompletors + :: (Monad m) + => (String -> t2 -> t3 -> t4 -> m [Completion]) + -> (String -> t2 -> t3 -> t4 -> m [Completion]) + -> String -> t2 -> t3 -> t4 -> m [Completion] +bothCompletors c1 c2 q code b currentPath = do + suggestions1 <- c1 q code b currentPath + suggestions2 <- c2 q code b currentPath + pure . fixupCompletion q + . nubOrdOn Completion.display + $ suggestions1 ++ suggestions2 + +pathCompletor + :: Applicative f + => (String -> [String] -> [Completion]) + -> (Branch.Branch0 m -> Set Text) + -> String + -> codebase + -> Branch.Branch m + -> Path.Absolute + -> f [Completion] +pathCompletor filterQuery getNames query _code b p = let + b0root = Branch.head b + b0local = Branch.getAt0 (Path.unabsolute p) b0root + -- todo: if these sets are huge, maybe trim results + in pure . filterQuery query . map Text.unpack $ + toList (getNames b0local) ++ + if "." `isPrefixOf` query then + map ("." <>) (toList (getNames b0root)) + else + [] + +pathArg :: ArgumentType +pathArg = ArgumentType "namespace" $ + pathCompletor exactComplete (Set.map Path.toText . Branch.deepPaths) + +newNameArg :: ArgumentType +newNameArg = ArgumentType "new-name" $ + pathCompletor prefixIncomplete + (Set.map ((<> ".") . Path.toText) . 
Branch.deepPaths) + +noCompletions :: ArgumentType +noCompletions = ArgumentType "word" I.noSuggestions + +-- Arya: I could imagine completions coming from previous git pulls +gitUrlArg :: ArgumentType +gitUrlArg = ArgumentType "git-url" $ \input _ _ _ -> case input of + "gh" -> complete "https://github.com/" + "gl" -> complete "https://gitlab.com/" + "bb" -> complete "https://bitbucket.com/" + "ghs" -> complete "git@github.com:" + "gls" -> complete "git@gitlab.com:" + "bbs" -> complete "git@bitbucket.com:" + _ -> pure [] + where complete s = pure [Completion s s False] + +collectNothings :: (a -> Maybe b) -> [a] -> [a] +collectNothings f as = [ a | (Nothing, a) <- map f as `zip` as ] + +patternFromInput :: Input -> InputPattern +patternFromInput = \case + Input.PushRemoteBranchI _ _ SyncMode.ShortCircuit -> push + Input.PushRemoteBranchI _ _ SyncMode.Complete -> pushExhaustive + Input.PullRemoteBranchI _ _ SyncMode.ShortCircuit -> pull + Input.PullRemoteBranchI _ _ SyncMode.Complete -> pushExhaustive + _ -> error "todo: finish this function" + +inputStringFromInput :: IsString s => Input -> P.Pretty s +inputStringFromInput = \case + i@(Input.PushRemoteBranchI rh p' _) -> + (P.string . I.patternName $ patternFromInput i) + <> (" " <> maybe mempty (P.text . uncurry RemoteRepo.printHead) rh) + <> " " <> P.shown p' + i@(Input.PullRemoteBranchI ns p' _) -> + (P.string . I.patternName $ patternFromInput i) + <> (" " <> maybe mempty (P.text . 
uncurry3 RemoteRepo.printNamespace) ns) + <> " " <> P.shown p' + _ -> error "todo: finish this function" diff --git a/parser-typechecker/src/Unison/CommandLine/Main.hs b/parser-typechecker/src/Unison/CommandLine/Main.hs new file mode 100644 index 0000000000..ba4f1fcd3c --- /dev/null +++ b/parser-typechecker/src/Unison/CommandLine/Main.hs @@ -0,0 +1,258 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.CommandLine.Main where + +import Unison.Prelude + +import Control.Concurrent.STM (atomically) +import Control.Exception (finally, catch, AsyncException(UserInterrupt), asyncExceptionFromException) +import Control.Monad.State (runStateT) +import Data.Configurator.Types (Config) +import Data.IORef +import Data.Tuple.Extra (uncurry3) +import Prelude hiding (readFile, writeFile) +import System.IO.Error (isDoesNotExistError) +import Unison.Codebase.Branch (Branch) +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.Editor.Input (Input (..), Event) +import qualified Unison.Codebase.Editor.HandleInput as HandleInput +import qualified Unison.Codebase.Editor.HandleCommand as HandleCommand +import Unison.Codebase.Editor.Command (LoadSourceResult(..)) +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, printNamespace) +import Unison.Codebase.Runtime (Runtime) +import Unison.Codebase (Codebase) +import Unison.CommandLine +import Unison.PrettyTerminal +import Unison.CommandLine.InputPattern (ArgumentType (suggestions), InputPattern (aliases, patternName)) +import Unison.CommandLine.InputPatterns (validInputs) +import Unison.CommandLine.OutputMessages (notifyUser, notifyNumbered, shortenDirectory) +import Unison.Parser (Ann) +import Unison.Var (Var) +import qualified Control.Concurrent.Async as Async +import qualified Data.Map as Map +import qualified Data.Text as Text +import qualified Data.Text.IO +import qualified System.Console.Haskeline as Line +import qualified Crypto.Random as 
Random +import qualified Unison.Codebase.Path as Path +import qualified Unison.Codebase.Runtime as Runtime +import qualified Unison.Codebase as Codebase +import qualified Unison.CommandLine.InputPattern as IP +import qualified Unison.Util.Pretty as P +import qualified Unison.Util.TQueue as Q +import Text.Regex.TDFA +import Control.Lens (view) +import Control.Error (rightMay) + +-- Expand a numeric argument like `1` or a range like `3-9` +expandNumber :: [String] -> String -> [String] +expandNumber numberedArgs s = + maybe [s] + (map (\i -> fromMaybe (show i) . atMay numberedArgs $ i - 1)) + expandedNumber + where + rangeRegex = "([0-9]+)-([0-9]+)" :: String + (junk,_,moreJunk, ns) = + s =~ rangeRegex :: (String, String, String, [String]) + expandedNumber = + case readMay s of + Just i -> Just [i] + Nothing -> + -- check for a range + case (junk, moreJunk, ns) of + ("", "", [from, to]) -> + (\x y -> [x..y]) <$> readMay from <*> readMay to + _ -> Nothing + +getUserInput + :: (MonadIO m, Line.MonadException m) + => Map String InputPattern + -> Codebase m v a + -> Branch m + -> Path.Absolute + -> [String] + -> m Input +getUserInput patterns codebase branch currentPath numberedArgs = + Line.runInputT settings go + where + go = do + line <- Line.getInputLine $ + P.toANSI 80 ((P.green . P.shown) currentPath <> fromString prompt) + case line of + Nothing -> pure QuitI + Just l -> + case words l of + [] -> go + ws -> + case parseInput patterns . (>>= expandNumber numberedArgs) $ ws of + Left msg -> do + liftIO $ putPrettyLn msg + go + Right i -> pure i + settings = Line.Settings tabComplete (Just ".unisonHistory") True + tabComplete = Line.completeWordWithPrev Nothing " " $ \prev word -> + -- User hasn't finished a command name, complete from command names + if null prev + then pure . 
exactComplete word $ Map.keys patterns + -- User has finished a command name; use completions for that command + else case words $ reverse prev of + h : t -> fromMaybe (pure []) $ do + p <- Map.lookup h patterns + argType <- IP.argType p (length t) + pure $ suggestions argType word codebase branch currentPath + _ -> pure [] + +asciiartUnison :: P.Pretty P.ColorText +asciiartUnison = + P.red " _____" + <> P.hiYellow " _ " + <> P.newline + <> P.red "| | |" + <> P.hiRed "___" + <> P.hiYellow "|_|" + <> P.hiGreen "___ " + <> P.cyan "___ " + <> P.purple "___ " + <> P.newline + <> P.red "| | | " + <> P.hiYellow "| |" + <> P.hiGreen "_ -" + <> P.cyan "| . |" + <> P.purple " |" + <> P.newline + <> P.red "|_____|" + <> P.hiRed "_|_" + <> P.hiYellow "|_|" + <> P.hiGreen "___" + <> P.cyan "|___|" + <> P.purple "_|_|" + +welcomeMessage :: FilePath -> String -> P.Pretty P.ColorText +welcomeMessage dir version = + asciiartUnison + <> P.newline + <> P.newline + <> P.linesSpaced + [ P.wrap "Welcome to Unison!" + , P.wrap ("You are running version: " <> P.string version) + , P.wrap + ( "I'm currently watching for changes to .u files under " + <> (P.group . P.blue $ fromString dir) + ) + , P.wrap ("Type " <> P.hiBlue "help" <> " to get help. 😎") + ] + +hintFreshCodebase :: RemoteNamespace -> P.Pretty P.ColorText +hintFreshCodebase ns = + P.wrap $ "Enter " + <> (P.hiBlue . P.group) + ("pull " <> P.text (uncurry3 printNamespace ns) <> " .base") + <> "to set up the default base library. 🏗" + +main + :: forall v + . Var v + => FilePath + -> Maybe RemoteNamespace + -> Path.Absolute + -> (Config, IO ()) + -> [Either Event Input] + -> IO (Runtime v) + -> Codebase IO v Ann + -> Branch.Cache IO + -> String + -> IO () +main dir defaultBaseLib initialPath (config,cancelConfig) initialInputs startRuntime codebase branchCache version = do + dir' <- shortenDirectory dir + root <- fromMaybe Branch.empty . 
rightMay <$> Codebase.getRootBranch codebase + putPrettyLn $ case defaultBaseLib of + Just ns | Branch.isOne root -> + welcomeMessage dir' version <> P.newline <> P.newline <> hintFreshCodebase ns + _ -> welcomeMessage dir' version + eventQueue <- Q.newIO + do + runtime <- startRuntime + -- we watch for root branch tip changes, but want to ignore ones we expect. + rootRef <- newIORef root + pathRef <- newIORef initialPath + initialInputsRef <- newIORef initialInputs + numberedArgsRef <- newIORef [] + pageOutput <- newIORef True + cancelFileSystemWatch <- watchFileSystem eventQueue dir + cancelWatchBranchUpdates <- watchBranchUpdates (readIORef rootRef) + eventQueue + codebase + let patternMap = + Map.fromList + $ validInputs + >>= (\p -> (patternName p, p) : ((, p) <$> aliases p)) + getInput = do + root <- readIORef rootRef + path <- readIORef pathRef + numberedArgs <- readIORef numberedArgsRef + getUserInput patternMap codebase root path numberedArgs + loadSourceFile :: Text -> IO LoadSourceResult + loadSourceFile fname = + if allow $ Text.unpack fname + then + let handle :: IOException -> IO LoadSourceResult + handle e = + case e of + _ | isDoesNotExistError e -> return InvalidSourceNameError + _ -> return LoadError + go = do + contents <- Data.Text.IO.readFile $ Text.unpack fname + return $ LoadSuccess contents + in catch go handle + else return InvalidSourceNameError + notify = notifyUser dir >=> (\o -> + ifM (readIORef pageOutput) + (putPrettyNonempty o) + (putPrettyLnUnpaged o)) + let + awaitInput = do + -- use up buffered input before consulting external events + i <- readIORef initialInputsRef + (case i of + h:t -> writeIORef initialInputsRef t >> pure h + [] -> + -- Race the user input and file watch. 
+ Async.race (atomically $ Q.peek eventQueue) getInput >>= \case + Left _ -> do + let e = Left <$> atomically (Q.dequeue eventQueue) + writeIORef pageOutput False + e + x -> do + writeIORef pageOutput True + pure x) `catch` interruptHandler + interruptHandler (asyncExceptionFromException -> Just UserInterrupt) = awaitInput + interruptHandler _ = pure $ Right QuitI + cleanup = do + Runtime.terminate runtime + cancelConfig + cancelFileSystemWatch + cancelWatchBranchUpdates + loop state = do + writeIORef pathRef (view HandleInput.currentPath state) + let free = runStateT (runMaybeT HandleInput.loop) state + + (o, state') <- HandleCommand.commandLine config awaitInput + (writeIORef rootRef) + runtime + notify + (\o -> let (p, args) = notifyNumbered o in + putPrettyNonempty p $> args) + loadSourceFile + codebase + (const Random.getSystemDRG) + branchCache + free + case o of + Nothing -> pure () + Just () -> do + writeIORef numberedArgsRef (HandleInput._numberedArgs state') + loop state' + (`finally` cleanup) + $ loop (HandleInput.loopState0 root initialPath) diff --git a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs new file mode 100644 index 0000000000..4fc65ab260 --- /dev/null +++ b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs @@ -0,0 +1,1977 @@ +{-# OPTIONS_GHC -fno-warn-partial-type-signatures #-} + +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ViewPatterns #-} + + +module Unison.CommandLine.OutputMessages where + +import Unison.Prelude hiding (unlessM) + +import Unison.Codebase.Editor.Output +import qualified Unison.Codebase.Editor.Output as E +import qualified Unison.Codebase.Editor.Output as Output +import qualified Unison.Codebase.Editor.TodoOutput as TO +import qualified Unison.Codebase.Editor.SearchResult' as SR' +import qualified 
Unison.Codebase.Editor.Output.BranchDiff as OBD + + +import Control.Lens +import qualified Control.Monad.State.Strict as State +import Data.Bifunctor (first, second) +import Data.List (sort, stripPrefix) +import Data.List.Extra (nubOrdOn, nubOrd, notNull) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Sequence as Seq +import qualified Data.Text as Text +import Data.Text.IO (readFile, writeFile) +import Data.Tuple.Extra (dupe, uncurry3) +import Prelude hiding (readFile, writeFile) +import System.Directory ( canonicalizePath + , doesFileExist + , getHomeDirectory + ) +import qualified Unison.ABT as ABT +import qualified Unison.UnisonFile as UF +import Unison.Codebase.GitError +import qualified Unison.Codebase.Path as Path +import qualified Unison.Codebase.Patch as Patch +import Unison.Codebase.Patch (Patch(..)) +import qualified Unison.Codebase.ShortBranchHash as SBH +import qualified Unison.Codebase.TermEdit as TermEdit +import qualified Unison.Codebase.TypeEdit as TypeEdit +import Unison.CommandLine ( bigproblem + , tip + , note + ) +import Unison.PrettyTerminal ( clearCurrentLine + , putPretty' + ) +import qualified Unison.CommandLine.InputPattern as IP1 +import Unison.CommandLine.InputPatterns (makeExample, makeExample') +import qualified Unison.CommandLine.InputPatterns as IP +import qualified Unison.Builtin.Decls as DD +import qualified Unison.DataDeclaration as DD +import qualified Unison.DeclPrinter as DeclPrinter +import qualified Unison.HashQualified as HQ +import qualified Unison.HashQualified' as HQ' +import Unison.Name (Name) +import qualified Unison.Name as Name +import Unison.NamePrinter (prettyHashQualified, + prettyReference, prettyReferent, + prettyLabeledDependency, + prettyNamedReference, + prettyNamedReferent, + prettyName, prettyShortHash, + styleHashQualified, + styleHashQualified', prettyHashQualified') +import Unison.Names2 (Names'(..), Names0) +import qualified Unison.Names2 as Names +import 
qualified Unison.Names3 as Names +import Unison.Parser (Ann, startingLine) +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Codebase.Runtime as Runtime +import Unison.PrintError ( prettyParseError + , printNoteWithSource + , prettyResolutionFailures + ) +import qualified Unison.Reference as Reference +import Unison.Reference ( Reference ) +import qualified Unison.Referent as Referent +import Unison.Referent ( Referent ) +import qualified Unison.Result as Result +import qualified Unison.Term as Term +import Unison.Term (Term) +import Unison.Type (Type) +import qualified Unison.TermPrinter as TermPrinter +import qualified Unison.TypePrinter as TypePrinter +import qualified Unison.Util.ColorText as CT +import Unison.Util.Monoid ( intercalateMap + , unlessM + ) +import qualified Unison.Util.Pretty as P +import qualified Unison.Util.Relation as R +import Unison.Var (Var) +import qualified Unison.Var as Var +import qualified Unison.Codebase.Editor.SlurpResult as SlurpResult +import Unison.Codebase.Editor.DisplayThing (DisplayThing(MissingThing, BuiltinThing, RegularThing)) +import qualified Unison.Codebase.Editor.Input as Input +import qualified Unison.Hash as Hash +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.Editor.RemoteRepo as RemoteRepo +import qualified Unison.Util.List as List +import qualified Unison.Util.Monoid as Monoid +import Data.Tuple (swap) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import qualified Unison.ShortHash as SH +import Unison.LabeledDependency as LD +import Unison.Codebase.Editor.RemoteRepo (RemoteRepo) + +type Pretty = P.Pretty P.ColorText + +shortenDirectory :: FilePath -> IO FilePath +shortenDirectory dir = do + home <- getHomeDirectory + pure $ case stripPrefix home dir of + Just d -> "~" <> d + Nothing -> dir + +renderFileName :: FilePath -> IO Pretty +renderFileName dir = P.group . P.blue . 
fromString <$> shortenDirectory dir + +notifyNumbered :: Var v => NumberedOutput v -> (Pretty, NumberedArgs) +notifyNumbered o = case o of + ShowDiffNamespace oldPrefix newPrefix ppe diffOutput -> + showDiffNamespace ShowNumbers ppe oldPrefix newPrefix diffOutput + + ShowDiffAfterDeleteDefinitions ppe diff -> + first (\p -> P.lines + [ p + , "" + , undoTip + ]) (showDiffNamespace ShowNumbers ppe e e diff) + + ShowDiffAfterDeleteBranch bAbs ppe diff -> + first (\p -> P.lines + [ p + , "" + , undoTip + ]) (showDiffNamespace ShowNumbers ppe bAbs bAbs diff) + + ShowDiffAfterModifyBranch b' _ _ (OBD.isEmpty -> True) -> + (P.wrap $ "Nothing changed in" <> prettyPath' b' <> ".", mempty) + ShowDiffAfterModifyBranch b' bAbs ppe diff -> + first (\p -> P.lines + [ P.wrap $ "Here's what changed in" <> prettyPath' b' <> ":" + , "" + , p + , "" + , undoTip + ]) (showDiffNamespace ShowNumbers ppe bAbs bAbs diff) + + ShowDiffAfterMerge _ _ _ (OBD.isEmpty -> True) -> + (P.wrap $ "Nothing changed as a result of the merge.", mempty) + ShowDiffAfterMerge dest' destAbs ppe diffOutput -> + first (\p -> P.lines [ + P.wrap $ "Here's what's changed in " <> prettyPath' dest' <> "after the merge:" + , "" + , p + , "" + , tip $ "You can use " <> IP.makeExample' IP.todo + <> "to see if this generated any work to do in this namespace" + <> "and " <> IP.makeExample' IP.test <> "to run the tests." + <> "Or you can use" <> IP.makeExample' IP.undo <> " or" + <> IP.makeExample' IP.viewReflog <> " to undo the results of this merge." 
+ ]) (showDiffNamespace ShowNumbers ppe destAbs destAbs diffOutput) + + ShowDiffAfterMergePropagate dest' destAbs patchPath' ppe diffOutput -> + first (\p -> P.lines [ + P.wrap $ "Here's what's changed in " <> prettyPath' dest' + <> "after applying the patch at " <> P.group (prettyPath' patchPath' <> ":") + , "" + , p + , "" + , tip $ "You can use " + <> IP.makeExample IP.todo [prettyPath' patchPath', prettyPath' dest'] + <> "to see if this generated any work to do in this namespace" + <> "and " <> IP.makeExample' IP.test <> "to run the tests." + <> "Or you can use" <> IP.makeExample' IP.undo <> " or" + <> IP.makeExample' IP.viewReflog <> " to undo the results of this merge." + ]) (showDiffNamespace ShowNumbers ppe destAbs destAbs diffOutput) + + ShowDiffAfterMergePreview dest' destAbs ppe diffOutput -> + first (\p -> P.lines [ + P.wrap $ "Here's what would change in " <> prettyPath' dest' <> "after the merge:" + , "" + , p + ]) (showDiffNamespace ShowNumbers ppe destAbs destAbs diffOutput) + + ShowDiffAfterUndo ppe diffOutput -> + first (\p -> P.lines ["Here's the changes I undid", "", p ]) + (showDiffNamespace ShowNumbers ppe e e diffOutput) + + ShowDiffAfterPull dest' destAbs ppe diff -> + if OBD.isEmpty diff then + ("✅ Looks like " <> prettyPath' dest' <> " is up to date.", mempty) + else + first (\p -> P.lines [ + P.wrap $ "Here's what's changed in " <> prettyPath' dest' <> "after the pull:", "", + p, "", + undoTip + ]) + (showDiffNamespace ShowNumbers ppe destAbs destAbs diff) + ShowDiffAfterCreatePR baseRepo headRepo ppe diff -> + if OBD.isEmpty diff then + (P.wrap $ "Looks like there's no difference between " + <> prettyRemoteNamespace baseRepo + <> "and" + <> prettyRemoteNamespace headRepo <> "." 
+ ,mempty) + else first (\p -> + (P.lines + [P.wrap $ "The changes summarized below are available for you to review," + <> "using the following command:" + ,"" + ,P.indentN 2 $ + IP.makeExampleNoBackticks + IP.loadPullRequest [(prettyRemoteNamespace baseRepo) + ,(prettyRemoteNamespace headRepo)] + ,"" + ,p])) (showDiffNamespace HideNumbers ppe e e diff) + -- todo: these numbers aren't going to work, + -- since the content isn't necessarily here. + -- Should we have a mode with no numbers? :P + + ShowDiffAfterCreateAuthor authorNS authorPath' bAbs ppe diff -> + first (\p -> P.lines + [ p + , "" + , tip $ "Add" <> prettyName "License" <> "values for" + <> prettyName (Name.fromSegment authorNS) + <> "under" <> P.group (prettyPath' authorPath' <> ".") + ]) (showDiffNamespace ShowNumbers ppe bAbs bAbs diff) + where + e = Path.absoluteEmpty + undoTip = tip $ "You can use" <> IP.makeExample' IP.undo + <> "or" <> IP.makeExample' IP.viewReflog + <> "to undo this change." + +prettyRemoteNamespace :: (RemoteRepo.RemoteRepo, + Maybe ShortBranchHash, Path.Path) + -> P.Pretty P.ColorText +prettyRemoteNamespace = + P.group . P.text . uncurry3 RemoteRepo.printNamespace + +notifyUser :: forall v . Var v => FilePath -> Output v -> IO Pretty +notifyUser dir o = case o of + Success -> pure $ P.bold "Done." + WarnIncomingRootBranch current hashes -> pure $ + if null hashes then P.wrap $ + "Please let someone know I generated an empty IncomingRootBranch" + <> " event, which shouldn't be possible!" + else P.lines + [ P.wrap $ (if length hashes == 1 then "A" else "Some") + <> "codebase" <> P.plural hashes "root" <> "appeared unexpectedly" + <> "with" <> P.group (P.plural hashes "hash" <> ":") + , "" + , (P.indentN 2 . P.oxfordCommas) + (map prettySBH $ toList hashes) + , "" + , P.wrap $ "and I'm not sure what to do about it." 
+ <> "The last root namespace hash that I knew about was:" + , "" + , P.indentN 2 $ prettySBH current + , "" + , P.wrap $ "Now might be a good time to make a backup of your codebase. 😬" + , "" + , P.wrap $ "After that, you might try using the" <> makeExample' IP.forkLocal + <> "command to inspect the namespaces listed above, and decide which" + <> "one you want as your root." + <> "You can also use" <> makeExample' IP.viewReflog <> "to see the" + <> "last few root namespace hashes on record." + , "" + , P.wrap $ "Once you find one you like, you can use the" + <> makeExample' IP.resetRoot <> "command to set it." + ] + LoadPullRequest baseNS headNS basePath headPath mergedPath squashedPath -> pure $ P.lines + [ P.wrap $ "I checked out" <> prettyRemoteNamespace baseNS <> "to" <> P.group (prettyPath' basePath <> ".") + , P.wrap $ "I checked out" <> prettyRemoteNamespace headNS <> "to" <> P.group (prettyPath' headPath <> ".") + , "" + , P.wrap $ "The merged result is in" <> P.group (prettyPath' mergedPath <> ".") + , P.wrap $ "The (squashed) merged result is in" <> P.group (prettyPath' squashedPath <> ".") + , P.wrap $ "Use" <> + IP.makeExample IP.diffNamespace + [prettyPath' basePath, prettyPath' mergedPath] + <> "or" <> + IP.makeExample IP.diffNamespace + [prettyPath' basePath, prettyPath' squashedPath] + <> "to see what's been updated." + , P.wrap $ "Use" <> + IP.makeExample IP.todo + [ prettyPath' (snoc mergedPath "patch") + , prettyPath' mergedPath ] + <> "to see what work is remaining for the merge." + , P.wrap $ "Use" <> + IP.makeExample IP.push + [prettyRemoteNamespace baseNS, prettyPath' mergedPath] <> + "or" <> + IP.makeExample IP.push + [prettyRemoteNamespace baseNS, prettyPath' squashedPath] + <> "to push the changes." 
+ ] + + DisplayDefinitions outputLoc ppe types terms -> + displayDefinitions outputLoc ppe types terms + DisplayRendered outputLoc pp -> + displayRendered outputLoc pp + DisplayLinks ppe md types terms -> + if Map.null md then pure $ P.wrap "Nothing to show here. Use the " + <> IP.makeExample' IP.link <> " command to add links from this definition." + else + pure $ intercalateMap "\n\n" go (Map.toList md) + where + go (_key, rs) = + displayDefinitions' ppe (Map.restrictKeys types rs) + (Map.restrictKeys terms rs) + TestResults stats ppe _showSuccess _showFailures oks fails -> case stats of + CachedTests 0 _ -> pure . P.callout "😶" $ "No tests to run." + CachedTests n n' | n == n' -> pure $ + P.lines [ cache, "", displayTestResults True ppe oks fails ] + CachedTests _n m -> pure $ + if m == 0 then "✅ " + else P.indentN 2 $ + P.lines [ "", cache, "", displayTestResults False ppe oks fails, "", "✅ " ] + where + NewlyComputed -> do + clearCurrentLine + pure $ P.lines [ + " " <> P.bold "New test results:", + "", + displayTestResults True ppe oks fails ] + where + cache = P.bold "Cached test results " <> "(`help testcache` to learn more)" + + TestIncrementalOutputStart ppe (n,total) r _src -> do + putPretty' $ P.shown (total - n) <> " tests left to run, current test: " + <> (P.syntaxToColor $ prettyHashQualified (PPE.termName ppe $ Referent.Ref r)) + pure mempty + + TestIncrementalOutputEnd _ppe (_n, _total) _r result -> do + clearCurrentLine + if isTestOk result then putPretty' " ✅ " + else putPretty' " 🚫 " + pure mempty + + MetadataMissingType ppe ref -> pure . P.fatalCallout . P.lines $ [ + P.wrap $ "The metadata value " <> P.red (prettyTermName ppe ref) + <> "is missing a type signature in the codebase.", + "", + P.wrap $ "This might be due to pulling an incomplete" + <> "or invalid codebase, or because files inside the codebase" + <> "are being deleted external to UCM." + ] + MetadataAmbiguous hq _ppe [] -> pure . P.warnCallout . 
+ P.wrap $ "I couldn't find any metadata matching " + <> P.syntaxToColor (prettyHashQualified hq) + MetadataAmbiguous _ ppe refs -> pure . P.warnCallout . P.lines $ [ + P.wrap $ "I'm not sure which metadata value you're referring to" + <> "since there are multiple matches:", + "", + P.indentN 2 $ P.spaced (P.blue . prettyTermName ppe <$> refs), + "", + tip "Try again and supply one of the above definitions explicitly." + ] + + EvaluationFailure err -> pure err + SearchTermsNotFound hqs | null hqs -> pure mempty + SearchTermsNotFound hqs -> + pure + $ P.warnCallout "The following names were not found in the codebase. Check your spelling." + <> P.newline + <> (P.syntaxToColor $ P.indent " " (P.lines (prettyHashQualified <$> hqs))) + PatchNotFound _ -> + pure . P.warnCallout $ "I don't know about that patch." + NameNotFound _ -> + pure . P.warnCallout $ "I don't know about that name." + TermNotFound _ -> + pure . P.warnCallout $ "I don't know about that term." + TypeNotFound _ -> + pure . P.warnCallout $ "I don't know about that type." + TermAlreadyExists _ _ -> + pure . P.warnCallout $ "A term by that name already exists." + TypeAlreadyExists _ _ -> + pure . P.warnCallout $ "A type by that name already exists." + PatchAlreadyExists _ -> + pure . P.warnCallout $ "A patch by that name already exists." + BranchEmpty b -> pure . P.warnCallout . P.wrap $ + P.group (either P.shown prettyPath' b) <> "is an empty namespace." + BranchNotEmpty path -> + pure . P.warnCallout $ "I was expecting the namespace " <> prettyPath' path + <> " to be empty for this operation, but it isn't." + CantDelete ppe failed failedDependents -> pure . P.warnCallout $ + P.lines [ + P.wrap "I couldn't delete ", + "", P.indentN 2 $ listOfDefinitions' ppe False failed, + "", + "because it's still being used by these definitions:", + "", P.indentN 2 $ listOfDefinitions' ppe False failedDependents + ] + CantUndo reason -> case reason of + CantUndoPastStart -> pure . 
P.warnCallout $ "Nothing more to undo." + CantUndoPastMerge -> pure . P.warnCallout $ "Sorry, I can't undo a merge (not implemented yet)." + NoMainFunction main ppe ts -> pure . P.callout "😶" $ P.lines [ + P.wrap $ "I looked for a function" <> P.backticked (P.string main) + <> "in the most recently typechecked file and codebase but couldn't find one. It has to have the type:", + "", + P.indentN 2 $ P.lines [ P.string main <> " : " <> TypePrinter.pretty ppe t | t <- ts ] + ] + NoUnisonFile -> do + dir' <- canonicalizePath dir + fileName <- renderFileName dir' + pure . P.callout "😶" $ P.lines + [ P.wrap "There's nothing for me to add right now." + , "" + , P.column2 [(P.bold "Hint:", msg fileName)] ] + where + msg dir = P.wrap + $ "I'm currently watching for definitions in .u files under the" + <> dir + <> "directory. Make sure you've updated something there before using the" + <> makeExample' IP.add <> "or" <> makeExample' IP.update + <> "commands, or use" <> makeExample' IP.load <> "to load a file explicitly." + InvalidSourceName name -> + pure . P.callout "😶" $ P.wrap $ "The file " + <> P.blue (P.shown name) + <> " does not exist or is not a valid source file." + SourceLoadFailed name -> + pure . P.callout "😶" $ P.wrap $ "The file " + <> P.blue (P.shown name) + <> " could not be loaded." + BranchNotFound b -> + pure . P.warnCallout $ "The namespace " <> P.blue (P.shown b) <> " doesn't exist." + CreatedNewBranch path -> pure $ + "☝️ The namespace " <> P.blue (P.shown path) <> " is empty." + -- RenameOutput rootPath oldName newName r -> do + -- nameChange "rename" "renamed" oldName newName r + -- AliasOutput rootPath existingName newName r -> do + -- nameChange "alias" "aliased" existingName newName r + DeletedEverything -> + pure . P.wrap . P.lines $ + ["Okay, I deleted everything except the history." 
+ ,"Use " <> IP.makeExample' IP.undo <> " to undo, or " + <> IP.makeExample' IP.mergeBuiltins + <> " to restore the absolute " + <> "basics to the current path."] + DeleteEverythingConfirmation -> + pure . P.warnCallout . P.lines $ + ["Are you sure you want to clear away everything?" + ,"You could use " <> IP.makeExample' IP.cd + <> " to switch to a new namespace instead."] + DeleteBranchConfirmation _uniqueDeletions -> error "todo" + -- let + -- pretty (branchName, (ppe, results)) = + -- header $ listOfDefinitions' ppe False results + -- where + -- header = plural uniqueDeletions id ((P.text branchName <> ":") `P.hang`) + -- + -- in putPrettyLn . P.warnCallout + -- $ P.wrap ("The" + -- <> plural uniqueDeletions "namespace contains" "namespaces contain" + -- <> "definitions that don't exist in any other branches:") + -- <> P.border 2 (mconcat (fmap pretty uniqueDeletions)) + -- <> P.newline + -- <> P.wrap "Please repeat the same command to confirm the deletion." + ListOfDefinitions ppe detailed results -> + listOfDefinitions ppe detailed results + ListOfLinks ppe results -> + listOfLinks ppe [ (name,tm) | (name,_ref,tm) <- results ] + ListNames _len [] [] -> pure . P.callout "😶" $ + P.wrap "I couldn't find anything by that name." + ListNames len types terms -> pure . P.sepNonEmpty "\n\n" $ [ + formatTypes types, formatTerms terms ] + where + formatTerms tms = + P.lines . P.nonEmpty $ P.plural tms (P.blue "Term") : (go <$> tms) where + go (ref, hqs) = P.column2 + [ ("Hash:", P.syntaxToColor (prettyReferent len ref)) + , ("Names: ", P.group (P.spaced (P.bold . P.syntaxToColor . prettyHashQualified' <$> toList hqs))) + ] + formatTypes types = + P.lines . P.nonEmpty $ P.plural types (P.blue "Type") : (go <$> types) where + go (ref, hqs) = P.column2 + [ ("Hash:", P.syntaxToColor (prettyReference len ref)) + , ("Names:", P.group (P.spaced (P.bold . P.syntaxToColor . 
prettyHashQualified' <$> toList hqs))) + ] + -- > names foo + -- Terms: + -- Hash: #asdflkjasdflkjasdf + -- Names: .util.frobnicate foo blarg.mcgee + -- + -- Term (with hash #asldfkjsdlfkjsdf): .util.frobnicate, foo, blarg.mcgee + -- Types (with hash #hsdflkjsdfsldkfj): Optional, Maybe, foo + ListShallow ppe entries -> pure $ + -- todo: make a version of prettyNumberedResult to support 3-columns + if null entries then P.lit "nothing to show" + else numberedEntries entries + where + numberedEntries :: [ShallowListEntry v a] -> P.Pretty P.ColorText + numberedEntries entries = + (P.column3 . fmap f) ([(1::Integer)..] `zip` fmap formatEntry entries) + where + f (i, (p1, p2)) = (P.hiBlack . fromString $ show i <> ".", p1, p2) + formatEntry :: ShallowListEntry v a -> (P.Pretty P.ColorText, P.Pretty P.ColorText) + formatEntry = \case + ShallowTermEntry _r hq ot -> + (P.syntaxToColor . prettyHashQualified' . fmap Name.fromSegment $ hq + , P.lit "(" <> maybe "type missing" (TypePrinter.pretty ppe) ot <> P.lit ")" ) + ShallowTypeEntry r hq -> + (P.syntaxToColor . prettyHashQualified' . fmap Name.fromSegment $ hq + ,isBuiltin r) + ShallowBranchEntry ns count -> + ((P.syntaxToColor . prettyName . Name.fromSegment) ns <> "/" + ,case count of + 1 -> P.lit ("(1 definition)") + _n -> P.lit "(" <> P.shown count <> P.lit " definitions)") + ShallowPatchEntry ns -> + ((P.syntaxToColor . prettyName . Name.fromSegment) ns + ,P.lit "(patch)") + isBuiltin = \case + Reference.Builtin{} -> P.lit "(builtin type)" + Reference.DerivedId{} -> P.lit "(type)" + + SlurpOutput input ppe s -> let + isPast = case input of Input.AddI{} -> True + Input.UpdateI{} -> True + _ -> False + in pure $ SlurpResult.pretty isPast ppe s + + NoExactTypeMatches -> + pure . P.callout "☝️" $ P.wrap "I couldn't find exact type matches, resorting to fuzzy matching..." + TypeParseError src e -> + pure . 
P.fatalCallout $ P.lines [ + P.wrap "I couldn't parse the type you supplied:", + "", + prettyParseError src e + ] + ParseResolutionFailures src es -> pure $ + prettyResolutionFailures src es + TypeHasFreeVars typ -> + pure . P.warnCallout $ P.lines [ + P.wrap "The type uses these names, but I'm not sure what they are:", + P.sep ", " (map (P.text . Var.name) . toList $ ABT.freeVars typ) + ] + ParseErrors src es -> + pure . P.sep "\n\n" $ prettyParseError (Text.unpack src) <$> es + TypeErrors src ppenv notes -> do + let showNote = + intercalateMap "\n\n" (printNoteWithSource ppenv (Text.unpack src)) + . map Result.TypeError + pure . showNote $ notes + Evaluated fileContents ppe bindings watches -> + if null watches then pure "\n" + else + -- todo: hashqualify binding names if necessary to distinguish them from + -- defs in the codebase. In some cases it's fine for bindings to + -- shadow codebase names, but you don't want it to capture them in + -- the decompiled output. + let prettyBindings = P.bracket . P.lines $ + P.wrap "The watch expression(s) reference these definitions:" : "" : + [(P.syntaxToColor $ TermPrinter.prettyBinding ppe (HQ.unsafeFromVar v) b) + | (v, b) <- bindings] + prettyWatches = P.sep "\n\n" [ + watchPrinter fileContents ppe ann kind evald isCacheHit | + (ann,kind,evald,isCacheHit) <- + sortOn (\(a,_,_,_)->a) . 
toList $ watches ] + -- todo: use P.nonempty + in pure $ if null bindings then prettyWatches + else prettyBindings <> "\n" <> prettyWatches + + DisplayConflicts termNamespace typeNamespace -> + pure $ P.sepNonEmpty "\n\n" [ + showConflicts "terms" terms, + showConflicts "types" types + ] + where + terms = R.dom termNamespace + types = R.dom typeNamespace + showConflicts :: Foldable f => Pretty -> f Name -> Pretty + showConflicts thingsName things = + if (null things) then mempty + else P.lines [ + "These " <> thingsName <> " have conflicts: ", "", + P.lines [ (" " <> prettyName x) | x <- toList things ] + ] + -- TODO: Present conflicting TermEdits and TypeEdits + -- if we ever allow users to edit hashes directly. + Typechecked sourceName ppe slurpResult uf -> do + let fileStatusMsg = SlurpResult.pretty False ppe slurpResult + let containsWatchExpressions = notNull $ UF.watchComponents uf + if UF.nonEmpty uf then do + fileName <- renderFileName $ Text.unpack sourceName + pure $ P.linesNonEmpty ([ + if fileStatusMsg == mempty then + P.okCallout $ fileName <> " changed." + else if SlurpResult.isAllDuplicates slurpResult then + P.wrap $ "I found and" + <> P.bold "typechecked" <> "the definitions in " + <> P.group (fileName <> ".") + <> "This file " <> P.bold "has been previously added" <> "to the codebase." + else + P.linesSpaced $ [ + P.wrap $ "I found and" + <> P.bold "typechecked" <> "these definitions in " + <> P.group (fileName <> ".") + <> "If you do an " + <> IP.makeExample' IP.add + <> " or " + <> P.group (IP.makeExample' IP.update <> ",") + <> "here's how your codebase would" + <> "change:" + , P.indentN 2 $ SlurpResult.pretty False ppe slurpResult + ] + ] ++ if containsWatchExpressions then [ + "", + P.wrap $ "Now evaluating any watch expressions" + <> "(lines starting with `>`)... " + <> P.group (P.hiBlack "Ctrl+C cancels.") + ] else []) + else if (null $ UF.watchComponents uf) then pure . 
P.wrap $ + "I loaded " <> P.text sourceName <> " and didn't find anything." + else pure mempty + + TodoOutput names todo -> pure (todoOutput names todo) + GitError input e -> pure $ case e of + CouldntParseRootBranch repo s -> P.wrap $ "I couldn't parse the string" + <> P.red (P.string s) <> "into a namespace hash, when opening the repository at" + <> P.group (prettyRepoBranch repo <> ".") + NoGit -> P.wrap $ + "I couldn't find git. Make sure it's installed and on your path." + CloneException repo msg -> P.wrap $ + "I couldn't clone the repository at" <> prettyRepoBranch repo <> ";" + <> "the error was:" <> (P.indentNAfterNewline 2 . P.group . P.string) msg + PushNoOp repo -> P.wrap $ + "The repository at" <> prettyRepoBranch repo <> "is already up-to-date." + PushException repo msg -> P.wrap $ + "I couldn't push to the repository at" <> prettyRepoRevision repo <> ";" + <> "the error was:" <> (P.indentNAfterNewline 2 . P.group . P.string) msg + UnrecognizableCacheDir uri localPath -> P.wrap $ "A cache directory for" + <> P.backticked (P.text uri) <> "already exists at" + <> P.backticked' (P.string localPath) "," <> "but it doesn't seem to" + <> "be a git repository, so I'm not sure what to do next. Delete it?" + UnrecognizableCheckoutDir uri localPath -> P.wrap $ "I tried to clone" + <> P.backticked (P.text uri) <> "into a cache directory at" + <> P.backticked' (P.string localPath) "," <> "but I can't recognize the" + <> "result as a git repository, so I'm not sure what to do next." + PushDestinationHasNewStuff repo -> + P.callout "⏸" . P.lines $ [ + P.wrap $ "The repository at" <> prettyRepoRevision repo + <> "has some changes I don't know about.", + "", + P.wrap $ "If you want to " <> push <> "you can do:", "", + P.indentN 2 pull, "", + P.wrap $ + "to merge these changes locally," <> + "then try your" <> push <> "again." + ] + where + push = P.group . P.backticked . P.string . IP1.patternName $ IP.patternFromInput input + pull = P.group . 
P.backticked $ IP.inputStringFromInput input + CouldntLoadRootBranch repo hash -> P.wrap + $ "I couldn't load the designated root hash" + <> P.group ("(" <> fromString (Hash.showBase32Hex hash) <> ")") + <> "from the repository at" <> prettyRepoRevision repo + NoRemoteNamespaceWithHash repo sbh -> P.wrap + $ "The repository at" <> prettyRepoRevision repo + <> "doesn't contain a namespace with the hash prefix" + <> (P.blue . P.text . SBH.toText) sbh + RemoteNamespaceHashAmbiguous repo sbh hashes -> P.lines [ + P.wrap $ "The namespace hash" <> prettySBH sbh + <> "at" <> prettyRepoRevision repo + <> "is ambiguous." + <> "Did you mean one of these hashes?", + "", + P.indentN 2 $ P.lines + (prettySBH . SBH.fromHash ((Text.length . SBH.toText) sbh * 2) + <$> Set.toList hashes), + "", + P.wrap "Try again with a few more hash characters to disambiguate." + ] + SomeOtherError msg -> P.callout "‼" . P.lines $ [ + P.wrap "I ran into an error:", "", + P.indentN 2 (P.string msg), "", + P.wrap $ "Check the logging messages above for more info." + ] + ListEdits patch ppe -> do + let + types = Patch._typeEdits patch + terms = Patch._termEdits patch + + prettyTermEdit (r, TermEdit.Deprecate) = + (P.syntaxToColor . prettyHashQualified . PPE.termName ppe . Referent.Ref $ r + , "-> (deprecated)") + prettyTermEdit (r, TermEdit.Replace r' _typing) = + (P.syntaxToColor . prettyHashQualified . PPE.termName ppe . Referent.Ref $ r + , "-> " <> (P.syntaxToColor . prettyHashQualified . PPE.termName ppe . Referent.Ref $ r')) + prettyTypeEdit (r, TypeEdit.Deprecate) = + (P.syntaxToColor . prettyHashQualified $ PPE.typeName ppe r + , "-> (deprecated)") + prettyTypeEdit (r, TypeEdit.Replace r') = + (P.syntaxToColor . prettyHashQualified $ PPE.typeName ppe r + , "-> " <> (P.syntaxToColor . prettyHashQualified . 
PPE.typeName ppe $ r')) + pure $ P.sepNonEmpty "\n\n" [ + if R.null types then mempty + else "Edited Types:" `P.hang` + P.column2 (prettyTypeEdit <$> R.toList types), + if R.null terms then mempty + else "Edited Terms:" `P.hang` + P.column2 (prettyTermEdit <$> R.toList terms), + if R.null types && R.null terms then "This patch is empty." + else tip . P.string $ "To remove entries from a patch, use " + <> IP.deleteTermReplacementCommand <> " or " + <> IP.deleteTypeReplacementCommand <> ", as appropriate." + ] + BustedBuiltins (Set.toList -> new) (Set.toList -> old) -> + -- todo: this could be prettier! Have a nice list like `find` gives, but + -- that requires querying the codebase to determine term types. Probably + -- the only built-in types will be primitive types like `Int`, so no need + -- to look up decl types. + -- When we add builtin terms, they may depend on new derived types, so + -- these derived types should be added to the branch too; but not + -- necessarily ever be automatically deprecated. (A library curator might + -- deprecate them; more work needs to go into the idea of sharing deprecations and stuff. + pure . P.warnCallout . P.lines $ + case (new, old) of + ([],[]) -> error "BustedBuiltins busted, as there were no busted builtins." + ([], old) -> + P.wrap ("This codebase includes some builtins that are considered deprecated. Use the " <> makeExample' IP.updateBuiltins <> " command when you're ready to work on eliminating them from your codebase:") + : "" + : fmap (P.text . Reference.toText) old + (new, []) -> P.wrap ("This version of Unison provides builtins that are not part of your codebase. Use " <> makeExample' IP.updateBuiltins <> " to add them:") + : "" : fmap (P.text . Reference.toText) new + (new@(_:_), old@(_:_)) -> + [ P.wrap + ("Sorry and/or good news! This version of Unison supports a different set of builtins than this codebase uses. 
You can use " + <> makeExample' IP.updateBuiltins + <> " to add the ones you're missing and deprecate the ones I'm missing. 😉" + ) + , "You're missing:" `P.hang` P.lines (fmap (P.text . Reference.toText) new) + , "I'm missing:" `P.hang` P.lines (fmap (P.text . Reference.toText) old) + ] + ListOfPatches patches -> pure $ + if null patches then P.lit "nothing to show" + else numberedPatches patches + where + numberedPatches :: Set Name -> P.Pretty P.ColorText + numberedPatches patches = + (P.column2 . fmap format) ([(1::Integer)..] `zip` (toList patches)) + where + format (i, p) = (P.hiBlack . fromString $ show i <> ".", prettyName p) + ConfiguredMetadataParseError p md err -> + pure . P.fatalCallout . P.lines $ + [ P.wrap $ "I couldn't understand the default metadata that's set for " + <> prettyPath' p <> " in .unisonConfig." + , P.wrap $ "The value I found was" + <> (P.backticked . P.blue . P.string) md + <> "but I encountered the following error when trying to parse it:" + , "" + , err + ] + NoConfiguredGitUrl pp p -> + pure . P.fatalCallout . P.wrap $ + "I don't know where to " <> + pushPull "push to!" "pull from!" pp <> + (if Path.isRoot' p then "" + else "Add a line like `GitUrl." <> P.shown p + <> " = ' to .unisonConfig. " + ) + <> "Type `help " <> pushPull "push" "pull" pp <> + "` for more information." + +-- | ConfiguredGitUrlParseError PushPull Path' Text String + ConfiguredGitUrlParseError pp p url err -> + pure . P.fatalCallout . P.lines $ + [ P.wrap $ "I couldn't understand the GitUrl that's set for" + <> prettyPath' p <> "in .unisonConfig" + , P.wrap $ "The value I found was" <> (P.backticked . P.blue . P.text) url + <> "but I encountered the following error when trying to parse it:" + , "" + , P.string err + , "" + , P.wrap $ "Type" <> P.backticked ("help " <> pushPull "push" "pull" pp) + <> "for more information." 
+ ] +-- | ConfiguredGitUrlIncludesShortBranchHash ShortBranchHash + ConfiguredGitUrlIncludesShortBranchHash pp repo sbh remotePath -> + pure . P.lines $ + [ P.wrap + $ "The `GitUrl.` entry in .unisonConfig for the current path has the value" + <> (P.group . (<>",") . P.blue . P.text) + (RemoteRepo.printNamespace repo (Just sbh) remotePath) + <> "which specifies a namespace hash" + <> P.group (P.blue (prettySBH sbh) <> ".") + , "" + , P.wrap $ + pushPull "I can't push to a specific hash, because it's immutable." + ("It's no use for repeated pulls," + <> "because you would just get the same immutable namespace each time.") + pp + , "" + , P.wrap $ "You can use" + <> P.backticked ( + pushPull "push" "pull" pp + <> " " + <> P.text (RemoteRepo.printNamespace repo Nothing remotePath)) + <> "if you want to" <> pushPull "push onto" "pull from" pp + <> "the latest." + ] + NoBranchWithHash _h -> pure . P.callout "😶" $ + P.wrap $ "I don't know of a namespace with that hash." + NotImplemented -> pure $ P.wrap "That's not implemented yet. Sorry! 😬" + BranchAlreadyExists p -> pure . P.wrap $ + "The namespace" <> prettyPath' p <> "already exists." + LabeledReferenceNotFound hq -> + pure . P.callout "\129300" . P.wrap . P.syntaxToColor $ + "Sorry, I couldn't find anything named" <> prettyHashQualified hq <> "." + LabeledReferenceAmbiguous hashLen hq (LD.partition -> (tps, tms)) -> + pure . P.callout "\129300" . P.lines $ [ + P.wrap "That name is ambiguous. It could refer to any of the following definitions:" + , "" + , P.indentN 2 (P.lines (map qualifyTerm tms ++ map qualifyType tps)) + ] + where + qualifyTerm :: Referent -> P.Pretty P.ColorText + qualifyTerm = P.syntaxToColor . case hq of + HQ.NameOnly n -> prettyNamedReferent hashLen n + HQ.HashQualified n _ -> prettyNamedReferent hashLen n + HQ.HashOnly _ -> prettyReferent hashLen + qualifyType :: Reference -> P.Pretty P.ColorText + qualifyType = P.syntaxToColor . 
case hq of + HQ.NameOnly n -> prettyNamedReference hashLen n + HQ.HashQualified n _ -> prettyNamedReference hashLen n + HQ.HashOnly _ -> prettyReference hashLen + DeleteNameAmbiguous hashLen p tms tys -> + pure . P.callout "\129300" . P.lines $ [ + P.wrap "That name is ambiguous. It could refer to any of the following definitions:" + , "" + , P.indentN 2 (P.lines (map qualifyTerm (Set.toList tms) ++ map qualifyType (Set.toList tys))) + , "" + , P.wrap "You may:" + , "" + , P.indentN 2 . P.bulleted $ + [ P.wrap "Delete one by an unambiguous name, given above." + , P.wrap "Delete them all by re-issuing the previous command." + ] + ] + where + name :: Name + name = Path.toName' (HQ'.toName (Path.unsplitHQ' p)) + qualifyTerm :: Referent -> P.Pretty P.ColorText + qualifyTerm = P.syntaxToColor . prettyNamedReferent hashLen name + qualifyType :: Reference -> P.Pretty P.ColorText + qualifyType = P.syntaxToColor . prettyNamedReference hashLen name + TermAmbiguous _ _ -> pure "That term is ambiguous." + HashAmbiguous h rs -> pure . P.callout "\129300" . P.lines $ [ + P.wrap $ "The hash" <> prettyShortHash h <> "is ambiguous." + <> "Did you mean one of these hashes?", + "", + P.indentN 2 $ P.lines (P.shown <$> Set.toList rs), + "", + P.wrap "Try again with a few more hash characters to disambiguate." + ] + BranchHashAmbiguous h rs -> pure . P.callout "\129300" . P.lines $ [ + P.wrap $ "The namespace hash" <> prettySBH h <> "is ambiguous." + <> "Did you mean one of these hashes?", + "", + P.indentN 2 $ P.lines (prettySBH <$> Set.toList rs), + "", + P.wrap "Try again with a few more hash characters to disambiguate." + ] + BadName n -> + pure . P.wrap $ P.string n <> " is not a kind of name I understand." + TermNotFound' sh -> + pure $ "I could't find a term with hash " + <> (prettyShortHash sh) + TypeNotFound' sh -> + pure $ "I could't find a type with hash " + <> (prettyShortHash sh) + NothingToPatch _patchPath dest -> pure $ + P.callout "😶" . P.wrap + $ "This had no effect. 
Perhaps the patch has already been applied" + <> "or it doesn't intersect with the definitions in" + <> P.group (prettyPath' dest <> ".") + PatchNeedsToBeConflictFree -> + pure . P.wrap $ + "I tried to auto-apply the patch, but couldn't because it contained" + <> "contradictory entries." + PatchInvolvesExternalDependents _ _ -> + pure "That patch involves external dependents." + ShowReflog [] -> pure . P.warnCallout $ "The reflog appears to be empty!" + ShowReflog entries -> pure $ + P.lines [ + P.wrap $ "Here is a log of the root namespace hashes," + <> "starting with the most recent," + <> "along with the command that got us there." + <> "Try:", + "", + -- `head . tail` is safe: entries never has 1 entry, and [] is handled above + let e2 = head . tail $ entries in + P.indentN 2 . P.wrapColumn2 $ [ + (IP.makeExample IP.forkLocal ["2", ".old"], + ""), + (IP.makeExample IP.forkLocal [prettySBH . Output.hash $ e2, ".old"], + "to make an old namespace accessible again,"), + (mempty,mempty), + (IP.makeExample IP.resetRoot [prettySBH . Output.hash $ e2], + "to reset the root namespace and its history to that of the specified" + <> "namespace.") + ], + "", + P.numberedList . fmap renderEntry $ entries + ] + where + renderEntry :: Output.ReflogEntry -> P.Pretty CT.ColorText + renderEntry (Output.ReflogEntry hash reason) = P.wrap $ + P.blue (prettySBH hash) <> " : " <> P.text reason + History _cap history tail -> pure $ + P.lines [ + note $ "The most recent namespace hash is immediately below this message.", "", + P.sep "\n\n" [ go h diff | (h,diff) <- reverse history ], "", + tailMsg + ] + where + tailMsg = case tail of + E.EndOfLog h -> P.lines [ + "□ " <> prettySBH h <> " (start of history)" + ] + E.MergeTail h hs -> P.lines [ + P.wrap $ "This segment of history starts with a merge." <> ex, + "", + "⊙ " <> prettySBH h, + "⑃", + P.lines (prettySBH <$> hs) + ] + E.PageEnd h _n -> P.lines [ + P.wrap $ "There's more history before the versions shown here." 
<> ex, "", + dots, "", + "⊙ " <> prettySBH h, + "" + ] + dots = "⠇" + go hash diff = P.lines [ + "⊙ " <> prettySBH hash, + "", + P.indentN 2 $ prettyDiff diff + ] + ex = "Use" <> IP.makeExample IP.history ["#som3n4m3space"] + <> "to view history starting from a given namespace hash." + StartOfCurrentPathHistory -> pure $ + P.wrap "You're already at the very beginning! 🙂" + PullAlreadyUpToDate ns dest -> pure . P.callout "😶" $ + P.wrap $ prettyPath' dest <> "was already up-to-date with" + <> P.group (prettyRemoteNamespace ns <> ".") + + MergeAlreadyUpToDate src dest -> pure . P.callout "😶" $ + P.wrap $ prettyPath' dest <> "was already up-to-date with" + <> P.group (prettyPath' src <> ".") + PreviewMergeAlreadyUpToDate src dest -> pure . P.callout "😶" $ + P.wrap $ prettyPath' dest <> "is already up-to-date with" + <> P.group (prettyPath' src <> ".") + DumpNumberedArgs args -> pure . P.numberedList $ fmap P.string args + NoConflictsOrEdits -> + pure (P.okCallout "No conflicts or edits in progress.") + NoOp -> pure $ P.string "I didn't make any changes." + DefaultMetadataNotification -> pure $ P.wrap "I added some default metadata." + DumpBitBooster head map -> let + go output [] = output + go output (head : queue) = case Map.lookup head map of + Nothing -> go (renderLine head [] : output) queue + Just tails -> go (renderLine head tails : output) (queue ++ tails) + where + renderHash = take 10 . Text.unpack . Hash.base32Hex . Causal.unRawHash + renderLine head tail = + (renderHash head) ++ "|" ++ intercalateMap " " renderHash tail ++ + case Map.lookup (Hash.base32Hex . Causal.unRawHash $ head) tags of + Just t -> "|tag: " ++ t + Nothing -> "" + -- some specific hashes that we want to label in the output + tags :: Map Text String + tags = Map.fromList . 
fmap swap $ + [ ("unisonbase 2019/8/6", "54s9qjhaonotuo4sp6ujanq7brngk32f30qt5uj61jb461h9fcca6vv5levnoo498bavne4p65lut6k6a7rekaruruh9fsl19agu8j8") + , ("unisonbase 2019/8/5", "focmbmg7ca7ht7opvjaqen58fobu3lijfa9adqp7a1l1rlkactd7okoimpfmd0ftfmlch8gucleh54t3rd1e7f13fgei86hnsr6dt1g") + , ("unisonbase 2019/7/31", "jm2ltsg8hh2b3c3re7aru6e71oepkqlc3skr2v7bqm4h1qgl3srucnmjcl1nb8c9ltdv56dpsgpdur1jhpfs6n5h43kig5bs4vs50co") + , ("unisonbase 2019/7/25", "an1kuqsa9ca8tqll92m20tvrmdfk0eksplgjbda13evdlngbcn5q72h8u6nb86ojr7cvnemjp70h8cq1n95osgid1koraq3uk377g7g") + , ("ucm m1b", "o6qocrqcqht2djicb1gcmm5ct4nr45f8g10m86bidjt8meqablp0070qae2tvutnvk4m9l7o1bkakg49c74gduo9eati20ojf0bendo") + , ("ucm m1, m1a", "auheev8io1fns2pdcnpf85edsddj27crpo9ajdujum78dsncvfdcdu5o7qt186bob417dgmbd26m8idod86080bfivng1edminu3hug") + ] + + in pure $ P.lines [ + P.lines (fmap fromString . reverse . nubOrd $ go [] [head]), + "", + "Paste that output into http://bit-booster.com/graph.html" + ] + ListDependents hqLength ld names missing -> pure $ + if names == mempty && missing == mempty + then c (prettyLabeledDependency hqLength ld) <> " doesn't have any dependents." + else + "Dependents of " <> c (prettyLabeledDependency hqLength ld) <> ":\n\n" <> + (P.indentN 2 (P.numberedColumn2Header num pairs)) + where + num n = P.hiBlack $ P.shown n <> "." + header = (P.hiBlack "Reference", P.hiBlack "Name") + pairs = header : (fmap (first c . second c) $ + [ (p $ Reference.toShortHash r, prettyName n) | (n, r) <- names ] ++ + [ (p $ Reference.toShortHash r, "(no name available)") | r <- toList missing ]) + p = prettyShortHash . SH.take hqLength + c = P.syntaxToColor + -- this definition is identical to the previous one, apart from the word + -- "Dependencies", but undecided about whether or how to refactor + ListDependencies hqLength ld names missing -> pure $ + if names == mempty && missing == mempty + then c (prettyLabeledDependency hqLength ld) <> " doesn't have any dependencies." 
+ else + "Dependencies of " <> c (prettyLabeledDependency hqLength ld) <> ":\n\n" <> + (P.indentN 2 (P.numberedColumn2Header num pairs)) + where + num n = P.hiBlack $ P.shown n <> "." + header = (P.hiBlack "Reference", P.hiBlack "Name") + pairs = header : (fmap (first c . second c) $ + [ (p $ Reference.toShortHash r, prettyName n) | (n, r) <- names ] ++ + [ (p $ Reference.toShortHash r, "(no name available)") | r <- toList missing ]) + p = prettyShortHash . SH.take hqLength + c = P.syntaxToColor + DumpUnisonFileHashes hqLength datas effects terms -> + pure . P.syntaxToColor . P.lines $ + (effects <&> \(n,r) -> "ability " <> + prettyHashQualified' (HQ'.take hqLength . HQ'.fromNamedReference n $ Reference.DerivedId r)) <> + (datas <&> \(n,r) -> "type " <> + prettyHashQualified' (HQ'.take hqLength . HQ'.fromNamedReference n $ Reference.DerivedId r)) <> + (terms <&> \(n,r) -> + prettyHashQualified' (HQ'.take hqLength . HQ'.fromNamedReference n $ Reference.DerivedId r)) + + where + _nameChange _cmd _pastTenseCmd _oldName _newName _r = error "todo" + -- do + -- when (not . Set.null $ E.changedSuccessfully r) . putPrettyLn . P.okCallout $ + -- P.wrap $ "I" <> pastTenseCmd <> "the" + -- <> ns (E.changedSuccessfully r) + -- <> P.blue (prettyName oldName) + -- <> "to" <> P.group (P.green (prettyName newName) <> ".") + -- when (not . Set.null $ E.oldNameConflicted r) . putPrettyLn . P.warnCallout $ + -- (P.wrap $ "I couldn't" <> cmd <> "the" + -- <> ns (E.oldNameConflicted r) + -- <> P.blue (prettyName oldName) + -- <> "to" <> P.green (prettyName newName) + -- <> "because of conflicts.") + -- <> "\n\n" + -- <> tip ("Use " <> makeExample' IP.todo <> " to view more information on conflicts and remaining work.") + -- when (not . Set.null $ E.newNameAlreadyExists r) . putPrettyLn . 
P.warnCallout $ + -- (P.wrap $ "I couldn't" <> cmd <> P.blue (prettyName oldName) + -- <> "to" <> P.green (prettyName newName) + -- <> "because the " + -- <> ns (E.newNameAlreadyExists r) + -- <> "already exist(s).") + -- <> "\n\n" + -- <> tip + -- ("Use" <> makeExample IP.rename [prettyName newName, ""] <> "to make" <> prettyName newName <> "available.") +-- where +-- ns targets = P.oxfordCommas $ +-- map (fromString . Names.renderNameTarget) (toList targets) + +prettyPath' :: Path.Path' -> Pretty +prettyPath' p' = + if Path.isCurrentPath p' + then "the current namespace" + else P.blue (P.shown p') + +prettyRelative :: Path.Relative -> Pretty +prettyRelative = P.blue . P.shown + +prettySBH :: IsString s => ShortBranchHash -> P.Pretty s +prettySBH hash = P.group $ "#" <> P.text (SBH.toText hash) + +formatMissingStuff :: (Show tm, Show typ) => + [(HQ.HashQualified, tm)] -> [(HQ.HashQualified, typ)] -> Pretty +formatMissingStuff terms types = + (unlessM (null terms) . P.fatalCallout $ + P.wrap "The following terms have a missing or corrupted type signature:" + <> "\n\n" + <> P.column2 [ (P.syntaxToColor $ prettyHashQualified name, fromString (show ref)) | (name, ref) <- terms ]) <> + (unlessM (null types) . P.fatalCallout $ + P.wrap "The following types weren't found in the codebase:" + <> "\n\n" + <> P.column2 [ (P.syntaxToColor $ prettyHashQualified name, fromString (show ref)) | (name, ref) <- types ]) + +displayDefinitions' :: Var v => Ord a1 + => PPE.PrettyPrintEnvDecl + -> Map Reference.Reference (DisplayThing (DD.Decl v a1)) + -> Map Reference.Reference (DisplayThing (Term v a1)) + -> Pretty +displayDefinitions' ppe0 types terms = P.syntaxToColor $ P.sep "\n\n" (prettyTypes <> prettyTerms) + where + ppeBody r = PPE.declarationPPE ppe0 r + ppeDecl = PPE.unsuffixifiedPPE ppe0 + prettyTerms = map go . Map.toList + -- sort by name + $ Map.mapKeys (first (PPE.termName ppeDecl . Referent.Ref) . dupe) terms + prettyTypes = map go2 . 
Map.toList + $ Map.mapKeys (first (PPE.typeName ppeDecl) . dupe) types + go ((n, r), dt) = + case dt of + MissingThing r -> missing n r + BuiltinThing -> builtin n + RegularThing tm -> TermPrinter.prettyBinding (ppeBody r) n tm + go2 ((n, r), dt) = + case dt of + MissingThing r -> missing n r + BuiltinThing -> builtin n + RegularThing decl -> case decl of + Left d -> DeclPrinter.prettyEffectDecl (ppeBody r) r n d + Right d -> DeclPrinter.prettyDataDecl (ppeBody r) r n d + builtin n = P.wrap $ "--" <> prettyHashQualified n <> " is built-in." + missing n r = P.wrap ( + "-- The name " <> prettyHashQualified n <> " is assigned to the " + <> "reference " <> fromString (show r ++ ",") + <> "which is missing from the codebase.") + <> P.newline + <> tip "You might need to repair the codebase manually." + +displayRendered :: Maybe FilePath -> Pretty -> IO Pretty +displayRendered outputLoc pp = + maybe (pure pp) scratchAndDisplay outputLoc + where + scratchAndDisplay path = do + path' <- canonicalizePath path + prependToFile pp path' + pure (message pp path') + where + prependToFile pp path = do + existingContents <- do + exists <- doesFileExist path + if exists then readFile path + else pure "" + writeFile path . Text.pack . P.toPlain 80 $ + P.lines [ pp, "", P.text existingContents ] + message pp path = + P.callout "☝️" $ P.lines [ + P.wrap $ "I added this to the top of " <> fromString path, + "", + P.indentN 2 pp + ] + +displayDefinitions :: Var v => Ord a1 => + Maybe FilePath + -> PPE.PrettyPrintEnvDecl + -> Map Reference.Reference (DisplayThing (DD.Decl v a1)) + -> Map Reference.Reference (DisplayThing (Term v a1)) + -> IO Pretty +displayDefinitions _outputLoc _ppe types terms | Map.null types && Map.null terms = + pure $ P.callout "😶" "No results to display." 
+displayDefinitions outputLoc ppe types terms = + maybe displayOnly scratchAndDisplay outputLoc + where + displayOnly = pure code + scratchAndDisplay path = do + path' <- canonicalizePath path + prependToFile code path' + pure (message code path') + where + prependToFile code path = do + existingContents <- do + exists <- doesFileExist path + if exists then readFile path + else pure "" + writeFile path . Text.pack . P.toPlain 80 $ + P.lines [ code, "" + , "---- " <> "Anything below this line is ignored by Unison." + , "", P.text existingContents ] + message code path = + P.callout "☝️" $ P.lines [ + P.wrap $ "I added these definitions to the top of " <> fromString path, + "", + P.indentN 2 code, + "", + P.wrap $ + "You can edit them there, then do" <> makeExample' IP.update <> + "to replace the definitions currently in this namespace." + ] + code = displayDefinitions' ppe types terms + +displayTestResults :: Bool -- whether to show the tip + -> PPE.PrettyPrintEnv + -> [(Reference, Text)] + -> [(Reference, Text)] + -> Pretty +displayTestResults showTip ppe oks fails = let + name r = P.text (HQ.toText $ PPE.termName ppe (Referent.Ref r)) + okMsg = + if null oks then mempty + else P.column2 [ (P.green "◉ " <> name r, " " <> P.green (P.text msg)) | (r, msg) <- oks ] + okSummary = + if null oks then mempty + else "✅ " <> P.bold (P.num (length oks)) <> P.green " test(s) passing" + failMsg = + if null fails then mempty + else P.column2 [ (P.red "✗ " <> name r, " " <> P.red (P.text msg)) | (r, msg) <- fails ] + failSummary = + if null fails then mempty + else "🚫 " <> P.bold (P.num (length fails)) <> P.red " test(s) failing" + tipMsg = + if not showTip || (null oks && null fails) then mempty + else tip $ "Use " <> P.blue ("view " <> name (fst $ head (fails ++ oks))) <> "to view the source of a test." + in if null oks && null fails then "😶 No tests available." + else P.sep "\n\n" . P.nonEmpty $ [ + okMsg, failMsg, + P.sep ", " . 
P.nonEmpty $ [failSummary, okSummary], tipMsg] + +unsafePrettyTermResultSig' :: Var v => + PPE.PrettyPrintEnv -> SR'.TermResult' v a -> Pretty +unsafePrettyTermResultSig' ppe = \case + SR'.TermResult' (HQ'.toHQ -> name) (Just typ) _r _aliases -> + head (TypePrinter.prettySignatures' ppe [(name,typ)]) + _ -> error "Don't pass Nothing" + +-- produces: +-- -- #5v5UtREE1fTiyTsTK2zJ1YNqfiF25SkfUnnji86Lms#0 +-- Optional.None, Maybe.Nothing : Maybe a +unsafePrettyTermResultSigFull' :: Var v => + PPE.PrettyPrintEnv -> SR'.TermResult' v a -> Pretty +unsafePrettyTermResultSigFull' ppe = \case + SR'.TermResult' (HQ'.toHQ -> hq) (Just typ) r (Set.map HQ'.toHQ -> aliases) -> + P.lines + [ P.hiBlack "-- " <> greyHash (HQ.fromReferent r) + , P.group $ + P.commas (fmap greyHash $ hq : toList aliases) <> " : " + <> (P.syntaxToColor $ TypePrinter.pretty0 ppe mempty (-1) typ) + , mempty + ] + _ -> error "Don't pass Nothing" + where greyHash = styleHashQualified' id P.hiBlack + +prettyTypeResultHeader' :: Var v => SR'.TypeResult' v a -> Pretty +prettyTypeResultHeader' (SR'.TypeResult' (HQ'.toHQ -> name) dt r _aliases) = + prettyDeclTriple (name, r, dt) + +-- produces: +-- -- #5v5UtREE1fTiyTsTK2zJ1YNqfiF25SkfUnnji86Lms +-- type Optional +-- type Maybe +prettyTypeResultHeaderFull' :: Var v => SR'.TypeResult' v a -> Pretty +prettyTypeResultHeaderFull' (SR'.TypeResult' (HQ'.toHQ -> name) dt r (Set.map HQ'.toHQ -> aliases)) = + P.lines stuff <> P.newline + where + stuff = + (P.hiBlack "-- " <> greyHash (HQ.fromReference r)) : + fmap (\name -> prettyDeclTriple (name, r, dt)) + (name : toList aliases) + where greyHash = styleHashQualified' id P.hiBlack + +prettyDeclTriple :: Var v => + (HQ.HashQualified, Reference.Reference, DisplayThing (DD.Decl v a)) + -> Pretty +prettyDeclTriple (name, _, displayDecl) = case displayDecl of + BuiltinThing -> P.hiBlack "builtin " <> P.hiBlue "type " <> P.blue (P.syntaxToColor $ prettyHashQualified name) + MissingThing _ -> mempty -- these need to be handled 
elsewhere + RegularThing decl -> case decl of + Left ed -> P.syntaxToColor $ DeclPrinter.prettyEffectHeader name ed + Right dd -> P.syntaxToColor $ DeclPrinter.prettyDataHeader name dd + +prettyDeclPair :: Var v => + PPE.PrettyPrintEnv -> (Reference, DisplayThing (DD.Decl v a)) + -> Pretty +prettyDeclPair ppe (r, dt) = prettyDeclTriple (PPE.typeName ppe r, r, dt) + +renderNameConflicts :: Set.Set Name -> Set.Set Name -> Pretty +renderNameConflicts conflictedTypeNames conflictedTermNames = + unlessM (null allNames) $ P.callout "❓" . P.sep "\n\n" . P.nonEmpty $ [ + showConflictedNames "types" conflictedTypeNames, + showConflictedNames "terms" conflictedTermNames, + tip $ "This occurs when merging branches that both independently introduce the same name. Use " + <> makeExample IP.view (prettyName <$> take 3 allNames) + <> "to see the conflicting defintions, then use " + <> makeExample' (if (not . null) conflictedTypeNames + then IP.renameType else IP.renameTerm) + <> "to resolve the conflicts." + ] + where + allNames = toList (conflictedTermNames <> conflictedTypeNames) + showConflictedNames things conflictedNames = + unlessM (Set.null conflictedNames) $ + P.wrap ("These" <> P.bold (things <> "have conflicting definitions:")) + `P.hang` P.commas (P.blue . prettyName <$> toList conflictedNames) + +renderEditConflicts :: + PPE.PrettyPrintEnv -> Patch -> Pretty +renderEditConflicts ppe Patch{..} = + unlessM (null editConflicts) . P.callout "❓" . P.sep "\n\n" $ [ + P.wrap $ "These" <> P.bold "definitions were edited differently" + <> "in namespaces that have been merged into this one." + <> "You'll have to tell me what to use as the new definition:", + P.indentN 2 (P.lines (formatConflict <$> editConflicts)) +-- , tip $ "Use " <> makeExample IP.resolve [name (head editConflicts), " "] <> " to pick a replacement." -- todo: eventually something with `edit` + ] + where + -- todo: could possibly simplify all of this, but today is a copy/paste day. 
+ editConflicts :: [Either (Reference, Set TypeEdit.TypeEdit) (Reference, Set TermEdit.TermEdit)] + editConflicts = + (fmap Left . Map.toList . R.toMultimap . R.filterManyDom $ _typeEdits) <> + (fmap Right . Map.toList . R.toMultimap . R.filterManyDom $ _termEdits) + typeName r = styleHashQualified P.bold (PPE.typeName ppe r) + termName r = styleHashQualified P.bold (PPE.termName ppe (Referent.Ref r)) + formatTypeEdits (r, toList -> es) = P.wrap $ + "The type" <> typeName r <> "was" <> + (if TypeEdit.Deprecate `elem` es + then "deprecated and also replaced with" + else "replaced with") <> + P.oxfordCommas [ typeName r | TypeEdit.Replace r <- es ] + formatTermEdits (r, toList -> es) = P.wrap $ + "The term" <> termName r <> "was" <> + (if TermEdit.Deprecate `elem` es + then "deprecated and also replaced with" + else "replaced with") <> + P.oxfordCommas [ termName r | TermEdit.Replace r _ <- es ] + formatConflict = either formatTypeEdits formatTermEdits + +type Numbered = State.State (Int, Seq.Seq String) + +todoOutput :: Var v => PPE.PrettyPrintEnvDecl -> TO.TodoOutput v a -> Pretty +todoOutput ppe todo = + todoConflicts <> todoEdits + where + ppeu = PPE.unsuffixifiedPPE ppe + ppes = PPE.suffixifiedPPE ppe + (frontierTerms, frontierTypes) = TO.todoFrontier todo + (dirtyTerms, dirtyTypes) = TO.todoFrontierDependents todo + corruptTerms = + [ (PPE.termName ppeu (Referent.Ref r), r) | (r, Nothing) <- frontierTerms ] + corruptTypes = + [ (PPE.typeName ppeu r, r) | (r, MissingThing _) <- frontierTypes ] + goodTerms ts = + [ (PPE.termName ppeu (Referent.Ref r), typ) | (r, Just typ) <- ts ] + todoConflicts = if TO.noConflicts todo then mempty else P.lines . 
P.nonEmpty $ + [ renderEditConflicts ppeu (TO.editConflicts todo) + , renderNameConflicts conflictedTypeNames conflictedTermNames ] + where + -- If a conflict is both an edit and a name conflict, we show it in the edit + -- conflicts section + c :: Names0 + c = removeEditConflicts (TO.editConflicts todo) (TO.nameConflicts todo) + conflictedTypeNames = (R.dom . Names.types) c + conflictedTermNames = (R.dom . Names.terms) c + -- e.g. `foo#a` has been independently updated to `foo#b` and `foo#c`. + -- This means there will be a name conflict: + -- foo -> #b + -- foo -> #c + -- as well as an edit conflict: + -- #a -> #b + -- #a -> #c + -- We want to hide/ignore the name conflicts that are also targets of an + -- edit conflict, so that the edit conflict will be dealt with first. + -- For example, if hash `h` has multiple edit targets { #x, #y, #z, ...}, + -- we'll temporarily remove name conflicts pointing to { #x, #y, #z, ...}. + removeEditConflicts :: Ord n => Patch -> Names' n -> Names' n + removeEditConflicts Patch{..} Names{..} = Names terms' types' where + terms' = R.filterRan (`Set.notMember` conflictedTermEditTargets) terms + types' = R.filterRan (`Set.notMember` conflictedTypeEditTargets) types + conflictedTypeEditTargets :: Set Reference + conflictedTypeEditTargets = + Set.fromList $ toList (R.ran typeEditConflicts) >>= TypeEdit.references + conflictedTermEditTargets :: Set Referent.Referent + conflictedTermEditTargets = + Set.fromList . fmap Referent.Ref + $ toList (R.ran termEditConflicts) >>= TermEdit.references + typeEditConflicts = R.filterDom (`R.manyDom` _typeEdits) _typeEdits + termEditConflicts = R.filterDom (`R.manyDom` _termEdits) _termEdits + + + todoEdits = unlessM (TO.noEdits todo) . P.callout "🚧" . P.sep "\n\n" . P.nonEmpty $ + [ P.wrap ("The namespace has" <> fromString (show (TO.todoScore todo)) + <> "transitive dependent(s) left to upgrade." + <> "Your edit frontier is the dependents of these definitions:") + , P.indentN 2 . 
P.lines $ ( + (prettyDeclPair ppeu <$> toList frontierTypes) ++ + TypePrinter.prettySignatures' ppes (goodTerms frontierTerms) + ) + , P.wrap "I recommend working on them in the following order:" + , P.numberedList $ + let unscore (_score,a,b) = (a,b) + in (prettyDeclPair ppeu . unscore <$> toList dirtyTypes) ++ + TypePrinter.prettySignatures' + ppes + (goodTerms $ unscore <$> dirtyTerms) + , formatMissingStuff corruptTerms corruptTypes + ] + +listOfDefinitions :: + Var v => PPE.PrettyPrintEnv -> E.ListDetailed -> [SR'.SearchResult' v a] -> IO Pretty +listOfDefinitions ppe detailed results = + pure $ listOfDefinitions' ppe detailed results + +listOfLinks :: + Var v => PPE.PrettyPrintEnv -> [(HQ.HashQualified, Maybe (Type v a))] -> IO Pretty +listOfLinks _ [] = pure . P.callout "😶" . P.wrap $ + "No results. Try using the " <> + IP.makeExample IP.link [] <> + "command to add metadata to a definition." +listOfLinks ppe results = pure $ P.lines [ + P.numberedColumn2 num [ + (P.syntaxToColor $ prettyHashQualified hq, ": " <> prettyType typ) | (hq,typ) <- results + ], "", + tip $ "Try using" <> IP.makeExample IP.display ["1"] + <> "to display the first result or" + <> IP.makeExample IP.view ["1"] <> "to view its source." + ] + where + num i = P.hiBlack $ P.shown i <> "." + prettyType Nothing = "❓ (missing a type for this definition)" + prettyType (Just t) = TypePrinter.pretty ppe t + +data ShowNumbers = ShowNumbers | HideNumbers +-- | `ppe` is just for rendering type signatures +-- `oldPath, newPath :: Path.Absolute` are just for producing fully-qualified +-- numbered args +showDiffNamespace :: forall v . 
Var v + => ShowNumbers + -> PPE.PrettyPrintEnv + -> Path.Absolute + -> Path.Absolute + -> OBD.BranchDiffOutput v Ann + -> (Pretty, NumberedArgs) +showDiffNamespace _ _ _ _ diffOutput | OBD.isEmpty diffOutput = + ("The namespaces are identical.", mempty) +showDiffNamespace sn ppe oldPath newPath OBD.BranchDiffOutput{..} = + (P.sepNonEmpty "\n\n" p, toList args) + where + (p, (menuSize, args)) = (`State.runState` (0::Int, Seq.empty)) $ sequence [ + if (not . null) newTypeConflicts + || (not . null) newTermConflicts + then do + prettyUpdatedTypes :: [Pretty] <- traverse prettyUpdateType newTypeConflicts + prettyUpdatedTerms :: [Pretty] <- traverse prettyUpdateTerm newTermConflicts + pure $ P.sepNonEmpty "\n\n" + [ P.red "New name conflicts:" + , P.indentN 2 . P.sepNonEmpty "\n\n" $ prettyUpdatedTypes <> prettyUpdatedTerms + ] + else pure mempty + ,if (not . null) resolvedTypeConflicts + || (not . null) resolvedTermConflicts + then do + prettyUpdatedTypes :: [Pretty] <- traverse prettyUpdateType resolvedTypeConflicts + prettyUpdatedTerms :: [Pretty] <- traverse prettyUpdateTerm resolvedTermConflicts + pure $ P.sepNonEmpty "\n\n" + [ P.bold "Resolved name conflicts:" + , P.indentN 2 . P.sepNonEmpty "\n\n" $ prettyUpdatedTypes <> prettyUpdatedTerms + ] + else pure mempty + ,if (not . null) updatedTypes + || (not . null) updatedTerms + || propagatedUpdates > 0 + || (not . null) updatedPatches + then do + prettyUpdatedTypes :: [Pretty] <- traverse prettyUpdateType updatedTypes + prettyUpdatedTerms :: [Pretty] <- traverse prettyUpdateTerm updatedTerms + prettyUpdatedPatches :: [Pretty] <- traverse (prettySummarizePatch newPath) updatedPatches + pure $ P.sepNonEmpty "\n\n" + [ P.bold "Updates:" + , P.indentNonEmptyN 2 . P.sepNonEmpty "\n\n" $ prettyUpdatedTypes <> prettyUpdatedTerms + , if propagatedUpdates > 0 + then P.indentN 2 + $ P.wrap (P.hiBlack $ "There were " + <> P.shown propagatedUpdates + <> "auto-propagated updates.") + else mempty + , P.indentNonEmptyN 2 . 
P.linesNonEmpty $ prettyUpdatedPatches + ] + else pure mempty + ,if (not . null) addedTypes + || (not . null) addedTerms + || (not . null) addedPatches + then do + prettyAddedTypes :: Pretty <- prettyAddTypes addedTypes + prettyAddedTerms :: Pretty <- prettyAddTerms addedTerms + prettyAddedPatches :: [Pretty] <- traverse (prettySummarizePatch newPath) addedPatches + pure $ P.sepNonEmpty "\n\n" + [ P.bold "Added definitions:" + , P.indentNonEmptyN 2 $ P.linesNonEmpty [prettyAddedTypes, prettyAddedTerms] + , P.indentNonEmptyN 2 $ P.lines prettyAddedPatches + ] + else pure mempty + ,if (not . null) removedTypes + || (not . null) removedTerms + || (not . null) removedPatches + then do + prettyRemovedTypes :: Pretty <- prettyRemoveTypes removedTypes + prettyRemovedTerms :: Pretty <- prettyRemoveTerms removedTerms + prettyRemovedPatches :: [Pretty] <- traverse (prettyNamePatch oldPath) removedPatches + pure $ P.sepNonEmpty "\n\n" + [ P.bold "Removed definitions:" + , P.indentN 2 $ P.linesNonEmpty [ prettyRemovedTypes + , prettyRemovedTerms + , P.linesNonEmpty prettyRemovedPatches ] + ] + else pure mempty + ,if (not . null) renamedTypes + || (not . null) renamedTerms + then do + results <- prettyRenameGroups renamedTypes renamedTerms + pure $ P.sepNonEmpty "\n\n" + [ P.bold "Name changes:" + , P.indentN 2 . P.sepNonEmpty "\n\n" $ results + ] + -- todo: change separator to just '\n' here if all the results are 1 to 1 + else pure mempty + ] + + {- new implementation + 23. X ┐ => (added) 24. X' + 25. X2 ┘ (removed) 26. X2 + -} + prettyRenameGroups :: [OBD.RenameTypeDisplay v a] + -> [OBD.RenameTermDisplay v a] + -> Numbered [Pretty] + prettyRenameGroups types terms = + (<>) <$> traverse (prettyGroup . (over (_1._1) Referent.Ref)) + (types `zip` [0..]) + <*> traverse prettyGroup (terms `zip` [length types ..]) + where + leftNamePad :: Int = foldl1' max $ + map (foldl1' max . map HQ'.nameLength . toList . view _3) terms <> + map (foldl1' max . map HQ'.nameLength . toList . 
view _3) types + prettyGroup :: ((Referent, b, Set HQ'.HashQualified, Set HQ'.HashQualified), Int) + -> Numbered Pretty + prettyGroup ((r, _, olds, news),i) = let + -- [ "peach ┐" + -- , "peach' ┘"] + olds' :: [Numbered Pretty] = + map (\(oldhq, oldp) -> numHQ' oldPath oldhq r <&> (\n -> n <> " " <> oldp)) + . (zip (toList olds)) + . P.boxRight + . map (P.rightPad leftNamePad . phq') + $ toList olds + + added' = toList $ Set.difference news olds + removed' = toList $ Set.difference olds news + -- [ "(added) 24. X'" + -- , "(removed) 26. X2" + -- ] + + news' :: [Numbered Pretty] = + map (number addedLabel) added' ++ map (number removedLabel) removed' + where + addedLabel = "(added)" + removedLabel = "(removed)" + number label name = + numHQ' newPath name r <&> + (\num -> num <> " " <> phq' name <> " " <> label) + + buildTable :: [Numbered Pretty] -> [Numbered Pretty] -> Numbered Pretty + buildTable lefts rights = let + hlefts = if i == 0 then pure (P.bold "Original") : lefts + else lefts + hrights = if i == 0 then pure (P.bold "Changes") : rights else rights + in P.column2UnzippedM @Numbered mempty hlefts hrights + + in buildTable olds' news' + + prettyUpdateType :: OBD.UpdateTypeDisplay v a -> Numbered Pretty + {- + 1. ability Foo#pqr x y + 2. - AllRightsReserved : License + 3. + MIT : License + 4. ability Foo#abc + 5. - apiDocs : License + 6. + MIT : License + -} + prettyUpdateType (Nothing, mdUps) = + P.column2 <$> traverse (mdTypeLine newPath) mdUps + {- + 1. ┌ ability Foo#pqr x y + 2. └ ability Foo#xyz a b + ⧩ + 4. ┌ ability Foo#abc + │ 5. - apiDocs : Doc + │ 6. + MIT : License + 7. └ ability Foo#def + 8. - apiDocs : Doc + 9. + MIT : License + + 1. ┌ foo#abc : Nat -> Nat -> Poop + 2. └ foo#xyz : Nat + ↓ + 4. foo : Poop + 5. 
+ foo.docs : Doc + -} + prettyUpdateType (Just olds, news) = + do + olds <- traverse (mdTypeLine oldPath) [ (name,r,decl,mempty) | (name,r,decl) <- olds ] + news <- traverse (mdTypeLine newPath) news + let (oldnums, olddatas) = unzip olds + let (newnums, newdatas) = unzip news + pure . P.column2 $ + zip (oldnums <> [""] <> newnums) + (P.boxLeft olddatas <> [downArrow] <> P.boxLeft newdatas) + + {- + 13. ┌ability Yyz (+1 metadata) + 14. └ability copies.Yyz (+2 metadata) + -} + prettyAddTypes :: [OBD.AddedTypeDisplay v a] -> Numbered Pretty + prettyAddTypes = fmap P.lines . traverse prettyGroup where + prettyGroup :: OBD.AddedTypeDisplay v a -> Numbered Pretty + prettyGroup (hqmds, r, odecl) = do + pairs <- traverse (prettyLine r odecl) hqmds + let (nums, decls) = unzip pairs + let boxLeft = case hqmds of _:_:_ -> P.boxLeft; _ -> id + pure . P.column2 $ zip nums (boxLeft decls) + prettyLine :: Reference -> Maybe (DD.DeclOrBuiltin v a) -> (HQ'.HashQualified, [OBD.MetadataDisplay v a]) -> Numbered (Pretty, Pretty) + prettyLine r odecl (hq, mds) = do + n <- numHQ' newPath hq (Referent.Ref r) + pure . (n,) $ prettyDecl hq odecl <> case length mds of + 0 -> mempty + c -> " (+" <> P.shown c <> " metadata)" + + prettyAddTerms :: [OBD.AddedTermDisplay v a] -> Numbered Pretty + prettyAddTerms = fmap (P.column3 . mconcat) . traverse prettyGroup . reorderTerms where + reorderTerms = sortOn (not . Referent.isConstructor . view _2) + prettyGroup :: OBD.AddedTermDisplay v a -> Numbered [(Pretty, Pretty, Pretty)] + prettyGroup (hqmds, r, otype) = do + pairs <- traverse (prettyLine r otype) hqmds + let (nums, names, decls) = unzip3 pairs + boxLeft = case hqmds of _:_:_ -> P.boxLeft; _ -> id + pure $ zip3 nums (boxLeft names) decls + prettyLine r otype (hq, mds) = do + n <- numHQ' newPath hq r + pure . 
(n, phq' hq, ) $ ": " <> prettyType otype <> case length mds of + 0 -> mempty + c -> " (+" <> P.shown c <> " metadata)" + + prettySummarizePatch, prettyNamePatch :: Path.Absolute -> OBD.PatchDisplay -> Numbered Pretty + -- 12. patch p (added 3 updates, deleted 1) + prettySummarizePatch prefix (name, patchDiff) = do + n <- numPatch prefix name + let addCount = (R.size . view Patch.addedTermEdits) patchDiff + + (R.size . view Patch.addedTypeEdits) patchDiff + delCount = (R.size . view Patch.removedTermEdits) patchDiff + + (R.size . view Patch.removedTypeEdits) patchDiff + messages = + (if addCount > 0 then ["added " <> P.shown addCount] else []) ++ + (if delCount > 0 then ["deleted " <> P.shown addCount] else []) + message = case messages of + [] -> mempty + x : ys -> " (" <> P.commas (x <> " updates" : ys) <> ")" + pure $ n <> P.bold " patch " <> prettyName name <> message + -- 18. patch q + prettyNamePatch prefix (name, _patchDiff) = do + n <- numPatch prefix name + pure $ n <> P.bold " patch " <> prettyName name + + {- + Removes: + + 10. ┌ oldn'busted : Nat -> Nat -> Poop + 11. └ oldn'busted' + 12. ability BadType + 13. patch defunctThingy + -} + prettyRemoveTypes :: [OBD.RemovedTypeDisplay v a] -> Numbered Pretty + prettyRemoveTypes = fmap P.lines . traverse prettyGroup where + prettyGroup :: OBD.RemovedTypeDisplay v a -> Numbered Pretty + prettyGroup (hqs, r, odecl) = do + lines <- traverse (prettyLine r odecl) hqs + let (nums, decls) = unzip lines + boxLeft = case hqs of _:_:_ -> P.boxLeft; _ -> id + pure . P.column2 $ zip nums (boxLeft decls) + prettyLine r odecl hq = do + n <- numHQ' newPath hq (Referent.Ref r) + pure (n, prettyDecl hq odecl) + + prettyRemoveTerms :: [OBD.RemovedTermDisplay v a] -> Numbered Pretty + prettyRemoveTerms = fmap (P.column3 . mconcat) . traverse prettyGroup . reorderTerms where + reorderTerms = sortOn (not . Referent.isConstructor . 
view _2) + prettyGroup :: OBD.RemovedTermDisplay v a -> Numbered [(Pretty, Pretty, Pretty)] + prettyGroup ([], r, _) = + error $ "trying to remove " <> show r <> " without any names." + prettyGroup (hq1:hqs, r, otype) = do + line1 <- prettyLine1 r otype hq1 + lines <- traverse (prettyLine r) hqs + let (nums, names, decls) = unzip3 (line1:lines) + boxLeft = case hqs of _:_ -> P.boxLeft; _ -> id + pure $ zip3 nums (boxLeft names) decls + prettyLine1 r otype hq = do + n <- numHQ' newPath hq r + pure (n, phq' hq, ": " <> prettyType otype) + prettyLine r hq = do + n <- numHQ' newPath hq r + pure (n, phq' hq, mempty) + + downArrow = P.bold "↓" + mdTypeLine :: Path.Absolute -> OBD.TypeDisplay v a -> Numbered (Pretty, Pretty) + mdTypeLine p (hq, r, odecl, mddiff) = do + n <- numHQ' p hq (Referent.Ref r) + fmap ((n,) . P.linesNonEmpty) . sequence $ + [ pure $ prettyDecl hq odecl + , P.indentN leftNumsWidth <$> prettyMetadataDiff mddiff ] + + -- + 2. MIT : License + -- - 3. AllRightsReserved : License + mdTermLine :: Path.Absolute -> Int -> OBD.TermDisplay v a -> Numbered (Pretty, Pretty) + mdTermLine p namesWidth (hq, r, otype, mddiff) = do + n <- numHQ' p hq r + fmap ((n,) . P.linesNonEmpty) . sequence $ + [ pure $ P.rightPad namesWidth (phq' hq) <> " : " <> prettyType otype + , prettyMetadataDiff mddiff ] + -- , P.indentN 2 <$> prettyMetadataDiff mddiff ] + + prettyUpdateTerm :: OBD.UpdateTermDisplay v a -> Numbered Pretty + prettyUpdateTerm (Nothing, newTerms) = + if null newTerms then error "Super invalid UpdateTermDisplay" else + fmap P.column2 $ traverse (mdTermLine newPath namesWidth) newTerms + where namesWidth = foldl1' max $ fmap (HQ'.nameLength . 
view _1) newTerms + prettyUpdateTerm (Just olds, news) = + fmap P.column2 $ do + olds <- traverse (mdTermLine oldPath namesWidth) [ (name,r,typ,mempty) | (name,r,typ) <- olds ] + news <- traverse (mdTermLine newPath namesWidth) news + let (oldnums, olddatas) = unzip olds + let (newnums, newdatas) = unzip news + pure $ zip (oldnums <> [""] <> newnums) + (P.boxLeft olddatas <> [downArrow] <> P.boxLeft newdatas) + where namesWidth = foldl1' max $ fmap (HQ'.nameLength . view _1) news + <> fmap (HQ'.nameLength . view _1) olds + + prettyMetadataDiff :: OBD.MetadataDiff (OBD.MetadataDisplay v a) -> Numbered Pretty + prettyMetadataDiff OBD.MetadataDiff{..} = P.column2M $ + map (elem oldPath "- ") removedMetadata <> + map (elem newPath "+ ") addedMetadata + where + elem p x (hq, r, otype) = do + num <- numHQ p hq r + pure (x <> num <> " " <> phq hq, ": " <> prettyType otype) + + prettyType = maybe (P.red "type not found") (TypePrinter.pretty ppe) + prettyDecl hq = + maybe (P.red "type not found") + (P.syntaxToColor . DeclPrinter.prettyDeclOrBuiltinHeader (HQ'.toHQ hq)) + phq' :: _ -> Pretty = P.syntaxToColor . prettyHashQualified' + phq :: _ -> Pretty = P.syntaxToColor . prettyHashQualified + -- + -- DeclPrinter.prettyDeclHeader : HQ -> Either + numPatch :: Path.Absolute -> Name -> Numbered Pretty + numPatch prefix name = + addNumberedArg . Name.toString . Name.makeAbsolute $ Path.prefixName prefix name + + numHQ :: Path.Absolute -> HQ.HashQualified -> Referent -> Numbered Pretty + numHQ prefix hq r = addNumberedArg (HQ.toString hq') + where + hq' = HQ.requalify (fmap (Name.makeAbsolute . Path.prefixName prefix) hq) r + + numHQ' :: Path.Absolute -> HQ'.HashQualified -> Referent -> Numbered Pretty + numHQ' prefix hq r = addNumberedArg (HQ'.toString hq') + where + hq' = HQ'.requalify (fmap (Name.makeAbsolute . 
Path.prefixName prefix) hq) r + + addNumberedArg :: String -> Numbered Pretty + addNumberedArg s = case sn of + ShowNumbers -> do + (n, args) <- State.get + State.put (n+1, args Seq.|> s) + pure $ padNumber (n+1) + HideNumbers -> pure mempty + + padNumber :: Int -> Pretty + padNumber n = P.hiBlack . P.rightPad leftNumsWidth $ P.shown n <> "." + + leftNumsWidth = length (show menuSize) + length ("." :: String) + +noResults :: Pretty +noResults = P.callout "😶" $ + P.wrap $ "No results. Check your spelling, or try using tab completion " + <> "to supply command arguments." + +listOfDefinitions' :: Var v + => PPE.PrettyPrintEnv -- for printing types of terms :-\ + -> E.ListDetailed + -> [SR'.SearchResult' v a] + -> Pretty +listOfDefinitions' ppe detailed results = + if null results then noResults + else P.lines . P.nonEmpty $ prettyNumberedResults : + [formatMissingStuff termsWithMissingTypes missingTypes + ,unlessM (null missingBuiltins) . bigproblem $ P.wrap + "I encountered an inconsistency in the codebase; these definitions refer to built-ins that this version of unison doesn't know about:" `P.hang` + P.column2 ( (P.bold "Name", P.bold "Built-in") + -- : ("-", "-") + : fmap (bimap (P.syntaxToColor . prettyHashQualified) + (P.text . 
Referent.toText)) missingBuiltins) + ] + where + prettyNumberedResults = P.numberedList prettyResults + -- todo: group this by namespace + prettyResults = + map (SR'.foldResult' renderTerm renderType) + (filter (not.missingType) results) + where + (renderTerm, renderType) = + if detailed then + (unsafePrettyTermResultSigFull' ppe, prettyTypeResultHeaderFull') + else + (unsafePrettyTermResultSig' ppe, prettyTypeResultHeader') + missingType (SR'.Tm _ Nothing _ _) = True + missingType (SR'.Tp _ (MissingThing _) _ _) = True + missingType _ = False + -- termsWithTypes = [(name,t) | (name, Just t) <- sigs0 ] + -- where sigs0 = (\(name, _, typ) -> (name, typ)) <$> terms + termsWithMissingTypes = + [ (HQ'.toHQ name, r) + | SR'.Tm name Nothing (Referent.Ref (Reference.DerivedId r)) _ <- results ] + missingTypes = nubOrdOn snd $ + [ (HQ'.toHQ name, Reference.DerivedId r) + | SR'.Tp name (MissingThing r) _ _ <- results ] <> + [ (HQ'.toHQ name, r) + | SR'.Tm name Nothing (Referent.toTypeReference -> Just r) _ <- results] + missingBuiltins = results >>= \case + SR'.Tm name Nothing r@(Referent.Ref (Reference.Builtin _)) _ -> [(HQ'.toHQ name,r)] + _ -> [] + +watchPrinter + :: Var v + => Text + -> PPE.PrettyPrintEnv + -> Ann + -> UF.WatchKind + -> Term v () + -> Runtime.IsCacheHit + -> Pretty +watchPrinter src ppe ann kind term isHit = + P.bracket + $ let + lines = Text.lines src + lineNum = fromMaybe 1 $ startingLine ann + lineNumWidth = length (show lineNum) + extra = " " <> replicate (length kind) ' ' -- for the ` | > ` after the line number + line = lines !! 
(lineNum - 1) + addCache p = if isHit then p <> " (cached)" else p + renderTest (Term.App' (Term.Constructor' _ id) (Term.Text' msg)) = + "\n" <> if id == DD.okConstructorId + then addCache + (P.green "✅ " <> P.bold "Passed" <> P.green (P.text msg')) + else if id == DD.failConstructorId + then addCache + (P.red "🚫 " <> P.bold "FAILED" <> P.red (P.text msg')) + else P.red "❓ " <> TermPrinter.pretty ppe term + where + msg' = if Text.take 1 msg == " " then msg + else " " <> msg + + renderTest x = + fromString $ "\n Unison bug: " <> show x <> " is not a test." + in + P.lines + [ fromString (show lineNum) <> " | " <> P.text line + , case (kind, term) of + (UF.TestWatch, Term.Sequence' tests) -> foldMap renderTest tests + _ -> P.lines + [ fromString (replicate lineNumWidth ' ') + <> fromString extra + <> (if isHit then id else P.purple) "⧩" + , P.indentN (lineNumWidth + length extra) + . (if isHit then id else P.bold) + $ TermPrinter.pretty ppe term + ] + ] + +filestatusTip :: Pretty +filestatusTip = tip "Use `help filestatus` to learn more." 
+ +prettyDiff :: Names.Diff -> Pretty +prettyDiff diff = let + orig = Names.originalNames diff + adds = Names.addedNames diff + removes = Names.removedNames diff + + addedTerms = [ (n,r) | (n,r) <- R.toList (Names.terms0 adds) + , not $ R.memberRan r (Names.terms0 removes) ] + addedTypes = [ (n,r) | (n,r) <- R.toList (Names.types0 adds) + , not $ R.memberRan r (Names.types0 removes) ] + added = sort (hqTerms ++ hqTypes) + where + hqTerms = [ Names.hqName adds n (Right r) | (n, r) <- addedTerms ] + hqTypes = [ Names.hqName adds n (Left r) | (n, r) <- addedTypes ] + + removedTerms = [ (n,r) | (n,r) <- R.toList (Names.terms0 removes) + , not $ R.memberRan r (Names.terms0 adds) + , Set.notMember n addedTermsSet ] where + addedTermsSet = Set.fromList (map fst addedTerms) + removedTypes = [ (n,r) | (n,r) <- R.toList (Names.types0 removes) + , not $ R.memberRan r (Names.types0 adds) + , Set.notMember n addedTypesSet ] where + addedTypesSet = Set.fromList (map fst addedTypes) + removed = sort (hqTerms ++ hqTypes) + where + hqTerms = [ Names.hqName removes n (Right r) | (n, r) <- removedTerms ] + hqTypes = [ Names.hqName removes n (Left r) | (n, r) <- removedTypes ] + + movedTerms = [ (n,n2) | (n,r) <- R.toList (Names.terms0 removes) + , n2 <- toList (R.lookupRan r (Names.terms adds)) ] + movedTypes = [ (n,n2) | (n,r) <- R.toList (Names.types removes) + , n2 <- toList (R.lookupRan r (Names.types adds)) ] + moved = Name.sortNamed fst . 
nubOrd $ (movedTerms <> movedTypes) + + copiedTerms = List.multimap [ + (n,n2) | (n2,r) <- R.toList (Names.terms0 adds) + , not (R.memberRan r (Names.terms0 removes)) + , n <- toList (R.lookupRan r (Names.terms0 orig)) ] + copiedTypes = List.multimap [ + (n,n2) | (n2,r) <- R.toList (Names.types0 adds) + , not (R.memberRan r (Names.types0 removes)) + , n <- toList (R.lookupRan r (Names.types0 orig)) ] + copied = Name.sortNamed fst $ + Map.toList (Map.unionWith (<>) copiedTerms copiedTypes) + in + P.sepNonEmpty "\n\n" [ + if not $ null added then + P.lines [ + -- todo: split out updates + P.green "+ Adds / updates:", "", + P.indentN 2 . P.wrap $ + P.sep " " (P.syntaxToColor . prettyHashQualified' <$> added) + ] + else mempty, + if not $ null removed then + P.lines [ + P.hiBlack "- Deletes:", "", + P.indentN 2 . P.wrap $ + P.sep " " (P.syntaxToColor . prettyHashQualified' <$> removed) + ] + else mempty, + if not $ null moved then + P.lines [ + P.purple "> Moves:", "", + P.indentN 2 $ + P.column2 $ + (P.hiBlack "Original name", P.hiBlack "New name") : + [ (prettyName n,prettyName n2) | (n, n2) <- moved ] + ] + else mempty, + if not $ null copied then + P.lines [ + P.yellow "= Copies:", "", + P.indentN 2 $ + P.column2 $ + (P.hiBlack "Original name", P.hiBlack "New name(s)") : + [ (prettyName n, P.sep " " (prettyName <$> ns)) + | (n, ns) <- copied ] + ] + else mempty + ] + +prettyTermName :: PPE.PrettyPrintEnv -> Referent -> Pretty +prettyTermName ppe r = P.syntaxToColor $ + prettyHashQualified (PPE.termName ppe r) + +prettyRepoRevision :: RemoteRepo -> Pretty +prettyRepoRevision (RemoteRepo.GitRepo url treeish) = + P.blue (P.text url) <> prettyRevision treeish + where + prettyRevision treeish = + Monoid.fromMaybe $ + treeish <&> \treeish -> "at revision" <> P.blue (P.text treeish) + +prettyRepoBranch :: RemoteRepo -> Pretty +prettyRepoBranch (RemoteRepo.GitRepo url treeish) = + P.blue (P.text url) <> prettyRevision treeish + where + prettyRevision treeish = + 
Monoid.fromMaybe $ + treeish <&> \treeish -> "at branch" <> P.blue (P.text treeish) + +isTestOk :: Term v Ann -> Bool +isTestOk tm = case tm of + Term.Sequence' ts -> all isSuccess ts where + isSuccess (Term.App' (Term.Constructor' ref cid) _) = + cid == DD.okConstructorId && + ref == DD.testResultRef + isSuccess _ = False + _ -> False diff --git a/parser-typechecker/src/Unison/DeclPrinter.hs b/parser-typechecker/src/Unison/DeclPrinter.hs new file mode 100644 index 0000000000..b0f6f9ec84 --- /dev/null +++ b/parser-typechecker/src/Unison/DeclPrinter.hs @@ -0,0 +1,187 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.DeclPrinter where + +import Unison.Prelude + +import Data.List ( isPrefixOf ) +import qualified Data.Map as Map +import Unison.DataDeclaration ( DataDeclaration + , EffectDeclaration + , toDataDecl + ) +import qualified Unison.DataDeclaration as DD +import qualified Unison.ConstructorType as CT +import Unison.HashQualified ( HashQualified ) +import qualified Unison.HashQualified as HQ +import qualified Unison.Name as Name +import Unison.NamePrinter ( styleHashQualified'' ) +import Unison.PrettyPrintEnv ( PrettyPrintEnv ) +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Referent as Referent +import Unison.Reference ( Reference(DerivedId) ) +import qualified Unison.Util.SyntaxText as S +import Unison.Util.SyntaxText ( SyntaxText ) +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import qualified Unison.TypePrinter as TypePrinter +import Unison.Util.Pretty ( Pretty ) +import qualified Unison.Util.Pretty as P +import Unison.Var ( Var ) +import qualified Unison.Var as Var + +prettyDecl + :: Var v + => PrettyPrintEnv + -> Reference + -> HashQualified + -> DD.Decl v a + -> Pretty SyntaxText +prettyDecl ppe r hq d = case d of + Left e -> prettyEffectDecl ppe r hq e + Right dd -> prettyDataDecl ppe r hq dd + +prettyEffectDecl + :: Var v + => PrettyPrintEnv + -> Reference + -> HashQualified + -> 
EffectDeclaration v a + -> Pretty SyntaxText +prettyEffectDecl ppe r name = prettyGADT ppe r name . toDataDecl + +prettyGADT + :: Var v + => PrettyPrintEnv + -> Reference + -> HashQualified + -> DataDeclaration v a + -> Pretty SyntaxText +prettyGADT env r name dd = P.hang header . P.lines $ constructor <$> zip + [0 ..] + (DD.constructors' dd) + where + constructor (n, (_, _, t)) = + prettyPattern env r name n + <> (fmt S.TypeAscriptionColon " :") + `P.hang` TypePrinter.pretty0 env Map.empty (-1) t + header = prettyEffectHeader name (DD.EffectDeclaration dd) <> (fmt S.ControlKeyword " where") + +prettyPattern + :: PrettyPrintEnv -> Reference -> HashQualified -> Int -> Pretty SyntaxText +prettyPattern env r namespace n = styleHashQualified'' (fmt S.Constructor) + ( HQ.stripNamespace (fromMaybe "" $ Name.toText <$> HQ.toName namespace) + $ PPE.patternName env r n + ) + +prettyDataDecl + :: Var v + => PrettyPrintEnv + -> Reference + -> HashQualified + -> DataDeclaration v a + -> Pretty SyntaxText +prettyDataDecl env r name dd = + (header <>) . P.sep (fmt S.DelimiterChar (" | " `P.orElse` "\n | ")) $ constructor <$> zip + [0 ..] + (DD.constructors' dd) + where + constructor (n, (_, _, (Type.ForallsNamed' _ t))) = constructor' n t + constructor (n, (_, _, t) ) = constructor' n t + constructor' n t = case Type.unArrows t of + Nothing -> prettyPattern env r name n + Just ts -> case fieldNames env r name dd of + Nothing -> P.group . 
P.hang' (prettyPattern env r name n) " " + $ P.spaced (TypePrinter.prettyRaw env Map.empty 10 <$> init ts) + Just fs -> P.group $ (fmt S.DelimiterChar "{ ") + <> P.sep ((fmt S.DelimiterChar ",") <> " " `P.orElse` "\n ") + (field <$> zip fs (init ts)) + <> (fmt S.DelimiterChar " }") + field (fname, typ) = P.group $ styleHashQualified'' (fmt S.Constructor) fname <> + (fmt S.TypeAscriptionColon " :") `P.hang` TypePrinter.prettyRaw env Map.empty (-1) typ + header = prettyDataHeader name dd <> (fmt S.DelimiterChar (" = " `P.orElse` "\n = ")) + +-- Comes up with field names for a data declaration which has the form of a +-- record, like `type Pt = { x : Int, y : Int }`. Works by generating the +-- record accessor terms for the data type, hashing these terms, and then +-- checking the `PrettyPrintEnv` for the names of those hashes. If the names for +-- these hashes are: +-- +-- `Pt.x`, `Pt.x.set`, `Pt.x.modify`, `Pt.y`, `Pt.y.set`, `Pt.y.modify` +-- +-- then this matches the naming convention generated by the parser, and we +-- return `x` and `y` as the field names. +-- +-- This function bails with `Nothing` if the names aren't an exact match for +-- the expected record naming convention. +fieldNames + :: forall v a . Var v + => PrettyPrintEnv + -> Reference + -> HashQualified + -> DataDeclaration v a + -> Maybe [HashQualified] +fieldNames env r name dd = case DD.constructors dd of + [(_, typ)] -> let + vars :: [v] + vars = [ Var.freshenId (fromIntegral n) (Var.named "_") | n <- [0..Type.arity typ - 1]] + accessors = DD.generateRecordAccessors (map (,()) vars) (HQ.toVar name) r + hashes = Term.hashComponents (Map.fromList accessors) + names = [ (r, HQ.toString . PPE.termName env . Referent.Ref $ DerivedId r) + | r <- fst <$> Map.elems hashes ] + fieldNames = Map.fromList + [ (r, f) | (r, n) <- names + , typename <- pure (HQ.toString name) + , typename `isPrefixOf` n + -- drop the typename and the following '.' 
+ , rest <- pure $ drop (length typename + 1) n + , (f, rest) <- pure $ span (/= '.') rest + , rest `elem` ["",".set",".modify"] ] + in if Map.size fieldNames == length names then + Just [ HQ.unsafeFromString name + | v <- vars + , Just (ref, _) <- [Map.lookup (Var.namespaced [HQ.toVar name, v]) hashes] + , Just name <- [Map.lookup ref fieldNames] ] + else Nothing + _ -> Nothing + +prettyModifier :: DD.Modifier -> Pretty SyntaxText +prettyModifier DD.Structural = mempty +prettyModifier (DD.Unique _uid) = + fmt S.DataTypeModifier "unique" -- <> ("[" <> P.text uid <> "] ") + +prettyDataHeader :: Var v => HashQualified -> DD.DataDeclaration v a -> Pretty SyntaxText +prettyDataHeader name dd = + P.sepNonEmpty " " [ + prettyModifier (DD.modifier dd), + fmt S.DataTypeKeyword "type", + styleHashQualified'' (fmt $ S.HashQualifier name) name, + P.sep " " (fmt S.DataTypeParams . P.text . Var.name <$> DD.bound dd) ] + +prettyEffectHeader :: Var v => HashQualified -> DD.EffectDeclaration v a -> Pretty SyntaxText +prettyEffectHeader name ed = P.sepNonEmpty " " [ + prettyModifier (DD.modifier (DD.toDataDecl ed)), + fmt S.DataTypeKeyword "ability", + styleHashQualified'' (fmt $ S.HashQualifier name) name, + P.sep " " (fmt S.DataTypeParams . P.text . 
Var.name <$> DD.bound (DD.toDataDecl ed)) ] + +prettyDeclHeader + :: Var v + => HashQualified + -> Either (DD.EffectDeclaration v a) (DD.DataDeclaration v a) + -> Pretty SyntaxText +prettyDeclHeader name (Left e) = prettyEffectHeader name e +prettyDeclHeader name (Right d) = prettyDataHeader name d + +prettyDeclOrBuiltinHeader + :: Var v + => HashQualified + -> DD.DeclOrBuiltin v a + -> Pretty SyntaxText +prettyDeclOrBuiltinHeader name (DD.Builtin ctype) = case ctype of + CT.Data -> fmt S.DataTypeKeyword "builtin type " <> styleHashQualified'' (fmt $ S.HashQualifier name) name + CT.Effect -> fmt S.DataTypeKeyword "builtin ability " <> styleHashQualified'' (fmt $ S.HashQualifier name) name +prettyDeclOrBuiltinHeader name (DD.Decl e) = prettyDeclHeader name e + +fmt :: S.Element -> Pretty S.SyntaxText -> Pretty S.SyntaxText +fmt = P.withSyntax diff --git a/parser-typechecker/src/Unison/FileParser.hs b/parser-typechecker/src/Unison/FileParser.hs new file mode 100644 index 0000000000..5cb9972d0a --- /dev/null +++ b/parser-typechecker/src/Unison/FileParser.hs @@ -0,0 +1,289 @@ +{-# Language DeriveTraversable #-} +{-# Language OverloadedStrings #-} + +module Unison.FileParser where + +import Unison.Prelude + +import qualified Unison.ABT as ABT +import Control.Lens +import Control.Monad.Reader (local, asks) +import qualified Data.Map as Map +import Prelude hiding (readFile) +import qualified Text.Megaparsec as P +import Unison.DataDeclaration (DataDeclaration, EffectDeclaration) +import qualified Unison.DataDeclaration as DD +import qualified Unison.Lexer as L +import Unison.Parser +import Unison.Term (Term) +import qualified Unison.Term as Term +import qualified Unison.TermParser as TermParser +import Unison.Type (Type) +import qualified Unison.Type as Type +import qualified Unison.TypeParser as TypeParser +import Unison.UnisonFile (UnisonFile(..), environmentFor) +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.List as List +import Unison.Var 
(Var) +import qualified Unison.Var as Var +import qualified Unison.Names3 as Names +import qualified Unison.Name as Name + +resolutionFailures :: Ord v => [Names.ResolutionFailure v Ann] -> P v x +resolutionFailures es = P.customFailure (ResolutionFailures es) + +file :: forall v . Var v => P v (UnisonFile v Ann) +file = do + _ <- openBlock + -- The file may optionally contain top-level imports, + -- which are parsed and applied to the type decls and term stanzas + (namesStart, imports) <- TermParser.imports <* optional semi + (dataDecls, effectDecls, parsedAccessors) <- declarations + env <- case environmentFor (Names.currentNames namesStart) dataDecls effectDecls of + Right (Right env) -> pure env + Right (Left es) -> P.customFailure $ TypeDeclarationErrors es + Left es -> resolutionFailures (toList es) + let importNames = [(Name.fromVar v, Name.fromVar v2) | (v,v2) <- imports ] + let locals = Names.importing0 importNames (UF.names env) + local (\e -> e { names = Names.push locals namesStart }) $ do + names <- asks names + stanzas0 <- local (\e -> e { names = names }) $ sepBy semi stanza + let stanzas = fmap (TermParser.substImports names imports) <$> stanzas0 + _ <- closeBlock + let (termsr, watchesr) = foldl' go ([], []) stanzas + go (terms, watches) s = case s of + WatchBinding kind _ ((_, v), at) -> + (terms, (kind,(v,Term.generalizeTypeSignatures at)) : watches) + WatchExpression kind guid _ at -> + (terms, (kind, (Var.unnamedTest guid, Term.generalizeTypeSignatures at)) : watches) + Binding ((_, v), at) -> ((v,Term.generalizeTypeSignatures at) : terms, watches) + Bindings bs -> ([(v,Term.generalizeTypeSignatures at) | ((_,v), at) <- bs ] ++ terms, watches) + let (terms, watches) = (reverse termsr, reverse watchesr) + -- local term bindings shadow any same-named thing from the outer codebase scope + let locals = stanzas0 >>= getVars + let curNames = Names.deleteTerms0 (Name.fromVar <$> locals) (Names.currentNames names) + terms <- case List.validate 
(traverse $ Term.bindSomeNames curNames) terms of + Left es -> resolutionFailures (toList es) + Right terms -> pure terms + watches <- case List.validate (traverse . traverse $ Term.bindSomeNames curNames) watches of + Left es -> resolutionFailures (toList es) + Right ws -> pure ws + let toPair (tok, _) = (L.payload tok, ann tok) + accessors = + [ DD.generateRecordAccessors (toPair <$> fields) (L.payload typ) r + | (typ, fields) <- parsedAccessors + , Just (r,_) <- [Map.lookup (L.payload typ) (UF.datas env)] + ] + uf = UnisonFileId (UF.datasId env) (UF.effectsId env) (terms <> join accessors) + (List.multimap watches) + pure uf + +-- A stanza is either a watch expression like: +-- > 1 + x +-- > z = x + 1 +-- Or it is a binding like: +-- foo : Nat -> Nat +-- foo x = x + 42 +-- Or it is a namespace like: +-- namespace Woot where +-- x = 42 +-- y = 17 +-- which parses as [(Woot.x, 42), (Woot.y, 17)] + +data Stanza v term + = WatchBinding UF.WatchKind Ann ((Ann, v), term) + | WatchExpression UF.WatchKind Text Ann term + | Binding ((Ann, v), term) + | Bindings [((Ann, v), term)] deriving (Foldable, Traversable, Functor) + +getVars :: Var v => Stanza v term -> [v] +getVars = \case + WatchBinding _ _ ((_,v), _) -> [v] + WatchExpression _ guid _ _ -> [Var.unnamedTest guid] + Binding ((_,v), _) -> [v] + Bindings bs -> [ v | ((_,v), _) <- bs ] + +stanza :: Var v => P v (Stanza v (Term v Ann)) +stanza = watchExpression <|> unexpectedAction <|> binding <|> namespace + where + unexpectedAction = failureIf (TermParser.blockTerm $> getErr) binding + getErr = do + t <- anyToken + t2 <- optional anyToken + P.customFailure $ DidntExpectExpression t t2 + watchExpression = do + (kind, guid, ann) <- watched + _ <- closed + msum [ + WatchBinding kind ann <$> TermParser.binding, + WatchExpression kind guid ann <$> TermParser.blockTerm ] + binding = Binding <$> TermParser.binding + namespace = tweak <$> TermParser.namespaceBlock where + tweak ns = Bindings (TermParser.toBindings [ns]) + 
+watched :: Var v => P v (UF.WatchKind, Text, Ann) +watched = P.try $ do + kind <- optional wordyIdString + guid <- uniqueName 10 + op <- optional (L.payload <$> P.lookAhead symbolyIdString) + guard (op == Just ">") + tok <- anyToken + guard $ maybe True (`L.touches` tok) kind + pure (maybe UF.RegularWatch L.payload kind, guid, maybe mempty ann kind <> ann tok) + +closed :: Var v => P v () +closed = P.try $ do + op <- optional (L.payload <$> P.lookAhead closeBlock) + case op of Just () -> P.customFailure EmptyWatch + _ -> pure () + +-- The parsed form of record accessors, as in: +-- +-- type Additive a = { zero : a, (+) : a -> a -> a } +-- +-- The `Token v` is the variable name and location (here `zero` and `(+)`) of +-- each field, and the type is the type of that field +type Accessors v = [(L.Token v, [(L.Token v, Type v Ann)])] + +declarations :: Var v => P v + (Map v (DataDeclaration v Ann), + Map v (EffectDeclaration v Ann), + Accessors v) +declarations = do + declarations <- many $ declaration <* optional semi + let (dataDecls0, effectDecls) = partitionEithers declarations + dataDecls = [(a,b) | (a,b,_) <- dataDecls0 ] + multimap :: Ord k => [(k,v)] -> Map k [v] + multimap = foldl' mi Map.empty + mi m (k,v) = Map.insertWith (++) k [v] m + mds = multimap dataDecls + mes = multimap effectDecls + mdsBad = Map.filter (\xs -> length xs /= 1) mds + mesBad = Map.filter (\xs -> length xs /= 1) mes + if Map.null mdsBad && Map.null mesBad then + pure (Map.fromList dataDecls, + Map.fromList effectDecls, + join . map (view _3) $ dataDecls0) + else + P.customFailure . DuplicateTypeNames $ + [ (v, DD.annotation <$> ds) | (v, ds) <- Map.toList mdsBad ] <> + [ (v, DD.annotation . 
DD.toDataDecl <$> es) | (v, es) <- Map.toList mesBad ] + +modifier :: Var v => P v (L.Token DD.Modifier) +modifier = do + o <- optional (openBlockWith "unique") + case o of + Nothing -> fmap (const DD.Structural) <$> P.lookAhead anyToken + Just tok -> do + uid <- do + o <- optional (reserved "[" *> wordyIdString <* reserved "]") + case o of + Nothing -> uniqueName 32 + Just uid -> pure (fromString . L.payload $ uid) + pure (DD.Unique uid <$ tok) + +declaration :: Var v + => P v (Either (v, DataDeclaration v Ann, Accessors v) + (v, EffectDeclaration v Ann)) +declaration = do + mod <- modifier + fmap Right (effectDeclaration mod) <|> fmap Left (dataDeclaration mod) + +dataDeclaration + :: forall v + . Var v + => L.Token DD.Modifier + -> P v (v, DataDeclaration v Ann, Accessors v) +dataDeclaration mod = do + _ <- fmap void (reserved "type") <|> openBlockWith "type" + (name, typeArgs) <- + (,) <$> TermParser.verifyRelativeVarName prefixDefinitionName + <*> many (TermParser.verifyRelativeVarName prefixDefinitionName) + let typeArgVs = L.payload <$> typeArgs + eq <- reserved "=" + let + -- go gives the type of the constructor, given the types of + -- the constructor arguments, e.g. Cons becomes forall a . a -> List a -> List a + go :: L.Token v -> [Type v Ann] -> (Ann, v, Type v Ann) + go ctorName ctorArgs = let + arrow i o = Type.arrow (ann i <> ann o) i o + app f arg = Type.app (ann f <> ann arg) f arg + -- ctorReturnType e.g. `Optional a` + ctorReturnType = foldl' app (tok Type.var name) (tok Type.var <$> typeArgs) + -- ctorType e.g. 
`a -> Optional a` + -- or just `Optional a` in the case of `None` + ctorType = foldr arrow ctorReturnType ctorArgs + ctorAnn = ann ctorName <> + (if null ctorArgs then mempty else ann (last ctorArgs)) + in (ann ctorName, Var.namespaced [L.payload name, L.payload ctorName], + Type.foralls ctorAnn typeArgVs ctorType) + prefixVar = TermParser.verifyRelativeVarName prefixDefinitionName + dataConstructor = go <$> prefixVar <*> many TypeParser.valueTypeLeaf + record = do + _ <- openBlockWith "{" + fields <- sepBy1 (reserved "," <* optional semi) $ + liftA2 (,) (prefixVar <* reserved ":") TypeParser.valueType + _ <- closeBlock + pure ([go name (snd <$> fields)], [(name, fields)]) + (constructors, accessors) <- + msum [record, (,[]) <$> sepBy (reserved "|") dataConstructor] + _ <- closeBlock + let -- the annotation of the last constructor if present, + -- otherwise ann of name + closingAnn :: Ann + closingAnn = last (ann eq : ((\(_,_,t) -> ann t) <$> constructors)) + pure (L.payload name, + DD.mkDataDecl' (L.payload mod) (ann mod <> closingAnn) typeArgVs constructors, + accessors) + +effectDeclaration + :: Var v => L.Token DD.Modifier -> P v (v, EffectDeclaration v Ann) +effectDeclaration mod = do + _ <- fmap void (reserved "ability") <|> openBlockWith "ability" + name <- TermParser.verifyRelativeVarName prefixDefinitionName + typeArgs <- many (TermParser.verifyRelativeVarName prefixDefinitionName) + let typeArgVs = L.payload <$> typeArgs + blockStart <- openBlockWith "where" + constructors <- sepBy semi (constructor typeArgs name) + -- `ability` opens a block, as does `where` + _ <- closeBlock <* closeBlock + let closingAnn = + last $ ann blockStart : ((\(_, _, t) -> ann t) <$> constructors) + pure + ( L.payload name + , DD.mkEffectDecl' (L.payload mod) + (ann mod <> closingAnn) + typeArgVs + constructors + ) + where + constructor + :: Var v => [L.Token v] -> L.Token v -> P v (Ann, v, Type v Ann) + constructor typeArgs name = + explodeToken + <$> 
TermParser.verifyRelativeVarName prefixDefinitionName + <* reserved ":" + <*> ( Type.generalizeLowercase mempty + . ensureEffect + <$> TypeParser.computationType + ) + where + explodeToken v t = (ann v, Var.namespaced [L.payload name, L.payload v], t) + -- If the effect is not syntactically present in the constructor types, + -- add them after parsing. + ensureEffect t = case t of + Type.Effect' _ _ -> modEffect t + x -> Type.editFunctionResult modEffect x + modEffect t = case t of + Type.Effect' es t -> go es t + t -> go [] t + toTypeVar t = Type.av' (ann t) (Var.name $ L.payload t) + headIs t v = case t of + Type.Apps' (Type.Var' x) _ -> x == v + Type.Var' x -> x == v + _ -> False + go es t = + let es' = if any (`headIs` L.payload name) es + then es + else Type.apps' (toTypeVar name) (toTypeVar <$> typeArgs) : es + in Type.cleanupAbilityLists $ Type.effect (ABT.annotation t) es' t diff --git a/parser-typechecker/src/Unison/FileParsers.hs b/parser-typechecker/src/Unison/FileParsers.hs new file mode 100644 index 0000000000..aee5ea3546 --- /dev/null +++ b/parser-typechecker/src/Unison/FileParsers.hs @@ -0,0 +1,196 @@ +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE UnicodeSyntax #-} + +module Unison.FileParsers where + +import Unison.Prelude + +import Control.Lens (view, _3) +import qualified Unison.Parser as Parser +import Control.Monad.State (evalStateT) +import Control.Monad.Writer (tell) +import Data.Bifunctor ( first ) +import qualified Data.Foldable as Foldable +import qualified Data.Map as Map +import Data.List (partition) +import qualified Data.Set as Set +import qualified Data.Sequence as Seq +import Data.Text (unpack) +import qualified Unison.ABT as ABT +import qualified Unison.Blank as Blank +import qualified Unison.Name as Name +import qualified Unison.Names3 as Names +import Unison.Parser (Ann) +import qualified Unison.Parsers as Parsers +import qualified Unison.Referent as Referent +import Unison.Reference (Reference) 
+import Unison.Result (Note (..), Result, pattern Result, ResultT, CompilerBug(..)) +import qualified Unison.Result as Result +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import qualified Unison.Typechecker as Typechecker +import qualified Unison.Typechecker.TypeLookup as TL +import qualified Unison.Typechecker.Context as Context +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.List as List +import qualified Unison.Util.Relation as Rel +import Unison.Var (Var) +import qualified Unison.Var as Var +import Unison.Names3 (Names0) + +type Term v = Term.Term v Ann +type Type v = Type.Type v Ann +type UnisonFile v = UF.UnisonFile v Ann +type Result' v = Result (Seq (Note v Ann)) + +convertNotes :: Ord v => Typechecker.Notes v ann -> Seq (Note v ann) +convertNotes (Typechecker.Notes bugs es is) = + (CompilerBug . TypecheckerBug <$> bugs) <> (TypeError <$> es) <> (TypeInfo <$> Seq.fromList is') where + is' = snd <$> List.uniqueBy' f ([(1::Word)..] 
`zip` Foldable.toList is) + f (_, Context.TopLevelComponent cs) = Right [ v | (v,_,_) <- cs ] + f (i, _) = Left i + -- each round of TDNR emits its own TopLevelComponent notes, so we remove + -- duplicates (based on var name and location), preferring the later note as + -- that will have the latest typechecking info + +parseAndSynthesizeFile + :: (Var v, Monad m) + => [Type v] + -> (Set Reference -> m (TL.TypeLookup v Ann)) + -> Parser.ParsingEnv + -> FilePath + -> Text + -> ResultT + (Seq (Note v Ann)) + m + (Either Names0 (UF.TypecheckedUnisonFile v Ann)) +parseAndSynthesizeFile ambient typeLookupf env filePath src = do + uf <- Result.fromParsing $ Parsers.parseFile filePath (unpack src) env + let names0 = Names.currentNames (Parser.names env) + (tm, tdnrMap, typeLookup) <- resolveNames typeLookupf names0 uf + let (Result notes' r) = synthesizeFile ambient typeLookup tdnrMap uf tm + tell notes' $> maybe (Left (UF.toNames uf )) Right r + +type TDNRMap v = Map Typechecker.Name [Typechecker.NamedReference v Ann] + +resolveNames + :: (Var v, Monad m) + => (Set Reference -> m (TL.TypeLookup v Ann)) + -> Names.Names0 + -> UnisonFile v + -> ResultT + (Seq (Note v Ann)) + m + (Term v, TDNRMap v, TL.TypeLookup v Ann) +resolveNames typeLookupf preexistingNames uf = do + let tm = UF.typecheckingTerm uf + deps = Term.dependencies tm + possibleDeps = [ (Name.toText name, Var.name v, r) | + (name, r) <- Rel.toList (Names.terms0 preexistingNames), + v <- Set.toList (Term.freeVars tm), + Name.unqualified name == Name.unqualified (Name.fromVar v) ] + possibleRefs = Referent.toReference . view _3 <$> possibleDeps + tl <- lift . lift . 
fmap (UF.declsToTypeLookup uf <>) + $ typeLookupf (deps <> Set.fromList possibleRefs) + let fqnsByShortName = List.multimap $ + [ (shortname, nr) | + (name, shortname, r) <- possibleDeps, + typ <- toList $ TL.typeOfReferent tl r, + let nr = Typechecker.NamedReference name typ (Right r) ] <> + [ (shortname, nr) | + (name, r) <- Rel.toList (Names.terms0 $ UF.toNames uf), + typ <- toList $ TL.typeOfReferent tl r, + let shortname = Name.toText $ Name.unqualified name, + let nr = Typechecker.NamedReference (Name.toText name) typ (Right r) ] + pure (tm, fqnsByShortName, tl) + +synthesizeFile' + :: forall v + . Var v + => [Type v] + -> TL.TypeLookup v Ann + -> UnisonFile v + -> Result (Seq (Note v Ann)) (UF.TypecheckedUnisonFile v Ann) +synthesizeFile' ambient tl uf = + synthesizeFile ambient tl mempty uf $ UF.typecheckingTerm uf + +synthesizeFile + :: forall v + . Var v + => [Type v] + -> TL.TypeLookup v Ann + -> TDNRMap v + -> UnisonFile v + -> Term v + -> Result (Seq (Note v Ann)) (UF.TypecheckedUnisonFile v Ann) +synthesizeFile ambient tl fqnsByShortName uf term = do + let -- substitute Blanks for any remaining free vars in UF body + tdnrTerm = Term.prepareTDNR term + env0 = Typechecker.Env ambient tl fqnsByShortName + Result notes mayType = + evalStateT (Typechecker.synthesizeAndResolve env0) tdnrTerm + -- If typechecking succeeded, reapply the TDNR decisions to user's term: + Result (convertNotes notes) mayType >>= \_typ -> do + let infos = Foldable.toList $ Typechecker.infos notes + (topLevelComponents :: [[(v, Term v, Type v)]]) <- + let + topLevelBindings :: Map v (Term v) + topLevelBindings = Map.mapKeys Var.reset $ extractTopLevelBindings tdnrTerm + extractTopLevelBindings (Term.LetRecNamedAnnotatedTop' True _ bs body) = + Map.fromList (first snd <$> bs) <> extractTopLevelBindings body + extractTopLevelBindings _ = Map.empty + tlcsFromTypechecker = + List.uniqueBy' (fmap vars) + [ t | Context.TopLevelComponent t <- infos ] + where vars (v, _, _) = v + 
strippedTopLevelBinding (v, typ, redundant) = do + tm <- case Map.lookup v topLevelBindings of + Nothing -> + Result.compilerBug $ Result.TopLevelComponentNotFound v term + Just (Term.Ann' x _) | redundant -> pure x + Just x -> pure x + -- The Var.reset removes any freshening added during typechecking + pure (Var.reset v, tm, typ) + in + -- use tlcsFromTypechecker to inform annotation-stripping decisions + traverse (traverse strippedTopLevelBinding) tlcsFromTypechecker + let doTdnr = applyTdnrDecisions infos + doTdnrInComponent (v, t, tp) = (\t -> (v, t, tp)) <$> doTdnr t + _ <- doTdnr tdnrTerm + tdnredTlcs <- (traverse . traverse) doTdnrInComponent topLevelComponents + let (watches', terms') = partition isWatch tdnredTlcs + isWatch = all (\(v,_,_) -> Set.member v watchedVars) + watchedVars = Set.fromList [ v | (v, _) <- UF.allWatches uf ] + tlcKind [] = error "empty TLC, should never occur" + tlcKind tlc@((v,_,_):_) = let + hasE k = + elem v . fmap fst $ Map.findWithDefault [] k (UF.watches uf) + in case Foldable.find hasE (Map.keys $ UF.watches uf) of + Nothing -> error "wat" + Just kind -> (kind, tlc) + pure $ UF.typecheckedUnisonFile + (UF.dataDeclarationsId uf) + (UF.effectDeclarationsId uf) + terms' + (map tlcKind watches') + where + applyTdnrDecisions + :: [Context.InfoNote v Ann] + -> Term v + -> Result' v (Term v) + applyTdnrDecisions infos tdnrTerm = foldM go tdnrTerm decisions + where + -- UF data/effect ctors + builtins + TLC Term.vars + go term _decision@(shortv, loc, replacement) = + ABT.visit (resolve shortv loc replacement) term + decisions = + [ (v, loc, replacement) | Context.Decision v loc replacement <- infos ] + -- resolve (v,loc) in a matching Blank to whatever `fqn` maps to in `names` + resolve shortv loc replacement t = case t of + Term.Blank' (Blank.Recorded (Blank.Resolve loc' name)) + | loc' == loc && Var.nameStr shortv == name -> + -- loc of replacement already chosen correctly by whatever made the + -- Decision + pure . 
pure $ replacement + _ -> Nothing diff --git a/parser-typechecker/src/Unison/Lexer.hs b/parser-typechecker/src/Unison/Lexer.hs new file mode 100644 index 0000000000..ff3918cfae --- /dev/null +++ b/parser-typechecker/src/Unison/Lexer.hs @@ -0,0 +1,777 @@ +{-# LANGUAGE OverloadedLists #-} +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveTraversable #-} + +module Unison.Lexer where + +import Unison.Prelude + +import Control.Lens.TH (makePrisms) +import qualified Control.Monad.State as S +import Data.Char +import Data.List +import qualified Data.List.NonEmpty as Nel +import Unison.Util.Monoid (intercalateMap) +import qualified Data.Map.Strict as Map +import qualified Data.Set as Set +import GHC.Exts (sortWith) +import Text.Megaparsec.Error (ShowToken(..)) +import Unison.ShortHash ( ShortHash ) +import qualified Unison.ShortHash as SH + +data Err + = InvalidWordyId String + | InvalidSymbolyId String + | InvalidShortHash String + | Both Err Err + | MissingFractional String -- ex `1.` rather than `1.04` + | MissingExponent String -- ex `1e` rather than `1e3` + | UnknownLexeme + | TextLiteralMissingClosingQuote String + | InvalidEscapeCharacter Char + | LayoutError + | CloseWithoutMatchingOpen String String -- open, close + deriving (Eq,Ord,Show) -- richer algebra + +-- Design principle: +-- `[Lexeme]` should be sufficient information for parsing without +-- further knowledge of spacing or indentation levels +-- any knowledge of comments +data Lexeme + = Open String -- start of a block + | Semi IsVirtual -- separator between elements of a block + | Close -- end of a block + | Reserved String -- reserved tokens such as `{`, `(`, `type`, `of`, etc + | Textual String -- text literals, `"foo bar"` + | Character Char -- character literals, `?X` + | Backticks String (Maybe ShortHash) -- an identifier in backticks + | WordyId String (Maybe ShortHash) -- a (non-infix) identifier + | 
SymbolyId String (Maybe ShortHash) -- an infix identifier + | Blank String -- a typed hole or placeholder + | Numeric String -- numeric literals, left unparsed + | Hash ShortHash -- hash literals + | Err Err + deriving (Eq,Show,Ord) + +type IsVirtual = Bool -- is it a virtual semi or an actual semi? + +makePrisms ''Lexeme + +simpleWordyId :: String -> Lexeme +simpleWordyId = flip WordyId Nothing + +simpleSymbolyId :: String -> Lexeme +simpleSymbolyId = flip SymbolyId Nothing + +data Token a = Token { + payload :: a, + start :: Pos, + end :: Pos +} deriving (Eq, Ord, Show, Functor) + +notLayout :: Token Lexeme -> Bool +notLayout t = case payload t of + Close -> False + Semi _ -> False + Open _ -> False + _ -> True + +instance ShowToken (Token Lexeme) where + showTokens xs = + join . Nel.toList . S.evalState (traverse go xs) . end $ Nel.head xs + where + go :: Token Lexeme -> S.State Pos String + go tok = do + prev <- S.get + S.put $ end tok + pure $ pad prev (start tok) ++ pretty (payload tok) + pretty (Open s) = s + pretty (Reserved w) = w + pretty (Textual t) = '"' : t ++ ['"'] + pretty (Character c) = + case showEscapeChar c of + Just c -> "?\\" ++ [c] + Nothing -> '?' 
: [c] + pretty (Backticks n h) = + '`' : n ++ (toList h >>= SH.toString) ++ ['`'] + pretty (WordyId n h) = n ++ (toList h >>= SH.toString) + pretty (SymbolyId n h) = n ++ (toList h >>= SH.toString) + pretty (Blank s) = "_" ++ s + pretty (Numeric n) = n + pretty (Hash sh) = show sh + pretty (Err e) = show e + pretty Close = "" + pretty (Semi True) = "" + pretty (Semi False) = ";" + pad (Pos line1 col1) (Pos line2 col2) = + if line1 == line2 + then replicate (col2 - col1) ' ' + else replicate (line2 - line1) '\n' ++ replicate col2 ' ' + +instance Applicative Token where + pure a = Token a (Pos 0 0) (Pos 0 0) + Token f start _ <*> Token a _ end = Token (f a) start end + +type Line = Int +type Column = Int + +data Pos = Pos {-# Unpack #-} !Line {-# Unpack #-} !Column deriving (Eq,Ord,Show) + +instance Semigroup Pos where (<>) = mappend + +instance Monoid Pos where + mempty = Pos 0 0 + Pos line col `mappend` Pos line2 col2 = + if line2 == 0 then Pos line (col + col2) + else Pos (line + line2) col2 + +line :: Pos -> Line +line (Pos line _) = line + +column :: Pos -> Column +column (Pos _ column) = column + +-- `True` if the tokens are adjacent, with no space separating the two +touches :: Token a -> Token b -> Bool +touches (end -> t) (start -> t2) = + line t == line t2 && column t == column t2 + +type BlockName = String +type Layout = [(BlockName,Column)] + +top :: Layout -> Column +top [] = 1 +top ((_,h):_) = h + +-- todo: make Layout a NonEmpty +topBlockName :: Layout -> Maybe BlockName +topBlockName [] = Nothing +topBlockName ((name,_):_) = Just name + +topHasClosePair :: Layout -> Bool +topHasClosePair [] = False +topHasClosePair ((name,_):_) = name == "{" || name == "(" + +findNearest :: Layout -> Set BlockName -> Maybe BlockName +findNearest l ns = + case topBlockName l of + Just n -> if Set.member n ns then Just n else findNearest (pop l) ns + Nothing -> Nothing + +pop :: [a] -> [a] +pop = drop 1 + +topLeftCorner :: Pos +topLeftCorner = Pos 1 1 + +data T a = T a 
[T a] [a] | L a deriving (Functor, Foldable, Traversable) + +headToken :: T a -> a +headToken (T a _ _) = a +headToken (L a) = a + +instance Show a => Show (T a) where + show (L a) = show a + show (T open mid close) = + show open ++ "\n" + ++ indent " " (intercalateMap "\n" show mid) ++ "\n" + ++ intercalateMap "" show close + where + indent by s = by ++ (s >>= go by) + go by '\n' = '\n' : by + go _ c = [c] + +reorderTree :: ([T a] -> [T a]) -> T a -> T a +reorderTree _ l@(L _) = l +reorderTree f (T open mid close) = T open (f (reorderTree f <$> mid)) close + +tree :: [Token Lexeme] -> T (Token Lexeme) +tree toks = one toks const where + one (open@(payload -> Open _) : ts) k = many (T open) [] ts k + one (t : ts) k = k (L t) ts + one [] k = k lastErr [] where + lastErr = case drop (length toks - 1) toks of + [] -> L (Token (Err LayoutError) topLeftCorner topLeftCorner) + (t : _) -> L $ t { payload = Err LayoutError } + + many open acc [] k = k (open (reverse acc) []) [] + many open acc (t@(payload -> Close) : ts) k = k (open (reverse acc) [t]) ts + many open acc ts k = one ts $ \t ts -> many open (t:acc) ts k + +stanzas :: [T (Token Lexeme)] -> [[T (Token Lexeme)]] +stanzas = go [] where + go acc [] = [reverse acc] + go acc (t:ts) = case payload $ headToken t of + Semi _ -> reverse (t : acc) : go [] ts + _ -> go (t:acc) ts + +-- Moves type and effect declarations to the front of the token stream +-- and move `use` statements to the front of each block +reorder :: [T (Token Lexeme)] -> [T (Token Lexeme)] +reorder = join . sortWith f . 
stanzas + where + f [] = 3 :: Int + f (t : _) = case payload $ headToken t of + Open "type" -> 1 + Open "unique" -> 1 + Open "ability" -> 1 + Reserved "use" -> 0 + _ -> 3 :: Int + +lexer :: String -> String -> [Token Lexeme] +lexer scope rem = + let t = tree $ lexer0 scope rem + -- after reordering can end up with trailing semicolon at the end of + -- a block, which we remove with this pass + fixup ((payload -> Semi _) : t@(payload -> Close) : tl) = t : fixup tl + fixup [] = [] + fixup (h : t) = h : fixup t + in fixup . toList $ reorderTree reorder t + +lexer0 :: String -> String -> [Token Lexeme] +lexer0 scope rem = + tweak $ Token (Open scope) topLeftCorner topLeftCorner + : pushLayout scope [] topLeftCorner rem + where + -- hacky postprocessing pass to do some cleanup of stuff that's annoying to + -- fix without adding more state to the lexer: + -- - 1+1 lexes as [1, +1], convert this to [1, +, 1] + -- - when a semi followed by a virtual semi, drop the virtual, lets you + -- write + -- foo x = action1; + -- 2 + tweak [] = [] + tweak (h@(payload -> Semi False):(payload -> Semi True):t) = h : tweak t + tweak (h@(payload -> Reserved _):t) = h : tweak t + tweak (t1:t2@(payload -> Numeric num):rem) + | notLayout t1 && touches t1 t2 && isSigned num = + t1 : Token (SymbolyId (take 1 num) Nothing) + (start t2) + (inc $ start t2) + : Token (Numeric (drop 1 num)) (inc $ start t2) (end t2) + : tweak rem + tweak (h:t) = h : tweak t + isSigned num = all (\ch -> ch == '-' || ch == '+') $ take 1 num + -- skip whitespace and comments + goWhitespace :: Layout -> Pos -> String -> [Token Lexeme] + goWhitespace l pos rem = span' isSpace rem $ \case + (_spaces, '-':'-':'-':_rem) -> popLayout0 l pos [] + (spaces, '-':'-':rem) -> spanThru' (/= '\n') rem $ \(ignored, rem) -> + goWhitespace l (incBy ('-':'-':ignored) . 
incBy spaces $ pos) rem + (spaces, rem) -> popLayout l (incBy spaces pos) rem + + popLayout :: Layout -> Pos -> String -> [Token Lexeme] + popLayout l pos rem = case matchKeyword' layoutCloseAndOpenKeywords rem of + Nothing -> case matchKeyword' layoutCloseOnlyKeywords rem of + Nothing -> popLayout0 l pos rem + Just (kw, rem) -> + let end = incBy kw pos + in Token Close pos end + : Token (Reserved kw) pos end + : goWhitespace (pop l) (incBy kw pos) rem + Just (kw, rem) -> + let kw' = layoutCloseAndOpenKeywordMap kw l in + case closes (openingKeyword kw') kw' l pos of + (Nothing, ts) -> ts ++ recover l (incBy kw pos) rem + (Just l, ts) -> + let end = incBy kw pos + in ts ++ [Token (Open kw) pos end] ++ pushLayout kw' l end rem + + -- Examine current column and pop the layout stack + -- and emit `Semi` / `Close` tokens as needed + popLayout0 :: Layout -> Pos -> String -> [Token Lexeme] + popLayout0 l p [] = replicate (length l) $ Token Close p p + popLayout0 l p@(Pos _ c2) rem + | top l == c2 = Token (Semi True) p p : go l p rem + | top l < c2 || topHasClosePair l = go l p rem + | top l > c2 = Token Close p p : popLayout0 (pop l) p rem + | otherwise = error "impossible" + + -- todo: is there a reason we want this to be more than just: + -- go1 (top l + 1 : l) pos rem + -- looks for the next non whitespace, non-comment character, and + -- pushes its column onto the layout stack + pushLayout :: BlockName -> Layout -> Pos -> String -> [Token Lexeme] + pushLayout b l pos rem = span' isSpace rem $ \case + (_spaces, '-':'-':'-':_rem) -> + -- short circuit - everything after `---` is ignored + popLayout0 ((b,column pos):l) pos [] + (spaces, '-':'-':rem) -> spanThru' (/= '\n') rem $ \(ignored, rem) -> + pushLayout b l (incBy ('-':'-':ignored) . 
incBy spaces $ pos) rem + (spaces, rem) -> + let topcol = top l + pos' = incBy spaces pos + col' = column pos' + in + if b == "=" && col' <= topcol then + -- force closing by introducing a fake col +1 layout + popLayout0 ((b, col' + 1) : l) pos' rem + else + go ((b, col') : l) pos' rem + + -- Figure out how many elements must be popped from the layout stack + -- before finding a matching `Open` token + findClose :: String -> Layout -> Maybe Int + findClose _ [] = Nothing + findClose s ((h,_):tl) = if s == h then Just 1 else (1+) <$> findClose s tl + + -- Closes a layout block with the given open/close pair, e.g `close "(" ")"` + close :: String -> String -> Layout -> Pos -> String -> [Token Lexeme] + close open close l pos rem = case findClose open l of + Nothing -> [Token (Err $ CloseWithoutMatchingOpen open close) pos pos] + Just n -> + let closes = replicate n $ Token Close pos (incBy close pos) + in closes ++ goWhitespace (drop n l) (incBy close pos) rem + + -- If the close is well-formed, returns a new layout stack and the correct + -- number of `Close` tokens. If the close isn't well-formed (has no match), + -- `Nothing` is returned along an error token. + closes :: String -> String -> Layout -> Pos + -> (Maybe Layout, [Token Lexeme]) + closes open close l pos = case findClose open l of + Nothing -> (Nothing, + [Token (Err $ CloseWithoutMatchingOpen open close) pos (incBy close pos)]) + Just n -> + (Just $ drop n l, replicate n $ Token Close pos (incBy close pos)) + + -- assuming we've dealt with whitespace and layout, read a token + go :: Layout -> Pos -> String -> [Token Lexeme] + go l pos rem = case rem of + [] -> popLayout0 l pos [] + '?' : '\\' : c : rem -> + case parseEscapeChar c of + Just c -> + let end = inc $ inc $ inc pos in + Token (Character c) pos end : goWhitespace l end rem + Nothing -> + [Token (Err $ InvalidEscapeCharacter c) pos pos] + '?' 
: c : rem -> + let end = inc $ inc pos in + Token (Character c) pos end : goWhitespace l end rem + '[' : ':' : rem -> + let end = inc . inc $ pos in + Token (Open "[:") pos (inc . inc $ pos) : lexDoc l end rem + -- '{' and '(' both introduce a block, which is closed by '}' and ')' + -- The lexer doesn't distinguish among closing blocks: all the ways of + -- closing a block emit the same sort of token, `Close`. + -- + -- Note: within {}'s, `->` does not open a block, since `->` is used + -- inside request patterns like `{State.set s -> k}` + '{' : rem -> Token (Open "{") pos (inc pos) : pushLayout "{" l (inc pos) rem + '}' : rem -> close "{" "}" l pos rem + '(' : rem -> Token (Open "(") pos (inc pos) : pushLayout "(" l (inc pos) rem + ')' : rem -> close "(" ")" l pos rem + ';' : rem -> Token (Semi False) pos (inc pos) : goWhitespace l (inc pos) rem + ch : rem | Set.member ch delimiters -> + Token (Reserved [ch]) pos (inc pos) : goWhitespace l (inc pos) rem + op : rem@(c : _) + | isDelayOrForce op + && (isSpace c || isAlphaNum c + || Set.member c delimiters || isDelayOrForce c) -> + Token (Reserved [op]) pos (inc pos) : goWhitespace l (inc pos) rem + ':' : rem@(c : _) | isSpace c || isAlphaNum c -> + Token (Reserved ":") pos (inc pos) : goWhitespace l (inc pos) rem + '@' : rem -> + Token (Reserved "@") pos (inc pos) : goWhitespace l (inc pos) rem + '_' : rem | hasSep rem -> + Token (Blank "") pos (inc pos) : goWhitespace l (inc pos) rem + '_' : (wordyId -> Right (id, rem)) -> + let pos' = incBy id $ inc pos + in Token (Blank id) pos pos' : goWhitespace l pos' rem + '&' : '&' : rem -> + let end = incBy "&&" pos + in Token (Reserved "&&") pos end : goWhitespace l end rem + '|' : '|' : rem -> + let end = incBy "||" pos + in Token (Reserved "||") pos end : goWhitespace l end rem + '|' : c : rem | isSpace c || isAlphaNum c -> + Token (Reserved "|") pos (inc pos) : goWhitespace l (inc pos) (c:rem) + '=' : rem@(c : _) | isSpace c || isAlphaNum c -> + let end = inc pos + in 
case topBlockName l of + -- '=' does not open a layout block if within a type declaration + Just "type" -> Token (Reserved "=") pos end : goWhitespace l end rem + Just "unique" -> Token (Reserved "=") pos end : goWhitespace l end rem + Just _ -> Token (Open "=") pos end : pushLayout "=" l end rem + Nothing -> Token (Err LayoutError) pos pos : recover l pos rem + '-' : '>' : rem@(c : _) + | isSpace c || isAlphaNum c || Set.member c delimiters -> + let end = incBy "->" pos + in case topBlockName l of + Just "match-with" -> -- `->` opens a block when pattern-matching only + Token (Open "->") pos end : pushLayout "->" l end rem + Just "cases" -> -- `->` opens a block when pattern-matching only + Token (Open "->") pos end : pushLayout "->" l end rem + Just _ -> Token (Reserved "->") pos end : goWhitespace l end rem + Nothing -> Token (Err LayoutError) pos pos : recover l pos rem + + -- string literals and backticked identifiers + '"' : rem -> case splitStringLit rem of + Right (delta, lit, rem) -> let end = pos <> delta in + Token (Textual lit) pos end : goWhitespace l end rem + Left (TextLiteralMissingClosingQuote _) -> + [Token (Err $ TextLiteralMissingClosingQuote rem) pos pos] + Left err -> [Token (Err err) pos pos] + '`' : rem -> case wordyId rem of + Left e -> Token (Err e) pos pos : recover l pos rem + Right (id, rem) -> + if ['#'] `isPrefixOf` rem then + case shortHash rem of + Left e -> Token (Err e) pos pos : recover l pos rem + Right (h, rem) -> + let end = inc . incBy id . incBy (SH.toString h) . inc $ pos + in Token (Backticks id (Just h)) pos end + : goWhitespace l end (pop rem) + else + let end = inc . incBy id . 
inc $ pos + in Token (Backticks id Nothing) pos end + : goWhitespace l end (pop rem) + + rem@('#' : _) -> case shortHash rem of + Left e -> Token (Err e) pos pos : recover l pos rem + Right (h, rem) -> + let end = incBy (SH.toString h) pos + in Token (Hash h) pos end : goWhitespace l end rem + -- keywords and identifiers + (symbolyId -> Right (id, rem')) -> case numericLit rem of + Right (Just (num, rem)) -> + let end = incBy num pos + in Token (Numeric num) pos end : goWhitespace l end rem + _ -> if ['#'] `isPrefixOf` rem' then + case shortHash rem' of + Left e -> Token (Err e) pos pos : recover l pos rem' + Right (h, rem) -> + let end = incBy id . incBy (SH.toString h) $ pos + in Token (SymbolyId id (Just h)) pos end + : goWhitespace l end rem + else + let end = incBy id pos + in Token (SymbolyId id Nothing) pos end : goWhitespace l end rem' + (wordyId -> Right (id, rem)) -> + if ['#'] `isPrefixOf` rem then + case shortHash rem of + Left e -> Token (Err e) pos pos : recover l pos rem + Right (h, rem) -> + let end = incBy id . 
incBy (SH.toString h) $ pos + in Token (WordyId id (Just h)) pos end + : goWhitespace l end rem + else let end = incBy id pos + in Token (WordyId id Nothing) pos end : goWhitespace l end rem + (matchKeyword -> Just (kw,rem)) -> + let end = incBy kw pos in + case kw of + -- `unique type` lexes as [Open "unique", Reserved "type"] + -- `type` lexes as [Open "type"] + -- `unique ability` lexes as [Open "unique", Reserved "ability"] + -- `ability` lexes as [Open "ability"] + kw@"unique" -> + Token (Open kw) pos end + : goWhitespace ((kw, column $ inc pos) : l) end rem + kw@"ability" | topBlockName l /= Just "unique" -> + Token (Open kw) pos end + : goWhitespace ((kw, column $ inc pos) : l) end rem + kw@"type" | topBlockName l /= Just "unique" -> + Token (Open kw) pos end + : goWhitespace ((kw, column $ inc pos) : l) end rem + kw | Set.member kw layoutKeywords -> + Token (Open kw) pos end : pushLayout kw l end rem + | otherwise -> Token (Reserved kw) pos end : goWhitespace l end rem + + -- numeric literals + rem -> case numericLit rem of + Right (Just (num, rem)) -> + let end = incBy num pos in Token (Numeric num) pos end : goWhitespace l end rem + Right Nothing -> Token (Err UnknownLexeme) pos pos : recover l pos rem + Left e -> Token (Err e) pos pos : recover l pos rem + + lexDoc l pos rem = case span (\c -> isSpace c && not (c == '\n')) rem of + (spaces,rem) -> docBlob l pos' rem pos' [] + where pos' = incBy spaces pos + + docBlob l pos rem blobStart acc = case rem of + '@' : (hqToken (inc pos) -> Just (tok, rem)) -> + let pos' = inc $ end tok in + Token (Textual (reverse acc)) blobStart pos : + tok : + docBlob l pos' rem pos' [] + '@' : (docType (inc pos) -> Just (typTok, pos', rem)) -> + Token (Textual (reverse acc)) blobStart pos : case rem of + (hqToken pos' -> Just (tok, rem)) -> + let pos'' = inc (end tok) in + typTok : tok : docBlob l pos'' rem pos' [] + _ -> recover l pos rem + '\\' : '@' : rem -> docBlob l (incBy "\\@" pos) rem blobStart ('@':acc) + '\\' : 
':' : ']' : rem -> docBlob l (incBy "\\:]" pos) rem blobStart (']':':':acc) + ':' : ']' : rem -> + let pos' = inc . inc $ pos in + (if null acc then id + else (Token (Textual (reverse + $ dropWhile (\c -> isSpace c && not (c == '\n')) acc)) blobStart pos :)) $ + Token Close pos pos' : goWhitespace l pos' rem + [] -> recover l pos rem + ch : rem -> docBlob l (incBy [ch] pos) rem blobStart (ch:acc) + + docType :: Pos -> String -> Maybe (Token Lexeme, Pos, String) + docType pos rem = case rem of + -- this crazy one liner parses [], as a pattern match + '[' : (span (/= ']') -> (typ, ']' : (span isSpace -> (spaces, rem)))) -> + -- advance past [, , ], + let pos' = incBy typ . inc . incBy spaces . inc $ pos in + -- the reserved token doesn't include the `[]` chars + Just (Token (Reserved typ) (inc pos) (incBy typ . inc $ pos), pos', rem) + _ -> Nothing + + hqToken :: Pos -> String -> Maybe (Token Lexeme, String) + hqToken pos rem = case rem of + (shortHash -> Right (h, rem)) -> + Just (Token (Hash h) pos (incBy (SH.toString h) pos), rem) + (wordyId -> Right (id, rem)) -> case rem of + (shortHash -> Right (h, rem)) -> + Just (Token (WordyId id $ Just h) pos (incBy id . incBy (SH.toString h) $ pos), rem) + _ -> Just (Token (WordyId id Nothing) pos (incBy id pos), rem) + (symbolyId -> Right (id, rem)) -> case rem of + (shortHash -> Right (h, rem)) -> + Just (Token (SymbolyId id $ Just h) pos (incBy id . incBy (SH.toString h) $ pos), rem) + _ -> Just (Token (SymbolyId id Nothing) pos (incBy id pos), rem) + _ -> Nothing + + recover _l _pos _rem = [] + +isDelayOrForce :: Char -> Bool +isDelayOrForce op = op == '\''|| op == '!' 
+ +matchKeyword :: String -> Maybe (String,String) +matchKeyword = matchKeyword' keywords + +matchKeyword' :: Set String -> String -> Maybe (String,String) +matchKeyword' keywords s = case break isSep s of + (kw, rem) | Set.member kw keywords -> Just (kw, rem) + _ -> Nothing + +-- Split into a string literal and the remainder, and a delta which includes +-- both the starting and ending `"` character +-- The input string should only start with a '"' if the string literal is empty +splitStringLit :: String -> Either Err (Pos, String, String) +splitStringLit = go (inc mempty) "" where + -- n tracks the raw character delta of this literal + go !n !acc ('\\':s:rem) = case parseEscapeChar s of + Just e -> go (inc . inc $ n) (e:acc) rem + Nothing -> Left $ InvalidEscapeCharacter s + go !n !acc ('"':rem) = Right (inc n, reverse acc, rem) + go !n !acc (x:rem) = go (inc n) (x:acc) rem + go _ _ [] = Left $ TextLiteralMissingClosingQuote "" + +-- Mapping between characters and their escape codes. Use parse/showEscapeChar +-- to convert. +escapeChars :: [(Char, Char)] +escapeChars = + [ ('0', '\0') + , ('a', '\a') + , ('b', '\b') + , ('f', '\f') + , ('n', '\n') + , ('r', '\r') + , ('t', '\t') + , ('v', '\v') + , ('s', ' ') + , ('\'', '\'') + , ('"', '"') + , ('\\', '\\') + ] + +-- Map a escape symbol to it's character literal +parseEscapeChar :: Char -> Maybe Char +parseEscapeChar c = + Map.lookup c (Map.fromList escapeChars) + +-- Inverse of parseEscapeChar; map a character to its escaped version: +showEscapeChar :: Char -> Maybe Char +showEscapeChar c = + Map.lookup c (Map.fromList [(x, y) | (y, x) <- escapeChars]) + +numericLit :: String -> Either Err (Maybe (String,String)) +numericLit = go + where + go ('+':s) = go2 "+" s + go ('-':s) = go2 "-" s + go s = go2 "" s + go2 sign s = case span isDigit s of + (num@(_:_), []) -> pure $ pure (sign ++ num, []) + (num@(_:_), '.':rem) -> case span isDigit rem of + (fractional@(_:_), []) -> + pure $ pure (sign ++ num ++ "." 
++ fractional, []) + (fractional@(_:_), c:rem) + | c `elem` "eE" -> goExp (sign ++ num ++ "." ++ fractional) rem + | isSep c -> pure $ pure (sign ++ num ++ "." ++ fractional, c:rem) + | otherwise -> pure Nothing + ([], _) -> Left (MissingFractional (sign ++ num ++ ".")) + (num@(_:_), c:rem) | c `elem` "eE" -> goExp (sign ++ num) rem + (num@(_:_), c:rem) -> pure $ pure (sign ++ num, c:rem) + ([], _) -> pure Nothing + goExp signNum rem = case rem of + ('+':s) -> goExp' signNum "+" s + ('-':s) -> goExp' signNum "-" s + s -> goExp' signNum "" s + goExp' signNum expSign exp = case span isDigit exp of + (_:_, []) -> + pure $ pure (signNum ++ "e" ++ expSign ++ exp, []) + (exp'@(_:_), c:rem) + | isSep c -> pure $ pure (signNum ++ "e" ++ expSign ++ exp', c:rem) + | otherwise -> pure Nothing + ([], _) -> Left (MissingExponent (signNum ++ "e" ++ expSign)) + +isSep :: Char -> Bool +isSep c = isSpace c || Set.member c delimiters + +hasSep :: String -> Bool +hasSep [] = True +hasSep (ch:_) = isSep ch + +-- Not a keyword, '.' delimited list of wordyId0 (should not include a trailing '.') +wordyId0 :: String -> Either Err (String, String) +wordyId0 s = span' wordyIdChar s $ \case + (id@(ch:_), rem) | not (Set.member id keywords) + && wordyIdStartChar ch + -> Right (id, rem) + (id, _rem) -> Left (InvalidWordyId id) + +wordyIdStartChar :: Char -> Bool +wordyIdStartChar ch = isAlpha ch || isEmoji ch || ch == '_' + +wordyIdChar :: Char -> Bool +wordyIdChar ch = + isAlphaNum ch || isEmoji ch || ch `elem` "_!'" + +isEmoji :: Char -> Bool +isEmoji c = c >= '\x1F300' && c <= '\x1FAFF' + +symbolyId :: String -> Either Err (String, String) +symbolyId r@('.':s) + | s == "" = symbolyId0 r -- + | isSpace (head s) = symbolyId0 r -- lone dot treated as an operator + | isDelimiter (head s) = symbolyId0 r -- + | otherwise = (\(s, rem) -> ('.':s, rem)) <$> symbolyId' s +symbolyId s = symbolyId' s + +-- Is a '.' 
delimited list of wordyId, with a final segment of `symbolyId0` +symbolyId' :: String -> Either Err (String, String) +symbolyId' s = case wordyId0 s of + Left _ -> symbolyId0 s + Right (wid, '.':rem) -> case symbolyId rem of + Left e -> Left e + Right (rest, rem) -> Right (wid <> "." <> rest, rem) + Right (w,_) -> Left (InvalidSymbolyId w) + +wordyId :: String -> Either Err (String, String) +wordyId ('.':s) = (\(s,rem) -> ('.':s,rem)) <$> wordyId' s +wordyId s = wordyId' s + +-- Is a '.' delimited list of wordyId +wordyId' :: String -> Either Err (String, String) +wordyId' s = case wordyId0 s of + Left e -> Left e + Right (wid, '.':rem@(ch:_)) | wordyIdStartChar ch -> case wordyId rem of + Left e -> Left e + Right (rest, rem) -> Right (wid <> "." <> rest, rem) + Right (w,rem) -> Right (w,rem) + +-- Is a `ShortHash` +shortHash :: String -> Either Err (ShortHash, String) +shortHash s = case SH.fromString potentialHash of + Nothing -> Left (InvalidShortHash potentialHash) + Just x -> Right (x, rem) + where (potentialHash, rem) = break ((||) <$> isSpace <*> (== '`')) s + +-- Returns either an error or an id and a remainder +symbolyId0 :: String -> Either Err (String, String) +symbolyId0 s = span' symbolyIdChar s $ \case + (id@(_:_), rem) | not (Set.member id reservedOperators) -> Right (id, rem) + (id, _rem) -> Left (InvalidSymbolyId id) + +symbolyIdChar :: Char -> Bool +symbolyIdChar ch = Set.member ch symbolyIdChars + +symbolyIdChars :: Set Char +symbolyIdChars = Set.fromList "!$%^&*-=+<>.~\\/|:" + +keywords :: Set String +keywords = Set.fromList [ + "if", "then", "else", "forall", "∀", + "handle", "with", "unique", + "where", "use", + "true", "false", + "type", "ability", "alias", "typeLink", "termLink", + "let", "namespace", "match", "cases"] + +-- These keywords introduce a layout block +layoutKeywords :: Set String +layoutKeywords = + Set.fromList [ + "if", "handle", "let", "where", "match", "cases" + ] + +-- These keywords end a layout block and begin another 
layout block +layoutCloseAndOpenKeywords :: Set String +layoutCloseAndOpenKeywords = Set.fromList ["then", "else", "with"] + +-- Use a transformed block name to disambiguate certain keywords +layoutCloseAndOpenKeywordMap :: String -- close-and-open keyword + -> Layout -- layout + -> BlockName -- transformed blockname for keyword +layoutCloseAndOpenKeywordMap "with" l = + case findNearest l (Set.fromList ["handle", "match"]) of + Just "match" -> "match-with" + Just "handle" -> "handle-with" + _ -> "with" +layoutCloseAndOpenKeywordMap kw _ = kw + +openingKeyword :: BlockName -> String +openingKeyword "then" = "if" +openingKeyword "else" = "then" +openingKeyword "with" = "match or handle" -- hack!! +openingKeyword "match-with" = "match" +openingKeyword "handle-with" = "handle" +openingKeyword kw = error $ "Not sure what the opening keyword is for: " <> kw + +-- These keywords end a layout block +layoutCloseOnlyKeywords :: Set String +layoutCloseOnlyKeywords = Set.fromList ["}"] + +delimiters :: Set Char +delimiters = Set.fromList "()[]{},?;" + +isDelimiter :: Char -> Bool +isDelimiter ch = Set.member ch delimiters + +reservedOperators :: Set String +reservedOperators = Set.fromList ["->", ":", "&&", "||"] + +inc :: Pos -> Pos +inc (Pos line col) = Pos line (col + 1) + +incBy :: String -> Pos -> Pos +incBy rem pos@(Pos line col) = case rem of + [] -> pos + '\r':rem -> incBy rem $ Pos line col + '\n':rem -> incBy rem $ Pos (line + 1) 1 + _:rem -> incBy rem $ Pos line (col + 1) + +debugLex'' :: [Token Lexeme] -> String +debugLex'' = show . fmap payload . tree + +debugLex' :: String -> String +debugLex' = debugLex'' . lexer "debugLex" + +debugLex''' :: String -> String -> String +debugLex''' s = debugLex'' . 
lexer s + +span' :: (a -> Bool) -> [a] -> (([a],[a]) -> r) -> r +span' f a k = k (span f a) + +spanThru' :: (a -> Bool) -> [a] -> (([a],[a]) -> r) -> r +spanThru' f a k = case span f a of + (l, []) -> k (l, []) + (l, lz:r) -> k (l ++ [lz], r) diff --git a/parser-typechecker/src/Unison/NamePrinter.hs b/parser-typechecker/src/Unison/NamePrinter.hs new file mode 100644 index 0000000000..d86d94d3a6 --- /dev/null +++ b/parser-typechecker/src/Unison/NamePrinter.hs @@ -0,0 +1,81 @@ +module Unison.NamePrinter where + +import Unison.Prelude + +import qualified Unison.HashQualified as HQ +import qualified Unison.HashQualified' as HQ' +import Unison.LabeledDependency (LabeledDependency) +import qualified Unison.LabeledDependency as LD +import Unison.Name (Name) +import qualified Unison.Name as Name +import Unison.Reference (Reference) +import Unison.Referent (Referent) +import Unison.ShortHash (ShortHash) +import qualified Unison.ShortHash as SH +import Unison.Util.SyntaxText (SyntaxText) +import qualified Unison.Util.SyntaxText as S +import Unison.Util.Pretty (Pretty) +import qualified Unison.Util.Pretty as PP + +prettyName :: IsString s => Name -> Pretty s +prettyName = PP.text . Name.toText + +prettyHashQualified :: HQ.HashQualified -> Pretty SyntaxText +prettyHashQualified hq = styleHashQualified' id (fmt $ S.HashQualifier hq) hq + +prettyHashQualified' :: HQ'.HashQualified -> Pretty SyntaxText +prettyHashQualified' = prettyHashQualified . HQ'.toHQ + +prettyHashQualified0 :: IsString s => HQ.HashQualified -> Pretty s +prettyHashQualified0 = PP.text . HQ.toText + +-- | Pretty-print a reference as a name and the given number of characters of +-- its hash. +prettyNamedReference :: Int -> Name -> Reference -> Pretty SyntaxText +prettyNamedReference len name = + prettyHashQualified . HQ.take len . HQ.fromNamedReference name + +-- | Pretty-print a referent as a name and the given number of characters of its +-- hash. 
-- | Render a 'HQ.HashQualified', applying one style function to the
-- name portion and another to the hash portion. The combined form is
-- grouped so it breaks as a unit.
styleHashQualified' ::
  IsString s =>
  (Pretty s -> Pretty s) ->
  (Pretty s -> Pretty s) ->
  HQ.HashQualified ->
  Pretty s
styleHashQualified' nameStyle hashStyle hq = case hq of
  HQ.NameOnly n -> nameStyle (prettyName n)
  HQ.HashOnly h -> hashStyle (prettyShortHash h)
  HQ.HashQualified n h ->
    PP.group (nameStyle (prettyName n) <> hashStyle (prettyShortHash h))
-- | A source of unique names, keyed by a source position and a desired
-- length (in base32hex characters). 'mempty' never produces a name;
-- @f <> g@ tries @f@ first and falls back to @g@.
newtype UniqueName = UniqueName (L.Pos -> Int -> Maybe Text)

instance Semigroup UniqueName where
  -- The canonical definition lives on '(<>)'; previously '(<>)' was
  -- aliased to a Monoid-defined 'mappend', the "noncanonical" pattern
  -- GHC warns about (-Wnoncanonical-monoid-instances).
  UniqueName f <> UniqueName g =
    UniqueName $ \pos len -> f pos len <|> g pos len

instance Monoid UniqueName where
  mempty = UniqueName (\_ _ -> Nothing)
  -- 'mappend' defaults to '(<>)'.
-- | Ask the environment's name generator for a fresh name of the given
-- length, peeking at the next token's position to seed it. If the
-- generator declines, fall back to a hash of the position itself.
uniqueName :: Var v => Int -> P v Text
uniqueName len = do
  UniqueName mkName <- asks uniqueNames
  pos <- L.start <$> P.lookAhead anyToken
  let fallback = Hash.base32Hex (Hash.fromBytes (encodeUtf8 (Text.pack (show pos))))
  pure (fromMaybe fallback (mkName pos len))
-- | The source line an annotation starts on, when it has a concrete
-- location ('Intrinsic' and 'External' have none).
startingLine :: Ann -> Maybe L.Line
startingLine = \case
  Ann s _ -> Just (L.line s)
  _ -> Nothing
-- | Lex the input and run the parser over the resulting token stream,
-- suffixifying the environment's names first. When 'debug' is on, the
-- lexer output is traced before parsing.
run' :: Ord v => P v a -> String -> String -> ParsingEnv -> Either (Err v) a
run' p s name env = runParserT parser name (Input tokens) env'
  where
    tokens
      | debug = L.lexer name (trace (L.debugLex''' "lexer receives" s) s)
      | otherwise = L.lexer name s
    parser = traceRemainingTokens "parser receives" *> p
    env' = env { names = Names.suffixify (names env) }
-- | Parse a wordy identifier, e.g. @Foo@, discarding any hash.
-- A blank (@_foo@) also succeeds, producing a variable named with a
-- leading underscore.
wordyDefinitionName :: Var v => P v (L.Token v)
wordyDefinitionName = queryToken toVar
  where
    toVar (L.WordyId s _) = Just (Var.nameds s)
    toVar (L.Blank s)     = Just (Var.nameds ('_' : s))
    toVar _               = Nothing
-- | Parse a hash-qualified alphanumeric identifier: a bare name, a bare
-- hash, a name#hash pair, or a non-empty blank (as @_name@).
hqWordyId_ :: Ord v => P v (L.Token HQ.HashQualified)
hqWordyId_ = queryToken go
  where
    go (L.WordyId "" (Just h)) = Just (HQ.HashOnly h)
    go (L.WordyId s (Just h))  = Just (HQ.HashQualified (Name.fromString s) h)
    go (L.WordyId s Nothing)   = Just (HQ.NameOnly (Name.fromString s))
    go (L.Hash h)              = Just (HQ.HashOnly h)
    go (L.Blank s) | not (null s) = Just (HQ.NameOnly (Name.fromString ('_' : s)))
    go _                       = Nothing
-- | Parse a sequence literal like @[a, b, c]@. Separating commas and
-- the closing bracket may be preceded/followed by virtual semicolons.
-- The result is built by @f@ from the bracket span and the elements.
seq :: Ord v => (Ann -> [a] -> a) -> P v a -> P v a
seq f p = do
  open <- reserved "["
  elems <- elements
  close <- trailing
  pure (f (ann open <> ann close) elems)
  where
    trailing = optional semi *> reserved "]"
    sep = P.try (optional semi *> reserved "," <* optional semi)
    elements = sepBy sep p
-- | Give this var an id based on its source position — a useful trick
-- to obtain a variable whose id won't collide with any other id in the
-- file, e.g. @positionalVar a Var.missingResult@.
positionalVar :: (Annotated a, Var v) => a -> v -> v
positionalVar a v = Var.freshenId uniqueId v
  where
    pos = start (ann a)
    line = fromIntegral (L.line pos)
    col = fromIntegral (L.column pos)
    -- injective as long as no line has more than 50k characters
    uniqueId = line * 50000 + col
-- | Read a file strictly (the 'readFile' in scope here is
-- 'Data.Text.IO.readFile') and parse it as a Unison file with the given
-- parsing environment.
readAndParseFile
  :: Var v
  => Parser.ParsingEnv
  -> FilePath
  -> IO (Either (Parser.Err v) (UnisonFile v Ann))
readAndParseFile penv fileName = do
  source <- Text.unpack <$> readFile fileName
  pure (parseFile fileName source penv)
-- | A general notion of a path into a tree-like structure: there is a
-- root (empty) path, paths compose, and any two paths can be factored
-- through their lowest common ancestor.
--
-- Laws:
--   * @extend root p == p@ and @extend p root == p@
--   * @extend@ is associative: @extend (extend p1 p2) p3 == extend p1 (extend p2 p3)@
--   * @lca root p == root@ and @lca p root == root@
--   * @case factor p p2 of (r,(p',p2')) -> extend r p' == p && extend r p2' == p2@
class Path p where
  -- | The root or empty path.
  root :: p
  -- | Concatenate two paths.
  extend :: p -> p -> p
  -- | The lowest common ancestor, plus the path from it to each argument.
  factor :: p -> p -> (p, (p, p))
  -- | Drop the last step; satisfies
  -- @factor (parent p) p == (parent p, (root, tl))@ and
  -- @extend (parent p) tl == p@.
  parent :: p -> p

-- | The lowest common ancestor of two paths.
lca :: Path p => p -> p -> p
lca x y = fst (factor x y)

-- | @isSubpath p1 p2@ holds when @p2 == extend p1 x@ for some @x@.
isSubpath :: (Eq p, Path p) => p -> p -> Bool
isSubpath p1 p2 = lca p1 p2 == p1

instance Eq a => Path (Maybe a) where
  root = Nothing
  extend = (<|>)
  parent = const Nothing
  factor p1 p2
    | p1 == p2  = (p1, (Nothing, Nothing))
    | otherwise = (Nothing, (p1, p2))

instance Eq a => Path [a] where
  root = []
  extend = (++)
  parent [] = []
  parent p = init p
  factor p1 p2 = (take n p1, (drop n p1, drop n p2))
    where n = length (takeWhile (uncurry (==)) (zip p1 p2))

instance Path () where
  root = ()
  extend _ _ = ()
  parent _ = ()
  factor u _ = (u, (u, u))
-- declarationPPE uses the full name for references that are
-- part of the same cycle as the input reference, used to ensure
-- recursive definitions are printed properly, for instance:
--
-- foo.bar x = foo.bar x
-- and not
-- foo.bar x = bar x
declarationPPE :: PrettyPrintEnvDecl -> Reference -> PrettyPrintEnv
declarationPPE ppe rd = PrettyPrintEnv tm ty where
  -- every reference in the same definition cycle (component) as rd
  comp = Reference.members (Reference.componentFor rd)
  -- term lookup: plain refs inside the cycle get the unsuffixified
  -- (full) name; everything else gets the short, suffixified name
  tm r0@(Referent.Ref r) = if Set.member r comp
    then terms (unsuffixifiedPPE ppe) r0
    else terms (suffixifiedPPE ppe) r0
  -- constructors/requests are never part of rd's cycle, so suffixify
  tm r = terms (suffixifiedPPE ppe) r
  -- type lookup: same in-cycle/out-of-cycle split as for terms
  ty r = if Set.member r comp then types (unsuffixifiedPPE ppe) r
    else types (suffixifiedPPE ppe) r
-- Canonical Semigroup/Monoid instances: '(<>)' is defined directly in
-- Semigroup and Monoid's 'mappend' defaults to it. (Previously Monoid
-- defined 'mappend = unionLeft' and Semigroup aliased '(<>) = mappend',
-- the noncanonical pattern GHC warns about.) '(<>)' is left-biased.
instance Semigroup PrettyPrintEnv where
  (<>) = unionLeft

instance Monoid PrettyPrintEnv where
  mempty = PrettyPrintEnv (const Nothing) (const Nothing)
-- | Drop leading and trailing blank lines (lines that are empty or all
-- whitespace). The result always ends with a newline, since it is
-- rebuilt with 'unlines'.
stripSurroundingBlanks :: String -> String
stripSurroundingBlanks =
  unlines . dropWhile blank . dropWhileEnd blank . lines
  where
    blank = all isSpace
-- | Print the message via 'putPrettyLn' (which pads with blank lines
-- and pages through less) unless it is empty, in which case print
-- nothing at all.
putPrettyNonempty :: P.Pretty P.ColorText -> IO ()
putPrettyNonempty msg
  | msg == mempty = pure ()
  | otherwise = putPrettyLn msg
-- | Render a type in the given style, followed by a "from right here"
-- excerpt of the source span the type was inferred from.
showTypeWithProvenance
  :: (Var v, Annotated a, Ord style)
  => Env
  -> String
  -> style
  -> Type v a
  -> Pretty (AnnotatedText style)
showTypeWithProvenance env src color typ = mconcat
  [ style color (renderType' env typ)
  , ".\n"
  , fromOverHere' src [styleAnnotated color typ] []
  ]
AT.annotate sty $ fromString str + +stylePretty :: Color -> Pretty ColorText -> Pretty ColorText +stylePretty sty str = Pr.map (AT.annotate sty) str + +describeStyle :: Color -> Pretty ColorText +describeStyle ErrorSite = "in " <> style ErrorSite "red" +describeStyle Type1 = "in " <> style Type1 "blue" +describeStyle Type2 = "in " <> style Type2 "green" +describeStyle _ = "" + +-- Render an informational typechecking note +renderTypeInfo + :: forall v loc sty + . (Var v, Annotated loc, Ord loc, Show loc) + => TypeInfo v loc + -> Env + -> Pretty (AnnotatedText sty) +renderTypeInfo i env = case i of + TopLevelComponent {..} -> case definitions of + [def] -> + Pr.wrap "🌟 I found and typechecked a definition:" <> Pr.newline <> mconcat + (renderOne def) + [] -> mempty + _ -> + Pr.wrap "🎁 These mutually dependent definitions typechecked:" + <> Pr.newline + <> intercalateMap Pr.newline (foldMap ("\t" <>) . renderOne) definitions + where + renderOne :: IsString s => (v, Type v loc, RedundantTypeAnnotation) -> [s] + renderOne (v, typ, _) = + [fromString . Text.unpack $ Var.name v, " : ", renderType' env typ] + + +-- Render a type error +renderTypeError + :: forall v loc + . 
(Var v, Annotated loc, Ord loc, Show loc) + => TypeError v loc + -> Env + -> String + -> Pretty ColorText +renderTypeError e env src = case e of + BooleanMismatch {..} -> mconcat + [ Pr.wrap $ mconcat + [ preamble + , " " + , style Type1 "Boolean" + , ", but this one is " + , style Type2 (renderType' env foundType) + , ":" + ] + , Pr.lineSkip + , showSourceMaybes src [siteS] + , fromOverHere' src [typeS] [siteS] + , debugNoteLoc $ mconcat + [ "loc debug:" + , "\n mismatchSite: " + , annotatedToEnglish mismatchSite + , "\n foundType: " + , annotatedToEnglish foundType + , "\n" + ] + , debugSummary note + ] + where + siteS = styleAnnotated Type2 mismatchSite + typeS = styleAnnotated Type2 foundType + preamble = case getBooleanMismatch of + CondMismatch -> + "The condition for an " + <> style ErrorSite "if" + <> "-expression has to be" + AndMismatch -> + "The arguments to " <> style ErrorSite "and" <> " have to be" + OrMismatch -> + "The arguments to " <> style ErrorSite "or" <> " have to be" + GuardMismatch -> + "The guard expression for a " + <> style ErrorSite "match" + <> "/" + <> style ErrorSite "with" + <> " has to be" + + ExistentialMismatch {..} -> mconcat + [ Pr.wrap $ mconcat + [ preamble + , " " + , "Here, one is " + , style Type1 (renderType' env expectedType) + , " and another is " + , style Type2 (renderType' env foundType) + , ":"] + , Pr.lineSkip + , showSourceMaybes src [mismatchSiteS, expectedLocS] + , fromOverHere' src + [expectedTypeS, mismatchedTypeS] + [mismatchSiteS, expectedLocS] + , intLiteralSyntaxTip mismatchSite expectedType + , debugNoteLoc $ mconcat + [ "\nloc debug:" + , "\n mismatchSite: " + , annotatedToEnglish mismatchSite + , "\n foundType: " + , annotatedToEnglish foundType + , "\n expectedType: " + , annotatedToEnglish expectedType + , "\n expectedLoc: " + , annotatedToEnglish expectedLoc + , "\n" + ] + , debugSummary note + ] + where + mismatchedTypeS = styleAnnotated Type2 foundType + mismatchSiteS = styleAnnotated Type2 
mismatchSite + expectedTypeS = styleAnnotated Type1 expectedType + expectedLocS = styleAnnotated Type1 expectedLoc + preamble = case getExistentialMismatch of + IfBody -> mconcat + [ "The " + , style ErrorSite "else" + , " clause of an " + , style ErrorSite "if" + , " expression needs to have the same type as the " + , style ErrorSite "then" + , " clause." + ] + VectorBody -> "The elements of a vector all need to have the same type." + CaseBody -> mconcat + [ "Each case of a " + , style ErrorSite "match" + , "/" + , style ErrorSite "with" + , " expression " + , "need to have the same type." + ] + NotFunctionApplication {..} -> mconcat + [ "This looks like a function call, but with a " + , style Type1 (renderType' env ft) + , " where the function should be. Are you missing an operator?\n\n" + , annotatedAsStyle Type1 src f + , debugSummary note + ] + FunctionApplication {..} + -> let + fte = Type.removePureEffects ft + fteFreeVars = Set.map TypeVar.underlying $ ABT.freeVars fte + showVar (v, _t) = Set.member v fteFreeVars + solvedVars' = filter showVar solvedVars + in + mconcat + [ "The " + , ordinal argNum + , " argument to the function " + , style ErrorSite (renderTerm env f) + , " is " + , style Type2 (renderType' env foundType) + , ", but I was expecting " + , style Type1 (renderType' env expectedType) + , ":\n\n" + , showSourceMaybes src + [ (, Type1) <$> rangeForAnnotated expectedType + , (, Type2) <$> rangeForAnnotated foundType + , (, Type2) <$> rangeForAnnotated arg + , (, ErrorSite) <$> rangeForAnnotated f ] + , intLiteralSyntaxTip arg expectedType + -- todo: factor this out and use in ExistentialMismatch and any other + -- "recursive subtypes" situations + , case leafs of + Nothing -> mempty + Just (foundLeaf, expectedLeaf) -> mconcat + [ "\n" + , "More specifically, I found " + , style Type2 (renderType' env foundLeaf) + , " where I was expecting " + , style Type1 (renderType' env expectedLeaf) + , ":\n\n" + , showSourceMaybes + src + [ (, Type1) <$> 
rangeForAnnotated expectedLeaf + , (, Type2) <$> rangeForAnnotated foundLeaf + ] + ] + , case solvedVars' of + _ : _ -> + let + go :: (v, C.Type v loc) -> Pretty ColorText + go (v, t) = mconcat + [ " " + , renderVar v + , " = " + , style ErrorSite (renderType' env t) + , ", from here:\n\n" + , showSourceMaybes + src + [(, ErrorSite) <$> rangeForAnnotated t] + , "\n" + ] + in + mconcat + [ "\n" + , "because the " + , style ErrorSite (renderTerm env f) + , " function has type" + , "\n\n" + , " " + , renderType' env fte + , "\n\n" + , "where:" + , "\n\n" + , mconcat (go <$> solvedVars') + ] + [] -> mempty + , debugNoteLoc + . mconcat + $ [ "\nloc debug:" + , style ErrorSite "\n f: " + , annotatedToEnglish f + , style Type2 "\n foundType: " + , annotatedToEnglish foundType + , style Type1 "\n expectedType: " + , annotatedToEnglish expectedType + -- , "\n expectedLoc: ", annotatedToEnglish expectedLoc + ] + , debugSummary note + ] + Mismatch {..} -> mconcat + [ "I found a value of type " + , style Type1 (renderType' env foundLeaf) + , " where I expected to find one of type " + , style Type2 (renderType' env expectedLeaf) + , ":\n\n" + , showSourceMaybes + src + [ -- these are overwriting the colored ranges for some reason? + -- (,Color.ForceShow) <$> rangeForAnnotated mismatchSite + -- , (,Color.ForceShow) <$> rangeForType foundType + -- , (,Color.ForceShow) <$> rangeForType expectedType + -- , + (, Type1) <$> rangeForAnnotated mismatchSite + , (, Type2) <$> rangeForAnnotated expectedLeaf + ] + , fromOverHere' src + [styleAnnotated Type1 foundLeaf] + [styleAnnotated Type1 mismatchSite] + , intLiteralSyntaxTip mismatchSite expectedType + , debugNoteLoc + . 
mconcat + $ [ "\nloc debug:" + , "\n mismatchSite: " + , annotatedToEnglish mismatchSite + , "\n foundType: " + , annotatedToEnglish foundType + , "\n foundLeaf: " + , annotatedToEnglish foundLeaf + , "\n expectedType: " + , annotatedToEnglish expectedType + , "\n expectedLeaf: " + , annotatedToEnglish expectedLeaf + , "\n" + ] + , debugSummary note + ] + AbilityCheckFailure {..} -> mconcat + [ "The expression " + , describeStyle ErrorSite + , " " + , case toList requested of + [] -> error "unpossible" + [e] -> "needs the {" <> renderType' env e <> "} ability," + requested -> + " needs these abilities: {" + <> commas (renderType' env) requested + <> "}," + , " but " + , case toList ambient of + [] -> "this location does not have access to any abilities." + [e] -> + "this location only has access to the {" + <> renderType' env e + <> "} ability," + ambient -> + "this location only has access to these abilities: " + <> "{" + <> commas (renderType' env) ambient + <> "}" + , "\n\n" + , annotatedAsErrorSite src abilityCheckFailureSite + , debugSummary note + ] + UnguardedLetRecCycle vs locs _ -> mconcat + [ "These definitions depend on each other cyclically but aren't guarded " + , "by a lambda: " <> intercalateMap ", " renderVar vs + , "\n" + , showSourceMaybes src [ (,ErrorSite) <$> rangeForAnnotated loc | loc <- locs ]] + + UnknownType {..} -> mconcat [ + if ann typeSite == Intrinsic then + "I don't know about the builtin type " <> style ErrorSite (renderVar unknownTypeV) <> ". " + else if ann typeSite == External then + "I don't know about the type " <> style ErrorSite (renderVar unknownTypeV) <> ". " + else + "I don't know about the type " <> style ErrorSite (renderVar unknownTypeV) <> ":\n" + <> annotatedAsErrorSite src typeSite + , "Make sure it's imported and spelled correctly." 
+ ] + UnknownTerm {..} -> + let (correct, wrongTypes, wrongNames) = + foldr sep id suggestions ([], [], []) + sep (C.Suggestion name typ _ match) r = + case match of + C.Exact -> (_1 %~ ((name, typ) :)) . r + C.WrongType -> (_2 %~ ((name, typ) :)) . r + C.WrongName -> (_3 %~ ((name, typ) :)) . r + in mconcat + [ "I'm not sure what " + , style ErrorSite (Var.nameStr unknownTermV) + , " means at " + , annotatedToEnglish termSite + , "\n\n" + , annotatedAsErrorSite src termSite + , case expectedType of + Type.Var' (TypeVar.Existential _ _) -> "\nThere are no constraints on its type." + _ -> + "\nWhatever it is, it has a type that conforms to " + <> style Type1 (renderType' env expectedType) + <> ".\n" + -- ++ showTypeWithProvenance env src Type1 expectedType + , case correct of + [] -> case wrongTypes of + [] -> case wrongNames of + [] -> mempty + wrongs -> formatWrongs wrongNameText wrongs + wrongs -> formatWrongs wrongTypeText wrongs + suggs -> mconcat + [ "I found some terms in scope that have matching names and types. 
" + , "Maybe you meant one of these:\n\n" + , intercalateMap "\n" formatSuggestion suggs + ] + ] + DuplicateDefinitions {..} -> + mconcat + [ Pr.wrap $ mconcat + [ "I found" + , Pr.shown (length defns) + , names + , "with multiple definitions:" + ] + , Pr.lineSkip + , Pr.spaced ((\(v, _locs) -> renderVar v) <$> defns) + , debugSummary note + ] + where + names = + case defns of + _ Nel.:| [] -> "name" + _ -> "names" + Other (C.cause -> C.HandlerOfUnexpectedType loc typ) -> + Pr.lines [ + Pr.wrap "The handler used here", "", + annotatedAsErrorSite src loc, + Pr.wrap $ + "has type " <> stylePretty ErrorSite (Pr.group (renderType' env typ)) + <> "but I'm expecting a function of the form" + <> Pr.group (Pr.blue (renderType' env exHandler) <> ".") + ] + where + exHandler :: C.Type v loc + exHandler = fmap (const loc) $ + Type.arrow () + (Type.apps' (Type.ref () Type.effectRef) + [Type.var () (Var.named "e"), Type.var () (Var.named "a") ]) + (Type.var () (Var.named "o")) + + Other note -> mconcat + [ "Sorry, you hit an error we didn't make a nice message for yet.\n\n" + , "Here is a summary of the Note:\n" + , summary note + ] + where + wrongTypeText pl = mconcat + [ "I found " + , pl "a term" "some terms" + , " in scope with " + , pl "a " "" + , "matching name" + , pl "" "s" + , " but " + , pl "a " "" + , "different type" + , pl "" "s" + , ". " + , "If " + , pl "this" "one of these" + , " is what you meant, try using the fully qualified name and I might " + , "be able to give you a more illuminating error message: \n\n" + ] + wrongNameText pl = mconcat + [ "I found " + , pl "a term" "some terms" + , " in scope with " + , pl "a " "" + , "matching type" + , pl "" "s" + , " but " + , pl "a " "" + , "different name" + , pl "" "s" + , ". 
" + , "Maybe you meant " + , pl "this" "one of these" + , ":\n\n" + ] + formatSuggestion :: (Text, C.Type v loc) -> Pretty ColorText + formatSuggestion (name, typ) = + " - " <> fromString (Text.unpack name) <> " : " <> renderType' env typ + formatWrongs txt wrongs = + let sz = length wrongs + pl a b = if sz == 1 then a else b + in mconcat [txt pl, intercalateMap "\n" formatSuggestion wrongs] + ordinal :: (IsString s) => Int -> s + ordinal n = fromString $ show n ++ case last (show n) of + '1' -> "st" + '2' -> "nd" + '3' -> "rd" + _ -> "th" + debugNoteLoc a = if Settings.debugNoteLoc then a else mempty + debugSummary :: C.ErrorNote v loc -> Pretty ColorText + debugSummary note = + if Settings.debugNoteSummary then summary note else mempty + summary :: C.ErrorNote v loc -> Pretty ColorText + summary note = mconcat + [ "\n" + , " simple cause:\n" + , " " + , simpleCause (C.cause note) + , "\n" + , case toList (C.path note) of + [] -> " path: (empty)\n" + l -> " path:\n" <> mconcat (simplePath <$> l) + ] + simplePath :: C.PathElement v loc -> Pretty ColorText + simplePath e = " " <> simplePath' e <> "\n" + simplePath' :: C.PathElement v loc -> Pretty ColorText + simplePath' = \case + C.InSynthesize e -> "InSynthesize e=" <> renderTerm env e + C.InSubtype t1 t2 -> + "InSubtype t1=" <> renderType' env t1 <> ", t2=" <> renderType' env t2 + C.InCheck e t -> + "InCheck e=" <> renderTerm env e <> "," <> " t=" <> renderType' env t + C.InInstantiateL v t -> + "InInstantiateL v=" <> renderVar v <> ", t=" <> renderType' env t + C.InInstantiateR t v -> + "InInstantiateR t=" <> renderType' env t <> " v=" <> renderVar v + C.InSynthesizeApp t e n -> + "InSynthesizeApp t=" + <> renderType' env t + <> ", e=" + <> renderTerm env e + <> ", n=" + <> fromString (show n) + C.InFunctionCall vs f ft es -> + "InFunctionCall vs=[" + <> commas renderVar vs + <> "]" + <> ", f=" + <> renderTerm env f + <> ", ft=" + <> renderType' env ft + <> ", es=[" + <> commas (renderTerm env) es + <> "]" + 
C.InIfCond -> "InIfCond" + C.InIfBody loc -> "InIfBody thenBody=" <> annotatedToEnglish loc + C.InAndApp -> "InAndApp" + C.InOrApp -> "InOrApp" + C.InVectorApp loc -> "InVectorApp firstTerm=" <> annotatedToEnglish loc + C.InMatch loc -> "InMatch firstBody=" <> annotatedToEnglish loc + C.InMatchGuard -> "InMatchGuard" + C.InMatchBody -> "InMatchBody" + simpleCause :: C.Cause v loc -> Pretty ColorText + simpleCause = \case + C.TypeMismatch c -> + mconcat ["TypeMismatch\n", " context:\n", renderContext env c] + C.HandlerOfUnexpectedType loc typ -> + mconcat ["HandlerOfUnexpectedType\n", Pr.shown loc, "type:\n", renderType' env typ ] + C.IllFormedType c -> + mconcat ["IllFormedType\n", " context:\n", renderContext env c] + C.UnguardedLetRecCycle vs _ts -> + "Unguarded cycle of definitions: " <> + foldMap renderVar vs + C.UnknownSymbol loc v -> mconcat + [ "UnknownSymbol: " + , annotatedToEnglish loc + , " " <> renderVar v + , "\n\n" + , annotatedAsErrorSite src loc + ] + C.UnknownTerm loc v suggestions typ -> mconcat + [ "UnknownTerm: " + , annotatedToEnglish loc + , " " + , renderVar v + , "\n\n" + , annotatedAsErrorSite src loc + , "Suggestions: " + , mconcat (renderSuggestion env <$> suggestions) + , "\n\n" + , "Type: " + , renderType' env typ + ] + C.AbilityCheckFailure ambient requested c -> mconcat + [ "AbilityCheckFailure: " + , "ambient={" + , commas (renderType' env) ambient + , "} requested={" + , commas (renderType' env) requested + , "}\n" + , renderContext env c + ] + C.EffectConstructorWrongArgCount e a r cid -> mconcat + [ "EffectConstructorWrongArgCount:" + , " expected=" + , (fromString . show) e + , ", actual=" + , (fromString . 
show) a + , ", reference=" + , showConstructor env r cid + ] + C.MalformedEffectBind ctorType ctorResult es -> mconcat + [ "MalformedEffectBind: " + , " ctorType=" + , renderType' env ctorType + , " ctorResult=" + , renderType' env ctorResult + , " effects=" + , fromString (show es) + ] + C.PatternArityMismatch loc typ args -> mconcat + [ "PatternArityMismatch:\n" + , " loc=" + , annotatedToEnglish loc + , "\n" + , " typ=" + , renderType' env typ + , "\n" + , " args=" + , fromString (show args) + , "\n" + ] + C.DuplicateDefinitions vs -> + let go :: (v, [loc]) -> Pretty (AnnotatedText a) + go (v, locs) = + "[" + <> renderVar v + <> mconcat (intersperse " : " $ annotatedToEnglish <$> locs) + <> "]" + in "DuplicateDefinitions:" <> mconcat (go <$> Nel.toList vs) + C.ConcatPatternWithoutConstantLength loc typ -> mconcat + [ "ConcatPatternWithoutConstantLength:\n" + , " loc=" + , annotatedToEnglish loc + , "\n" + , " typ=" + , renderType' env typ + , "\n" + ] + +renderContext + :: (Var v, Ord loc) => Env -> C.Context v loc -> Pretty (AnnotatedText a) +renderContext env ctx@(C.Context es) = " Γ\n " + <> intercalateMap "\n " (showElem ctx . fst) (reverse es) + where + shortName :: (Var v, IsString loc) => v -> loc + shortName = fromString . Text.unpack . 
Var.name + showElem + :: (Var v, Ord loc) + => C.Context v loc + -> C.Element v loc + -> Pretty (AnnotatedText a) + showElem _ctx (C.Var v) = case v of + TypeVar.Universal x -> "@" <> renderVar x + TypeVar.Existential _ x -> "'" <> renderVar x + showElem ctx (C.Solved _ v (Type.Monotype t)) = + "'" <> shortName v <> " = " <> renderType' env (C.apply ctx t) + showElem ctx (C.Ann v t) = + shortName v <> " : " <> renderType' env (C.apply ctx t) + showElem _ (C.Marker v) = "|" <> shortName v <> "|" + +renderTerm :: (IsString s, Var v) => Env -> C.Term v loc -> s +renderTerm env e = + let s = Color.toPlain $ TermPrinter.pretty' (Just 80) env (TypeVar.lowerTerm e) + in if length s > Settings.renderTermMaxLength + then fromString (take Settings.renderTermMaxLength s <> "...") + else fromString s + +-- | renders a type with no special styling +renderType' :: (IsString s, Var v) => Env -> Type v loc -> s +renderType' env typ = + fromString . Pr.toPlain defaultWidth $ renderType env (const id) typ + +-- | `f` may do some styling based on `loc`. +-- | You can pass `(const id)` if no styling is needed, or call `renderType'`. 
+renderType + :: Var v + => Env + -> (loc -> Pretty (AnnotatedText a) -> Pretty (AnnotatedText a)) + -> Type v loc + -> Pretty (AnnotatedText a) +renderType env f t = renderType0 env f (0 :: Int) (Type.removePureEffects t) + where + wrap :: (IsString a, Semigroup a) => a -> a -> Bool -> a -> a + wrap start end test s = if test then start <> s <> end else s + paren = wrap "(" ")" + curly = wrap "{" "}" + renderType0 env f p t = f (ABT.annotation t) $ case t of + Type.Ref' r -> showTypeRef env r + Type.Arrow' i (Type.Effect1' e o) -> + paren (p >= 2) $ go 2 i <> " ->{" <> go 1 e <> "} " <> go 1 o + Type.Arrow' i o -> paren (p >= 2) $ go 2 i <> " -> " <> go 1 o + Type.Ann' t k -> paren True $ go 1 t <> " : " <> renderKind k + TupleType' ts -> paren True $ commas (go 0) ts + Type.Apps' (Type.Ref' (R.Builtin "Sequence")) [arg] -> + "[" <> go 0 arg <> "]" + Type.Apps' f' args -> paren (p >= 3) $ spaces (go 3) (f' : args) + Type.Effects' es -> curly (p >= 3) $ commas (go 0) es + Type.Effect' es t -> case es of + [] -> go p t + _ -> "{" <> commas (go 0) es <> "} " <> go 3 t + Type.Effect1' e t -> paren (p >= 3) $ "{" <> go 0 e <> "}" <> go 3 t + Type.ForallsNamed' vs body -> + paren (p >= 1) $ if not Settings.debugRevealForalls + then go 0 body + else "forall " <> spaces renderVar vs <> " . 
" <> go 1 body + Type.Var' v -> renderVar v + _ -> error $ "pattern match failure in PrintError.renderType " ++ show t + where go = renderType0 env f + +renderSuggestion + :: (IsString s, Semigroup s, Var v) => Env -> C.Suggestion v loc -> s +renderSuggestion env sug = + fromString (Text.unpack $ C.suggestionName sug) <> " : " <> renderType' + env + (C.suggestionType sug) + +spaces :: (IsString a, Monoid a) => (b -> a) -> [b] -> a +spaces = intercalateMap " " + +arrows :: (IsString a, Monoid a) => (b -> a) -> [b] -> a +arrows = intercalateMap " ->" + +commas :: (IsString a, Monoid a) => (b -> a) -> [b] -> a +commas = intercalateMap ", " + +renderVar :: (IsString a, Var v) => v -> a +renderVar = fromString . Text.unpack . Var.name + +renderVar' :: (Var v, Annotated a) => Env -> C.Context v a -> v -> String +renderVar' env ctx v = case C.lookupSolved ctx v of + Nothing -> "unsolved" + Just t -> renderType' env $ Type.getPolytype t + +prettyVar :: Var v => v -> Pretty ColorText +prettyVar = Pr.text . Var.name + +renderKind :: Kind -> Pretty (AnnotatedText a) +renderKind Kind.Star = "*" +renderKind (Kind.Arrow k1 k2) = renderKind k1 <> " -> " <> renderKind k2 + +showTermRef :: IsString s => Env -> Referent -> s +showTermRef env r = fromString . HQ.toString $ PPE.termName env r + +showTypeRef :: IsString s => Env -> R.Reference -> s +showTypeRef env r = fromString . HQ.toString $ PPE.typeName env r + +-- todo: do something different/better if cid not found +showConstructor :: IsString s => Env -> R.Reference -> Int -> s +showConstructor env r cid = fromString . 
HQ.toString $ + PPE.patternName env r cid + +styleInOverallType + :: (Var v, Annotated a, Eq a) + => Env + -> C.Type v a + -> C.Type v a + -> Color + -> Pretty ColorText +styleInOverallType e overallType leafType c = renderType e f overallType + where + f loc s = if loc == ABT.annotation leafType then Color.style c <$> s else s + +_posToEnglish :: IsString s => L.Pos -> s +_posToEnglish (L.Pos l c) = + fromString $ "Line " ++ show l ++ ", Column " ++ show c + +rangeForToken :: L.Token a -> Range +rangeForToken t = Range (L.start t) (L.end t) + +rangeToEnglish :: IsString s => Range -> s +rangeToEnglish (Range (L.Pos l c) (L.Pos l' c')) = + fromString + $ let showColumn = True + in + if showColumn + then if l == l' + then if c == c' + then "line " ++ show l ++ ", column " ++ show c + else "line " ++ show l ++ ", columns " ++ show c ++ "-" ++ show c' + else + "line " + ++ show l + ++ ", column " + ++ show c + ++ " through " + ++ "line " + ++ show l' + ++ ", column " + ++ show c' + else if l == l' + then "line " ++ show l + else "lines " ++ show l ++ "—" ++ show l' + +annotatedToEnglish :: (Annotated a, IsString s) => a -> s +annotatedToEnglish a = case ann a of + Intrinsic -> "an intrinsic" + External -> "an external" + Ann start end -> rangeToEnglish $ Range start end + + +rangeForAnnotated :: Annotated a => a -> Maybe Range +rangeForAnnotated a = case ann a of + Intrinsic -> Nothing + External -> Nothing + Ann start end -> Just $ Range start end + +showLexerOutput :: Bool +showLexerOutput = False + +renderNoteAsANSI + :: (Var v, Annotated a, Show a, Ord a) + => Pr.Width + -> Env + -> String + -> Note v a + -> String +renderNoteAsANSI w e s n = Pr.toANSI w $ printNoteWithSource e s n + +renderParseErrorAsANSI :: Var v => Pr.Width -> String -> Parser.Err v -> String +renderParseErrorAsANSI w src = Pr.toANSI w . 
prettyParseError src + +printNoteWithSource + :: (Var v, Annotated a, Show a, Ord a) + => Env + -> String + -> Note v a + -> Pretty ColorText +printNoteWithSource env _s (TypeInfo n) = prettyTypeInfo n env +printNoteWithSource _env s (Parsing e) = prettyParseError s e +printNoteWithSource env s (TypeError e) = prettyTypecheckError e env s +printNoteWithSource _env _s (NameResolutionFailures _es) = undefined +printNoteWithSource _env s (InvalidPath path term) = + fromString ("Invalid Path: " ++ show path ++ "\n") + <> annotatedAsErrorSite s term +printNoteWithSource _env s (UnknownSymbol v a) = + fromString ("Unknown symbol `" ++ Text.unpack (Var.name v) ++ "`\n\n") + <> annotatedAsErrorSite s a +printNoteWithSource _env _s (CompilerBug c) = + fromString $ "Compiler bug: " <> show c + +_printPosRange :: String -> L.Pos -> L.Pos -> String +_printPosRange s (L.Pos startLine startCol) _end = + -- todo: multi-line ranges + -- todo: ranges + _printArrowsAtPos s startLine startCol + +_printArrowsAtPos :: String -> Int -> Int -> String +_printArrowsAtPos s line column = + let lineCaret s i = s ++ if i == line then "\n" ++ columnCaret else "" + columnCaret = replicate (column - 1) '-' ++ "^" + source = unlines (uncurry lineCaret <$> lines s `zip` [1 ..]) + in source + +-- Wow, epic view pattern for picking out a lexer error +pattern LexerError ts e <- Just (P.Tokens (firstLexerError -> Just (ts, e))) + +firstLexerError :: Foldable t => t (L.Token L.Lexeme) -> Maybe ([L.Token L.Lexeme], L.Err) +firstLexerError (toList -> ts@((L.payload -> L.Err e) : _)) = Just (ts, e) +firstLexerError _ = Nothing + +prettyParseError + :: forall v + . 
Var v + => String + -> Parser.Err v + -> Pretty ColorText +prettyParseError s = \case + P.TrivialError _ (LexerError ts (L.CloseWithoutMatchingOpen open close)) _ -> + "❗️ I found a closing " <> style ErrorSite (fromString close) <> + " here without a matching " <> style ErrorSite (fromString open) <> ".\n\n" <> + showSource s ((\t -> (rangeForToken t, ErrorSite)) <$> ts) + P.TrivialError sp unexpected expected + -> fromString + (P.parseErrorPretty @_ @Void (P.TrivialError sp unexpected expected)) + <> (case unexpected of + Just (P.Tokens (toList -> ts)) -> case ts of + [] -> mempty + _ -> showSource s $ (\t -> (rangeForToken t, ErrorSite)) <$> ts + _ -> mempty + ) + <> lexerOutput + P.FancyError _sp fancyErrors -> + mconcat (go' <$> Set.toList fancyErrors) <> lexerOutput + where + go' :: P.ErrorFancy (Parser.Error v) -> Pretty ColorText + go' (P.ErrorFail s) = + "The parser failed with this message:\n" <> fromString s + go' (P.ErrorIndentation ordering indent1 indent2) = mconcat + [ "The parser was confused by the indentation.\n" + , "It was expecting the reference level (" + , fromString (show indent1) + , ")\nto be " + , fromString (show ordering) + , " than/to the actual level (" + , fromString (show indent2) + , ").\n" + ] + go' (P.ErrorCustom e) = go e + errorVar v = style ErrorSite . fromString . Text.unpack $ Var.name v + go :: Parser.Error v -> Pretty ColorText + -- | UseInvalidPrefixSuffix (Either (L.Token Name) (L.Token Name)) (Maybe [L.Token Name]) + go (Parser.UseEmpty tok) = msg where + msg = Pr.indentN 2 . Pr.callout "😶" $ Pr.lines [ + Pr.wrap $ "I was expecting something after the " <> Pr.hiRed "use" <> "keyword", "", + tokenAsErrorSite s tok, + useExamples + ] + go (Parser.UseInvalidPrefixSuffix prefix suffix) = msg where + msg :: Pretty ColorText + msg = Pr.indentN 2 . Pr.blockedCallout . 
Pr.lines $ case (prefix, suffix) of + (Left tok, Just _) -> [ + Pr.wrap "The first argument of a `use` statement can't be an operator name:", "", + tokenAsErrorSite s tok, + useExamples + ] + (tok0, Nothing) -> let tok = either id id tok0 in [ + Pr.wrap $ "I was expecting something after " <> Pr.hiRed "here:", "", + tokenAsErrorSite s tok, + case Name.parent (L.payload tok) of + Nothing -> useExamples + Just parent -> Pr.wrap $ + "You can write" <> + Pr.group (Pr.blue $ "use " <> Pr.shown parent <> " " + <> Pr.shown (Name.unqualified (L.payload tok))) <> + "to introduce " <> Pr.backticked (Pr.shown (Name.unqualified (L.payload tok))) <> + "as a local alias for " <> Pr.backticked (Pr.shown (L.payload tok)) + ] + (Right tok, _) -> [ -- this is unpossible but rather than bomb, nice msg + "You found a Unison bug 🐞 here:", "", + tokenAsErrorSite s tok, + Pr.wrap $ + "This looks like a valid `use` statement," <> + "but the parser didn't recognize it. This is a Unison bug." + ] + go (Parser.DisallowedAbsoluteName t) = msg where + msg :: Pretty ColorText + msg = Pr.indentN 2 $ Pr.fatalCallout $ Pr.lines [ + Pr.wrap $ "I don't currently support creating definitions that start with" + <> Pr.group (Pr.blue "'.'" <> ":"), + "", + tokenAsErrorSite s t, + Pr.wrap $ "Use " <> Pr.blue "help messages.disallowedAbsolute" <> "to learn more.", + "" + ] + go (Parser.DuplicateTypeNames ts) = intercalateMap "\n\n" showDup ts where + showDup (v, locs) = + "I found multiple types with the name " <> errorVar v <> ":\n\n" <> + annotatedsStartingLineAsStyle ErrorSite s locs + go (Parser.DuplicateTermNames ts) = + Pr.fatalCallout $ intercalateMap "\n\n" showDup ts + where + showDup (v, locs) = Pr.lines [ + Pr.wrap $ + "I found multiple bindings with the name " <> Pr.group (errorVar v <> ":"), + annotatedsStartingLineAsStyle ErrorSite s locs + ] + go (Parser.TypeDeclarationErrors es) = let + unknownTypes = [ (v, a) | UF.UnknownType v a <- es ] + dupDataAndAbilities = [ (v, a, a2) | 
UF.DupDataAndAbility v a a2 <- es ] + unknownTypesMsg = + mconcat [ "I don't know about the type(s) " + , intercalateMap ", " errorVar (nubOrd $ fst <$> unknownTypes) + , ":\n\n" + , annotatedsAsStyle ErrorSite s (snd <$> unknownTypes) + ] + dupDataAndAbilitiesMsg = intercalateMap "\n\n" dupMsg dupDataAndAbilities + dupMsg (v, a, a2) = + mconcat [ "I found two types called " <> errorVar v <> ":" + , "\n\n" + , annotatedsStartingLineAsStyle ErrorSite s [a, a2]] + in if null unknownTypes + then dupDataAndAbilitiesMsg + else if null dupDataAndAbilities then unknownTypesMsg + else unknownTypesMsg <> "\n\n" <> dupDataAndAbilitiesMsg + go (Parser.DidntExpectExpression _tok (Just t@(L.payload -> L.SymbolyId "::" Nothing))) + = mconcat + [ "This looks like the start of an expression here but I was expecting a binding." + , "\nDid you mean to use a single " <> style Code ":" + , " here for a type signature?" + , "\n\n" + , tokenAsErrorSite s t + ] + go (Parser.DidntExpectExpression tok _nextTok) = mconcat + [ "This looks like the start of an expression here \n\n" + , tokenAsErrorSite s tok + , "\nbut at the file top-level, I expect one of the following:" + , "\n" + , "\n - A binding, like " <> t <> style Code " = 42" <> " OR" + , "\n " <> t <> style Code " : Nat" + , "\n " <> t <> style Code " = 42" + , "\n - A watch expression, like " <> style Code "> " <> t <> style Code + " + 1" + , "\n - An `ability` declaration, like " + <> style Code "ability Foo where ..." + , "\n - A `type` declaration, like " + <> style Code "type Optional a = None | Some a" + , "\n - A `namespace` declaration, like " + <> style Code "namespace Seq where ..." 
+ , "\n" + ] + where t = style Code (fromString (P.showTokens (pure tok))) + go (Parser.ExpectedBlockOpen blockName tok@(L.payload -> L.Close)) = mconcat + [ "I was expecting an indented block following the " <> + "`" <> fromString blockName <> "` keyword\n" + , "but instead found an outdent:\n\n" + , tokenAsErrorSite s tok ] -- todo: @aryairani why is this displaying weirdly? + go (Parser.ExpectedBlockOpen blockName tok) = mconcat + [ "I was expecting an indented block following the " <> + "`" <> fromString blockName <> "` keyword\n" + , "but instead found this token:\n" + , tokenAsErrorSite s tok ] + go (Parser.SignatureNeedsAccompanyingBody tok) = mconcat + [ "You provided a type signature, but I didn't find an accompanying\n" + , "binding after it. Could it be a spelling mismatch?\n" + , tokenAsErrorSite s tok + ] + go (Parser.EmptyBlock tok) = mconcat + [ "I expected a block after this (" + , describeStyle ErrorSite + , "), " + , "but there wasn't one. Maybe check your indentation:\n" + , tokenAsErrorSite s tok + ] + go Parser.EmptyWatch = + "I expected a non-empty watch expression and not just \">\"" + go (Parser.UnknownAbilityConstructor tok _referents) = unknownConstructor "ability" tok + go (Parser.UnknownDataConstructor tok _referents) = unknownConstructor "data" tok + go (Parser.UnknownId tok referents references) = Pr.lines + [ if missing then + "I couldn't resolve the reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> "." + else + "The reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> " was ambiguous." + , "" + , tokenAsErrorSite s $ HQ.toString <$> tok + , if missing then "Make sure it's spelled correctly." + else "Try hash-qualifying the term you meant to reference." + ] + where missing = Set.null referents && Set.null references + go (Parser.UnknownTerm tok referents) = Pr.lines + [ if Set.null referents then + "I couldn't find a term for " <> style ErrorSite (HQ.toString (L.payload tok)) <> "." 
+ else + "The term reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> " was ambiguous." + , "" + , tokenAsErrorSite s $ HQ.toString <$> tok + , if missing then "Make sure it's spelled correctly." + else "Try hash-qualifying the term you meant to reference." + ] + where + missing = Set.null referents + go (Parser.UnknownType tok referents) = Pr.lines + [ if Set.null referents then + "I couldn't find a type for " <> style ErrorSite (HQ.toString (L.payload tok)) <> "." + else + "The type reference " <> style ErrorSite (HQ.toString (L.payload tok)) <> " was ambiguous." + , "" + , tokenAsErrorSite s $ HQ.toString <$> tok + , if missing then "Make sure it's spelled correctly." + else "Try hash-qualifying the type you meant to reference." + ] + where + missing = Set.null referents + go (Parser.ResolutionFailures failures) = + Pr.border 2 . prettyResolutionFailures s $ failures + unknownConstructor + :: String -> L.Token HashQualified -> Pretty ColorText + unknownConstructor ctorType tok = Pr.lines [ + (Pr.wrap . mconcat) [ "I don't know about any " + , fromString ctorType + , " constructor named " + , Pr.group ( + stylePretty ErrorSite (prettyHashQualified0 (L.payload tok)) <> + "." 
+ ) + , "Maybe make sure it's correctly spelled and that you've imported it:" + ] + , "" + , tokenAsErrorSite s tok + ] + lexerOutput :: Pretty (AnnotatedText a) + lexerOutput = if showLexerOutput + then "\nLexer output:\n" <> fromString (L.debugLex' s) + else mempty + +annotatedAsErrorSite + :: Annotated a => String -> a -> Pretty ColorText +annotatedAsErrorSite = annotatedAsStyle ErrorSite + +annotatedAsStyle + :: (Ord style, Annotated a) + => style + -> String + -> a + -> Pretty (AnnotatedText style) +annotatedAsStyle style s ann = + showSourceMaybes s [(, style) <$> rangeForAnnotated ann] + +annotatedsAsErrorSite :: (Annotated a) => String -> [a] -> Pretty ColorText +annotatedsAsErrorSite = annotatedsAsStyle ErrorSite + +annotatedsAsStyle :: (Annotated a) => Color -> String -> [a] -> Pretty ColorText +annotatedsAsStyle style src as = + showSourceMaybes src [ (, style) <$> rangeForAnnotated a | a <- as ] + +annotatedsStartingLineAsStyle + :: (Annotated a) => Color -> String -> [a] -> Pretty ColorText +annotatedsStartingLineAsStyle style src as = showSourceMaybes + src + [ (, style) <$> (startingLine <$> rangeForAnnotated a) | a <- as ] + +tokenAsErrorSite :: String -> L.Token a -> Pretty ColorText +tokenAsErrorSite src tok = showSource1 src (rangeForToken tok, ErrorSite) + +tokensAsErrorSite :: String -> [L.Token a] -> Pretty ColorText +tokensAsErrorSite src ts = + showSource src [(rangeForToken t, ErrorSite) | t <- ts ] + +showSourceMaybes + :: Ord a => String -> [Maybe (Range, a)] -> Pretty (AnnotatedText a) +showSourceMaybes src annotations = showSource src $ catMaybes annotations + +showSource :: Ord a => String -> [(Range, a)] -> Pretty (AnnotatedText a) +showSource src annotations = Pr.lit . 
AT.condensedExcerptToText 6 $ AT.markup + (fromString src) + (Map.fromList annotations) + +showSource1 :: Ord a => String -> (Range, a) -> Pretty (AnnotatedText a) +showSource1 src annotation = showSource src [annotation] + +findTerm :: Seq (C.PathElement v loc) -> Maybe loc +findTerm = go + where + go (C.InSynthesize t :<| _) = Just $ ABT.annotation t + go (C.InCheck t _ :<| _) = Just $ ABT.annotation t + go (C.InSynthesizeApp _ t _ :<| _) = Just $ ABT.annotation t + go (_ :<| t) = go t + go Empty = Nothing + +prettyTypecheckError + :: (Var v, Ord loc, Show loc, Parser.Annotated loc) + => C.ErrorNote v loc + -> Env + -> String + -> Pretty ColorText +prettyTypecheckError = renderTypeError . typeErrorFromNote + +prettyTypeInfo + :: (Var v, Ord loc, Show loc, Parser.Annotated loc) + => C.InfoNote v loc + -> Env + -> Pretty ColorText +prettyTypeInfo n e = + maybe "" (`renderTypeInfo` e) (typeInfoFromNote n) + +intLiteralSyntaxTip + :: C.Term v loc -> C.Type v loc -> Pretty ColorText +intLiteralSyntaxTip term expectedType = case (term, expectedType) of + (Term.Nat' n, Type.Ref' r) | r == Type.intRef -> + "\nTip: Use the syntax " + <> style Type2 ("+" <> show n) + <> " to produce an " + <> style Type2 "Int" + <> "." 
+ _ -> "" + +prettyResolutionFailures + :: (Annotated a, Var v) + => String + -> [Names.ResolutionFailure v a] + -> Pretty ColorText +prettyResolutionFailures s failures = Pr.callout "❓" $ Pr.linesNonEmpty + [ Pr.wrap + ("I couldn't resolve any of" <> style ErrorSite "these" <> "symbols:") + , "" + , annotatedsAsErrorSite s + $ [ a | Names.TermResolutionFailure _ a _ <- failures ] + ++ [ a | Names.TypeResolutionFailure _ a _ <- failures ] + , let + conflicts = + nubOrd + $ [ v + | Names.TermResolutionFailure v _ s <- failures + , Set.size s > 1 + ] + ++ [ v + | Names.TypeResolutionFailure v _ s <- failures + , Set.size s > 1 + ] + allVars = + nubOrd + $ [ v | Names.TermResolutionFailure v _ _ <- failures ] + ++ [ v | Names.TypeResolutionFailure v _ _ <- failures ] + in + "Using these fully qualified names:" + `Pr.hang` Pr.spaced (prettyVar <$> allVars) + <> "\n" + <> if null conflicts + then "" + else Pr.spaced (prettyVar <$> conflicts) + <> Pr.bold " are currently conflicted symbols" + ] + +useExamples :: Pretty ColorText +useExamples = Pr.lines [ + "Here's a few examples of valid `use` statements:", "", + Pr.indentN 2 . Pr.column2 $ + [ (Pr.blue "use math sqrt", Pr.wrap "Introduces `sqrt` as a local alias for `math.sqrt`") + , (Pr.blue "use List :+", Pr.wrap "Introduces `:+` as a local alias for `List.:+`.") + , (Pr.blue "use .foo bar.baz", Pr.wrap "Introduces `bar.baz` as a local alias for the absolute name `.foo.bar.baz`") ] + ] diff --git a/parser-typechecker/src/Unison/Result.hs b/parser-typechecker/src/Unison/Result.hs new file mode 100644 index 0000000000..c0569c9113 --- /dev/null +++ b/parser-typechecker/src/Unison/Result.hs @@ -0,0 +1,90 @@ +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} + +module Unison.Result where + +import Unison.Prelude + +import Control.Monad.Except ( ExceptT(..) 
) +import Data.Functor.Identity +import qualified Control.Monad.Fail as Fail +import qualified Control.Monad.Morph as Morph +import Control.Monad.Writer ( WriterT(..) + , runWriterT + , MonadWriter(..) + ) +import Unison.Name ( Name ) +import qualified Unison.Parser as Parser +import Unison.Paths ( Path ) +import Unison.Term ( Term ) +import qualified Unison.Typechecker.Context as Context +import Control.Error.Util ( note) +import qualified Unison.Names3 as Names + +type Result notes = ResultT notes Identity + +type ResultT notes f = MaybeT (WriterT notes f) + +data Note v loc + = Parsing (Parser.Err v) + | NameResolutionFailures [Names.ResolutionFailure v loc] + | InvalidPath Path (Term v loc) -- todo: move me! + | UnknownSymbol v loc + | TypeError (Context.ErrorNote v loc) + | TypeInfo (Context.InfoNote v loc) + | CompilerBug (CompilerBug v loc) + deriving Show + +data CompilerBug v loc + = TopLevelComponentNotFound v (Term v loc) + | ResolvedNameNotFound v loc Name + | TypecheckerBug (Context.CompilerBug v loc) + deriving Show + +result :: Result notes a -> Maybe a +result (Result _ may) = may + +pattern Result notes may = MaybeT (WriterT (Identity (may, notes))) +{-# COMPLETE Result #-} + +isSuccess :: Functor f => ResultT note f a -> f Bool +isSuccess = (isJust . fst <$>) . runResultT + +isFailure :: Functor f => ResultT note f a -> f Bool +isFailure = (isNothing . fst <$>) . runResultT + +toMaybe :: Functor f => ResultT note f a -> f (Maybe a) +toMaybe = (fst <$>) . runResultT + +runResultT :: ResultT notes f a -> f (Maybe a, notes) +runResultT = runWriterT . runMaybeT + +-- Returns the `Result` in the `f` functor. +getResult :: Functor f => ResultT notes f a -> f (Result notes a) +getResult r = uncurry (flip Result) <$> runResultT r + +toEither :: Functor f => ResultT notes f a -> ExceptT notes f a +toEither r = ExceptT (go <$> runResultT r) + where go (may, notes) = note notes may + +tell1 :: Monad f => note -> ResultT (Seq note) f () +tell1 = tell . 
pure + +fromParsing + :: Monad f => Either (Parser.Err v) a -> ResultT (Seq (Note v loc)) f a +fromParsing (Left e) = do + tell1 $ Parsing e + Fail.fail "" +fromParsing (Right a) = pure a + +tellAndFail :: Monad f => note -> ResultT (Seq note) f a +tellAndFail note = tell1 note *> Fail.fail "Elegantly and responsibly" + +compilerBug :: Monad f => CompilerBug v loc -> ResultT (Seq (Note v loc)) f a +compilerBug = tellAndFail . CompilerBug + +hoist + :: (Monad f, Monoid notes) + => (forall a. f a -> g a) + -> ResultT notes f b -> ResultT notes g b +hoist morph = Morph.hoist (Morph.hoist morph) diff --git a/parser-typechecker/src/Unison/Runtime/ANF.hs b/parser-typechecker/src/Unison/Runtime/ANF.hs new file mode 100644 index 0000000000..03bb169d59 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/ANF.hs @@ -0,0 +1,1432 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE ViewPatterns #-} +{-# Language OverloadedStrings #-} +{-# Language PatternGuards #-} +{-# Language PatternSynonyms #-} +{-# Language ScopedTypeVariables #-} +{-# Language GeneralizedNewtypeDeriving #-} + +module Unison.Runtime.ANF + ( optimize + , fromTerm + , fromTerm' + , term + , minimizeCyclesOrCrash + , pattern TVar + , pattern TLit + , pattern TApp + , pattern TApv + , pattern TCom + , pattern TCon + , pattern TKon + , pattern TReq + , pattern TPrm + , pattern TIOp + , pattern THnd + , pattern TLet + , pattern TFrc + , pattern TLets + , pattern TName + , pattern TBind + , pattern TTm + , pattern TBinds + , pattern TBinds' + , pattern TShift + , pattern TMatch + , Mem(..) + , Lit(..) + , SuperNormal(..) + , SuperGroup(..) + , POp(..) + , IOp(..) + , close + , saturate + , float + , lamLift + , ANormalBF(..) + , ANormalTF(.., AApv, ACom, ACon, AKon, AReq, APrm, AIOp) + , ANormal + , ANormalT + , RTag + , CTag + , Tag(..) + , packTags + , unpackTags + , ANFM + , Branched(..) + , Func(..) 
+ , superNormalize + , anfTerm + , sink + , prettyGroup + ) where + +import Unison.Prelude + +import Control.Monad.Reader (ReaderT(..), asks, local) +import Control.Monad.State (State, runState, MonadState(..), modify, gets) +import Control.Lens (snoc, unsnoc) + +import Data.Bifunctor (Bifunctor(..)) +import Data.Bifoldable (Bifoldable(..)) +import Data.Bits ((.&.), (.|.), shiftL, shiftR) +import Data.List hiding (and,or) +import Prelude hiding (abs,and,or,seq) +import qualified Prelude +import Unison.Term hiding (resolve, fresh, float) +import Unison.Var (Var, typed) +import Unison.Util.EnumContainers as EC +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Unison.ABT as ABT +import qualified Unison.ABT.Normalized as ABTN +import qualified Unison.Term as Term +import qualified Unison.Type as Ty +import qualified Unison.Builtin.Decls as Ty (unitRef,seqViewRef) +import qualified Unison.Var as Var +import Unison.Typechecker.Components (minimize') +import Unison.Pattern (SeqOp(..)) +import qualified Unison.Pattern as P +import Unison.Reference (Reference(..)) +import Unison.Referent (Referent) + +newtype ANF v a = ANF_ { term :: Term v a } + +-- Replace all lambdas with free variables with closed lambdas. +-- Works by adding a parameter for each free variable. These +-- synthetic parameters are added before the existing lambda params. +-- For example, `(x -> x + y + z)` becomes `(y z x -> x + y + z) y z`. +-- As this replacement has the same type as the original lambda, it +-- can be done as a purely local transformation, without updating any +-- call sites of the lambda. +-- +-- The transformation is shallow and doesn't transform the body of +-- lambdas it finds inside of `t`. 
+lambdaLift :: (Var v, Semigroup a) => (v -> v) -> Term v a -> Term v a +lambdaLift liftVar t = result where + result = ABT.visitPure go t + go t@(LamsNamed' vs body) = Just $ let + fvs = ABT.freeVars t + fvsLifted = [ (v, liftVar v) | v <- toList fvs ] + a = ABT.annotation t + subs = [(v, var a v') | (v,v') <- fvsLifted ] + in if Set.null fvs then lam' a vs body -- `lambdaLift body` would make transform deep + else apps' (lam' a (map snd fvsLifted ++ vs) (ABT.substs subs body)) + (snd <$> subs) + go _ = Nothing + +closure :: Var v => Map v (Set v, Set v) -> Map v (Set v) +closure m0 = trace (snd <$> m0) + where + refs = fst <$> m0 + + expand acc fvs rvs + = fvs <> foldMap (\r -> Map.findWithDefault mempty r acc) rvs + + trace acc + | acc == acc' = acc + | otherwise = trace acc' + where + acc' = Map.intersectionWith (expand acc) acc refs + +expandRec + :: (Var v, Monoid a) + => Set v + -> [(v, Term v a)] + -> [(v, Term v a)] +expandRec keep vbs = mkSub <$> fvl + where + mkSub (v, fvs) = (v, apps' (var mempty v) (var mempty <$> fvs)) + + fvl = Map.toList + . fmap (Set.toList) + . closure + $ Set.partition (`Set.member` keep) + . ABT.freeVars + <$> Map.fromList vbs + +expandSimple + :: (Var v, Monoid a) + => Set v + -> (v, Term v a) + -> (v, Term v a) +expandSimple keep (v, bnd) = (v, apps' (var a v) evs) + where + a = ABT.annotation bnd + fvs = ABT.freeVars bnd + evs = map (var a) . Set.toList $ Set.difference fvs keep + + +abstract :: (Var v) => Set v -> Term v a -> Term v a +abstract keep bnd = lam' a evs bnd + where + a = ABT.annotation bnd + fvs = ABT.freeVars bnd + evs = Set.toList $ Set.difference fvs keep + +enclose + :: (Var v, Monoid a) + => Set v + -> (Set v -> Term v a -> Term v a) + -> Term v a + -> Maybe (Term v a) +enclose keep rec (LetRecNamedTop' top vbs bd) + = Just $ letRec' top lvbs lbd + where + xpnd = expandRec keep' vbs + keep' = Set.union keep . Set.fromList . map fst $ vbs + lvbs = (map.fmap) (rec keep' . abstract keep' . 
ABT.substs xpnd) vbs + lbd = rec keep' . ABT.substs xpnd $ bd +-- will be lifted, so keep this variable +enclose keep rec (Let1NamedTop' top v b@(LamsNamed' vs bd) e) + = Just . let1' top [(v, lamb)] . rec (Set.insert v keep) + $ ABT.subst v av e + where + (_, av) = expandSimple keep (v, b) + keep' = Set.difference keep $ Set.fromList vs + fvs = ABT.freeVars b + evs = Set.toList $ Set.difference fvs keep + a = ABT.annotation b + lbody = rec keep' bd + lamb = lam' a (evs ++ vs) lbody +enclose keep rec t@(LamsNamed' vs body) + = Just $ if null evs then lamb else apps' lamb $ map (var a) evs + where + -- remove shadowed variables + keep' = Set.difference keep $ Set.fromList vs + fvs = ABT.freeVars t + evs = Set.toList $ Set.difference fvs keep + a = ABT.annotation t + lbody = rec keep' body + lamb = lam' a (evs ++ vs) lbody +enclose keep rec t@(Handle' h body) + | isStructured body + = Just . handle (ABT.annotation t) h $ apps' lamb args + where + fvs = ABT.freeVars body + evs = Set.toList $ Set.difference fvs keep + a = ABT.annotation body + lbody = rec keep body + fv = Var.freshIn fvs $ typed Var.Eta + args | null evs = [constructor a Ty.unitRef 0] + | otherwise = var a <$> evs + lamb | null evs = lam' a [fv] lbody + | otherwise = lam' a evs lbody +enclose _ _ _ = Nothing + +isStructured :: Var v => Term v a -> Bool +isStructured (Var' _) = False +isStructured (Lam' _) = False +isStructured (Nat' _) = False +isStructured (Int' _) = False +isStructured (Float' _) = False +isStructured (Text' _) = False +isStructured (Char' _) = False +isStructured (Constructor' _ _) = False +isStructured (Apps' Constructor'{} args) = any isStructured args +isStructured (If' b t f) + = isStructured b || isStructured t || isStructured f +isStructured (And' l r) = isStructured l || isStructured r +isStructured (Or' l r) = isStructured l || isStructured r +isStructured _ = True + +close :: (Var v, Monoid a) => Set v -> Term v a -> Term v a +close keep tm = ABT.visitPure (enclose keep 
close) tm + +type FloatM v a r = State (Set v, [(v, Term v a)]) r + +freshFloat :: Var v => Set v -> v -> v +freshFloat avoid (Var.freshIn avoid -> v0) + = case Var.typeOf v0 of + Var.User nm + | v <- typed (Var.User $ nm <> w) , v `Set.notMember` avoid + -> v + | otherwise + -> freshFloat (Set.insert v0 avoid) v0 + _ -> v0 + where + w = Text.pack . show $ Var.freshId v0 + +letFloater + :: (Var v, Monoid a) + => (Term v a -> FloatM v a (Term v a)) + -> [(v, Term v a)] -> Term v a + -> FloatM v a (Term v a) +letFloater rec vbs e = do + cvs <- gets fst + let shadows = [ (v, freshFloat cvs v) + | (v, _) <- vbs, Set.member v cvs ] + shadowMap = Map.fromList shadows + rn v = Map.findWithDefault v v shadowMap + shvs = Set.fromList $ map (rn.fst) vbs + modify (first $ (<>shvs)) + fvbs <- traverse (\(v, b) -> (,) (rn v) <$> rec' (ABT.changeVars shadowMap b)) vbs + modify (second (++ fvbs)) + pure $ ABT.changeVars shadowMap e + where + rec' b@(LamsNamed' vs bd) = lam' (ABT.annotation b) vs <$> rec bd + rec' b = rec b + +lamFloater + :: (Var v, Monoid a) + => Maybe v -> a -> [v] -> Term v a -> FloatM v a v +lamFloater mv a vs bd + = state $ \(cvs, ctx) -> + let v = ABT.freshIn cvs $ fromMaybe (typed Var.Float) mv + in (v, (Set.insert v cvs, ctx <> [(v, lam' a vs bd)])) + +floater + :: (Var v, Monoid a) + => (Term v a -> FloatM v a (Term v a)) + -> Term v a -> Maybe (FloatM v a (Term v a)) +floater rec (LetRecNamed' vbs e) = Just $ letFloater rec vbs e >>= rec +floater rec (Let1Named' v b e) + | LamsNamed' vs bd <- b + = Just $ rec bd + >>= lamFloater (Just v) a vs + >>= \lv -> rec $ ABT.changeVars (Map.singleton v lv) e + where a = ABT.annotation b +floater rec tm@(LamsNamed' vs bd) = Just $ do + bd <- rec bd + lv <- lamFloater Nothing a vs bd + pure $ var a lv + where a = ABT.annotation tm +floater _ _ = Nothing + +float :: (Var v, Monoid a) => Term v a -> Term v a +float tm = case runState (go tm) (Set.empty, []) of + (bd, (_, ctx)) -> letRec' True ctx bd + where + go = 
ABT.visit $ floater go + -- tm | LetRecNamedTop' _ vbs e <- tm0 + -- , (pre, rec, post) <- reduceCycle vbs + -- = let1' False pre . letRec' False rec . let1' False post $ e + -- | otherwise = tm0 + +deannotate :: Var v => Term v a -> Term v a +deannotate = ABT.visitPure $ \case + Ann' c _ -> Just $ deannotate c + _ -> Nothing + +lamLift :: (Var v, Monoid a) => Term v a -> Term v a +lamLift = float . close Set.empty . deannotate + +saturate + :: (Var v, Monoid a) + => Map (Reference,Int) Int -> Term v a -> Term v a +saturate dat = ABT.visitPure $ \case + Apps' f@(Constructor' r t) args -> sat r t f args + Apps' f@(Request' r t) args -> sat r t f args + f@(Constructor' r t) -> sat r t f [] + f@(Request' r t) -> sat r t f [] + _ -> Nothing + where + frsh avoid _ = + let v = Var.freshIn avoid $ typed Var.Eta + in (Set.insert v avoid, v) + sat r t f args = case Map.lookup (r,t) dat of + Just n + | m < n + , vs <- snd $ mapAccumL frsh fvs [1..n-m] + , nargs <- var mempty <$> vs + -> Just . lam' mempty vs . apps' f $ args' ++ nargs + | m > n + , (sargs, eargs) <- splitAt n args' + , sv <- Var.freshIn fvs $ typed Var.Eta + -> Just + . let1' False [(sv,apps' f sargs)] + $ apps' (var mempty sv) eargs + _ -> Just (apps' f args') + where + m = length args + fvs = foldMap freeVars args + args' = saturate dat <$> args + +optimize :: forall a v . 
(Semigroup a, Var v) => Term v a -> Term v a +optimize t = go t where + ann = ABT.annotation + go (Let1' b body) | canSubstLet b body = go (ABT.bind body b) + go e@(App' f arg) = case go f of + Lam' f -> go (ABT.bind f arg) + f -> app (ann e) f (go arg) + go (If' (Boolean' False) _ f) = go f + go (If' (Boolean' True) t _) = go t + -- todo: can simplify match expressions + go e@(ABT.Var' _) = e + go e@(ABT.Tm' f) = case e of + Lam' _ -> e -- optimization is shallow - don't descend into lambdas + _ -> ABT.tm' (ann e) (go <$> f) + go e@(ABT.out -> ABT.Cycle body) = ABT.cycle' (ann e) (go body) + go e@(ABT.out -> ABT.Abs v body) = ABT.abs' (ann e) v (go body) + go e = e + + -- test for whether an expression `let x = y in body` can be + -- reduced by substituting `y` into `body`. We only substitute + -- when `y` is a variable or a primitive, otherwise this might + -- end up duplicating evaluation or changing the order that + -- effects are evaluated + canSubstLet expr _body + | isLeaf expr = True + -- todo: if number of occurrences of the binding is 1 and the + -- binding is pure, okay to substitute + | otherwise = False + +isLeaf :: ABT.Term (F typeVar typeAnn patternAnn) v a -> Bool +isLeaf (Var' _) = True +isLeaf (Int' _) = True +isLeaf (Float' _) = True +isLeaf (Nat' _) = True +isLeaf (Text' _) = True +isLeaf (Boolean' _) = True +isLeaf (Constructor' _ _) = True +isLeaf (TermLink' _) = True +isLeaf (TypeLink' _) = True +isLeaf _ = False + +minimizeCyclesOrCrash :: Var v => Term v a -> Term v a +minimizeCyclesOrCrash t = case minimize' t of + Right t -> t + Left e -> error $ "tried to minimize let rec with duplicate definitions: " + ++ show (fst <$> toList e) + +fromTerm' :: (Monoid a, Var v) => (v -> v) -> Term v a -> Term v a +fromTerm' liftVar t = term (fromTerm liftVar t) + +fromTerm :: forall a v . 
(Monoid a, Var v) => (v -> v) -> Term v a -> ANF v a +fromTerm liftVar t = ANF_ (go $ lambdaLift liftVar t) where + ann = ABT.annotation + isRef (Ref' _) = True + isRef _ = False + fixup :: Set v -- if we gotta create new vars, avoid using these + -> ([Term v a] -> Term v a) -- do this with ANF'd args + -> [Term v a] -- the args (not all in ANF already) + -> Term v a -- the ANF'd term + fixup used f args = let + args' = Map.fromList $ toVar =<< (args `zip` [0..]) + toVar (b, i) | isLeaf b = [] + | otherwise = [(i, Var.freshIn used (Var.named . Text.pack $ "arg" ++ show i))] + argsANF = map toANF (args `zip` [0..]) + toANF (b,i) = maybe b (var (ann b)) $ Map.lookup i args' + addLet (b,i) body = maybe body (\v -> let1' False [(v,go b)] body) (Map.lookup i args') + in foldr addLet (f argsANF) (args `zip` [(0::Int)..]) + go :: Term v a -> Term v a + go e@(Apps' f args) + | (isRef f || isLeaf f) && all isLeaf args = e + | not (isRef f || isLeaf f) = + let f' = ABT.fresh e (Var.named "f") + in let1' False [(f', go f)] (go $ apps' (var (ann f) f') args) + | otherwise = fixup (ABT.freeVars e) (apps' f) args + go e@(Handle' h body) + | isLeaf h = handle (ann e) h (go body) + | otherwise = let h' = ABT.fresh e (Var.named "handler") + in let1' False [(h', go h)] (handle (ann e) (var (ann h) h') (go body)) + go e@(If' cond t f) + | isLeaf cond = iff (ann e) cond (go t) (go f) + | otherwise = let cond' = ABT.fresh e (Var.named "cond") + in let1' False [(cond', go cond)] (iff (ann e) (var (ann cond) cond') (go t) (go f)) + go e@(Match' scrutinee cases) + | isLeaf scrutinee = match (ann e) scrutinee (fmap go <$> cases) + | otherwise = let scrutinee' = ABT.fresh e (Var.named "scrutinee") + in let1' False [(scrutinee', go scrutinee)] + (match (ann e) + (var (ann scrutinee) scrutinee') + (fmap go <$> cases)) + -- MatchCase RHS, shouldn't conflict with LetRec + go (ABT.Abs1NA' avs t) = ABT.absChain' avs (go t) + go e@(And' x y) + | isLeaf x = and (ann e) x (go y) + | otherwise = + 
let x' = ABT.fresh e (Var.named "argX") + in let1' False [(x', go x)] (and (ann e) (var (ann x) x') (go y)) + go e@(Or' x y) + | isLeaf x = or (ann e) x (go y) + | otherwise = + let x' = ABT.fresh e (Var.named "argX") + in let1' False [(x', go x)] (or (ann e) (var (ann x) x') (go y)) + go e@(Var' _) = e + go e@(Int' _) = e + go e@(Nat' _) = e + go e@(Float' _) = e + go e@(Boolean' _) = e + go e@(Text' _) = e + go e@(Char' _) = e + go e@(Blank' _) = e + go e@(Ref' _) = e + go e@(TermLink' _) = e + go e@(TypeLink' _) = e + go e@(RequestOrCtor' _ _) = e + go e@(Lam' _) = e -- ANF conversion is shallow - + -- don't descend into closed lambdas + go (Let1Named' v b e) = let1' False [(v, go b)] (go e) + -- top = False because we don't care to emit typechecker notes about TLDs + go (LetRecNamed' bs e) = letRec' False (fmap (second go) bs) (go e) + go e@(Sequence' vs) = + if all isLeaf vs then e + else fixup (ABT.freeVars e) (seq (ann e)) (toList vs) + go e@(Ann' tm typ) = Term.ann (ann e) (go tm) typ + go e = error $ "ANF.term: I thought we got all of these\n" <> show e + +data Mem = UN | BX deriving (Eq,Ord,Show,Enum) + +-- Context entries with evaluation strategy +data CTE v s + = ST [v] [Mem] s + | LZ v (Either Word64 v) [v] + deriving (Show) + +pattern ST1 v m s = ST [v] [m] s + +data ANormalBF v e + = ALet [Mem] (ANormalTF v e) e + | AName (Either Word64 v) [v] e + | ATm (ANormalTF v e) + deriving (Show) + +data ANormalTF v e + = ALit Lit + | AMatch v (Branched e) + | AShift RTag e + | AHnd [RTag] v e + | AApp (Func v) [v] + | AFrc v + | AVar v + deriving (Show) + +-- Types representing components that will go into the runtime tag of +-- a data type value. RTags correspond to references, while CTags +-- correspond to constructors. 
+newtype RTag = RTag Word64 deriving (Eq,Ord,Show,Read,EC.EnumKey) +newtype CTag = CTag Word16 deriving (Eq,Ord,Show,Read,EC.EnumKey) + +class Tag t where rawTag :: t -> Word64 +instance Tag RTag where rawTag (RTag w) = w +instance Tag CTag where rawTag (CTag w) = fromIntegral w + +packTags :: RTag -> CTag -> Word64 +packTags (RTag rt) (CTag ct) = ri .|. ci + where + ri = rt `shiftL` 16 + ci = fromIntegral ct + +unpackTags :: Word64 -> (RTag, CTag) +unpackTags w = (RTag $ w `shiftR` 16, CTag . fromIntegral $ w .&. 0xFFFF) + +ensureRTag :: (Ord n, Show n, Num n) => String -> n -> r -> r +ensureRTag s n x + | n > 0xFFFFFFFFFFFF = error $ s ++ "@RTag: too large: " ++ show n + | otherwise = x + +ensureCTag :: (Ord n, Show n, Num n) => String -> n -> r -> r +ensureCTag s n x + | n > 0xFFFF = error $ s ++ "@CTag: too large: " ++ show n + | otherwise = x + +instance Enum RTag where + toEnum i = ensureRTag "toEnum" i . RTag $ toEnum i + fromEnum (RTag w) = fromEnum w + +instance Enum CTag where + toEnum i = ensureCTag "toEnum" i . CTag $ toEnum i + fromEnum (CTag w) = fromEnum w + +instance Num RTag where + fromInteger i = ensureRTag "fromInteger" i . RTag $ fromInteger i + (+) = error "RTag: +" + (*) = error "RTag: *" + abs = error "RTag: abs" + signum = error "RTag: signum" + negate = error "RTag: negate" + +instance Num CTag where + fromInteger i = ensureCTag "fromInteger" i . 
CTag $ fromInteger i + (+) = error "CTag: +" + (*) = error "CTag: *" + abs = error "CTag: abs" + signum = error "CTag: signum" + negate = error "CTag: negate" + +instance Functor (ANormalBF v) where + fmap f (ALet m bn bo) = ALet m (f <$> bn) $ f bo + fmap f (AName n as bo) = AName n as $ f bo + fmap f (ATm tm) = ATm $ f <$> tm + +instance Bifunctor ANormalBF where + bimap f g (ALet m bn bo) = ALet m (bimap f g bn) $ g bo + bimap f g (AName n as bo) = AName (f <$> n) (f <$> as) $ g bo + bimap f g (ATm tm) = ATm (bimap f g tm) + +instance Bifoldable ANormalBF where + bifoldMap f g (ALet _ b e) = bifoldMap f g b <> g e + bifoldMap f g (AName n as e) = foldMap f n <> foldMap f as <> g e + bifoldMap f g (ATm e) = bifoldMap f g e + +instance Functor (ANormalTF v) where + fmap _ (AVar v) = AVar v + fmap _ (ALit l) = ALit l + fmap f (AMatch v br) = AMatch v $ f <$> br + fmap f (AHnd rs h e) = AHnd rs h $ f e + fmap f (AShift i e) = AShift i $ f e + fmap _ (AFrc v) = AFrc v + fmap _ (AApp f args) = AApp f args + +instance Bifunctor ANormalTF where + bimap f _ (AVar v) = AVar (f v) + bimap _ _ (ALit l) = ALit l + bimap f g (AMatch v br) = AMatch (f v) $ fmap g br + bimap f g (AHnd rs v e) = AHnd rs (f v) $ g e + bimap _ g (AShift i e) = AShift i $ g e + bimap f _ (AFrc v) = AFrc (f v) + bimap f _ (AApp fu args) = AApp (fmap f fu) $ fmap f args + +instance Bifoldable ANormalTF where + bifoldMap f _ (AVar v) = f v + bifoldMap _ _ (ALit _) = mempty + bifoldMap f g (AMatch v br) = f v <> foldMap g br + bifoldMap f g (AHnd _ h e) = f h <> g e + bifoldMap _ g (AShift _ e) = g e + bifoldMap f _ (AFrc v) = f v + bifoldMap f _ (AApp func args) = foldMap f func <> foldMap f args + +matchLit :: Term v a -> Maybe Lit +matchLit (Int' i) = Just $ I i +matchLit (Nat' n) = Just $ N n +matchLit (Float' f) = Just $ F f +matchLit (Text' t) = Just $ T t +matchLit (Char' c) = Just $ C c +matchLit _ = Nothing + +pattern Lit' l <- (matchLit -> Just l) +pattern TLet v m bn bo = ABTN.TTm (ALet [m] 
bn (ABTN.TAbs v bo)) +pattern TLets vs ms bn bo = ABTN.TTm (ALet ms bn (ABTN.TAbss vs bo)) +pattern TName v f as bo = ABTN.TTm (AName f as (ABTN.TAbs v bo)) +pattern TTm e = ABTN.TTm (ATm e) +{-# complete TLets, TName, TTm #-} + +pattern TLit l = TTm (ALit l) + +pattern TApp f args = TTm (AApp f args) +pattern AApv v args = AApp (FVar v) args +pattern TApv v args = TApp (FVar v) args +pattern ACom r args = AApp (FComb r) args +pattern TCom r args = TApp (FComb r) args +pattern ACon r t args = AApp (FCon r t) args +pattern TCon r t args = TApp (FCon r t) args +pattern AKon v args = AApp (FCont v) args +pattern TKon v args = TApp (FCont v) args +pattern AReq r t args = AApp (FReq r t) args +pattern TReq r t args = TApp (FReq r t) args +pattern APrm p args = AApp (FPrim (Left p)) args +pattern TPrm p args = TApp (FPrim (Left p)) args +pattern AIOp p args = AApp (FPrim (Right p)) args +pattern TIOp p args = TApp (FPrim (Right p)) args + +pattern THnd rs h b = TTm (AHnd rs h b) +pattern TShift i v e = TTm (AShift i (ABTN.TAbs v e)) +pattern TMatch v cs = TTm (AMatch v cs) +pattern TFrc v = TTm (AFrc v) +pattern TVar v = TTm (AVar v) + +{-# complete + TLet, TName, TVar, TApp, TFrc, TLit, THnd, TShift, TMatch + #-} +{-# complete + TLet, TName, + TVar, TFrc, + TApv, TCom, TCon, TKon, TReq, TPrm, TIOp, + TLit, THnd, TShift, TMatch + #-} + +bind :: Var v => Cte v -> ANormal v -> ANormal v +bind (ST us ms bu) = TLets us ms bu +bind (LZ u f as) = TName u f as + +unbind :: Var v => ANormal v -> Maybe (Cte v, ANormal v) +unbind (TLets us ms bu bd) = Just (ST us ms bu, bd) +unbind (TName u f as bd) = Just (LZ u f as, bd) +unbind _ = Nothing + +unbinds :: Var v => ANormal v -> (Ctx v, ANormal v) +unbinds (TLets us ms bu (unbinds -> (ctx, bd))) = (ST us ms bu:ctx, bd) +unbinds (TName u f as (unbinds -> (ctx, bd))) = (LZ u f as:ctx, bd) +unbinds tm = ([], tm) + +unbinds' :: Var v => ANormal v -> (Ctx v, ANormalT v) +unbinds' (TLets us ms bu (unbinds' -> (ctx, bd))) = (ST us ms 
bu:ctx, bd) +unbinds' (TName u f as (unbinds' -> (ctx, bd))) = (LZ u f as:ctx, bd) +unbinds' (TTm tm) = ([], tm) + +pattern TBind bn bd <- (unbind -> Just (bn, bd)) + where TBind bn bd = bind bn bd + +pattern TBinds :: Var v => Ctx v -> ANormal v -> ANormal v +pattern TBinds ctx bd <- (unbinds -> (ctx, bd)) + where TBinds ctx bd = foldr bind bd ctx + +pattern TBinds' :: Var v => Ctx v -> ANormalT v -> ANormal v +pattern TBinds' ctx bd <- (unbinds' -> (ctx, bd)) + where TBinds' ctx bd = foldr bind (TTm bd) ctx + +{-# complete TBinds' #-} + +data SeqEnd = SLeft | SRight + deriving (Eq, Ord, Enum, Show) + +data Branched e + = MatchIntegral (EnumMap Word64 e) (Maybe e) + | MatchText (Map.Map Text e) (Maybe e) + | MatchRequest (EnumMap RTag (EnumMap CTag ([Mem], e))) e + | MatchEmpty + | MatchData Reference (EnumMap CTag ([Mem], e)) (Maybe e) + | MatchSum (EnumMap Word64 ([Mem], e)) + deriving (Show, Functor, Foldable, Traversable) + +data BranchAccum v + = AccumEmpty + | AccumIntegral + Reference + (Maybe (ANormal v)) + (EnumMap Word64 (ANormal v)) + | AccumText + (Maybe (ANormal v)) + (Map.Map Text (ANormal v)) + | AccumDefault (ANormal v) + | AccumPure (ANormal v) + | AccumRequest + (EnumMap RTag (EnumMap CTag ([Mem],ANormal v))) + (Maybe (ANormal v)) + | AccumData + Reference + (Maybe (ANormal v)) + (EnumMap CTag ([Mem],ANormal v)) + | AccumSeqEmpty (ANormal v) + | AccumSeqView + SeqEnd + (Maybe (ANormal v)) -- empty + (ANormal v) -- cons/snoc + | AccumSeqSplit + SeqEnd + Int -- split at + (Maybe (ANormal v)) -- default + (ANormal v) -- split + +instance Semigroup (BranchAccum v) where + AccumEmpty <> r = r + l <> AccumEmpty = l + AccumIntegral rl dl cl <> AccumIntegral rr dr cr + | rl == rr = AccumIntegral rl (dl <|> dr) $ cl <> cr + AccumText dl cl <> AccumText dr cr + = AccumText (dl <|> dr) (cl <> cr) + AccumData rl dl cl <> AccumData rr dr cr + | rl == rr = AccumData rl (dl <|> dr) (cl <> cr) + AccumDefault dl <> AccumIntegral r _ cr + = AccumIntegral r (Just 
dl) cr + AccumDefault dl <> AccumText _ cr + = AccumText (Just dl) cr + AccumDefault dl <> AccumData rr _ cr + = AccumData rr (Just dl) cr + AccumIntegral r dl cl <> AccumDefault dr + = AccumIntegral r (dl <|> Just dr) cl + AccumText dl cl <> AccumDefault dr + = AccumText (dl <|> Just dr) cl + AccumData rl dl cl <> AccumDefault dr + = AccumData rl (dl <|> Just dr) cl + l@(AccumPure _) <> AccumPure _ = l + AccumPure dl <> AccumRequest hr _ = AccumRequest hr (Just dl) + AccumRequest hl dl <> AccumPure dr + = AccumRequest hl (dl <|> Just dr) + AccumRequest hl dl <> AccumRequest hr dr + = AccumRequest hm $ dl <|> dr + where + hm = EC.unionWith (<>) hl hr + l@(AccumSeqEmpty _) <> AccumSeqEmpty _ = l + AccumSeqEmpty eml <> AccumSeqView er _ cnr + = AccumSeqView er (Just eml) cnr + AccumSeqView el eml cnl <> AccumSeqEmpty emr + = AccumSeqView el (eml <|> Just emr) cnl + AccumSeqView el eml cnl <> AccumSeqView er emr _ + | el /= er + = error "AccumSeqView: trying to merge views of opposite ends" + | otherwise = AccumSeqView el (eml <|> emr) cnl + AccumSeqView _ _ _ <> AccumDefault _ + = error "seq views may not have defaults" + AccumDefault _ <> AccumSeqView _ _ _ + = error "seq views may not have defaults" + AccumSeqSplit el nl dl bl <> AccumSeqSplit er nr dr _ + | el /= er + = error "AccumSeqSplit: trying to merge splits at opposite ends" + | nl /= nr + = error + "AccumSeqSplit: trying to merge splits at different positions" + | otherwise + = AccumSeqSplit el nl (dl <|> dr) bl + AccumDefault dl <> AccumSeqSplit er nr _ br + = AccumSeqSplit er nr (Just dl) br + AccumSeqSplit el nl dl bl <> AccumDefault dr + = AccumSeqSplit el nl (dl <|> Just dr) bl + _ <> _ = error $ "cannot merge data cases for different types" + +instance Monoid (BranchAccum e) where + mempty = AccumEmpty + +data Func v + -- variable + = FVar v + -- top-level combinator + | FComb !Word64 + -- continuation jump + | FCont v + -- data constructor + | FCon !RTag !CTag + -- ability request + | FReq !RTag 
!CTag + -- prim op + | FPrim (Either POp IOp) + deriving (Show, Functor, Foldable, Traversable) + +data Lit + = I Int64 + | N Word64 + | F Double + | T Text + | C Char + | LM Referent + | LY Reference + deriving (Show) + +litRef :: Lit -> Reference +litRef (I _) = Ty.intRef +litRef (N _) = Ty.natRef +litRef (F _) = Ty.floatRef +litRef (T _) = Ty.textRef +litRef (C _) = Ty.charRef +litRef (LM _) = Ty.termLinkRef +litRef (LY _) = Ty.typeLinkRef + +data POp + -- Int + = ADDI | SUBI | MULI | DIVI -- +,-,*,/ + | SGNI | NEGI | MODI -- sgn,neg,mod + | POWI | SHLI | SHRI -- pow,shiftl,shiftr + | INCI | DECI | LEQI | EQLI -- inc,dec,<=,== + -- Nat + | ADDN | SUBN | MULN | DIVN -- +,-,*,/ + | MODN | TZRO | LZRO -- mod,trailing/leadingZeros + | POWN | SHLN | SHRN -- pow,shiftl,shiftr + | ANDN | IORN | XORN | COMN -- and,or,xor,complement + | INCN | DECN | LEQN | EQLN -- inc,dec,<=,== + -- Float + | ADDF | SUBF | MULF | DIVF -- +,-,*,/ + | MINF | MAXF | LEQF | EQLF -- min,max,<=,== + | POWF | EXPF | SQRT | LOGF -- pow,exp,sqrt,log + | LOGB -- logBase + | ABSF | CEIL | FLOR | TRNF -- abs,ceil,floor,truncate + | RNDF -- round + -- Trig + | COSF | ACOS | COSH | ACSH -- cos,acos,cosh,acosh + | SINF | ASIN | SINH | ASNH -- sin,asin,sinh,asinh + | TANF | ATAN | TANH | ATNH -- tan,atan,tanh,atanh + | ATN2 -- atan2 + -- Text + | CATT | TAKT | DRPT | SIZT -- ++,take,drop,size + | UCNS | USNC | EQLT | LEQT -- uncons,unsnoc,==,<= + | PAKT | UPKT -- pack,unpack + -- Sequence + | CATS | TAKS | DRPS | SIZS -- ++,take,drop,size + | CONS | SNOC | IDXS | BLDS -- cons,snoc,at,build + | VWLS | VWRS | SPLL | SPLR -- viewl,viewr,splitl,splitr + -- Bytes + | PAKB | UPKB | TAKB | DRPB -- pack,unpack,take,drop + | IDXB | SIZB | FLTB | CATB -- index,size,flatten,append + -- Conversion + | ITOF | NTOF | ITOT | NTOT + | TTOI | TTON | TTOF | FTOT + -- Concurrency + | FORK + -- Universal operations + | EQLU | CMPU | EROR + -- Debug + | PRNT | INFO + deriving (Show,Eq,Ord) + +data IOp + = OPENFI | CLOSFI | 
ISFEOF | ISFOPN + | ISSEEK | SEEKFI | POSITN | STDHND + | GBUFFR | SBUFFR + | GTLINE | GTTEXT | PUTEXT + | SYTIME | GTMPDR | GCURDR | SCURDR + | DCNTNS | FEXIST | ISFDIR + | CRTDIR | REMDIR | RENDIR + | REMOFI | RENAFI | GFTIME | GFSIZE + | SRVSCK | LISTEN | CLISCK | CLOSCK + | SKACPT | SKSEND | SKRECV + | THKILL | THDELY + | MVNEWF | MVNEWE | MVTAKE | MVTAKT -- new,new empty,take,trytake + | MVPUTB | MVPUTT | MVSWAP | MVEMPT -- put,tryput,swap,isempty + | MVREAD | MVREAT -- read,tryread + deriving (Show,Eq,Ord,Enum,Bounded) + +type ANormal = ABTN.Term ANormalBF +type ANormalT v = ANormalTF v (ANormal v) + +type Cte v = CTE v (ANormalT v) +type Ctx v = [Cte v] + +-- Should be a completely closed term +data SuperNormal v + = Lambda { conventions :: [Mem], bound :: ANormal v } + deriving (Show) +data SuperGroup v + = Rec + { group :: [(v, SuperNormal v)] + , entry :: SuperNormal v + } deriving (Show) + +type ANFM v + = ReaderT (Set v, Reference -> Word64, Reference -> RTag) + (State (Word64, [(v, SuperNormal v)])) + +resolveTerm :: Reference -> ANFM v Word64 +resolveTerm r = asks $ \(_, rtm, _) -> rtm r + +resolveType :: Reference -> ANFM v RTag +resolveType r = asks $ \(_, _, rty) -> rty r + +groupVars :: ANFM v (Set v) +groupVars = asks $ \(grp, _, _) -> grp + +bindLocal :: Ord v => [v] -> ANFM v r -> ANFM v r +bindLocal vs + = local $ \(gr, rw, rt) -> (gr Set.\\ Set.fromList vs, rw, rt) + +freshANF :: Var v => Word64 -> v +freshANF fr = Var.freshenId fr $ typed Var.ANFBlank + +fresh :: Var v => ANFM v v +fresh = state $ \(fr, cs) -> (freshANF fr, (fr+1, cs)) + +contextualize :: Var v => ANormalT v -> ANFM v (Ctx v, v) +contextualize (AVar cv) = do + gvs <- groupVars + if cv `Set.notMember` gvs + then pure ([], cv) + else do fresh <&> \bv -> ([ST1 bv BX $ AApv cv []], bv) +contextualize tm = fresh <&> \fv -> ([ST1 fv BX tm], fv) + +record :: Var v => (v, SuperNormal v) -> ANFM v () +record p = modify $ \(fr, to) -> (fr, p:to) + +superNormalize + :: Var v + => 
(Reference -> Word64) + -> (Reference -> RTag) + -> Term v a + -> SuperGroup v +superNormalize rtm rty tm = Rec l c + where + (bs, e) | LetRecNamed' bs e <- tm = (bs, e) + | otherwise = ([], tm) + grp = Set.fromList $ fst <$> bs + comp = traverse_ superBinding bs *> toSuperNormal e + subc = runReaderT comp (grp, rtm, rty) + (c, (_,l)) = runState subc (0, []) + +superBinding :: Var v => (v, Term v a) -> ANFM v () +superBinding (v, tm) = do + nf <- toSuperNormal tm + modify $ \(cvs, ctx) -> (cvs, (v,nf):ctx) + +toSuperNormal :: Var v => Term v a -> ANFM v (SuperNormal v) +toSuperNormal tm = do + grp <- groupVars + if not . Set.null . (Set.\\ grp) $ freeVars tm + then error $ "free variables in supercombinator: " ++ show tm + else Lambda (BX<$vs) . ABTN.TAbss vs <$> bindLocal vs (anfTerm body) + where + (vs, body) = fromMaybe ([], tm) $ unLams' tm + +anfTerm :: Var v => Term v a -> ANFM v (ANormal v) +anfTerm tm = uncurry TBinds' <$> anfBlock tm + +floatableCtx :: Ctx v -> Bool +floatableCtx = all p + where + p (LZ _ _ _) = True + p (ST _ _ tm) = q tm + q (ALit _) = True + q (AVar _) = True + q (ACon _ _ _) = True + q _ = False + +anfHandled :: Var v => Term v a -> ANFM v (Ctx v, ANormalT v) +anfHandled body = anfBlock body >>= \case + (ctx, t@ACon{}) -> fresh <&> \v -> (ctx ++ [ST1 v BX t], AVar v) + (ctx, t@(ALit l)) -> fresh <&> \v -> (ctx ++ [ST1 v cc t], AVar v) + where + cc = case l of T{} -> BX ; LM{} -> BX ; LY{} -> BX ; _ -> UN + p -> pure p + +anfBlock :: Var v => Term v a -> ANFM v (Ctx v, ANormalT v) +anfBlock (Var' v) = pure ([], AVar v) +anfBlock (If' c t f) = do + (cctx, cc) <- anfBlock c + cf <- anfTerm f + ct <- anfTerm t + (cx, v) <- contextualize cc + let cases = MatchData + (Builtin $ Text.pack "Boolean") + (EC.mapSingleton 0 ([], cf)) + (Just ct) + pure (cctx ++ cx, AMatch v cases) +anfBlock (And' l r) = do + (lctx, vl) <- anfArg l + (rctx, vr) <- anfArg r + i <- resolveTerm $ Builtin "Boolean.and" + pure (lctx ++ rctx, ACom i [vl, vr]) +anfBlock 
(Or' l r) = do + (lctx, vl) <- anfArg l + (rctx, vr) <- anfArg r + i <- resolveTerm $ Builtin "Boolean.or" + pure (lctx ++ rctx, ACom i [vl, vr]) +anfBlock (Handle' h body) + = anfArg h >>= \(hctx, vh) -> + anfHandled body >>= \case + (ctx, ACom f as) | floatableCtx ctx -> do + v <- fresh + pure (hctx ++ ctx ++ [LZ v (Left f) as], AApp (FVar vh) [v]) + (ctx, AApv f as) | floatableCtx ctx -> do + v <- fresh + pure (hctx ++ ctx ++ [LZ v (Right f) as], AApp (FVar vh) [v]) + (ctx, AVar v) | floatableCtx ctx -> do + pure (hctx ++ ctx, AApp (FVar vh) [v]) + p@(_, _) -> + error $ "handle body should be a simple call: " ++ show p +anfBlock (Match' scrut cas) = do + (sctx, sc) <- anfBlock scrut + (cx, v) <- contextualize sc + brn <- anfCases v cas + case brn of + AccumDefault (TBinds' dctx df) -> do + pure (sctx ++ cx ++ dctx, df) + AccumRequest _ Nothing -> + error "anfBlock: AccumRequest without default" + AccumPure (ABTN.TAbss us bd) + | [u] <- us + , TBinds' bx bd <- bd + -> case cx of + [] -> pure (sctx ++ [ST1 u BX (AFrc v)] ++ bx, bd) + [ST1 _ BX tm] -> pure (sctx ++ [ST1 u BX tm] ++ bx, bd) + _ -> error "anfBlock|AccumPure: impossible" + | otherwise -> error "pure handler with too many variables" + AccumRequest abr (Just df) -> do + (r, vs) <- do + r <- fresh + v <- fresh + gvs <- groupVars + let hfb = ABTN.TAbs v . TMatch v $ MatchRequest abr df + hfvs = Set.toList $ ABTN.freeVars hfb `Set.difference` gvs + record (r, Lambda (BX <$ hfvs ++ [v]) . ABTN.TAbss hfvs $ hfb) + pure (r, hfvs) + hv <- fresh + let msc | [ST1 _ BX tm] <- cx = tm + | [ST _ _ _] <- cx = error "anfBlock: impossible" + | otherwise = AFrc v + pure ( sctx ++ [LZ hv (Right r) vs] + , AHnd (EC.keys abr) hv . 
TTm $ msc + ) + AccumText df cs -> + pure (sctx ++ cx, AMatch v $ MatchText cs df) + AccumIntegral r df cs -> do + i <- fresh + let dcs = MatchData r + (EC.mapSingleton 0 ([UN], ABTN.TAbss [i] ics)) + Nothing + ics = TMatch i $ MatchIntegral cs df + pure (sctx ++ cx, AMatch v dcs) + AccumData r df cs -> + pure (sctx ++ cx, AMatch v $ MatchData r cs df) + AccumSeqEmpty _ -> + error "anfBlock: non-exhaustive AccumSeqEmpty" + AccumSeqView en (Just em) bd -> do + r <- fresh + op <- case en of + SLeft -> resolveTerm $ Builtin "List.viewl" + _ -> resolveTerm $ Builtin "List.viewr" + pure ( sctx ++ cx ++ [ST1 r BX (ACom op [v])] + , AMatch r + $ MatchData Ty.seqViewRef + (EC.mapFromList + [ (0, ([], em)) + , (1, ([BX,BX], bd)) + ] + ) + Nothing + ) + AccumSeqView {} -> + error "anfBlock: non-exhaustive AccumSeqView" + AccumSeqSplit en n mdf bd -> do + i <- fresh + r <- fresh + t <- fresh + pure ( sctx ++ cx ++ [lit i, split i r] + , AMatch r . MatchSum $ mapFromList + [ (0, ([], df t)) + , (1, ([BX,BX], bd)) + ]) + where + op | SLeft <- en = SPLL + | otherwise = SPLR + lit i = ST1 i UN (ALit . 
N $ fromIntegral n) + split i r = ST1 r UN (APrm op [i,v]) + df t + = fromMaybe + ( TLet t BX (ALit (T "non-exhaustive split")) + $ TPrm EROR [t]) + mdf + AccumEmpty -> pure (sctx ++ cx, AMatch v MatchEmpty) +anfBlock (Let1Named' v b e) + = anfBlock b >>= \(bctx, cb) -> bindLocal [v] $ do + (ectx, ce) <- anfBlock e + pure (bctx ++ ST1 v BX cb : ectx, ce) +anfBlock (Apps' f args) = do + (fctx, cf) <- anfFunc f + (actx, cas) <- anfArgs args + pure (fctx ++ actx, AApp cf cas) +anfBlock (Constructor' r t) + = resolveType r <&> \rt -> ([], ACon rt (toEnum t) []) +anfBlock (Request' r t) = do + r <- resolveType r + pure ([], AReq r (toEnum t) []) +anfBlock (Boolean' b) = + resolveType Ty.booleanRef <&> \rt -> + ([], ACon rt (if b then 1 else 0) []) +anfBlock (Lit' l@(T _)) = + pure ([], ALit l) +anfBlock (Lit' l) = do + lv <- fresh + rt <- resolveType $ litRef l + pure ([ST1 lv UN $ ALit l], ACon rt 0 [lv]) +anfBlock (Ref' r) = + resolveTerm r <&> \n -> ([], ACom n []) +anfBlock (Blank' _) = do + ev <- fresh + pure ([ST1 ev BX (ALit (T "Blank"))], APrm EROR [ev]) +anfBlock (TermLink' r) = pure ([], ALit (LM r)) +anfBlock (TypeLink' r) = pure ([], ALit (LY r)) +anfBlock (Sequence' as) = fmap (APrm BLDS) <$> anfArgs tms + where + tms = toList as +anfBlock t = error $ "anf: unhandled term: " ++ show t + +-- Note: this assumes that patterns have already been translated +-- to a state in which every case matches a single layer of data, +-- with no guards, and no variables ignored. This is not checked +-- completely. +anfInitCase + :: Var v + => v + -> MatchCase p (Term v a) + -> ANFM v (BranchAccum v) +anfInitCase u (MatchCase p guard (ABT.AbsN' vs bd)) + | Just _ <- guard = error "anfInitCase: unexpected guard" + | P.Unbound _ <- p + , [] <- vs + = AccumDefault <$> anfBody bd + | P.Var _ <- p + , [v] <- vs + = AccumDefault . 
ABTN.rename v u <$> anfBody bd + | P.Var _ <- p + = error $ "vars: " ++ show (length vs) + | P.Int _ (fromIntegral -> i) <- p + = AccumIntegral Ty.intRef Nothing . EC.mapSingleton i <$> anfBody bd + | P.Nat _ i <- p + = AccumIntegral Ty.natRef Nothing . EC.mapSingleton i <$> anfBody bd + | P.Boolean _ b <- p + , t <- if b then 1 else 0 + = AccumData Ty.booleanRef Nothing + . EC.mapSingleton t . ([],) <$> anfBody bd + | P.Text _ t <- p + , [] <- vs + = AccumText Nothing . Map.singleton t <$> anfBody bd + | P.Constructor _ r t ps <- p = do + us <- expandBindings ps vs + AccumData r Nothing + . EC.mapSingleton (toEnum t) + . (BX<$us,) + . ABTN.TAbss us + <$> anfBody bd + | P.EffectPure _ q <- p = do + us <- expandBindings [q] vs + AccumPure . ABTN.TAbss us <$> anfBody bd + | P.EffectBind _ r t ps pk <- p = do + exp <- expandBindings (snoc ps pk) vs + let (us, uk) + = maybe (error "anfInitCase: unsnoc impossible") id + $ unsnoc exp + n <- resolveType r + jn <- resolveTerm $ Builtin "jumpCont" + kf <- fresh + flip AccumRequest Nothing + . EC.mapSingleton n + . EC.mapSingleton (toEnum t) + . (BX<$us,) + . ABTN.TAbss us + . TShift n kf + . TName uk (Left jn) [kf] + <$> anfBody bd + | P.SequenceLiteral _ [] <- p + = AccumSeqEmpty <$> anfBody bd + | P.SequenceOp _ l op r <- p + , Concat <- op + , P.SequenceLiteral p ll <- l = do + us <- expandBindings [P.Var p, r] vs + AccumSeqSplit SLeft (length ll) Nothing + . ABTN.TAbss us + <$> anfBody bd + | P.SequenceOp _ l op r <- p + , Concat <- op + , P.SequenceLiteral p rl <- r = do + us <- expandBindings [l, P.Var p] vs + AccumSeqSplit SLeft (length rl) Nothing + . ABTN.TAbss us + <$> anfBody bd + | P.SequenceOp _ l op r <- p = do + us <- expandBindings [l,r] vs + let dir = case op of Cons -> SLeft ; _ -> SRight + AccumSeqView dir Nothing . 
ABTN.TAbss us <$> anfBody bd + where + anfBody tm = bindLocal vs $ anfTerm tm +anfInitCase _ (MatchCase p _ _) + = error $ "anfInitCase: unexpected pattern: " ++ show p + +expandBindings' + :: Var v + => Word64 + -> [P.Pattern p] + -> [v] + -> Either String (Word64, [v]) +expandBindings' fr [] [] = Right (fr, []) +expandBindings' fr (P.Unbound _:ps) vs + = fmap (u :) <$> expandBindings' (fr+1) ps vs + where u = freshANF fr +expandBindings' fr (P.Var _:ps) (v:vs) + = fmap (v :) <$> expandBindings' fr ps vs +expandBindings' _ [] (_:_) + = Left "expandBindings': more bindings than expected" +expandBindings' _ (_:_) [] + = Left "expandBindings': more patterns than expected" +expandBindings' _ _ _ + = Left $ "expandBindings': unexpected pattern" + +expandBindings :: Var v => [P.Pattern p] -> [v] -> ANFM v [v] +expandBindings ps vs + = state $ \(fr,co) -> case expandBindings' fr ps vs of + Left err -> error $ err ++ " " ++ show (ps, vs) + Right (fr,l) -> (l, (fr,co)) + +anfCases + :: Var v + => v + -> [MatchCase p (Term v a)] + -> ANFM v (BranchAccum v) +anfCases u = fmap fold . traverse (anfInitCase u) + +anfFunc :: Var v => Term v a -> ANFM v (Ctx v, Func v) +anfFunc (Var' v) = pure ([], FVar v) +anfFunc (Ref' r) + = resolveTerm r <&> \n -> ([], FComb n) +anfFunc (Constructor' r t) + = resolveType r <&> \rt -> ([], FCon rt $ toEnum t) +anfFunc (Request' r t) + = resolveType r <&> \rt -> ([], FReq rt $ toEnum t) +anfFunc tm = do + (fctx, ctm) <- anfBlock tm + (cx, v) <- contextualize ctm + pure (fctx ++ cx, FVar v) + +anfArg :: Var v => Term v a -> ANFM v (Ctx v, v) +anfArg tm = do + (ctx, ctm) <- anfBlock tm + (cx, v) <- contextualize ctm + pure (ctx ++ cx, v) + +anfArgs :: Var v => [Term v a] -> ANFM v (Ctx v, [v]) +anfArgs tms = first concat . 
unzip <$> traverse anfArg tms + +sink :: Var v => v -> Mem -> ANormalT v -> ANormal v -> ANormal v +sink v mtm tm = dive $ freeVarsT tm + where + frsh l r = Var.freshIn (l <> r) $ Var.typed Var.ANFBlank + + directVars = bifoldMap Set.singleton (const mempty) + freeVarsT = bifoldMap Set.singleton ABTN.freeVars + + dive _ exp | v `Set.notMember` ABTN.freeVars exp = exp + dive avoid exp@(TName u f as bo) + | v `elem` as + = let w = frsh avoid (ABTN.freeVars exp) + in TLet w mtm tm $ ABTN.rename v w exp + | otherwise + = TName u f as (dive avoid' bo) + where avoid' = Set.insert u avoid + dive avoid exp@(TLets us ms bn bo) + | v `Set.member` directVars bn -- we need to stop here + = let w = frsh avoid (ABTN.freeVars exp) + in TLet w mtm tm $ ABTN.rename v w exp + | otherwise + = TLets us ms bn' $ dive avoid' bo + where + avoid' = Set.fromList us <> avoid + bn' | v `Set.notMember` freeVarsT bn = bn + | otherwise = dive avoid' <$> bn + dive avoid exp@(TTm tm) + | v `Set.member` directVars tm -- same as above + = let w = frsh avoid (ABTN.freeVars exp) + in TLet w mtm tm $ ABTN.rename v w exp + | otherwise = TTm $ dive avoid <$> tm + +indent :: Int -> ShowS +indent ind = showString (replicate (ind*2) ' ') + +prettyGroup :: Var v => SuperGroup v -> ShowS +prettyGroup (Rec grp ent) + = showString "let rec\n" + . foldr f id grp + . showString "entry" + . prettySuperNormal 1 ent + where + f (v,sn) r = indent 1 . pvar v + . prettySuperNormal 2 sn . showString "\n" . r + +pvar :: Var v => v -> ShowS +pvar v = showString . Text.unpack $ Var.name v + +prettyVars :: Var v => [v] -> ShowS +prettyVars + = foldr (\v r -> showString " " . pvar v . r) id + +prettyLVars :: Var v => [Mem] -> [v] -> ShowS +prettyLVars [] [] = showString " " +prettyLVars (c:cs) (v:vs) + = showString " " + . showParen True (pvar v . showString ":" . shows c) + . 
prettyLVars cs vs + +prettyLVars [] (_:_) = error "more variables than conventions" +prettyLVars (_:_) [] = error "more conventions than variables" + +prettyRBind :: Var v => [v] -> ShowS +prettyRBind [] = showString "()" +prettyRBind [v] = pvar v +prettyRBind (v:vs) + = showParen True + $ pvar v . foldr (\v r -> shows v . showString "," . r) id vs + +prettySuperNormal :: Var v => Int -> SuperNormal v -> ShowS +prettySuperNormal ind (Lambda ccs (ABTN.TAbss vs tm)) + = prettyLVars ccs vs + . showString "=" + . prettyANF False (ind+1) tm + +prettyANF :: Var v => Bool -> Int -> ANormal v -> ShowS +prettyANF m ind tm = case tm of + TLets vs _ bn bo + -> showString "\n" + . indent ind + . prettyRBind vs + . showString " =" + . prettyANFT False (ind+1) bn + . prettyANF True ind bo + TName v f vs bo + -> showString "\n" + . indent ind + . prettyRBind [v] + . showString " := " + . prettyLZF f + . prettyVars vs + . prettyANF True ind bo + TTm tm + -> prettyANFT m ind tm + _ -> shows tm + +prettySpace :: Bool -> Int -> ShowS +prettySpace False _ = showString " " +prettySpace True ind = showString "\n" . indent ind + +prettyANFT :: Var v => Bool -> Int -> ANormalT v -> ShowS +prettyANFT m ind tm = prettySpace m ind . case tm of + ALit l -> shows l + AFrc v -> showString "!" . pvar v + AVar v -> pvar v + AApp f vs -> prettyFunc f . prettyVars vs + AMatch v bs + -> showString "match " + . pvar v . showString " with" + . prettyBranches (ind+1) bs + AShift r (ABTN.TAbss vs bo) + -> showString "shift[" . shows r . showString "]" + . prettyVars vs . showString "." + . prettyANF False (ind+1) bo + AHnd rs v bo + -> showString "handle" . prettyTags rs + . prettyANF False (ind+1) bo + . showString " with " . pvar v + +prettyLZF :: Var v => Either Word64 v -> ShowS +prettyLZF (Left w) = showString "ENV(" . shows w . showString ") " +prettyLZF (Right v) = pvar v . showString " " + +prettyTags :: [RTag] -> ShowS +prettyTags [] = showString "{}" +prettyTags (r:rs) + = showString "{" . 
shows r + . foldr (\t r -> shows t . showString "," . r) id rs + . showString "}" + +prettyFunc :: Var v => Func v -> ShowS +prettyFunc (FVar v) = pvar v . showString " " +prettyFunc (FCont v) = pvar v . showString " " +prettyFunc (FComb w) = showString "ENV(" . shows w . showString ")" +prettyFunc (FCon r t) + = showString "CON(" + . shows r . showString "," . shows t + . showString ")" +prettyFunc (FReq r t) + = showString "REQ(" + . shows r . showString "," . shows t + . showString ")" +prettyFunc (FPrim op) = either shows shows op . showString " " + +prettyBranches :: Var v => Int -> Branched (ANormal v) -> ShowS +prettyBranches ind bs = case bs of + MatchEmpty -> showString "{}" + MatchIntegral bs df + -> maybe id (\e -> prettyCase ind (showString "_") e id) df + . foldr (uncurry $ prettyCase ind . shows) id (mapToList bs) + MatchText bs df + -> maybe id (\e -> prettyCase ind (showString "_") e id) df + . foldr (uncurry $ prettyCase ind . shows) id (Map.toList bs) + MatchData _ bs df + -> maybe id (\e -> prettyCase ind (showString "_") e id) df + . foldr (uncurry $ prettyCase ind . shows) id + (mapToList $ snd <$> bs) + MatchRequest bs df + -> foldr (\(r,m) s -> + foldr (\(c,e) -> prettyCase ind (prettyReq r c) e) + s (mapToList $ snd <$> m)) + (prettyCase ind (prettyReq 0 0) df id) (mapToList bs) + MatchSum bs + -> foldr (uncurry $ prettyCase ind . shows) id + (mapToList $ snd <$> bs) + -- _ -> error "prettyBranches: todo" + where + prettyReq r c + = showString "REQ(" + . shows r . showString "," . shows c + . showString ")" + +prettyCase :: Var v => Int -> ShowS -> ANormal v -> ShowS -> ShowS +prettyCase ind sc (ABTN.TAbss vs e) r + = showString "\n" . indent ind . sc . prettyVars vs + . showString " ->" . prettyANF False (ind+1) e . 
r diff --git a/parser-typechecker/src/Unison/Runtime/Builtin.hs b/parser-typechecker/src/Unison/Runtime/Builtin.hs new file mode 100644 index 0000000000..062937f579 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Builtin.hs @@ -0,0 +1,1465 @@ +{-# language RankNTypes #-} +{-# language ViewPatterns #-} +{-# language PatternGuards #-} +{-# language TypeApplications #-} +{-# language OverloadedStrings #-} +{-# language ScopedTypeVariables #-} +{-# language FunctionalDependencies #-} + +module Unison.Runtime.Builtin + ( builtinLookup + , builtinTermNumbering + , builtinTypeNumbering + , builtinTermBackref + , builtinTypeBackref + , builtinForeigns + , numberedTermLookup + ) where + +import Control.Exception (IOException, try) +import Control.Monad (void) + +import Unison.ABT.Normalized hiding (TTm) +import Unison.Reference +import Unison.Runtime.ANF as ANF +import Unison.Var +import Unison.Symbol +import Unison.Runtime.Stack (Closure) +import Unison.Runtime.Foreign.Function +import Unison.Runtime.IOSource + +import qualified Unison.Type as Ty +import qualified Unison.Builtin.Decls as Ty + +import Unison.Util.EnumContainers as EC + +import Data.Word (Word64) +import Data.Text as Text (Text, pack, unpack) + +import Data.Set (Set, insert) + +import Data.Map (Map) +import qualified Data.Map as Map + +import qualified Unison.Util.Bytes as Bytes +import Network.Socket as SYS + ( accept + ) +import Network.Simple.TCP as SYS + ( HostPreference(..) 
+ , bindSock + , connectSock + , listenSock + , closeSock + , send + , recv + ) +import System.IO as SYS + ( openFile + , hClose + , hGetBuffering + , hSetBuffering + , hIsEOF + , hIsOpen + , hIsSeekable + , hSeek + , hTell + , stdin, stdout, stderr + ) +import Data.Text.IO as SYS + ( hGetLine + , hPutStr + ) +import Control.Concurrent as SYS + ( threadDelay + , killThread + ) +import Control.Concurrent.MVar as SYS +import Data.Time.Clock.POSIX as SYS + ( getPOSIXTime + , utcTimeToPOSIXSeconds + ) +import System.Directory as SYS + ( getCurrentDirectory + , setCurrentDirectory + , getTemporaryDirectory + , getDirectoryContents + , doesPathExist + -- , doesDirectoryExist + , renameDirectory + , removeFile + , renameFile + , createDirectoryIfMissing + , removeDirectoryRecursive + , getModificationTime + , getFileSize + ) + + +freshes :: Var v => Int -> [v] +freshes = freshes' mempty + +freshes' :: Var v => Set v -> Int -> [v] +freshes' avoid0 = go avoid0 [] + where + go _ vs 0 = vs + go avoid vs n + = let v = freshIn avoid $ typed ANFBlank + in go (insert v avoid) (v:vs) (n-1) + +boolTag, intTag, natTag, floatTag, charTag :: RTag +boolTag = rtag Ty.booleanRef +intTag = rtag Ty.intRef +natTag = rtag Ty.natRef +floatTag = rtag Ty.floatRef +charTag = rtag Ty.charRef + +optionTag, eitherTag, pairTag, seqViewTag :: RTag +optionTag = rtag Ty.optionalRef +eitherTag = rtag eitherReference +pairTag = rtag Ty.pairRef +seqViewTag = rtag Ty.seqViewRef + +fls, tru :: Var v => ANormal v +fls = TCon boolTag 0 [] +tru = TCon boolTag 1 [] + +boolift :: Var v => v -> ANormalT v +boolift v + = AMatch v $ MatchIntegral (mapFromList [(0,fls), (1,tru)]) Nothing + +notlift :: Var v => v -> ANormalT v +notlift v + = AMatch v $ MatchIntegral (mapFromList [(1,fls), (0,tru)]) Nothing + +unbox :: Var v => v -> Reference -> v -> ANormal v -> ANormal v +unbox v0 r v b + = TMatch v0 + $ MatchData r (mapSingleton 0 $ ([UN], TAbs v b)) Nothing + +unenum :: Var v => Int -> v -> Reference -> v -> 
ANormal v -> ANormal v +unenum n v0 r v nx + = TMatch v0 $ MatchData r cases Nothing + where + mkCase i = (toEnum i, ([], TLet v UN (ALit . I $ fromIntegral i) nx)) + cases = mapFromList . fmap mkCase $ [0..n-1] + +unop0 :: Var v => Int -> ([v] -> ANormal v) -> SuperNormal v +unop0 n f + = Lambda [BX] + . TAbss [x0] + $ f xs + where + xs@(x0:_) = freshes (1+n) + +binop0 :: Var v => Int -> ([v] -> ANormal v) -> SuperNormal v +binop0 n f + = Lambda [BX,BX] + . TAbss [x0,y0] + $ f xs + where + xs@(x0:y0:_) = freshes (2+n) + +unop :: Var v => POp -> Reference -> SuperNormal v +unop pop rf = unop' pop rf rf + +unop' :: Var v => POp -> Reference -> Reference -> SuperNormal v +unop' pop rfi rfo + = unop0 2 $ \[x0,x,r] + -> unbox x0 rfi x + . TLet r UN (APrm pop [x]) + $ TCon (rtag rfo) 0 [r] + +binop :: Var v => POp -> Reference -> SuperNormal v +binop pop rf = binop' pop rf rf rf + +binop' + :: Var v + => POp + -> Reference -> Reference -> Reference + -> SuperNormal v +binop' pop rfx rfy rfr + = binop0 3 $ \[x0,y0,x,y,r] + -> unbox x0 rfx x + . unbox y0 rfy y + . TLet r UN (APrm pop [x,y]) + $ TCon (rtag rfr) 0 [r] + +cmpop :: Var v => POp -> Reference -> SuperNormal v +cmpop pop rf + = binop0 3 $ \[x0,y0,x,y,b] + -> unbox x0 rf x + . unbox y0 rf y + . TLet b UN (APrm pop [x,y]) + $ TTm $ boolift b + +cmpopb :: Var v => POp -> Reference -> SuperNormal v +cmpopb pop rf + = binop0 3 $ \[x0,y0,x,y,b] + -> unbox x0 rf x + . unbox y0 rf y + . TLet b UN (APrm pop [y,x]) + $ TTm $ boolift b + +cmpopn :: Var v => POp -> Reference -> SuperNormal v +cmpopn pop rf + = binop0 3 $ \[x0,y0,x,y,b] + -> unbox x0 rf x + . unbox y0 rf y + . TLet b UN (APrm pop [x,y]) + $ TTm $ notlift b + +cmpopbn :: Var v => POp -> Reference -> SuperNormal v +cmpopbn pop rf + = binop0 3 $ \[x0,y0,x,y,b] + -> unbox x0 rf x + . unbox y0 rf y + . 
TLet b UN (APrm pop [y,x]) + $ TTm $ notlift b + +addi,subi,muli,divi,modi,shli,shri,powi :: Var v => SuperNormal v +addi = binop ADDI Ty.intRef +subi = binop SUBI Ty.intRef +muli = binop MULI Ty.intRef +divi = binop DIVI Ty.intRef +modi = binop MODI Ty.intRef +shli = binop' SHLI Ty.intRef Ty.natRef Ty.intRef +shri = binop' SHRI Ty.intRef Ty.natRef Ty.intRef +powi = binop' POWI Ty.intRef Ty.natRef Ty.intRef + +addn,subn,muln,divn,modn,shln,shrn,pown :: Var v => SuperNormal v +addn = binop ADDN Ty.natRef +subn = binop' SUBN Ty.natRef Ty.natRef Ty.intRef +muln = binop MULN Ty.natRef +divn = binop DIVN Ty.natRef +modn = binop MODN Ty.natRef +shln = binop SHLN Ty.natRef +shrn = binop SHRN Ty.natRef +pown = binop POWN Ty.natRef + +eqi, eqn, lti, ltn, lei, len :: Var v => SuperNormal v +eqi = cmpop EQLI Ty.intRef +lti = cmpopbn LEQI Ty.intRef +lei = cmpop LEQI Ty.intRef +eqn = cmpop EQLN Ty.natRef +ltn = cmpopbn LEQN Ty.natRef +len = cmpop LEQN Ty.natRef + +gti, gtn, gei, gen :: Var v => SuperNormal v +gti = cmpopn LEQI Ty.intRef +gei = cmpopb LEQI Ty.intRef +gtn = cmpopn LEQN Ty.intRef +gen = cmpopb LEQN Ty.intRef + +neqi, neqn :: Var v => SuperNormal v +neqi = cmpopn EQLI Ty.intRef +neqn = cmpopn EQLN Ty.intRef + +inci, incn :: Var v => SuperNormal v +inci = unop INCI Ty.intRef +incn = unop INCN Ty.natRef + +sgni, negi :: Var v => SuperNormal v +sgni = unop SGNI Ty.intRef +negi = unop NEGI Ty.intRef + +lzeron, tzeron, lzeroi, tzeroi :: Var v => SuperNormal v +lzeron = unop LZRO Ty.natRef +tzeron = unop TZRO Ty.natRef +lzeroi = unop' LZRO Ty.intRef Ty.natRef +tzeroi = unop' TZRO Ty.intRef Ty.natRef + +andn, orn, xorn, compln :: Var v => SuperNormal v +andn = binop ANDN Ty.natRef +orn = binop IORN Ty.natRef +xorn = binop XORN Ty.natRef +compln = unop COMN Ty.natRef + +addf, subf, mulf, divf, powf, sqrtf, logf, logbf + :: Var v => SuperNormal v +addf = binop ADDF Ty.floatRef +subf = binop SUBF Ty.floatRef +mulf = binop MULF Ty.floatRef +divf = binop DIVF Ty.floatRef +powf 
= binop POWF Ty.floatRef +sqrtf = unop SQRT Ty.floatRef +logf = unop LOGF Ty.floatRef +logbf = binop LOGB Ty.floatRef + +expf, absf :: Var v => SuperNormal v +expf = unop EXPF Ty.floatRef +absf = unop ABSF Ty.floatRef + +cosf, sinf, tanf, acosf, asinf, atanf :: Var v => SuperNormal v +cosf = unop COSF Ty.floatRef +sinf = unop SINF Ty.floatRef +tanf = unop TANF Ty.floatRef +acosf = unop ACOS Ty.floatRef +asinf = unop ASIN Ty.floatRef +atanf = unop ATAN Ty.floatRef + +coshf, sinhf, tanhf, acoshf, asinhf, atanhf, atan2f + :: Var v => SuperNormal v +coshf = unop COSH Ty.floatRef +sinhf = unop SINH Ty.floatRef +tanhf = unop TANH Ty.floatRef +acoshf = unop ACSH Ty.floatRef +asinhf = unop ASNH Ty.floatRef +atanhf = unop ATNH Ty.floatRef +atan2f = binop ATN2 Ty.floatRef + +ltf, gtf, lef, gef, eqf, neqf :: Var v => SuperNormal v +ltf = cmpopbn LEQF Ty.floatRef +gtf = cmpopn LEQF Ty.floatRef +lef = cmpop LEQF Ty.floatRef +gef = cmpopb LEQF Ty.floatRef +eqf = cmpop EQLF Ty.floatRef +neqf = cmpopn EQLF Ty.floatRef + +minf, maxf :: Var v => SuperNormal v +minf = binop MINF Ty.floatRef +maxf = binop MAXF Ty.floatRef + +ceilf, floorf, truncf, roundf, i2f, n2f :: Var v => SuperNormal v +ceilf = unop' CEIL Ty.floatRef Ty.intRef +floorf = unop' FLOR Ty.floatRef Ty.intRef +truncf = unop' TRNF Ty.floatRef Ty.intRef +roundf = unop' RNDF Ty.floatRef Ty.intRef +i2f = unop' ITOF Ty.intRef Ty.floatRef +n2f = unop' NTOF Ty.natRef Ty.floatRef + +trni :: Var v => SuperNormal v +trni = unop0 3 $ \[x0,x,z,b] + -> unbox x0 Ty.intRef x + . TLet z UN (ALit $ I 0) + . TLet b UN (APrm LEQI [x, z]) + . TMatch b + $ MatchIntegral + (mapSingleton 1 $ TCon natTag 0 [z]) + (Just $ TCon natTag 0 [x]) + +modular :: Var v => POp -> (Bool -> ANormal v) -> SuperNormal v +modular pop ret + = unop0 3 $ \[x0,x,m,t] + -> unbox x0 Ty.intRef x + . TLet t UN (ALit $ I 2) + . TLet m UN (APrm pop [x,t]) + . 
TMatch m + $ MatchIntegral + (mapSingleton 1 $ ret True) + (Just $ ret False) + +evni, evnn, oddi, oddn :: Var v => SuperNormal v +evni = modular MODI (\b -> if b then fls else tru) +oddi = modular MODI (\b -> if b then tru else fls) +evnn = modular MODN (\b -> if b then fls else tru) +oddn = modular MODN (\b -> if b then tru else fls) + +dropn :: Var v => SuperNormal v +dropn = binop0 4 $ \[x0,y0,x,y,b,r] + -> unbox x0 Ty.natRef x + . unbox y0 Ty.natRef y + . TLet b UN (APrm LEQN [x,y]) + . TLet r UN + (AMatch b $ MatchIntegral + (mapSingleton 1 $ TLit $ N 0) + (Just $ TPrm SUBN [x,y])) + $ TCon (rtag Ty.natRef) 0 [r] + +appendt, taket, dropt, sizet, unconst, unsnoct :: Var v => SuperNormal v +appendt = binop0 0 $ \[x,y] -> TPrm CATT [x,y] +taket = binop0 1 $ \[x0,y,x] + -> unbox x0 Ty.natRef x + $ TPrm TAKT [x,y] +dropt = binop0 1 $ \[x0,y,x] + -> unbox x0 Ty.natRef x + $ TPrm DRPT [x,y] +sizet = unop0 1 $ \[x,r] + -> TLet r UN (APrm SIZT [x]) + $ TCon (rtag Ty.natRef) 0 [r] +unconst = unop0 5 $ \[x,t,c0,c,y,p] + -> TLet t UN (APrm UCNS [x]) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([UN,BX], TAbss [c0,y] + . TLet c BX (ACon charTag 0 [c0]) + . TLet p BX (ACon pairTag 0 [c,y]) + $ TCon optionTag 1 [p])) + ] +unsnoct = unop0 5 $ \[x,t,c0,c,y,p] + -> TLet t UN (APrm USNC [x]) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([BX,UN], TAbss [y,c0] + . TLet c BX (ACon charTag 0 [c0]) + . 
TLet p BX (ACon pairTag 0 [y,c]) + $ TCon optionTag 1 [p])) + ] + +appends, conss, snocs :: Var v => SuperNormal v +appends = binop0 0 $ \[x,y] -> TPrm CATS [x,y] +conss = binop0 0 $ \[x,y] -> TPrm CONS [x,y] +snocs = binop0 0 $ \[x,y] -> TPrm SNOC [x,y] + +takes, drops, sizes, ats, emptys :: Var v => SuperNormal v +takes = binop0 1 $ \[x0,y,x] + -> unbox x0 Ty.natRef x + $ TPrm TAKS [x,y] +drops = binop0 1 $ \[x0,y,x] + -> unbox x0 Ty.natRef x + $ TPrm DRPS [x,y] +sizes = unop0 1 $ \[x,r] + -> TLet r UN (APrm SIZS [x]) + $ TCon natTag 0 [r] +ats = binop0 3 $ \[x0,y,x,t,r] + -> unbox x0 Ty.natRef x + . TLet t UN (APrm IDXS [x,y]) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([BX], TAbs r $ TCon optionTag 1 [r])) + ] +emptys = Lambda [] $ TPrm BLDS [] + +viewls, viewrs :: Var v => SuperNormal v +viewls = unop0 3 $ \[s,u,h,t] + -> TLet u UN (APrm VWLS [s]) + . TMatch u . MatchSum $ mapFromList + [ (0, ([], TCon seqViewTag 0 [])) + , (1, ([BX,BX], TAbss [h,t] $ TCon seqViewTag 1 [h,t])) + ] +viewrs = unop0 3 $ \[s,u,i,l] + -> TLet u UN (APrm VWRS [s]) + . TMatch u . MatchSum $ mapFromList + [ (0, ([], TCon seqViewTag 0 [])) + , (1, ([BX,BX], TAbss [i,l] $ TCon seqViewTag 1 [i,l])) + ] + +eqt, neqt, leqt, geqt, lesst, great :: Var v => SuperNormal v +eqt = binop0 1 $ \[x,y,b] + -> TLet b UN (APrm EQLT [x,y]) + . TTm $ boolift b +neqt = binop0 1 $ \[x,y,b] + -> TLet b UN (APrm EQLT [x,y]) + . TTm $ notlift b +leqt = binop0 1 $ \[x,y,b] + -> TLet b UN (APrm LEQT [x,y]) + . TTm $ boolift b +geqt = binop0 1 $ \[x,y,b] + -> TLet b UN (APrm LEQT [y,x]) + . TTm $ boolift b +lesst = binop0 1 $ \[x,y,b] + -> TLet b UN (APrm LEQT [y,x]) + . TTm $ notlift b +great = binop0 1 $ \[x,y,b] + -> TLet b UN (APrm LEQT [x,y]) + . 
TTm $ notlift b + +packt, unpackt :: Var v => SuperNormal v +packt = unop0 0 $ \[s] -> TPrm PAKT [s] +unpackt = unop0 0 $ \[t] -> TPrm UPKT [t] + +packb, unpackb, emptyb, appendb :: Var v => SuperNormal v +packb = unop0 0 $ \[s] -> TPrm PAKB [s] +unpackb = unop0 0 $ \[b] -> TPrm UPKB [b] +emptyb + = Lambda [] + . TLet es BX (APrm BLDS []) + $ TPrm PAKB [es] + where + [es] = freshes 1 +appendb = binop0 0 $ \[x,y] -> TPrm CATB [x,y] + +takeb, dropb, atb, sizeb, flattenb :: Var v => SuperNormal v +takeb = binop0 1 $ \[n0,b,n] + -> unbox n0 Ty.natRef n + $ TPrm TAKB [n,b] + +dropb = binop0 1 $ \[n0,b,n] + -> unbox n0 Ty.natRef n + $ TPrm DRPB [n,b] + +atb = binop0 4 $ \[n0,b,n,t,r0,r] + -> unbox n0 Ty.natRef n + . TLet t UN (APrm IDXB [n,b]) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([UN], TAbs r0 + . TLet r BX (ACon natTag 0 [r0]) + $ TCon optionTag 1 [r])) + ] + +sizeb = unop0 1 $ \[b,n] + -> TLet n UN (APrm SIZB [b]) + $ TCon natTag 0 [n] + +flattenb = unop0 0 $ \[b] -> TPrm FLTB [b] + +i2t, n2t, f2t :: Var v => SuperNormal v +i2t = unop0 1 $ \[n0,n] + -> unbox n0 Ty.intRef n + $ TPrm ITOT [n] +n2t = unop0 1 $ \[n0,n] + -> unbox n0 Ty.natRef n + $ TPrm NTOT [n] +f2t = unop0 1 $ \[f0,f] + -> unbox f0 Ty.floatRef f + $ TPrm FTOT [f] + +t2i, t2n, t2f :: Var v => SuperNormal v +t2i = unop0 3 $ \[x,t,n0,n] + -> TLet t UN (APrm TTOI [x]) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([UN], TAbs n0 + . TLet n BX (ACon intTag 0 [n0]) + $ TCon optionTag 1 [n])) + ] +t2n = unop0 3 $ \[x,t,n0,n] + -> TLet t UN (APrm TTON [x]) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([UN], TAbs n0 + . TLet n BX (ACon natTag 0 [n0]) + $ TCon optionTag 1 [n])) + ] +t2f = unop0 3 $ \[x,t,f0,f] + -> TLet t UN (APrm TTOF [x]) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([UN], TAbs f0 + . 
TLet f BX (ACon floatTag 0 [f0]) + $ TCon optionTag 1 [f])) + ] + +equ :: Var v => SuperNormal v +equ = binop0 1 $ \[x,y,b] + -> TLet b UN (APrm EQLU [x,y]) + . TTm $ boolift b + +cmpu :: Var v => SuperNormal v +cmpu = binop0 2 $ \[x,y,c,i] + -> TLet c UN (APrm CMPU [x,y]) + . TLet i UN (APrm DECI [c]) + $ TCon intTag 0 [i] + +ltu :: Var v => SuperNormal v +ltu = binop0 1 $ \[x,y,c] + -> TLet c UN (APrm CMPU [x,y]) + . TMatch c + $ MatchIntegral + (mapFromList [ (0, TCon boolTag 1 []) ]) + (Just $ TCon boolTag 0 []) + +gtu :: Var v => SuperNormal v +gtu = binop0 1 $ \[x,y,c] + -> TLet c UN (APrm CMPU [x,y]) + . TMatch c + $ MatchIntegral + (mapFromList [ (2, TCon boolTag 1 []) ]) + (Just $ TCon boolTag 0 []) + +geu :: Var v => SuperNormal v +geu = binop0 1 $ \[x,y,c] + -> TLet c UN (APrm CMPU [x,y]) + . TMatch c + $ MatchIntegral + (mapFromList [ (0, TCon boolTag 0 []) ]) + (Just $ TCon boolTag 1 []) + +leu :: Var v => SuperNormal v +leu = binop0 1 $ \[x,y,c] + -> TLet c UN (APrm CMPU [x,y]) + . TMatch c + $ MatchIntegral + (mapFromList [ (2, TCon boolTag 0 []) ]) + (Just $ TCon boolTag 1 []) + +notb :: Var v => SuperNormal v +notb = unop0 0 $ \[b] + -> TMatch b . flip (MatchData Ty.booleanRef) Nothing + $ mapFromList [ (0, ([], tru)), (1, ([], fls)) ] + +orb :: Var v => SuperNormal v +orb = binop0 0 $ \[p,q] + -> TMatch p . flip (MatchData Ty.booleanRef) Nothing + $ mapFromList [ (1, ([], tru)), (0, ([], TVar q)) ] + +andb :: Var v => SuperNormal v +andb = binop0 0 $ \[p,q] + -> TMatch p . flip (MatchData Ty.booleanRef) Nothing + $ mapFromList [ (0, ([], fls)), (1, ([], TVar q)) ] + +-- unsafeCoerce, used for numeric types where conversion is a +-- no-op on the representation. Ideally this will be inlined and +-- eliminated so that no instruction is necessary. 
+cast :: Var v => Reference -> Reference -> SuperNormal v +cast ri ro + = unop0 1 $ \[x0,x] + -> unbox x0 ri x + $ TCon (rtag ro) 0 [x] + +jumpk :: Var v => SuperNormal v +jumpk = binop0 0 $ \[k,a] -> TKon k [a] + +bug :: Var v => SuperNormal v +bug = unop0 0 $ \[x] -> TPrm EROR [x] + +watch :: Var v => SuperNormal v +watch + = binop0 0 $ \[t,v] + -> TLets [] [] (APrm PRNT [t]) + $ TVar v + +type IOOP = forall v. Var v => Set v -> ([Mem], ANormal v) + +maybe'result'direct + :: Var v + => IOp -> [v] + -> v -> v + -> ANormal v +maybe'result'direct ins args t r + = TLet t UN (AIOp ins args) + . TMatch t . MatchSum $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([BX], TAbs r $ TCon optionTag 1 [r])) + ] + +io'error'result0 + :: Var v + => IOp -> [v] + -> v -> [Mem] -> [v] -> v + -> ANormal v -> ANormal v +io'error'result0 ins args ior ccs vs e nx + = TLet ior UN (AIOp ins args) + . TMatch ior . MatchSum + $ mapFromList + [ (0, ([BX], TAbs e $ TCon eitherTag 0 [e])) + , (1, (ccs, TAbss vs nx)) + ] + +io'error'result'let + :: Var v + => IOp -> [v] + -> v -> [Mem] -> [v] -> v -> v -> ANormalT v + -> ANormal v +io'error'result'let ins args ior ccs vs e r m + = io'error'result0 ins args ior ccs vs e + . TLet r BX m + $ TCon eitherTag 1 [r] + +io'error'result'direct + :: Var v + => IOp -> [v] + -> v -> v -> v + -> ANormal v +io'error'result'direct ins args ior e r + = io'error'result0 ins args ior [BX] [r] e + $ TCon eitherTag 1 [r] + +io'error'result'unit + :: Var v + => IOp -> [v] + -> v -> v -> v + -> ANormal v +io'error'result'unit ins args ior e r + = io'error'result'let ins args ior [] [] e r + $ ACon (rtag Ty.unitRef) 0 [] + +io'error'result'bool + :: Var v + => IOp -> [v] + -> v -> (v -> ANormalT v) -> v -> v -> v -> ANormal v +io'error'result'bool ins args ior encode b e r + = io'error'result'let ins args ior [UN] [b] e r + $ encode b + +open'file :: IOOP +open'file avoid + = ([BX,BX],) + . TAbss [fp,m0] + . 
unenum 4 m0 ioModeReference m + $ io'error'result'direct OPENFI [fp,m] ior e r + where + [m0,fp,m,ior,e,r] = freshes' avoid 6 + +close'file :: IOOP +close'file avoid + = ([BX],) + . TAbss [h] + $ io'error'result'unit CLOSFI [h] ior e r + where + [h,ior,e,r] = freshes' avoid 4 + +is'file'eof :: IOOP +is'file'eof avoid + = ([BX],) + . TAbss [h] + $ io'error'result'bool ISFEOF [h] ior boolift b e r + where + [h,b,ior,e,r] = freshes' avoid 5 + +is'file'open :: IOOP +is'file'open avoid + = ([BX],) + . TAbss [h] + $ io'error'result'bool ISFOPN [h] ior boolift b e r + where + [h,b,ior,e,r] = freshes' avoid 5 + +is'seekable :: IOOP +is'seekable avoid + = ([BX],) + . TAbss [h] + $ io'error'result'bool ISSEEK [h] ior boolift b e r + where + [h,b,ior,e,r] = freshes' avoid 5 + +standard'handle :: IOOP +standard'handle avoid + = ([BX],) + . TAbss [n0] + . unbox n0 Ty.natRef n + . TLet r UN (AIOp STDHND [n]) + . TMatch r . MatchSum + $ mapFromList + [ (0, ([], TCon optionTag 0 [])) + , (1, ([BX], TAbs h $ TCon optionTag 1 [h])) + ] + where + [n0,n,h,r] = freshes' avoid 4 + +seek'handle :: IOOP +seek'handle avoid + = ([BX,BX,BX],) + . TAbss [h,sm0,po0] + . unenum 3 sm0 seekModeReference sm + . unbox po0 Ty.natRef po + $ io'error'result'unit SEEKFI [h,sm,po] ior e r + where + [sm0,po0,h,sm,po,ior,e,r] = freshes' avoid 8 + +handle'position :: IOOP +handle'position avoid + = ([BX],) + . TAbss [h] + . io'error'result'let POSITN [h] ior [UN] [i] e r + $ (ACon (rtag Ty.intRef) 0 [i]) + where + [h,i,ior,e,r] = freshes' avoid 5 + +get'buffering :: IOOP +get'buffering avoid + = ([BX],) + . TAbss [h] + . io'error'result'let GBUFFR [h] ior [UN] [bu] e r + . AMatch bu . 
MatchSum + $ mapFromList + [ (0, ([], TCon (rtag Ty.optionalRef) 0 [])) + , (1, ([], line)) + , (2, ([], block'nothing)) + , (3, ([UN], TAbs n $ block'n)) + ] + where + [h,bu,ior,e,r,m,n,b] = freshes' avoid 8 + final = TCon (rtag Ty.optionalRef) 1 [b] + block = TLet b BX (ACon (rtag bufferModeReference) 1 [m]) $ final + + line + = TLet b BX (ACon (rtag bufferModeReference) 0 []) $ final + block'nothing + = TLet m BX (ACon (rtag Ty.optionalRef) 0 []) + $ block + block'n + = TLet m BX (ACon (rtag Ty.optionalRef) 1 [n]) + $ block + +set'buffering :: IOOP +set'buffering avoid + = ([BX,BX],) + . TAbss [h,bm0] + . TMatch bm0 . flip (MatchData Ty.optionalRef) Nothing + $ mapFromList + [ (0, ([], none'branch)) + , (1, ([BX], TAbs bm just'branch'0)) + ] + where + [t,ior,e,r,h,mbs,bs0,bs,bm0,bm] = freshes' avoid 10 + none'branch + = TLet t UN (ALit $ I 0) + $ io'error'result'unit SBUFFR [h,t] ior e r + just'branch'0 + = TMatch bm . flip (MatchData bufferModeReference) Nothing + $ mapFromList + [ (0, ([] + , TLet t UN (ALit $ I 1) + $ io'error'result'unit SBUFFR [h,t] ior e r + )) + , (1, ([BX], TAbs mbs just'branch'1)) + ] + just'branch'1 + = TMatch mbs + . flip (MatchData Ty.optionalRef) Nothing + $ mapFromList + [ (0, ([] + , TLet t UN (ALit $ I 2) + $ io'error'result'unit SBUFFR [h,t] ior e r)) + , (1, ([BX] + , TAbs bs0 + . unbox bs0 Ty.natRef bs + . TLet t UN (ALit $ I 3) + $ io'error'result'unit SBUFFR [h,t,bs] ior e r)) + ] + +get'line :: IOOP +get'line avoid + = ([BX],) + . TAbss [h] + $ io'error'result'direct GTLINE [h] ior e r + where + [h,ior,e,r] = freshes' avoid 4 + +get'text :: IOOP +get'text avoid + = ([BX],) + . TAbss [h] + $ io'error'result'direct GTTEXT [h] ior e r + where + [h,ior,e,r] = freshes' avoid 4 + +put'text :: IOOP +put'text avoid + = ([BX,BX],) + . TAbss [h,tx] + $ io'error'result'direct PUTEXT [h,tx] ior e r + where + [h,tx,ior,e,r] = freshes' avoid 5 + +system'time :: IOOP +system'time avoid + = ([],) + . 
io'error'result'let SYTIME [] ior [UN] [n] e r + $ ACon (rtag Ty.natRef) 0 [n] + where + [n,ior,e,r] = freshes' avoid 4 + +get'temp'directory :: IOOP +get'temp'directory avoid + = ([],) + . io'error'result'let GTMPDR [] ior [BX] [t] e r + $ ACon (rtag filePathReference) 0 [t] + where + [t,ior,e,r] = freshes' avoid 4 + +get'current'directory :: IOOP +get'current'directory avoid + = ([],) + . io'error'result'let GCURDR [] ior [BX] [t] e r + $ ACon (rtag filePathReference) 0 [r] + where + [t,e,r,ior] = freshes' avoid 4 + +set'current'directory :: IOOP +set'current'directory avoid + = ([BX],) + . TAbs fp + $ io'error'result'unit SCURDR [fp] ior e r + where + [fp,ior,e,r] = freshes' avoid 4 + +-- directory'contents +-- DCNTNS +-- directoryContents_ : io.FilePath -> Either io.Error [io.FilePath] + + +file'exists :: IOOP +file'exists avoid + = ([BX],) + . TAbs fp + $ io'error'result'bool FEXIST [fp] ior boolift b e r + where + [fp,b,ior,e,r] = freshes' avoid 5 + +is'directory :: IOOP +is'directory avoid + = ([BX],) + . TAbs fp + $ io'error'result'bool ISFDIR [fp] ior boolift b e r + where + [fp,b,ior,e,r] = freshes' avoid 5 + +create'directory :: IOOP +create'directory avoid + = ([BX],) + . TAbs fp + $ io'error'result'unit CRTDIR [fp] ior e r + where + [fp,ior,e,r] = freshes' avoid 4 + +remove'directory :: IOOP +remove'directory avoid + = ([BX],) + . TAbs fp + $ io'error'result'unit REMDIR [fp] ior e r + where + [fp,ior,e,r] = freshes' avoid 4 + +rename'directory :: IOOP +rename'directory avoid + = ([BX,BX],) + . TAbss [from,to] + $ io'error'result'unit RENDIR [from,to] ior e r + where + [from,to,ior,e,r] = freshes' avoid 5 + +remove'file :: IOOP +remove'file avoid + = ([BX],) + . TAbs fp + $ io'error'result'unit REMOFI [fp] ior e r + where + [fp,ior,e,r] = freshes' avoid 4 + +rename'file :: IOOP +rename'file avoid + = ([BX,BX],) + . 
TAbss [from,to] + $ io'error'result'unit RENAFI [from,to] ior e r + where + [from,to,ior,e,r] = freshes' avoid 5 + +get'file'timestamp :: IOOP +get'file'timestamp avoid + = ([BX],) + . TAbs fp + . io'error'result'let GFTIME [fp] ior [UN] [n] e r + $ ACon (rtag Ty.natRef) 0 [n] + where + [fp,n,ior,e,r] = freshes' avoid 5 + +get'file'size :: IOOP +get'file'size avoid + = ([BX],) + . TAbs fp + . io'error'result'let GFSIZE [fp] ior [UN] [n] e r + $ ACon (rtag Ty.natRef) 0 [n] + where + [fp,n,ior,e,r] = freshes' avoid 5 + +server'socket :: IOOP +server'socket avoid + = ([BX,BX],) + . TAbss [mhn,sn] + . TMatch mhn . flip (MatchData Ty.optionalRef) Nothing + $ mapFromList + [ (0, ([], none'branch)) + , (1, ([BX], TAbs hn just'branch)) + ] + where + [mhn,sn,hn,t,ior,e,r] = freshes' avoid 7 + none'branch + = TLet t UN (ALit $ I 0) + $ io'error'result'direct SRVSCK [t,sn] ior e r + just'branch + = TLet t UN (ALit $ I 1) + $ io'error'result'direct SRVSCK [t,hn,sn] ior e r + +listen :: IOOP +listen avoid + = ([BX],) + . TAbs sk + $ io'error'result'direct LISTEN [sk] ior e r + where + [sk,ior,e,r] = freshes' avoid 4 + +client'socket :: IOOP +client'socket avoid + = ([BX,BX],) + . TAbss [hn,sn] + $ io'error'result'direct CLISCK [hn,sn] ior e r + where + [hn,sn,r,ior,e] = freshes' avoid 5 + +close'socket :: IOOP +close'socket avoid + = ([BX,BX],) + . TAbs sk + $ io'error'result'unit CLOSCK [sk] ior e r + where + [sk,ior,e,r] = freshes' avoid 4 + +socket'accept :: IOOP +socket'accept avoid + = ([BX],) + . TAbs sk + $ io'error'result'direct SKACPT [sk] ior e r + where + [sk,r,e,ior] = freshes' avoid 4 + +socket'send :: IOOP +socket'send avoid + = ([BX,BX],) + . TAbss [sk,by] + $ io'error'result'unit SKSEND [sk,by] ior e r + where + [sk,by,ior,e,r] = freshes' avoid 5 + +socket'receive :: IOOP +socket'receive avoid + = ([BX,BX],) + . TAbss [sk,n0] + . unbox n0 Ty.natRef n + . io'error'result'let SKRECV [sk,n] ior [UN] [mt] e r + . AMatch mt . 
MatchSum + $ mapFromList + [ (0, ([], TCon (rtag Ty.optionalRef) 0 [])) + , (1, ([BX], TAbs b $ TCon (rtag Ty.optionalRef) 1 [b])) + ] + where + [n0,sk,n,ior,e,r,b,mt] = freshes' avoid 8 + +fork'comp :: IOOP +fork'comp avoid + = ([BX],) + . TAbs lz + $ TPrm FORK [lz] + where + [lz] = freshes' avoid 3 + +mvar'new :: IOOP +mvar'new avoid + = ([BX],) + . TAbs init + $ TIOp MVNEWF [init] + where + [init] = freshes' avoid 1 + +mvar'empty :: IOOP +mvar'empty _ + = ([],) + $ TIOp MVNEWE [] + +mvar'take :: IOOP +mvar'take avoid + = ([BX],) + . TAbs mv + $ io'error'result'direct MVTAKE [mv] ior e r + where + [mv,ior,e,r] = freshes' avoid 4 + +mvar'try'take :: IOOP +mvar'try'take avoid + = ([BX],) + . TAbss [mv,x] + $ maybe'result'direct MVPUTT [mv,x] t r + where + [mv,x,t,r] = freshes' avoid 4 + +mvar'put :: IOOP +mvar'put avoid + = ([BX,BX],) + . TAbss [mv,x] + $ io'error'result'unit MVPUTB [mv,x] ior e r + where + [mv,x,ior,e,r] = freshes' avoid 5 + +mvar'try'put :: IOOP +mvar'try'put avoid + = ([BX,BX],) + . TAbss [mv,x] + . TLet b UN (AIOp MVPUTT [mv,x]) + . TTm $ boolift b + where + [mv,x,b] = freshes' avoid 3 + +mvar'swap :: IOOP +mvar'swap avoid + = ([BX,BX],) + . TAbss [mv,x] + $ io'error'result'direct MVSWAP [mv,x] ior e r + where + [mv,x,ior,e,r] = freshes' avoid 5 + +mvar'is'empty :: IOOP +mvar'is'empty avoid + = ([BX],) + . TAbs mv + . TLet b UN (AIOp MVEMPT [mv]) + . TTm $ boolift b + where + [mv,b] = freshes' avoid 2 + +mvar'read :: IOOP +mvar'read avoid + = ([BX],) + . TAbs mv + $ io'error'result'direct MVREAD [mv] ior e r + where + [mv,ior,e,r] = freshes' avoid 4 + +mvar'try'read :: IOOP +mvar'try'read avoid + = ([BX],) + . 
TAbs mv + $ maybe'result'direct MVREAT [mv] t r + where + [mv,t,r] = freshes' avoid 3 + +builtinLookup :: Var v => Map.Map Reference (SuperNormal v) +builtinLookup + = Map.fromList + $ map (\(t, f) -> (Builtin t, f)) + [ ("Int.+", addi) + , ("Int.-", subi) + , ("Int.*", muli) + , ("Int./", divi) + , ("Int.mod", modi) + , ("Int.==", eqi) + , ("Int.!=", neqi) + , ("Int.<", lti) + , ("Int.<=", lei) + , ("Int.>", gti) + , ("Int.>=", gei) + , ("Int.increment", inci) + , ("Int.signum", sgni) + , ("Int.negate", negi) + , ("Int.truncate0", trni) + , ("Int.isEven", evni) + , ("Int.isOdd", oddi) + , ("Int.shiftLeft", shli) + , ("Int.shiftRight", shri) + , ("Int.trailingZeros", tzeroi) + , ("Int.leadingZeros", lzeroi) + , ("Int.pow", powi) + , ("Int.toText", i2t) + , ("Int.fromText", t2i) + , ("Int.toFloat", i2f) + + , ("Nat.+", addn) + , ("Nat.-", subn) + , ("Nat.sub", subn) + , ("Nat.*", muln) + , ("Nat./", divn) + , ("Nat.mod", modn) + , ("Nat.==", eqn) + , ("Int.!=", neqn) + , ("Nat.<", ltn) + , ("Nat.<=", len) + , ("Nat.>", gtn) + , ("Nat.>=", gen) + , ("Nat.increment", incn) + , ("Nat.isEven", evnn) + , ("Nat.isOdd", oddn) + , ("Nat.shiftLeft", shln) + , ("Nat.shiftRight", shrn) + , ("Nat.trailingZeros", tzeron) + , ("Nat.leadingZeros", lzeron) + , ("Nat.and", andn) + , ("Nat.or", orn) + , ("Nat.xor", xorn) + , ("Nat.complement", compln) + , ("Nat.pow", pown) + , ("Nat.drop", dropn) + , ("Nat.toInt", cast Ty.natRef Ty.intRef) + , ("Nat.toFloat", n2f) + , ("Nat.toText", n2t) + , ("Nat.fromText", t2n) + + , ("Float.+", addf) + , ("Float.-", subf) + , ("Float.*", mulf) + , ("Float./", divf) + , ("Float.pow", powf) + , ("Float.log", logf) + , ("Float.logBase", logbf) + , ("Float.sqrt", sqrtf) + + , ("Float.min", minf) + , ("Float.max", maxf) + + , ("Float.<", ltf) + , ("Float.>", gtf) + , ("Float.<=", lef) + , ("Float.>=", gef) + , ("Float.==", eqf) + , ("Float.!=", neqf) + + , ("Float.acos", acosf) + , ("Float.asin", asinf) + , ("Float.atan", atanf) + , ("Float.cos", cosf) 
+ , ("Float.sin", sinf) + , ("Float.tan", tanf) + + , ("Float.acosh", acoshf) + , ("Float.asinh", asinhf) + , ("Float.atanh", atanhf) + , ("Float.cosh", coshf) + , ("Float.sinh", sinhf) + , ("Float.tanh", tanhf) + + , ("Float.exp", expf) + , ("Float.abs", absf) + + , ("Float.ceiling", ceilf) + , ("Float.floor", floorf) + , ("Float.round", roundf) + , ("Float.truncate", truncf) + , ("Float.atan2", atan2f) + + , ("Float.toText", f2t) + , ("Float.fromText", t2f) + + -- text + , ("Text.empty", Lambda [] $ TLit (T "")) + , ("Text.++", appendt) + , ("Text.take", taket) + , ("Text.drop", dropt) + , ("Text.size", sizet) + , ("Text.==", eqt) + , ("Text.!=", neqt) + , ("Text.<=", leqt) + , ("Text.>=", geqt) + , ("Text.<", lesst) + , ("Text.>", great) + , ("Text.uncons", unconst) + , ("Text.unsnoc", unsnoct) + , ("Text.toCharList", unpackt) + , ("Text.fromCharList", packt) + + , ("Boolean.not", notb) + , ("Boolean.or", orb) + , ("Boolean.and", andb) + + , ("bug", bug) + , ("todo", bug) + , ("Debug.watch", watch) + + , ("Char.toNat", cast Ty.charRef Ty.natRef) + , ("Char.fromNat", cast Ty.natRef Ty.charRef) + + , ("Bytes.empty", emptyb) + , ("Bytes.fromList", packb) + , ("Bytes.toList", unpackb) + , ("Bytes.++", appendb) + , ("Bytes.take", takeb) + , ("Bytes.drop", dropb) + , ("Bytes.at", atb) + , ("Bytes.size", sizeb) + , ("Bytes.flatten", flattenb) + + , ("List.take", takes) + , ("List.drop", drops) + , ("List.size", sizes) + , ("List.++", appends) + , ("List.at", ats) + , ("List.cons", conss) + , ("List.snoc", snocs) + , ("List.empty", emptys) + , ("List.viewl", viewls) + , ("List.viewr", viewrs) +-- +-- , B "Debug.watch" $ forall1 "a" (\a -> text --> a --> a) + , ("Universal.==", equ) + , ("Universal.compare", cmpu) + , ("Universal.>", gtu) + , ("Universal.<", ltu) + , ("Universal.>=", geu) + , ("Universal.<=", leu) + + , ("jumpCont", jumpk) + + , ("IO.openFile", ioComb open'file) + , ("IO.closeFile", ioComb close'file) + , ("IO.isFileEOF", ioComb is'file'eof) + , 
("IO.isFileOpen", ioComb is'file'open) + , ("IO.isSeekable", ioComb is'seekable) + , ("IO.seekHandle", ioComb seek'handle) + , ("IO.handlePosition", ioComb handle'position) + , ("IO.getBuffering", ioComb get'buffering) + , ("IO.setBuffering", ioComb set'buffering) + , ("IO.getLine", ioComb get'line) + , ("IO.getText", ioComb get'text) + , ("IO.putText", ioComb put'text) + , ("IO.systemTime", ioComb system'time) + , ("IO.getTempDirectory", ioComb get'temp'directory) + , ("IO.getCurrentDirectory", ioComb get'current'directory) + , ("IO.setCurrentDirectory", ioComb set'current'directory) + , ("IO.fileExists", ioComb file'exists) + , ("IO.isDirectory", ioComb is'directory) + , ("IO.createDirectory", ioComb create'directory) + , ("IO.removeDirectory", ioComb remove'directory) + , ("IO.renameDirectory", ioComb rename'directory) + , ("IO.removeFile", ioComb remove'file) + , ("IO.renameFile", ioComb rename'file) + , ("IO.getFileTimestamp", ioComb get'file'timestamp) + , ("IO.getFileSize", ioComb get'file'size) + , ("IO.serverSocket", ioComb server'socket) + , ("IO.listen", ioComb listen) + , ("IO.clientSocket", ioComb client'socket) + , ("IO.closeSocket", ioComb close'socket) + , ("IO.socketAccept", ioComb socket'accept) + , ("IO.socketSend", ioComb socket'send) + , ("IO.socketReceive", ioComb socket'receive) + , ("IO.forkComp", ioComb fork'comp) + , ("IO.stdHandle", ioComb standard'handle) + + , ("MVar.new", ioComb mvar'new) + , ("MVar.empty", ioComb mvar'empty) + , ("MVar.take", ioComb mvar'take) + , ("MVar.tryTake", ioComb mvar'try'take) + , ("MVar.put", ioComb mvar'put) + , ("MVar.tryPut", ioComb mvar'try'put) + , ("MVar.swap", ioComb mvar'swap) + , ("MVar.isEmpty", ioComb mvar'is'empty) + , ("MVar.read", ioComb mvar'read) + , ("MVar.tryRead", ioComb mvar'try'read) + ] + +ioComb :: Var v => IOOP -> SuperNormal v +ioComb ioop = uncurry Lambda (ioop mempty) + +mkForeignIOE + :: (ForeignConvention a, ForeignConvention r) + => (a -> IO r) -> ForeignFunc +mkForeignIOE f = 
mkForeign $ \a -> tryIOE (f a) + where + tryIOE :: IO a -> IO (Either IOException a) + tryIOE = try + +dummyFF :: ForeignFunc +dummyFF = FF ee ee ee + where + ee = error "dummyFF" + +-- Implementations of ANF IO operations +iopToForeign :: ANF.IOp -> ForeignFunc +iopToForeign ANF.OPENFI = mkForeignIOE $ uncurry openFile +iopToForeign ANF.CLOSFI = mkForeignIOE hClose +iopToForeign ANF.ISFEOF = mkForeignIOE hIsEOF +iopToForeign ANF.ISFOPN = mkForeignIOE hIsOpen +iopToForeign ANF.ISSEEK = mkForeignIOE hIsSeekable +iopToForeign ANF.SEEKFI + = mkForeignIOE $ \(h,sm,n) -> hSeek h sm (fromIntegral (n :: Int)) +iopToForeign ANF.POSITN + -- TODO: truncating integer + = mkForeignIOE $ \h -> fromInteger @Word64 <$> hTell h +iopToForeign ANF.GBUFFR = mkForeignIOE hGetBuffering +iopToForeign ANF.SBUFFR = mkForeignIOE $ uncurry hSetBuffering +iopToForeign ANF.GTLINE = mkForeignIOE hGetLine +iopToForeign ANF.GTTEXT + = dummyFF -- mkForeignIOE $ \h -> pure . Right . Wrap <$> hGetText h +iopToForeign ANF.PUTEXT = mkForeignIOE $ uncurry hPutStr +iopToForeign ANF.SYTIME = mkForeignIOE $ \() -> getPOSIXTime +iopToForeign ANF.GTMPDR = mkForeignIOE $ \() -> getTemporaryDirectory +iopToForeign ANF.GCURDR = mkForeignIOE $ \() -> getCurrentDirectory +iopToForeign ANF.SCURDR = mkForeignIOE setCurrentDirectory +iopToForeign ANF.DCNTNS + = mkForeignIOE $ fmap (fmap Text.pack) . getDirectoryContents +iopToForeign ANF.FEXIST = mkForeignIOE doesPathExist +iopToForeign ANF.ISFDIR = dummyFF +iopToForeign ANF.CRTDIR + = mkForeignIOE $ createDirectoryIfMissing True +iopToForeign ANF.REMDIR = mkForeignIOE removeDirectoryRecursive +iopToForeign ANF.RENDIR = mkForeignIOE $ uncurry renameDirectory +iopToForeign ANF.REMOFI = mkForeignIOE removeFile +iopToForeign ANF.RENAFI = mkForeignIOE $ uncurry renameFile +iopToForeign ANF.GFTIME + = mkForeignIOE $ fmap utcTimeToPOSIXSeconds . 
getModificationTime +iopToForeign ANF.GFSIZE + -- TODO: truncating integer + = mkForeignIOE $ \fp -> fromInteger @Word64 <$> getFileSize fp +iopToForeign ANF.SRVSCK + = mkForeignIOE $ \(mhst,port) -> + () <$ SYS.bindSock (hostPreference mhst) port +iopToForeign ANF.LISTEN = mkForeignIOE $ \sk -> SYS.listenSock sk 2048 +iopToForeign ANF.CLISCK + = mkForeignIOE $ void . uncurry SYS.connectSock +iopToForeign ANF.CLOSCK = mkForeignIOE SYS.closeSock +iopToForeign ANF.SKACPT + = mkForeignIOE $ void . SYS.accept +iopToForeign ANF.SKSEND + = mkForeignIOE $ \(sk,bs) -> SYS.send sk (Bytes.toByteString bs) +iopToForeign ANF.SKRECV + = mkForeignIOE $ \(hs,n) -> + fmap Bytes.fromByteString <$> SYS.recv hs n +iopToForeign ANF.THKILL = mkForeignIOE killThread +iopToForeign ANF.THDELY = mkForeignIOE threadDelay +iopToForeign ANF.STDHND + = mkForeign $ \(n :: Int) -> case n of + 0 -> pure (Just SYS.stdin) + 1 -> pure (Just SYS.stdout) + 2 -> pure (Just SYS.stderr) + _ -> pure Nothing +iopToForeign ANF.MVNEWF + = mkForeign $ \(c :: Closure) -> newMVar c +iopToForeign ANF.MVNEWE = mkForeign $ \() -> newEmptyMVar @Closure +iopToForeign ANF.MVTAKE + = mkForeignIOE $ \(mv :: MVar Closure) -> takeMVar mv +iopToForeign ANF.MVTAKT + = mkForeign $ \(mv :: MVar Closure) -> tryTakeMVar mv +iopToForeign ANF.MVPUTB + = mkForeignIOE $ \(mv :: MVar Closure, x) -> putMVar mv x +iopToForeign ANF.MVPUTT + = mkForeign $ \(mv :: MVar Closure, x) -> tryPutMVar mv x +iopToForeign ANF.MVSWAP + = mkForeignIOE $ \(mv :: MVar Closure, x) -> swapMVar mv x +iopToForeign ANF.MVEMPT + = mkForeign $ \(mv :: MVar Closure) -> isEmptyMVar mv +iopToForeign ANF.MVREAD + = mkForeignIOE $ \(mv :: MVar Closure) -> readMVar mv +iopToForeign ANF.MVREAT + = mkForeign $ \(mv :: MVar Closure) -> tryReadMVar mv + +hostPreference :: Maybe Text -> SYS.HostPreference +hostPreference Nothing = SYS.HostAny +hostPreference (Just host) = SYS.Host $ Text.unpack host + +typeReferences :: [(Reference, RTag)] +typeReferences + = zip + [ 
Ty.natRef + , Ty.optionalRef + , Ty.unitRef + , Ty.pairRef + , Ty.booleanRef + , Ty.intRef + , Ty.floatRef + , Ty.booleanRef + , Ty.textRef + , Ty.charRef + , eitherReference + , filePathReference + , bufferModeReference + , Ty.effectRef + , Ty.vectorRef + , Ty.seqViewRef + ] [1..] + +numberedTermLookup :: Var v => EnumMap Word64 (SuperNormal v) +numberedTermLookup + = mapFromList . zip [1..] . Map.elems $ builtinLookup + +rtag :: Reference -> RTag +rtag r | Just x <- Map.lookup r builtinTypeNumbering = x + | otherwise = error $ "rtag: unknown reference: " ++ show r + +builtinTermNumbering :: Map Reference Word64 +builtinTermNumbering + = Map.fromList (zip (Map.keys $ builtinLookup @Symbol) [1..]) + +builtinTermBackref :: EnumMap Word64 Reference +builtinTermBackref + = mapFromList . zip [1..] . Map.keys $ builtinLookup @Symbol + +builtinTypeNumbering :: Map Reference RTag +builtinTypeNumbering = Map.fromList typeReferences + +builtinTypeBackref :: EnumMap RTag Reference +builtinTypeBackref = mapFromList $ swap <$> typeReferences + where swap (x, y) = (y, x) + +builtinForeigns :: EnumMap Word64 ForeignFunc +builtinForeigns + = mapFromList + . 
fmap (\iop -> (fromIntegral $ fromEnum iop, iopToForeign iop)) + $ [minBound..maxBound] diff --git a/parser-typechecker/src/Unison/Runtime/Debug.hs b/parser-typechecker/src/Unison/Runtime/Debug.hs new file mode 100644 index 0000000000..a049fdbcf6 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Debug.hs @@ -0,0 +1,52 @@ + +module Unison.Runtime.Debug + ( traceComb + , traceCombs + , tracePretty + , tracePrettyGroup + ) where + +import Data.Word + +import qualified Unison.Term as Tm +import Unison.Var (Var) +import Unison.PrettyPrintEnv (PrettyPrintEnv) +import Unison.TermPrinter (pretty) +import Unison.Util.Pretty (toANSI) +import Unison.Util.EnumContainers + +import Unison.Runtime.ANF +import Unison.Runtime.MCode + +import Debug.Trace + +type Term v = Tm.Term v () + +traceComb :: Bool -> Word64 -> Comb -> Bool +traceComb False _ _ = True +traceComb True w c = trace (prettyComb w c "\n") True + +traceCombs + :: Bool + -> (Comb, EnumMap Word64 Comb, Word64) + -> (Comb, EnumMap Word64 Comb, Word64) +traceCombs False c = c +traceCombs True c = trace (prettyCombs c "") c + +tracePretty + :: Var v + => PrettyPrintEnv + -> Bool + -> Term v + -> Term v +tracePretty _ False tm = tm +tracePretty ppe True tm = trace (toANSI 50 $ pretty ppe tm) tm + +tracePrettyGroup + :: Var v + => Bool + -> SuperGroup v + -> SuperGroup v +tracePrettyGroup False g = g +tracePrettyGroup True g = trace (prettyGroup g "") g + diff --git a/parser-typechecker/src/Unison/Runtime/Decompile.hs b/parser-typechecker/src/Unison/Runtime/Decompile.hs new file mode 100644 index 0000000000..2780fc4bc0 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Decompile.hs @@ -0,0 +1,120 @@ +{-# language PatternGuards #-} +{-# language TupleSections #-} +{-# language PatternSynonyms #-} + +module Unison.Runtime.Decompile + ( decompile ) where + +import Prelude hiding (seq) + +import Data.String (fromString) +import Data.Sequence (Seq) +import Data.Word (Word64) + +import Unison.ABT (absChain, 
substs, pattern AbsN') +import Unison.Term + ( Term + , nat, int, char, float, boolean, constructor, app, apps', text + , seq, seq', builtin + ) +import Unison.Type + ( natRef, intRef, charRef, floatRef, booleanRef, vectorRef + ) +import Unison.Var (Var) +import Unison.Reference (Reference) + +import Unison.Runtime.ANF (RTag, CTag, Tag(..)) +import Unison.Runtime.Foreign + (Foreign, maybeUnwrapBuiltin, maybeUnwrapForeign) +import Unison.Runtime.Stack + (Closure(..), pattern DataC, pattern PApV, IComb(..)) + +import Unison.Codebase.Runtime (Error) +import Unison.Util.Pretty (lit) + +import qualified Unison.Util.Bytes as By + +import Unsafe.Coerce -- for Int -> Double + +con :: Var v => Reference -> CTag -> Term v () +con rf ct = constructor () rf . fromIntegral $ rawTag ct + +err :: String -> Either Error a +err = Left . lit . fromString + +decompile + :: Var v + => (RTag -> Maybe Reference) + -> (Word64 -> Maybe (Term v ())) + -> Closure + -> Either Error (Term v ()) +decompile tyRef _ (DataC rt ct [] []) + | Just rf <- tyRef rt + , rf == booleanRef + = boolean () <$> tag2bool ct +decompile tyRef _ (DataC rt ct [i] []) + | Just rf <- tyRef rt + = decompileUnboxed rf ct i +decompile tyRef topTerms (DataC rt ct [] bs) + | Just rf <- tyRef rt + = apps' (con rf ct) <$> traverse (decompile tyRef topTerms) bs +decompile tyRef topTerms (PApV (IC rt _) [] bs) + | Just t <- topTerms rt + = substitute t <$> traverse (decompile tyRef topTerms) bs + | otherwise + = err "reference to unknown combinator" +decompile _ _ cl@(PAp _ _ _) + = err $ "cannot decompile a partial application to unboxed values: " + ++ show cl +decompile _ _ (DataC{}) + = err "cannot decompile data type with multiple unboxed fields" +decompile _ _ BlackHole = err "exception" +decompile _ _ (Captured{}) = err "decompiling a captured continuation" +decompile tyRef topTerms (Foreign f) = decompileForeign tyRef topTerms f + +tag2bool :: CTag -> Either Error Bool +tag2bool c = case rawTag c of + 0 -> Right 
False + 1 -> Right True + _ -> err "bad boolean tag" + +substitute :: Var v => Term v () -> [Term v ()] -> Term v () +substitute (AbsN' vs bd) ts = align [] vs ts + where + align vts (v:vs) (t:ts) = align ((v,t):vts) vs ts + align vts vs [] = substs vts (absChain vs bd) + -- this should not happen + align vts [] ts = apps' (substs vts bd) ts +-- TODO: these aliases are not actually very conveniently written +substitute _ _ = error "impossible" + +decompileUnboxed + :: Var v => Reference -> CTag -> Int -> Either Error (Term v ()) +decompileUnboxed r _ i + | r == natRef = pure . nat () $ fromIntegral i + | r == intRef = pure . int () $ fromIntegral i + | r == floatRef = pure . float () $ unsafeCoerce i + | r == charRef = pure . char () $ toEnum i +decompileUnboxed r _ _ + = err $ "cannot decompile unboxed data type with reference: " ++ show r + +decompileForeign + :: Var v + => (RTag -> Maybe Reference) + -> (Word64 -> Maybe (Term v ())) + -> Foreign + -> Either Error (Term v ()) +decompileForeign tyRef topTerms f + | Just t <- maybeUnwrapBuiltin f = Right $ text () t + | Just b <- maybeUnwrapBuiltin f = Right $ decompileBytes b + | Just s <- unwrapSeq f + = seq' () <$> traverse (decompile tyRef topTerms) s +decompileForeign _ _ _ = err "cannot decompile Foreign" + +decompileBytes :: Var v => By.Bytes -> Term v () +decompileBytes + = app () (builtin () $ fromString "Bytes.fromList") + . seq () . fmap (nat () . fromIntegral) . 
By.toWord8s + +unwrapSeq :: Foreign -> Maybe (Seq Closure) +unwrapSeq = maybeUnwrapForeign vectorRef diff --git a/parser-typechecker/src/Unison/Runtime/Exception.hs b/parser-typechecker/src/Unison/Runtime/Exception.hs new file mode 100644 index 0000000000..109ebbfced --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Exception.hs @@ -0,0 +1,18 @@ + +module Unison.Runtime.Exception where + +import Control.Exception +import Data.String (fromString) + +import Unison.Runtime.Stack +import Unison.Util.Pretty as P + +data RuntimeExn + = PE (P.Pretty P.ColorText) + | BU Closure + deriving (Show) +instance Exception RuntimeExn + +die :: String -> IO a +die = throwIO . PE . P.lit . fromString + diff --git a/parser-typechecker/src/Unison/Runtime/Foreign.hs b/parser-typechecker/src/Unison/Runtime/Foreign.hs new file mode 100644 index 0000000000..349735bafe --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Foreign.hs @@ -0,0 +1,93 @@ +{-# language GADTs #-} +{-# language BangPatterns #-} +{-# language PatternGuards #-} +{-# language ScopedTypeVariables #-} + +module Unison.Runtime.Foreign + ( Foreign(..) + , unwrapForeign + , maybeUnwrapForeign + , wrapBuiltin + , maybeUnwrapBuiltin + , unwrapBuiltin + , BuiltinForeign(..) 
-- | A dynamically typed wrapper for foreign (Haskell) values, tagged
-- with the Unison type reference the value inhabits.
data Foreign where
  Wrap :: Reference -> e -> Foreign

-- | Lift a monomorphic binary operation to arbitrarily typed arguments.
-- Soundness relies on callers comparing references before applying it,
-- so both arguments are known to be the same underlying type.
promote :: (a -> a -> r) -> b -> c -> r
promote (~~) x y = unsafeCoerce x ~~ unsafeCoerce y

-- | Equality for wrapped values, for the references that support it.
ref2eq :: Reference -> Maybe (a -> b -> Bool)
ref2eq r
  | r == Ty.textRef = Just $ promote ((==) @Text)
  | r == Ty.termLinkRef = Just $ promote ((==) @Referent)
  | r == Ty.typeLinkRef = Just $ promote ((==) @Reference)
  | otherwise = Nothing

-- | Ordering for wrapped values, for the references that support it.
ref2cmp :: Reference -> Maybe (a -> b -> Ordering)
ref2cmp r
  | r == Ty.textRef = Just $ promote (compare @Text)
  | r == Ty.termLinkRef = Just $ promote (compare @Referent)
  | r == Ty.typeLinkRef = Just $ promote (compare @Reference)
  | otherwise = Nothing

-- Deliberately partial: comparing foreigns of a type with no equality
-- is a runtime bug, not a user error.
instance Eq Foreign where
  Wrap rl t == Wrap rr u
    | rl == rr, Just (~~) <- ref2eq rl = t ~~ u
  _ == _ = error "Eq Foreign"

instance Ord Foreign where
  Wrap rl t `compare` Wrap rr u
    | rl == rr, Just cmp <- ref2cmp rl = cmp t u
  compare _ _ = error "Ord Foreign"

-- Only the reference is shown; the payload is opaque.
instance Show Foreign where
  showsPrec p !(Wrap r _) =
    showParen (p > 9) $ showString "Wrap " . showsPrec 10 r . showString " _"

-- | Extract the payload, trusting the caller about its type.
unwrapForeign :: Foreign -> a
unwrapForeign (Wrap _ e) = unsafeCoerce e

-- | Extract the payload only when the stored reference matches.
maybeUnwrapForeign :: Reference -> Foreign -> Maybe a
maybeUnwrapForeign rt (Wrap r e)
  | rt == r = Just (unsafeCoerce e)
  | otherwise = Nothing

-- | Haskell types with a distinguished builtin Unison type reference.
class BuiltinForeign f where
  foreignRef :: Tagged f Reference

instance BuiltinForeign Text where foreignRef = Tagged Ty.textRef
instance BuiltinForeign Bytes where foreignRef = Tagged Ty.bytesRef
instance BuiltinForeign Handle where foreignRef = Tagged Ty.fileHandleRef
instance BuiltinForeign Socket where foreignRef = Tagged Ty.socketRef
instance BuiltinForeign ThreadId where foreignRef = Tagged Ty.threadIdRef

-- | Wrap a builtin value with its canonical type reference.
wrapBuiltin :: forall f. BuiltinForeign f => f -> Foreign
wrapBuiltin x = Wrap r x
  where
    Tagged r = foreignRef :: Tagged f Reference

-- | Unwrap without checking the reference; for use where the type is
-- already known from context.
unwrapBuiltin :: BuiltinForeign f => Foreign -> f
unwrapBuiltin (Wrap _ x) = unsafeCoerce x

-- | Unwrap only when the stored reference matches the builtin's.
maybeUnwrapBuiltin :: forall f. BuiltinForeign f => Foreign -> Maybe f
maybeUnwrapBuiltin (Wrap r x)
  | r == r0 = Just (unsafeCoerce x)
  | otherwise = Nothing
  where
    Tagged r0 = foreignRef :: Tagged f Reference
-- | A foreign function packaged with its calling convention: how to
-- read its argument off the runtime stacks, the IO action itself, and
-- how to write the result back.
data ForeignFunc where
  FF :: (Stack 'UN -> Stack 'BX -> Args -> IO a)
     -> (Stack 'UN -> Stack 'BX -> r -> IO (Stack 'UN, Stack 'BX))
     -> (a -> IO r)
     -> ForeignFunc

instance Show ForeignFunc where
  show _ = "ForeignFunc"

-- Deliberately partial: foreign functions have no meaningful equality.
instance Eq ForeignFunc where
  _ == _ = error "Eq ForeignFunc"

instance Ord ForeignFunc where
  compare _ _ = error "Ord ForeignFunc"

-- | Types that can be marshalled between Haskell and the runtime's
-- unboxed ('UN) and boxed ('BX) stacks. @readForeign@ consumes indices
-- from the unboxed/boxed argument lists and returns the leftovers.
class ForeignConvention a where
  readForeign
    :: [Int] -> [Int] -> Stack 'UN -> Stack 'BX -> IO ([Int], [Int], a)
  writeForeign
    :: Stack 'UN -> Stack 'BX -> a -> IO (Stack 'UN, Stack 'BX)

-- | Package a Haskell IO function as a 'ForeignFunc', checking that the
-- call site supplies exactly the arguments the function consumes.
mkForeign
  :: (ForeignConvention a, ForeignConvention r)
  => (a -> IO r)
  -> ForeignFunc
mkForeign ev = FF readArgs writeForeign ev
  where
    readArgs ustk bstk (argsToLists -> (us, bs)) =
      readForeign us bs ustk bstk >>= \case
        ([], [], a) -> pure a
        _ -> die "mkForeign: too many arguments for foreign function"

instance ForeignConvention Int where
  readForeign (i : us) bs ustk _ = (us, bs,) <$> peekOff ustk i
  readForeign [] _ _ _ = foreignCCError "Int"
  writeForeign ustk bstk i = do
    ustk <- bump ustk
    (ustk, bstk) <$ poke ustk i

instance ForeignConvention Word64 where
  readForeign (i : us) bs ustk _ = (us, bs,) <$> peekOffN ustk i
  readForeign [] _ _ _ = foreignCCError "Word64"
  writeForeign ustk bstk n = do
    ustk <- bump ustk
    (ustk, bstk) <$ pokeN ustk n

instance ForeignConvention Closure where
  readForeign us (i : bs) _ bstk = (us, bs,) <$> peekOff bstk i
  readForeign _ [] _ _ = foreignCCError "Closure"
  writeForeign ustk bstk c = do
    bstk <- bump bstk
    (ustk, bstk) <$ poke bstk c

-- Builtin boxed types travel through their 'Foreign' wrappers.
instance ForeignConvention Text where
  readForeign = readForeignBuiltin
  writeForeign = writeForeignBuiltin

instance ForeignConvention Bytes where
  readForeign = readForeignBuiltin
  writeForeign = writeForeignBuiltin

instance ForeignConvention Socket where
  readForeign = readForeignBuiltin
  writeForeign = writeForeignBuiltin

instance ForeignConvention ThreadId where
  readForeign = readForeignBuiltin
  writeForeign = writeForeignBuiltin

instance ForeignConvention Handle where
  readForeign = readForeignBuiltin
  writeForeign = writeForeignBuiltin

-- POSIX time travels as a whole-second Int on the unboxed stack.
instance ForeignConvention POSIXTime where
  readForeign = readForeignAs (fromIntegral :: Int -> POSIXTime)
  writeForeign = writeForeignAs (round :: POSIXTime -> Int)

-- Maybe is tagged on the unboxed stack: 0 = Nothing, 1 = Just, with the
-- Just payload written beneath the tag.
instance ForeignConvention a => ForeignConvention (Maybe a) where
  readForeign (i : us) bs ustk bstk =
    peekOff ustk i >>= \case
      0 -> pure (us, bs, Nothing)
      1 -> fmap Just <$> readForeign us bs ustk bstk
      _ -> foreignCCError "Maybe"
  readForeign [] _ _ _ = foreignCCError "Maybe"

  writeForeign ustk bstk Nothing = do
    ustk <- bump ustk
    (ustk, bstk) <$ poke ustk 0
  writeForeign ustk bstk (Just x) = do
    (ustk, bstk) <- writeForeign ustk bstk x
    ustk <- bump ustk
    (ustk, bstk) <$ poke ustk 1
-- | Decode a Unison-side io.ErrorType tag into GHC's 'IOErrorType'.
-- Inverse of 'ioeEncode' on the eight supported error types; any other
-- tag is a runtime bug.
ioeDecode :: Int -> IOErrorType
ioeDecode 0 = AlreadyExists
ioeDecode 1 = NoSuchThing
ioeDecode 2 = ResourceBusy
ioeDecode 3 = ResourceExhausted
ioeDecode 4 = EOF
ioeDecode 5 = IllegalOperation
ioeDecode 6 = PermissionDenied
ioeDecode 7 = UserError
ioeDecode _ = error "ioeDecode"

-- | Encode an 'IOErrorType' as the tag used by the Unison-side
-- io.ErrorType constructors. Inverse of 'ioeDecode'; 'IOErrorType' has
-- more constructors than the eight mapped here, so the catch-all stays.
ioeEncode :: IOErrorType -> Int
ioeEncode AlreadyExists = 0
ioeEncode NoSuchThing = 1
ioeEncode ResourceBusy = 2
ioeEncode ResourceExhausted = 3
ioeEncode EOF = 4
ioeEncode IllegalOperation = 5
ioeEncode PermissionDenied = 6
ioeEncode UserError = 7
-- Fixed: the original catch-all said error "ioeDecode", naming the
-- wrong function and making failures here misleading to debug.
ioeEncode _ = error "ioeEncode"
instance ForeignConvention Double where
  readForeign (i : us) bs ustk _ = (us, bs,) <$> peekOffD ustk i
  readForeign _ _ _ _ = foreignCCError "Double"
  writeForeign ustk bstk d =
    bump ustk >>= \ustk -> (ustk, bstk) <$ pokeD ustk d

instance ForeignConvention Bool where
  readForeign = readForeignEnum
  writeForeign = writeForeignEnum

-- Strings marshal via Text.
instance ForeignConvention String where
  readForeign = readForeignAs unpack
  writeForeign = writeForeignAs pack

instance ForeignConvention SeekMode where
  readForeign = readForeignEnum
  writeForeign = writeForeignEnum

instance ForeignConvention IOMode where
  readForeign = readForeignEnum
  writeForeign = writeForeignEnum

-- Unit occupies no stack slots at all.
instance ForeignConvention () where
  readForeign us bs _ _ = pure (us, bs, ())
  writeForeign ustk bstk _ = pure (ustk, bstk)

-- Tuples are written back-to-front so the first component ends up on
-- top of the stack, matching the read order.
instance (ForeignConvention a, ForeignConvention b)
  => ForeignConvention (a, b)
  where
  readForeign us bs ustk bstk = do
    (us, bs, a) <- readForeign us bs ustk bstk
    (us, bs, b) <- readForeign us bs ustk bstk
    pure (us, bs, (a, b))

  writeForeign ustk bstk (x, y) = do
    (ustk, bstk) <- writeForeign ustk bstk y
    writeForeign ustk bstk x

instance
  ( ForeignConvention a
  , ForeignConvention b
  , ForeignConvention c
  ) => ForeignConvention (a, b, c)
  where
  readForeign us bs ustk bstk = do
    (us, bs, a) <- readForeign us bs ustk bstk
    (us, bs, b) <- readForeign us bs ustk bstk
    (us, bs, c) <- readForeign us bs ustk bstk
    pure (us, bs, (a, b, c))

  writeForeign ustk bstk (a, b, c) = do
    (ustk, bstk) <- writeForeign ustk bstk c
    (ustk, bstk) <- writeForeign ustk bstk b
    writeForeign ustk bstk a

-- BufferMode tags: 0 = none, 1 = line, 2 = block (default size),
-- 3 = block with an explicit size written beneath the tag.
instance ForeignConvention BufferMode where
  readForeign (i : us) bs ustk bstk =
    peekOff ustk i >>= \case
      0 -> pure (us, bs, NoBuffering)
      1 -> pure (us, bs, LineBuffering)
      2 -> pure (us, bs, BlockBuffering Nothing)
      3 -> fmap (BlockBuffering . Just) <$> readForeign us bs ustk bstk
      _ -> foreignCCError "BufferMode"
  readForeign _ _ _ _ = foreignCCError "BufferMode"
  writeForeign ustk bstk bm =
    bump ustk >>= \ustk -> case bm of
      NoBuffering -> (ustk, bstk) <$ poke ustk 0
      LineBuffering -> (ustk, bstk) <$ poke ustk 1
      BlockBuffering Nothing -> (ustk, bstk) <$ poke ustk 2
      BlockBuffering (Just n) -> do
        -- size goes in the already-bumped slot, then the tag above it
        poke ustk n
        ustk <- bump ustk
        (ustk, bstk) <$ poke ustk 3

-- Closure lists marshal as a boxed sequence.
instance ForeignConvention [Closure] where
  readForeign us (i : bs) _ bstk =
    (us, bs,) . toList <$> peekOffS bstk i
  readForeign _ _ _ _ = foreignCCError "[Closure]"
  writeForeign ustk bstk l = do
    bstk <- bump bstk
    (ustk, bstk) <$ pokeS bstk (Sq.fromList l)

instance ForeignConvention (MVar Closure) where
  readForeign = readForeignAs (unwrapForeign . marshalToForeign)
  writeForeign = writeForeignAs (Foreign . Wrap mvarRef)
-- | Error for a foreign call whose arguments don't match its convention.
foreignCCError :: String -> IO a
foreignCCError nm =
  die $ "mismatched foreign calling convention for `" ++ nm ++ "`"

-- | The typechecked builtin IO source, at the concrete 'Symbol' type.
typecheckedFile :: UF.TypecheckedUnisonFile Symbol Ann
typecheckedFile = typecheckedFile'

-- | Parse and typecheck the embedded 'source' against the builtins.
-- Failures here mean the embedded source itself is broken, so they are
-- reported with 'error'.
typecheckedFile' :: forall v. Var.Var v => UF.TypecheckedUnisonFile v Ann
typecheckedFile' =
  let tl :: a -> Identity (TL.TypeLookup v Ann)
      tl = const $ pure (External <$ Builtin.typeLookup)
      env = Parser.ParsingEnv mempty (Names.Names Builtin.names0 mempty)
      r = parseAndSynthesizeFile [] tl env "" source
  in case runIdentity $ Result.runResultT r of
       (Nothing, notes) -> error $ "parsing failed: " <> show notes
       (Just Left{}, notes) -> error $ "typechecking failed" <> show notes
       (Just (Right file), _) -> file

typecheckedFileTerms :: Map.Map Symbol R.Reference
typecheckedFileTerms = view _1 <$> UF.hashTerms typecheckedFile

-- | Look up a term from the embedded source by name; fatal if absent.
termNamed :: String -> R.Reference
termNamed s =
  fromMaybe (error $ "No builtin term called: " <> s)
    $ Map.lookup (Var.nameds s) typecheckedFileTerms

codeLookup :: CodeLookup Symbol Identity Ann
codeLookup = CL.fromUnisonFile $ UF.discardTypes typecheckedFile

-- | Look up a data declaration from the embedded source by name.
typeNamedId :: String -> R.Id
typeNamedId s =
  case Map.lookup (Var.nameds s) (UF.dataDeclarationsId' typecheckedFile) of
    Nothing -> error $ "No builtin type called: " <> s
    Just (r, _) -> r

typeNamed :: String -> R.Reference
typeNamed = R.DerivedId . typeNamedId

-- | Look up an ability declaration from the embedded source by name.
abilityNamedId :: String -> R.Id
abilityNamedId s =
  case Map.lookup (Var.nameds s) (UF.effectDeclarationsId' typecheckedFile) of
    Nothing -> error $ "No builtin ability called: " <> s
    Just (r, _) -> r

ioHash :: R.Id
ioHash = abilityNamedId "io.IO"

-- References for the types declared in the embedded source.
ioReference, bufferModeReference, eitherReference, ioModeReference, optionReference, errorReference, errorTypeReference, seekModeReference, threadIdReference, socketReference, handleReference, epochTimeReference, isTestReference, isPropagatedReference, filePathReference, hostNameReference, serviceNameReference
  :: R.Reference
ioReference = R.DerivedId ioHash
bufferModeReference = typeNamed "io.BufferMode"
eitherReference = typeNamed "Either"
ioModeReference = typeNamed "io.Mode"
optionReference = typeNamed "Optional"
errorReference = typeNamed "io.Error"
errorTypeReference = typeNamed "io.ErrorType"
seekModeReference = typeNamed "io.SeekMode"
threadIdReference = typeNamed "io.ThreadId"
socketReference = typeNamed "io.Socket"
handleReference = typeNamed "io.Handle"
epochTimeReference = typeNamed "io.EpochTime"
isTestReference = typeNamed "IsTest"
isPropagatedReference = typeNamed "IsPropagated"
filePathReference = typeNamed "io.FilePath"
hostNameReference = typeNamed "io.HostName"
serviceNameReference = typeNamed "io.ServiceName"

-- | The (type, term) metadata pair that marks definitions as tests.
isTest :: (R.Reference, R.Reference)
isTest = (isTestReference, termNamed "metadata.isTest")

isPropagatedValue :: R.Reference
isPropagatedValue = termNamed "metadata.isPropagated"

-- Constructor ids for the embedded declarations.
eitherLeftId, eitherRightId, someId, noneId, ioErrorId, handleId, socketId, threadIdId, epochTimeId, bufferModeLineId, bufferModeBlockId, filePathId :: DD.ConstructorId
eitherLeftId = constructorNamed eitherReference "Either.Left"
eitherRightId = constructorNamed eitherReference "Either.Right"
someId = constructorNamed optionReference "Optional.Some"
noneId = constructorNamed optionReference "Optional.None"
ioErrorId = constructorNamed errorReference "io.Error.Error"
handleId = constructorNamed handleReference "io.Handle.Handle"
socketId = constructorNamed socketReference "io.Socket.Socket"
threadIdId = constructorNamed threadIdReference "io.ThreadId.ThreadId"
epochTimeId = constructorNamed epochTimeReference "io.EpochTime.EpochTime"
bufferModeLineId = constructorNamed bufferModeReference "io.BufferMode.Line"
bufferModeBlockId = constructorNamed bufferModeReference "io.BufferMode.Block"
filePathId = constructorNamed filePathReference "io.FilePath.FilePath"

mkErrorType :: Text -> DD.ConstructorId
mkErrorType = constructorNamed errorTypeReference

alreadyExistsId, noSuchThingId, resourceBusyId, resourceExhaustedId, eofId, illegalOperationId, permissionDeniedId, userErrorId
  :: DD.ConstructorId
alreadyExistsId = mkErrorType "io.ErrorType.AlreadyExists"
noSuchThingId = mkErrorType "io.ErrorType.NoSuchThing"
resourceBusyId = mkErrorType "io.ErrorType.ResourceBusy"
resourceExhaustedId = mkErrorType "io.ErrorType.ResourceExhausted"
eofId = mkErrorType "io.ErrorType.EOF"
illegalOperationId = mkErrorType "io.ErrorType.IllegalOperation"
permissionDeniedId = mkErrorType "io.ErrorType.PermissionDenied"
userErrorId = mkErrorType "io.ErrorType.UserError"

-- | Index of a constructor (by name) within a type's declaration.
-- Fatal if the type or the constructor is missing: either case is a
-- bug in the runtime's embedded source.
constructorNamed :: R.Reference -> Text -> DD.ConstructorId
constructorNamed ref name =
  case runIdentity . getTypeDeclaration codeLookup $ R.unsafeId ref of
    Nothing ->
      error
        $ "There's a bug in the Unison runtime. Couldn't find type "
        <> show ref
    Just decl ->
      fromMaybe
        ( error
            $ "Unison runtime bug. The type "
            <> show ref
            <> " has no constructor named "
            <> show name
        )
        . elemIndex name
        . DD.constructorNames
        $ DD.asDataDecl decl
Couldn't find type " + <> show ref + Just decl -> genericIndex (DD.constructorNames $ DD.asDataDecl decl) cid + +-- .. todo - fill in the rest of these + +source :: Text +source = fromString [r| + +type Either a b = Left a | Right b + +type Optional a = None | Some a + +unique[b28d929d0a73d2c18eac86341a3bb9399f8550c11b5f35eabb2751e6803ccc20] type + IsPropagated = IsPropagated + +d1 Doc.++ d2 = + use Doc + match (d1,d2) with + (Join ds, Join ds2) -> Join (ds Sequence.++ ds2) + (Join ds, _) -> Join (ds `Sequence.snoc` d2) + (_, Join ds) -> Join (d1 `Sequence.cons` ds) + _ -> Join [d1,d2] + +unique[q1905679b27a97a4098bc965574da880c1074183a2c55ff1d481619c7fb8a1e1] type + Author = { guid : GUID, name : Text } + +unique[ee1c051034fa0671ea66e7c708ba552003bd3cf657bd28bf0051f1f8cdfcba53] type + CopyrightHolder = { guid : GUID, name : Text} + +unique[bed6724af0d5f47f80cdea1b6023d35f120137ee0556e57154a9fc8b62fe5fed] type + License = { copyrightHolders : [CopyrightHolder] + , years : [Year] + , licenseType : LicenseType } + +-- Use `Doc` here to get nice text-wrapping when viewing +-- and to avoid needing to stick hard line breaks in the license +unique[d875fa1ea7ef3adf8e29417c6c8b01a1830c4c6bd10dcca9d4196388462e0b7a] type LicenseType = LicenseType Doc + +unique[cb8469a1b41a63655062226556eaccf06129a2641af61fe7edef9c485c94a870] type GUID = GUID Bytes + +-- Common era years +unique[u9ae6694152966cf1b0c1f4ad901a77e1acd7bbe16595fd27b07435ac45dab05] type Year = Year Nat + +-- This is linked to definitions that are considered tests +unique[e6dca08b40458b03ca1660cfbdaecaa7279b42d18257898b5fd1c34596aac36f] type + IsTest = IsTest + +-- Create references for these that can be used as metadata. +-- (Reminder: Metadata is references, not values.) +metadata.isTest = IsTest.IsTest +metadata.isPropagated = IsPropagated.IsPropagated + +-- Handles are unique identifiers. +-- The implementation of IO in the runtime will supply Haskell +-- file handles and map those to Unison handles. 
+-- A pure implementation of I/O might use some kind of pure supply +-- of unique IDs instead. +unique[d4597403ec40fd4fbee57c62b8096f9c3d382dff01f20108546fe3530a927e86] type + io.Handle = Handle Text + +-- Ditto for sockets +unique[e1d94401fde8b2546d6dfc54e93f11e6a9285a7ea765d3255da19122a42715d3] type + io.Socket = Socket Text + +-- Builtin handles: standard in, out, error + +use io Error Mode Handle IO Socket ThreadId HostName FilePath EpochTime + BufferMode SeekMode ServiceName + +use io.Handle Handle + +namespace io where + stdin : Handle + stdin = Handle "stdin" + + stdout : Handle + stdout = Handle "stdout" + + stderr : Handle + stderr = Handle "stderr" + + -- Throw an I/O error on the left as an effect in `IO` + rethrow : (Either io.Error a) -> {IO} a + rethrow x = match x with + Either.Left e -> io.IO.throw e + Either.Right a -> a + + -- Print a line to the standard output + printLine : Text ->{IO} () + printLine t = + putText stdout t + putText stdout "\n" + + -- Read a line from the standard input + readLine : '{IO} Text + readLine = '(getLine stdin) + + -- Built-ins + + -- Open a named file in the given mode, yielding an open file handle + openFile : FilePath -> Mode ->{IO} Handle + openFile f m = rethrow (io.IO.openFile_ f m) + + -- Close an open file handle + closeFile : Handle ->{IO} () + closeFile f = rethrow (io.IO.closeFile_ f) + + -- Check whether a file handle has reached the end of the file + isFileEOF : Handle ->{IO} Boolean + isFileEOF h = rethrow (io.IO.isFileEOF_ h) + + -- Check whether a file handle is open + isFileOpen : Handle ->{IO} Boolean + isFileOpen h = rethrow (io.IO.isFileOpen_ h) + + -- Get a line of text from a text file handle + getLine : Handle ->{IO} Text + getLine h = rethrow (io.IO.getLine_ h) + + -- Get the entire contents of a file as a single block of text + getText : Handle ->{IO} Text + getText h = rethrow (io.IO.getText_ h) + + -- Write some text to a file + putText : Handle -> Text ->{IO} () + putText h t = rethrow 
(io.IO.putText_ h t) + + -- Get epoch system time + systemTime : '{IO} EpochTime + systemTime = '(rethrow (io.IO.systemTime_)) + + -- Does the file handle support `seek`? + isSeekable : Handle -> {IO} Boolean + isSeekable h = rethrow (io.IO.isSeekable_ h) + + -- Seek to a position in a file handle + seek : Handle -> SeekMode -> Int ->{IO} () + seek h m i = rethrow (io.IO.seek_ h m i) + + -- Ask for the position of a file handle + position : Handle ->{IO} Int + position h = rethrow (io.IO.position_ h) + + -- Get the buffer mode of a file handle + getBuffering : Handle ->{IO} (Optional BufferMode) + getBuffering h = rethrow (io.IO.getBuffering_ h) + + -- Set the buffer mode for a file handle + setBuffering : Handle -> Optional BufferMode ->{IO} () + setBuffering h bm = rethrow (io.IO.setBuffering_ h bm) + + -- Get the path to a temporary directory managed by the operating system + getTemporaryDirectory : '{IO} FilePath + getTemporaryDirectory = '(rethrow (io.IO.getTemporaryDirectory_)) + + -- Get the current working directory + getCurrentDirectory : '{IO} FilePath + getCurrentDirectory = '(rethrow (io.IO.getCurrentDirectory_)) + + -- Set the current working directory + setCurrentDirectory : FilePath -> {IO} () + setCurrentDirectory d = rethrow (io.IO.setCurrentDirectory_ d) + + -- List the contents of a directory + directoryContents : FilePath -> {IO} [FilePath] + directoryContents d = rethrow (io.IO.directoryContents_ d) + + -- Check if a path exists + fileExists : FilePath -> {IO} Boolean + fileExists d = rethrow (io.IO.fileExists_ d) + + -- Check if a path is a directory + isDirectory : FilePath -> {IO} Boolean + isDirectory d = rethrow (io.IO.isDirectory_ d) + + -- Create a directory at the given path, including parent directories + createDirectory : FilePath -> {IO} () + createDirectory d = rethrow (io.IO.createDirectory_ d) + + -- Remove the directory at the given path + removeDirectory : FilePath -> {IO} () + removeDirectory d = rethrow (io.IO.removeDirectory_ 
d) + + -- Move a directory from one path to another + renameDirectory : FilePath -> FilePath -> {IO} () + renameDirectory from to = rethrow (io.IO.renameDirectory_ from to) + + -- Remove a file from the file system + removeFile : FilePath -> {IO} () + removeFile d = rethrow (io.IO.removeFile_ d) + + -- Move a file from one path to another + renameFile : FilePath -> FilePath -> {IO} () + renameFile from to = rethrow (io.IO.renameFile_ from to) + + -- Get the timestamp of a file + getFileTimestamp : FilePath -> {IO} EpochTime + getFileTimestamp d = rethrow (io.IO.getFileTimestamp_ d) + + -- Get the size of a file in bytes + getFileSize : FilePath -> {IO} Nat + getFileSize d = rethrow (io.IO.getFileSize_ d) + + -- Create a socket bound to the given local port/service. + -- If a hostname is not given, this will use any available host. + serverSocket : Optional HostName -> ServiceName -> {IO} Socket + serverSocket host service = rethrow (io.IO.serverSocket_ host service) + + -- Start listening for connections on the given socket. + listen : Socket -> {IO} () + listen s = rethrow (io.IO.listen_ s) + + -- Create a socket connected to the given remote address. + clientSocket : HostName -> ServiceName -> {IO} Socket + clientSocket host service = rethrow (io.IO.clientSocket_ host service) + + -- Close a socket and all connections to it. + closeSocket : Socket -> {IO} () + closeSocket s = rethrow (io.IO.closeSocket_ s) + + -- Accept a connection on a socket. + -- Returns a socket that can send and receive data on a new connection + accept : Socket -> {IO} Socket + accept s = rethrow (io.IO.accept_ s) + + -- Send some bytes to a socket. + send : Socket -> Bytes -> {IO} () + send s bs = rethrow (io.IO.send_ s bs) + + -- Read the specified number of bytes from a socket. + receive : Socket -> Nat ->{IO} (Optional Bytes) + receive s n = rethrow (io.IO.receive_ s n) + + -- Fork a new thread. 
+ fork : '{IO} a -> {IO} ThreadId + fork a = rethrow (io.IO.fork_ a) + + -- Kill a running thread. + kill : ThreadId -> {IO} () + kill t = rethrow (io.IO.kill_ t) + + -- Suspend the current thread for a number of microseconds. + delay : Nat -> {IO} () + delay n = rethrow (io.IO.delay_ n) + + -- Safely acquire and release a resource + bracket : '{IO} a -> (a ->{IO} b) -> (a ->{IO} c) -> {IO} c + bracket acquire release what = rethrow (io.IO.bracket_ acquire release what) + + -- Run the given computation, and if it throws an error + -- handle the error with the given handler. + -- catch : '{IO} a -> (io.Error ->{IO} a) ->{IO} a + -- catch c h = + -- k io = match io with + -- { IO.throw e } -> h e + -- x -> x + -- handle k in c + +-- IO Modes from the Haskell API +type io.Mode = Read | Write | Append | ReadWrite + +-- IO error types from the Haskell API +unique[bb57f367a3740d4a1608b9e0eee14fd744ec9e368f1529550cb436ef56c0b268] type + io.ErrorType + = AlreadyExists + | NoSuchThing + | ResourceBusy + | ResourceExhausted + | EOF + | IllegalOperation + | PermissionDenied + | UserError + +unique[b5c578f0a9977ed54a5a12b580dc6b0b2ba37bc3f517f48d1b3285a7f3e8c6bc] type + io.ErrorLocation = ErrorLocation Text +unique[e6ca048b6bf540f93617c0ef9506afcbb490427a9581a01d51ffad39cdf2c554] type + io.ErrorDescription = ErrorDescription Text +unique[d5d61b0a65f1d448dbdeed8af688f0bdbab6b3f775400da370eb5bfc34e428d5] type + io.FilePath = FilePath Text + +type io.Error = Error io.ErrorType Text + +unique[cad7ab802bd143f0b674155c9caf18dde7145d16867a02659534d7bb01a5e287] type + io.SeekMode = Absolute | Relative | FromEnd + +-- If the buffer size is not specified, +-- use an implementation-specific size. 
+unique[e65de145a461a771de93d6c7885acae28552d77f8ae460bc8bf5de6f2a15ff77] type + io.BufferMode = Line | Block (Optional Nat) + +unique[e1f48f31982a720ae895c0bf4e6ea9a950f5c00d3a73101ad31e63461b7beded] type + io.EpochTime = EpochTime Nat + +-- Either a host name e.g., "unisonweb.org" or a numeric host address +-- string consisting of a dotted decimal IPv4 address +-- e.g., "192.168.0.1". +unique[c7279b501764751edc66f1f7b532e68354fc4704c9eb1ed201f01c894cdd86f4] type + io.HostName = HostName Text + +-- For example a port number like "8080" +unique[ee4ff0bda526b0513e4c7b7387b39811ce57938ddb31a77fdb0ff00ee2717c33] type + io.ServiceName = ServiceName Text + +unique[a38186de35c9fcd29d2b359b2148f9f890732413d91575af39d025fcded67e89] type + io.ThreadId = ThreadId Text + +ability io.IO where + + -- Basic file IO + openFile_ : io.FilePath -> io.Mode -> (Either io.Error io.Handle) + closeFile_ : io.Handle -> (Either io.Error ()) + isFileEOF_ : io.Handle -> (Either io.Error Boolean) + isFileOpen_ : io.Handle -> (Either io.Error Boolean) + + -- Text input and output + + --getChar : io.Handle -> Char + getLine_ : io.Handle -> (Either io.Error Text) + -- Get the entire contents of the file as text + getText_ : io.Handle -> (Either io.Error Text) + -- putChar : io.Handle -> Char -> () + putText_ : io.Handle -> Text -> (Either io.Error ()) + + -- Throw an error as an `io.IO` effect + throw : io.Error -> a + + -- File positioning + isSeekable_ : io.Handle -> (Either io.Error Boolean) + seek_ : io.Handle -> io.SeekMode -> Int -> (Either io.Error ()) + position_ : io.Handle -> (Either io.Error Int) + + -- File buffering + getBuffering_ : io.Handle -> Either io.Error (Optional io.BufferMode) + setBuffering_ : io.Handle -> Optional io.BufferMode -> (Either io.Error ()) + + -- Should we expose mutable arrays for byte buffering? + -- Inclined to say no, although that sounds a lot like + -- a decision to just be slow. + -- We'll need a byte buffer manipulation library in that case. 
+ + -- getBytes : io.Handle -> Nat -> Bytes + -- putBytes : io.Handle -> Bytes -> () + + -- getBytes : io.Handle -> Nat -> ByteArray -> Nat + -- putBytes : io.Handle -> Nat -> ByteArray -> () + + systemTime_ : (Either io.Error io.EpochTime) + + -- File system operations + getTemporaryDirectory_ : (Either io.Error io.FilePath) + getCurrentDirectory_ : (Either io.Error io.FilePath) + setCurrentDirectory_ : io.FilePath -> (Either io.Error ()) + directoryContents_ : io.FilePath -> Either io.Error [io.FilePath] + fileExists_ : io.FilePath -> (Either io.Error Boolean) + isDirectory_ : io.FilePath -> (Either io.Error Boolean) + createDirectory_ : io.FilePath -> (Either io.Error ()) + removeDirectory_ : io.FilePath -> (Either io.Error ()) + renameDirectory_ : io.FilePath -> io.FilePath -> (Either io.Error ()) + removeFile_ : io.FilePath -> (Either io.Error ()) + renameFile_ : io.FilePath -> io.FilePath -> (Either io.Error ()) + getFileTimestamp_ : io.FilePath -> (Either io.Error io.EpochTime) + getFileSize_ : io.FilePath -> (Either io.Error Nat) + + -- Simple TCP Networking + + -- Create a socket bound to the given local address. + -- If a hostname is not given, this will use any available host. + serverSocket_ : Optional io.HostName -> + io.ServiceName -> (Either io.Error io.Socket) + -- Start listening for connections + listen_ : io.Socket -> (Either io.Error ()) + + -- Create a socket connected to the given remote address + clientSocket_ : io.HostName -> + io.ServiceName -> (Either io.Error io.Socket) + + closeSocket_ : io.Socket -> (Either io.Error ()) + + --socketToio.Handle : Socket -> Mode -> (Either io.Error io.Handle) + --handleToSocket : io.Handle -> (Either io.Error Socket) + + -- Accept a connection on a socket. + -- Returns a socket that can send and receive data on a new connection + accept_ : io.Socket -> (Either io.Error io.Socket) + + -- Send some bytes to a socket. 
+ send_ : io.Socket -> Bytes -> (Either io.Error ()) + + -- Read the spefified number of bytes from the socket. + receive_ : io.Socket -> Nat -> (Either io.Error (Optional Bytes)) + + -- scatter/gather mode network I/O + -- sendMany : Socket -> [Bytes] -> Int + + -- Threading -- + + -- Fork a thread + fork_ : '{io.IO} a -> (Either io.Error io.ThreadId) + + -- Kill a running thread + kill_ : io.ThreadId -> (Either io.Error ()) + + -- Suspend the current thread for a number of microseconds. + delay_ : Nat -> (Either io.Error ()) + + -- Safely acquire and release a resource + bracket_ : '{io.IO} a -> (a ->{io.IO} b) -> (a ->{io.IO} c) ->{io.IO} (Either io.Error c) + +|] diff --git a/parser-typechecker/src/Unison/Runtime/IR.hs b/parser-typechecker/src/Unison/Runtime/IR.hs new file mode 100644 index 0000000000..b24d804bb5 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/IR.hs @@ -0,0 +1,1196 @@ +{-# Language DeriveFoldable #-} +{-# Language DeriveTraversable #-} +{-# Language OverloadedStrings #-} +{-# Language PartialTypeSignatures #-} +{-# Language StrictData #-} +{-# Language ViewPatterns #-} +{-# Language PatternSynonyms #-} + +module Unison.Runtime.IR where + +import Unison.Prelude + +import Control.Monad.State.Strict (StateT, gets, modify, runStateT) +import Data.Bifunctor (first, second) +import Data.IORef +import Unison.Hash (Hash) +import Unison.NamePrinter (prettyHashQualified0) +import Unison.Referent (Referent) +import Unison.Symbol (Symbol) +import Unison.Util.CyclicEq (CyclicEq, cyclicEq) +import Unison.Util.CyclicOrd (CyclicOrd, cyclicOrd) +import Unison.Util.Monoid (intercalateMap) +import Unison.Var (Var) +import qualified Data.Map as Map +import qualified Data.Sequence as Sequence +import qualified Data.Set as Set +import qualified Unison.ABT as ABT +import qualified Unison.Builtin.Decls as DD +import qualified Unison.PatternCompat as Pattern +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Reference as R +import 
qualified Unison.Runtime.ANF as ANF +import qualified Unison.Term as Term +import qualified Unison.TermPrinter as TP +import qualified Unison.Util.Bytes as Bytes +import qualified Unison.Util.ColorText as CT +import qualified Unison.Util.CycleTable as CyT +import qualified Unison.Util.CyclicOrd as COrd +import qualified Unison.Util.Pretty as P +import qualified Unison.Var as Var + +type Pos = Int +type Arity = Int +type ConstructorId = Int +type Term v = Term.Term v () + +data CompilationEnv e cont + = CompilationEnv { toIR' :: Map R.Reference (IR e cont) + , constructorArity' :: Map (R.Reference, Int) Int } + +toIR :: CompilationEnv e cont -> R.Reference -> Maybe (IR e cont) +toIR = flip Map.lookup . toIR' + +constructorArity :: CompilationEnv e cont -> R.Reference -> Int -> Maybe Int +constructorArity e r i = Map.lookup (r,i) $ constructorArity' e + +-- SymbolC = Should this variable be compiled as a LazySlot? +data SymbolC = + SymbolC { isLazy :: Bool + , underlyingSymbol :: Symbol + }-- deriving Show +instance Show SymbolC where + show (SymbolC lazy s) = (if lazy then "'" else "") <> show s + +makeLazy :: SymbolC -> SymbolC +makeLazy s = s { isLazy = True } + +toSymbolC :: Symbol -> SymbolC +toSymbolC = SymbolC False + +-- Values, in normal form +type RefID = Int + +data Value e cont + = I Int64 | F Double | N Word64 | B Bool | T Text | C Char | Bs Bytes.Bytes + | TermLink Referent + | TypeLink R.Reference + | Lam Arity (UnderapplyStrategy e cont) (IR e cont) + | Data R.Reference ConstructorId [Value e cont] + | Sequence (Sequence.Seq (Value e cont)) + | Ref RefID Symbol (IORef (Value e cont)) + | Pure (Value e cont) + | Requested (Req e cont) + | Cont cont + | UninitializedLetRecSlot Symbol [(Symbol, IR e cont)] (IR e cont) + +instance (Eq cont, Eq e) => Eq (Value e cont) where + I x == I y = x == y + F x == F y = x == y + N x == N y = x == y + B x == B y = x == y + T x == T y = x == y + C x == C y = x == y + Bs x == Bs y = x == y + Lam n us _ == Lam n2 us2 _ 
= n == n2 && us == us2 + Data r1 cid1 vs1 == Data r2 cid2 vs2 = r1 == r2 && cid1 == cid2 && vs1 == vs2 + Sequence vs == Sequence vs2 = vs == vs2 + Ref _ _ io1 == Ref _ _ io2 = io1 == io2 + Pure x == Pure y = x == y + Requested r1 == Requested r2 = r1 == r2 + Cont k1 == Cont k2 = k1 == k2 + TermLink r1 == TermLink r2 = r1 == r2 + TypeLink r1 == TypeLink r2 = r1 == r2 + _ == _ = False + +instance (Eq cont, Eq e) => Eq (UnderapplyStrategy e cont) where + FormClosure h _ vs == FormClosure h2 _ vs2 = h == h2 && vs == vs2 + Specialize h _ vs == Specialize h2 _ vs2 = h == h2 && vs == vs2 + _ == _ = False + +-- would have preferred to make pattern synonyms +maybeToOptional :: Maybe (Value e cont) -> Value e cont +maybeToOptional = \case + Just a -> Data DD.optionalRef 1 [a] + Nothing -> Data DD.optionalRef 0 [] + +unit :: Value e cont +unit = Data DD.unitRef 0 [] + +pair :: (Value e cont, Value e cont) -> Value e cont +pair (a, b) = Data DD.pairRef 0 [a, b] + +tuple :: [Value e cont] -> Value e cont +tuple [v] = v +tuple vs = foldr (curry pair) unit vs + +-- When a lambda is underapplied, for instance, `(x y -> x) 19`, we can do +-- one of two things: we can substitute away the arguments that have +-- been applied, in this example, creating the lambda `x -> 19`. This +-- is called specialization and requires recompiling the lambda with its new +-- body. +-- +-- The other option is to just stash the arguments until the rest of the +-- args are supplied later. This keeps the original lambda around and +-- doesn't involve recompiling. This would just create the closure +-- `((x y -> x) 19)`, which when given one more arg, would call the original +-- `x y -> x` function with both arguments. 
+-- +-- Specialization can be done for any Unison term definition, like +-- +-- blah x y = x + y +-- +-- Closure formation is used for: +-- +-- * builtin functions +-- * constructor functions +-- +-- The reason is that builtins and constructor functions don't have a body +-- with variables that we could substitute - the functions only compute +-- to anything when all the arguments are available. + +data UnderapplyStrategy e cont + = FormClosure Hash (Term SymbolC) [Value e cont] -- head is the latest argument + | Specialize Hash (Term SymbolC) [(SymbolC, Value e cont)] -- same + deriving (Show) + +decompileUnderapplied :: (External e, External cont) => UnderapplyStrategy e cont -> DS (Term Symbol) +decompileUnderapplied u = case u of -- todo: consider unlambda-lifting here + FormClosure _ lam vals -> + Term.apps' (Term.vmap underlyingSymbol lam) . reverse <$> + traverse decompileImpl vals + Specialize _ lam symvals -> do + lam <- Term.apps' (Term.vmap underlyingSymbol lam) . reverse <$> + traverse (decompileImpl . 
snd) symvals + pure $ Term.betaReduce lam + +-- Patterns - for now this follows Unison.Pattern exactly, but +-- we may switch to more efficient runtime representation of patterns +data Pattern + = PatternI Int64 | PatternF Double | PatternN Word64 | PatternB Bool | PatternT Text | PatternC Char + | PatternData R.Reference ConstructorId [Pattern] + | PatternSequenceLiteral [Pattern] + | PatternSequenceCons Pattern Pattern + | PatternSequenceSnoc Pattern Pattern + -- `Either Int Int` here represents the known constant length of either + -- the left or right side of a sequence concat operation + | PatternSequenceConcat (Either Int Int) Pattern Pattern + | PatternPure Pattern + | PatternBind R.Reference ConstructorId [Pattern] Pattern + | PatternAs Pattern + | PatternIgnore + | PatternVar deriving (Eq,Show) + +-- Leaf level instructions - these return immediately without using any stack +data Z e cont + = Slot Pos + | LazySlot Pos + | Val (Value e cont) + | External e + deriving (Eq) + +-- The `Set Int` is the set of de bruijn indices that are free in the body +-- of `Let` instructions. 
+type IR e cont = IR' (Set Int) (Z e cont) + +-- Computations - evaluation reduces these to values +data IR' ann z + = Leaf z + -- Ints + | AddI z z | SubI z z | MultI z z | DivI z z + | GtI z z | LtI z z | GtEqI z z | LtEqI z z | EqI z z + | SignumI z | NegateI z | Truncate0I z | ModI z z + | PowI z z | ShiftLI z z | ShiftRI z z | BitAndI z z + | BitOrI z z | BitXorI z z | ComplementI z | LeadZeroI z + | TrailZeroI z + -- Nats + | AddN z z | DropN z z | SubN z z | MultN z z | DivN z z + | GtN z z | LtN z z | GtEqN z z | LtEqN z z | EqN z z + | ModN z z | ToIntN z | PowN z z | ShiftLN z z | ShiftRN z z + | BitOrN z z | BitXorN z z | BitAndN z z | ComplementN z + | LeadZeroN z | TrailZeroN z + -- Floats + | AddF z z | SubF z z | MultF z z | DivF z z + | GtF z z | LtF z z | GtEqF z z | LtEqF z z | EqF z z + -- Universals + | EqU z z -- universal equality + | CompareU z z -- universal ordering + -- Debugging/Utilities + | Todo z + | Bug z + -- Control flow + + -- `Let` has an `ann` associated with it, e.g `ann = Set Int` which is the + -- set of "free" stack slots referenced by the body of the `let` + | Let Symbol (IR' ann z) (IR' ann z) ann + | LetRec [(Symbol, IR' ann z)] (IR' ann z) + | MakeSequence [z] + | Apply (IR' ann z) [z] + | Construct R.Reference ConstructorId [z] + | Request R.Reference ConstructorId [z] + | Handle z (IR' ann z) + | If z (IR' ann z) (IR' ann z) + | And z (IR' ann z) + | Or z (IR' ann z) + | Not z + -- pattern, optional guard, rhs + | Match z [(Pattern, [Symbol], Maybe (IR' ann z), IR' ann z)] + deriving (Functor,Foldable,Traversable,Eq,Show) + +prettyZ :: PPE.PrettyPrintEnv + -> (e -> P.Pretty String) + -> (cont -> P.Pretty String) + -> Z e cont + -> P.Pretty String +prettyZ ppe prettyE prettyCont z = case z of + Slot i -> "@" <> P.shown i + LazySlot i -> "'@" <> P.shown i + Val v -> prettyValue ppe prettyE prettyCont v + External e -> "External" `P.hang` prettyE e + +prettyIR :: PPE.PrettyPrintEnv + -> (e -> P.Pretty String) + -> (cont -> 
P.Pretty String) + -> IR e cont + -> P.Pretty String +prettyIR ppe prettyE prettyCont = pir + where + unlets (Let s hd tl _) = (Just s, hd) : unlets tl + unlets e = [(Nothing, e)] + pz = prettyZ ppe prettyE prettyCont + pir ir = case ir of + Leaf z -> pz z + AddI a b -> P.parenthesize $ "AddI" `P.hang` P.spaced [pz a, pz b] + SubI a b -> P.parenthesize $ "SubI" `P.hang` P.spaced [pz a, pz b] + MultI a b -> P.parenthesize $ "MultI" `P.hang` P.spaced [pz a, pz b] + DivI a b -> P.parenthesize $ "DivI" `P.hang` P.spaced [pz a, pz b] + GtI a b -> P.parenthesize $ "GtI" `P.hang` P.spaced [pz a, pz b] + LtI a b -> P.parenthesize $ "LtI" `P.hang` P.spaced [pz a, pz b] + GtEqI a b -> P.parenthesize $ "GtEqI" `P.hang` P.spaced [pz a, pz b] + LtEqI a b -> P.parenthesize $ "LtEqI" `P.hang` P.spaced [pz a, pz b] + EqI a b -> P.parenthesize $ "EqI" `P.hang` P.spaced [pz a, pz b] + SignumI a -> P.parenthesize $ "SignumI" `P.hang` P.spaced [pz a] + NegateI a -> P.parenthesize $ "NegateI" `P.hang` P.spaced [pz a] + Truncate0I a -> P.parenthesize $ "Truncate0I" `P.hang` P.spaced [pz a] + ModI a b -> P.parenthesize $ "ModI" `P.hang` P.spaced [pz a, pz b] + PowI a b -> P.parenthesize $ "PowI" `P.hang` P.spaced [pz a, pz b] + ShiftRI a b -> P.parenthesize $ "ShiftRI" `P.hang` P.spaced [pz a, pz b] + ShiftLI a b -> P.parenthesize $ "ShiftLI" `P.hang` P.spaced [pz a, pz b] + BitAndI a b -> P.parenthesize $ "BitAndI" `P.hang` P.spaced [pz a, pz b] + BitOrI a b -> P.parenthesize $ "BitOrI" `P.hang` P.spaced [pz a, pz b] + BitXorI a b -> P.parenthesize $ "BitXorI" `P.hang` P.spaced [pz a, pz b] + ComplementI a -> P.parenthesize $ "ComplementI" `P.hang` P.spaced [pz a] + LeadZeroI a -> P.parenthesize $ "LeadZeroI" `P.hang` P.spaced [pz a] + TrailZeroI a -> P.parenthesize $ "TrailZeroI" `P.hang` P.spaced [pz a] + + AddN a b -> P.parenthesize $ "AddN" `P.hang` P.spaced [pz a, pz b] + SubN a b -> P.parenthesize $ "SubN" `P.hang` P.spaced [pz a, pz b] + DropN a b -> P.parenthesize $ "DropN" 
`P.hang` P.spaced [pz a, pz b] + MultN a b -> P.parenthesize $ "MultN" `P.hang` P.spaced [pz a, pz b] + DivN a b -> P.parenthesize $ "DivN" `P.hang` P.spaced [pz a, pz b] + GtN a b -> P.parenthesize $ "GtN" `P.hang` P.spaced [pz a, pz b] + LtN a b -> P.parenthesize $ "LtN" `P.hang` P.spaced [pz a, pz b] + GtEqN a b -> P.parenthesize $ "GtEqN" `P.hang` P.spaced [pz a, pz b] + LtEqN a b -> P.parenthesize $ "LtEqN" `P.hang` P.spaced [pz a, pz b] + EqN a b -> P.parenthesize $ "EqN" `P.hang` P.spaced [pz a, pz b] + ModN a b -> P.parenthesize $ "ModN" `P.hang` P.spaced [pz a, pz b] + ToIntN a -> P.parenthesize $ "ToIntN" `P.hang` P.spaced [pz a] + PowN a b -> P.parenthesize $ "PowN" `P.hang` P.spaced [pz a, pz b] + ShiftLN a b -> P.parenthesize $ "ShiftLN" `P.hang` P.spaced [pz a, pz b] + ShiftRN a b -> P.parenthesize $ "ShiftRN" `P.hang` P.spaced [pz a, pz b] + BitAndN a b -> P.parenthesize $ "BitAndN" `P.hang` P.spaced [pz a, pz b] + BitOrN a b -> P.parenthesize $ "BitOrN" `P.hang` P.spaced [pz a, pz b] + BitXorN a b -> P.parenthesize $ "BitXorN" `P.hang` P.spaced [pz a, pz b] + ComplementN a -> P.parenthesize $ "ComplementN" `P.hang` P.spaced [pz a] + LeadZeroN a -> P.parenthesize $ "LeadZeroN" `P.hang` P.spaced [pz a] + TrailZeroN a -> P.parenthesize $ "TrailZeroN" `P.hang` P.spaced [pz a] + + AddF a b -> P.parenthesize $ "AddF" `P.hang` P.spaced [pz a, pz b] + SubF a b -> P.parenthesize $ "SubF" `P.hang` P.spaced [pz a, pz b] + MultF a b -> P.parenthesize $ "MultF" `P.hang` P.spaced [pz a, pz b] + DivF a b -> P.parenthesize $ "DivF" `P.hang` P.spaced [pz a, pz b] + GtF a b -> P.parenthesize $ "GtF" `P.hang` P.spaced [pz a, pz b] + LtF a b -> P.parenthesize $ "LtF" `P.hang` P.spaced [pz a, pz b] + GtEqF a b -> P.parenthesize $ "GtEqF" `P.hang` P.spaced [pz a, pz b] + LtEqF a b -> P.parenthesize $ "LtEqF" `P.hang` P.spaced [pz a, pz b] + EqF a b -> P.parenthesize $ "EqF" `P.hang` P.spaced [pz a, pz b] + EqU a b -> P.parenthesize $ "EqU" `P.hang` P.spaced [pz a, pz b] 
+ CompareU a b -> P.parenthesize $ "CompareU" `P.hang` P.spaced [pz a, pz b] + Bug a -> P.parenthesize $ "Bug" `P.hang` P.spaced [pz a] + Todo a -> P.parenthesize $ "Todo" `P.hang` P.spaced [pz a] + ir@Let{} -> + P.group $ "let" `P.hang` P.lines (blockElem <$> block) + where + block = unlets ir + blockElem (Nothing, binding) = pir binding + blockElem (Just name, binding) = + (P.shown name <> " =") `P.hang` pir binding + LetRec bs body -> P.group $ "letrec" `P.hang` P.lines ls + where + blockElem (Nothing, binding) = pir binding + blockElem (Just name, binding) = + (P.shown name <> " =") `P.hang` pir binding + ls = fmap blockElem $ [ (Just n, ir) | (n,ir) <- bs ] + ++ [(Nothing, body)] + MakeSequence vs -> P.group $ + P.surroundCommas "[" "]" (pz <$> vs) + Apply fn args -> P.parenthesize $ pir fn `P.hang` P.spaced (pz <$> args) + Construct r cid args -> P.parenthesize $ + ("Construct " <> prettyHashQualified0 (PPE.patternName ppe r cid)) + `P.hang` + P.surroundCommas "[" "]" (pz <$> args) + Request r cid args -> P.parenthesize $ + ("Request " <> prettyHashQualified0 (PPE.patternName ppe r cid)) + `P.hang` + P.surroundCommas "[" "]" (pz <$> args) + Handle h body -> P.parenthesize $ + P.group ("Handle " <> pz h) `P.hang` pir body + If cond t f -> P.parenthesize $ + ("If " <> pz cond) `P.hang` P.spaced [pir t, pir f] + And x y -> P.parenthesize $ "And" `P.hang` P.spaced [pz x, pir y] + Or x y -> P.parenthesize $ "Or" `P.hang` P.spaced [pz x, pir y] + Not x -> P.parenthesize $ "Not" `P.hang` pz x + Match scrute cases -> P.parenthesize $ + P.group ("Match " <> pz scrute) `P.hang` P.lines (pcase <$> cases) + where + pcase (pat, vs, guard, rhs) = let + lhs = P.spaced . 
P.nonEmpty $ + [ P.parenthesize (P.shown pat), P.shown vs, maybe mempty pir guard ] + in (lhs <> " ->" `P.hang` pir rhs) + +prettyValue :: PPE.PrettyPrintEnv + -> (e -> P.Pretty String) + -> (cont -> P.Pretty String) + -> Value e cont + -> P.Pretty String +prettyValue ppe prettyE prettyCont = pv + where + pv v = case v of + I i -> (if i >= 0 then "+" else "" ) <> P.string (show i) + F d -> P.shown d + N n -> P.shown n + B b -> if b then "true" else "false" + T t -> P.shown t + C c -> P.shown c + Bs bs -> P.shown bs + TermLink r -> P.parenthesize $ + ("TermLink " <> prettyHashQualified0 (PPE.termName ppe r)) + TypeLink r -> P.parenthesize $ + ("TypeLink " <> prettyHashQualified0 (PPE.typeName ppe r)) + Lam arity _u b -> P.parenthesize $ + ("Lambda " <> P.string (show arity)) `P.hang` + prettyIR ppe prettyE prettyCont b + Data r cid vs -> P.parenthesize $ + ("Data " <> prettyHashQualified0 (PPE.patternName ppe r cid)) `P.hang` + P.surroundCommas "[" "]" (pv <$> vs) + Sequence vs -> P.surroundCommas "[" "]" (pv <$> vs) + Ref id name _ -> P.parenthesize $ + P.sep " " ["Ref", P.shown id, P.shown name] + Pure v -> P.surroundCommas "{" "}" [pv v] + Requested (Req r cid vs cont) -> P.parenthesize $ + ("Request " <> prettyHashQualified0 (PPE.patternName ppe r cid)) + `P.hang` + P.spaced [ + P.surroundCommas "[" "]" (pv <$> vs), + prettyCont cont + ] + Cont k -> P.parenthesize $ "Cont" `P.hang` prettyCont k + UninitializedLetRecSlot s _ _ -> P.parenthesize $ + "Uninitialized " <> P.shown s + +-- Contains the effect ref and ctor id, the args, and the continuation +-- which expects the result at the top of the stack +data Req e cont = Req R.Reference ConstructorId [Value e cont] cont + deriving (Eq,Show) + +-- Annotate all `z` values with the number of outer bindings, useful for +-- tracking free variables or converting away from debruijn indexing. +-- Currently used as an implementation detail by `specializeIR`. 
+annotateDepth :: IR' a z -> IR' a (z, Int) +annotateDepth = go 0 where + go depth ir = case ir of + -- Only the binders modify the depth + Let v b body ann -> Let v (go depth b) (go (depth + 1) body) ann + LetRec bs body -> let + depth' = depth + length bs + in LetRec (second (go depth') <$> bs) (go depth' body) + Match scrute cases -> Match (scrute, depth) (tweak <$> cases) where + tweak (pat, boundVars, guard, rhs) = let + depth' = depth + length boundVars + in (pat, boundVars, go depth' <$> guard, go depth' rhs) + -- All the other cases just leave depth alone and recurse + Apply f args -> Apply (go depth f) ((,depth) <$> args) + Handle f body -> Handle (f,depth) (go depth body) + If c a b -> If (c,depth) (go depth a) (go depth b) + And a b -> And (a,depth) (go depth b) + Or a b -> Or (a,depth) (go depth b) + ir -> (,depth) <$> ir + +-- Given an environment mapping of de bruijn indices to values, specialize +-- the given `IR` by replacing slot lookups with the provided values. +specializeIR :: Map Int (Value e cont) -> IR' a (Z e cont) -> IR' a (Z e cont) +specializeIR env ir = let + ir' = annotateDepth ir + go (s@(Slot i), depth) = maybe s Val $ Map.lookup (i - depth) env + go (s@(LazySlot i), depth) = maybe s Val $ Map.lookup (i - depth) env + go (s,_) = s + in go <$> ir' + +compile :: (Show e, Show cont) => CompilationEnv e cont -> Term Symbol -> IR e cont +compile env t = compile0 env [] + (ABT.rewriteDown ANF.minimizeCyclesOrCrash $ Term.vmap toSymbolC t) + +freeVars :: [(SymbolC,a)] -> Term SymbolC -> Set SymbolC +freeVars bound t = + -- let fv = trace "free:" . traceShowId $ ABT.freeVars t + -- bv = trace "bound:" . traceShowId $ Set.fromList (fst <$> bound) + -- in trace "difference:" . traceShowId $ fv `Set.difference` bv + ABT.freeVars t `Set.difference` Set.fromList (fst <$> bound) + +-- Main compilation function - converts an arbitrary term to an `IR`. 
+-- Takes a way of resolving `Reference`s and an environment of variables, +-- some of which may already be precompiled to `V`s. (This occurs when +-- recompiling a function that is being partially applied) +compile0 + :: (Show e, Show cont) + => CompilationEnv e cont + -> [(SymbolC, Maybe (Value e cont))] + -> Term SymbolC + -> IR e cont +compile0 env bound t = + if Set.null fvs then + -- Annotates the term with this [(SymbolC, Maybe (Value e))] + -- where a `Just v` indicates an immediate value, and `Nothing` indicates + -- a stack lookup is needed at the stack index equal to the symbol's index. + -- ABT.annotateBound' produces an initial annotation consisting of the a + -- stack of bound variables, with the innermost bound variable at the top. + -- We tag each of these with `Nothing`, and then tack on the immediates at + -- the end. Their indices don't correspond to stack positions (although + -- they may reflect shadowing). + let wrangle vars = ((,Nothing) <$> vars) ++ bound + t0 = ANF.fromTerm' makeLazy t + _msg = "ANF form:\n" <> + TP.pretty' (Just 80) mempty t0 <> + "\n---------" + in go (wrangle <$> ABT.annotateBound' t0) + else + error $ "can't compile a term with free variables: " ++ show (toList fvs) + where + fvs = freeVars bound t + go t = case t of + Term.Nat' n -> Leaf . Val . N $ n + Term.Int' n -> Leaf . Val . I $ n + Term.Float' n -> Leaf . Val . F $ n + Term.Boolean' n -> Leaf . Val . B $ n + Term.Text' n -> Leaf . Val . T $ n + Term.Char' n -> Leaf . Val . C $ n + Term.TermLink' r -> Leaf . Val . TermLink $ r + Term.TypeLink' r -> Leaf . Val . TypeLink $ r + Term.And' x y -> And (toZ "and" t x) (go y) + Term.LamsNamed' vs body -> Leaf . 
Val $ + Lam (length vs) + (Specialize (ABT.hash t) (void t) []) + (compile0 env (ABT.annotation body) (void body)) + Term.Or' x y -> Or (toZ "or" t x) (go y) + Term.Let1Named' v b body -> Let (underlyingSymbol v) (go b) (go body) (freeSlots body) + Term.LetRecNamed' bs body -> + LetRec ((\(v,b) -> (underlyingSymbol v, go b)) <$> bs) (go body) + Term.Constructor' r cid -> ctorIR con (Term.constructor()) r cid where + con 0 r cid [] = Leaf . Val $ Data r cid [] + con _ r cid args = Construct r cid args + Term.Request' r cid -> ctorIR (const Request) (Term.request()) r cid + Term.Apps' f args -> Apply (go f) (map (toZ "apply-args" t) args) + Term.Handle' h body -> Handle (toZ "handle" t h) (go body) + Term.Ann' e _ -> go e + Term.Match' scrutinee cases -> + Match (toZ "match" t scrutinee) (compileCase <$> cases) + ABT.Abs1NA' _ body -> go body + Term.If' cond ifT ifF -> If (toZ "cond" t cond) (go ifT) (go ifF) + Term.Var' _ -> Leaf $ toZ "var" t t + Term.Ref' r -> case toIR env r of + Nothing -> error $ reportBug "B8920912182" msg where + msg = "The program being compiled referenced this definition " <> + show r <> "\nbut the compilation environment has no compiled form for this reference." + Just ir -> ir + Term.Sequence' vs -> MakeSequence . toList . fmap (toZ "sequence" t) $ vs + _ -> error $ "TODO - don't know how to compile this term:\n" + <> (CT.toPlain . P.render 80 . 
TP.pretty mempty $ void t) + where + compileVar _ v [] = unknown v + compileVar i v ((v',o):tl) + | v == v' = case o of + Nothing | isLazy v -> LazySlot i + | otherwise -> Slot i + Just v -> Val v + | isJust o = compileVar i v tl + | otherwise = compileVar (i + 1) v tl + + -- freeSlots :: _ -> Set Int + freeSlots t = let + vars = ABT.freeVars t + env = ABT.annotation t + in Set.fromList $ toList vars >>= \v -> case compileVar 0 v env of + Slot i -> [i] + LazySlot i -> [i] + _ -> [] + + ctorIR :: (Int -> R.Reference -> Int -> [Z e cont] -> IR e cont) + -> (R.Reference -> Int -> Term SymbolC) + -> R.Reference -> Int -> IR e cont + ctorIR con src r cid = case constructorArity env r cid of + Nothing -> error $ "the compilation env is missing info about how " + ++ "to compile this constructor: " ++ show (r, cid) ++ "\n" ++ show (constructorArity' env) + Just 0 -> con 0 r cid [] + -- Just 0 -> Leaf . Val $ Data "Optional" 0 + Just arity -> Leaf . Val $ Lam arity (FormClosure (ABT.hash s) s []) ir + where + s = src r cid + -- if `arity` is 1, then `Slot 0` is the sole argument. + -- if `arity` is 2, then `Slot 1` is the first arg, and `Slot 0` + -- get the second arg, etc. + -- Note: [1..10] is inclusive of both `1` and `10` + ir = con arity r cid (reverse $ map Slot [0 .. 
(arity - 1)]) + + unknown v = error $ "free variable during compilation: " ++ show v + toZ _msg t (Term.Var' v) = compileVar 0 v (ABT.annotation t) + toZ msg _t e = case go e of + Leaf v -> v + e -> error $ msg ++ ": ANF should have eliminated any non-Z arguments from: " ++ show e + compileCase (Term.MatchCase pat guard rhs@(ABT.unabs -> (vs,_))) = + (compilePattern pat, underlyingSymbol <$> vs, go <$> guard, go rhs) + + getSeqLength :: Pattern.Pattern -> Maybe Int + getSeqLength p = case p of + Pattern.SequenceLiteral ps -> Just (length ps) + Pattern.SequenceOp l op r -> case op of + Pattern.Snoc -> (+ 1) <$> getSeqLength l + Pattern.Cons -> (+ 1) <$> getSeqLength r + Pattern.Concat -> (+) <$> getSeqLength l <*> getSeqLength r + Pattern.As p -> getSeqLength p + _ -> Nothing + + compilePattern :: Pattern.Pattern -> Pattern + compilePattern pat = case pat of + Pattern.Unbound -> PatternIgnore + Pattern.Var -> PatternVar + Pattern.Boolean b -> PatternB b + Pattern.Int n -> PatternI n + Pattern.Nat n -> PatternN n + Pattern.Float n -> PatternF n + Pattern.Text t -> PatternT t + Pattern.Char c -> PatternC c + Pattern.Constructor r cid args -> PatternData r cid (compilePattern <$> args) + Pattern.As pat -> PatternAs (compilePattern pat) + Pattern.EffectPure p -> PatternPure (compilePattern p) + Pattern.EffectBind r cid args k -> PatternBind r cid (compilePattern <$> args) (compilePattern k) + Pattern.SequenceLiteral ps -> PatternSequenceLiteral (compilePattern <$> ps) + Pattern.SequenceOp l op r -> case op of + Pattern.Snoc -> PatternSequenceSnoc (compilePattern l) (compilePattern r) + Pattern.Cons -> PatternSequenceCons (compilePattern l) (compilePattern r) + Pattern.Concat -> fromMaybe concatErr ((concat Left <$> getSeqLength l) <|> (concat Right <$> getSeqLength r)) + where + concat :: (Int -> Either Int Int) -> Int -> Pattern + concat f i = PatternSequenceConcat (f i) (compilePattern l) (compilePattern r) + concatErr = error $ "At least one side of a concat must 
have a constant length. " <> + "This code should never be reached as this constraint is " <> + "applied in the typechecker." + + _ -> error $ "todo - compilePattern " ++ show pat + +type DS = StateT (Map Symbol (Term Symbol), Set RefID) IO + +runDS :: DS (Term Symbol) -> IO (Term Symbol) +runDS ds = do + (body, (letRecBindings, _)) <- runStateT ds mempty + pure $ if null letRecBindings then body + else Term.letRec' False (Map.toList letRecBindings) body + +decompile :: (External e, External cont) => Value e cont -> IO (Term Symbol) +decompile v = runDS (decompileImpl v) + +decompileImpl :: + (External e, External cont) => Value e cont -> DS (Term Symbol) +decompileImpl v = case v of + I n -> pure $ Term.int () n + N n -> pure $ Term.nat () n + F n -> pure $ Term.float () n + B b -> pure $ Term.boolean () b + T t -> pure $ Term.text () t + C c -> pure $ Term.char () c + Bs bs -> pure $ Term.builtin() "Bytes.fromList" `Term.apps'` [bsv] where + bsv = Term.seq'() . Sequence.fromList $ + [ Term.nat() (fromIntegral w8) | w8 <- Bytes.toWord8s bs ] + Lam _ f _ -> decompileUnderapplied f + Data r cid args -> + Term.apps' <$> pure (Term.constructor() r cid) + <*> traverse decompileImpl (toList args) + Sequence vs -> Term.seq' () <$> traverse decompileImpl vs + Ref id symbol ioref -> do + seen <- gets snd + symbol <- pure $ Var.freshenId (fromIntegral id) symbol + if Set.member id seen then + pure $ Term.var () symbol + else do + modify (second $ Set.insert id) + t <- decompileImpl =<< lift (readIORef ioref) + modify (first $ Map.insert symbol t) + pure (Term.etaNormalForm t) + Cont k -> liftIO $ decompileExternal k + Pure a -> do + -- `{a}` doesn't have a term syntax, so it's decompiled as + -- `handle (x -> x) in a`, which has the type `Request ambient e a` + a <- decompileImpl a + pure $ Term.handle() id a + Requested (Req r cid vs k) -> do + -- `{req a b -> k}` doesn't have a term syntax, so it's decompiled as + -- `handle (x -> x) in k (req a b)` + vs <- traverse 
decompileImpl vs + kt <- liftIO $ decompileExternal k + pure . Term.handle() id $ + Term.apps' kt [Term.apps' (Term.request() r cid) vs] + UninitializedLetRecSlot _b _bs _body -> + error "unpossible - decompile UninitializedLetRecSlot" + TermLink r -> pure $ Term.termLink() r + TypeLink r -> pure $ Term.typeLink() r + where + idv = Var.named "x" + id = Term.lam () idv (Term.var() idv) + + +boundVarsIR :: IR e cont -> Set Symbol +boundVarsIR = \case + Let v b body _ -> Set.singleton v <> boundVarsIR b <> boundVarsIR body + LetRec bs body -> Set.fromList (fst <$> bs) <> foldMap (boundVarsIR . snd) bs <> boundVarsIR body + Apply lam _ -> boundVarsIR lam + Handle _ body -> boundVarsIR body + If _ t f -> foldMap boundVarsIR [t,f] + And _ b -> boundVarsIR b + Or _ b -> boundVarsIR b + Match _ cases -> foldMap doCase cases + where doCase (_, _, b, body) = maybe mempty boundVarsIR b <> boundVarsIR body + -- I added all these cases for exhaustiveness checking in the future, + -- and also because I needed the patterns for decompileIR anyway. + -- Sure is ugly though. This ghc doesn't support Language MultiCase. + -- I want to be able to say `_ -> mempty` where _ refers to exactly the other + -- cases that existed at the time I wrote it! 
+ Leaf _ -> mempty + AddI _ _ -> mempty + SubI _ _ -> mempty + MultI _ _ -> mempty + DivI _ _ -> mempty + GtI _ _ -> mempty + LtI _ _ -> mempty + GtEqI _ _ -> mempty + LtEqI _ _ -> mempty + EqI _ _ -> mempty + SignumI _ -> mempty + NegateI _ -> mempty + Truncate0I _ -> mempty + ModI _ _ -> mempty + PowI _ _ -> mempty + ShiftRI _ _ -> mempty + ShiftLI _ _ -> mempty + BitAndI _ _ -> mempty + BitOrI _ _ -> mempty + BitXorI _ _ -> mempty + ComplementI _ -> mempty + TrailZeroI _ -> mempty + LeadZeroI _ -> mempty + AddN _ _ -> mempty + DropN _ _ -> mempty + SubN _ _ -> mempty + MultN _ _ -> mempty + DivN _ _ -> mempty + GtN _ _ -> mempty + LtN _ _ -> mempty + GtEqN _ _ -> mempty + LtEqN _ _ -> mempty + EqN _ _ -> mempty + ModN _ _ -> mempty + PowN _ _ -> mempty + ShiftLN _ _ -> mempty + ShiftRN _ _ -> mempty + ToIntN _ -> mempty + BitAndN _ _ -> mempty + BitOrN _ _ -> mempty + BitXorN _ _ -> mempty + ComplementN _ -> mempty + LeadZeroN _ -> mempty + TrailZeroN _ -> mempty + AddF _ _ -> mempty + SubF _ _ -> mempty + MultF _ _ -> mempty + DivF _ _ -> mempty + GtF _ _ -> mempty + LtF _ _ -> mempty + GtEqF _ _ -> mempty + LtEqF _ _ -> mempty + EqF _ _ -> mempty + EqU _ _ -> mempty + CompareU _ _ -> mempty + Bug _ -> mempty + Todo _ -> mempty + MakeSequence _ -> mempty + Construct{} -> mempty + Request{} -> mempty + Not{} -> mempty + +class External e where + decompileExternal :: e -> IO (Term Symbol) + +decompileIR + :: (External e, External cont) => [Symbol] -> IR e cont -> DS (Term Symbol) +decompileIR stack = \case + -- added all these cases for exhaustiveness checking in the future, + -- and also because I needed the patterns for decompileIR anyway. 
+ Leaf z -> decompileZ z + AddI x y -> builtin "Int.+" [x,y] + SubI x y -> builtin "Int.-" [x,y] + MultI x y -> builtin "Int.*" [x,y] + DivI x y -> builtin "Int./" [x,y] + GtI x y -> builtin "Int.>" [x,y] + LtI x y -> builtin "Int.<" [x,y] + GtEqI x y -> builtin "Int.>=" [x,y] + LtEqI x y -> builtin "Int.<=" [x,y] + EqI x y -> builtin "Int.==" [x,y] + SignumI x -> builtin "Int.signum" [x] + NegateI x -> builtin "Int.negate" [x] + Truncate0I x -> builtin "Int.truncate0" [x] + ModI x y -> builtin "Int.mod" [x,y] + PowI x y -> builtin "Int.pow" [x,y] + ShiftRI x y -> builtin "Int.shiftRight" [x,y] + ShiftLI x y -> builtin "Int.shiftLeft" [x,y] + BitAndI x y -> builtin "Int.and" [x,y] + BitOrI x y -> builtin "Int.or" [x,y] + BitXorI x y -> builtin "Int.xor" [x,y] + ComplementI x -> builtin "Int.complement" [x] + LeadZeroI x -> builtin "Int.leadingZeros" [x] + TrailZeroI x -> builtin "Int.trailingZeros" [x] + AddN x y -> builtin "Nat.+" [x,y] + DropN x y -> builtin "Nat.drop" [x,y] + SubN x y -> builtin "Nat.sub" [x,y] + MultN x y -> builtin "Nat.*" [x,y] + DivN x y -> builtin "Nat./" [x,y] + GtN x y -> builtin "Nat.>" [x,y] + LtN x y -> builtin "Nat.<" [x,y] + GtEqN x y -> builtin "Nat.>=" [x,y] + LtEqN x y -> builtin "Nat.<=" [x,y] + EqN x y -> builtin "Nat.==" [x,y] + ModN x y -> builtin "Nat.mod" [x,y] + ToIntN x -> builtin "Nat.toInt" [x] + PowN x y -> builtin "Nat.pow" [x,y] + ShiftRN x y -> builtin "Nat.shiftRight" [x,y] + ShiftLN x y -> builtin "Nat.shiftLeft" [x,y] + BitAndN x y -> builtin "Nat.and" [x,y] + BitOrN x y -> builtin "Nat.or" [x,y] + BitXorN x y -> builtin "Nat.xor" [x,y] + ComplementN x -> builtin "Nat.complement" [x] + LeadZeroN x -> builtin "Nat.leadingZeros" [x] + TrailZeroN x -> builtin "Nat.trailingZeros" [x] + AddF x y -> builtin "Float.+" [x,y] + SubF x y -> builtin "Float.-" [x,y] + MultF x y -> builtin "Float.*" [x,y] + DivF x y -> builtin "Float./" [x,y] + GtF x y -> builtin "Float.>" [x,y] + LtF x y -> builtin "Float.<" [x,y] + GtEqF x y 
-> builtin "Float.>=" [x,y] + LtEqF x y -> builtin "Float.<=" [x,y] + EqF x y -> builtin "Float.==" [x,y] + EqU x y -> builtin "Universal.==" [x,y] + CompareU x y -> builtin "Universal.compare" [x,y] + Bug x -> builtin "bug" [x] + Todo x -> builtin "todo" [x] + Let v b body _ -> do + b' <- decompileIR stack b + body' <- decompileIR (v:stack) body + pure $ Term.let1_ False [(v, b')] body' + LetRec bs body -> do + let stack' = reverse (fmap fst bs) ++ stack + secondM f (x,y) = (x,) <$> f y + bs' <- traverse (secondM $ decompileIR stack') bs + body' <- decompileIR stack' body + pure $ Term.letRec' False bs' body' + MakeSequence args -> + Term.seq() <$> traverse decompileZ args + Apply lam args -> + Term.apps' <$> decompileIR stack lam <*> traverse decompileZ args + Construct r cid args -> + Term.apps' (Term.constructor() r cid) <$> traverse decompileZ args + Request r cid args -> + Term.apps' (Term.request() r cid) <$> traverse decompileZ args + Handle h body -> + Term.handle() <$> decompileZ h <*> decompileIR stack body + If c t f -> + Term.iff() <$> decompileZ c <*> decompileIR stack t <*> decompileIR stack f + And x y -> + Term.and() <$> decompileZ x <*> decompileIR stack y + Or x y -> + Term.or() <$> decompileZ x <*> decompileIR stack y + Not x -> builtin "Boolean.not" [x] + Match scrutinee cases -> + Term.match () <$> decompileZ scrutinee <*> traverse decompileMatchCase cases + where + builtin :: (External e, External cont) => Text -> [Z e cont] -> DS (Term Symbol) + builtin t args = + Term.apps' (Term.ref() (R.Builtin t)) <$> traverse decompileZ args + at :: Pos -> Term Symbol + at i = Term.var() (stack !! 
i) + decompileZ :: (External e, External cont) => Z e cont -> DS (Term Symbol) + decompileZ = \case + Slot p -> pure $ at p + LazySlot p -> pure $ at p + Val v -> decompileImpl v + External e -> liftIO $ decompileExternal e + decompilePattern :: Pattern -> Pattern.Pattern + decompilePattern = \case + PatternI i -> Pattern.Int i + PatternN n -> Pattern.Nat n + PatternF f -> Pattern.Float f + PatternB b -> Pattern.Boolean b + PatternT t -> Pattern.Text t + PatternC c -> Pattern.Char c + PatternData r cid pats -> + Pattern.Constructor r cid (d <$> pats) + PatternSequenceLiteral ps -> Pattern.SequenceLiteral $ decompilePattern <$> ps + PatternSequenceCons l r -> Pattern.SequenceOp (decompilePattern l) Pattern.Cons (decompilePattern r) + PatternSequenceSnoc l r -> Pattern.SequenceOp (decompilePattern l) Pattern.Snoc (decompilePattern r) + PatternSequenceConcat _ l r -> Pattern.SequenceOp (decompilePattern l) Pattern.Concat (decompilePattern r) + PatternPure pat -> Pattern.EffectPure (d pat) + PatternBind r cid pats k -> + Pattern.EffectBind r cid (d <$> pats) (d k) + PatternAs pat -> Pattern.As (d pat) + PatternIgnore -> Pattern.Unbound + PatternVar -> Pattern.Var + d = decompilePattern + decompileMatchCase (pat, vars, guard, body) = do + let stack' = reverse vars ++ stack + guard' <- traverse (decompileIR stack') guard + body' <- decompileIR stack' body + pure $ Term.MatchCase (d pat) guard' body' + +instance (Show e, Show cont) => Show (Z e cont) where + show (LazySlot i) = "'#" ++ show i + show (Slot i) = "#" ++ show i + show (Val v) = show v + show (External e) = "External:" <> show e + +freeSlots :: IR e cont -> Set Int +freeSlots ir = case ir of + Let _ _ _ free -> decrementFrees free + LetRec bs body -> let + n = length bs + in foldMap (decrementFreesBy n . freeSlots . 
snd) bs <> + decrementFreesBy n (freeSlots body) + Apply lam args -> freeSlots lam <> foldMap free args + Handle h body -> free h <> freeSlots body + If c t f -> free c <> freeSlots t <> freeSlots f + And x y -> free x <> freeSlots y + Or x y -> free x <> freeSlots y + Match scrutinee cases -> free scrutinee <> foldMap freeInCase cases where + freeInCase (_pat, bound, guard, rhs) = let + n = length bound + in decrementFreesBy n (freeSlots rhs) <> + maybe mempty (decrementFreesBy n . freeSlots) guard + _ -> foldMap free (toList ir) + where + free z = case z of + Slot i -> Set.singleton i + LazySlot i -> Set.singleton i + _ -> Set.empty + +-- todo: could make this more efficient +decrementFreesBy :: Int -> Set Int -> Set Int +decrementFreesBy 0 s = s +decrementFreesBy n s = decrementFreesBy (n-1) (decrementFrees s) + +decrementFrees :: Set Int -> Set Int +decrementFrees frees = + Set.map (\x -> x - 1) (Set.delete 0 frees) + +let' :: Symbol -> IR e cont -> IR e cont -> IR e cont +let' name binding body = + Let name binding body (decrementFrees $ freeSlots body) + +builtins :: Map R.Reference (IR e cont) +builtins = Map.fromList $ arity0 <> arityN + where + -- slot = Leaf . Slot + val = Leaf . Val + underapply name = + let r = Term.ref() $ R.Builtin name :: Term SymbolC + in FormClosure (ABT.hash r) r [] + var = Var.named "x" + arity0 = [ (R.Builtin name, val value) | (name, value) <- + [ ("Text.empty", T "") + , ("Sequence.empty", Sequence mempty) + , ("Bytes.empty", Bs mempty) + ] ] + arityN = [ (R.Builtin name, Leaf . 
Val $ Lam arity (underapply name) ir) | + (name, arity, ir) <- + [ ("Int.+", 2, AddI (Slot 1) (Slot 0)) + , ("Int.-", 2, SubI (Slot 1) (Slot 0)) + , ("Int.*", 2, MultI (Slot 1) (Slot 0)) + , ("Int./", 2, DivI (Slot 1) (Slot 0)) + , ("Int.<", 2, LtI (Slot 1) (Slot 0)) + , ("Int.>", 2, GtI (Slot 1) (Slot 0)) + , ("Int.<=", 2, LtEqI (Slot 1) (Slot 0)) + , ("Int.>=", 2, GtEqI (Slot 1) (Slot 0)) + , ("Int.==", 2, EqI (Slot 1) (Slot 0)) + , ("Int.and", 2, BitAndI (Slot 1) (Slot 0)) + , ("Int.or", 2, BitOrI (Slot 1) (Slot 0)) + , ("Int.xor", 2, BitXorI (Slot 1) (Slot 0)) + , ("Int.complement", 1, ComplementI (Slot 0)) + , ("Int.increment", 1, AddI (Val (I 1)) (Slot 0)) + , ("Int.signum", 1, SignumI (Slot 0)) + , ("Int.negate", 1, NegateI (Slot 0)) + , ("Int.truncate0", 1, Truncate0I (Slot 0)) + , ("Int.mod", 2, ModI (Slot 1) (Slot 0)) + , ("Int.pow", 2, PowI (Slot 1) (Slot 0)) + , ("Int.shiftLeft", 2, ShiftLI (Slot 1) (Slot 0)) + , ("Int.shiftRight", 2, ShiftRI (Slot 1) (Slot 0)) + , ("Int.leadingZeros", 1, LeadZeroI (Slot 0)) + , ("Int.trailingZeros", 1, TrailZeroI (Slot 0)) + , ("Int.isEven", 1, let' var (ModI (Slot 0) (Val (I 2))) + (EqI (Val (I 0)) (Slot 0))) + , ("Int.isOdd", 1, let' var (ModI (Slot 0) (Val (I 2))) + (let' var (EqI (Val (I 0)) (Slot 0)) + (Not (Slot 0)))) + + , ("Nat.+", 2, AddN (Slot 1) (Slot 0)) + , ("Nat.drop", 2, DropN (Slot 1) (Slot 0)) + , ("Nat.sub", 2, SubN (Slot 1) (Slot 0)) + , ("Nat.*", 2, MultN (Slot 1) (Slot 0)) + , ("Nat./", 2, DivN (Slot 1) (Slot 0)) + , ("Nat.<", 2, LtN (Slot 1) (Slot 0)) + , ("Nat.>", 2, GtN (Slot 1) (Slot 0)) + , ("Nat.<=", 2, LtEqN (Slot 1) (Slot 0)) + , ("Nat.>=", 2, GtEqN (Slot 1) (Slot 0)) + , ("Nat.==", 2, EqN (Slot 1) (Slot 0)) + , ("Nat.and", 2, BitAndN (Slot 1) (Slot 0)) + , ("Nat.or", 2, BitOrN (Slot 1) (Slot 0)) + , ("Nat.xor", 2, BitXorN (Slot 1) (Slot 0)) + , ("Nat.complement", 1, ComplementN (Slot 0)) + , ("Nat.increment", 1, AddN (Val (N 1)) (Slot 0)) + , ("Nat.mod", 2, ModN (Slot 1) (Slot 0)) + , 
("Nat.pow", 2, PowN (Slot 1) (Slot 0)) + , ("Nat.shiftLeft", 2, ShiftLN (Slot 1) (Slot 0)) + , ("Nat.shiftRight", 2, ShiftRN (Slot 1) (Slot 0)) + , ("Nat.leadingZeros", 1, LeadZeroN (Slot 0)) + , ("Nat.trailingZeros", 1, TrailZeroN (Slot 0)) + , ("Nat.isEven", 1, let' var (ModN (Slot 0) (Val (N 2))) + (EqN (Val (N 0)) (Slot 0))) + , ("Nat.isOdd", 1, let' var (ModN (Slot 0) (Val (N 2))) + (let' var (EqN (Val (N 0)) (Slot 0)) + (Not (Slot 0)))) + , ("Nat.toInt", 1, ToIntN (Slot 0)) + + , ("Float.+", 2, AddF (Slot 1) (Slot 0)) + , ("Float.-", 2, SubF (Slot 1) (Slot 0)) + , ("Float.*", 2, MultF (Slot 1) (Slot 0)) + , ("Float./", 2, DivF (Slot 1) (Slot 0)) + , ("Float.<", 2, LtF (Slot 1) (Slot 0)) + , ("Float.>", 2, GtF (Slot 1) (Slot 0)) + , ("Float.<=", 2, LtEqF (Slot 1) (Slot 0)) + , ("Float.>=", 2, GtEqF (Slot 1) (Slot 0)) + , ("Float.==", 2, EqF (Slot 1) (Slot 0)) + + , ("Universal.==", 2, EqU (Slot 1) (Slot 0)) + , ("Universal.compare", 2, CompareU (Slot 1) (Slot 0)) + , ("Universal.<", 2, let' var (CompareU (Slot 1) (Slot 0)) + (LtI (Slot 0) (Val (I 0)))) + , ("Universal.>", 2, let' var (CompareU (Slot 1) (Slot 0)) + (GtI (Slot 0) (Val (I 0)))) + , ("Universal.>=", 2, let' var (CompareU (Slot 1) (Slot 0)) + (GtEqI (Slot 0) (Val (I 0)))) + , ("Universal.<=", 2, let' var (CompareU (Slot 1) (Slot 0)) + (LtEqI (Slot 0) (Val (I 0)))) + , ("Boolean.not", 1, Not (Slot 0)) + , ("bug", 1, Bug (Slot 0)) + , ("todo", 1, Todo (Slot 0)) + ]] + +-- boring instances + +instance Eq SymbolC where + SymbolC _ s == SymbolC _ s2 = s == s2 + +instance Ord SymbolC where + SymbolC _ s `compare` SymbolC _ s2 = s `compare` s2 + +instance ABT.Var SymbolC where + freshIn vs (SymbolC i s) = + SymbolC i (ABT.freshIn (Set.map underlyingSymbol vs) s) + +instance Var SymbolC where + typed s = SymbolC False (Var.typed s) + typeOf (SymbolC _ s) = Var.typeOf s + freshId (SymbolC _ s) = Var.freshId s + freshenId n (SymbolC i s) = SymbolC i (Var.freshenId n s) + +instance (Show e, Show cont) => Show 
(Value e cont) where + show (I n) = show n + show (F n) = show n + show (N n) = show n + show (B b) = show b + show (T t) = show t + show (C c) = show c + show (Bs bs) = show bs + show (Lam n e ir) = "(Lam " <> show n <> " " <> show e <> " (" <> show ir <> "))" + show (Data r cid vs) = "(Data " <> show r <> " " <> show cid <> " " <> show vs <> ")" + show (Sequence vs) = "[" <> intercalateMap ", " show vs <> "]" + show (Ref n s _) = "(Ref " <> show n <> " " <> show s <> ")" + show (TermLink r) = "(TermLink " <> show r <> ")" + show (TypeLink r) = "(TypeLink " <> show r <> ")" + show (Pure v) = "(Pure " <> show v <> ")" + show (Requested r) = "(Requested " <> show r <> ")" + show (Cont ir) = "(Cont " <> show ir <> ")" + show (UninitializedLetRecSlot b bs _body) = + "(UninitializedLetRecSlot " <> show b <> " in " <> show (fst <$> bs)<> ")" + +compilationEnv0 :: CompilationEnv e cont +compilationEnv0 = CompilationEnv builtins mempty + +instance Semigroup (CompilationEnv e cont) where (<>) = mappend + +instance Monoid (CompilationEnv e cont) where + mempty = CompilationEnv mempty mempty + mappend c1 c2 = CompilationEnv ir ctor where + ir = toIR' c1 <> toIR' c2 + ctor = constructorArity' c1 <> constructorArity' c2 + +instance (CyclicEq e, CyclicEq cont) => CyclicEq (UnderapplyStrategy e cont) where + cyclicEq h1 h2 (FormClosure hash1 _ vs1) (FormClosure hash2 _ vs2) = + if hash1 == hash2 then cyclicEq h1 h2 vs1 vs2 + else pure False + cyclicEq h1 h2 (Specialize hash1 _ vs1) (Specialize hash2 _ vs2) = + if hash1 == hash2 then cyclicEq h1 h2 (snd <$> vs1) (snd <$> vs2) + else pure False + cyclicEq _ _ _ _ = pure False + +instance (CyclicEq e, CyclicEq cont) => CyclicEq (Req e cont) where + cyclicEq h1 h2 (Req r1 c1 vs1 k1) (Req r2 c2 vs2 k2) = + if r1 == r2 && c1 == c2 then do + b <- cyclicEq h1 h2 vs1 vs2 + if b then cyclicEq h1 h2 k1 k2 + else pure False + else pure False + +instance (CyclicEq e, CyclicEq cont) => CyclicEq (Value e cont) where + cyclicEq _ _ (I x) (I y) 
= pure (x == y) + cyclicEq _ _ (F x) (F y) = pure (x == y) + cyclicEq _ _ (N x) (N y) = pure (x == y) + cyclicEq _ _ (B x) (B y) = pure (x == y) + cyclicEq _ _ (T x) (T y) = pure (x == y) + cyclicEq _ _ (C x) (C y) = pure (x == y) + cyclicEq _ _ (Bs x) (Bs y) = pure (x == y) + cyclicEq _ _ (TermLink x) (TermLink y) = pure (x == y) + cyclicEq _ _ (TypeLink x) (TypeLink y) = pure (x == y) + cyclicEq h1 h2 (Lam arity1 us _) (Lam arity2 us2 _) = + if arity1 == arity2 then cyclicEq h1 h2 us us2 + else pure False + cyclicEq h1 h2 (Data r1 c1 vs1) (Data r2 c2 vs2) = + if r1 == r2 && c1 == c2 then cyclicEq h1 h2 vs1 vs2 + else pure False + cyclicEq h1 h2 (Sequence v1) (Sequence v2) = cyclicEq h1 h2 v1 v2 + cyclicEq h1 h2 (Ref r1 _ io1) (Ref r2 _ io2) = + if io1 == io2 then pure True + else do + a <- CyT.lookup r1 h1 + b <- CyT.lookup r2 h2 + case (a,b) of + -- We haven't encountered these refs before, descend into them and + -- compare contents. + (Nothing, Nothing) -> do + CyT.insertEnd r1 h1 + CyT.insertEnd r2 h2 + r1 <- readIORef io1 + r2 <- readIORef io2 + cyclicEq h1 h2 r1 r2 + -- We've encountered these refs before, compare the positions where + -- they were first encountered + (Just r1, Just r2) -> pure (r1 == r2) + _ -> pure False + cyclicEq h1 h2 (Pure a) (Pure b) = cyclicEq h1 h2 a b + cyclicEq h1 h2 (Requested r1) (Requested r2) = cyclicEq h1 h2 r1 r2 + cyclicEq h1 h2 (Cont k1) (Cont k2) = cyclicEq h1 h2 k1 k2 + cyclicEq _ _ _ _ = pure False + +constructorId :: Value e cont -> Int +constructorId v = case v of + I _ -> 0 + F _ -> 1 + N _ -> 2 + B _ -> 3 + T _ -> 4 + Bs _ -> 5 + Lam{} -> 6 + Data{} -> 7 + Sequence _ -> 8 + Pure _ -> 9 + Requested _ -> 10 + Ref{} -> 11 + Cont _ -> 12 + C _ -> 13 + UninitializedLetRecSlot{} -> 14 + TermLink _ -> 15 + TypeLink _ -> 16 + +instance (CyclicOrd e, CyclicOrd cont) => CyclicOrd (UnderapplyStrategy e cont) where + cyclicOrd h1 h2 (FormClosure hash1 _ vs1) (FormClosure hash2 _ vs2) = + COrd.bothOrd' h1 h2 hash1 hash2 vs1 vs2 
+ cyclicOrd h1 h2 (Specialize hash1 _ vs1) (Specialize hash2 _ vs2) = + COrd.bothOrd' h1 h2 hash1 hash2 (map snd vs1) (map snd vs2) + cyclicOrd _ _ FormClosure{} _ = pure LT + cyclicOrd _ _ Specialize{} _ = pure GT + +instance (CyclicOrd e, CyclicOrd cont) => CyclicOrd (Req e cont) where + cyclicOrd h1 h2 (Req r1 c1 vs1 k1) (Req r2 c2 vs2 k2) = case compare r1 r2 of + EQ -> do + o <- COrd.bothOrd' h1 h2 c1 c2 vs1 vs2 + o <- case o of + EQ -> cyclicOrd h1 h2 k1 k2 + _ -> pure o + case o of + EQ -> pure (r1 `compare` r2) + _ -> pure o + c -> pure c + +instance (CyclicOrd e, CyclicOrd cont) => CyclicOrd (Value e cont) where + cyclicOrd _ _ (I x) (I y) = pure (x `compare` y) + cyclicOrd _ _ (F x) (F y) = pure (x `compare` y) + cyclicOrd _ _ (N x) (N y) = pure (x `compare` y) + cyclicOrd _ _ (B x) (B y) = pure (x `compare` y) + cyclicOrd _ _ (T x) (T y) = pure (x `compare` y) + cyclicOrd _ _ (C x) (C y) = pure (x `compare` y) + cyclicOrd _ _ (Bs x) (Bs y) = pure (x `compare` y) + cyclicOrd _ _ (TermLink x) (TermLink y) = pure (x `compare` y) + cyclicOrd _ _ (TypeLink x) (TypeLink y) = pure (x `compare` y) + cyclicOrd h1 h2 (Lam arity1 us _) (Lam arity2 us2 _) = + COrd.bothOrd' h1 h2 arity1 arity2 us us2 + cyclicOrd h1 h2 (Data r1 c1 vs1) (Data r2 c2 vs2) = + COrd.bothOrd' h1 h2 c1 c2 vs1 vs2 >>= \o -> case o of + EQ -> pure (r1 `compare` r2) + _ -> pure o + cyclicOrd h1 h2 (Sequence v1) (Sequence v2) = cyclicOrd h1 h2 v1 v2 + cyclicOrd h1 h2 (Ref r1 _ io1) (Ref r2 _ io2) = + if io1 == io2 then pure EQ + else do + a <- CyT.lookup r1 h1 + b <- CyT.lookup r2 h2 + case (a,b) of + -- We haven't encountered these refs before, descend into them and + -- compare contents. 
+ (Nothing, Nothing) -> do + CyT.insertEnd r1 h1 + CyT.insertEnd r2 h2 + r1 <- readIORef io1 + r2 <- readIORef io2 + cyclicOrd h1 h2 r1 r2 + -- We've encountered these refs before, compare the positions where + -- they were first encountered + (Just r1, Just r2) -> pure (r1 `compare` r2) + _ -> pure $ a `compare` b + cyclicOrd h1 h2 (Pure a) (Pure b) = cyclicOrd h1 h2 a b + cyclicOrd h1 h2 (Requested r1) (Requested r2) = cyclicOrd h1 h2 r1 r2 + cyclicOrd h1 h2 (Cont k1) (Cont k2) = cyclicOrd h1 h2 k1 k2 + cyclicOrd _ _ v1 v2 = pure $ constructorId v1 `compare` constructorId v2 diff --git a/parser-typechecker/src/Unison/Runtime/Interface.hs b/parser-typechecker/src/Unison/Runtime/Interface.hs new file mode 100644 index 0000000000..c502b326b8 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Interface.hs @@ -0,0 +1,225 @@ +{-# language DataKinds #-} +{-# language PatternGuards #-} +{-# language ScopedTypeVariables #-} + +module Unison.Runtime.Interface + ( startRuntime + ) where + +import Control.Exception (try) +import Control.Monad (foldM, (<=<)) + +import Data.Bifunctor (first,second) +import Data.Foldable +import Data.IORef +import Data.Word (Word64) + +import qualified Data.Map.Strict as Map + +import qualified Unison.Term as Tm +import Unison.Var (Var) + +import Unison.DataDeclaration (declFields) +import qualified Unison.LabeledDependency as RF +import Unison.Reference (Reference) +import qualified Unison.Reference as RF + +import Unison.Util.EnumContainers as EC + +import Unison.Codebase.CodeLookup (CodeLookup(..)) +import Unison.Codebase.Runtime (Runtime(..), Error) +import Unison.Codebase.MainTerm (builtinMain) + +import Unison.Parser (Ann(External)) +import Unison.PrettyPrintEnv +import Unison.TermPrinter + +import Unison.Runtime.ANF +import Unison.Runtime.Builtin +import Unison.Runtime.Decompile +import Unison.Runtime.Exception +import Unison.Runtime.Machine (SEnv(SEnv), apply0) +import Unison.Runtime.MCode +import Unison.Runtime.Pattern +import 
Unison.Runtime.Stack + +type Term v = Tm.Term v () + +data EvalCtx v + = ECtx + { freshTy :: Int + , freshTm :: Word64 + , refTy :: Map.Map RF.Reference RTag + , refTm :: Map.Map RF.Reference Word64 + , combs :: EnumMap Word64 Comb + , dspec :: DataSpec + , backrefTy :: EnumMap RTag RF.Reference + , backrefTm :: EnumMap Word64 (Term v) + , backrefComb :: EnumMap Word64 RF.Reference + } + +uncurryDspec :: DataSpec -> Map.Map (Reference,Int) Int +uncurryDspec = Map.fromList . concatMap f . Map.toList + where + f (r,l) = zipWith (\n c -> ((r,n),c)) [0..] $ either id id l + +numberLetRec :: Word64 -> Term v -> EnumMap Word64 (Term v) +numberLetRec frsh (Tm.LetRecNamed' bs e) + = mapFromList . zip [frsh..] $ e : map snd bs +numberLetRec _ _ = error "impossible" + +baseContext :: forall v. Var v => EvalCtx v +baseContext + = ECtx + { freshTy = fty + , freshTm = ftm + , refTy = builtinTypeNumbering + , refTm = builtinTermNumbering + , combs = emitComb @v mempty <$> numberedTermLookup + , dspec = builtinDataSpec + , backrefTy = builtinTypeBackref + , backrefTm = Tm.ref () <$> builtinTermBackref + , backrefComb = builtinTermBackref + } + where + ftm = 1 + maximum builtinTermNumbering + fty = (1+) . 
fromEnum $ maximum builtinTypeNumbering + +-- allocTerm +-- :: Var v +-- => CodeLookup v m () +-- -> EvalCtx v +-- -> RF.Reference +-- -> IO (EvalCtx v) +-- allocTerm _ _ b@(RF.Builtin _) +-- = die $ "Unknown builtin term reference: " ++ show b +-- allocTerm _ _ (RF.DerivedId _) +-- = die $ "TODO: allocTerm: hash reference" + +allocType + :: EvalCtx v + -> RF.Reference + -> Either [Int] [Int] + -> IO (EvalCtx v) +allocType _ b@(RF.Builtin _) _ + = die $ "Unknown builtin type reference: " ++ show b +allocType ctx r cons + = pure $ ctx + { refTy = Map.insert r rt $ refTy ctx + , backrefTy = mapInsert rt r $ backrefTy ctx + , dspec = Map.insert r cons $ dspec ctx + , freshTy = fresh + } + where + (rt, fresh) + | Just rt <- Map.lookup r $ refTy ctx = (rt, freshTy ctx) + | frsh <- freshTy ctx = (toEnum $ frsh, frsh + 1) + +collectDeps + :: Var v + => CodeLookup v IO () + -> Term v + -> IO ([(Reference, Either [Int] [Int])], [Reference]) +collectDeps cl tm + = (,tms) <$> traverse getDecl tys + where + chld = toList $ Tm.labeledDependencies tm + categorize = either (first . (:)) (second . (:)) . RF.toReference + (tys, tms) = foldr categorize ([],[]) chld + getDecl ty@(RF.DerivedId i) = + (ty,) . maybe (Right []) declFields + <$> getTypeDeclaration cl i + getDecl r = pure (r,Right []) + +loadDeps + :: Var v + => CodeLookup v IO () + -> EvalCtx v + -> Term v + -> IO (EvalCtx v) +loadDeps cl ctx tm = do + (tys, _ ) <- collectDeps cl tm + -- TODO: terms + foldM (uncurry . 
allocType) ctx $ filter p tys + where + p (r@RF.DerivedId{},_) + = r `Map.notMember` dspec ctx + || r `Map.notMember` refTy ctx + p _ = False + +addCombs :: EnumMap Word64 Comb -> EvalCtx v -> EvalCtx v +addCombs m ctx = ctx { combs = m <> combs ctx } + +addTermBackrefs :: EnumMap Word64 (Term v) -> EvalCtx v -> EvalCtx v +addTermBackrefs refs ctx = ctx { backrefTm = refs <> backrefTm ctx } + +refresh :: Word64 -> EvalCtx v -> EvalCtx v +refresh w ctx = ctx { freshTm = w } + +ref :: Ord k => Show k => Map.Map k v -> k -> v +ref m k + | Just x <- Map.lookup k m = x + | otherwise = error $ "unknown reference: " ++ show k + +compileTerm + :: Var v => Word64 -> Term v -> EvalCtx v -> EvalCtx v +compileTerm w tm ctx + = finish + . fmap + ( emitCombs frsh + . superNormalize (ref $ refTm ctx) (ref $ refTy ctx)) + . bkrf + . lamLift + . splitPatterns (dspec ctx) + . saturate (uncurryDspec $ dspec ctx) + $ tm + where + frsh = freshTm ctx + bkrf tm = (numberLetRec frsh tm, tm) + finish (recs, (main, aux, frsh')) + = refresh frsh' + . addTermBackrefs recs + . addCombs (mapInsert w main aux) + $ ctx + +watchHook :: IORef Closure -> Stack 'UN -> Stack 'BX -> IO () +watchHook r _ bstk = peek bstk >>= writeIORef r + +evalInContext + :: Var v + => PrettyPrintEnv + -> EvalCtx v + -> Word64 + -> IO (Either Error (Term v)) +evalInContext ppe ctx w = do + r <- newIORef BlackHole + let hook = watchHook r + senv = SEnv + (combs ctx) + builtinForeigns + (backrefComb ctx) + (backrefTy ctx) + result <- traverse (const $ readIORef r) + . 
first prettyError + <=< try $ apply0 (Just hook) senv w + pure $ decom =<< result + where + decom = decompile (`EC.lookup`backrefTy ctx) (`EC.lookup`backrefTm ctx) + prettyError (PE p) = p + prettyError (BU c) = either id (pretty ppe) $ decom c + +startRuntime :: Var v => IO (Runtime v) +startRuntime = do + ctxVar <- newIORef baseContext + pure $ Runtime + { terminate = pure () + , evaluate = \cl ppe tm -> do + ctx <- readIORef ctxVar + ctx <- loadDeps cl ctx tm + writeIORef ctxVar ctx + let init = freshTm ctx + ctx <- pure $ refresh (init+1) ctx + ctx <- pure $ compileTerm init tm ctx + evalInContext ppe ctx init + , mainType = builtinMain External + } diff --git a/parser-typechecker/src/Unison/Runtime/MCode.hs b/parser-typechecker/src/Unison/Runtime/MCode.hs new file mode 100644 index 0000000000..57b606e7b4 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/MCode.hs @@ -0,0 +1,1229 @@ +{-# language GADTs #-} +{-# language BangPatterns #-} +{-# language DeriveFunctor #-} +{-# language PatternGuards #-} +{-# language EmptyDataDecls #-} +{-# language PatternSynonyms #-} + +module Unison.Runtime.MCode + ( Args'(..) + , Args(..) + , MLit(..) + , Instr(..) + , Section(.., MatchT, MatchW) + , Comb(..) + , Ref(..) + , UPrim1(..) + , UPrim2(..) + , BPrim1(..) + , BPrim2(..) + , Branch(..) + , bcount + , ucount + , emitCombs + , emitComb + , argsToLists + , prettyCombs + , prettyComb + ) where + +import GHC.Stack (HasCallStack) + +import Control.Applicative (liftA2) + +import Data.Bifunctor (bimap,first) +import Data.Coerce +import Data.List (partition) +import Data.Word (Word64) + +import Data.Primitive.PrimArray + +import qualified Data.Map.Strict as M +import Unison.Util.EnumContainers as EC + +import Data.Text (Text) + +import Unison.Var (Var) +import Unison.ABT.Normalized (pattern TAbss) +import Unison.Reference (Reference) +import Unison.Referent (Referent) +import Unison.Runtime.ANF + ( ANormal + , ANormalT + , ANormalTF(..) + , Branched(..) + , Func(..) 
+ , Mem(..) + , SuperNormal(..) + , SuperGroup(..) + , RTag + , CTag + , Tag(..) + , packTags + , pattern TVar + , pattern TLit + , pattern TApp + , pattern TPrm + , pattern TIOp + , pattern THnd + , pattern TFrc + , pattern TShift + , pattern TLets + , pattern TName + , pattern TTm + , pattern TMatch + ) +import qualified Unison.Runtime.ANF as ANF + +-- This outlines some of the ideas/features in this core +-- language, and how they may be used to implement features of +-- the surface language. + +----------------------- +-- Delimited control -- +----------------------- + +-- There is native support for delimited control operations in +-- the core language. This means we can: +-- 1. delimit a block of code with an integer tagged prompt, +-- which corresponds to pushing a frame onto the +-- continuation with said tag +-- 2. capture a portion of the continuation up to a particular +-- tag frame and turn it into a value, which _removes_ the +-- tag frame from the continuation in the process +-- 3. push such a captured value back onto the continuation + +-- TBD: Since the captured continuations in _delimited_ control +-- are (in this case impure) functions, it may make sense to make +-- the representation of functions support these captured +-- continuations directly. + +-- The obvious use case of this feature is effects and handlers. +-- Delimiting a block with a prompt is part of installing a +-- handler for said block at least naively. The other part is +-- establishing the code that should be executed for each +-- operation to be handled. + +-- It's important (I believe) in #2 that the prompt be removed +-- from the continuation by a control effect. The captured +-- continuation not being automatically delimited corresponds to +-- a shallow handler's obligation to re-establish the handling of +-- a re-invoked computation if it wishes to do so. 
The delimiter +-- being removed from the capturing code's continuation +-- corresponds to a handler being allowed to yield effects from +-- the same signature that it is handling. + +-- In special cases, it should be possible to omit use of control +-- effects in handlers. At the least, if a handler case resumes +-- the computation in tail position, it should be unnecessary to +-- capture the continuation at all. If all cases act this way, we +-- don't need a delimiter, because we will never capture. + +-- TBD: it may make more sense to have prompt pushing be part of +-- some other construct, due to A-normal forms of the code. + +----------------------------- +-- Unboxed sum-of-products -- +----------------------------- + +-- It is not usually stated this way, but one of the core +-- features of the STG machine is that functions/closures can +-- return unboxed sum-of-products types. This is actually the way +-- _all_ data types work in STG. The discriminee of a case +-- statement must eventually return by pushing several values +-- onto the stack (the product part) and specifying which branch +-- to return to (the sum part). + +-- The way heap allocated data is produced is that an +-- intermediate frame may be in the continuation that grabs this +-- information from the local storage and puts it into the heap. +-- If this frame were omitted, only the unboxed component would +-- be left. Also, in STG, the heap allocated data is just a means +-- of reconstructing its unboxed analogue. Evaluating a heap +-- allocated data type value just results in pushing its stored +-- fields back on the stack, and immediately returning the tag. + +-- The portion of this with the heap allocation frame omitted +-- seems to be a natural match for the case analysis portion of +-- handlers. A naive implementation of an effect algebra is as +-- the data type of the polynomial functor generated by the +-- signature, and handling corresponds to case analysis. 
However, +-- in a real implementation, we don't want a heap allocated +-- representation of this algebra, because its purpose is control +-- flow. Each operation will be handled once as it occurs, and we +-- won't save work by remembering some reified representation of +-- which operations were used. + +-- Since handlers in unison are written as functions, it seems to +-- make sense to define a calling convention for unboxed +-- sum-of-products as arguments. Variable numbers of stack +-- positions could be pushed for such arguments, with tags +-- specifying which case is being provided. + +-- TBD: sum arguments to a function correspond to a product of +-- functions, so it's possible that the calling convention for +-- these functions should be similar to returning to a case, +-- where we push arguments and then select which of several +-- pieces of code to jump to. This view also seems relevant to +-- the optimized implementation of certain forms of handler, +-- where we want effects to just directly select some code to +-- execute based on state that has been threaded to that point. + +-- One thing to note: it probably does not make sense to +-- completely divide returns into unboxed returns and allocation +-- frames. The reason this works in STG is laziness. Naming a +-- computation with `let` does not do any evaluation, but it does +-- allocate space for its (boxed) result. The only thing that +-- _does_ demand evaluation is case analysis. So, if a value with +-- sum type is being evaluated, we know it must be about to be +-- unpacked, and it makes little sense to pack it on the stack, +-- though we can build a closure version of it in the writeback +-- location established by `let`. + +-- By contrast, in unison a `let` of a sum type evaluates it +-- immediately, even if no one is analyzing it. So we might waste +-- work rearranging the stack with the unpacked contents when we +-- only needed the closure version to begin with. 
Instead, we +-- gain the ability to make the unpacking operation use no stack, +-- because we know what we are unpacking must be a value. Turning +-- boxed function calls into unboxed versions thus seems like a +-- situational optimization, rather than a universal calling +-- convention. + +------------------------------- +-- Delimited Dynamic Binding -- +------------------------------- + +-- There is a final component to the implementation of ability +-- handlers in this runtime system, and that is dynamically +-- scoped variables associated to each prompt. Each prompt +-- corresponds to an ability signature, and `reset` to a handler +-- for said signature, but we need storage space for the code +-- installed by said handler. It is possible to implement +-- dynamically scoped variables entirely with delimited +-- continuations, but it is more efficient to keep track of the +-- storage directly when manipulating the continuations. + +-- The dynamic scoping---and how it interacts with +-- continuations---corresponds to the nested structure of +-- handlers. Installing a handler establishes a variable scope, +-- shadowing outer scopes for the same prompt. Shifting, however, +-- can exit these scopes dynamically. So, for instance, if we +-- have a structure like: + +-- reset 0 $ ... +-- reset 1 $ ... +-- reset 0 $ ... +-- shift 1 + +-- We have nested scopes 0>1>0, with the second 0 shadowing the +-- first. However, when we shift to 1, the inner 0 scope is +-- captured into the continuation, and uses of the 0 ability in +-- will be handled by the outer handler until it is shadowed +-- again (and the captured continuation will re-establish the +-- shadowing). + +-- Mutation of the variables is possible, but mutation only +-- affects the current scope. Essentially, the dynamic scoping is +-- of mutable references, and when scope changes, we switch +-- between different references, and the mutation of each +-- reference does not affect the others. 
The purpose of the +-- mutation is to enable more efficient implementation of +-- certain recursive, 'deep' handlers, since those can operate +-- more like stateful code than control operators. + +data Args' + = Arg1 !Int + | Arg2 !Int !Int + -- frame index of each argument to the function + | ArgN {-# unpack #-} !(PrimArray Int) + | ArgR !Int !Int + +data Args + = ZArgs + | UArg1 !Int + | UArg2 !Int !Int + | BArg1 !Int + | BArg2 !Int !Int + | DArg2 !Int !Int + | UArgR !Int !Int + | BArgR !Int !Int + | DArgR !Int !Int !Int !Int + | BArgN !(PrimArray Int) + | UArgN !(PrimArray Int) + | DArgN !(PrimArray Int) !(PrimArray Int) + | DArgV !Int !Int + deriving (Show, Eq, Ord) + +argsToLists :: Args -> ([Int], [Int]) +argsToLists ZArgs = ([],[]) +argsToLists (UArg1 i) = ([i],[]) +argsToLists (UArg2 i j) = ([i,j],[]) +argsToLists (BArg1 i) = ([],[i]) +argsToLists (BArg2 i j) = ([],[i,j]) +argsToLists (DArg2 i j) = ([i],[j]) +argsToLists (UArgR i l) = (take l [i..], []) +argsToLists (BArgR i l) = ([], take l [i..]) +argsToLists (DArgR ui ul bi bl) = (take ul [ui..], take bl [bi..]) +argsToLists (BArgN bs) = ([], primArrayToList bs) +argsToLists (UArgN us) = (primArrayToList us, []) +argsToLists (DArgN us bs) = (primArrayToList us, primArrayToList bs) +argsToLists (DArgV _ _) = error "argsToLists: DArgV" + +ucount, bcount :: Args -> Int + +ucount (UArg1 _) = 1 +ucount (UArg2 _ _) = 2 +ucount (DArg2 _ _) = 1 +ucount (UArgR _ l) = l +ucount (DArgR _ l _ _) = l +ucount _ = 0 +{-# inline ucount #-} + +bcount (BArg1 _) = 1 +bcount (BArg2 _ _) = 2 +bcount (DArg2 _ _) = 1 +bcount (BArgR _ l) = l +bcount (DArgR _ _ _ l) = l +bcount (BArgN a) = sizeofPrimArray a +bcount _ = 0 +{-# inline bcount #-} + +data UPrim1 + -- integral + = DECI | INCI | NEGI | SGNI -- decrement,increment,negate,signum + | LZRO | TZRO | COMN -- leading/trailingZeroes,complement + -- floating + | ABSF | EXPF | LOGF | SQRT -- abs,exp,log,sqrt + | COSF | ACOS | COSH | ACSH -- cos,acos,cosh,acosh + | SINF | ASIN | 
SINH | ASNH -- sin,asin,sinh,asinh + | TANF | ATAN | TANH | ATNH -- tan,atan,tanh,atanh + | ITOF | NTOF | CEIL | FLOR -- intToFloat,natToFloat,ceiling,floor + | TRNF | RNDF -- truncate,round + deriving (Show, Eq, Ord) + +data UPrim2 + -- integral + = ADDI | SUBI | MULI | DIVI | MODI -- +,-,*,/,mod + | SHLI | SHRI | SHRN | POWI -- shiftl,shiftr,shiftr,pow + | EQLI | LEQI | LEQN -- ==,<=,<= + | ANDN | IORN | XORN -- and,or,xor + -- floating + | EQLF | LEQF -- ==,<= + | ADDF | SUBF | MULF | DIVF | ATN2 -- +,-,*,/,atan2 + | POWF | LOGB | MAXF | MINF -- pow,low,max,min + deriving (Show, Eq, Ord) + +data BPrim1 + -- text + = SIZT | USNC | UCNS -- size,unsnoc,uncons + | ITOT | NTOT | FTOT -- intToText,natToText,floatToText + | TTOI | TTON | TTOF -- textToInt,textToNat,textToFloat + | PAKT | UPKT -- pack,unpack + -- sequence + | VWLS | VWRS | SIZS -- viewl,viewr,size + | PAKB | UPKB | SIZB -- pack,unpack,size + | FLTB -- flatten + -- general + | THRO -- throw + deriving (Show, Eq, Ord) + +data BPrim2 + -- universal + = EQLU | CMPU -- ==,compare + -- text + | DRPT | CATT | TAKT -- drop,append,take + | EQLT | LEQT | LEST -- ==,<=,< + -- sequence + | DRPS | CATS | TAKS -- drop,append,take + | CONS | SNOC | IDXS -- cons,snoc,index + | SPLL | SPLR -- splitLeft,splitRight + -- bytes + | TAKB | DRPB | IDXB | CATB -- take,drop,index,append + deriving (Show, Eq, Ord) + +data MLit + = MI !Int + | MD !Double + | MT !Text + | MM !Referent + | MY !Reference + deriving (Show, Eq, Ord) + +-- Instructions for manipulating the data stack in the main portion of +-- a block +data Instr + -- 1-argument unboxed primitive operations + = UPrim1 !UPrim1 -- primitive instruction + !Int -- index of prim argument + + -- 2-argument unboxed primitive operations + | UPrim2 !UPrim2 -- primitive instruction + !Int -- index of first prim argument + !Int -- index of second prim argument + + -- 1-argument primitive operations that may involve boxed values + | BPrim1 !BPrim1 + !Int + + -- 2-argument 
primitive operations that may involve boxed values + | BPrim2 !BPrim2 + !Int + !Int + + -- Call out to a Haskell function. This is considerably slower + -- for very simple operations, hence the primops. + | ForeignCall !Bool -- catch exceptions + !Word64 -- FFI call + !Args -- arguments + + -- Set the value of a dynamic reference + | SetDyn !Word64 -- the prompt tag of the reference + !Int -- the stack index of the closure to store + + -- Capture the continuation up to a given marker. + | Capture !Word64 -- the prompt tag + + -- This is essentially the opposite of `Call`. Pack a given + -- statically known function into a closure with arguments. + -- No stack is necessary, because no nested evaluation happens, + -- so the instruction directly takes a follow-up. + | Name !Ref !Args + + -- Dump some debugging information about the machine state to + -- the screen. + | Info !String -- prefix for output + + -- Pack a data type value into a closure and place it + -- on the stack. + | Pack !Word64 -- tag + !Args -- arguments to pack + + -- Unpack the contents of a data type onto the stack + | Unpack !Int -- stack index of data to unpack + + -- Push a particular value onto the appropriate stack + | Lit !MLit -- value to push onto the stack + + -- Print a value on the unboxed stack + | Print !Int -- index of the primitive value to print + + -- Put a delimiter on the continuation + | Reset !(EnumSet Word64) -- prompt ids + + | Fork !Section + | Seq !Args + deriving (Show, Eq, Ord) + +data Section + -- Apply a function to arguments. This is the 'slow path', and + -- handles applying functions from arbitrary sources. This + -- requires checks to determine what exactly should happen. + = App + !Bool -- skip argument check for known calling convention + !Ref -- function to call + !Args -- arguments + + -- This is the 'fast path', for when we statically know we're + -- making an exactly saturated call to a statically known + -- function. 
This allows skipping various checks that can cost + -- time in very tight loops. This also allows skipping the + -- stack check if we know that the current stack allowance is + -- sufficient for where we're jumping to. + | Call + !Bool -- skip stack check + !Word64 -- global function reference + !Args -- arguments + + -- Jump to a captured continuation value. + | Jump + !Int -- index of captured continuation + !Args -- arguments to send to continuation + + -- Branch on the value in the unboxed data stack + | Match !Int -- index of unboxed item to match on + !Branch -- branches + + -- Yield control to the current continuation, with arguments + | Yield !Args -- values to yield + + -- Prefix an instruction onto a section + | Ins !Instr !Section + + -- Sequence two sections. The second is pushed as a return + -- point for the results of the first. Stack modifications in + -- the first are lost on return to the second. + | Let !Section !Section + + -- Throw an exception with the given message + | Die String + + -- Immediately stop a thread of interpretation. This is more of + -- a debugging tool than a proper operation to target. + | Exit + deriving (Show, Eq, Ord) + +data Comb + = Lam !Int -- Number of unboxed arguments + !Int -- Number of boxed arguments + !Int -- Maximum needed unboxed frame size + !Int -- Maximum needed boxed frame size + !Section -- Entry + deriving (Show, Eq, Ord) + +data Ref + = Stk !Int -- stack reference to a closure + | Env !Word64 -- global environment reference to a combinator + | Dyn !Word64 -- dynamic scope reference to a closure + deriving (Show, Eq, Ord) + +data Branch + -- if tag == n then t else f + = Test1 !Word64 + !Section + !Section + | Test2 !Word64 !Section -- if tag == m then ... + !Word64 !Section -- else if tag == n then ... + !Section -- else ... 
+ | TestW !Section + !(EnumMap Word64 Section) + | TestT !Section + !(M.Map Text Section) + deriving (Show, Eq, Ord) + +-- Convenience patterns for matches used in the algorithms below. +pattern MatchW i d cs = Match i (TestW d cs) +pattern MatchT i d cs = Match i (TestT d cs) + +-- Representation of the variable context available in the current +-- frame. This tracks tags that have been dumped to the stack for +-- proper indexing. The `Block` constructor is used to mark when we +-- go into the first portion of a `Let`, to track the size of that +-- sub-frame. +data Ctx v + = ECtx + | Block (Ctx v) + | Tag (Ctx v) + | Var v Mem (Ctx v) + deriving (Show) + +-- Represents the context formed by the top-level let rec around a +-- set of definitions. Previous steps have normalized the term to +-- only contain a single recursive binding group. The variables in +-- this binding group are resolved to numbered combinators rather +-- than stack positions. +type RCtx v = M.Map v Word64 + +-- Add a sequence of variables and corresponding calling conventions +-- to the context. +ctx :: [v] -> [Mem] -> Ctx v +ctx vs cs = pushCtx (zip vs cs) ECtx + +-- Look up a variable in the context, getting its position on the +-- relevant stack and its calling convention if it is there. +ctxResolve :: Var v => Ctx v -> v -> Maybe (Int,Mem) +ctxResolve ctx v = walk 0 0 ctx + where + walk _ _ ECtx = Nothing + walk ui bi (Block ctx) = walk ui bi ctx + walk ui bi (Tag ctx) = walk (ui+1) bi ctx + walk ui bi (Var x m ctx) + | v == x = case m of BX -> Just (bi,m) ; UN -> Just (ui,m) + | otherwise = walk ui' bi' ctx + where + (ui', bi') = case m of BX -> (ui,bi+1) ; UN -> (ui+1,bi) + +-- Add a sequence of variables and calling conventions to the context. 
+pushCtx :: [(v,Mem)] -> Ctx v -> Ctx v +pushCtx new old = foldr (uncurry Var) old new + +-- Concatenate two contexts +catCtx :: Ctx v -> Ctx v -> Ctx v +catCtx ECtx r = r +catCtx (Tag l) r = Tag $ catCtx l r +catCtx (Block l) r = Block $ catCtx l r +catCtx (Var v m l) r = Var v m $ catCtx l r + +-- Split the context after a particular variable +breakAfter :: Eq v => (v -> Bool) -> Ctx v -> (Ctx v, Ctx v) +breakAfter _ ECtx = (ECtx, ECtx) +breakAfter p (Tag vs) = first Tag $ breakAfter p vs +breakAfter p (Block vs) = first Block $ breakAfter p vs +breakAfter p (Var v m vs) = (Var v m lvs, rvs) + where + (lvs, rvs) + | p v = (ECtx, vs) + | otherwise = breakAfter p vs + +-- Modify the context to contain the variables introduced by an +-- unboxed sum +sumCtx :: Var v => Ctx v -> v -> [(v,Mem)] -> Ctx v +sumCtx ctx v vcs + | (lctx, rctx) <- breakAfter (== v) ctx + = catCtx lctx $ pushCtx vcs rctx + +-- Look up a variable in the top let rec context +rctxResolve :: Var v => RCtx v -> v -> Maybe Word64 +rctxResolve ctx u = M.lookup u ctx + +-- Compile a top-level definition group to a collection of combinators. +-- The values in the recursive group are numbered according to the +-- provided word. +emitCombs + :: Var v => Word64 -> SuperGroup v + -> (Comb, EnumMap Word64 Comb, Word64) +emitCombs frsh (Rec grp ent) + = (emitComb rec ent, EC.mapFromList aux, frsh') + where + frsh' = frsh + fromIntegral (length grp) + (rvs, cmbs) = unzip grp + rec = M.fromList $ zip rvs [frsh..] + aux = zip [frsh..] $ emitComb rec <$> cmbs + +-- Type for aggregating the necessary stack frame size. First field is +-- unboxed size, second is boxed. The Applicative instance takes the +-- point-wise maximum, so that combining values from different branches +-- results in finding the maximum value of either size necessary. 
+data Counted a = C !Int !Int a + deriving (Functor) + +instance Applicative Counted where + pure = C 0 0 + C u0 b0 f <*> C u1 b1 x = C (max u0 u1) (max b0 b1) (f x) + +-- Counts the stack space used by a context and annotates a value +-- with it. +countCtx :: Ctx v -> a -> Counted a +countCtx = go 0 0 + where + go !ui !bi (Var _ UN ctx) = go (ui+1) bi ctx + go ui bi (Var _ BX ctx) = go ui (bi+1) ctx + go ui bi (Tag ctx) = go (ui+1) bi ctx + go ui bi (Block ctx) = go ui bi ctx + go ui bi ECtx = C ui bi + +emitComb :: Var v => RCtx v -> SuperNormal v -> Comb +emitComb rec (Lambda ccs (TAbss vs bd)) + = Lam 0 (length vs) u b s + where C u b s = emitSection rec (ctx vs ccs) bd + +addCount :: Int -> Int -> Counted a -> Counted a +addCount i j (C u b x) = C (u+i) (b+j) x + +-- Emit a machine code section from an ANF term +emitSection + :: Var v + => RCtx v -> Ctx v -> ANormal v + -> Counted Section +emitSection rec ctx (TLets us ms bu bo) + = emitLet rec ctx bu $ emitSection rec ectx bo + where + ectx = pushCtx (zip us ms) ctx +emitSection rec ctx (TName u (Left f) args bo) + = emitClosures rec ctx args $ \ctx as + -> Ins (Name (Env f) as) <$> emitSection rec (Var u BX ctx) bo +emitSection rec ctx (TName u (Right v) args bo) + | Just (i,BX) <- ctxResolve ctx v + = emitClosures rec ctx args $ \ctx as + -> Ins (Name (Stk i) as) <$> emitSection rec (Var u BX ctx) bo + | Just n <- rctxResolve rec v + = emitClosures rec ctx args $ \ctx as + -> Ins (Name (Env n) as) <$> emitSection rec (Var u BX ctx) bo + | otherwise = emitSectionVErr v +emitSection rec ctx (TVar v) + | Just (i,BX) <- ctxResolve ctx v = countCtx ctx . Yield $ BArg1 i + | Just (i,UN) <- ctxResolve ctx v = countCtx ctx . Yield $ UArg1 i + | Just j <- rctxResolve rec v = countCtx ctx $ App False (Env j) ZArgs + | otherwise = emitSectionVErr v +emitSection _ ctx (TPrm p args) + -- 3 is a conservative estimate of how many extra stack slots + -- a prim op will need for its results. + = addCount 3 3 . 
countCtx ctx + . Ins (emitPOp p $ emitArgs ctx args) . Yield $ DArgV i j + where + (i, j) = countBlock ctx +emitSection _ ctx (TIOp p args) + = addCount 3 3 . countCtx ctx + . Ins (emitIOp p $ emitArgs ctx args) . Yield $ DArgV i j + where + (i, j) = countBlock ctx +emitSection rec ctx (TApp f args) + = emitClosures rec ctx args $ \ctx as + -> countCtx ctx $ emitFunction rec ctx f as +emitSection _ ctx (TLit l) + = c . countCtx ctx . Ins (emitLit l) . Yield $ litArg l + where + c | ANF.T{} <- l = addCount 0 1 + | ANF.LM{} <- l = addCount 0 1 + | ANF.LY{} <- l = addCount 0 1 + | otherwise = addCount 1 0 +emitSection rec ctx (TMatch v bs) + | Just (i,BX) <- ctxResolve ctx v + , MatchData _ cs df <- bs + = Ins (Unpack i) + <$> emitDataMatching rec ctx cs df + | Just (i,BX) <- ctxResolve ctx v + , MatchRequest hs df <- bs + = Ins (Unpack i) + <$> emitRequestMatching rec ctx hs df + | Just (i,UN) <- ctxResolve ctx v + , MatchIntegral cs df <- bs + = emitIntegralMatching rec ctx i cs df + | Just (i,BX) <- ctxResolve ctx v + , MatchText cs df <- bs + = emitTextMatching rec ctx i cs df + | Just (i,UN) <- ctxResolve ctx v + , MatchSum cs <- bs + = emitSumMatching rec ctx v i cs + | Just (_,cc) <- ctxResolve ctx v + = error + $ "emitSection: mismatched calling convention for match: " + ++ matchCallingError cc bs + | otherwise + = error + $ "emitSection: could not resolve match variable: " ++ show (ctx,v) +emitSection rec ctx (THnd rts h b) + | Just (i,BX) <- ctxResolve ctx h + = Ins (Reset (EC.setFromList rs)) + . 
flip (foldr (\r -> Ins (SetDyn r i))) rs + <$> emitSection rec ctx b + | otherwise = emitSectionVErr h + where + rs = rawTag <$> rts + +emitSection rec ctx (TShift i v e) + = Ins (Capture $ rawTag i) + <$> emitSection rec (Var v BX ctx) e +emitSection _ ctx (TFrc v) + | Just (i,BX) <- ctxResolve ctx v + = countCtx ctx $ App False (Stk i) ZArgs + | Just _ <- ctxResolve ctx v = error + $ "emitSection: values to be forced must be boxed: " ++ show v + | otherwise = emitSectionVErr v +emitSection _ _ tm = error $ "emitSection: unhandled code: " ++ show tm + +-- Emit the code for a function call +emitFunction :: Var v => RCtx v -> Ctx v -> Func v -> Args -> Section +emitFunction rec ctx (FVar v) as + | Just (i,BX) <- ctxResolve ctx v + = App False (Stk i) as + | Just j <- rctxResolve rec v + = App False (Env j) as + | otherwise = emitSectionVErr v +emitFunction _ _ (FComb n) as + | False -- known saturated call + = Call False n as + | False -- known unsaturated call + = Ins (Name (Env n) as) $ Yield (BArg1 0) + | otherwise -- slow path + = App False (Env n) as +emitFunction _ _ (FCon r t) as + = Ins (Pack (packTags r t) as) + . Yield $ BArg1 0 +emitFunction _ _ (FReq a e) as + -- Currently implementing packed calling convention for abilities + = Ins (Lit (MI . fromIntegral $ rawTag e)) + . Ins (Pack (rawTag a) (reqArgs as)) + . 
App True (Dyn $ rawTag a) $ BArg1 0 +emitFunction _ ctx (FCont k) as + | Just (i, BX) <- ctxResolve ctx k = Jump i as + | Nothing <- ctxResolve ctx k = emitFunctionVErr k + | otherwise = error $ "emitFunction: continuations are boxed" +emitFunction _ _ (FPrim _) _ + = error "emitFunction: impossible" + +-- Modify function arguments for packing into a request +reqArgs :: Args -> Args +reqArgs = \case + ZArgs -> UArg1 0 + UArg1 i -> UArg2 0 (i+1) + UArg2 i j + | i == 0 && j == 1 -> UArgR 0 3 + | otherwise -> UArgN (fl [0,i+1,j+1]) + BArg1 i -> DArg2 0 i + BArg2 i j + | j == i+1 -> DArgR 0 1 i 2 + | otherwise -> DArgN (fl [0]) (fl [i,j]) + DArg2 i j + | i == 0 -> DArgR 0 2 j 1 + | otherwise -> DArgN (fl [0,i+1]) (fl [j]) + UArgR i l + | i == 0 -> UArgR 0 (l+1) + | otherwise -> UArgN (fl $ [0] ++ Prelude.take l [i+1..]) + BArgR i l -> DArgR 0 1 i l + DArgR ui ul bi bl + | ui == 0 -> DArgR 0 (ul+1) bi bl + | otherwise -> DArgN (fl $ [0] ++ Prelude.take ul [ui+1..]) + (fl $ Prelude.take bl [bi..]) + UArgN us -> UArgN (fl $ [0] ++ fmap (+1) (tl us)) + BArgN bs -> DArgN (fl [0]) bs + DArgN us bs -> DArgN (fl $ [0] ++ fmap (+1) (tl us)) bs + DArgV i j -> DArgV i j + where + fl = primArrayFromList + tl = primArrayToList + +countBlock :: Ctx v -> (Int, Int) +countBlock = go 0 0 + where + go !ui !bi (Var _ UN ctx) = go (ui+1) bi ctx + go ui bi (Var _ BX ctx) = go ui (bi+1) ctx + go ui bi (Tag ctx) = go (ui+1) bi ctx + go ui bi _ = (ui, bi) + +matchCallingError :: Mem -> Branched v -> String +matchCallingError cc b = "(" ++ show cc ++ "," ++ brs ++ ")" + where + brs | MatchData _ _ _ <- b = "MatchData" + | MatchEmpty <- b = "MatchEmpty" + | MatchIntegral _ _ <- b = "MatchIntegral" + | MatchRequest _ _ <- b = "MatchRequest" + | MatchSum _ <- b = "MatchSum" + | MatchText _ _ <- b = "MatchText" + +emitSectionVErr :: (Var v, HasCallStack) => v -> a +emitSectionVErr v + = error + $ "emitSection: could not resolve function variable: " ++ show v + +emitFunctionVErr :: (Var v, 
HasCallStack) => v -> a +emitFunctionVErr v + = error + $ "emitFunction: could not resolve function variable: " ++ show v + +litArg :: ANF.Lit -> Args +litArg ANF.T{} = BArg1 0 +litArg ANF.LM{} = BArg1 0 +litArg ANF.LY{} = BArg1 0 +litArg _ = UArg1 0 + +-- Emit machine code for a let expression. Some expressions do not +-- require a machine code Let, which uses more complicated stack +-- manipulation. +emitLet + :: Var v + => RCtx v -> Ctx v -> ANormalT v + -> Counted Section + -> Counted Section +emitLet _ _ (ALit l) + = fmap (Ins $ emitLit l) +emitLet _ ctx (AApp (FComb n) args) + -- We should be able to tell if we are making a saturated call + -- or not here. We aren't carrying the information here yet, though. + | False -- not saturated + = fmap (Ins . Name (Env n) $ emitArgs ctx args) +emitLet _ ctx (AApp (FCon r n) args) + = fmap (Ins . Pack (packTags r n) $ emitArgs ctx args) +emitLet _ ctx (AApp (FPrim p) args) + = fmap (Ins . either emitPOp emitIOp p $ emitArgs ctx args) +emitLet rec ctx bnd + = liftA2 Let (emitSection rec (Block ctx) (TTm bnd)) + +-- Translate from ANF prim ops to machine code operations. The +-- machine code operations are divided with respect to more detailed +-- information about expected number and types of arguments. 
+emitPOp :: ANF.POp -> Args -> Instr +-- Integral +emitPOp ANF.ADDI = emitP2 ADDI +emitPOp ANF.ADDN = emitP2 ADDI +emitPOp ANF.SUBI = emitP2 SUBI +emitPOp ANF.SUBN = emitP2 SUBI +emitPOp ANF.MULI = emitP2 MULI +emitPOp ANF.MULN = emitP2 MULI +emitPOp ANF.DIVI = emitP2 DIVI +emitPOp ANF.DIVN = emitP2 DIVI +emitPOp ANF.MODI = emitP2 MODI -- TODO: think about how these behave +emitPOp ANF.MODN = emitP2 MODI -- TODO: think about how these behave +emitPOp ANF.POWI = emitP2 POWI +emitPOp ANF.POWN = emitP2 POWI +emitPOp ANF.SHLI = emitP2 SHLI +emitPOp ANF.SHLN = emitP2 SHLI -- Note: left shift behaves uniformly +emitPOp ANF.SHRI = emitP2 SHRI +emitPOp ANF.SHRN = emitP2 SHRN +emitPOp ANF.LEQI = emitP2 LEQI +emitPOp ANF.LEQN = emitP2 LEQN +emitPOp ANF.EQLI = emitP2 EQLI +emitPOp ANF.EQLN = emitP2 EQLI + +emitPOp ANF.SGNI = emitP1 SGNI +emitPOp ANF.NEGI = emitP1 NEGI +emitPOp ANF.INCI = emitP1 INCI +emitPOp ANF.INCN = emitP1 INCI +emitPOp ANF.DECI = emitP1 DECI +emitPOp ANF.DECN = emitP1 DECI +emitPOp ANF.TZRO = emitP1 TZRO +emitPOp ANF.LZRO = emitP1 LZRO +emitPOp ANF.ANDN = emitP2 ANDN +emitPOp ANF.IORN = emitP2 IORN +emitPOp ANF.XORN = emitP2 XORN +emitPOp ANF.COMN = emitP1 COMN + +-- Float +emitPOp ANF.ADDF = emitP2 ADDF +emitPOp ANF.SUBF = emitP2 SUBF +emitPOp ANF.MULF = emitP2 MULF +emitPOp ANF.DIVF = emitP2 DIVF +emitPOp ANF.LEQF = emitP2 LEQF +emitPOp ANF.EQLF = emitP2 EQLF + +emitPOp ANF.MINF = emitP2 MINF +emitPOp ANF.MAXF = emitP2 MAXF + +emitPOp ANF.POWF = emitP2 POWF +emitPOp ANF.EXPF = emitP1 EXPF +emitPOp ANF.ABSF = emitP1 ABSF +emitPOp ANF.SQRT = emitP1 SQRT +emitPOp ANF.LOGF = emitP1 LOGF +emitPOp ANF.LOGB = emitP2 LOGB + +emitPOp ANF.CEIL = emitP1 CEIL +emitPOp ANF.FLOR = emitP1 FLOR +emitPOp ANF.TRNF = emitP1 TRNF +emitPOp ANF.RNDF = emitP1 RNDF + +emitPOp ANF.COSF = emitP1 COSF +emitPOp ANF.SINF = emitP1 SINF +emitPOp ANF.TANF = emitP1 TANF +emitPOp ANF.COSH = emitP1 COSH +emitPOp ANF.SINH = emitP1 SINH +emitPOp ANF.TANH = emitP1 TANH +emitPOp ANF.ACOS = 
emitP1 ACOS +emitPOp ANF.ATAN = emitP1 ATAN +emitPOp ANF.ASIN = emitP1 ASIN +emitPOp ANF.ACSH = emitP1 ACSH +emitPOp ANF.ASNH = emitP1 ASNH +emitPOp ANF.ATNH = emitP1 ATNH +emitPOp ANF.ATN2 = emitP2 ATN2 + +-- conversions +emitPOp ANF.ITOF = emitP1 ITOF +emitPOp ANF.NTOF = emitP1 NTOF +emitPOp ANF.ITOT = emitBP1 ITOT +emitPOp ANF.NTOT = emitBP1 NTOT +emitPOp ANF.FTOT = emitBP1 FTOT +emitPOp ANF.TTON = emitBP1 TTON +emitPOp ANF.TTOI = emitBP1 TTOI +emitPOp ANF.TTOF = emitBP1 TTOF + +-- text +emitPOp ANF.CATT = emitBP2 CATT +emitPOp ANF.TAKT = emitBP2 TAKT +emitPOp ANF.DRPT = emitBP2 DRPT +emitPOp ANF.SIZT = emitBP1 SIZT +emitPOp ANF.UCNS = emitBP1 UCNS +emitPOp ANF.USNC = emitBP1 USNC +emitPOp ANF.EQLT = emitBP2 EQLT +emitPOp ANF.LEQT = emitBP2 LEQT +emitPOp ANF.PAKT = emitBP1 PAKT +emitPOp ANF.UPKT = emitBP1 UPKT + +-- sequence +emitPOp ANF.CATS = emitBP2 CATS +emitPOp ANF.TAKS = emitBP2 TAKS +emitPOp ANF.DRPS = emitBP2 DRPS +emitPOp ANF.SIZS = emitBP1 SIZS +emitPOp ANF.CONS = emitBP2 CONS +emitPOp ANF.SNOC = emitBP2 SNOC +emitPOp ANF.IDXS = emitBP2 IDXS +emitPOp ANF.VWLS = emitBP1 VWLS +emitPOp ANF.VWRS = emitBP1 VWRS +emitPOp ANF.SPLL = emitBP2 SPLL +emitPOp ANF.SPLR = emitBP2 SPLR + +-- bytes +emitPOp ANF.PAKB = emitBP1 PAKB +emitPOp ANF.UPKB = emitBP1 UPKB +emitPOp ANF.TAKB = emitBP2 TAKB +emitPOp ANF.DRPB = emitBP2 DRPB +emitPOp ANF.IDXB = emitBP2 IDXB +emitPOp ANF.SIZB = emitBP1 SIZB +emitPOp ANF.FLTB = emitBP1 FLTB +emitPOp ANF.CATB = emitBP2 CATB + +-- universal comparison +emitPOp ANF.EQLU = emitBP2 EQLU +emitPOp ANF.CMPU = emitBP2 CMPU + +-- error call +emitPOp ANF.EROR = emitBP1 THRO + +-- non-prim translations +emitPOp ANF.BLDS = Seq +emitPOp ANF.FORK = \case + BArg1 i -> Fork $ App True (Stk i) ZArgs + _ -> error "fork takes exactly one boxed argument" +emitPOp ANF.PRNT = \case + BArg1 i -> Print i + _ -> error "print takes exactly one boxed argument" +emitPOp ANF.INFO = \case + ZArgs -> Info "debug" + _ -> error "info takes no arguments" +-- handled 
in emitSection because Die is not an instruction + +-- Emit machine code for ANF IO operations. These are all translated +-- to 'foreing function' calls, but there is a special case for the +-- standard handle access function, because it does not yield an +-- explicit error. +emitIOp :: ANF.IOp -> Args -> Instr +emitIOp iop = ForeignCall True (fromIntegral $ fromEnum iop) + +-- Helper functions for packing the variable argument representation +-- into the indexes stored in prim op instructions +emitP1 :: UPrim1 -> Args -> Instr +emitP1 p (UArg1 i) = UPrim1 p i +emitP1 p a + = error $ "wrong number of args for unary unboxed primop: " + ++ show (p, a) + +emitP2 :: UPrim2 -> Args -> Instr +emitP2 p (UArg2 i j) = UPrim2 p i j +emitP2 p a + = error $ "wrong number of args for binary unboxed primop: " + ++ show (p, a) + +emitBP1 :: BPrim1 -> Args -> Instr +emitBP1 p (UArg1 i) = BPrim1 p i +emitBP1 p (BArg1 i) = BPrim1 p i +emitBP1 p a + = error $ "wrong number of args for unary boxed primop: " + ++ show (p,a) + +emitBP2 :: BPrim2 -> Args -> Instr +emitBP2 p (UArg2 i j) = BPrim2 p i j +emitBP2 p (BArg2 i j) = BPrim2 p i j +emitBP2 p (DArg2 i j) = BPrim2 p i j +emitBP2 p a + = error $ "wrong number of args for binary boxed primop: " + ++ show (p,a) + +emitDataMatching + :: Var v + => RCtx v + -> Ctx v + -> EnumMap CTag ([Mem], ANormal v) + -> Maybe (ANormal v) + -> Counted Section +emitDataMatching rec ctx cs df + = MatchW 0 <$> edf <*> traverse (emitCase rec ctx) (coerce cs) + where + -- Note: this is not really accurate. A default data case needs + -- stack space corresponding to the actual data that shows up there. + -- However, we currently don't use default cases for data. + edf | Just co <- df = emitSection rec ctx co + | otherwise = countCtx ctx $ Die "missing data case" + +-- Emits code corresponding to an unboxed sum match. 
+-- The match is against a tag on the stack, and cases introduce +-- variables to the middle of the context, because the fields were +-- already there, but it was unknown how many there were until +-- branching on the tag. +emitSumMatching + :: Var v + => RCtx v + -> Ctx v + -> v + -> Int + -> EnumMap Word64 ([Mem], ANormal v) + -> Counted Section +emitSumMatching rec ctx v i cs + = MatchW i edf <$> traverse (emitSumCase rec ctx v) cs + where + edf = Die "uncovered unboxed sum case" + +emitRequestMatching + :: Var v + => RCtx v + -> Ctx v + -> EnumMap RTag (EnumMap CTag ([Mem], ANormal v)) + -> ANormal v + -> Counted Section +emitRequestMatching rec ctx hs df = MatchW 0 edf <$> tops + where + tops = mapInsert 0 + <$> emitCase rec ctx ([BX], df) + <*> traverse f (coerce hs) + f cs = MatchW 1 edf <$> traverse (emitCase rec ctx) cs + edf = Die "unhandled ability" + +emitIntegralMatching + :: Var v + => RCtx v + -> Ctx v + -> Int + -> EnumMap Word64 (ANormal v) + -> Maybe (ANormal v) + -> Counted Section +emitIntegralMatching rec ctx i cs df + = MatchW i <$> edf <*> traverse (emitCase rec ctx . ([],)) cs + where + edf | Just co <- df = emitSection rec ctx co + | otherwise = countCtx ctx $ Die "missing integral case" + +emitTextMatching + :: Var v + => RCtx v + -> Ctx v + -> Int + -> M.Map Text (ANormal v) + -> Maybe (ANormal v) + -> Counted Section +emitTextMatching rec ctx i cs df + = MatchT i <$> edf <*> traverse (emitCase rec ctx . 
([],)) cs + where + edf | Just co <- df = emitSection rec ctx co + | otherwise = countCtx ctx $ Die "missing text case" + +emitCase + :: Var v + => RCtx v -> Ctx v -> ([Mem], ANormal v) + -> Counted Section +emitCase rec ctx (ccs, TAbss vs bo) + = emitSection rec (Tag $ pushCtx (zip vs ccs) ctx) bo + +emitSumCase + :: Var v + => RCtx v -> Ctx v -> v -> ([Mem], ANormal v) + -> Counted Section +emitSumCase rec ctx v (ccs, TAbss vs bo) + = emitSection rec (sumCtx ctx v $ zip vs ccs) bo + +emitLit :: ANF.Lit -> Instr +emitLit l = Lit $ case l of + ANF.I i -> MI $ fromIntegral i + ANF.N n -> MI $ fromIntegral n + ANF.C c -> MI $ fromEnum c + ANF.F d -> MD d + ANF.T t -> MT t + ANF.LM r -> MM r + ANF.LY r -> MY r + +-- Emits some fix-up code for calling functions. Some of the +-- variables in scope come from the top-level let rec, but these +-- are definitions, not values on the stack. These definitions cannot +-- be passed directly as function arguments, and must have a +-- corresponding stack entry allocated first. So, this function inserts +-- these allocations and passes the appropriate context into the +-- provided continuation. +emitClosures + :: Var v + => RCtx v -> Ctx v -> [v] + -> (Ctx v -> Args -> Counted Section) + -> Counted Section +emitClosures rec ctx args k + = allocate ctx args $ \ctx -> k ctx $ emitArgs ctx args + where + allocate ctx [] k = k ctx + allocate ctx (a:as) k + | Just _ <- ctxResolve ctx a = allocate ctx as k + | Just n <- rctxResolve rec a + = Ins (Name (Env n) ZArgs) <$> allocate (Var a BX ctx) as k + | otherwise + = error $ "emitClosures: unknown reference: " ++ show a + +emitArgs :: Var v => Ctx v -> [v] -> Args +emitArgs ctx args + | Just l <- traverse (ctxResolve ctx) args = demuxArgs l + | otherwise + = error $ "could not resolve argument variables: " ++ show args + +-- Turns a list of stack positions and calling conventions into the +-- argument format expected in the machine code. 
+demuxArgs :: [(Int,Mem)] -> Args +demuxArgs as0 + = case bimap (fmap fst) (fmap fst) $ partition ((==UN).snd) as0 of + ([],[]) -> ZArgs + ([],[i]) -> BArg1 i + ([],[i,j]) -> BArg2 i j + ([i],[]) -> UArg1 i + ([i,j],[]) -> UArg2 i j + ([i],[j]) -> DArg2 i j + ([],bs) -> BArgN $ primArrayFromList bs + (us,[]) -> UArgN $ primArrayFromList us + -- TODO: handle ranges + (us,bs) -> DArgN (primArrayFromList us) (primArrayFromList bs) + +indent :: Int -> ShowS +indent ind = showString (replicate (ind*2) ' ') + +prettyCombs + :: (Comb, EnumMap Word64 Comb, Word64) + -> ShowS +prettyCombs (c, es, w) + = foldr (\(w,c) r -> prettyComb w c . showString "\n" . r) + id (mapToList es) + . showString "\n" . prettyComb w c + +prettyComb :: Word64 -> Comb -> ShowS +prettyComb w (Lam ua ba _ _ s) + = shows w . shows [ua,ba] + . showString ":\n" . prettySection 2 s + +prettySection :: Int -> Section -> ShowS +prettySection ind sec + = indent ind . case sec of + App _ r as -> + showString "App " + . showsPrec 12 r . showString " " . prettyArgs as + Call _ i as -> + showString "Call " . shows i . showString " " . prettyArgs as + Jump i as -> + showString "Jump " . shows i . showString " " . prettyArgs as + Match i bs -> + showString "Match " . shows i . showString "\n" + . prettyBranches (ind+1) bs + Yield as -> showString "Yield " . prettyArgs as + Ins i nx -> + prettyIns i . showString "\n" . prettySection ind nx + Let s n -> + showString "Let\n" . prettySection (ind+2) s + . showString "\n" . prettySection ind n + Die s -> showString $ "Die " ++ s + Exit -> showString "Exit" + +prettyBranches :: Int -> Branch -> ShowS +prettyBranches ind bs + = case bs of + Test1 i e df -> pdf df . picase i e + Test2 i ei j ej df -> pdf df . picase i ei . picase j ej + TestW df m -> + pdf df . foldr (\(i,e) r -> picase i e . r) id (mapToList m) + TestT df m -> + pdf df . foldr (\(i,e) r -> ptcase i e . r) id (M.toList m) + where + pdf e = indent ind . showString "DFLT ->\n" . 
prettySection (ind+1) e + ptcase t e + = showString "\n" . indent ind . shows t . showString " ->\n" + . prettySection (ind+1) e + picase i e + = showString "\n" . indent ind . shows i . showString " ->\n" + . prettySection (ind+1) e + +un :: ShowS +un = ('U':) + +bx :: ShowS +bx = ('B':) + +prettyIns :: Instr -> ShowS +prettyIns (Pack i as) + = showString "Pack " . shows i . (' ':) . prettyArgs as +prettyIns i = shows i + +prettyArgs :: Args -> ShowS +prettyArgs ZArgs = shows @[Int] [] +prettyArgs (UArg1 i) = un . shows [i] +prettyArgs (BArg1 i) = bx . shows [i] +prettyArgs (UArg2 i j) = un . shows [i,j] +prettyArgs (BArg2 i j) = bx . shows [i,j] +prettyArgs (DArg2 i j) = un . shows [i] . (' ':) . bx . shows [j] +prettyArgs (UArgR i l) = un . shows (Prelude.take l [i..]) +prettyArgs (BArgR i l) = bx . shows (Prelude.take l [i..]) +prettyArgs (DArgR i l j k) + = un . shows (Prelude.take l [i..]) . (' ':) + . bx . shows (Prelude.take k [j..]) +prettyArgs (UArgN v) = un . shows (primArrayToList v) +prettyArgs (BArgN v) = bx . shows (primArrayToList v) +prettyArgs (DArgN u b) + = un . shows (primArrayToList u) . (' ':) + . bx . shows (primArrayToList b) +prettyArgs (DArgV i j) = ('V':) . 
shows [i,j] diff --git a/parser-typechecker/src/Unison/Runtime/Machine.hs b/parser-typechecker/src/Unison/Runtime/Machine.hs new file mode 100644 index 0000000000..f9c5e5f115 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Machine.hs @@ -0,0 +1,1257 @@ +{-# language DataKinds #-} +{-# language RankNTypes #-} +{-# language BangPatterns #-} +{-# language PatternGuards #-} + +module Unison.Runtime.Machine where + +import Data.Maybe (fromMaybe) + +import Data.Bits +import Data.Foldable (toList) +import Data.Traversable +import Data.Word (Word64) + +import qualified Data.Text as Tx +import qualified Data.Text.IO as Tx +import qualified Data.Sequence as Sq +import qualified Data.Map.Strict as M + +import Control.Exception +import Control.Lens ((<&>)) +import Control.Concurrent (forkIOWithUnmask, ThreadId) + +import qualified Data.Primitive.PrimArray as PA + +import Text.Read (readMaybe) + +import Unison.Reference (Reference) + +import Unison.Runtime.ANF (Mem(..), RTag) +import Unison.Runtime.Exception +import Unison.Runtime.Foreign +import Unison.Runtime.Foreign.Function +import Unison.Runtime.Stack +import Unison.Runtime.MCode + +import qualified Unison.Type as Rf +import qualified Unison.Runtime.IOSource as Rf + +import qualified Unison.Util.Bytes as By +import Unison.Util.EnumContainers as EC + +type Tag = Word64 + +-- dynamic environment +type DEnv = EnumMap Word64 Closure + +-- static environment +data SEnv + = SEnv + { combs :: !(EnumMap Word64 Comb) + , foreignFuncs :: !(EnumMap Word64 ForeignFunc) + , combRefs :: !(EnumMap Word64 Reference) + , tagRefs :: !(EnumMap RTag Reference) + } + +type Unmask = forall a. 
IO a -> IO a + +info :: Show a => String -> a -> IO () +info ctx x = infos ctx (show x) +infos :: String -> String -> IO () +infos ctx s = putStrLn $ ctx ++ ": " ++ s + +-- Entry point for evaluating a section +eval0 :: SEnv -> Section -> IO () +eval0 !env !co = do + ustk <- alloc + bstk <- alloc + mask $ \unmask -> eval unmask env mempty ustk bstk KE co + +-- Entry point for evaluating a numbered combinator. +-- An optional callback for the base of the stack may be supplied. +-- +-- This is the entry point actually used in the interactive +-- environment currently. +apply0 + :: Maybe (Stack 'UN -> Stack 'BX -> IO ()) + -> SEnv -> Word64 -> IO () +apply0 !callback !env !i + | Just cmb <- EC.lookup i (combs env) = do + ustk <- alloc + bstk <- alloc + mask $ \unmask -> + apply unmask env mempty ustk bstk k0 True ZArgs + $ PAp (IC i cmb) unull bnull + | otherwise = die $ "apply0: unknown combinator: " ++ show i + where + k0 = maybe KE (CB . Hook) callback + +lookupDenv :: Word64 -> DEnv -> Closure +lookupDenv p denv = fromMaybe BlackHole $ EC.lookup p denv + +exec + :: Unmask -> SEnv -> DEnv + -> Stack 'UN -> Stack 'BX -> K + -> Instr + -> IO (DEnv, Stack 'UN, Stack 'BX, K) +exec _ !_ !denv !ustk !bstk !k (Info tx) = do + info tx ustk + info tx bstk + info tx k + pure (denv, ustk, bstk, k) +exec _ !env !denv !ustk !bstk !k (Name r args) = do + bstk <- name ustk bstk args =<< resolve env denv bstk r + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (SetDyn p i) = do + clo <- peekOff bstk i + pure (EC.mapInsert p clo denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Capture p) = do + (sk,denv,ustk,bstk,useg,bseg,k) <- splitCont denv ustk bstk k p + bstk <- bump bstk + poke bstk $ Captured sk useg bseg + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (UPrim1 op i) = do + ustk <- uprim1 ustk op i + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (UPrim2 op i j) = do + ustk <- uprim2 ustk op i j + pure (denv, ustk, bstk, k) +exec _ !_ 
!denv !ustk !bstk !k (BPrim1 op i) = do + (ustk,bstk) <- bprim1 ustk bstk op i + pure (denv, ustk, bstk, k) +exec _ !env !denv !ustk !bstk !k (BPrim2 EQLU i j) = do + x <- peekOff bstk i + y <- peekOff bstk j + ustk <- bump ustk + poke ustk + $ case universalCompare cmb tag compare x y of + EQ -> 1 + _ -> 0 + pure (denv, ustk, bstk, k) + where + cmb w | Just r <- EC.lookup w (combRefs env) = r + | otherwise = error $ "exec: unknown combinator: " ++ show w + tag t | Just r <- EC.lookup t (tagRefs env) = r + | otherwise = error $ "exec: unknown data: " ++ show t +exec _ !env !denv !ustk !bstk !k (BPrim2 CMPU i j) = do + x <- peekOff bstk i + y <- peekOff bstk j + ustk <- bump ustk + poke ustk . fromEnum $ universalCompare cmb tag compare x y + pure (denv, ustk, bstk, k) + where + cmb w | Just r <- EC.lookup w (combRefs env) = r + | otherwise = error $ "exec: unknown combinator: " ++ show w + tag t | Just r <- EC.lookup t (tagRefs env) = r + | otherwise = error $ "exec: unknown data: " ++ show t +exec _ !_ !denv !ustk !bstk !k (BPrim2 op i j) = do + (ustk,bstk) <- bprim2 ustk bstk op i j + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Pack t args) = do + clo <- buildData ustk bstk t args + bstk <- bump bstk + poke bstk clo + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Unpack i) = do + (ustk, bstk) <- dumpData ustk bstk =<< peekOff bstk i + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Print i) = do + t <- peekOffBi bstk i + Tx.putStrLn t + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Lit (MI n)) = do + ustk <- bump ustk + poke ustk n + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Lit (MD d)) = do + ustk <- bump ustk + pokeD ustk d + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Lit (MT t)) = do + bstk <- bump bstk + poke bstk (Foreign (Wrap Rf.textRef t)) + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Lit (MM r)) = do + bstk <- bump bstk + poke bstk (Foreign (Wrap 
Rf.termLinkRef r)) + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Lit (MY r)) = do + bstk <- bump bstk + poke bstk (Foreign (Wrap Rf.typeLinkRef r)) + pure (denv, ustk, bstk, k) +exec _ !_ !denv !ustk !bstk !k (Reset ps) = do + pure (denv, ustk, bstk, Mark ps clos k) + where clos = EC.restrictKeys denv ps +exec _ !_ !denv !ustk !bstk !k (Seq as) = do + l <- closureArgs bstk as + bstk <- bump bstk + pokeS bstk $ Sq.fromList l + pure (denv, ustk, bstk, k) +exec unmask !env !denv !ustk !bstk !k (ForeignCall _ w args) + | Just (FF arg res ev) <- EC.lookup w (foreignFuncs env) + = uncurry (denv,,,k) <$> + unmask (arg ustk bstk args >>= ev >>= res ustk bstk) + | otherwise + = die $ "reference to unknown foreign function: " ++ show w +exec unmask !env !denv !ustk !bstk !k (Fork lz) = do + tid <- + unmask $ + forkEval env denv k lz <$> duplicate ustk <*> duplicate bstk + bstk <- bump bstk + poke bstk . Foreign . Wrap Rf.threadIdReference $ tid + pure (denv, ustk, bstk, k) +{-# inline exec #-} + +maskTag :: Word64 -> Word64 +maskTag i = i .&. 
0xFFFF + +eval :: Unmask -> SEnv -> DEnv + -> Stack 'UN -> Stack 'BX -> K -> Section -> IO () +eval unmask !env !denv !ustk !bstk !k (Match i (TestT df cs)) = do + t <- peekOffBi bstk i + eval unmask env denv ustk bstk k $ selectTextBranch t df cs +eval unmask !env !denv !ustk !bstk !k (Match i br) = do + n <- peekOffN ustk i + eval unmask env denv ustk bstk k $ selectBranch n br +eval unmask !env !denv !ustk !bstk !k (Yield args) + | asize ustk + asize bstk > 0 , BArg1 i <- args = do + peekOff bstk i >>= apply unmask env denv ustk bstk k False ZArgs + | otherwise = do + (ustk, bstk) <- moveArgs ustk bstk args + ustk <- frameArgs ustk + bstk <- frameArgs bstk + yield unmask env denv ustk bstk k +eval unmask !env !denv !ustk !bstk !k (App ck r args) = + resolve env denv bstk r + >>= apply unmask env denv ustk bstk k ck args +eval unmask !env !denv !ustk !bstk !k (Call ck n args) + | Just cmb <- EC.lookup n (combs env) + = enter unmask env denv ustk bstk k ck args cmb + | otherwise = die $ "eval: unknown combinator: " ++ show n +eval unmask !env !denv !ustk !bstk !k (Jump i args) = + peekOff bstk i >>= jump unmask env denv ustk bstk k args +eval unmask !env !denv !ustk !bstk !k (Let nw nx) = do + (ustk, ufsz, uasz) <- saveFrame ustk + (bstk, bfsz, basz) <- saveFrame bstk + eval unmask env denv ustk bstk (Push ufsz bfsz uasz basz nx k) nw +eval unmask !env !denv !ustk !bstk !k (Ins i nx) = do + (denv, ustk, bstk, k) <- exec unmask env denv ustk bstk k i + eval unmask env denv ustk bstk k nx +eval _ !_ !_ !_ !_ !_ Exit = pure () +eval _ !_ !_ !_ !_ !_ (Die s) = die s +{-# noinline eval #-} + +forkEval + :: SEnv -> DEnv + -> K -> Section -> Stack 'UN -> Stack 'BX -> IO ThreadId +forkEval env denv k nx ustk bstk = forkIOWithUnmask $ \unmask -> do + (denv, ustk, bstk, k) <- discardCont denv ustk bstk k 0 + eval unmask env denv ustk bstk k nx +{-# inline forkEval #-} + +-- fast path application +enter + :: Unmask -> SEnv -> DEnv -> Stack 'UN -> Stack 'BX -> K + -> Bool -> 
Args -> Comb -> IO () +enter unmask !env !denv !ustk !bstk !k !ck !args !comb = do + ustk <- if ck then ensure ustk uf else pure ustk + bstk <- if ck then ensure bstk bf else pure bstk + (ustk, bstk) <- moveArgs ustk bstk args + ustk <- acceptArgs ustk ua + bstk <- acceptArgs bstk ba + eval unmask env denv ustk bstk k entry + where + Lam ua ba uf bf entry = comb +{-# inline enter #-} + +-- fast path by-name delaying +name :: Stack 'UN -> Stack 'BX -> Args -> Closure -> IO (Stack 'BX) +name !ustk !bstk !args clo = case clo of + PAp comb useg bseg -> do + (useg, bseg) <- closeArgs I ustk bstk useg bseg args + bstk <- bump bstk + poke bstk $ PAp comb useg bseg + pure bstk + _ -> die $ "naming non-function: " ++ show clo +{-# inline name #-} + +-- slow path application +apply + :: Unmask -> SEnv -> DEnv -> Stack 'UN -> Stack 'BX -> K + -> Bool -> Args -> Closure -> IO () +apply unmask !env !denv !ustk !bstk !k !ck !args clo = case clo of + PAp comb@(Lam_ ua ba uf bf entry) useg bseg + | ck || ua <= uac && ba <= bac -> do + ustk <- ensure ustk uf + bstk <- ensure bstk bf + (ustk, bstk) <- moveArgs ustk bstk args + ustk <- dumpSeg ustk useg A + bstk <- dumpSeg bstk bseg A + ustk <- acceptArgs ustk ua + bstk <- acceptArgs bstk ba + eval unmask env denv ustk bstk k entry + | otherwise -> do + (useg, bseg) <- closeArgs C ustk bstk useg bseg args + ustk <- discardFrame =<< frameArgs ustk + bstk <- discardFrame =<< frameArgs bstk + bstk <- bump bstk + poke bstk $ PAp comb useg bseg + yield unmask env denv ustk bstk k + where + uac = asize ustk + ucount args + uscount useg + bac = asize bstk + bcount args + bscount bseg + clo | ZArgs <- args, asize ustk == 0, asize bstk == 0 -> do + ustk <- discardFrame ustk + bstk <- discardFrame bstk + bstk <- bump bstk + poke bstk clo + yield unmask env denv ustk bstk k + | otherwise -> die $ "applying non-function: " ++ show clo +{-# inline apply #-} + +jump + :: Unmask -> SEnv -> DEnv + -> Stack 'UN -> Stack 'BX -> K + -> Args -> Closure 
-> IO () +jump unmask !env !denv !ustk !bstk !k !args clo = case clo of + Captured sk useg bseg -> do + (useg, bseg) <- closeArgs K ustk bstk useg bseg args + ustk <- discardFrame ustk + bstk <- discardFrame bstk + ustk <- dumpSeg ustk useg . F $ ucount args + bstk <- dumpSeg bstk bseg . F $ bcount args + repush unmask env ustk bstk denv sk k + _ -> die "jump: non-cont" +{-# inline jump #-} + +repush + :: Unmask -> SEnv + -> Stack 'UN -> Stack 'BX -> DEnv -> K -> K -> IO () +repush unmask !env !ustk !bstk = go + where + go !denv KE !k = yield unmask env denv ustk bstk k + go !denv (Mark ps cs sk) !k = go denv' sk $ Mark ps cs' k + where + denv' = cs <> EC.withoutKeys denv ps + cs' = EC.restrictKeys denv ps + go !denv (Push un bn ua ba nx sk) !k + = go denv sk $ Push un bn ua ba nx k + go !_ (CB _) !_ = die "repush: impossible" +{-# inline repush #-} + +moveArgs + :: Stack 'UN -> Stack 'BX + -> Args -> IO (Stack 'UN, Stack 'BX) +moveArgs !ustk !bstk ZArgs = do + ustk <- discardFrame ustk + bstk <- discardFrame bstk + pure (ustk, bstk) +moveArgs !ustk !bstk (DArgV i j) = do + ustk <- if ul > 0 + then prepareArgs ustk (ArgR 0 ul) + else discardFrame ustk + bstk <- if bl > 0 + then prepareArgs bstk (ArgR 0 bl) + else discardFrame bstk + pure (ustk, bstk) + where + ul = fsize ustk - i + bl = fsize bstk - j +moveArgs !ustk !bstk (UArg1 i) = do + ustk <- prepareArgs ustk (Arg1 i) + bstk <- discardFrame bstk + pure (ustk, bstk) +moveArgs !ustk !bstk (UArg2 i j) = do + ustk <- prepareArgs ustk (Arg2 i j) + bstk <- discardFrame bstk + pure (ustk, bstk) +moveArgs !ustk !bstk (UArgR i l) = do + ustk <- prepareArgs ustk (ArgR i l) + bstk <- discardFrame bstk + pure (ustk, bstk) +moveArgs !ustk !bstk (BArg1 i) = do + ustk <- discardFrame ustk + bstk <- prepareArgs bstk (Arg1 i) + pure (ustk, bstk) +moveArgs !ustk !bstk (BArg2 i j) = do + ustk <- discardFrame ustk + bstk <- prepareArgs bstk (Arg2 i j) + pure (ustk, bstk) +moveArgs !ustk !bstk (BArgR i l) = do + ustk <- 
discardFrame ustk + bstk <- prepareArgs bstk (ArgR i l) + pure (ustk, bstk) +moveArgs !ustk !bstk (DArg2 i j) = do + ustk <- prepareArgs ustk (Arg1 i) + bstk <- prepareArgs bstk (Arg1 j) + pure (ustk, bstk) +moveArgs !ustk !bstk (DArgR ui ul bi bl) = do + ustk <- prepareArgs ustk (ArgR ui ul) + bstk <- prepareArgs bstk (ArgR bi bl) + pure (ustk, bstk) +moveArgs !ustk !bstk (UArgN as) = do + ustk <- prepareArgs ustk (ArgN as) + bstk <- discardFrame bstk + pure (ustk, bstk) +moveArgs !ustk !bstk (BArgN as) = do + ustk <- discardFrame ustk + bstk <- prepareArgs bstk (ArgN as) + pure (ustk, bstk) +moveArgs !ustk !bstk (DArgN us bs) = do + ustk <- prepareArgs ustk (ArgN us) + bstk <- prepareArgs bstk (ArgN bs) + pure (ustk, bstk) +{-# inline moveArgs #-} + +closureArgs :: Stack 'BX -> Args -> IO [Closure] +closureArgs !_ ZArgs = pure [] +closureArgs !bstk (BArg1 i) = do + x <- peekOff bstk i + pure [x] +closureArgs !bstk (BArg2 i j) = do + x <- peekOff bstk i + y <- peekOff bstk j + pure [x,y] +closureArgs !bstk (BArgR i l) + = for (take l [i..]) (peekOff bstk) +closureArgs !bstk (BArgN bs) + = for (PA.primArrayToList bs) (peekOff bstk) +closureArgs !_ _ + = error "closure arguments can only be boxed." 
+{-# inline closureArgs #-} + +buildData + :: Stack 'UN -> Stack 'BX -> Tag -> Args -> IO Closure +buildData !_ !_ !t ZArgs = pure $ Enum t +buildData !ustk !_ !t (UArg1 i) = do + x <- peekOff ustk i + pure $ DataU1 t x +buildData !ustk !_ !t (UArg2 i j) = do + x <- peekOff ustk i + y <- peekOff ustk j + pure $ DataU2 t x y +buildData !_ !bstk !t (BArg1 i) = do + x <- peekOff bstk i + pure $ DataB1 t x +buildData !_ !bstk !t (BArg2 i j) = do + x <- peekOff bstk i + y <- peekOff bstk j + pure $ DataB2 t x y +buildData !ustk !bstk !t (DArg2 i j) = do + x <- peekOff ustk i + y <- peekOff bstk j + pure $ DataUB t x y +buildData !ustk !_ !t (UArgR i l) = do + useg <- augSeg I ustk unull (Just $ ArgR i l) + pure $ DataG t useg bnull +buildData !_ !bstk !t (BArgR i l) = do + bseg <- augSeg I bstk bnull (Just $ ArgR i l) + pure $ DataG t unull bseg +buildData !ustk !bstk !t (DArgR ui ul bi bl) = do + useg <- augSeg I ustk unull (Just $ ArgR ui ul) + bseg <- augSeg I bstk bnull (Just $ ArgR bi bl) + pure $ DataG t useg bseg +buildData !ustk !_ !t (UArgN as) = do + useg <- augSeg I ustk unull (Just $ ArgN as) + pure $ DataG t useg bnull +buildData !_ !bstk !t (BArgN as) = do + bseg <- augSeg I bstk bnull (Just $ ArgN as) + pure $ DataG t unull bseg +buildData !ustk !bstk !t (DArgN us bs) = do + useg <- augSeg I ustk unull (Just $ ArgN us) + bseg <- augSeg I bstk bnull (Just $ ArgN bs) + pure $ DataG t useg bseg +buildData !ustk !bstk !t (DArgV ui bi) = do + useg <- if ul > 0 + then augSeg I ustk unull (Just $ ArgR 0 ul) + else pure unull + bseg <- if bl > 0 + then augSeg I bstk bnull (Just $ ArgR 0 bl) + else pure bnull + pure $ DataG t useg bseg + where + ul = fsize ustk - ui + bl = fsize bstk - bi +{-# inline buildData #-} + +dumpData + :: Stack 'UN -> Stack 'BX -> Closure -> IO (Stack 'UN, Stack 'BX) +dumpData !ustk !bstk (Enum t) = do + ustk <- bump ustk + pokeN ustk $ maskTag t + pure (ustk, bstk) +dumpData !ustk !bstk (DataU1 t x) = do + ustk <- bumpn ustk 2 + pokeOff 
ustk 1 x + pokeN ustk $ maskTag t + pure (ustk, bstk) +dumpData !ustk !bstk (DataU2 t x y) = do + ustk <- bumpn ustk 3 + pokeOff ustk 2 y + pokeOff ustk 1 x + pokeN ustk $ maskTag t + pure (ustk, bstk) +dumpData !ustk !bstk (DataB1 t x) = do + ustk <- bump ustk + bstk <- bump bstk + poke bstk x + pokeN ustk $ maskTag t + pure (ustk, bstk) +dumpData !ustk !bstk (DataB2 t x y) = do + ustk <- bump ustk + bstk <- bumpn bstk 2 + pokeOff bstk 1 y + poke bstk x + pokeN ustk $ maskTag t + pure (ustk, bstk) +dumpData !ustk !bstk (DataUB t x y) = do + ustk <- bumpn ustk 2 + bstk <- bump bstk + pokeOff ustk 1 x + poke bstk y + pokeN ustk $ maskTag t + pure (ustk, bstk) +dumpData !ustk !bstk (DataG t us bs) = do + ustk <- dumpSeg ustk us S + bstk <- dumpSeg bstk bs S + ustk <- bump ustk + pokeN ustk $ maskTag t + pure (ustk, bstk) +dumpData !_ !_ clo = die $ "dumpData: bad closure: " ++ show clo +{-# inline dumpData #-} + +-- Note: although the representation allows it, it is impossible +-- to under-apply one sort of argument while over-applying the +-- other. Thus, it is unnecessary to worry about doing tricks to +-- only grab a certain number of arguments. 
+closeArgs + :: Augment + -> Stack 'UN -> Stack 'BX + -> Seg 'UN -> Seg 'BX + -> Args -> IO (Seg 'UN, Seg 'BX) +closeArgs mode !ustk !bstk !useg !bseg args = + (,) <$> augSeg mode ustk useg uargs + <*> augSeg mode bstk bseg bargs + where + (uargs, bargs) = case args of + ZArgs -> (Nothing, Nothing) + UArg1 i -> (Just $ Arg1 i, Nothing) + BArg1 i -> (Nothing, Just $ Arg1 i) + UArg2 i j -> (Just $ Arg2 i j, Nothing) + BArg2 i j -> (Nothing, Just $ Arg2 i j) + UArgR i l -> (Just $ ArgR i l, Nothing) + BArgR i l -> (Nothing, Just $ ArgR i l) + DArg2 i j -> (Just $ Arg1 i, Just $ Arg1 j) + DArgR ui ul bi bl -> (Just $ ArgR ui ul, Just $ ArgR bi bl) + UArgN as -> (Just $ ArgN as, Nothing) + BArgN as -> (Nothing, Just $ ArgN as) + DArgN us bs -> (Just $ ArgN us, Just $ ArgN bs) + DArgV ui bi -> (ua, ba) + where + ua | ul > 0 = Just $ ArgR 0 ul + | otherwise = Nothing + ba | bl > 0 = Just $ ArgR 0 bl + | otherwise = Nothing + ul = fsize ustk - ui + bl = fsize bstk - bi + +peekForeign :: Stack 'BX -> Int -> IO a +peekForeign bstk i + = peekOff bstk i >>= \case + Foreign x -> pure $ unwrapForeign x + _ -> die "bad foreign argument" +{-# inline peekForeign #-} + +uprim1 :: Stack 'UN -> UPrim1 -> Int -> IO (Stack 'UN) +uprim1 !ustk DECI !i = do + m <- peekOff ustk i + ustk <- bump ustk + poke ustk (m-1) + pure ustk +uprim1 !ustk INCI !i = do + m <- peekOff ustk i + ustk <- bump ustk + poke ustk (m+1) + pure ustk +uprim1 !ustk NEGI !i = do + m <- peekOff ustk i + ustk <- bump ustk + poke ustk (-m) + pure ustk +uprim1 !ustk SGNI !i = do + m <- peekOff ustk i + ustk <- bump ustk + poke ustk (signum m) + pure ustk +uprim1 !ustk ABSF !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (abs d) + pure ustk +uprim1 !ustk CEIL !i = do + d <- peekOffD ustk i + ustk <- bump ustk + poke ustk (ceiling d) + pure ustk +uprim1 !ustk FLOR !i = do + d <- peekOffD ustk i + ustk <- bump ustk + poke ustk (floor d) + pure ustk +uprim1 !ustk TRNF !i = do + d <- peekOffD ustk i + ustk <- 
bump ustk + poke ustk (truncate d) + pure ustk +uprim1 !ustk RNDF !i = do + d <- peekOffD ustk i + ustk <- bump ustk + poke ustk (round d) + pure ustk +uprim1 !ustk EXPF !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (exp d) + pure ustk +uprim1 !ustk LOGF !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (log d) + pure ustk +uprim1 !ustk SQRT !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (sqrt d) + pure ustk +uprim1 !ustk COSF !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (cos d) + pure ustk +uprim1 !ustk SINF !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (sin d) + pure ustk +uprim1 !ustk TANF !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (tan d) + pure ustk +uprim1 !ustk COSH !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (cosh d) + pure ustk +uprim1 !ustk SINH !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (sinh d) + pure ustk +uprim1 !ustk TANH !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (tanh d) + pure ustk +uprim1 !ustk ACOS !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (acos d) + pure ustk +uprim1 !ustk ASIN !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (asin d) + pure ustk +uprim1 !ustk ATAN !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (atan d) + pure ustk +uprim1 !ustk ASNH !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (asinh d) + pure ustk +uprim1 !ustk ACSH !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (acosh d) + pure ustk +uprim1 !ustk ATNH !i = do + d <- peekOffD ustk i + ustk <- bump ustk + pokeD ustk (atanh d) + pure ustk +uprim1 !ustk ITOF !i = do + n <- peekOff ustk i + ustk <- bump ustk + pokeD ustk (fromIntegral n) + pure ustk +uprim1 !ustk NTOF !i = do + n <- peekOffN ustk i + ustk <- bump ustk + pokeD ustk (fromIntegral n) + pure ustk +uprim1 !ustk LZRO !i = do + n <- peekOffN ustk i + ustk <- bump 
ustk + poke ustk (countLeadingZeros n) + pure ustk +uprim1 !ustk TZRO !i = do + n <- peekOffN ustk i + ustk <- bump ustk + poke ustk (countTrailingZeros n) + pure ustk +uprim1 !ustk COMN !i = do + n <- peekOffN ustk i + ustk <- bump ustk + pokeN ustk (complement n) + pure ustk +{-# inline uprim1 #-} + +uprim2 :: Stack 'UN -> UPrim2 -> Int -> Int -> IO (Stack 'UN) +uprim2 !ustk ADDI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk (m+n) + pure ustk +uprim2 !ustk SUBI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk (m-n) + pure ustk +uprim2 !ustk MULI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk (m*n) + pure ustk +uprim2 !ustk DIVI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk (m`div`n) + pure ustk +uprim2 !ustk MODI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk (m`mod`n) + pure ustk +uprim2 !ustk SHLI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk (m`shiftL`n) + pure ustk +uprim2 !ustk SHRI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk (m`shiftR`n) + pure ustk +uprim2 !ustk SHRN !i !j = do + m <- peekOffN ustk i + n <- peekOff ustk j + ustk <- bump ustk + pokeN ustk (m`shiftR`n) + pure ustk +uprim2 !ustk POWI !i !j = do + m <- peekOff ustk i + n <- peekOffN ustk j + ustk <- bump ustk + poke ustk (m^n) + pure ustk +uprim2 !ustk EQLI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk $ if m == n then 1 else 0 + pure ustk +uprim2 !ustk LEQI !i !j = do + m <- peekOff ustk i + n <- peekOff ustk j + ustk <- bump ustk + poke ustk $ if m <= n then 1 else 0 + pure ustk +uprim2 !ustk LEQN !i !j = do + m <- peekOffN ustk i + n <- peekOffN ustk j + ustk <- bump ustk + poke ustk $ if m <= n then 1 else 0 + pure ustk +uprim2 !ustk ADDF !i !j = do + x <- peekOffD ustk 
i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (x + y) + pure ustk +uprim2 !ustk SUBF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (x - y) + pure ustk +uprim2 !ustk MULF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (x * y) + pure ustk +uprim2 !ustk DIVF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (x / y) + pure ustk +uprim2 !ustk LOGB !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (logBase x y) + pure ustk +uprim2 !ustk POWF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (x ** y) + pure ustk +uprim2 !ustk MAXF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (max x y) + pure ustk +uprim2 !ustk MINF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (min x y) + pure ustk +uprim2 !ustk EQLF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (if x == y then 1 else 0) + pure ustk +uprim2 !ustk LEQF !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (if x <= y then 1 else 0) + pure ustk +uprim2 !ustk ATN2 !i !j = do + x <- peekOffD ustk i + y <- peekOffD ustk j + ustk <- bump ustk + pokeD ustk (atan2 x y) + pure ustk +uprim2 !ustk ANDN !i !j = do + x <- peekOffN ustk i + y <- peekOffN ustk j + ustk <- bump ustk + pokeN ustk (x .&. y) + pure ustk +uprim2 !ustk IORN !i !j = do + x <- peekOffN ustk i + y <- peekOffN ustk j + ustk <- bump ustk + pokeN ustk (x .|. 
y) + pure ustk +uprim2 !ustk XORN !i !j = do + x <- peekOffN ustk i + y <- peekOffN ustk j + ustk <- bump ustk + pokeN ustk (xor x y) + pure ustk +{-# inline uprim2 #-} + +bprim1 + :: Stack 'UN -> Stack 'BX -> BPrim1 -> Int + -> IO (Stack 'UN, Stack 'BX) +bprim1 !ustk !bstk SIZT i = do + t <- peekOffBi bstk i + ustk <- bump ustk + poke ustk $ Tx.length t + pure (ustk, bstk) +bprim1 !ustk !bstk SIZS i = do + s <- peekOffS bstk i + ustk <- bump ustk + poke ustk $ Sq.length s + pure (ustk, bstk) +bprim1 !ustk !bstk ITOT i = do + n <- peekOff ustk i + bstk <- bump bstk + pokeBi bstk . Tx.pack $ show n + pure (ustk, bstk) +bprim1 !ustk !bstk NTOT i = do + n <- peekOffN ustk i + bstk <- bump bstk + pokeBi bstk . Tx.pack $ show n + pure (ustk, bstk) +bprim1 !ustk !bstk FTOT i = do + f <- peekOffD ustk i + bstk <- bump bstk + pokeBi bstk . Tx.pack $ show f + pure (ustk, bstk) +bprim1 !ustk !bstk USNC i + = peekOffBi bstk i >>= \t -> case Tx.unsnoc t of + Nothing -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + Just (t, c) -> do + ustk <- bumpn ustk 2 + bstk <- bump bstk + pokeOff ustk 1 $ fromEnum c + poke ustk 1 + pokeBi bstk t + pure (ustk, bstk) +bprim1 !ustk !bstk UCNS i + = peekOffBi bstk i >>= \t -> case Tx.uncons t of + Nothing -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + Just (c, t) -> do + ustk <- bumpn ustk 2 + bstk <- bump bstk + pokeOff ustk 1 $ fromEnum c + poke ustk 1 + pokeBi bstk t + pure (ustk, bstk) +bprim1 !ustk !bstk TTOI i + = peekOffBi bstk i >>= \t -> case readm $ Tx.unpack t of + Nothing -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + Just n -> do + ustk <- bumpn ustk 2 + poke ustk 1 + pokeOff ustk 1 n + pure (ustk, bstk) + where + readm ('+':s) = readMaybe s + readm s = readMaybe s +bprim1 !ustk !bstk TTON i + = peekOffBi bstk i >>= \t -> case readMaybe $ Tx.unpack t of + Nothing -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + Just n -> do + ustk <- bumpn ustk 2 + poke ustk 1 + pokeOffN ustk 
1 n + pure (ustk, bstk) +bprim1 !ustk !bstk TTOF i + = peekOffBi bstk i >>= \t -> case readMaybe $ Tx.unpack t of + Nothing -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + Just f -> do + ustk <- bumpn ustk 2 + poke ustk 1 + pokeOffD ustk 1 f + pure (ustk, bstk) +bprim1 !ustk !bstk VWLS i + = peekOffS bstk i >>= \case + Sq.Empty -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + x Sq.:<| xs -> do + ustk <- bump ustk + poke ustk 1 + bstk <- bumpn bstk 2 + pokeOffS bstk 1 xs + poke bstk x + pure (ustk, bstk) +bprim1 !ustk !bstk VWRS i + = peekOffS bstk i >>= \case + Sq.Empty -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + xs Sq.:|> x -> do + ustk <- bump ustk + poke ustk 1 + bstk <- bumpn bstk 2 + pokeOff bstk 1 x + pokeS bstk xs + pure (ustk, bstk) +bprim1 !ustk !bstk PAKT i = do + s <- peekOffS bstk i + bstk <- bump bstk + pokeBi bstk . Tx.pack . toList $ clo2char <$> s + pure (ustk, bstk) + where + clo2char (DataU1 655360 i) = toEnum i + clo2char c = error $ "pack text: non-character closure: " ++ show c +bprim1 !ustk !bstk UPKT i = do + t <- peekOffBi bstk i + bstk <- bump bstk + pokeS bstk . Sq.fromList + . fmap (DataU1 655360 . fromEnum) . Tx.unpack $ t + pure (ustk, bstk) +bprim1 !ustk !bstk PAKB i = do + s <- peekOffS bstk i + bstk <- bump bstk + pokeBi bstk . By.fromWord8s . fmap clo2w8 $ toList s + pure (ustk, bstk) + where + clo2w8 (DataU1 65536 n) = toEnum n + clo2w8 c = error $ "pack bytes: non-natural closure: " ++ show c +bprim1 !ustk !bstk UPKB i = do + b <- peekOffBi bstk i + bstk <- bump bstk + pokeS bstk . Sq.fromList . fmap (DataU1 65536 . fromEnum) + $ By.toWord8s b + pure (ustk, bstk) +bprim1 !ustk !bstk SIZB i = do + b <- peekOffBi bstk i + ustk <- bump ustk + poke ustk $ By.size b + pure (ustk, bstk) +bprim1 !ustk !bstk FLTB i = do + b <- peekOffBi bstk i + bstk <- bump bstk + pokeBi bstk $ By.flatten b + pure (ustk, bstk) +bprim1 !_ !bstk THRO i + = throwIO . 
BU =<< peekOff bstk i +{-# inline bprim1 #-} + +bprim2 + :: Stack 'UN -> Stack 'BX -> BPrim2 -> Int -> Int + -> IO (Stack 'UN, Stack 'BX) +bprim2 !ustk !bstk EQLU i j = do + x <- peekOff bstk i + y <- peekOff bstk j + ustk <- bump ustk + poke ustk $ if x == y then 1 else 0 + pure (ustk, bstk) +bprim2 !ustk !bstk DRPT i j = do + n <- peekOff ustk i + t <- peekOffBi bstk j + bstk <- bump bstk + pokeBi bstk $ Tx.drop n t + pure (ustk, bstk) +bprim2 !ustk !bstk CATT i j = do + x <- peekOffBi bstk i + y <- peekOffBi bstk j + bstk <- bump bstk + pokeBi bstk $ Tx.append x y + pure (ustk, bstk) +bprim2 !ustk !bstk TAKT i j = do + n <- peekOff ustk i + t <- peekOffBi bstk j + bstk <- bump bstk + pokeBi bstk $ Tx.take n t + pure (ustk, bstk) +bprim2 !ustk !bstk EQLT i j = do + x <- peekOffBi @Tx.Text bstk i + y <- peekOffBi bstk j + ustk <- bump ustk + poke ustk $ if x == y then 1 else 0 + pure (ustk, bstk) +bprim2 !ustk !bstk LEQT i j = do + x <- peekOffBi @Tx.Text bstk i + y <- peekOffBi bstk j + ustk <- bump ustk + poke ustk $ if x <= y then 1 else 0 + pure (ustk, bstk) +bprim2 !ustk !bstk LEST i j = do + x <- peekOffBi @Tx.Text bstk i + y <- peekOffBi bstk j + ustk <- bump ustk + poke ustk $ if x < y then 1 else 0 + pure (ustk, bstk) +bprim2 !ustk !bstk DRPS i j = do + n <- peekOff ustk i + s <- peekOffS bstk j + bstk <- bump bstk + pokeS bstk $ Sq.drop n s + pure (ustk, bstk) +bprim2 !ustk !bstk TAKS i j = do + n <- peekOff ustk i + s <- peekOffS bstk j + bstk <- bump bstk + pokeS bstk $ Sq.take n s + pure (ustk, bstk) +bprim2 !ustk !bstk CONS i j = do + x <- peekOff bstk i + s <- peekOffS bstk j + bstk <- bump bstk + pokeS bstk $ x Sq.<| s + pure (ustk, bstk) +bprim2 !ustk !bstk SNOC i j = do + s <- peekOffS bstk i + x <- peekOff bstk j + bstk <- bump bstk + pokeS bstk $ s Sq.|> x + pure (ustk, bstk) +bprim2 !ustk !bstk CATS i j = do + x <- peekOffS bstk i + y <- peekOffS bstk j + bstk <- bump bstk + pokeS bstk $ x Sq.>< y + pure (ustk, bstk) +bprim2 !ustk !bstk IDXS i 
j = do + n <- peekOff ustk i + s <- peekOffS bstk j + case Sq.lookup n s of + Nothing -> do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + Just x -> do + ustk <- bump ustk + poke ustk 1 + bstk <- bump bstk + poke bstk x + pure (ustk, bstk) +bprim2 !ustk !bstk SPLL i j = do + n <- peekOff ustk i + s <- peekOffS bstk j + if Sq.length s < n then do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + else do + ustk <- bump ustk + poke ustk 1 + bstk <- bumpn bstk 2 + let (l,r) = Sq.splitAt n s + pokeOffS bstk 1 r + pokeS bstk l + pure (ustk, bstk) +bprim2 !ustk !bstk SPLR i j = do + n <- peekOff ustk i + s <- peekOffS bstk j + if Sq.length s < n then do + ustk <- bump ustk + poke ustk 0 + pure (ustk, bstk) + else do + ustk <- bump ustk + poke ustk 1 + bstk <- bumpn bstk 2 + let (l,r) = Sq.splitAt (Sq.length s - n) s + pokeOffS bstk 1 r + pokeS bstk l + pure (ustk, bstk) +bprim2 !ustk !bstk TAKB i j = do + n <- peekOff ustk i + b <- peekOffBi bstk j + bstk <- bump bstk + pokeBi bstk $ By.take n b + pure (ustk, bstk) +bprim2 !ustk !bstk DRPB i j = do + n <- peekOff ustk i + b <- peekOffBi bstk j + bstk <- bump bstk + pokeBi bstk $ By.drop n b + pure (ustk, bstk) +bprim2 !ustk !bstk IDXB i j = do + n <- peekOff ustk i + b <- peekOffBi bstk j + ustk <- bump ustk + ustk <- case By.at n b of + Nothing -> ustk <$ poke ustk 0 + Just x -> do + poke ustk $ fromIntegral x + ustk <- bump ustk + ustk <$ poke ustk 0 + pure (ustk, bstk) +bprim2 !ustk !bstk CATB i j = do + l <- peekOffBi bstk i + r <- peekOffBi bstk j + bstk <- bump bstk + pokeBi bstk (l <> r :: By.Bytes) + pure (ustk, bstk) +bprim2 !ustk !bstk CMPU _ _ = pure (ustk, bstk) -- impossible +{-# inline bprim2 #-} + +yield + :: Unmask -> SEnv -> DEnv + -> Stack 'UN -> Stack 'BX -> K -> IO () +yield unmask !env !denv !ustk !bstk !k = leap denv k + where + leap !denv0 (Mark ps cs k) = do + let denv = cs <> EC.withoutKeys denv0 ps + clo = denv0 EC.! EC.findMin ps + poke bstk . 
DataB1 0 =<< peek bstk + apply unmask env denv ustk bstk k False (BArg1 0) clo + leap !denv (Push ufsz bfsz uasz basz nx k) = do + ustk <- restoreFrame ustk ufsz uasz + bstk <- restoreFrame bstk bfsz basz + eval unmask env denv ustk bstk k nx + leap _ (CB (Hook f)) = f ustk bstk + leap _ KE = pure () +{-# inline yield #-} + +selectTextBranch + :: Tx.Text -> Section -> M.Map Tx.Text Section -> Section +selectTextBranch t df cs = M.findWithDefault df t cs +{-# inline selectTextBranch #-} + +selectBranch :: Tag -> Branch -> Section +selectBranch t (Test1 u y n) + | t == u = y + | otherwise = n +selectBranch t (Test2 u cu v cv e) + | t == u = cu + | t == v = cv + | otherwise = e +selectBranch t (TestW df cs) = lookupWithDefault df t cs +selectBranch _ (TestT {}) = error "impossible" +{-# inline selectBranch #-} + +splitCont + :: DEnv -> Stack 'UN -> Stack 'BX -> K + -> Word64 -> IO (K, DEnv, Stack 'UN, Stack 'BX, Seg 'UN, Seg 'BX, K) +splitCont !denv !ustk !bstk !k !p + = walk denv (asize ustk) (asize bstk) KE k + where + walk !denv !usz !bsz !ck KE + = die "fell off stack" >> finish denv usz bsz ck KE + walk !denv !usz !bsz !ck (CB _) + = die "fell off stack" >> finish denv usz bsz ck KE + walk !denv !usz !bsz !ck (Mark ps cs k) + | EC.member p ps = finish denv' usz bsz ck k + | otherwise = walk denv' usz bsz (Mark ps cs' ck) k + where + denv' = cs <> EC.withoutKeys denv ps + cs' = EC.restrictKeys denv ps + walk !denv !usz !bsz !ck (Push un bn ua ba br k) + = walk denv (usz+un+ua) (bsz+bn+ba) (Push un bn ua ba br ck) k + + finish !denv !usz !bsz !ck !k = do + (useg, ustk) <- grab ustk usz + (bseg, bstk) <- grab bstk bsz + return (ck, denv, ustk, bstk, useg, bseg, k) +{-# inline splitCont #-} + +discardCont + :: DEnv -> Stack 'UN -> Stack 'BX -> K + -> Word64 -> IO (DEnv, Stack 'UN, Stack 'BX, K) +discardCont denv ustk bstk k p + = splitCont denv ustk bstk k p + <&> \(_, denv, ustk, bstk, _, _, k) -> (denv, ustk, bstk, k) +{-# inline discardCont #-} + +resolve :: SEnv 
-> DEnv -> Stack 'BX -> Ref -> IO Closure +resolve env _ _ (Env i) = case EC.lookup i (combs env) of + Just cmb -> return $ PAp (IC i cmb) unull bnull + _ -> die $ "resolve: looked up unknown combinator: " ++ show i +resolve _ _ bstk (Stk i) = peekOff bstk i +resolve _ denv _ (Dyn i) = case EC.lookup i denv of + Just clo -> pure clo + _ -> die $ "resolve: looked up bad dynamic: " ++ show i diff --git a/parser-typechecker/src/Unison/Runtime/Pattern.hs b/parser-typechecker/src/Unison/Runtime/Pattern.hs new file mode 100644 index 0000000000..e598c1ed94 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Pattern.hs @@ -0,0 +1,720 @@ +{-# language BangPatterns #-} +{-# language ViewPatterns #-} +{-# language PatternGuards #-} +{-# language TupleSections #-} +{-# language PatternSynonyms #-} +{-# language OverloadedStrings #-} + +module Unison.Runtime.Pattern + ( DataSpec + , splitPatterns + , builtinDataSpec + ) where + +import Control.Lens ((<&>)) +import Control.Monad.State (State, state, evalState, runState, modify) + +import Data.List (transpose) +import Data.Maybe (catMaybes, listToMaybe) +import Data.Word (Word64) + +import Data.Set (Set, member) +import qualified Data.Set as Set + +import Unison.ABT + (absChain', visitPure, pattern AbsN', changeVars) +import Unison.Builtin.Decls (builtinDataDecls, builtinEffectDecls) +import Unison.DataDeclaration (declFields) +import Unison.Pattern +import qualified Unison.Pattern as P +import Unison.Reference (Reference(..)) +import Unison.Symbol (Symbol) +import Unison.Term hiding (Term) +import qualified Unison.Term as Tm +import Unison.Var (Var, typed, freshIn, freshenId, Type(Pattern)) + +import qualified Unison.Type as Rf + +import Data.Map.Strict + (Map, toList, fromListWith, insertWith) +import qualified Data.Map.Strict as Map + +type Term v = Tm.Term v () + +-- Represents the number of fields of constructors of a data type/ +-- ability in order of constructors +type Cons = [Int] +type NCons = [(Int,Int)] + +-- 
Maps references to the constructor information for abilities (left) +-- and data types (right) +type DataSpec = Map Reference (Either Cons Cons) + +data PType = PData Reference | PReq (Set Reference) | Unknown + +instance Semigroup PType where + Unknown <> r = r + l <> Unknown = l + t@(PData l) <> PData r + | l == r = t + PReq l <> PReq r = PReq (l <> r) + _ <> _ = error "inconsistent pattern matching types" + +instance Monoid PType where + mempty = Unknown + mappend = (<>) + +type Ctx v = Map v PType + +-- Representation of a row in a pattern compilation matrix. +-- There is a list of patterns annotated with the variables they +-- are matching against, an optional guard, and the body of the +-- 'match' clause associated with this row. +data PatternRow v + = PR + { matches :: [Pattern v] + , guard :: Maybe (Term v) + , body :: Term v + } deriving (Show) + +-- This is the data and ability 'constructor' information for all +-- the things defined in Haskell source code. +builtinDataSpec :: DataSpec +builtinDataSpec = Map.fromList decls + where + decls = [ (DerivedId x, declFields $ Right y) + | (_,x,y) <- builtinDataDecls @Symbol ] + ++ [ (DerivedId x, declFields $ Left y) + | (_,x,y) <- builtinEffectDecls @Symbol ] + +-- A pattern compilation matrix is just a list of rows. There is +-- no need for the rows to have uniform length; the variable +-- annotations on the patterns in the rows keep track of what +-- should be matched against when decomposing a matrix. +data PatternMatrix v + = PM { _rows :: [PatternRow v] } + deriving Show + +-- Heuristics guide the pattern compilation. They inspect the +-- pattern matrix and (may) choose a variable to split on next. +-- The full strategy will consist of multiple heuristics composed +-- in series. 
+type Heuristic v = PatternMatrix v -> Maybe v + +choose :: [Heuristic v] -> PatternMatrix v -> v +choose [] (PM (PR (p:_) _ _ : _)) = loc p +choose [] _ = error "pattern matching: failed to choose a splitting" +choose (h:hs) m + | Just i <- h m = i + | otherwise = choose hs m + +refutable :: P.Pattern a -> Bool +refutable (P.Unbound _) = False +refutable (P.Var _) = False +refutable _ = True + +rowIrrefutable :: PatternRow v -> Bool +rowIrrefutable (PR ps _ _) = null ps + +firstRow :: ([P.Pattern v] -> Maybe v) -> Heuristic v +firstRow f (PM (r:_)) = f $ matches r +firstRow _ _ = Nothing + +heuristics :: [Heuristic v] +heuristics = [firstRow $ fmap loc . listToMaybe] + +extractVar :: Var v => P.Pattern v -> Maybe v +extractVar p + | P.Unbound{} <- p = Nothing + | otherwise = Just (loc p) + +extractVars :: Var v => [P.Pattern v] -> [v] +extractVars = catMaybes . fmap extractVar + +-- Splits a data type pattern, yielding its subpatterns. The provided +-- integers are the tag and number of fields for the constructor being +-- matched against. A constructor pattern thus only yields results if +-- it matches the tag (and number of subpatterns as a consistency +-- check), while variables can also match and know how many subpatterns +-- to yield. +-- +-- The outer list indicates success of the match. It could be Maybe, +-- but elsewhere these results are added to a list, so it is more +-- convenient to yield a list here. 
+decomposePattern + :: Var v + => Reference -> Int -> Int -> P.Pattern v + -> [[P.Pattern v]] +decomposePattern rf0 t nfields p@(P.Constructor _ rf u ps) + | t == u + , rf0 == rf + = if length ps == nfields + then [ps] + else error err + where + err = "decomposePattern: wrong number of constructor fields: " + ++ show (nfields, p) +decomposePattern rf0 t nfields p@(P.EffectBind _ rf u ps pk) + | t == u + , rf0 == rf + = if length ps == nfields + then [ps ++ [pk]] + else error err + where + err = "decomposePattern: wrong number of ability fields: " + ++ show (nfields, p) +decomposePattern _ t _ (P.EffectPure _ p) + | t == -1 = [[p]] +decomposePattern _ _ nfields (P.Var _) + = [replicate nfields (P.Unbound (typed Pattern))] +decomposePattern _ _ nfields (P.Unbound _) + = [replicate nfields (P.Unbound (typed Pattern))] +decomposePattern _ _ _ (P.SequenceLiteral _ _) + = error "decomposePattern: sequence literal" +decomposePattern _ _ _ _ = [] + +matchBuiltin :: P.Pattern a -> Maybe (P.Pattern ()) +matchBuiltin (P.Var _) = Just $ P.Unbound () +matchBuiltin (P.Unbound _) = Just $ P.Unbound () +matchBuiltin (P.Nat _ n) = Just $ P.Nat () n +matchBuiltin (P.Int _ n) = Just $ P.Int () n +matchBuiltin (P.Text _ t) = Just $ P.Text () t +matchBuiltin (P.Char _ c) = Just $ P.Char () c +matchBuiltin (P.Float _ d) = Just $ P.Float () d +matchBuiltin _ = Nothing + +-- Represents the various cases that that may occur when performing +-- a sequence match. These fall into two groups: +-- +-- E, C, S: empty, cons, snoc +-- L, R, DL, DR: split left/right, insufficient elements +-- +-- These groups correspond to different sorts of matches we can compile +-- to. We can view the left/right end of a sequence, or attempt to +-- split a sequence at a specified offset from the left/right side. 
+data SeqMatch = E | C | S | L Int | R Int | DL Int | DR Int + deriving (Eq,Ord,Show) + +seqPSize :: P.Pattern v -> Maybe Int +seqPSize (P.SequenceLiteral _ l) = Just $ length l +seqPSize (P.SequenceOp _ _ Cons r) = (1+) <$> seqPSize r +seqPSize (P.SequenceOp _ l Snoc _) = (1+) <$> seqPSize l +seqPSize (P.SequenceOp _ l Concat r) = (+) <$> seqPSize l <*> seqPSize r +seqPSize _ = Nothing + +-- Decides a sequence matching operation to perform based on a column +-- of patterns that match against it. Literals do not really force a +-- bias, so the decision of which side to view is postponed to +-- subsequent patterns if we encounter a literal first. A literal with +-- priority does block a splitting pattern, though. +decideSeqPat :: [P.Pattern v] -> [SeqMatch] +decideSeqPat = go False + where + go _ [] = [E,C] + go _ (P.SequenceLiteral{} : ps) = go True ps + go _ (P.SequenceOp _ _ Snoc _ : _) = [E,S] + go _ (P.SequenceOp _ _ Cons _ : _) = [E,C] + + go guard (P.SequenceOp _ l Concat r : _) + | guard = [E,C] -- prefer prior literals + | Just n <- seqPSize l = [L n, DL n] + | Just n <- seqPSize r = [R n, DR n] + go b (P.Unbound _ : ps) = go b ps + go b (P.Var _ : ps) = go b ps + go _ (p:_) + = error $ "Cannot process sequence pattern: " ++ show p + +-- Represents the possible correspondences between a sequence pattern +-- and a sequence matching compilation target. Unlike data matching, +-- where a pattern either matches or is disjoint from a tag, sequence +-- patterns can overlap in non-trivial ways where it would be difficult +-- to avoid re-testing the original list. +data SeqCover v + = Cover [P.Pattern v] + | Disjoint + | Overlap + +-- Determines how a pattern corresponds to a sequence matching +-- compilation target. 
+decomposeSeqP :: Var v => Set v -> SeqMatch -> P.Pattern v -> SeqCover v +decomposeSeqP _ E (P.SequenceLiteral _ []) = Cover [] +decomposeSeqP _ E _ = Disjoint + +decomposeSeqP _ C (P.SequenceOp _ l Cons r) = Cover [l,r] +decomposeSeqP _ C (P.SequenceOp _ _ Concat _) = Overlap +decomposeSeqP _ C (P.SequenceLiteral _ []) = Disjoint +decomposeSeqP avoid C (P.SequenceLiteral _ (p:ps)) + = Cover [p, P.SequenceLiteral u ps] + where u = freshIn avoid $ typed Pattern + +decomposeSeqP _ S (P.SequenceOp _ l Snoc r) = Cover [l,r] +decomposeSeqP _ S (P.SequenceOp _ _ Concat _) = Overlap +decomposeSeqP _ S (P.SequenceLiteral _ []) = Disjoint +decomposeSeqP avoid S (P.SequenceLiteral _ ps) + = Cover [P.SequenceLiteral u (init ps), last ps] + where u = freshIn avoid $ typed Pattern + +decomposeSeqP _ (L n) (P.SequenceOp _ l Concat r) + | Just m <- seqPSize l + , n == m + = Cover [l,r] + | otherwise = Overlap +decomposeSeqP avoid (L n) (P.SequenceLiteral _ ps) + | length ps >= n + , (pl, pr) <- splitAt n ps + = Cover $ P.SequenceLiteral u <$> [pl,pr] + | otherwise = Disjoint + where u = freshIn avoid $ typed Pattern + +decomposeSeqP _ (R n) (P.SequenceOp _ l Concat r) + | Just m <- seqPSize r + , n == m + = Cover [l,r] +decomposeSeqP avoid (R n) (P.SequenceLiteral _ ps) + | length ps >= n + , (pl, pr) <- splitAt (length ps - n) ps + = Cover $ P.SequenceLiteral u <$> [pl,pr] + | otherwise = Disjoint + where u = freshIn avoid $ typed Pattern + +decomposeSeqP _ (DL n) (P.SequenceOp _ l Concat _) + | Just m <- seqPSize l , n == m = Disjoint +decomposeSeqP _ (DL n) (P.SequenceLiteral _ ps) + | length ps >= n = Disjoint + +decomposeSeqP _ (DR n) (P.SequenceOp _ _ Concat r) + | Just m <- seqPSize r , n == m = Disjoint +decomposeSeqP _ (DR n) (P.SequenceLiteral _ ps) + | length ps >= n = Disjoint + +decomposeSeqP _ _ _ = Overlap + +-- Splits a pattern row with respect to matching a variable against a +-- data type constructor. 
If the row would match the specified +-- constructor, the subpatterns and resulting row are yielded. A list +-- is used as the result value to indicate success or failure to match, +-- because these results are accumulated into a larger list elsewhere. +splitRow + :: Var v + => v + -> Reference + -> Int + -> Int + -> PatternRow v + -> [([P.Pattern v], PatternRow v)] +splitRow v rf t nfields (PR (break ((==v).loc) -> (pl, sp : pr)) g b) + = decomposePattern rf t nfields sp + <&> \subs -> (subs, PR (pl ++ filter refutable subs ++ pr) g b) +splitRow _ _ _ _ row = [([],row)] + +-- Splits a row with respect to a variable, expecting that the +-- variable will be matched against a builtin pattern (non-data type, +-- non-request, non-sequence). In addition to returning the +-- subpatterns and new row, returns a version of the pattern that was +-- matched against the variable that may be collected to determine the +-- cases the built-in value is matched against. +splitRowBuiltin + :: Var v + => v + -> PatternRow v + -> [(P.Pattern (), [([P.Pattern v], PatternRow v)])] +splitRowBuiltin v (PR (break ((==v).loc) -> (pl, sp : pr)) g b) + | Just p <- matchBuiltin sp = [(p, [([], PR (pl ++ pr) g b)])] + | otherwise = [] +splitRowBuiltin _ r = [(P.Unbound (), [([], r)])] + +-- Splits a row with respect to a variable, expecting that the +-- variable will be matched against a sequence matching operation. +-- Yields the subpatterns and a new row to be used in subsequent +-- compilation. The outer list result is used to indicate success or +-- failure. 
+splitRowSeq + :: Var v + => v + -> SeqMatch + -> PatternRow v + -> [([P.Pattern v], PatternRow v)] +splitRowSeq v m r@(PR (break ((==v).loc) -> (pl, sp : pr)) g b) + = case decomposeSeqP avoid m sp of + Cover sps -> + [(sps, PR (pl ++ filter refutable sps ++ pr) g b)] + Disjoint -> [] + Overlap -> [([], r)] + where avoid = maybe mempty freeVars g <> freeVars b +splitRowSeq _ _ r = [([], r)] + +-- Renames the variables annotating the patterns in a row, for once a +-- canonical choice has been made. +renameRow :: Var v => Map v v -> PatternRow v -> PatternRow v +renameRow m (PR p0 g0 b0) = PR p g b + where + access k + | Just v <- Map.lookup k m = v + | otherwise = k + p = (fmap.fmap) access p0 + g = changeVars m <$> g0 + b = changeVars m b0 + +-- Chooses a common set of variables for use when decomposing +-- patterns into multiple sub-patterns. It is too naive to simply use +-- the variables in the first row, because it may have been generated +-- by decomposing a variable or unbound pattern, which will make up +-- variables for subpatterns. +chooseVars :: Var v => [[P.Pattern v]] -> [v] +chooseVars [] = [] +chooseVars ([]:rs) = chooseVars rs +chooseVars ((P.Unbound{} : _) : rs) = chooseVars rs +chooseVars (r : _) = extractVars r + +-- Creates a pattern matrix from many rows with the subpatterns +-- introduced during the splitting that generated those rows. Also +-- yields an indication of the type of the variables that the +-- subpatterns match against, if possible. +buildMatrix + :: Var v + => [([P.Pattern v], PatternRow v)] + -> ([(v,PType)], PatternMatrix v) +buildMatrix [] = ([], PM []) +buildMatrix vrs = (zip cvs rs, PM $ fixRow <$> vrs) + where + rs = fmap determineType . transpose . fmap fst $ vrs + cvs = chooseVars $ fst <$> vrs + fixRow (extractVars -> rvs, pr) + = renameRow (fromListWith const . zip rvs $ cvs) pr + +-- Splits a pattern matrix on a given variable, expected to be matched +-- against builtin type patterns. 
Yields the cases covered and +-- corresponding matrices for those cases, with types for any new +-- variables (although currently builtin patterns do not introduce +-- variables). +splitMatrixBuiltin + :: Var v + => v + -> PatternMatrix v + -> [(P.Pattern (), [(v,PType)], PatternMatrix v)] +splitMatrixBuiltin v (PM rs) + = fmap (\(a,(b,c)) -> (a,b,c)) + . toList + . fmap buildMatrix + . fromListWith (++) + $ splitRowBuiltin v =<< rs + +matchPattern :: [(v,PType)] -> SeqMatch -> P.Pattern () +matchPattern vrs = \case + E -> sz 0 + C -> P.SequenceOp () vr Cons vr + S -> P.SequenceOp () vr Snoc vr + L n -> P.SequenceOp () (sz n) Concat (P.Var ()) + R n -> P.SequenceOp () (P.Var ()) Concat (sz n) + DL _ -> P.Unbound () + DR _ -> P.Unbound () + where + vr | [] <- vrs = P.Unbound () | otherwise = P.Var () + sz n = P.SequenceLiteral () . replicate n $ P.Unbound () + +-- Splits a matrix at a given variable with respect to sequence +-- patterns. Yields the appropriate patterns for the covered cases, +-- variables introduced for each case with their types, and new +-- matricies for subsequent compilation. +splitMatrixSeq + :: Var v + => v + -> PatternMatrix v + -> [(P.Pattern (), [(v,PType)], PatternMatrix v)] +splitMatrixSeq v (PM rs) + = cases + where + ms = decideSeqPat $ take 1 . dropWhile ((/=v).loc) . matches =<< rs + hint m vrs + | m `elem` [E,C,S] = vrs + | otherwise = (fmap.fmap) (const $ PData Rf.vectorRef) vrs + cases = ms <&> \m -> + let frs = rs >>= splitRowSeq v m + (vrs, pm) = buildMatrix frs + in (matchPattern vrs m, hint m vrs, pm) + +-- Splits a matrix at a given variable with respect to a data type or +-- ability match. Yields a new matrix for each constructor, with +-- variables introduced and their types for each case. +splitMatrix + :: Var v + => v + -> Reference + -> NCons + -> PatternMatrix v + -> [(Int, [(v,PType)], PatternMatrix v)] +splitMatrix v rf cons (PM rs) + = fmap (\(a, (b, c)) -> (a,b,c)) . 
(fmap.fmap) buildMatrix $ mmap + where + mmap = fmap (\(t,fs) -> (t, splitRow v rf t fs =<< rs)) cons + +-- Monad for pattern preparation. It is a state monad carrying a fresh +-- variable source, the list of variables bound the the pattern being +-- prepared, and a variable renaming mapping. +type PPM v a = State (Word64, [v], Map v v) a + +freshVar :: Var v => PPM v v +freshVar = state $ \(fw, vs, rn) -> + let v = freshenId fw $ typed Pattern + in (v, (fw+1, vs, rn)) + +useVar :: PPM v v +useVar = state $ \(avoid, v:vs, rn) -> (v, (avoid, vs, rn)) + +renameTo :: Var v => v -> v -> PPM v () +renameTo to from + = modify $ \(avoid, vs, rn) -> + ( avoid, vs + , insertWith (error "renameTo: duplicate rename") from to rn + ) + +-- Tries to rewrite sequence patterns into a format that can be +-- matched most flexibly. +normalizeSeqP :: P.Pattern a -> P.Pattern a +normalizeSeqP (P.As a p) = P.As a (normalizeSeqP p) +normalizeSeqP (P.EffectPure a p) = P.EffectPure a $ normalizeSeqP p +normalizeSeqP (P.EffectBind a r i ps k) + = P.EffectBind a r i (normalizeSeqP <$> ps) (normalizeSeqP k) +normalizeSeqP (P.Constructor a r i ps) + = P.Constructor a r i $ normalizeSeqP <$> ps +normalizeSeqP (P.SequenceLiteral a ps) + = P.SequenceLiteral a $ normalizeSeqP <$> ps +normalizeSeqP (P.SequenceOp a p0 op q0) + = case (op, normalizeSeqP p0, normalizeSeqP q0) of + (Cons, p, P.SequenceLiteral _ ps) + -> P.SequenceLiteral a (p:ps) + (Snoc, P.SequenceLiteral _ ps, p) + -> P.SequenceLiteral a (ps ++ [p]) + (Concat, P.SequenceLiteral _ ps, P.SequenceLiteral _ qs) + -> P.SequenceLiteral a (ps ++ qs) + (Concat, P.SequenceLiteral _ ps, q) + -> foldr (\p r -> P.SequenceOp a p Cons r) q ps + (Concat, p, P.SequenceLiteral _ qs) + -> foldl (\r q -> P.SequenceOp a r Snoc q) p qs + (op, p, q) -> P.SequenceOp a p op q +normalizeSeqP p = p + +-- Prepares a pattern for compilation, like `preparePattern`. 
This +-- function, however, is used when a candidate variable for a pattern +-- has already been chosen, as with an As pattern. This allows turning +-- redundant names (like with the pattern u@v) into renamings. +prepareAs :: Var v => P.Pattern a -> v -> PPM v (P.Pattern v) +prepareAs (P.Unbound _) u = pure $ P.Var u +prepareAs (P.As _ p) u = prepareAs p u <* (renameTo u =<< useVar) +prepareAs (P.Var _) u = P.Var u <$ (renameTo u =<< useVar) +prepareAs (P.Constructor _ r i ps) u = do + P.Constructor u r i <$> traverse preparePattern ps +prepareAs (P.EffectPure _ p) u = do + P.EffectPure u <$> preparePattern p +prepareAs (P.EffectBind _ r i ps k) u = do + P.EffectBind u r i + <$> traverse preparePattern ps + <*> preparePattern k +prepareAs (P.SequenceLiteral _ ps) u = do + P.SequenceLiteral u <$> traverse preparePattern ps +prepareAs (P.SequenceOp _ p op q) u = do + flip (P.SequenceOp u) op + <$> preparePattern p + <*> preparePattern q +prepareAs p u = pure $ u <$ p + +-- Prepares a pattern for compilation. This removes the existing +-- annotations and replaces them with a choice of variable that the +-- pattern is matching against. As patterns are eliminated and the +-- variables they bind are used as candidates for what that level of +-- the pattern matches against. +preparePattern :: Var v => P.Pattern a -> PPM v (P.Pattern v) +preparePattern (P.Unbound _) = P.Var <$> freshVar +preparePattern (P.Var _) = P.Var <$> useVar +preparePattern (P.As _ p) = prepareAs p =<< useVar +preparePattern p = prepareAs p =<< freshVar + +buildPattern :: Bool -> Reference -> Int -> [v] -> Int -> P.Pattern () +buildPattern effect r t vs nfields + | effect, [] <- vps = error "too few patterns for effect bind" + | effect = P.EffectBind () r t (init vps) (last vps) + | otherwise = P.Constructor () r t vps + where + vps | length vs < nfields + = replicate nfields $ P.Unbound () + | otherwise + = P.Var () <$ vs + +numberCons :: Cons -> NCons +numberCons = zip [0..] 
+ +lookupData :: Reference -> DataSpec -> Either String Cons +lookupData rf (Map.lookup rf -> Just econs) + = case econs of + Left _ -> Left $ "data type matching on ability: " ++ show rf + Right cs -> Right cs +lookupData rf _ = Left $ "unknown data reference: " ++ show rf + +lookupAbil :: Reference -> DataSpec -> Either String Cons +lookupAbil rf (Map.lookup rf -> Just econs) + = case econs of + Right _ -> Left $ "ability matching on data: " ++ show rf + Left cs -> Right cs +lookupAbil rf _ = Left $ "unknown ability reference: " ++ show rf + +compile :: Var v => DataSpec -> Ctx v -> PatternMatrix v -> Term v +compile _ _ (PM []) = blank () +compile spec ctx m@(PM (r:rs)) + | rowIrrefutable r + = case guard r of + Nothing -> body r + Just g -> iff mempty g (body r) $ compile spec ctx (PM rs) + | PData rf <- ty + , rf == Rf.vectorRef + = match () (var () v) + $ buildCaseBuiltin spec ctx + <$> splitMatrixSeq v m + | PData rf <- ty + , rf `member` builtinCase + = match () (var () v) + $ buildCaseBuiltin spec ctx + <$> splitMatrixBuiltin v m + | PData rf <- ty + = case lookupData rf spec of + Right cons -> + match () (var () v) + $ buildCase spec rf False cons ctx + <$> splitMatrix v rf (numberCons cons) m + Left err -> error err + | PReq rfs <- ty + = match () (var () v) $ + [ buildCasePure spec ctx tup + | tup <- splitMatrix v undefined [(-1,1)] m + ] ++ + [ buildCase spec rf True cons ctx tup + | rf <- Set.toList rfs + , Right cons <- [lookupAbil rf spec] + , tup <- splitMatrix v rf (numberCons cons) m + ] + | Unknown <- ty + = error "unknown pattern compilation type" + where + v = choose heuristics m + ty = Map.findWithDefault Unknown v ctx + +buildCaseBuiltin + :: Var v + => DataSpec + -> Ctx v + -> (P.Pattern (), [(v,PType)], PatternMatrix v) + -> MatchCase () (Term v) +buildCaseBuiltin spec ctx0 (p, vrs, m) + = MatchCase p Nothing . absChain' vs $ compile spec ctx m + where + vs = ((),) . 
fst <$> vrs + ctx = Map.fromList vrs <> ctx0 + +buildCasePure + :: Var v + => DataSpec + -> Ctx v + -> (Int, [(v,PType)], PatternMatrix v) + -> MatchCase () (Term v) +buildCasePure spec ctx0 (_, vts, m) + = MatchCase pat Nothing . absChain' vs $ compile spec ctx m + where + pat = P.EffectPure () (P.Var ()) + vs = ((),) . fst <$> vts + ctx = Map.fromList vts <> ctx0 + +buildCase + :: Var v + => DataSpec + -> Reference + -> Bool + -> Cons + -> Ctx v + -> (Int, [(v,PType)], PatternMatrix v) + -> MatchCase () (Term v) +buildCase spec r eff cons ctx0 (t, vts, m) + = MatchCase pat Nothing . absChain' vs $ compile spec ctx m + where + pat = buildPattern eff r t vs $ cons !! t + vs = ((),) . fst <$> vts + ctx = Map.fromList vts <> ctx0 + +mkRow + :: Var v + => v + -> MatchCase a (Term v) + -> State Word64 (PatternRow v) +mkRow sv (MatchCase (normalizeSeqP -> p0) g0 (AbsN' vs b)) + = state $ \w -> case runState (prepareAs p0 sv) (w, vs, mempty) of + (p, (w, [], rn)) -> (,w) + $ PR (filter refutable [p]) + (changeVars rn <$> g) + (changeVars rn b) + _ -> error "mkRow: not all variables used" + where + g = case g0 of + Just (AbsN' us g) + | us == vs -> Just g + | otherwise -> error "mkRow: guard variables do not match body" + Nothing -> Nothing + _ -> error "mkRow: impossible" +mkRow _ _ = error "mkRow: impossible" + +initialize + :: Var v + => PType + -> Term v + -> [MatchCase () (Term v)] + -> (Maybe v, (v, PType), PatternMatrix v) +initialize r sc cs + = ( lv + , (sv, r) + , PM $ evalState (traverse (mkRow sv) cs) 1 + ) + where + (lv, sv) | Var' v <- sc = (Nothing, v) + | pv <- freshenId 0 $ typed Pattern + = (Just pv, pv) + +splitPatterns :: Var v => DataSpec -> Term v -> Term v +splitPatterns spec0 = visitPure $ \case + Match' sc0 cs0 + | ty <- determineType $ p <$> cs0 + , (lv, scrut, pm) <- initialize ty sc cs + , body <- compile spec (uncurry Map.singleton scrut) pm + -> Just $ case lv of + Just v -> let1 False [(((),v), sc)] body + _ -> body + where + sc = 
splitPatterns spec sc0 + cs = fmap (splitPatterns spec) <$> cs0 + _ -> Nothing + where + p (MatchCase pp _ _) = pp + spec = Map.insert Rf.booleanRef (Right [0,0]) spec0 + +builtinCase :: Set Reference +builtinCase + = Set.fromList + [ Rf.intRef + , Rf.natRef + , Rf.floatRef + , Rf.textRef + , Rf.charRef + ] + +determineType :: Show a => [P.Pattern a] -> PType +determineType = foldMap f + where + f (P.As _ p) = f p + f P.Int{} = PData Rf.intRef + f P.Nat{} = PData Rf.natRef + f P.Float{} = PData Rf.floatRef + f P.Boolean{} = PData Rf.booleanRef + f P.Text{} = PData Rf.textRef + f P.Char{} = PData Rf.charRef + f P.SequenceLiteral{} = PData Rf.vectorRef + f P.SequenceOp{} = PData Rf.vectorRef + f (P.Constructor _ r _ _) = PData r + f (P.EffectBind _ r _ _ _) = PReq $ Set.singleton r + f P.EffectPure{} = PReq mempty + f _ = Unknown diff --git a/parser-typechecker/src/Unison/Runtime/Rt1.hs b/parser-typechecker/src/Unison/Runtime/Rt1.hs new file mode 100644 index 0000000000..8bee204d5e --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Rt1.hs @@ -0,0 +1,868 @@ +{-# Language BangPatterns #-} +{-# Language OverloadedStrings #-} +{-# Language Strict #-} +{-# Language StrictData #-} +{-# Language RankNTypes #-} +{-# Language PatternSynonyms #-} +{-# Language ViewPatterns #-} + + +module Unison.Runtime.Rt1 where + +import Unison.Prelude + +import Data.Bifunctor (second) +import Data.Bits ((.&.), (.|.), complement, countLeadingZeros, countTrailingZeros, shiftR, shiftL, xor) +import Data.IORef +import Unison.Runtime.IR (pattern CompilationEnv, pattern Req) +import Unison.Runtime.IR hiding (CompilationEnv, IR, Req, Value, Z) +import Unison.Symbol (Symbol) +import Unison.Util.CyclicEq (CyclicEq, cyclicEq) +import Unison.Util.CyclicOrd (CyclicOrd, cyclicOrd) +import Unison.Util.Monoid (intercalateMap) +import qualified System.Mem.StableName as S +import qualified Data.ByteString as BS +import qualified Data.List as List +import qualified Data.Map as Map +import qualified 
Data.Set as Set +import qualified Data.Text as Text +import qualified Data.Sequence as Sequence +import qualified Data.Vector.Mutable as MV +import qualified Unison.ABT as ABT +import qualified Unison.Codebase.CodeLookup as CL +import qualified Unison.DataDeclaration as DD +import qualified Unison.Reference as R +import qualified Unison.Runtime.IR as IR +import qualified Unison.Term as Term +import qualified Unison.Util.CycleTable as CT +import qualified Unison.Util.Bytes as Bytes +import qualified Unison.Var as Var + +type CompilationEnv = IR.CompilationEnv ExternalFunction Continuation +type IR = IR.IR ExternalFunction Continuation +type Req = IR.Req ExternalFunction Continuation +type Value = IR.Value ExternalFunction Continuation +type Z = IR.Z ExternalFunction Continuation +type Size = Int +type Stack = MV.IOVector Value + +-- The number of stack elements referenced by an IR +type NeededStack = Int + +data Continuation + = WrapHandler Value Continuation + | One NeededStack Size Stack IR + | Chain Symbol Continuation Continuation + +-- just returns its input +idContinuation :: IO Continuation +idContinuation = do + m0 <- MV.new 1 + pure $ One 0 1 m0 (IR.Leaf (IR.Slot 0)) + +instance Show Continuation where + show _c = "" + +instance External Continuation where + decompileExternal k = runDS $ Term.lam() paramName <$> go [paramName] k + where + paramName = Var.freshIn (used k) (Var.named "result") + used c = case c of + One _ _ _ ir -> boundVarsIR ir + WrapHandler _ k -> used k + Chain s k1 k2 -> Set.insert s (used k1 <> used k2) + go :: [Symbol] -> Continuation -> DS (Term Symbol) + go env k = case k of + WrapHandler h k -> Term.handle() <$> decompileImpl h <*> go env k + One _needed size m ir -> do + captured <- fmap Map.fromList . 
for (toList (freeSlots ir)) $ \i -> + (i,) <$> liftIO (at size (LazySlot i) m) + decompileIR env (specializeIR captured ir) + Chain s k1 k2 -> do + k1 <- go env k1 + Term.let1' False [(s, k1)] <$> go (s:env) k2 + +-- Wrap a `handle h` around the continuation inside the `Req`. +-- Ex: `k = x -> x + 1` becomes `x -> handle h in x + 1`. +wrapHandler :: Value -> Req -> Req +wrapHandler h (Req r cid args k) = Req r cid args (WrapHandler h k) + +-- Appends `k2` to the end of the `k` continuation +-- Ex: if `k` is `x -> x + 1` and `k2` is `y -> y + 4`, +-- this produces a continuation `x -> let r1 = x + 1; r1 + 4`. +appendCont :: Symbol -> Req -> Continuation -> Req +appendCont v (Req r cid args k) k2 = Req r cid args (Chain v k k2) + +data ExternalFunction = + ExternalFunction R.Reference (Size -> Stack -> IO Value) +instance Eq ExternalFunction where + ExternalFunction r _ == ExternalFunction r2 _ = r == r2 +instance External ExternalFunction where + decompileExternal (ExternalFunction r _) = pure $ Term.ref () r + +-- This function converts `Z` to a `Value`. +-- A bunch of variants follow. 
+at :: Size -> Z -> Stack -> IO Value +at size i m = case i of + Val v -> force v + Slot i -> + -- the top of the stack is slot 0, at index size - 1 + force =<< MV.read m (size - i - 1) + LazySlot i -> + MV.read m (size - i - 1) + External (ExternalFunction _ e) -> e size m + +atc :: Size -> Z -> Stack -> IO Char +atc size i m = at size i m >>= \case + C c -> pure c + v -> fail $ "type error, expecting C, got " <> show v + +ati :: Size -> Z -> Stack -> IO Int64 +ati size i m = at size i m >>= \case + I i -> pure i + v -> fail $ "type error, expecting I, got " <> show v + +atn :: Size -> Z -> Stack -> IO Word64 +atn size i m = at size i m >>= \case + N i -> pure i + v -> fail $ "type error, expecting N, got " <> show v + +atf :: Size -> Z -> Stack -> IO Double +atf size i m = at size i m >>= \case + F i -> pure i + v -> fail $ "type error, expecting F, got " <> show v + +atb :: Size -> Z -> Stack -> IO Bool +atb size i m = at size i m >>= \case + B b -> pure b + v -> fail $ "type error, expecting B, got " <> show v + +att :: Size -> Z -> Stack -> IO Text +att size i m = at size i m >>= \case + T t -> pure t + v -> do + stackStuff <- fmap (take 200 . show) <$> traverse (MV.read m) [0 .. 
size - 1] + traceM $ "nstack:\n" <> intercalateMap "\n" (take 200) stackStuff + fail $ "type error, expecting T at " <> show i <> ", got " <> show v + +atbs :: Size -> Z -> Stack -> IO Bytes.Bytes +atbs size i m = at size i m >>= \case + Bs v -> pure v + v -> fail $ "type error, expecting Bytes, got: " <> show v + +ats :: Size -> Z -> Stack -> IO (Seq Value) +ats size i m = at size i m >>= \case + Sequence v -> pure v + v -> fail $ "type error, expecting List, got: " <> show v + +atd :: Size -> Z -> Stack -> IO (R.Reference, ConstructorId, [Value]) +atd size i m = at size i m >>= \case + Data r id vs -> pure (r, id, vs) + v -> fail $ "type error, expecting Data, got " <> show v + +-- | `push` doesn't return the new stack size (is it for efficiency?), +-- so make sure that you add +1 to it yourself, after this call. +push :: Size -> Value -> Stack -> IO Stack +push size v m = do + m <- ensureSize (size + 1) m + MV.write m size v + pure m + +-- Values passed to pushMany* are already in stack order: +-- the first Value is deeper on the resulting stack than the final Value +pushMany :: Foldable f + => Size -> f Value -> Stack -> IO (Size, Stack) +pushMany size values m = do + m <- ensureSize (size + length values) m + let pushArg :: Size -> Value -> IO Size + pushArg size' val = do + MV.write m size' val + pure (size' + 1) + newSize <- foldM pushArg size values + pure (newSize, m) + +pushManyZ :: Foldable f => Size -> f Z -> Stack -> IO (Size, Stack) +pushManyZ size zs m = do + m <- ensureSize (size + length zs) m + let pushArg size' z = do + val <- at size z m -- variable lookup uses current size + MV.write m size' val + pure (size' + 1) + size2 <- foldM pushArg size zs + pure (size2, m) + +-- | Grow the physical stack to at least `size` slots +ensureSize :: Size -> Stack -> IO Stack +ensureSize size m = + if (size > MV.length m) then + MV.grow m size + else pure m + +force :: Value -> IO Value +force (Ref _ _ r) = readIORef r >>= force +force v = pure v + +data 
ErrorType = ErrorTypeTodo | ErrorTypeBug deriving Show + +data Result + = RRequest Req + | RMatchFail Size [Value] Value + | RDone Value + | RError ErrorType Value + deriving Show + +done :: Value -> IO Result +done v = pure (RDone v) + +arity :: Value -> Int +arity (Lam n _ _) = n +arity _ = 0 + +-- Creates a `CompilationEnv` by pulling out all the constructor arities for +-- types that are referenced by the given term, `t`. +compilationEnv :: Monad m + => CL.CodeLookup Symbol m a + -> Term.Term Symbol a + -> m CompilationEnv +compilationEnv env t = do + let typeDeps = Term.typeDependencies t + arityMap <- fmap (Map.fromList . join) . for (toList typeDeps) $ \case + r@(R.DerivedId id) -> do + decl <- CL.getTypeDeclaration env id + case decl of + Nothing -> error $ "no type declaration for " <> show id -- pure [] + Just (Left ad) -> pure $ + let arities = DD.constructorArities $ DD.toDataDecl ad + in [ ((r, i), arity) | (arity, i) <- arities `zip` [0..] ] + Just (Right dd) -> pure $ + let arities = DD.constructorArities dd + in [ ((r, i), arity) | (arity, i) <- arities `zip` [0..] ] + R.Builtin{} -> pure [] + let cenv = CompilationEnv mempty arityMap + + -- deps = Term.dependencies t + -- this would rely on haskell laziness for compilation, needs more thought + --compiledTerms <- fmap (Map.fromList . join) . for (toList deps) $ \case + -- r@(R.DerivedId id) -> do + -- o <- CL.getTerm env id + -- case o of + -- Nothing -> pure [] + -- Just e -> pure [(r, compile cenv (Term.amap (const ()) e))] + -- _ -> pure [] + pure $ builtinCompilationEnv <> cenv + +builtinCompilationEnv :: CompilationEnv +builtinCompilationEnv = CompilationEnv (builtinsMap <> IR.builtins) mempty + where + builtins :: [(Text, Int, Size -> Stack -> IO Value)] + builtins = + [ mk2 "Text.++" att att (pure . T) (<>) + , mk2 "Text.take" atn att (pure . T) (Text.take . fromIntegral) + , mk2 "Text.drop" atn att (pure . T) (Text.drop . fromIntegral) + , mk2 "Text.==" att att (pure . 
B) (==) + , mk2 "Text.!=" att att (pure . B) (/=) + , mk2 "Text.<=" att att (pure . B) (<=) + , mk2 "Text.>=" att att (pure . B) (>=) + , mk2 "Text.>" att att (pure . B) (>) + , mk2 "Text.<" att att (pure . B) (<) + , mk1 "Text.size" att (pure . N) (fromIntegral . Text.length) + , mk1 "Text.uncons" att + ( pure + . IR.maybeToOptional + . fmap (\(h, t) -> IR.tuple [C h, T t]) + ) + $ Text.uncons + , mk1 "Text.unsnoc" att + ( pure + . IR.maybeToOptional + . fmap (\(i, l) -> IR.tuple [T i, C l]) + ) + $ Text.unsnoc + + , mk1 "Text.toCharList" att (pure . Sequence) + (Sequence.fromList . map C . Text.unpack) + + , mk1 "Text.fromCharList" ats (pure . T) + (\s -> Text.pack [ c | C c <- toList s ]) + + , mk1 "Char.toNat" atc (pure . N) (fromIntegral . fromEnum) + , mk1 "Char.fromNat" atn (pure . C) (toEnum . fromIntegral) + + , mk2 "List.at" atn ats (pure . IR.maybeToOptional) + $ Sequence.lookup + . fromIntegral + , mk2 "List.cons" at ats (pure . Sequence) (Sequence.<|) + , mk2 "List.snoc" ats at (pure . Sequence) (Sequence.|>) + , mk2 "List.take" atn ats (pure . Sequence) (Sequence.take . fromIntegral) + , mk2 "List.drop" atn ats (pure . Sequence) (Sequence.drop . fromIntegral) + , mk2 "List.++" ats ats (pure . Sequence) (<>) + , mk1 "List.size" ats (pure . N) (fromIntegral . Sequence.length) + + , mk1 "Bytes.fromList" ats (pure . Bs) (\s -> + Bytes.fromByteString (BS.pack [ fromIntegral n | N n <- toList s])) + , mk2 "Bytes.++" atbs atbs (pure . Bs) (<>) + , mk2 "Bytes.take" atn atbs (pure . Bs) (\n b -> Bytes.take (fromIntegral n) b) + , mk2 "Bytes.drop" atn atbs (pure . Bs) (\n b -> Bytes.drop (fromIntegral n) b) + , mk1 "Bytes.toList" atbs (pure . Sequence) + (\bs -> Sequence.fromList [ N (fromIntegral n) | n <- Bytes.toWord8s bs ]) + , mk1 "Bytes.size" atbs (pure . N . fromIntegral) Bytes.size + , mk2 "Bytes.at" atn atbs pure $ \i bs -> + IR.maybeToOptional (N . fromIntegral <$> Bytes.at (fromIntegral i) bs) + , mk1 "Bytes.flatten" atbs (pure . 
Bs) Bytes.flatten + + -- Trigonometric functions + , mk1 "Float.acos" atf (pure . F) acos + , mk1 "Float.asin" atf (pure . F) asin + , mk1 "Float.atan" atf (pure . F) atan + , mk2 "Float.atan2" atf atf (pure . F) atan2 + , mk1 "Float.cos" atf (pure . F) cos + , mk1 "Float.sin" atf (pure . F) sin + , mk1 "Float.tan" atf (pure . F) tan + + -- Hyperbolic functions + , mk1 "Float.acosh" atf (pure . F) acosh + , mk1 "Float.asinh" atf (pure . F) asinh + , mk1 "Float.atanh" atf (pure . F) atanh + , mk1 "Float.cosh" atf (pure . F) cosh + , mk1 "Float.sinh" atf (pure . F) sinh + , mk1 "Float.tanh" atf (pure . F) tanh + + -- Exponential functions + , mk1 "Float.exp" atf (pure . F) exp + , mk1 "Float.log" atf (pure . F) log + , mk2 "Float.logBase" atf atf (pure . F) logBase + + -- Power Functions + , mk2 "Float.pow" atf atf (pure . F) (**) + , mk1 "Float.sqrt" atf (pure . F) sqrt + + -- Rounding and Remainder Functions + , mk1 "Float.ceiling" atf (pure . I) ceiling + , mk1 "Float.floor" atf (pure . I) floor + , mk1 "Float.round" atf (pure . I) round + , mk1 "Float.truncate" atf (pure . I) truncate + + , mk1 "Nat.toText" atn (pure . T) (Text.pack . show) + , mk1 "Nat.fromText" att (pure . IR.maybeToOptional . fmap N) ( + (\x -> readMaybe x :: Maybe Word64) . Text.unpack) + , mk1 "Nat.toFloat" atn (pure . F) fromIntegral + + , mk1 "Int.toText" ati (pure . T) + (Text.pack . (\x -> if x >= 0 then ("+" <> show x) else show x)) + , mk1 "Int.fromText" att (pure . IR.maybeToOptional . fmap I) $ + (\x -> readMaybe (if "+" `List.isPrefixOf` x then drop 1 x else x)) + . Text.unpack + , mk1 "Int.toFloat" ati (pure . F) fromIntegral + + -- Float Utils + , mk1 "Float.abs" atf (pure . F) abs + , mk2 "Float.max" atf atf (pure . F) max + , mk2 "Float.min" atf atf (pure . F) min + , mk1 "Float.toText" atf (pure . T) (Text.pack . show) + , mk1 "Float.fromText" att (pure . IR.maybeToOptional . fmap F) ( + (\x -> readMaybe x :: Maybe Double) . 
Text.unpack) + + , mk2 "Debug.watch" att at id (\t v -> putStrLn (Text.unpack t) *> pure v) + ] + + builtinsMap :: Map R.Reference IR + builtinsMap = Map.fromList + [ (R.Builtin name, makeIR arity name ir) | (name, arity, ir) <- builtins ] + makeIR arity name = + Leaf + . Val + . Lam arity (underapply name) + . Leaf + . External + . ExternalFunction (R.Builtin name) + underapply name = + let r = Term.ref () $ R.Builtin name :: Term SymbolC + in FormClosure (ABT.hash r) r [] + mk1 + :: Text + -> (Size -> Z -> Stack -> IO a) + -> (b -> IO Value) + -> (a -> b) + -> (Text, Int, Size -> Stack -> IO Value) + mk1 name getA mkB f = + ( name + , 1 + , \size stack -> do + a <- getA size (Slot 0) stack + mkB $ f a + ) + mk2 + :: Text + -> (Size -> Z -> Stack -> IO a) + -> (Size -> Z -> Stack -> IO b) + -> (c -> IO Value) + -> (a -> b -> c) + -> (Text, Int, Size -> Stack -> IO Value) + mk2 name getA getB mkC f = + ( name + , 2 + , \size stack -> do + a <- getA size (Slot 1) stack + b <- getB size (Slot 0) stack + mkC $ f a b + ) + +run :: (R.Reference -> ConstructorId -> [Value] -> IO Result) + -> CompilationEnv + -> IR + -> IO Result +run ioHandler env ir = do + let -- pir = prettyIR mempty pexternal pcont + -- pvalue = prettyValue mempty pexternal pcont + -- pcont _k = "" -- TP.pretty mempty <$> decompileExternal k + -- if we had a PrettyPrintEnv, we could use that here + -- pexternal (ExternalFunction r _) = P.shown r + -- traceM $ "Running this program" + -- traceM $ P.render 80 (pir ir) + supply <- newIORef 0 + m0 <- MV.new 256 + let + fresh :: IO Int + fresh = atomicModifyIORef' supply (\n -> (n + 1, n)) + + -- TODO: + -- go :: (MonadReader Size m, MonadState Stack m, MonadIO m) => IR -> m Result + go :: Size -> Stack -> IR -> IO Result + go size m ir = do + -- stackStuff <- traverse (MV.read m) [0..size-1] + -- traceM $ "stack: " <> show stackStuff + -- traceM $ "ir: " <> show ir + -- traceM "" + case ir of + Leaf (Val v) -> done v + Leaf slot -> done =<< at size slot m 
+ If c t f -> atb size c m >>= \case + True -> go size m t + False -> go size m f + And i j -> atb size i m >>= \case + True -> go size m j + False -> done (B False) + Or i j -> atb size i m >>= \case + True -> done (B True) + False -> go size m j + Not i -> atb size i m >>= (done . B . not) + Let var b body freeInBody -> go size m b >>= \case + RRequest req -> + let needed = if Set.null freeInBody then 0 else Set.findMax freeInBody + in pure $ RRequest (appendCont var req $ One needed size m body) + RDone v -> do + -- Garbage collect the stack occasionally + (size, m) <- + if size >= MV.length m + -- freeInBody just the set of de bruijn indices referenced in `body` + -- Examples: + -- a) let x = 1 in x, freeInBody = {0} + -- b) let x = 1 in 42, freeInBody = {} + -- We don't need anything from old stack in either of the above + -- + -- c) let x = 1 in (let y = 2 in x + y), freeInBody = {0,1} + -- We need the top element of the old stack to be preserved + then let + maxSlot = + if Set.null freeInBody then -1 + else Set.findMax freeInBody - 1 + in gc size m maxSlot + else pure (size, m) + -- traceM . P.render 80 $ P.shown var <> " =" `P.hang` pvalue v + push size v m >>= \m -> go (size + 1) m body + e@(RMatchFail _ _ _) -> pure e + e@(RError _ _) -> pure e + LetRec bs body -> letrec size m bs body + MakeSequence vs -> + done . Sequence . Sequence.fromList =<< traverse (\i -> at size i m) vs + Construct r cid args -> + done . 
Data r cid =<< traverse (\i -> at size i m) args + Request r cid args -> + req <$> traverse (\i -> at size i m) args + where + -- The continuation of the request is initially the identity function + -- and we append to it in `Let` as we unwind the stack + req vs = RRequest (Req r cid vs (One 0 size m (Leaf $ Slot 0))) + Handle handler body -> do + h <- at size handler m + runHandler size m h body + Apply fn args -> do + RDone fn <- go size m fn -- ANF should ensure this match is OK + fn <- force fn + call size m fn args + Match scrutinee cases -> do + -- scrutinee : Z -- already evaluated :amazing: + -- cases : [(Pattern, Maybe IR, IR)] + scrute <- at size scrutinee m -- "I am scrute" / "Dwight K. Scrute" + tryCases size scrute m cases + + -- Builtins + AddI i j -> do x <- ati size i m; y <- ati size j m; done (I (x + y)) + SubI i j -> do x <- ati size i m; y <- ati size j m; done (I (x - y)) + MultI i j -> do x <- ati size i m; y <- ati size j m; done (I (x * y)) + DivI i j -> do x <- ati size i m; y <- ati size j m; done (I (x `div` y)) + GtI i j -> do x <- ati size i m; y <- ati size j m; done (B (x > y)) + GtEqI i j -> do x <- ati size i m; y <- ati size j m; done (B (x >= y)) + LtI i j -> do x <- ati size i m; y <- ati size j m; done (B (x < y)) + LtEqI i j -> do x <- ati size i m; y <- ati size j m; done (B (x <= y)) + EqI i j -> do x <- ati size i m; y <- ati size j m; done (B (x == y)) + SignumI i -> do x <- ati size i m; done (I (signum x)) + NegateI i -> do x <- ati size i m; done (I (negate x)) + Truncate0I i -> do x <- ati size i m; done (N (fromIntegral (truncate0 x))) + ModI i j -> do x <- ati size i m; y <- ati size j m; done (I (x `mod` y)) + PowI i j -> do x <- ati size i m; y <- atn size j m; done (I (x ^ y)) + ShiftRI i j -> do x <- ati size i m; y <- atn size j m; done (I (x `shiftR` (fromIntegral y))) + ShiftLI i j -> do x <- ati size i m; y <- atn size j m; done (I (x `shiftL` (fromIntegral y))) + BitAndI i j -> do x <- ati size i m; y <- ati 
size j m; done (I ((.&.) (fromIntegral x) (fromIntegral y))) + BitOrI i j -> do x <- ati size i m; y <- ati size j m; done (I ((.|.) (fromIntegral x) (fromIntegral y))) + BitXorI i j -> do x <- ati size i m; y <- ati size j m; done (I (xor (fromIntegral x) (fromIntegral y))) + ComplementI i -> do x <- ati size i m; done (I (fromIntegral (complement x))) + LeadZeroI i -> do x <- ati size i m; done (N (fromIntegral (countLeadingZeros x))) + TrailZeroI i -> do x <- ati size i m; done (N (fromIntegral (countTrailingZeros x))) + + AddN i j -> do x <- atn size i m; y <- atn size j m; done (N (x + y)) + -- cast to `Int` and subtract + SubN i j -> do x <- atn size i m; y <- atn size j m + done (I (fromIntegral x - fromIntegral y)) + -- subtraction truncated at 0 (don't wrap around) + DropN i j -> do x <- atn size i m; y <- atn size j m + done (N (x - (y `min` x))) + MultN i j -> do x <- atn size i m; y <- atn size j m; done (N (x * y)) + DivN i j -> do x <- atn size i m; y <- atn size j m; done (N (x `div` y)) + ModN i j -> do x <- atn size i m; y <- atn size j m; done (N (x `mod` y)) + PowN i j -> do x <- atn size i m; y <- atn size j m; done (N (fromIntegral (x ^ y))) + ShiftRN i j -> do x <- atn size i m; y <- atn size j m; done (N (fromIntegral (x `shiftR` (fromIntegral y)))) + ShiftLN i j -> do x <- atn size i m; y <- atn size j m; done (N (fromIntegral (x `shiftL` (fromIntegral y)))) + ToIntN i -> do x <- atn size i m; done (I (fromIntegral x)) + GtN i j -> do x <- atn size i m; y <- atn size j m; done (B (x > y)) + GtEqN i j -> do x <- atn size i m; y <- atn size j m; done (B (x >= y)) + LtN i j -> do x <- atn size i m; y <- atn size j m; done (B (x < y)) + LtEqN i j -> do x <- atn size i m; y <- atn size j m; done (B (x <= y)) + EqN i j -> do x <- atn size i m; y <- atn size j m; done (B (x == y)) + BitAndN i j -> do x <- atn size i m; y <- atn size j m; done (N ((.&.) x y)) + BitOrN i j -> do x <- atn size i m; y <- atn size j m; done (N ((.|.) 
x y)) + BitXorN i j -> do x <- atn size i m; y <- atn size j m; done (N (xor x y)) + ComplementN i -> do x <- atn size i m; done (N (fromIntegral (complement x))) + LeadZeroN i -> do x <- atn size i m; done (N (fromIntegral (countLeadingZeros x))) + TrailZeroN i -> do x <- atn size i m; done (N (fromIntegral (countTrailingZeros x))) + + AddF i j -> do x <- atf size i m; y <- atf size j m; done (F (x + y)) + SubF i j -> do x <- atf size i m; y <- atf size j m; done (F (x - y)) + MultF i j -> do x <- atf size i m; y <- atf size j m; done (F (x * y)) + DivF i j -> do x <- atf size i m; y <- atf size j m; done (F (x / y)) + GtF i j -> do x <- atf size i m; y <- atf size j m; done (B (x > y)) + GtEqF i j -> do x <- atf size i m; y <- atf size j m; done (B (x >= y)) + LtF i j -> do x <- atf size i m; y <- atf size j m; done (B (x < y)) + LtEqF i j -> do x <- atf size i m; y <- atf size j m; done (B (x <= y)) + EqF i j -> do x <- atf size i m; y <- atf size j m; done (B (x == y)) + EqU i j -> do + -- todo: these can be reused + t1 <- CT.new 8 + t2 <- CT.new 8 + x <- at size i m + y <- at size j m + RDone . B <$> cyclicEq t1 t2 x y + CompareU i j -> do + -- todo: these can be reused + t1 <- CT.new 8 + t2 <- CT.new 8 + x <- at size i m + y <- at size j m + o <- cyclicOrd t1 t2 x y + pure . RDone . I $ case o of + EQ -> 0 + LT -> -1 + GT -> 1 + Bug i -> RError ErrorTypeBug <$> at size i m + Todo i -> RError ErrorTypeTodo <$> at size i m + + runHandler :: Size -> Stack -> Value -> IR -> IO Result + runHandler size m handler body = + go size m body >>= runHandler' size m handler + + -- Certain handlers are of a form where we can can skip the step of + -- copying the continuation inside the request. We aren't totally + -- sure what the conditions are, but speculate: + -- + -- * The Request can't escape the invocation of the handler; that is, the + -- handler can't stash the request for later, it has to inspect and run + -- the continuation immediately. 
+ -- * The handler can't invoke the continuation multiple times, since + -- evaluation of the continuation will alter the stack. + -- * Is that sufficient? Does it matter if continuation is called in + -- tail position or not? + -- + -- Leijn's "Implementing Algebraic Effects in C" paper mentions there's + -- a speedup in the case where the handler uses its continuation just once + -- in tail position: + -- https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/algeff-in-c-tr-v2.pdf + handlerNeedsCopy :: Value -> Bool + handlerNeedsCopy _ = True -- overly conservative choice, but never wrong! + + runHandler' :: Size -> Stack -> Value -> Result -> IO Result + runHandler' size m handler r = case r of + RRequest req -> do + req <- if handlerNeedsCopy handler then copyRequest req else pure req + m <- push size (Requested req) m + result <- call (size + 1) m handler [Slot 0] + case result of + RMatchFail _ _ _ -> pure $ RRequest (wrapHandler handler req) + r -> pure r + RDone v -> do + m <- push size (Pure v) m + call (size + 1) m handler [Slot 0] + r -> pure r + + call :: Size -> Stack -> Value -> [Z] -> IO Result + -- call _ _ fn@(Lam _ _ _) args | trace ("call "<> show fn <> " " <>show args) False = undefined + call size m fn@(Lam arity underapply body) args = let nargs = length args in + -- fully applied call, `(x y -> ..) 9 10` + if nargs == arity then case underapply of + -- when calling a closure, we supply all the closure arguments, before + -- `args`. See fix528.u for an example. + FormClosure _hash _tm pushedArgs -> do + (size, m) <- pushManyZ size (fmap Val (reverse pushedArgs) ++ args) m + go size m body + _ -> do + (size, m) <- pushManyZ size args m + go size m body + -- overapplied call, e.g. `id id 42` + else if nargs > arity then do + let (usedArgs, extraArgs) = splitAt arity args + result <- call size m fn usedArgs + case result of + RDone fn' -> call size m fn' extraArgs + -- foo : Int ->{IO} (Int -> Int) + -- ... 
+ -- (foo 12 12) + RRequest req -> do + let overApplyName = Var.named "oa" + extraArgvs <- for extraArgs $ \arg -> at size arg m + pure . RRequest . appendCont overApplyName req $ + One 0 size m (Apply (Leaf (Slot 0)) (Val <$> extraArgvs)) + e -> error $ "type error, tried to apply: " <> show e + -- underapplied call, e.g. `(x y -> ..) 9` + else do + argvs <- for args $ \arg -> at size arg m + case underapply of + -- Example 1: + -- f = x y z p -> x - y - z - p + -- f' = f 1 2 -- Specialize f [2, 1] -- each arg is pushed onto top + -- f'' = f' 3 -- Specialize f [3, 2, 1] + -- f'' 4 -- should be the same thing as `f 1 2 3 4` + -- + -- pushedArgs = [mostRecentlyApplied, ..., firstApplied] + Specialize hash lam@(Term.LamsNamed' vs body) pushedArgs -> let + pushedArgs' :: [ (SymbolC, Value)] -- head is the latest argument + pushedArgs' = reverse (drop (length pushedArgs) vs `zip` argvs) ++ pushedArgs + vsRemaining = drop (length pushedArgs') vs + compiled = compile0 env + (reverse (fmap (,Nothing) vsRemaining) ++ + fmap (second Just) pushedArgs') + body + in done $ Lam (arity - nargs) (Specialize hash lam pushedArgs') compiled + Specialize _ e pushedArgs -> error $ "can't underapply a non-lambda: " <> show e <> " " <> show pushedArgs + FormClosure hash tm pushedArgs -> + let pushedArgs' = reverse argvs ++ pushedArgs + in done $ Lam (arity - nargs) (FormClosure hash tm pushedArgs') body + call size m (Cont k) [arg] = do + v <- at size arg m + callContinuation size m k v + call size m fn args = do + s0 <- traverse (MV.read m) [0..size-1] + let s = [(0::Int)..] 
`zip` reverse s0 + error $ "type error - tried to apply a non-function: " <> + show fn <> " " <> show args <> "\n" <> + "[\n " <> + intercalateMap "\n " (\(i,v) -> "Slot " <> show i <> ": " <> take 50 (show v)) s + <> "\n]" + + callContinuation :: Size -> Stack -> Continuation -> Value -> IO Result + callContinuation size m k v = case k of + One _ size m ir -> do + m <- push size v m + go (size + 1) m ir + WrapHandler h k -> runHandler' size m h =<< callContinuation size m k v + -- reassociate to the right during execution, is this needed and why? + Chain v1 (Chain v2 k1 k2) k3 -> + callContinuation size m (Chain v1 k1 (Chain v2 k2 k3)) v + Chain var k1 k2 -> do + r <- callContinuation size m k1 v + case r of + RDone v -> callContinuation size m k2 v + RRequest req -> pure $ RRequest (appendCont var req k2) + _ -> pure r + + copyContinuation :: Continuation -> IO Continuation + copyContinuation k = case k of + -- reassociate to the right during copying, is this needed and why? + Chain v1 (Chain v2 k1 k2) k3 -> + copyContinuation (Chain v1 k1 (Chain v2 k2 k3)) + Chain v k1 k2 -> Chain v <$> copyContinuation k1 <*> copyContinuation k2 + One needed size stack ir -> do + -- (@0 + @3) -- 3 needed from old stack + -- (@0) -- 0 needed from old stack + -- (1 + 1) -- 0 needed from old stack + let slice = MV.slice (size - needed) needed stack + copied <- MV.clone slice + pure $ One needed (MV.length copied) copied ir + WrapHandler h k -> WrapHandler h <$> copyContinuation k + + copyRequest :: Req -> IO Req + copyRequest (Req r cid args k) = Req r cid args <$> copyContinuation k + + -- Just = match success, Nothing = match fail + -- Returns Values to be put on the stack when evaluating case guard/body + tryCase :: (Value, Pattern) -> Maybe [Value] + -- tryCase x | trace ("tryCase " ++ show x ++ " =") False = undefined + -- tryCase x = traceShowId $ case x of + tryCase = \case + (I x, PatternI x2) -> when' (x == x2) $ Just [] + (F x, PatternF x2) -> when' (x == x2) $ Just [] + 
(N x, PatternN x2) -> when' (x == x2) $ Just [] + (B x, PatternB x2) -> when' (x == x2) $ Just [] + (T x, PatternT x2) -> when' (x == x2) $ Just [] + (C x, PatternC x2) -> when' (x == x2) $ Just [] + (Data r cid args, PatternData r2 cid2 pats) + -> if r == r2 && cid == cid2 + then join <$> traverse tryCase (zip args pats) + else Nothing + (Sequence args, PatternSequenceLiteral pats) -> + if length args == length pats then join <$> traverse tryCase (zip (toList args) pats) else Nothing + (Sequence args, PatternSequenceCons l r) -> + case args of + h Sequence.:<| t -> (++) <$> tryCase (h, l) <*> tryCase (IR.Sequence t, r) + _ -> Nothing + (Sequence args, PatternSequenceSnoc l r) -> + case args of + t Sequence.:|> h -> (++) <$> tryCase (IR.Sequence t, l) <*> tryCase (h, r) + _ -> Nothing + (Sequence args, PatternSequenceConcat litLen l r) -> + (++) <$> tryCase (IR.Sequence a1, l) <*> tryCase (IR.Sequence a2, r) + where + (a1, a2) = Sequence.splitAt i args + i = either id (\j -> length args - j) litLen + (Pure v, PatternPure p) -> tryCase (v, p) + (Pure _, PatternBind _ _ _ _) -> Nothing + (Requested (Req r cid args k), PatternBind r2 cid2 pats kpat) -> + if r == r2 && cid == cid2 + then join <$> traverse tryCase (zip (args ++ [Cont k]) (pats ++ [kpat])) + else Nothing + (Requested _, PatternPure _) -> Nothing + (v, PatternAs p) -> (v:) <$> tryCase (v, p) + (_, PatternIgnore) -> Just [] + (v, PatternVar) -> Just [v] + (v, p) -> error $ + "bug: type error in pattern match: " <> + "tryCase (" <> show v <> ", " <> show p <> ")" + where when' b m = if b then m else Nothing + + tryCases size scrute m ((pat, _vars, cond, body) : remainingCases) = + case tryCase (scrute, pat) of + Nothing -> tryCases size scrute m remainingCases -- this pattern didn't match + Just vars -> do + (size', m) <- pushMany size vars m + case cond of + Just cond -> do + RDone (B cond) <- go size' m cond + if cond then go size' m body + else tryCases size scrute m remainingCases + Nothing -> go size' 
m body + tryCases sz scrute _ _ = + pure $ RMatchFail sz [] scrute + + -- To evaluate a `let rec`, we push an empty `Ref` onto the stack for each + -- binding, then evaluate each binding and set that `Ref` to its result. + -- As long as the variable references occur within a function body, + -- there's no problem. + letrec :: Size -> Stack -> [(Symbol, IR)] -> IR -> IO Result + letrec size m bs body = do + refs <- for bs $ \(v,b) -> do + r <- newIORef (UninitializedLetRecSlot v bs body) + i <- fresh + pure (Ref i v r, b) + -- push the empty references onto the stack + (size', m) <- pushMany size (fst <$> refs) m + for_ refs $ \(Ref _ _ r, ir) -> do + let toVal (RDone a) = a + toVal e = error ("bindings in a let rec must not have effects " ++ show e) + result <- toVal <$> go size' m ir + writeIORef r result + go size' m body + + -- Garbage collect the elements of the stack that are more than `maxSlot` + -- from the top - this is done just by copying to a fresh stack. + gc :: Size -> Stack -> Int -> IO (Size, Stack) + -- when maxSlot = -1, nothing from the old stack is needed. 
+ gc _ _ _maxSlot@(-1) = do m <- MV.new 256; pure (0, m) + gc size m maxSlot = do + let start = size - maxSlot - 1 + len = maxSlot + 1 + m <- MV.clone $ MV.slice start len m + pure (len, m) + + loop (RRequest (Req ref cid vs k)) = do + ioResult <- ioHandler ref cid vs + case ioResult of + RDone ioResult -> do + x <- callContinuation 0 m0 k ioResult + loop x + r -> pure r + loop a = pure a + + r <- go 0 m0 ir + loop r + +instance Show ExternalFunction where + show _ = "ExternalFunction" + +instance CyclicEq ExternalFunction where + cyclicEq _ _ (ExternalFunction r _) (ExternalFunction r2 _) = pure (r == r2) + +instance CyclicOrd ExternalFunction where + cyclicOrd _ _ (ExternalFunction r _) (ExternalFunction r2 _) = pure (r `compare` r2) + +instance CyclicEq Continuation where + cyclicEq h1 h2 k1 k2 = do + n1 <- S.makeStableName k1 + n2 <- S.makeStableName k2 + if n1 == n2 then pure True + else case (k1, k2) of + (WrapHandler v1 k1, WrapHandler v2 k2) -> do + b <- cyclicEq h1 h2 v1 v2 + if b then cyclicEq h1 h2 k1 k2 + else pure False + (Chain _ k1 k2, Chain _ k1a k2a) -> do + b <- cyclicEq h1 h2 k1 k1a + if b then cyclicEq h1 h2 k2 k2a + else pure False + (One _needed1 _size1 _s1 _ir1, One _needed2 _size2 _s2 _ir2) -> + error "todo - fill CyclicEq Continuation" + _ -> pure False + +instance CyclicOrd Continuation where + cyclicOrd h1 h2 k1 k2 = do + n1 <- S.makeStableName k1 + n2 <- S.makeStableName k2 + if n1 == n2 then pure EQ + else case (k1, k2) of + (WrapHandler v1 k1, WrapHandler v2 k2) -> do + b <- cyclicOrd h1 h2 v1 v2 + if b == EQ then cyclicOrd h1 h2 k1 k2 + else pure b + (Chain _ k1 k2, Chain _ k1a k2a) -> do + b <- cyclicOrd h1 h2 k1 k1a + if b == EQ then cyclicOrd h1 h2 k2 k2a + else pure b + (One _needed1 _size1 _s1 _ir1, One _needed2 _size2 _s2 _ir2) -> + error "todo - fill CyclicOrd Continuation" + _ -> pure $ continuationConstructorId k1 `compare` continuationConstructorId k2 + +continuationConstructorId :: Continuation -> Int 
+continuationConstructorId k = case k of + One _ _ _ _ -> 0 + Chain _ _ _ -> 1 + WrapHandler _ _ -> 2 + +truncate0 :: (Num a, Ord a) => a -> a +truncate0 x = if x >= 0 then x else 0 diff --git a/parser-typechecker/src/Unison/Runtime/Rt1IO.hs b/parser-typechecker/src/Unison/Runtime/Rt1IO.hs new file mode 100644 index 0000000000..d09db6fd2e --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Rt1IO.hs @@ -0,0 +1,529 @@ +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Runtime.Rt1IO where + +import Unison.Prelude + +import Control.Exception ( throwIO + , AsyncException(UserInterrupt) + , finally + , bracket + , asyncExceptionFromException + ) +import Control.Concurrent ( ThreadId + , forkIO + , killThread + , threadDelay + ) +import Control.Concurrent.MVar ( MVar + , modifyMVar_ + , readMVar + , newMVar + , newEmptyMVar + , takeMVar + , putMVar + ) +import Control.Lens +import Control.Monad.Morph ( hoist ) +import Control.Monad.Reader ( ReaderT + , runReaderT + , ask + ) +import Control.Monad.Except ( ExceptT(..) + , runExceptT + , throwError + ) +import Data.GUID ( genText ) +import qualified Data.Map as Map +import qualified Data.Sequence as Seq +import Data.Text as Text +import qualified Data.Text.IO as TextIO +import Data.Time.Clock.POSIX as Time +import qualified Network.Simple.TCP as Net +import qualified Network.Socket as Sock +--import qualified Network.Socket as Sock +import System.IO ( Handle + , IOMode(..) + , SeekMode(..) + , BufferMode(..) 
+ , openFile + , hClose + , stdin + , stdout + , stderr + , hIsEOF + , hIsSeekable + , hSeek + , hTell + , hGetBuffering + , hSetBuffering + ) +import System.Directory ( getCurrentDirectory + , setCurrentDirectory + , getTemporaryDirectory + , getDirectoryContents + , doesPathExist + , doesDirectoryExist + , createDirectoryIfMissing + , removeDirectoryRecursive + , renameDirectory + , removeFile + , renameFile + , getModificationTime + , getFileSize + ) +import qualified System.IO.Error as SysError +import Type.Reflection ( Typeable ) +import Unison.Builtin.Decls as DD +import Unison.Symbol +import Unison.Parser ( Ann(External) ) +import qualified Unison.Reference as R +import qualified Unison.Runtime.Rt1 as RT +import qualified Unison.Runtime.IR as IR +import qualified Unison.Term as Term +-- import Debug.Trace +-- import qualified Unison.Util.Pretty as Pretty +-- import Unison.TermPrinter ( pretty ) +import Unison.Codebase.Runtime ( Runtime(Runtime) ) +import Unison.Codebase.MainTerm ( nullaryMain ) +import qualified Unison.Runtime.IOSource as IOSrc +import qualified Unison.Util.Bytes as Bytes +import qualified Unison.Var as Var +import qualified Unison.Util.Pretty as P +import qualified Unison.TermPrinter as TermPrinter +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Typechecker.Components as Components + +-- TODO: Make this exception more structured? 
+newtype UnisonRuntimeException = UnisonRuntimeException Text + deriving (Typeable, Show) + +instance Exception UnisonRuntimeException + +type GUID = Text + +data IOState = IOState + { _handleMap :: HandleMap + , _socketMap :: SocketMap + , _threadMap :: ThreadMap + } + +type UIO a = ExceptT IOError (ReaderT S IO) a +type HandleMap = Map GUID Handle +type SocketMap = Map GUID Net.Socket +type ThreadMap = Map GUID ThreadId + +newtype S = S {_ioState :: MVar IOState } + +makeLenses 'S +makeLenses 'IOState + +haskellMode :: Text -> IOMode +haskellMode mode = case mode of + "io.Mode.Read" -> ReadMode + "io.Mode.Write" -> WriteMode + "io.Mode.Append" -> AppendMode + "io.Mode.ReadWrite" -> ReadWriteMode + _ -> error . Text.unpack $ "Unknown IO mode " <> mode + +newUnisonHandle :: Handle -> UIO RT.Value +newUnisonHandle h = do + t <- liftIO genText + m <- view ioState + liftIO . modifyMVar_ m $ pure . over handleMap (Map.insert t h) + pure $ IR.Data IOSrc.handleReference IOSrc.handleId [IR.T t] + +newUnisonSocket :: Net.Socket -> UIO RT.Value +newUnisonSocket s = do + t <- liftIO genText + m <- view ioState + liftIO . modifyMVar_ m $ pure . over socketMap (Map.insert t s) + pure $ IR.Data IOSrc.socketReference IOSrc.socketId [IR.T t] + +deleteUnisonHandle :: Text -> UIO () +deleteUnisonHandle h = do + m <- view ioState + liftIO . modifyMVar_ m $ pure . over handleMap (Map.delete h) + +getHaskellHandle :: Text -> UIO (Maybe Handle) +getHaskellHandle h = do + m <- view ioState + v <- liftIO $ readMVar m + pure . Map.lookup h $ view handleMap v + +getHaskellHandleOrThrow :: Text -> UIO Handle +getHaskellHandleOrThrow h = getHaskellHandle h >>= maybe throwHandleClosed pure + +getHaskellSocket :: Text -> UIO (Maybe Net.Socket) +getHaskellSocket s = do + m <- view ioState + v <- liftIO $ readMVar m + pure . 
Map.lookup s $ view socketMap v + +getHaskellSocketOrThrow :: Text -> UIO Net.Socket +getHaskellSocketOrThrow s = getHaskellSocket s >>= maybe throwSocketClosed pure + +constructLeft :: RT.Value -> RT.Value +constructLeft v = IR.Data IOSrc.eitherReference IOSrc.eitherLeftId [v] + +constructRight :: RT.Value -> RT.Value +constructRight v = IR.Data IOSrc.eitherReference IOSrc.eitherRightId [v] + +constructSome :: RT.Value -> RT.Value +constructSome v = IR.Data IOSrc.optionReference IOSrc.someId [v] + +constructNone :: RT.Value +constructNone = IR.Data IOSrc.optionReference IOSrc.noneId [] + +convertMaybe :: Maybe RT.Value -> RT.Value +convertMaybe Nothing = constructNone +convertMaybe (Just v) = constructSome v + +convertOptional :: RT.Value -> Maybe RT.Value +convertOptional (IR.Data _ _ [] ) = Nothing +convertOptional (IR.Data _ _ [x]) = Just x +convertOptional v = + error + $ "Compiler bug! This value showed up at runtime where " + <> "an Optional was expected: " + <> show v + +constructPair :: RT.Value -> RT.Value -> RT.Value +constructPair a b = IR.Data DD.pairRef 0 [a, b] + +convertErrorType :: IOError -> IR.ConstructorId +convertErrorType (SysError.ioeGetErrorType -> e) + | SysError.isAlreadyExistsErrorType e = IOSrc.alreadyExistsId + | SysError.isDoesNotExistErrorType e = IOSrc.noSuchThingId + | SysError.isAlreadyInUseErrorType e = IOSrc.resourceBusyId + | SysError.isFullErrorType e = IOSrc.resourceExhaustedId + | SysError.isEOFErrorType e = IOSrc.eofId + | SysError.isIllegalOperationErrorType e = IOSrc.illegalOperationId + | SysError.isPermissionErrorType e = IOSrc.permissionDeniedId + | otherwise = IOSrc.userErrorId + +haskellSeekMode :: Text -> SeekMode +haskellSeekMode mode = case mode of + "io.SeekMode.Absolute" -> AbsoluteSeek + "io.SeekMode.Relative" -> RelativeSeek + "io.SeekMode.FromEnd" -> SeekFromEnd + _ -> error . 
Text.unpack $ "Unknown seek mode " <> mode + +haskellBufferMode :: RT.Value -> BufferMode +haskellBufferMode mode = case mode of + IR.Data _ _ [] -> NoBuffering + IR.Data _ _ [IR.Data _ _ [] ] -> LineBuffering + IR.Data _ _ [IR.Data _ _ [IR.Data _ _ []]] -> BlockBuffering Nothing + IR.Data _ _ [IR.Data _ _ [IR.Data _ _ [IR.N n]]] -> + BlockBuffering (Just $ fromIntegral n) + _ -> error $ "Unknown buffer mode " <> show mode + +unisonBufferMode :: BufferMode -> RT.Value +unisonBufferMode mode = case mode of + NoBuffering -> constructNone + LineBuffering -> + constructSome (IR.Data IOSrc.bufferModeReference IOSrc.bufferModeLineId []) + BlockBuffering Nothing -> constructSome + (IR.Data IOSrc.bufferModeReference IOSrc.bufferModeBlockId [constructNone]) + BlockBuffering (Just size) -> constructSome + (IR.Data IOSrc.bufferModeReference + IOSrc.bufferModeBlockId + [constructSome . IR.N $ fromIntegral size] + ) + +unisonFilePath :: FilePath -> RT.Value +unisonFilePath fp = + IR.Data IOSrc.filePathReference IOSrc.filePathId [IR.T $ Text.pack fp] + +hostPreference :: [RT.Value] -> Net.HostPreference +hostPreference [] = Net.HostAny +hostPreference [IR.Data _ _ [IR.T host]] = Net.Host $ Text.unpack host +hostPreference x = + error $ "Runtime bug! Not a valid host preference: " <> show x + +constructIoError :: IOError -> RT.Value +constructIoError e = IR.Data + IOSrc.errorReference + IOSrc.ioErrorId + [ IR.Data IOSrc.errorTypeReference (convertErrorType e) [] + , IR.T . Text.pack $ show e + ] + +handleIO' + :: RT.CompilationEnv + -> S + -> R.Reference + -> IR.ConstructorId + -> [RT.Value] + -> IO RT.Result +handleIO' cenv s rid cid vs = case rid of + R.DerivedId x | x == IOSrc.ioHash -> flip runReaderT s $ do + ev <- runExceptT $ handleIO cenv cid vs + case ev of + Left e -> pure . RT.RDone . constructLeft $ constructIoError e + Right v -> pure . RT.RDone $ constructRight v + _ -> RT.RRequest . 
IR.Req rid cid vs <$> RT.idContinuation + +reraiseIO :: IO a -> UIO a +reraiseIO a = ExceptT . lift $ try @IOError $ liftIO a + +throwHandleClosed :: UIO a +throwHandleClosed = throwError $ illegalOperation "handle is closed" + +throwSocketClosed :: UIO a +throwSocketClosed = throwError $ illegalOperation "socket is closed" + +illegalOperation :: String -> IOError +illegalOperation msg = + SysError.mkIOError SysError.illegalOperationErrorType msg Nothing Nothing + +handleIO :: RT.CompilationEnv -> IR.ConstructorId -> [RT.Value] -> UIO RT.Value +handleIO cenv cid = go (IOSrc.constructorName IOSrc.ioReference cid) + where + go "io.IO.openFile_" [IR.Data _ 0 [IR.T filePath], IR.Data _ mode _] = do + let n = IOSrc.constructorName IOSrc.ioModeReference mode + h <- reraiseIO . openFile (Text.unpack filePath) $ haskellMode n + newUnisonHandle h + go "io.IO.closeFile_" [IR.Data _ 0 [IR.T handle]] = do + hh <- getHaskellHandle handle + reraiseIO $ maybe (pure ()) hClose hh + deleteUnisonHandle handle + pure IR.unit + go "io.IO.isFileEOF_" [IR.Data _ 0 [IR.T handle]] = do + hh <- getHaskellHandleOrThrow handle + isEOF <- reraiseIO $ hIsEOF hh + pure $ IR.B isEOF + go "io.IO.isFileOpen_" [IR.Data _ 0 [IR.T handle]] = + IR.B . isJust <$> getHaskellHandle handle + go "io.IO.getLine_" [IR.Data _ 0 [IR.T handle]] = do + hh <- getHaskellHandleOrThrow handle + line <- reraiseIO $ TextIO.hGetLine hh + pure . IR.T $ line + go "io.IO.getText_" [IR.Data _ 0 [IR.T handle]] = do + hh <- getHaskellHandleOrThrow handle + text <- reraiseIO $ TextIO.hGetContents hh + pure . IR.T $ text + go "io.IO.putText_" [IR.Data _ 0 [IR.T handle], IR.T string] = do + hh <- getHaskellHandleOrThrow handle + reraiseIO . TextIO.hPutStr hh $ string + pure IR.unit + go "io.IO.throw" [IR.Data _ _ [IR.Data _ _ [], IR.T message]] = + liftIO . 
throwIO $ UnisonRuntimeException message + go "io.IO.isSeekable_" [IR.Data _ 0 [IR.T handle]] = do + hh <- getHaskellHandleOrThrow handle + seekable <- reraiseIO $ hIsSeekable hh + pure $ IR.B seekable + go "io.IO.seek_" [IR.Data _ 0 [IR.T handle], IR.Data _ seekMode [], IR.I int] + = do + hh <- getHaskellHandleOrThrow handle + let mode = IOSrc.constructorName IOSrc.seekModeReference seekMode + reraiseIO . hSeek hh (haskellSeekMode mode) $ fromIntegral int + pure IR.unit + go "io.IO.position_" [IR.Data _ 0 [IR.T handle]] = do + hh <- getHaskellHandleOrThrow handle + pos <- reraiseIO $ hTell hh + pure . IR.I $ fromIntegral pos + go "io.IO.getBuffering_" [IR.Data _ 0 [IR.T handle]] = do + hh <- getHaskellHandleOrThrow handle + bufMode <- reraiseIO $ hGetBuffering hh + pure $ unisonBufferMode bufMode + go "io.IO.setBuffering_" [IR.Data _ 0 [IR.T handle], o] = do + hh <- getHaskellHandleOrThrow handle + reraiseIO . hSetBuffering hh $ haskellBufferMode o + pure IR.unit + go "io.IO.systemTime_" [] = do + t <- reraiseIO $ fmap round Time.getPOSIXTime + pure $ IR.Data IOSrc.epochTimeReference IOSrc.epochTimeId [IR.N t] + go "io.IO.getTemporaryDirectory_" [] = + reraiseIO $ unisonFilePath <$> getTemporaryDirectory + go "io.IO.getCurrentDirectory_" [] = + reraiseIO $ unisonFilePath <$> getCurrentDirectory + go "io.IO.setCurrentDirectory_" [IR.Data _ _ [IR.T dir]] = do + reraiseIO . setCurrentDirectory $ Text.unpack dir + pure IR.unit + go "io.IO.directoryContents_" [IR.Data _ _ [IR.T dir]] = + reraiseIO + $ IR.Sequence + . Seq.fromList + . 
fmap unisonFilePath + <$> getDirectoryContents (Text.unpack dir) + go "io.IO.fileExists_" [IR.Data _ _ [IR.T dir]] = + reraiseIO $ IR.B <$> doesPathExist (Text.unpack dir) + go "io.IO.isDirectory_" [IR.Data _ _ [IR.T dir]] = + reraiseIO $ IR.B <$> doesDirectoryExist (Text.unpack dir) + go "io.IO.createDirectory_" [IR.Data _ _ [IR.T dir]] = do + reraiseIO $ createDirectoryIfMissing True (Text.unpack dir) + pure IR.unit + go "io.IO.removeDirectory_" [IR.Data _ _ [IR.T dir]] = do + reraiseIO . removeDirectoryRecursive $ Text.unpack dir + pure IR.unit + go "io.IO.renameDirectory_" [IR.Data _ _ [IR.T from], IR.Data _ _ [IR.T to]] + = do + reraiseIO $ renameDirectory (Text.unpack from) (Text.unpack to) + pure IR.unit + go "io.IO.removeFile_" [IR.Data _ _ [IR.T file]] = do + reraiseIO . removeFile $ Text.unpack file + pure IR.unit + go "io.IO.renameFile_" [IR.Data _ _ [IR.T from], IR.Data _ _ [IR.T to]] = do + reraiseIO $ renameFile (Text.unpack from) (Text.unpack to) + pure IR.unit + go "io.IO.getFileTimestamp_" [IR.Data _ _ [IR.T file]] = do + t <- reraiseIO $ getModificationTime (Text.unpack file) + pure $ IR.Data IOSrc.epochTimeReference + IOSrc.epochTimeId + [IR.N . round $ Time.utcTimeToPOSIXSeconds t] + go "io.IO.getFileSize_" [IR.Data _ _ [IR.T file]] = + reraiseIO $ IR.N . fromIntegral <$> getFileSize (Text.unpack file) + go "io.IO.serverSocket_" [IR.Data _ _ mayHost, IR.Data _ _ [IR.T port]] = do + (s, _) <- reraiseIO + $ Net.bindSock (hostPreference mayHost) (Text.unpack port) + newUnisonSocket s + go "io.IO.listen_" [IR.Data _ _ [IR.T socket]] = do + hs <- getHaskellSocketOrThrow socket + reraiseIO $ Net.listenSock hs 2048 + pure IR.unit + go "io.IO.clientSocket_" [IR.Data _ _ [IR.T host], IR.Data _ _ [IR.T port]] = + do + (s, _) <- reraiseIO . 
Net.connectSock (Text.unpack host) $ Text.unpack + port + newUnisonSocket s + go "io.IO.closeSocket_" [IR.Data _ _ [IR.T socket]] = do + hs <- getHaskellSocket socket + reraiseIO $ traverse_ Net.closeSock hs + pure IR.unit + go "io.IO.accept_" [IR.Data _ _ [IR.T socket]] = do + hs <- getHaskellSocketOrThrow socket + conn <- reraiseIO $ Sock.accept hs + newUnisonSocket $ fst conn + go "io.IO.send_" [IR.Data _ _ [IR.T socket], IR.Bs bs] = do + hs <- getHaskellSocketOrThrow socket + reraiseIO . Net.send hs $ Bytes.toByteString bs + pure IR.unit + go "io.IO.receive_" [IR.Data _ _ [IR.T socket], IR.N n] = do + hs <- getHaskellSocketOrThrow socket + bs <- reraiseIO . Net.recv hs $ fromIntegral n + pure . convertMaybe $ IR.Bs . Bytes.fromByteString <$> bs + go "io.IO.fork_" [IR.Lam _ _ ir] = do + s <- ask + t <- liftIO genText + lock <- liftIO newEmptyMVar + m <- view ioState + id <- reraiseIO . forkIO . void $ do + void $ takeMVar lock + forceThunk cenv s ir + `finally` modifyMVar_ m (pure . over threadMap (Map.delete t)) + liftIO . modifyMVar_ m $ pure . over threadMap (Map.insert t id) + liftIO $ putMVar lock () + pure $ IR.Data IOSrc.threadIdReference IOSrc.threadIdId [IR.T t] + go "io.IO.kill_" [IR.Data _ _ [IR.T thread]] = do + m <- view ioState + map <- liftIO $ view threadMap <$> readMVar m + liftIO $ case Map.lookup thread map of + Nothing -> pure IR.unit + Just ht -> do + killThread ht + pure IR.unit + go "io.IO.delay_" [IR.N n] = do + reraiseIO . threadDelay $ fromIntegral n + pure IR.unit + go "io.IO.bracket_" [IR.Lam _ _ acquire, IR.Lam _ _ release, IR.Lam _ _ use] + = do + s <- ask + let resultToVal (RT.RDone v) = pure v + resultToVal v = + fail $ "IO bracket expected a value but got " <> show v + reraiseIO $ resultToVal =<< bracket + (resultToVal =<< forceThunk cenv s acquire) + (lamToHask cenv s release) + (lamToHask cenv s use) + go a _b = error $ show a <> " is not implemented yet." 
+ -- error + -- $ "IO handler called with unimplemented cid " + -- <> show cid + -- <> " and " + -- <> show a + -- <> " args " + -- <> show b + +forceThunk :: RT.CompilationEnv -> S -> RT.IR -> IO RT.Result +forceThunk cenv s ir = lamToHask cenv s ir IR.unit + +lamToHask :: RT.CompilationEnv -> S -> RT.IR -> RT.Value -> IO RT.Result +lamToHask cenv s ir val = RT.run (handleIO' cenv s) cenv $ task val + where task x = IR.Let (Var.named "_") (IR.Leaf (IR.Val x)) ir mempty + +runtime :: Runtime Symbol +runtime = Runtime terminate eval (nullaryMain External) + where + terminate :: IO () + terminate = pure () + eval cl' ppe term = do + let cl = void (hoist (pure . runIdentity) IOSrc.codeLookup) <> cl' + -- traceM $ Pretty.render 80 (pretty mempty term) + cenv <- RT.compilationEnv cl term -- in `m` + mmap <- newMVar $ IOState + (Map.fromList [("stdin", stdin), ("stdout", stdout), ("stderr", stderr)]) + Map.empty + Map.empty + term <- case Components.minimize' term of + Left es -> fail . reportBug "B23784210" $ + "Term contains duplicate definitions: " <> show (fst <$> es) + Right term -> pure term + r <- try $ RT.run (handleIO' cenv $ S mmap) + cenv + (IR.compile cenv $ Term.amap (const ()) term) + toTermOrError ppe r + +toTermOrError :: PPE.PrettyPrintEnv -> Either SomeException RT.Result + -> IO (Either (P.Pretty P.ColorText) (IR.Term Symbol)) +toTermOrError ppe r = case r of + Right (RT.RDone result) -> Right <$> IR.decompile result + Right (RT.RMatchFail _ _ scrute) -> do + scrute <- IR.decompile scrute + pure . Left . P.callout icon . P.lines $ [ + P.wrap ("I've encountered a" <> P.red "pattern match failure" + <> "while scrutinizing:"), "", + P.indentN 2 $ TermPrinter.pretty ppe scrute, + "", + P.wrap "This happens when calling a function that doesn't handle all possible inputs.", + "", sorryMsg + ] + Right (RT.RError t val) -> do + msg <- IR.decompile val + let errorType = case t of + RT.ErrorTypeTodo -> "builtin.todo" + RT.ErrorTypeBug -> "builtin.bug" + pure . 
Left . P.callout icon . P.lines $ [ + P.wrap ("I've encountered a call to" <> P.red errorType + <> "with the following value:"), "", + P.indentN 2 $ TermPrinter.pretty ppe msg, + "", sorryMsg + ] + Right (RT.RRequest (IR.Req r cid vs _)) -> do + vs <- traverse IR.decompile vs + let tm = Term.apps' (Term.request() r cid) vs + pure . Left . P.callout icon . P.lines $ [ + P.wrap ("I stopped evaluation after encountering an " <> P.red "unhandled request:"), "", + P.indentN 2 $ TermPrinter.pretty ppe tm, + "", + P.wrap "This happens when using a handler that doesn't handle all possible requests.", + "", sorryMsg + ] + Left (asyncExceptionFromException -> Just e) -> pure . Left . P.callout "⏹" $ + case e of + UserInterrupt -> P.wrap $ "I've" <> P.purple "cancelled evaluation." + e -> P.wrap $ "I've stopped evaluation after receiving a " + <> P.purple (P.shown e) <> "signal." + Left e -> pure . Left . P.callout icon . P.lines $ [ + P.wrap ("I stopped evaluation after encountering " <> P.red "an error:"), "", + P.indentN 2 $ P.string (show (e :: SomeException)), + "", sorryMsg + ] + where + icon = "💔💥" + sorryMsg = P.wrap $ "I'm sorry this message doesn't have more detail about" + <> "the location of the failure." + <> "My makers plan to fix this in a future release. 😢" diff --git a/parser-typechecker/src/Unison/Runtime/SparseVector.hs b/parser-typechecker/src/Unison/Runtime/SparseVector.hs new file mode 100644 index 0000000000..3ae57b9102 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/SparseVector.hs @@ -0,0 +1,128 @@ +{-# Language BangPatterns #-} +{-# Language MagicHash #-} -- used for unsafe pointer equality + +module Unison.Runtime.SparseVector where + +import Prelude hiding (unzip) +import qualified Data.Vector.Unboxed.Mutable as MUV +import Data.Bits ((.|.), (.&.)) +import qualified Data.Bits as B +import qualified GHC.Exts as Exts +import qualified Data.Vector.Unboxed as UV + +-- Denotes a `Nat -> Maybe a`. 
+-- Representation is a `Vector a` along with a bitset +-- that encodes the index of each element. +-- Ex: `[(1,a), (5,b)]` is encoded as (100010, [a,b]) +data SparseVector bits a + = SparseVector { indices :: !bits + , elements :: !(UV.Vector a) } + +-- todo: instance (UV.Unbox a, B.FiniteBits bits, Num n) +-- => Num (SparseVector bits n) + +-- Denotationally: `map f v n = f <$> v n` +map :: (UV.Unbox a, UV.Unbox b) => (a -> b) -> SparseVector bits a -> SparseVector bits b +map f v = v { elements = UV.map f (elements v) } + +-- Denotationally, a mask is a `Nat -> Bool`, so this implementation +-- means: `mask ok v n = if ok n then v n else Nothing` +mask :: (UV.Unbox a, B.FiniteBits bits) + => bits -> SparseVector bits a -> SparseVector bits a +mask bits a = + if indices' == bits then a -- check if mask is a superset + else SparseVector indices' $ UV.create $ do + vec <- MUV.new (B.popCount indices') + go vec (indices a) bits 0 0 + where + indices' = indices a .&. bits + eas = elements a + go !out !indAs !indBs !i !k = + if indAs == B.zeroBits || indBs == B.zeroBits then pure out + else let + (!a1, !b1) = (B.countTrailingZeros indAs, B.countTrailingZeros indBs) + in if a1 == b1 then do + MUV.write out k (eas UV.! (i + a1)) + go out (indAs `B.shiftR` (a1 + 1)) (indBs `B.shiftR` (b1 + 1)) + (i + 1) (k + 1) + else if a1 < b1 then + go out (indAs `B.shiftR` (a1 + 1)) indBs + (i + 1) k + else + go out indAs (indBs `B.shiftR` (b1 + 1)) i k + +-- Denotationally: `zipWith f a b n = f <$> a n <*> b n`, in other words, +-- this takes the intersection of the two shapes. +zipWith + :: (UV.Unbox a, UV.Unbox b, UV.Unbox c, B.FiniteBits bits) + => (a -> b -> c) + -> SparseVector bits a + -> SparseVector bits b + -> SparseVector bits c +zipWith f a b = + if indices a `eq` indices b || indices a == indices b then + SparseVector (indices a) (UV.zipWith f (elements a) (elements b)) + else let + indices' = indices a .&. 
indices b + a' = mask indices' a + b' = mask indices' b + in SparseVector indices' (UV.zipWith f (elements a') (elements b')) + +_1 :: (UV.Unbox a, UV.Unbox b) => SparseVector bits (a,b) -> SparseVector bits a +_1 = fst . unzip + +_2 :: (UV.Unbox a, UV.Unbox b) => SparseVector bits (a,b) -> SparseVector bits b +_2 = snd . unzip + +-- Denotationally: `unzip p = (\n -> fst <$> p n, \n -> snd <$> p n)` +unzip :: (UV.Unbox a, UV.Unbox b) + => SparseVector bits (a,b) + -> (SparseVector bits a, SparseVector bits b) +unzip (SparseVector inds ps) = + let (as,bs) = UV.unzip ps + in (SparseVector inds as, SparseVector inds bs) + +-- Denotationally: `choose bs a b n = if bs n then a n else b n` +choose :: (B.FiniteBits bits, UV.Unbox a) + => bits + -> SparseVector bits a + -> SparseVector bits a + -> SparseVector bits a +choose bits t f + | B.zeroBits == bits = f + | B.complement bits == B.zeroBits = t + | otherwise = -- it's a mix of true and false + merge (mask bits t) (mask (B.complement bits) f) + +-- Denotationally: `merge a b n = a n <|> b n` +merge :: (B.FiniteBits bits, UV.Unbox a) + => SparseVector bits a + -> SparseVector bits a + -> SparseVector bits a +merge a b = SparseVector indices' tricky + where + indices' = indices a .|. indices b + tricky = UV.create $ do + vec <- MUV.new (B.popCount indices') + go vec (indices a) (indices b) 0 0 0 + (!eas, !ebs) = (elements a, elements b) + go !out !indAs !indBs !i !j !k = + if indAs == B.zeroBits || indBs == B.zeroBits then pure out + else let + (!a1, !b1) = (B.countTrailingZeros indAs, B.countTrailingZeros indBs) + in if a1 == b1 then do + MUV.write out k (eas UV.! (i + a1)) + go out (indAs `B.shiftR` (a1 + 1)) (indBs `B.shiftR` (b1 + 1)) + (i + 1) (j + 1) (k + 1) + else if a1 < b1 then do + MUV.write out k (eas UV.! (i + a1)) + go out (indAs `B.shiftR` (a1 + 1)) indBs + (i + 1) j (k + 1) + else do + MUV.write out k (ebs UV.! 
(j + a1)) + go out indAs (indBs `B.shiftR` (b1 + 1)) i (j + 1) (k + 1) + +-- Pointer equality a la Scala. +eq :: a -> a -> Bool +eq x y = Exts.isTrue# (Exts.reallyUnsafePtrEquality# x y Exts.==# 1#) +{-# INLINE eq #-} diff --git a/parser-typechecker/src/Unison/Runtime/Stack.hs b/parser-typechecker/src/Unison/Runtime/Stack.hs new file mode 100644 index 0000000000..9ccd3018c6 --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Stack.hs @@ -0,0 +1,640 @@ +{-# language GADTs #-} +{-# language DataKinds #-} +{-# language BangPatterns #-} +{-# language TypeFamilies #-} +{-# language ViewPatterns #-} +{-# language PatternGuards #-} +{-# language PatternSynonyms #-} + +module Unison.Runtime.Stack + ( K(..) + , IComb(.., Lam_) + , Closure(.., DataC, PApV, CapV) + , Callback(..) + , Augment(..) + , Dump(..) + , MEM(..) + , Stack(..) + , Off + , SZ + , FP + , universalCompare + , marshalToForeign + , unull + , bnull + , peekD + , peekOffD + , pokeD + , pokeOffD + , peekN + , peekOffN + , pokeN + , pokeOffN + , peekBi + , peekOffBi + , pokeBi + , pokeOffBi + , peekOffS + , pokeS + , pokeOffS + , uscount + , bscount + ) where + +import Prelude hiding (words) + +import Control.Monad (when) +import Control.Monad.Primitive + +import Data.Ord (comparing) +import Data.Foldable (fold) + +import Data.Foldable (toList, for_) +import Data.Primitive.ByteArray +import Data.Primitive.PrimArray +import Data.Primitive.Array +import Data.Sequence (Seq) +import qualified Data.Sequence as Sq +import Data.Word + +import Unison.Reference (Reference) + +import Unison.Runtime.ANF (Mem(..), unpackTags, RTag) +import Unison.Runtime.MCode +import Unison.Runtime.Foreign + +import qualified Unison.Type as Ty + +import Unison.Util.EnumContainers as EC + +import GHC.Stack (HasCallStack) + +newtype Callback = Hook (Stack 'UN -> Stack 'BX -> IO ()) + +instance Eq Callback where _ == _ = True +instance Ord Callback where compare _ _ = EQ + +-- Evaluation stack +data K + = KE + -- callback hook + | CB 
Callback + -- mark continuation with a prompt + | Mark !(EnumSet Word64) + !(EnumMap Word64 Closure) + !K + -- save information about a frame for later resumption + | Push !Int -- unboxed frame size + !Int -- boxed frame size + !Int -- pending unboxed args + !Int -- pending boxed args + !Section -- code + !K + deriving (Eq, Ord) + +-- Comb with an identifier +data IComb + = IC !Word64 !Comb + deriving (Show) + +instance Eq IComb where + IC i _ == IC j _ = i == j + +pattern Lam_ ua ba uf bf entry <- IC _ (Lam ua ba uf bf entry) + +-- TODO: more reliable ordering for combinators +instance Ord IComb where + compare (IC i _) (IC j _) = compare i j + +data Closure + = PAp {-# unpack #-} !IComb -- code + {-# unpack #-} !(Seg 'UN) -- unboxed args + {- unpack -} !(Seg 'BX) -- boxed args + | Enum !Word64 + | DataU1 !Word64 !Int + | DataU2 !Word64 !Int !Int + | DataB1 !Word64 !Closure + | DataB2 !Word64 !Closure !Closure + | DataUB !Word64 !Int !Closure + | DataG !Word64 !(Seg 'UN) !(Seg 'BX) + | Captured !K {-# unpack #-} !(Seg 'UN) !(Seg 'BX) + | Foreign !Foreign + | BlackHole + deriving (Show, Eq, Ord) + +splitData :: Closure -> Maybe (Word64, [Int], [Closure]) +splitData (Enum t) = Just (t, [], []) +splitData (DataU1 t i) = Just (t, [i], []) +splitData (DataU2 t i j) = Just (t, [i,j], []) +splitData (DataB1 t x) = Just (t, [], [x]) +splitData (DataB2 t x y) = Just (t, [], [x,y]) +splitData (DataUB t i y) = Just (t, [i], [y]) +splitData (DataG t us bs) = Just (t, ints us, toList bs) +splitData _ = Nothing + +ints :: ByteArray -> [Int] +ints ba = fmap (indexByteArray ba) [0..n-1] + where + n = sizeofByteArray ba `div` 8 + +pattern DataC rt ct us bs <- + (splitData -> Just (unpackTags -> (rt, ct), us, bs)) + +pattern PApV ic us bs <- PAp ic (ints -> us) (toList -> bs) +pattern CapV k us bs <- Captured k (ints -> us) (toList -> bs) + +{-# complete DataC, PAp, Captured, Foreign, BlackHole #-} +{-# complete DataC, PApV, Captured, Foreign, BlackHole #-} +{-# complete DataC, 
PApV, CapV, Foreign, BlackHole #-} + +closureNum :: Closure -> Int +closureNum PAp{} = 0 +closureNum DataC{} = 1 +closureNum Captured{} = 2 +closureNum Foreign{} = 3 +closureNum BlackHole{} = error "BlackHole" + +universalCompare + :: (Word64 -> Reference) + -> (RTag -> Reference) + -> (Foreign -> Foreign -> Ordering) + -> Closure + -> Closure + -> Ordering +universalCompare comb tag frn = cmpc + where + cmpl cm l r + = compare (length l) (length r) <> fold (zipWith cm l r) + cmpc (DataC rt1 ct1 us1 bs1) (DataC rt2 ct2 us2 bs2) + = compare (tag rt1) (tag rt2) + <> compare ct1 ct2 + <> cmpl compare us1 us2 + <> cmpl cmpc bs1 bs2 + cmpc (PApV (IC i1 _) us1 bs1) (PApV (IC i2 _) us2 bs2) + = compare (comb i1) (comb i2) + <> cmpl compare us1 us2 + <> cmpl cmpc bs1 bs2 + cmpc (CapV k1 us1 bs1) (CapV k2 us2 bs2) + = compare k1 k2 + <> cmpl compare us1 us2 + <> cmpl cmpc bs1 bs2 + cmpc (Foreign fl) (Foreign fr) + | Just sl <- maybeUnwrapForeign Ty.vectorRef fl + , Just sr <- maybeUnwrapForeign Ty.vectorRef fr + = comparing Sq.length sl sr <> fold (Sq.zipWith cmpc sl sr) + | otherwise = frn fl fr + cmpc c d = comparing closureNum c d + +marshalToForeign :: HasCallStack => Closure -> Foreign +marshalToForeign (Foreign x) = x +marshalToForeign c + = error $ "marshalToForeign: unhandled closure: " ++ show c + +type Off = Int +type SZ = Int +type FP = Int + +type UA = MutableByteArray (PrimState IO) +type BA = MutableArray (PrimState IO) Closure + +words :: Int -> Int +words n = n `div` 8 + +bytes :: Int -> Int +bytes n = n * 8 + +uargOnto :: UA -> Off -> UA -> Off -> Args' -> IO Int +uargOnto stk sp cop cp0 (Arg1 i) = do + (x :: Int) <- readByteArray stk (sp-i) + writeByteArray cop cp x + pure cp + where cp = cp0+1 +uargOnto stk sp cop cp0 (Arg2 i j) = do + (x :: Int) <- readByteArray stk (sp-i) + (y :: Int) <- readByteArray stk (sp-j) + writeByteArray cop cp x + writeByteArray cop (cp-1) y + pure cp + where cp = cp0+2 +uargOnto stk sp cop cp0 (ArgN v) = do + buf <- if 
overwrite + then newByteArray $ bytes sz + else pure cop + let loop i + | i < 0 = return () + | otherwise = do + (x :: Int) <- readByteArray stk (sp-indexPrimArray v i) + writeByteArray buf (sz-1-i) x + loop $ i-1 + loop $ sz-1 + when overwrite $ + copyMutableByteArray cop (bytes $ cp+1) buf 0 (bytes sz) + pure cp + where + cp = cp0+sz + sz = sizeofPrimArray v + overwrite = sameMutableByteArray stk cop +uargOnto stk sp cop cp0 (ArgR i l) = do + moveByteArray cop cbp stk sbp (bytes l) + pure $ cp0+l + where + cbp = bytes $ cp0+1 + sbp = bytes $ sp-i-l+1 + +bargOnto :: BA -> Off -> BA -> Off -> Args' -> IO Int +bargOnto stk sp cop cp0 (Arg1 i) = do + x <- readArray stk (sp-i) + writeArray cop cp x + pure cp + where cp = cp0+1 +bargOnto stk sp cop cp0 (Arg2 i j) = do + x <- readArray stk (sp-i) + y <- readArray stk (sp-j) + writeArray cop cp x + writeArray cop (cp-1) y + pure cp + where cp = cp0+2 +bargOnto stk sp cop cp0 (ArgN v) = do + buf <- if overwrite + then newArray sz BlackHole + else pure cop + let loop i + | i < 0 = return () + | otherwise = do + x <- readArray stk $ sp-indexPrimArray v i + writeArray buf (sz-1-i) x + loop $ i-1 + loop $ sz-1 + when overwrite $ + copyMutableArray cop (cp0+1) buf 0 sz + pure cp + where + cp = cp0+sz + sz = sizeofPrimArray v + overwrite = stk == cop +bargOnto stk sp cop cp0 (ArgR i l) = do + copyMutableArray cop (cp0+1) stk (sp-i-l+1) l + pure $ cp0+l + +data Dump = A | F Int | S + +dumpAP :: Int -> Int -> Int -> Dump -> Int +dumpAP _ fp sz d@(F _) = dumpFP fp sz d +dumpAP ap _ _ _ = ap + +dumpFP :: Int -> Int -> Dump -> Int +dumpFP fp _ S = fp +dumpFP fp sz A = fp+sz +dumpFP fp sz (F n) = fp+sz-n + +-- closure augmentation mode +-- instruction, kontinuation, call +data Augment = I | K | C + +class MEM (b :: Mem) where + data Stack b :: * + type Elem b :: * + type Seg b :: * + alloc :: IO (Stack b) + peek :: Stack b -> IO (Elem b) + peekOff :: Stack b -> Off -> IO (Elem b) + poke :: Stack b -> Elem b -> IO () + pokeOff :: 
Stack b -> Off -> Elem b -> IO () + grab :: Stack b -> SZ -> IO (Seg b, Stack b) + ensure :: Stack b -> SZ -> IO (Stack b) + bump :: Stack b -> IO (Stack b) + bumpn :: Stack b -> SZ -> IO (Stack b) + duplicate :: Stack b -> IO (Stack b) + discardFrame :: Stack b -> IO (Stack b) + saveFrame :: Stack b -> IO (Stack b, SZ, SZ) + restoreFrame :: Stack b -> SZ -> SZ -> IO (Stack b) + prepareArgs :: Stack b -> Args' -> IO (Stack b) + acceptArgs :: Stack b -> Int -> IO (Stack b) + frameArgs :: Stack b -> IO (Stack b) + augSeg :: Augment -> Stack b -> Seg b -> Maybe Args' -> IO (Seg b) + dumpSeg :: Stack b -> Seg b -> Dump -> IO (Stack b) + fsize :: Stack b -> SZ + asize :: Stack b -> SZ + +instance MEM 'UN where + data Stack 'UN + -- Note: uap <= ufp <= usp + = US { uap :: !Int -- arg pointer + , ufp :: !Int -- frame pointer + , usp :: !Int -- stack pointer + , ustk :: {-# unpack #-} !(MutableByteArray (PrimState IO)) + } + type Elem 'UN = Int + type Seg 'UN = ByteArray + alloc = US (-1) (-1) (-1) <$> newByteArray 4096 + {-# inline alloc #-} + peek (US _ _ sp stk) = readByteArray stk sp + {-# inline peek #-} + peekOff (US _ _ sp stk) i = readByteArray stk (sp-i) + {-# inline peekOff #-} + poke (US _ _ sp stk) n = writeByteArray stk sp n + {-# inline poke #-} + pokeOff (US _ _ sp stk) i n = writeByteArray stk (sp-i) n + {-# inline pokeOff #-} + + -- Eats up arguments + grab (US _ fp sp stk) sze = do + mut <- newByteArray sz + copyMutableByteArray mut 0 stk (bfp-sz) sz + seg <- unsafeFreezeByteArray mut + moveByteArray stk (bfp-sz) stk bfp fsz + pure (seg, US (fp-sze) (fp-sze) (sp-sze) stk) + where + sz = bytes sze + bfp = bytes $ fp+1 + fsz = bytes $ sp-fp + {-# inline grab #-} + + ensure stki@(US ap fp sp stk) sze + | sze <= 0 + || bytes (sp+sze+1) < ssz = pure stki + | otherwise = do + stk' <- resizeMutableByteArray stk (ssz+10240) + pure $ US ap fp sp stk' + where + ssz = sizeofMutableByteArray stk + {-# inline ensure #-} + + bump (US ap fp sp stk) = pure $ US ap fp 
(sp+1) stk + {-# inline bump #-} + + bumpn (US ap fp sp stk) n = pure $ US ap fp (sp+n) stk + {-# inline bumpn #-} + + duplicate (US ap fp sp stk) + = US ap fp sp <$> do + b <- newByteArray sz + copyMutableByteArray b 0 stk 0 sz + pure b + where + sz = sizeofMutableByteArray stk + {-# inline duplicate #-} + + discardFrame (US ap fp _ stk) = pure $ US ap fp fp stk + {-# inline discardFrame #-} + + saveFrame (US ap fp sp stk) = pure (US sp sp sp stk, sp-fp, fp-ap) + {-# inline saveFrame #-} + + restoreFrame (US _ fp0 sp stk) fsz asz = pure $ US ap fp sp stk + where fp = fp0-fsz + ap = fp-asz + {-# inline restoreFrame #-} + + prepareArgs (US ap fp sp stk) (ArgR i l) + | fp+l+i == sp = pure $ US ap (sp-i) (sp-i) stk + prepareArgs (US ap fp sp stk) args = do + sp <- uargOnto stk sp stk fp args + pure $ US ap sp sp stk + {-# inline prepareArgs #-} + + acceptArgs (US ap fp sp stk) n = pure $ US ap (fp-n) sp stk + {-# inline acceptArgs #-} + + frameArgs (US ap _ sp stk) = pure $ US ap ap sp stk + {-# inline frameArgs #-} + + augSeg mode (US ap fp sp stk) seg margs = do + cop <- newByteArray $ ssz+psz+asz + copyByteArray cop soff seg 0 ssz + copyMutableByteArray cop 0 stk ap psz + for_ margs $ uargOnto stk sp cop (words poff + pix - 1) + unsafeFreezeByteArray cop + where + ssz = sizeofByteArray seg + pix | I <- mode = 0 | otherwise = fp-ap + (poff,soff) + | K <- mode = (ssz,0) + | otherwise = (0,psz+asz) + psz = bytes pix + asz = case margs of + Nothing -> 0 + Just (Arg1 _) -> 8 + Just (Arg2 _ _) -> 16 + Just (ArgN v) -> bytes $ sizeofPrimArray v + Just (ArgR _ l) -> bytes l + {-# inline augSeg #-} + + dumpSeg (US ap fp sp stk) seg mode = do + copyByteArray stk bsp seg 0 ssz + pure $ US ap' fp' sp' stk + where + bsp = bytes $ sp+1 + ssz = sizeofByteArray seg + sz = words ssz + sp' = sp+sz + fp' = dumpFP fp sz mode + ap' = dumpAP ap fp sz mode + {-# inline dumpSeg #-} + + fsize (US _ fp sp _) = sp-fp + {-# inline fsize #-} + + asize (US ap fp _ _) = fp-ap + {-# inline asize 
#-} + +peekN :: Stack 'UN -> IO Word64 +peekN (US _ _ sp stk) = readByteArray stk sp +{-# inline peekN #-} + +peekD :: Stack 'UN -> IO Double +peekD (US _ _ sp stk) = readByteArray stk sp +{-# inline peekD #-} + +peekOffN :: Stack 'UN -> Int -> IO Word64 +peekOffN (US _ _ sp stk) i = readByteArray stk (sp-i) +{-# inline peekOffN #-} + +peekOffD :: Stack 'UN -> Int -> IO Double +peekOffD (US _ _ sp stk) i = readByteArray stk (sp-i) +{-# inline peekOffD #-} + +pokeN :: Stack 'UN -> Word64 -> IO () +pokeN (US _ _ sp stk) n = writeByteArray stk sp n +{-# inline pokeN #-} + +pokeD :: Stack 'UN -> Double -> IO () +pokeD (US _ _ sp stk) d = writeByteArray stk sp d +{-# inline pokeD #-} + +pokeOffN :: Stack 'UN -> Int -> Word64 -> IO () +pokeOffN (US _ _ sp stk) i n = writeByteArray stk (sp-i) n +{-# inline pokeOffN #-} + +pokeOffD :: Stack 'UN -> Int -> Double -> IO () +pokeOffD (US _ _ sp stk) i d = writeByteArray stk (sp-i) d +{-# inline pokeOffD #-} + +pokeBi :: BuiltinForeign b => Stack 'BX -> b -> IO () +pokeBi bstk x = poke bstk (Foreign $ wrapBuiltin x) +{-# inline pokeBi #-} + +pokeOffBi :: BuiltinForeign b => Stack 'BX -> Int -> b -> IO () +pokeOffBi bstk i x = pokeOff bstk i (Foreign $ wrapBuiltin x) +{-# inline pokeOffBi #-} + +peekBi :: BuiltinForeign b => Stack 'BX -> IO b +peekBi bstk = unwrapForeign . marshalToForeign <$> peek bstk +{-# inline peekBi #-} + +peekOffBi :: BuiltinForeign b => Stack 'BX -> Int -> IO b +peekOffBi bstk i = unwrapForeign . marshalToForeign <$> peekOff bstk i +{-# inline peekOffBi #-} + +peekOffS :: Stack 'BX -> Int -> IO (Seq Closure) +peekOffS bstk i = + unwrapForeign . 
marshalToForeign <$> peekOff bstk i +{-# inline peekOffS #-} + +pokeS :: Stack 'BX -> Seq Closure -> IO () +pokeS bstk s = poke bstk (Foreign $ Wrap Ty.vectorRef s) +{-# inline pokeS #-} + +pokeOffS :: Stack 'BX -> Int -> Seq Closure -> IO () +pokeOffS bstk i s = pokeOff bstk i (Foreign $ Wrap Ty.vectorRef s) +{-# inline pokeOffS #-} + +unull :: Seg 'UN +unull = byteArrayFromListN 0 ([] :: [Int]) + +bnull :: Seg 'BX +bnull = fromListN 0 [] + +instance Show (Stack 'BX) where + show (BS ap fp sp _) + = "BS " ++ show ap ++ " " ++ show fp ++ " " ++ show sp +instance Show (Stack 'UN) where + show (US ap fp sp _) + = "US " ++ show ap ++ " " ++ show fp ++ " " ++ show sp +instance Show K where + show k = "[" ++ go "" k + where + go _ KE = "]" + go _ (CB _) = "]" + go com (Push uf bf ua ba _ k) + = com ++ show (uf,bf,ua,ba) ++ go "," k + go com (Mark ps _ k) = com ++ "M" ++ show ps ++ go "," k + +instance MEM 'BX where + data Stack 'BX + = BS { bap :: !Int + , bfp :: !Int + , bsp :: !Int + , bstk :: {-# unpack #-} !(MutableArray (PrimState IO) Closure) + } + type Elem 'BX = Closure + type Seg 'BX = Array Closure + + alloc = BS (-1) (-1) (-1) <$> newArray 512 BlackHole + {-# inline alloc #-} + + peek (BS _ _ sp stk) = readArray stk sp + {-# inline peek #-} + + peekOff (BS _ _ sp stk) i = readArray stk (sp-i) + {-# inline peekOff #-} + + poke (BS _ _ sp stk) x = writeArray stk sp x + {-# inline poke #-} + + pokeOff (BS _ _ sp stk) i x = writeArray stk (sp-i) x + {-# inline pokeOff #-} + + grab (BS _ fp sp stk) sz = do + seg <- unsafeFreezeArray =<< cloneMutableArray stk (fp+1-sz) sz + copyMutableArray stk (fp+1-sz) stk (fp+1) fsz + pure (seg, BS (fp-sz) (fp-sz) (sp-sz) stk) + where fsz = sp-fp + {-# inline grab #-} + + ensure stki@(BS ap fp sp stk) sz + | sz <= 0 = pure stki + | sp+sz+1 < ssz = pure stki + | otherwise = do + stk' <- newArray (ssz+1280) BlackHole + copyMutableArray stk' 0 stk 0 (sp+1) + pure $ BS ap fp sp stk' + where ssz = sizeofMutableArray stk + {-# inline 
ensure #-} + + bump (BS ap fp sp stk) = pure $ BS ap fp (sp+1) stk + {-# inline bump #-} + + bumpn (BS ap fp sp stk) n = pure $ BS ap fp (sp+n) stk + {-# inline bumpn #-} + + duplicate (BS ap fp sp stk) + = BS ap fp sp <$> cloneMutableArray stk 0 (sizeofMutableArray stk) + {-# inline duplicate #-} + + discardFrame (BS ap fp _ stk) = pure $ BS ap fp fp stk + {-# inline discardFrame #-} + + saveFrame (BS ap fp sp stk) = pure (BS sp sp sp stk, sp-fp, fp-ap) + {-# inline saveFrame #-} + + restoreFrame (BS _ fp0 sp stk) fsz asz = pure $ BS ap fp sp stk + where + fp = fp0-fsz + ap = fp-asz + {-# inline restoreFrame #-} + + prepareArgs (BS ap fp sp stk) (ArgR i l) + | fp+i+l == sp = pure $ BS ap (sp-i) (sp-i) stk + prepareArgs (BS ap fp sp stk) args = do + sp <- bargOnto stk sp stk fp args + pure $ BS ap sp sp stk + {-# inline prepareArgs #-} + + acceptArgs (BS ap fp sp stk) n = pure $ BS ap (fp-n) sp stk + {-# inline acceptArgs #-} + + frameArgs (BS ap _ sp stk) = pure $ BS ap ap sp stk + {-# inline frameArgs #-} + + augSeg mode (BS ap fp sp stk) seg margs = do + cop <- newArray (ssz+psz+asz) BlackHole + copyArray cop soff seg 0 ssz + copyMutableArray cop poff stk ap psz + for_ margs $ bargOnto stk sp cop (poff+psz-1) + unsafeFreezeArray cop + where + ssz = sizeofArray seg + psz | I <- mode = 0 | otherwise = fp-ap + (poff,soff) + | K <- mode = (ssz,0) + | otherwise = (0,psz+asz) + asz = case margs of + Nothing -> 0 + Just (Arg1 _) -> 1 + Just (Arg2 _ _) -> 2 + Just (ArgN v) -> sizeofPrimArray v + Just (ArgR _ l) -> l + {-# inline augSeg #-} + + dumpSeg (BS ap fp sp stk) seg mode = do + copyArray stk (sp+1) seg 0 sz + pure $ BS ap' fp' sp' stk + where + sz = sizeofArray seg + sp' = sp+sz + fp' = dumpFP fp sz mode + ap' = dumpAP ap fp sz mode + {-# inline dumpSeg #-} + + fsize (BS _ fp sp _) = sp-fp + {-# inline fsize #-} + + asize (BS ap fp _ _) = fp-ap + +uscount :: Seg 'UN -> Int +uscount seg = words $ sizeofByteArray seg + +bscount :: Seg 'BX -> Int +bscount seg = 
sizeofArray seg + diff --git a/parser-typechecker/src/Unison/Runtime/Vector.hs b/parser-typechecker/src/Unison/Runtime/Vector.hs new file mode 100644 index 0000000000..59fed5041e --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/Vector.hs @@ -0,0 +1,54 @@ +{-# Language GADTs #-} + +module Unison.Runtime.Vector where + +import Unison.Prelude + +import qualified Data.MemoCombinators as Memo +import qualified Data.Vector.Unboxed as UV + +-- A `Vec a` denotes a `Nat -> Maybe a` +data Vec a where + Scalar :: a -> Vec a + Vec :: UV.Unbox a => UV.Vector a -> Vec a + Pair :: Vec a -> Vec b -> Vec (a, b) + Choose :: Vec Bool -> Vec a -> Vec a -> Vec a + Mux :: Vec Nat -> Vec (Vec a) -> Vec a + +-- todo: maybe make representation `(UV.Vector Nat -> UnboxedMap Nat a, Bound)` +-- `UnboxedMap Nat a = (UV.Vector Nat, UV.Vector a)` +-- UnboxedMap Nat could be implemented as an `UArray` +-- `Bound` is Nat, max possible index +-- then easy to implement `+`, `-`, etc + +type Nat = Word64 + +mu :: Vec a -> Nat -> Maybe a +mu v = case v of + Scalar a -> const (Just a) + Vec vs -> \i -> vs UV.!? fromIntegral i + Choose cond t f -> let + (condr, tr, tf) = (mu cond, mu t, mu f) + in \i -> condr i >>= \b -> if b then tr i else tf i + Mux mux branches -> let + muxr = mu mux + branchesr = Memo.integral $ let f = mu branches in \i -> mu <$> f i + in \i -> do j <- muxr i; b <- branchesr j; b i + Pair v1 v2 -> let + (v1r, v2r) = (mu v1, mu v2) + in \i -> liftA2 (,) (v1r i) (v2r i) + +-- Returns the maximum `Nat` for which `mu v` may return `Just`. 
+bound :: Nat -> Vec a -> Nat +bound width v = case v of + Scalar _ -> width + Vec vs -> fromIntegral $ UV.length vs + Pair v1 v2 -> bound width v1 `min` bound width v2 + Choose cond _ _ -> bound width cond + Mux mux _ -> bound width mux + +toList :: Vec a -> [a] +toList v = let + n = bound maxBound v + muv = mu v + in catMaybes $ muv <$> [0..n] diff --git a/parser-typechecker/src/Unison/Runtime/docs.markdown b/parser-typechecker/src/Unison/Runtime/docs.markdown new file mode 100644 index 0000000000..d08f3aed4d --- /dev/null +++ b/parser-typechecker/src/Unison/Runtime/docs.markdown @@ -0,0 +1,240 @@ + +# Documentation of the Unison runtime + +This directory has the code for the Unison runtime. + +The Unison runtime is responsible for evaluating Unison code which has been parsed and typechecked. Evaluation converts _computations_, which contain reducible expressions (an expression like `1 + 1` or `case (a,b) of ..`) to _values_ (like `42`), which don't have redexes and which are said to be in _normal form_. The runtime has some design constraints: + +##### It should be possible at runtime to hash, serialize, deserialize, and compute the dependencies of any value in the language, including functions. + +These capabilities are needed for the implementation of Unison's distributed programming API which ships arbitrary values over the network (and these functions are also just super convenient for other reasons too). That is, it needs to be possible to have functions like: + + * `encode : forall a . a -> Bytes` + * `decode : forall a . Bytes -> Either Err a` + * `dependencies : forall a . a -> Set Reference` + * `hash : forall a . a -> Hash` + * Note: the types of these might be more constrained than this so you can't break parametricity and use them in parametric code, but the idea is that they could in principle have these types and they really do work _for all_ `a`. 
Importantly, values may contain cycles (a recursive function, for instance), and these need to be serializable and hashable as well, so there needs to be a way of detecting and encoding these cycles reliably in all cases (having the serialization code blow up when it hits a cycle is not allowed).
+ +## Overview + +To evaluate a Unison term, `p : AnnotatedTerm v a` which has been successfully typechecked, the runtime goes through several phases: + + p0 : AnnotatedTerm v a + || + let rec minimization + || + p1 : AnnotatedTerm v a + || + lambda lifting + || + p2 : AnnotatedTerm v a + || + A-normal form (ANF) conversion + || + p3 : AnnotatedTerm v a + || + compilation + || + p4 : IR (see `IR.hs`) + || + evaluation + || + p5 : IR.V (see `IR.hs`) + || + decompilation + || + p6 : AnnotatedTerm v () + +Here's a brief overview of these phases: + +* let rec minimization eliminates needless cycles and reduces cycle sizes to the minimum, to prepare for just having `let` be the only place in the runtime that must deal with ability requests. +* lambda lifting eliminates lambdas with free variables by adding extra parameters for each free variable, simplifying the later compilation phase. +* ANF moves any function calls or ability requests to the body of a `let` or `let rec`, which is the last thing needed to ensure that `let` is the only place we need to deal with ability requests. +* Compilation converts the ANF code to an intermediate representation, which can be interpreted directly to produce a value, `V`. +* After evaluation, the `V` can be decompiled back to a term, which can be displayed to the user in the codebase editor tool. + +#### Phase 1: let rec minimization + +_let rec minimization_ breaks up recursive blocks into a mix of `let` and minimally-sized `let rec` blocks. The implementation is in [`Unison.Typechecker.Components.minimize`](../Typechecker/Components.hs#L17). + +__Why do we do this?__ + +* We decided for sanity and simplicity that the bindings of a cycle (like `ping` and `pong`) can't use any abilities, since it's unclear what order things happen in (if `ping` uses abilities and has a forward reference to `pong`, and `pong` has a reference to `ping` and uses abilities, which effect should happen first??). 
To clarify, mutually recursive functions in a let rec may use abilities in their body, since those abilities aren't required until the function is called. +* But when the source of a program reveals a clear dependency order to the bindings, we want to be able to use abilities. +* This transformation is also useful in conjunction with ANF conversion - it means that interpretation of `let` is the _one place in the runtime_ where we need to expect an ability request. It makes it very easy to construct the continuations which are passed to the ability handlers. + +_Note:_ The typechecker also does this same let rec minimization transform before typechecking, and when typechecking any `let rec` blocks that remain, it sets the ambient ability to be empty. (TODO: check that typechecker does in fact do this, and fix if not) + +#### Phase 2: lambda lifting + +This transform is currently in the [`ANF.hs`](ANF.hs#L26) file, see the `lambdaLift` function there. This transform eliminates free variables from any lambdas in the term, by turning them into ordinary function parameters. This leaves only closed lambdas, which are easy to compile. + +A lambda with free variables is really a program that will generate a function at runtime when values for those free variables are known. Turning these free variables into function parameters just means less cases to deal with later on during compilation. + +#### Phase 3: ANF conversion + +See [Wikipedia page for ANF](https://en.wikipedia.org/wiki/A-normal_form). __Why do we do this?__ It's much simpler to compile and optimize, and importantly, it leaves us with __just one place__, in `let`, where the continuations of ability requests are processed by the runtime. 
+ +Example: + +``` +handle (state 0) in + x = State.get + 3 + y = x + 1 + State.set 42 + 99 +``` + +This isn't in ANF, and if we tried to compile this directly, our code for doing function application (the `State.get + 3`) would need to be prepared to deal with ability requests and would need to be able to construct the appropriate continuation: + +``` +r -> let + x = r + 3 + y = x + 1 + State.set 42 + 99 +``` + +In contrast, if the code is in ANF, then function application doesn't need to deal with ability requests, as functions are always being applied to values: + +``` +handle (state 0) in + r = State.get + x = r + 3 + y = x + 1 + State.set 42 + 99 +``` + +#### Phase 4: compilation to IR + +The `IR` type, defined in `IR.hs`, denotes a `[Value] -> Result Value`, a function that takes a stack of values and returns a result, where: + +``` +type Result v = + MatchFail | Done v | + Request Reference CtorId [v] IR +-- ^ ^ ^ ^ +-- ability ctor args continuation +``` + +`Value` (defined in [`IR.hs`](Value.hs)) has no redexes and is in normal form. + +An example of `Request`, for the expression `State.set 42` - the `Reference` is `State`, the `CtorId` (an `Int`) is the constructor number for `State.set`, the args is `[42]`, and + +`IR` is represented as an ANF tree, where variable references are represented by their [De Bruijn index](https://en.wikipedia.org/wiki/De_Bruijn_index): the nearest / innermost bound variable has an index of 0, the next nearest has an index of 1, etc. Some more interesting examples: + +* In `x y -> x + y`, in the body of the lambda, `x` has an index of 1 and `y` has an index of 0. +* In `let x = 1; y = x + 1`, in the body of `y`, `x` has an index of 0. +* In `let rec ping x = pong x; pong x = ping x`, in the body of `ping`, `x` has an index of `0`, `pong` has an index of `1`, and `ping` has an index of `2`. 
+* In `case x of (y, (z, q)) -> y + z`, in the `y + z` expression, `y` has an index of `2` and `z` has an index of `1` and `q` has an index of 0. + +Put another way, variable bindings are pushed onto a stack in "source order", and their position on the stack at each usage site is their De Bruijn index. + +In order to convert from the ANF form of the term, which has named variable references, and the De Bruijn indexed `IR` type, we need to convert from named references to De Bruijn indices. For that, we use the function [`ABT.annotateBound'`](../ABT.hs#L120-L126), which is defined for any abstract binding tree: + +```haskell +annotateBound' + :: (Ord v, Functor f, Foldable f) + => Term f v a0 + -> Term f v [v] +``` + +This annotates every node of an ABT with the list of bound variables, in order, such that the de bruijn index of variable, `v`, can be computed via [`elemIndex`](http://hackage.haskell.org/package/base-4.12.0.0/docs/Data-List.html#v:elemIndex) of `v` in this list. Easy peasy. + +Once we have that, the conversion to `IR`, defined in `IR.compile0`, is straightforward. A couple wrinkles: + +* When a lambda is partially applied, for instance `(x y -> foo x + y) 42`, we recompile the lambda, subsituting the arguments already applied, in this case substituting `x` with `42`, leaving the lambda `(y -> foo 42 + y)`. To support this, the `compile0` function actually takes this environment of already evaluated values, as a `[(SymbolC, Maybe V)]`. This is added onto the end of the list of bound variables. +* Variables which are lambda-lifted out of a function are compiled as "lazy stack lookups" (the `LazySlot` constructor, defined in [`IR.hs`](IR.hs)), which doesn't look inside references on the stack. Why is this done? 
+ +Well, consider: + +```Haskell +let rec loop = n -> + if n == 100 then n + else loop (n + 1) +``` + +After lambda lifting, this looks like: + +```Haskell +let rec loop = + (self n -> if n == 100 then n + else self (n + 1)) loop + ^^^^ +``` + +But this isn't quite right - the `loop` which is passed to itself needs to be passed lazily, otherwise this would not terminate. Really, we want something more like: + +```Haskell +let rec loop = + (self n -> if n == 100 then n + else !self (n + 1)) 'loop + ^^^^ +``` + +Notice that `loop` is passed as the thunk, `'loop`, which is forced inside the body (the `!self`). But we don't literally need to use thunks like this, we can just avoid forcing the reference which appears at that stack slot. + +The `SymbolC` variable type used by the `compile0` function just tracks which variables need to get this treatment - these variables are compiled as a `LazySlot` rather than a `Slot`. + +#### Phase 5: evaluation + +There are interpreters in [`Rt0.hs`](Rt0.hs) and (in progress) [`Rt1.hs`](Rt1.hs). Recall our denotation for `IR` is an `[Value] -> Result Value`, a function that takes a stack of values and produces a `Result`, where: + +``` +type Result v = + MatchFail | Done v | + Request Reference CtorId [v] IR +-- ^ ^ ^ ^ +-- ability ctor args continuation +``` + +If you go through [`Rt0.hs`](Rt0.hs), many of the cases are straightforward: there is basically only one or two reasonable things to do. In general, instructions that introduce variables (like `let`, `let rec`, function calls, and pattern matching) will push onto the stack and invoke the interpreter on subexpressions with this updated stack. + +As a result of the ANF representation, only `let` needs to be prepared to deal with a `Request` result. + +A couple wrinkles: + +* Functions, when fully applied, are simple to interpret: we push the arguments onto the stack and then run the body of the function. 
But functions can also be under-applied (given fewer arguments than their arity) or over-applied (given more arguments than their arity). These cases all need to be handled: + * Under-applied functions are recompiled, with the given arguments substituted into the body of the function. So `(x y -> x + y) 42` becomes the lambda `y -> 42 + y`. + * Over-applied functions are treated as a fully-applied function, and the result of that call is then applied to the remaining arguments. So `(x -> x) (y -> y) 42` first evaluates the `(x -> x) (y -> y)`, producing `(y -> y)`, and then applies `(y -> y) 42`, producing `42`. +* __Tail calls:__ When a function call is the last expression in a function, that call can discard the stack elements for the current call. + * That is, suppose we are inside the function `x y -> let z = x + y; foo z 19`. At the point of the `foo z 19` call, `x` and `y` are on the stack, but aren't needed anymore. The call to `foo` could therefore drop `x` and `y` from the stack, push `z` and `19` onto the stack and then call `foo`. `foo` only examines at most two stack elements, one for each arg it receives, so it doesn't care whether `x` and `y` are below it in the stack or not. + * There are a few approaches for allowing the stack to be reclaimed: + * One is to have a separate tail call instruction in the `IR` (this makes the `compile` function a bit more complicated, and the evaluator, since it needs to different cases, one for regular calls and one for tail calls). + * Another is to track in the evaluator when a function call is in tail position, and interpret function calls accordingly (this means more state being tracked by the evaluator, and more cases. + * The last approach is to just garbage collect the stack occasionally, this is Baker's idea, [Cheney on the MTA](http://home.pipeline.com/~hbaker1/CheneyMTA.html). 
To GC the stack, you simply look at the current `IR` and compute its max De Bruijn index, say that's 4, which means that only the top `5` elements of the stack are still referenced by the rest of your computation. You copy these 5 elements to a fresh stack, preserving their order, reset the top of the stack to `5`, and continue on. + * And these approaches need not be mutually exclusive - you can garbage collect the stack and stil have a separate tail call instruction. + +Currently, the `IR` doesn't have a separate tail call instruction and nothing is implemented for tail calls. I think the Cheney on the MTA is very simple, so will probably just do that for now. + +One interesting aspect of the Cheney on the MTA approach is that it's more accurate about garbage collecting references that remain in lexical scope, but which aren't used any longer. For instance, consider: + +```Haskell +let + x = computeHugeList 99 + n = Sequence.size x + y = loop n + y + 100 +``` + +Assume `x` is some huge list and `loop` is some long running loop. At the point where this `loop` function is invoked, `x` is no longer used by the rest of the computation, but because `loop` isn't a tail call, `x` is kept around on the stack and not GC'd. With the Cheny on the MTA approach, this doesn't matter--`x` can be garbage collected as soon as the continuation of the computation no longer references it, independent of any tail calls. + +It seems nice to do this sort of GC (possibly in addition to having a separate tail call instruction). Having some dangling reference in lexical scope is one of those things that causes occasional hard-to-debug memory leaks. I've heard that the JVM will even null out stack slots which aren't used anymore, which is a bit like this. 
diff --git a/parser-typechecker/src/Unison/TermParser.hs b/parser-typechecker/src/Unison/TermParser.hs new file mode 100644 index 0000000000..e3a74ae5fb --- /dev/null +++ b/parser-typechecker/src/Unison/TermParser.hs @@ -0,0 +1,901 @@ +{-# OPTIONS_GHC -Wno-partial-type-signatures #-} + +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE PartialTypeSignatures #-} + +module Unison.TermParser where + +import Unison.Prelude + +import Control.Monad.Reader (asks, local) +import Prelude hiding (and, or, seq) +import Unison.Name (Name) +import Unison.Names3 (Names) +import Unison.Reference (Reference) +import Unison.Referent (Referent) +import Unison.Parser hiding (seq) +import Unison.Pattern (Pattern) +import Unison.Term (Term, IsTop) +import Unison.Type (Type) +import Unison.Util.List (intercalateMapWith, quenchRuns) +import Unison.Var (Var) +import qualified Data.List.Extra as List.Extra +import qualified Data.Char as Char +import qualified Data.List as List +import qualified Data.Maybe as Maybe +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Data.Tuple.Extra as TupleE +import qualified Data.Sequence as Sequence +import qualified Text.Megaparsec as P +import qualified Unison.ABT as ABT +import qualified Unison.Builtin.Decls as DD +import qualified Unison.HashQualified as HQ +import qualified Unison.Lexer as L +import qualified Unison.Name as Name +import qualified Unison.Names3 as Names +import qualified Unison.Parser as Parser (seq, uniqueName) +import qualified Unison.Pattern as Pattern +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import qualified Unison.Typechecker.Components as Components +import qualified Unison.TypeParser as TypeParser +import qualified Unison.Var as Var + +watch :: Show a => String -> a -> a +watch msg a = let !_ = trace (msg ++ ": " ++ show a) () in a + +{- +Precedence of language 
constructs is identical to Haskell, except that all +operators (like +, <*>, or any sequence of non-alphanumeric characters) are +left-associative and equal precedence, and operators must have surrounding +whitespace (a + b, not a+b) to distinguish from identifiers that may contain +operator characters (like empty? or fold-left). + +Sections / partial application of infix operators is not implemented. +-} + +type TermP v = P v (Term v Ann) + +term :: Var v => TermP v +term = term2 + +term2 :: Var v => TermP v +term2 = lam term2 <|> term3 + +term3 :: Var v => TermP v +term3 = do + t <- infixAppOrBooleanOp + ot <- optional (reserved ":" *> TypeParser.computationType) + pure $ case ot of + Nothing -> t + Just y -> Term.ann (mkAnn t y) t y + +keywordBlock :: Var v => TermP v +keywordBlock = letBlock <|> handle <|> ifthen <|> match <|> lamCase + +typeLink' :: Var v => P v (L.Token Reference) +typeLink' = do + id <- hqPrefixId + ns <- asks names + case Names.lookupHQType (L.payload id) ns of + s | Set.size s == 1 -> pure $ const (Set.findMin s) <$> id + | otherwise -> customFailure $ UnknownType id s + +termLink' :: Var v => P v (L.Token Referent) +termLink' = do + id <- hqPrefixId + ns <- asks names + case Names.lookupHQTerm (L.payload id) ns of + s | Set.size s == 1 -> pure $ const (Set.findMin s) <$> id + | otherwise -> customFailure $ UnknownTerm id s + +link' :: Var v => P v (Either (L.Token Reference) (L.Token Referent)) +link' = do + id <- hqPrefixId + ns <- asks names + case (Names.lookupHQTerm (L.payload id) ns, Names.lookupHQType (L.payload id) ns) of + (s, s2) | Set.size s == 1 && Set.null s2 -> pure . Right $ const (Set.findMin s) <$> id + (s, s2) | Set.size s2 == 1 && Set.null s -> pure . 
Left $ const (Set.findMin s2) <$> id + (s, s2) -> customFailure $ UnknownId id s s2 + +link :: Var v => TermP v +link = termLink <|> typeLink + where + typeLink = do + P.try (reserved "typeLink") -- type opens a block, gotta use something else + tok <- typeLink' + pure $ Term.typeLink (ann tok) (L.payload tok) + termLink = do + P.try (reserved "termLink") + tok <- termLink' + pure $ Term.termLink (ann tok) (L.payload tok) + +-- We disallow type annotations and lambdas, +-- just function application and operators +blockTerm :: Var v => TermP v +blockTerm = lam term <|> infixAppOrBooleanOp + +match :: Var v => TermP v +match = do + start <- openBlockWith "match" + scrutinee <- term + _ <- closeBlock + _ <- P.try (openBlockWith "with") <|> do + t <- anyToken + P.customFailure (ExpectedBlockOpen "with" t) + cases <- sepBy1 semi matchCase + -- TODO: Add error for empty match list + _ <- closeBlock + pure $ Term.match (ann start <> ann (last cases)) scrutinee cases + +matchCase :: Var v => P v (Term.MatchCase Ann (Term v Ann)) +matchCase = do + (p, boundVars) <- parsePattern + let boundVars' = snd <$> boundVars + guard <- optional $ reserved "|" *> infixAppOrBooleanOp + t <- block "->" + let absChain vs t = foldr (\v t -> ABT.abs' (ann t) v t) t vs + pure . Term.MatchCase p (fmap (absChain boundVars') guard) $ absChain boundVars' t + +parsePattern :: forall v. Var v => P v (Pattern Ann, [(Ann, v)]) +parsePattern = root + where + root = chainl1 patternCandidates patternInfixApp + patternCandidates = constructor <|> leaf + patternInfixApp :: P v ((Pattern Ann, [(Ann, v)]) + -> (Pattern Ann, [(Ann, v)]) + -> (Pattern Ann, [(Ann, v)])) + patternInfixApp = f <$> seqOp + where + f op (l, lvs) (r, rvs) = + (Pattern.SequenceOp (ann l <> ann r) l op r, lvs ++ rvs) + + -- note: nullaryCtor comes before var patterns, since (for better or worse) + -- they can overlap (a variable could be called 'Foo' in the current grammar). 
+ -- This order treats ambiguous patterns as nullary constructors if there's + -- a constructor with a matching name. + leaf = literal <|> nullaryCtor <|> varOrAs <|> unbound <|> seqLiteral <|> + parenthesizedOrTuplePattern <|> effect + literal = (,[]) <$> asum [true, false, number, text, char] + true = (\t -> Pattern.Boolean (ann t) True) <$> reserved "true" + false = (\t -> Pattern.Boolean (ann t) False) <$> reserved "false" + number = number' (tok Pattern.Int) (tok Pattern.Nat) (tok Pattern.Float) + text = (\t -> Pattern.Text (ann t) (L.payload t)) <$> string + char = (\c -> Pattern.Char (ann c) (L.payload c)) <$> character + parenthesizedOrTuplePattern :: P v (Pattern Ann, [(Ann, v)]) + parenthesizedOrTuplePattern = tupleOrParenthesized parsePattern unit pair + unit ann = (Pattern.Constructor ann DD.unitRef 0 [], []) + pair (p1, v1) (p2, v2) = + (Pattern.Constructor (ann p1 <> ann p2) DD.pairRef 0 [p1, p2], + v1 ++ v2) + -- Foo x@(Blah 10) + varOrAs :: P v (Pattern Ann, [(Ann, v)]) + varOrAs = do + v <- wordyPatternName + o <- optional (reserved "@") + if isJust o then + (\(p, vs) -> (Pattern.As (ann v) p, tokenToPair v : vs)) <$> leaf + else pure (Pattern.Var (ann v), [tokenToPair v]) + unbound :: P v (Pattern Ann, [(Ann, v)]) + unbound = (\tok -> (Pattern.Unbound (ann tok), [])) <$> blank + ctor :: _ -> P v (L.Token (Reference, Int)) + ctor err = do + -- this might be a var, so we avoid consuming it at first + tok <- P.try (P.lookAhead hqPrefixId) + names <- asks names + case Names.lookupHQPattern (L.payload tok) names of + s | Set.null s -> die tok s + | Set.size s > 1 -> die tok s + | otherwise -> -- matched ctor name, consume the token + do anyToken; pure (Set.findMin s <$ tok) + where + isLower = Text.all Char.isLower . Text.take 1 . 
Name.toText + die hq s = case L.payload hq of + -- if token not hash qualified or uppercase, + -- fail w/out consuming it to allow backtracking + HQ.NameOnly n | Set.null s && + isLower n -> fail $ "not a constructor name: " <> show n + -- it was hash qualified, and wasn't found in the env, that's a failure! + _ -> failCommitted $ err hq s + + unzipPatterns f elems = case unzip elems of (patterns, vs) -> f patterns (join vs) + + effectBind0 = do + tok <- ctor UnknownAbilityConstructor + leaves <- many leaf + _ <- reserved "->" + pure (tok, leaves) + + effectBind = do + (tok, leaves) <- P.try effectBind0 + let (ref,cid) = L.payload tok + (cont, vsp) <- parsePattern + pure $ + let f patterns vs = (Pattern.EffectBind (ann tok <> ann cont) ref cid patterns cont, vs ++ vsp) + in unzipPatterns f leaves + + effectPure = go <$> parsePattern where + go (p, vs) = (Pattern.EffectPure (ann p) p, vs) + + effect = do + start <- openBlockWith "{" + (inner, vs) <- effectBind <|> effectPure + end <- closeBlock + pure (Pattern.setLoc inner (ann start <> ann end), vs) + + -- ex: unique type Day = Mon | Tue | ... + nullaryCtor = P.try $ do + tok <- ctor UnknownAbilityConstructor + let (ref, cid) = L.payload tok + pure (Pattern.Constructor (ann tok) ref cid [], []) + + constructor = do + tok <- ctor UnknownDataConstructor + let (ref,cid) = L.payload tok + f patterns vs = + let loc = foldl (<>) (ann tok) $ map ann patterns + in (Pattern.Constructor loc ref cid patterns, vs) + unzipPatterns f <$> many leaf + + seqLiteral = Parser.seq f root + where f loc = unzipPatterns ((,) . 
Pattern.SequenceLiteral loc) + +lam :: Var v => TermP v -> TermP v +lam p = label "lambda" $ mkLam <$> P.try (some prefixDefinitionName <* reserved "->") <*> p + where + mkLam vs b = Term.lam' (ann (head vs) <> ann b) (map L.payload vs) b + +letBlock, handle, lamCase, ifthen :: Var v => TermP v +letBlock = label "let" $ block "let" + +handle = label "handle" $ do + b <- block "handle" + handler <- block "with" + pure $ Term.handle (ann b) handler b + +lamCase = do + start <- openBlockWith "cases" + cases <- sepBy1 semi matchCase + -- TODO: Add error for empty match list + _ <- closeBlock + lamvar <- Parser.uniqueName 10 + let lamvarTerm = Term.var (ann start) (Var.named lamvar) + matchTerm = Term.match (ann start <> ann (last cases)) lamvarTerm cases + pure $ Term.lam (ann start <> ann (last cases)) (Var.named lamvar) matchTerm + + +ifthen = label "if" $ do + start <- peekAny + c <- block "if" + t <- block "then" + f <- block "else" + pure $ Term.iff (ann start <> ann f) c t f + +text :: Var v => TermP v +text = tok Term.text <$> string + +char :: Var v => TermP v +char = tok Term.char <$> character + +boolean :: Var v => TermP v +boolean = ((\t -> Term.boolean (ann t) True) <$> reserved "true") <|> + ((\t -> Term.boolean (ann t) False) <$> reserved "false") + +seq :: Var v => TermP v -> TermP v +seq = Parser.seq Term.seq + +hashQualifiedPrefixTerm :: Var v => TermP v +hashQualifiedPrefixTerm = resolveHashQualified =<< hqPrefixId + +hashQualifiedInfixTerm :: Var v => TermP v +hashQualifiedInfixTerm = resolveHashQualified =<< hqInfixId + +-- If the hash qualified is name only, it is treated as a var, if it +-- has a short hash, we resolve that short hash immediately and fail +-- committed if that short hash can't be found in the current environment +resolveHashQualified :: Var v => L.Token HQ.HashQualified -> TermP v +resolveHashQualified tok = do + names <- asks names + case L.payload tok of + HQ.NameOnly n -> pure $ Term.var (ann tok) (Name.toVar n) + _ -> case 
Names.lookupHQTerm (L.payload tok) names of + s | Set.null s -> failCommitted $ UnknownTerm tok s + | Set.size s > 1 -> failCommitted $ UnknownTerm tok s + | otherwise -> pure $ Term.fromReferent (ann tok) (Set.findMin s) + +termLeaf :: forall v . Var v => TermP v +termLeaf = + asum + [ hashQualifiedPrefixTerm + , text + , char + , number + , boolean + , link + , tupleOrParenthesizedTerm + , keywordBlock + , seq term + , delayQuote + , bang + , docBlock + ] + +docBlock :: Var v => TermP v +docBlock = do + openTok <- openBlockWith "[:" + segs <- many segment + closeTok <- closeBlock + let a = ann openTok <> ann closeTok + pure . docNormalize $ Term.app a (Term.constructor a DD.docRef DD.docJoinId) (Term.seq a segs) + where + segment = blob <|> linky + blob = do + s <- string + pure $ Term.app (ann s) (Term.constructor (ann s) DD.docRef DD.docBlobId) + (Term.text (ann s) (L.payload s)) + linky = asum [include, signature, evaluate, source, link] + include = do + _ <- P.try (reserved "include") + hashQualifiedPrefixTerm + signature = do + _ <- P.try (reserved "signature") + tok <- termLink' + pure $ Term.app (ann tok) + (Term.constructor (ann tok) DD.docRef DD.docSignatureId) + (Term.termLink (ann tok) (L.payload tok)) + evaluate = do + _ <- P.try (reserved "evaluate") + tok <- termLink' + pure $ Term.app (ann tok) + (Term.constructor (ann tok) DD.docRef DD.docEvaluateId) + (Term.termLink (ann tok) (L.payload tok)) + source = do + _ <- P.try (reserved "source") + l <- link'' + pure $ Term.app (ann l) + (Term.constructor (ann l) DD.docRef DD.docSourceId) + l + link'' = either ty t <$> link' where + t tok = Term.app (ann tok) + (Term.constructor (ann tok) DD.linkRef DD.linkTermId) + (Term.termLink (ann tok) (L.payload tok)) + ty tok = Term.app (ann tok) + (Term.constructor (ann tok) DD.linkRef DD.linkTypeId) + (Term.typeLink (ann tok) (L.payload tok)) + link = d <$> link'' where + d tm = Term.app (ann tm) (Term.constructor (ann tm) DD.docRef DD.docLinkId) tm + +-- Used 
by unbreakParas within docNormalize. Doc literals are a joined sequence +-- segments. This type describes a property of a segment. +data UnbreakCase = + -- Finishes with a newline and hence does not determine whether the next + -- line starts with whitespace. + LineEnds + -- Ends with "\n something", i.e. introduces an indented line. + | StartsIndented + -- Ends with "\nsomething", i.e. introduces an unindented line. + | StartsUnindented deriving (Eq, Show) + +-- Doc literal normalization +-- +-- This normalization allows the pretty-printer and doc display code to do +-- indenting, and to do line-wrap of paragraphs, but without the inserted +-- newlines being then frozen into the text for ever more over subsequent +-- edit/update cycles. +-- +-- The alternative would be to stop line-wrapping docs on view/display by adding +-- newlines in the pretty-printer, and instead leave wrapping to the +-- terminal/editor. Might be worth considering if this code ends up being +-- too buggy and fragile to maintain. Maybe display could add newlines, +-- and view could refrain from doing so. +-- +-- Operates on the text of the Blobs within a doc (as parsed by docBlock): +-- - reduces the whitespace after all newlines so that at least one of the +-- non-initial lines has zero indent (important because the pretty-printer adds +-- indenting when displaying doc literals) +-- - removes trailing whitespace from each line +-- - removes newlines between any sequence of non-empty zero-indent lines +-- (i.e. undo line-breaking within paragraphs). +-- +-- Should be understood in tandem with Util.Pretty.paragraphyText, which +-- outputs doc text for display/edit/view. +-- See also unison-src/transcripts/doc-formatting.md. +-- +-- There is some heuristic/approximate logic in here - see the comment flagged +-- with ** below. 
+-- +-- This function is a bit painful - it's trying to act on a sequence of lines, +-- but that sequence is split up between the various blobs in the doc, which +-- are separated by the elements tracking things like @[source] etc. It +-- would be simplified if the doc representation was something like +-- [Either Char EnrichedElement]. +-- +-- This function has some tracing which you can enable by deleting some calls to +-- 'const id' below. +docNormalize :: (Ord v, Show v) => Term v a -> Term v a +docNormalize tm = case tm of + -- This pattern is just `DD.DocJoin seqs`, but exploded in order to grab + -- the annotations. The aim is just to map `normalize` over it. + a@(Term.App' c@(Term.Constructor' DD.DocRef DD.DocJoinId) s@(Term.Sequence' seqs)) + -> join (ABT.annotation a) + (ABT.annotation c) + (ABT.annotation s) + (normalize seqs) where + _ -> error $ "unexpected doc structure: " ++ show tm + where + normalize = + Sequence.fromList . (map TupleE.fst3) + . (tracing "after unbreakParas") . unbreakParas + . (tracing "after full preprocess") . preProcess + . (tracing "after unindent") . unIndent + . (tracing "initial parse") . miniPreProcess + preProcess xs = zip3 seqs + (lineStarteds $ Sequence.fromList seqs) + (followingLines $ Sequence.fromList seqs) + where seqs = map fst xs + miniPreProcess seqs = zip (toList seqs) (lineStarteds seqs) + unIndent + :: Ord v + => [(Term v a, UnbreakCase)] + -> [(Term v a, UnbreakCase)] + unIndent tms = map go tms where + go (b, previous) = + ((mapBlob $ (reduceIndent includeFirst minIndent)) b, previous) + where + -- Since previous was calculated before unindenting, it will often be wrongly + -- StartsIndented instead of StartsUnindented - but that's OK just for the test + -- below. And we'll recalculate it later in preProcess. + includeFirst = previous == LineEnds + concatenatedBlobs :: Text + concatenatedBlobs = mconcat (toList (fmap (getBlob . fst) tms)) + getBlob (DD.DocBlob txt) = txt + getBlob _ = "." 
+ -- Note we exclude the first line when calculating the minimum indent - the lexer + -- already stripped leading spaces from it, and anyway it would have been sharing + -- its line with the [: and maybe other stuff. + nonInitialNonEmptyLines = + filter (not . Text.null) $ map Text.stripEnd $ drop 1 $ Text.lines + concatenatedBlobs + minIndent = minimumOrZero $ map (Text.length . (Text.takeWhile Char.isSpace)) + nonInitialNonEmptyLines + minimumOrZero xs = if length xs == 0 then 0 else minimum xs + reduceIndent :: Bool -> Int -> Text -> Text + reduceIndent includeFirst n t = + fixup + $ Text.unlines + $ mapExceptFirst reduceLineIndent onFirst + $ Text.lines t where + onFirst = if includeFirst then reduceLineIndent else id + reduceLineIndent l = result where + currentIndent = Text.length $ (Text.takeWhile Char.isSpace) l + remainder = (Text.dropWhile Char.isSpace) l + newIndent = maximum [0, currentIndent - n] + result = Text.replicate newIndent " " `mappend` remainder + -- unlines . lines adds a trailing newline if one was not present: undo that. + fixup = if Text.takeEnd 1 t == "\n" then id else Text.dropEnd 1 + -- Remove newlines between any sequence of non-empty zero-indent lines. + -- This is made more complicated by Doc elements (e.g. links) which break up a + -- blob but don't break a line of output text**. We sometimes need to refer back to the + -- previous blob to see whether a newline is between two zero-indented lines. + -- For example... + -- "This link to @foo makes it harder to see\n + -- that the newline should be removed." + -- ** Whether an element does this (breaks a blob but not a line of output text) really + -- depends on some things we don't know here: does an @[include] target doc occupy + -- just one line or several; whether this doc is going to be viewed or displayed. + -- So we'll get it wrong sometimes. 
The impact of this is that we may sometimes + -- misjudge whether a newline is separating two non-indented lines, and should therefore + -- be removed. + unbreakParas + :: (Show v, Ord v) + => [(Term v a, UnbreakCase, Bool)] + -> [(Term v a, UnbreakCase, Bool)] + unbreakParas = map go where + -- 'candidate' means 'candidate to be joined with an adjacent line as part of a + -- paragraph'. + go (b, previous, nextIsCandidate) = + (mapBlob go b, previous, nextIsCandidate) where + go txt = if Text.null txt then txt else tr result' where + tr = const id $ trace $ + "\nprocessElement on blob " ++ (show txt) ++ ", result' = " + ++ (show result') ++ ", lines: " ++ (show ls) ++ ", candidates = " + ++ (show candidates) ++ ", previous = " ++ (show previous) + ++ ", firstIsCandidate = " ++ (show firstIsCandidate) ++ "\n\n" + -- remove trailing whitespace + -- ls is non-empty thanks to the Text.null check above + -- Don't cut the last line's trailing whitespace - there's an assumption here + -- that it's followed by something which will put more text on the same line. + ls = mapExceptLast Text.stripEnd id $ Text.lines txt + -- Work out which lines are candidates to be joined as part of a paragraph, i.e. + -- are not indented. + candidate l = case Text.uncons l of + Just (initial, _) -> not . Char.isSpace $ initial + Nothing -> False -- empty line + -- The segment of this blob that runs up to the first newline may not itself + -- be the start of a line of the doc - for example if it's preceded by a link. + -- So work out whether the line of which it is a part is a candidate. + firstIsCandidate = case previous of + LineEnds -> candidate (head ls) + StartsIndented -> False + StartsUnindented -> True + candidates = firstIsCandidate : (tail (map candidate ls)) + result = mconcat $ intercalateMapWith sep fst (zip ls candidates) + sep (_, candidate1) (_, candidate2) = + if candidate1 && candidate2 then " " else "\n" + -- Text.lines forgets whether there was a trailing newline. 
+ -- If there was one, then either add it back or convert it to a space. + result' = if (Text.takeEnd 1 txt) == "\n" + then if (last candidates) && nextIsCandidate + then result `Text.append` " " + else result `Text.append` "\n" + else result + -- A list whose entries match those of tms. `Nothing` is used for elements + -- which just continue a line, and so need to be ignored when looking back + -- for how the last line started. Otherwise describes whether the last + -- line of this entry is indented (or maybe terminated by a newline.) + -- A value of `Nothing` protects ensuing text from having its leading + -- whitespace removed by `unindent`. + -- Note that some elements render over multiple lines when displayed. + -- See test2 in transcript doc-formatting.md for an example of how + -- this looks when there is whitespace immediately following @[source] + -- or @[evaluate]. + lastLines :: Show v => Sequence.Seq (Term v a) -> [Maybe UnbreakCase] + lastLines tms = (flip fmap) (toList tms) $ \case + DD.DocBlob txt -> unbreakCase txt + DD.DocLink _ -> Nothing + DD.DocSource _ -> Nothing + DD.DocSignature _ -> Nothing + DD.DocEvaluate _ -> Nothing + Term.Var' _ -> Nothing -- @[include] + e@_ -> error ("unexpected doc element: " ++ show e) + -- Work out whether the last line of this blob is indented (or maybe + -- terminated by a newline.) + unbreakCase :: Text -> Maybe UnbreakCase + unbreakCase txt = + let (startAndNewline, afterNewline) = Text.breakOnEnd "\n" txt + in if Text.null startAndNewline + then Nothing + else if Text.null afterNewline + then Just LineEnds + else if Char.isSpace (Text.head afterNewline) + then Just StartsIndented + else Just StartsUnindented + -- A list whose entries match those of tms. Describes how the current + -- line started (the line including the start of this entry) - or LineEnds + -- if this entry is starting a line itself. + -- Calculated as the UnbreakCase of the previous entry that included a newline. 
+ -- Really there's a function of type (a -> Bool) -> a -> [a] -> [a] in here + -- fighting to break free - overwriting elements that are 'shadowed' by + -- a preceding element for which the predicate is true, with a copy of + -- that element. + lineStarteds :: Show v => Sequence.Seq (Term v a) -> [UnbreakCase] + lineStarteds tms = tr $ quenchRuns LineEnds StartsUnindented $ xs'' where + tr = const id $ + trace $ "lineStarteds: xs = " ++ (show xs) ++ ", xss = " + ++ (show xss) ++ ", xs' = " ++ (show xs') ++ ", xs'' = " + ++ (show xs'') ++ "\n\n" + -- Make sure there's a Just at the start of the list so we always find + -- one when searching back. + -- Example: xs = [J1,N2,J3] + xs :: [Maybe UnbreakCase] + xs = Just LineEnds : (lastLines tms) + -- Example: xss = [[J1],[J1,N2],[J1,N2,J3]] + xss :: [[Maybe UnbreakCase]] + xss = drop 1 $ List.inits xs + -- Example: after each step of the map... + -- [[J1],[N2,J1],[J3,N2,J1]] -- after reverse + -- [Just J1, Just J1, Just J3] -- after find + -- ... + -- result = [1,1,3] + xs' = + map (Maybe.fromJust . Maybe.fromJust . (List.find isJust) . reverse) xss + xs'' = List.Extra.dropEnd 1 xs' + -- For each element, can it be a line-continuation of a preceding blob? + continuesLine :: Sequence.Seq (Term v a) -> [Bool] + continuesLine tms = (flip fmap) (toList tms) $ \case + DD.DocBlob _ -> False -- value doesn't matter - you don't get adjacent blobs + DD.DocLink _ -> True + DD.DocSource _ -> False + DD.DocSignature _ -> False + DD.DocEvaluate _ -> False + Term.Var' _ -> False -- @[include] + _ -> error ("unexpected doc element" ++ show tm) + -- A list whose entries match those of tms. Can the subsequent entry by a + -- line continuation of this one? + followingLines tms = drop 1 ((continuesLine tms) ++ [False]) + mapExceptFirst :: (a -> b) -> (a -> b) -> [a] -> [b] + mapExceptFirst fRest fFirst = \case + [] -> [] + x : rest -> (fFirst x) : (map fRest rest) + mapExceptLast fRest fLast = reverse . (mapExceptFirst fRest fLast) . 
reverse + tracing :: Show a => [Char] -> a -> a + tracing when x = + (const id $ trace ("at " ++ when ++ ": " ++ (show x) ++ "\n")) x + blob aa ac at txt = + Term.app aa (Term.constructor ac DD.docRef DD.docBlobId) (Term.text at txt) + join aa ac as segs = + Term.app aa (Term.constructor ac DD.docRef DD.docJoinId) (Term.seq' as segs) + mapBlob :: Ord v => (Text -> Text) -> Term v a -> Term v a + -- this pattern is just `DD.DocBlob txt` but exploded to capture the annotations as well + mapBlob f (aa@(Term.App' ac@(Term.Constructor' DD.DocRef DD.DocBlobId) at@(Term.Text' txt))) + = blob (ABT.annotation aa) (ABT.annotation ac) (ABT.annotation at) (f txt) + mapBlob _ t = t + +delayQuote :: Var v => TermP v +delayQuote = P.label "quote" $ do + start <- reserved "'" + e <- termLeaf + pure $ DD.delayTerm (ann start <> ann e) e + +bang :: Var v => TermP v +bang = P.label "bang" $ do + start <- reserved "!" + e <- termLeaf + pure $ DD.forceTerm (ann start <> ann e) (ann start) e + +var :: Var v => L.Token v -> Term v Ann +var t = Term.var (ann t) (L.payload t) + +seqOp :: Ord v => P v Pattern.SeqOp +seqOp = + (Pattern.Snoc <$ matchToken (L.SymbolyId ":+" Nothing)) + <|> (Pattern.Cons <$ matchToken (L.SymbolyId "+:" Nothing)) + <|> (Pattern.Concat <$ matchToken (L.SymbolyId "++" Nothing)) + +term4 :: Var v => TermP v +term4 = f <$> some termLeaf + where + f (func:args) = Term.apps func ((\a -> (ann func <> ann a, a)) <$> args) + f [] = error "'some' shouldn't produce an empty list" + +-- e.g. 
term4 + term4 - term4 +-- or term4 || term4 && term4 +infixAppOrBooleanOp :: Var v => TermP v +infixAppOrBooleanOp = chainl1 term4 (or <|> and <|> infixApp) + where or = orf <$> label "or" (reserved "||") + orf op lhs rhs = Term.or (ann op <> ann rhs) lhs rhs + and = andf <$> label "and" (reserved "&&") + andf op lhs rhs = Term.and (ann op <> ann rhs) lhs rhs + infixApp = infixAppf <$> label "infixApp" (hashQualifiedInfixTerm <* optional semi) + infixAppf op lhs rhs = Term.apps op [(ann lhs, lhs), (ann rhs, rhs)] + +typedecl :: Var v => P v (L.Token v, Type v Ann) +typedecl = + (,) <$> P.try (prefixDefinitionName <* reserved ":") + <*> TypeParser.valueType + <* semi + +verifyRelativeVarName :: Var v => P v (L.Token v) -> P v (L.Token v) +verifyRelativeVarName p = do + v <- p + verifyRelativeName' (Name.fromVar <$> v) + pure v + +verifyRelativeName :: Ord v => P v (L.Token Name) -> P v (L.Token Name) +verifyRelativeName name = do + name <- name + verifyRelativeName' name + pure name + +verifyRelativeName' :: Ord v => L.Token Name -> P v () +verifyRelativeName' name = do + let txt = Name.toText . L.payload $ name + when (Text.isPrefixOf "." txt && txt /= ".") $ + failCommitted (DisallowedAbsoluteName name) + +binding :: forall v. Var v => P v ((Ann, v), Term v Ann) +binding = label "binding" $ do + typ <- optional typedecl + -- a ++ b = ... OR + -- foo `mappend` bar = ... 
+ let infixLhs = do + (arg1, op) <- P.try $ + (,) <$> prefixDefinitionName <*> infixDefinitionName + arg2 <- prefixDefinitionName + pure (ann arg1, op, [arg1, arg2]) + let prefixLhs = do + v <- prefixDefinitionName + vs <- many prefixDefinitionName + pure (ann v, v, vs) + let + lhs :: P v (Ann, L.Token v, [L.Token v]) + lhs = infixLhs <|> prefixLhs + case typ of + Nothing -> do + -- we haven't seen a type annotation, so lookahead to '=' before commit + (loc, name, args) <- P.try (lhs <* P.lookAhead (openBlockWith "=")) + body <- block "=" + verifyRelativeName' (fmap Name.fromVar name) + pure $ mkBinding loc (L.payload name) args body + Just (nameT, typ) -> do + (_, name, args) <- lhs + verifyRelativeName' (fmap Name.fromVar name) + when (L.payload name /= L.payload nameT) $ + customFailure $ SignatureNeedsAccompanyingBody nameT + body <- block "=" + pure $ fmap (\e -> Term.ann (ann nameT <> ann e) e typ) + (mkBinding (ann nameT) (L.payload name) args body) + where + mkBinding loc f [] body = ((loc, f), body) + mkBinding loc f args body = + ((loc, f), Term.lam' (loc <> ann body) (L.payload <$> args) body) + + +customFailure :: P.MonadParsec e s m => e -> m a +customFailure = P.customFailure + +block :: forall v. Var v => String -> TermP v +block s = block' False s (openBlockWith s) closeBlock + +-- example: use Foo.bar.Baz + ++ x +-- + ++ and x are called the "suffixes" of the `use` statement, and +-- `Foo.bar.Baz` is called the prefix. A `use` statement has the effect +-- of allowing you to reference identifiers of the form . +-- using just . 
+-- +-- `use foo` by itself is equivalent to `use foo bar baz ...` for all +-- names in the environment prefixed by `foo` +-- +-- todo: doesn't support use Foo.bar ++#abc, which lets you use `++` unqualified to refer to `Foo.bar.++#abc` +importp :: Ord v => P v [(Name, Name)] +importp = do + kw <- reserved "use" + -- we allow symbolyId here and parse the suffix optionaly, so we can generate + -- a nicer error message if the suffixes are empty + prefix <- optional + $ fmap Right (importWordyId <|> importDotId) -- use . Nat + <|> fmap Left importSymbolyId + suffixes <- optional (some (importWordyId <|> importSymbolyId)) + case (prefix, suffixes) of + (Nothing, _) -> P.customFailure $ UseEmpty kw + (Just prefix@(Left _), _) -> P.customFailure $ UseInvalidPrefixSuffix prefix suffixes + (Just (Right prefix), Nothing) -> do -- `wildcard import` + names <- asks names + pure $ Names.expandWildcardImport (L.payload prefix) (Names.currentNames names) + (Just (Right prefix), Just suffixes) -> pure $ do + suffix <- L.payload <$> suffixes + pure (suffix, Name.joinDot (L.payload prefix) suffix) + +--module Monoid where +-- -- we replace all the binding names with Monoid.op, and +-- -- if `op` is free in the body of any binding, we replace it with `Monoid.op` +-- op : Monoid a -> (a -> a -> a) +-- op m = case m of Monoid + +data BlockElement v + = Binding ((Ann, v), Term v Ann) + | Action (Term v Ann) + | Namespace String [BlockElement v] + +namespaceBlock :: Var v => P v (BlockElement v) +namespaceBlock = do + _ <- reserved "namespace" + -- need a version of verifyRelativeName that takes a `Token Name` + name <- verifyRelativeName importWordyId + let statement = (Binding <$> binding) <|> namespaceBlock + _ <- openBlockWith "where" + elems <- sepBy semi statement + _ <- closeBlock + pure $ Namespace (Name.toString $ L.payload name) elems + +toBindings :: forall v . 
Var v => [BlockElement v] -> [((Ann,v), Term v Ann)] +toBindings b = let + expand (Binding ((a, v), e)) = [((a, Just v), e)] + expand (Action e) = [((ann e, Nothing), e)] + expand (Namespace name bs) = scope name $ expand =<< bs + v `orBlank` i = fromMaybe (Var.nameds $ "_" ++ show i) v + finishBindings bs = + [((a, v `orBlank` i), e) | (((a,v), e), i) <- bs `zip` [(1::Int)..]] + + scope :: String -> [((Ann, Maybe v), Term v Ann)] + -> [((Ann, Maybe v), Term v Ann)] + scope name bs = let + vs :: [Maybe v] + vs = snd . fst <$> bs + prefix :: v -> v + prefix v = Var.named (Text.pack name `mappend` "." `mappend` Var.name v) + vs' :: [Maybe v] + vs' = fmap prefix <$> vs + substs = [ (v, Term.var () v') | (Just v, Just v') <- vs `zip` vs' ] + sub = ABT.substsInheritAnnotation substs + in [ ((a, v'), sub e) | (((a,_),e), v') <- bs `zip` vs' ] + in finishBindings (expand =<< b) + +-- subst +-- use Foo.Bar + blah +-- use Bar.Baz zonk zazzle +imports :: Var v => P v (Names, [(v,v)]) +imports = do + let sem = P.try (semi <* P.lookAhead (reserved "use")) + imported <- mconcat . reverse <$> sepBy sem importp + ns' <- Names.importing imported <$> asks names + pure (ns', [(Name.toVar suffix, Name.toVar full) | (suffix,full) <- imported ]) + +-- A key feature of imports is we want to be able to say: +-- `use foo.bar Baz qux` without having to specify whether `Baz` or `qux` are +-- terms or types. +substImports :: Var v => Names -> [(v,v)] -> Term v Ann -> Term v Ann +substImports ns imports = + ABT.substsInheritAnnotation [ (suffix, Term.var () full) + | (suffix,full) <- imports ] . -- no guard here, as `full` could be bound + -- not in Names, but in a later term binding + Term.substTypeVars [ (suffix, Type.var () full) + | (suffix, full) <- imports, Names.hasTypeNamed (Name.fromVar full) ns ] + +block' + :: forall v b + . 
Var v + => IsTop + -> String + -> P v (L.Token ()) + -> P v b + -> TermP v +block' isTop s openBlock closeBlock = do + open <- openBlock + (names, imports) <- imports + _ <- optional semi + statements <- local (\e -> e { names = names } ) $ sepBy semi statement + _ <- closeBlock + substImports names imports <$> go open statements + where + statement = namespaceBlock <|> + asum [ Binding <$> binding, Action <$> blockTerm ] + go :: L.Token () -> [BlockElement v] -> P v (Term v Ann) + go open bs + = let + startAnnotation = (fst . fst . head $ toBindings bs) + endAnnotation = (fst . fst . last $ toBindings bs) + finish tm = case Components.minimize' tm of + Left dups -> customFailure $ DuplicateTermNames (toList dups) + Right tm -> pure tm + in + case reverse bs of + Namespace _v _ : _ -> finish $ Term.letRec + isTop + (startAnnotation <> endAnnotation) + (toBindings bs) + (Term.var endAnnotation + (positionalVar endAnnotation Var.missingResult) + ) + Binding ((a, _v), _) : _ -> finish $ Term.letRec + isTop + (startAnnotation <> endAnnotation) + (toBindings bs) + (Term.var a (positionalVar endAnnotation Var.missingResult)) + Action e : bs -> finish $ Term.letRec + isTop + (startAnnotation <> ann e) + (toBindings $ reverse bs) + e + [] -> customFailure $ EmptyBlock (const s <$> open) + +number :: Var v => TermP v +number = number' (tok Term.int) (tok Term.nat) (tok Term.float) + +number' + :: Ord v + => (L.Token Int64 -> a) + -> (L.Token Word64 -> a) + -> (L.Token Double -> a) + -> P v a +number' i u f = fmap go numeric + where + go num@(L.payload -> p) | any (\c -> c == '.' || c == 'e') p && take 1 p == "+" = f (read . drop 1 <$> num) + | any (\c -> c == '.' || c == 'e') p = f (read <$> num) + | take 1 p == "+" = i (read . 
drop 1 <$> num) + | take 1 p == "-" = i (read <$> num) + | otherwise = u (read <$> num) + +tupleOrParenthesizedTerm :: Var v => TermP v +tupleOrParenthesizedTerm = label "tuple" $ tupleOrParenthesized term DD.unitTerm pair + where + pair t1 t2 = + Term.app (ann t1 <> ann t2) + (Term.app (ann t1) + (Term.constructor (ann t1 <> ann t2) DD.pairRef 0) + t1) + t2 diff --git a/parser-typechecker/src/Unison/TermPrinter.hs b/parser-typechecker/src/Unison/TermPrinter.hs new file mode 100644 index 0000000000..35ed53ea8c --- /dev/null +++ b/parser-typechecker/src/Unison/TermPrinter.hs @@ -0,0 +1,1029 @@ +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.TermPrinter where + +import Unison.Prelude + +import Data.List +import qualified Data.Map as Map +import qualified Data.Set as Set +import Data.Text ( unpack ) +import qualified Data.Text as Text +import qualified Text.Show.Unicode as U +import Data.Vector ( ) +import Unison.ABT ( pattern AbsN', reannotateUp, annotation ) +import qualified Unison.ABT as ABT +import qualified Unison.Blank as Blank +import qualified Unison.HashQualified as HQ +import Unison.Lexer ( symbolyId, showEscapeChar ) +import Unison.Name ( Name ) +import qualified Unison.Name as Name +import qualified Unison.NameSegment as NameSegment +import Unison.NamePrinter ( styleHashQualified'' ) +import qualified Unison.Pattern as Pattern +import Unison.Pattern ( Pattern ) +import Unison.Reference ( Reference ) +import qualified Unison.Referent as Referent +import qualified Unison.Util.SyntaxText as S +import Unison.Util.SyntaxText ( SyntaxText ) +import Unison.Term +import Unison.Type ( Type ) +import qualified Unison.Type as Type +import qualified Unison.TypePrinter as TypePrinter +import Unison.Var ( Var ) +import qualified Unison.Var as Var +import Unison.Util.Monoid ( intercalateMap ) +import qualified Unison.Util.Pretty as PP +import Unison.Util.Pretty ( Pretty, ColorText ) +import 
Unison.PrettyPrintEnv ( PrettyPrintEnv, Suffix, Prefix, Imports, elideFQN ) +import qualified Unison.PrettyPrintEnv as PrettyPrintEnv +import qualified Unison.Builtin.Decls as DD +import Unison.Builtin.Decls (pattern TuplePattern, pattern TupleTerm') +import qualified Unison.ConstructorType as CT + +pretty :: Var v => PrettyPrintEnv -> Term v a -> Pretty ColorText +pretty env tm = PP.syntaxToColor $ pretty0 env (ac (-1) Normal Map.empty MaybeDoc) (printAnnotate env tm) + +pretty' :: Var v => Maybe Int -> PrettyPrintEnv -> Term v a -> ColorText +pretty' (Just width) n t = PP.render width $ PP.syntaxToColor $ pretty0 n (ac (-1) Normal Map.empty MaybeDoc) (printAnnotate n t) +pretty' Nothing n t = PP.renderUnbroken $ PP.syntaxToColor $ pretty0 n (ac (-1) Normal Map.empty MaybeDoc) (printAnnotate n t) + +-- Information about the context in which a term appears, which affects how the +-- term should be rendered. +data AmbientContext = AmbientContext + { + -- The operator precedence of the enclosing context (a number from 0 to 11, + -- or -1 to render without outer parentheses unconditionally). + -- Function application has precedence 10. + precedence :: Int + , blockContext :: BlockContext + , infixContext :: InfixContext + , imports :: Imports + , docContext :: DocLiteralContext + } + +-- Description of the position of this ABT node, when viewed in the +-- surface syntax. +data BlockContext + -- This ABT node is at the top level of a TermParser.block. + = Block + | Normal + deriving (Eq) + +data InfixContext + -- This ABT node is an infix operator being used in infix position. + = Infix + | NonInfix + deriving (Eq) + +data DocLiteralContext + -- We won't try and render this ABT node or anything under it as a [: @Doc literal :] + = NoDoc + -- We'll keep checking as we recurse down + | MaybeDoc + deriving (Eq) + +{- Explanation of precedence handling + + We illustrate precedence rules as follows. 
+ + >=10 + 10f 10x + + This example shows that a function application f x is enclosed in + parentheses whenever the ambient precedence around it is >= 10, and that + when printing its two components, an ambient precedence of 10 is used in + both places. + + The pretty-printer uses the following rules for printing terms. + + >=12 + let x = (-1)y + 1z + + >=11 + ! 11x + ' 11x + 11x ? + + >=10 + 10f 10x 10y ... + + >=3 + x -> 2y + 3x + 3y + ... 3z + + >=2 + if 0a then 0b else 0c + handle 0b with 0h + case 2x of + a | 2g -> 0b + + >=0 + 10a : 0Int + + + And the following for patterns. + + >=11 + x@11p + + >=10 + Con 10p 10q ... + + -- never any external parens added around the following + { p } + { Eff 10p 10q ... -> 0k } + +-} + +pretty0 + :: Var v + => PrettyPrintEnv + -> AmbientContext + -> Term3 v PrintAnnotation + -> Pretty SyntaxText +pretty0 + n + a@AmbientContext + { precedence = p + , blockContext = bc + , infixContext = ic + , imports = im + , docContext = doc + } + term + -- Note: the set of places in this function that call calcImports has to be kept in sync + -- with the definition of immediateChildBlockTerms, otherwise `use` statements get + -- inserted at the wrong scope. 
+ = specialCases term $ \case + Var' v -> parenIfInfix name ic $ styleHashQualified'' (fmt S.Var) name + -- OK since all term vars are user specified, any freshening was just added during typechecking + where name = elideFQN im $ HQ.unsafeFromVar (Var.reset v) + Ref' r -> parenIfInfix name ic $ styleHashQualified'' (fmt $ S.Reference r) name + where name = elideFQN im $ PrettyPrintEnv.termName n (Referent.Ref r) + TermLink' r -> parenIfInfix name ic $ + fmt S.LinkKeyword "termLink " <> styleHashQualified'' (fmt $ S.Referent r) name + where name = elideFQN im $ PrettyPrintEnv.termName n r + TypeLink' r -> parenIfInfix name ic $ + fmt S.LinkKeyword "typeLink " <> styleHashQualified'' (fmt $ S.Reference r) name + where name = elideFQN im $ PrettyPrintEnv.typeName n r + Ann' tm t -> + paren (p >= 0) + $ pretty0 n (ac 10 Normal im doc) tm + <> PP.hang (fmt S.TypeAscriptionColon " :" ) (TypePrinter.pretty0 n im 0 t) + Int' i -> fmt S.NumericLiteral $ (if i >= 0 then l "+" else mempty) <> l (show i) + Nat' u -> fmt S.NumericLiteral $ l $ show u + Float' f -> fmt S.NumericLiteral $ l $ show f + -- TODO How to handle Infinity, -Infinity and NaN? Parser cannot parse + -- them. Haskell doesn't have literals for them either. Is this + -- function only required to operate on terms produced by the parser? + -- In which case the code is fine as it stands. If it can somehow run + -- on values produced by execution (or, one day, on terms produced by + -- metaprograms), then it needs to be able to print them (and then the + -- parser ought to be able to parse them, to maintain symmetry.) 
+ Boolean' b -> fmt S.BooleanLiteral $ if b then l "true" else l "false" + Text' s -> fmt S.TextLiteral $ l $ U.ushow s + Char' c -> fmt S.CharLiteral $ l $ case showEscapeChar c of + Just c -> "?\\" ++ [c] + Nothing -> '?': [c] + Blank' id -> fmt S.Blank $ l "_" <> l (fromMaybe "" (Blank.nameb id)) + Constructor' ref i -> styleHashQualified'' (fmt S.Constructor) $ + elideFQN im $ PrettyPrintEnv.termName n (Referent.Con ref i CT.Data) + Request' ref i -> styleHashQualified'' (fmt S.Request) $ + elideFQN im $ PrettyPrintEnv.termName n (Referent.Con ref i CT.Effect) + Handle' h body -> paren (p >= 2) $ + if PP.isMultiLine pb || PP.isMultiLine ph then PP.lines [ + (fmt S.ControlKeyword "handle") `PP.hang` pb, + (fmt S.ControlKeyword "with") `PP.hang` ph + ] + else PP.spaced [ + (fmt S.ControlKeyword "handle") `PP.hang` pb + <> PP.softbreak + <> (fmt S.ControlKeyword "with") `PP.hang` ph + ] + where + pb = pblock body + ph = pblock h + pblock tm = let (im', uses) = calcImports im tm + in uses $ [pretty0 n (ac 0 Block im' doc) tm] + App' x (Constructor' DD.UnitRef 0) -> + paren (p >= 11) $ (fmt S.DelayForceChar $ l "!") <> pretty0 n (ac 11 Normal im doc) x + LamNamed' v x | (Var.name v) == "()" -> + paren (p >= 11) $ (fmt S.DelayForceChar $ l "'") <> pretty0 n (ac 11 Normal im doc) x + Sequence' xs -> PP.group $ + (fmt S.DelimiterChar $ l "[") <> optSpace + <> intercalateMap ((fmt S.DelimiterChar $ l ",") <> PP.softbreak <> optSpace <> optSpace) + (pretty0 n (ac 0 Normal im doc)) + xs + <> optSpace <> (fmt S.DelimiterChar $ l "]") + where optSpace = PP.orElse "" " " + If' cond t f -> paren (p >= 2) $ + if PP.isMultiLine pt || PP.isMultiLine pf then PP.lines [ + (fmt S.ControlKeyword "if ") <> pcond <> (fmt S.ControlKeyword " then") `PP.hang` pt, + (fmt S.ControlKeyword "else") `PP.hang` pf + ] + else PP.spaced [ + ((fmt S.ControlKeyword "if") `PP.hang` pcond) <> ((fmt S.ControlKeyword " then") `PP.hang` pt), + (fmt S.ControlKeyword "else") `PP.hang` pf + ] + where + 
pcond = pretty0 n (ac 2 Block im doc) cond + pt = branch t + pf = branch f + branch tm = let (im', uses) = calcImports im tm + in uses $ [pretty0 n (ac 0 Block im' doc) tm] + And' x y -> + paren (p >= 10) $ PP.spaced [ + pretty0 n (ac 10 Normal im doc) x, + fmt S.ControlKeyword "&&", + pretty0 n (ac 10 Normal im doc) y + ] + Or' x y -> + paren (p >= 10) $ PP.spaced [ + pretty0 n (ac 10 Normal im doc) x, + fmt S.ControlKeyword "||", + pretty0 n (ac 10 Normal im doc) y + ] + LetBlock bs e -> printLet bc bs e im' uses + Match' scrutinee branches -> paren (p >= 2) $ + if PP.isMultiLine ps then PP.lines [ + (fmt S.ControlKeyword "match ") `PP.hang` ps, + (fmt S.ControlKeyword " with") `PP.hang` pbs + ] + else ((fmt S.ControlKeyword "match ") <> ps <> (fmt S.ControlKeyword " with")) `PP.hang` pbs + where ps = pretty0 n (ac 2 Normal im doc) scrutinee + pbs = printCase n im doc branches + + t -> l "error: " <> l (show t) + where + specialCases term go = case (term, binaryOpsPred) of + (DD.Doc, _) | doc == MaybeDoc -> + if isDocLiteral term + then prettyDoc n im term + else pretty0 n (a {docContext = NoDoc}) term + (TupleTerm' [x], _) -> let + pair = parenIfInfix name ic $ styleHashQualified'' (fmt S.Constructor) name + where name = elideFQN im $ PrettyPrintEnv.termName n (DD.pairCtorRef) in + paren (p >= 10) $ pair `PP.hang` + PP.spaced [pretty0 n (ac 10 Normal im doc) x, fmt S.Constructor "()" ] + (TupleTerm' xs, _) -> paren True $ commaList xs + BinaryAppsPred' apps lastArg -> paren (p >= 3) $ + binaryApps apps (pretty0 n (ac 3 Normal im doc) lastArg) + _ -> case (term, nonForcePred) of + AppsPred' f args -> + paren (p >= 10) $ pretty0 n (ac 10 Normal im doc) f `PP.hang` + PP.spacedMap (pretty0 n (ac 10 Normal im doc)) args + _ -> case (term, nonUnitArgPred) of + (LamsNamedMatch' [] branches, _) -> + paren (p >= 3) $ + PP.group (fmt S.ControlKeyword "cases") `PP.hang` printCase n im doc branches + LamsNamedPred' vs body -> + paren (p >= 3) $ + PP.group (varList vs <> fmt 
S.ControlKeyword " ->") `PP.hang` pretty0 n (ac 2 Block im doc) body + _ -> go term + + sepList = sepList' (pretty0 n (ac 0 Normal im doc)) + sepList' f sep xs = fold $ intersperse sep (map f xs) + varList = sepList' (PP.text . Var.name) PP.softbreak + commaList = sepList (fmt S.DelimiterChar (l ",") <> PP.softbreak) + + printLet :: Var v + => BlockContext + -> [(v, Term3 v PrintAnnotation)] + -> Term3 v PrintAnnotation + -> Imports + -> ([Pretty SyntaxText] -> Pretty SyntaxText) + -> Pretty SyntaxText + printLet sc bs e im' uses = + paren ((sc /= Block) && p >= 12) + $ letIntro + $ (uses [(PP.lines (map printBinding bs ++ + [PP.group $ pretty0 n (ac 0 Normal im' doc) e]))]) + where + printBinding (v, binding) = if isBlank $ Var.nameStr v + then pretty0 n (ac (-1) Normal im' doc) binding + else prettyBinding0 n (ac (-1) Normal im' doc) (HQ.unsafeFromVar v) binding + letIntro = case sc of + Block -> id + Normal -> \x -> (fmt S.ControlKeyword "let") `PP.hang` x + + -- This predicate controls which binary functions we render as infix + -- operators. At the moment the policy is just to render symbolic + -- operators as infix - not 'wordy' function names. So we produce + -- "x + y" and "foo x y" but not "x `foo` y". + binaryOpsPred :: Var v => Term3 v PrintAnnotation -> Bool + binaryOpsPred = \case + Ref' r | isSymbolic (PrettyPrintEnv.termName n (Referent.Ref r)) -> True + Var' v | isSymbolic (HQ.unsafeFromVar v) -> True + _ -> False + + nonForcePred :: Term3 v PrintAnnotation -> Bool + nonForcePred = \case + Constructor' DD.UnitRef 0 -> False + Constructor' DD.DocRef _ -> False + _ -> True + + nonUnitArgPred :: Var v => v -> Bool + nonUnitArgPred v = (Var.name v) /= "()" + + -- Render a binary infix operator sequence, like [(a2, f2), (a1, f1)], + -- meaning (a1 `f1` a2) `f2` (a3 rendered by the caller), producing + -- "a1 `f1` a2 `f2`". Except the operators are all symbolic, so we won't + -- produce any backticks. 
We build the result out from the right, + -- starting at `f2`. + binaryApps + :: Var v => [(Term3 v PrintAnnotation, Term3 v PrintAnnotation)] + -> Pretty SyntaxText + -> Pretty SyntaxText + binaryApps xs last = unbroken `PP.orElse` broken + -- todo: use `PP.column2` in the case where we need to break + where + unbroken = PP.spaced (ps ++ [last]) + broken = PP.column2 (psCols $ [""] ++ ps ++ [last]) + psCols ps = case take 2 ps of + [x,y] -> (x,y) : psCols (drop 2 ps) + [] -> [] + _ -> error "??" + ps = join $ [r a f | (a, f) <- reverse xs ] + r a f = [pretty0 n (ac 3 Normal im doc) a, + pretty0 n (AmbientContext 10 Normal Infix im doc) f] + + (im', uses) = calcImports im term + +prettyPattern + :: forall v loc . Var v + => PrettyPrintEnv + -> AmbientContext + -> Int + -> [v] + -> Pattern loc + -> (Pretty SyntaxText, [v]) +-- vs is the list of pattern variables used by the pattern, plus possibly a +-- tail of variables it doesn't use. This tail is the second component of +-- the return value. 
+prettyPattern n c@(AmbientContext { imports = im }) p vs patt = case patt of + Pattern.Char _ c -> (fmt S.CharLiteral $ l $ case showEscapeChar c of + Just c -> "?\\" ++ [c] + Nothing -> '?': [c], vs) + Pattern.Unbound _ -> (fmt S.DelimiterChar $ l "_", vs) + Pattern.Var _ -> let (v : tail_vs) = vs in (fmt S.Var $ l $ Var.nameStr v, tail_vs) + Pattern.Boolean _ b -> (fmt S.BooleanLiteral $ if b then l "true" else l "false", vs) + Pattern.Int _ i -> (fmt S.NumericLiteral $ (if i >= 0 then l "+" else mempty) <> (l $ show i), vs) + Pattern.Nat _ u -> (fmt S.NumericLiteral $ l $ show u, vs) + Pattern.Float _ f -> (fmt S.NumericLiteral $ l $ show f, vs) + Pattern.Text _ t -> (fmt S.TextLiteral $ l $ show t, vs) + TuplePattern pats | length pats /= 1 -> + let (pats_printed, tail_vs) = patterns (-1) vs pats + in (PP.parenthesizeCommas pats_printed, tail_vs) + Pattern.Constructor _ ref i [] -> + (styleHashQualified'' (fmt S.Constructor) $ elideFQN im (PrettyPrintEnv.patternName n ref i), vs) + Pattern.Constructor _ ref i pats -> + let (pats_printed, tail_vs) = patternsSep 10 PP.softbreak vs pats + in ( paren (p >= 10) + $ styleHashQualified'' (fmt S.Constructor) (elideFQN im (PrettyPrintEnv.patternName n ref i)) + `PP.hang` pats_printed + , tail_vs) + Pattern.As _ pat -> + let (v : tail_vs) = vs + (printed, eventual_tail) = prettyPattern n c 11 tail_vs pat + in (paren (p >= 11) $ ((fmt S.Var $ l $ Var.nameStr v) <> (fmt S.DelimiterChar $ l "@") <> printed), eventual_tail) + Pattern.EffectPure _ pat -> + let (printed, eventual_tail) = prettyPattern n c (-1) vs pat + in (PP.sep " " [fmt S.DelimiterChar "{", printed, fmt S.DelimiterChar "}"], eventual_tail) + Pattern.EffectBind _ ref i pats k_pat -> + let (pats_printed , tail_vs ) = patternsSep 10 PP.softbreak vs pats + (k_pat_printed, eventual_tail) = prettyPattern n c 0 tail_vs k_pat + in ((fmt S.DelimiterChar "{" ) <> + (PP.sep " " . 
PP.nonEmpty $ [ + styleHashQualified'' (fmt S.Request) $ elideFQN im (PrettyPrintEnv.patternName n ref i), + pats_printed, + fmt S.ControlKeyword "->", + k_pat_printed]) <> + (fmt S.DelimiterChar "}") + , eventual_tail) + Pattern.SequenceLiteral _ pats -> + let (pats_printed, tail_vs) = patternsSep (-1) (fmt S.DelimiterChar ", ") vs pats + in ((fmt S.DelimiterChar "[") <> pats_printed <> (fmt S.DelimiterChar "]"), tail_vs) + Pattern.SequenceOp _ l op r -> + let (pl, lvs) = prettyPattern n c p vs l + (pr, rvs) = prettyPattern n c (p + 1) lvs r + f i s = (paren (p >= i) (pl <> " " <> (fmt (S.Op op) s) <> " " <> pr), rvs) + in case op of + Pattern.Cons -> f 9 "+:" + Pattern.Snoc -> f 9 ":+" + Pattern.Concat -> f 9 "++" + where + l :: IsString s => String -> s + l = fromString + patterns p vs (pat : pats) = + let (printed , tail_vs ) = + prettyPattern n c p vs pat + (rest_printed, eventual_tail) = patterns p tail_vs pats + in (printed : rest_printed, eventual_tail) + patterns _ vs [] = ([], vs) + patternsSep p sep vs pats = case patterns p vs pats of + (printed, tail_vs) -> (PP.sep sep printed, tail_vs) + +printCase + :: Var v + => PrettyPrintEnv + -> Imports + -> DocLiteralContext + -> [MatchCase () (Term3 v PrintAnnotation)] + -> Pretty SyntaxText +printCase env im doc ms = PP.lines $ map each gridArrowsAligned where + each (lhs, arrow, body) = PP.group $ (lhs <> arrow) `PP.hang` body + grid = go <$> ms + gridArrowsAligned = tidy <$> zip (PP.align' (f <$> grid)) grid where + f (a, b, _) = (a, Just b) + tidy ((a', b'), (_, _, c)) = (a', b', c) + go (MatchCase pat guard (AbsN' vs body)) = + (lhs, arrow, (uses [pretty0 env (ac 0 Block im' doc) body])) + where + lhs = PP.group (fst (prettyPattern env (ac 0 Block im doc) (-1) vs pat)) + <> printGuard guard + arrow = fmt S.ControlKeyword "->" + printGuard (Just g0) = let + -- strip off any Abs-chain around the guard, guard variables are rendered + -- like any other variable, ex: case Foo x y | x < y -> ... 
+ g = case g0 of + AbsN' _ g' -> g' + _ -> g0 + in PP.group $ PP.spaced [(fmt S.DelimiterChar " |"), pretty0 env (ac 2 Normal im doc) g] + printGuard Nothing = mempty + (im', uses) = calcImports im body + go _ = (l "error", mempty, mempty) + +{- Render a binding, producing output of the form + +foo : t -> u +foo a = ... + +The first line is only output if the term has a type annotation as the +outermost constructor. + +Binary functions with symbolic names are output infix, as follows: + +(+) : t -> t -> t +a + b = ... + +-} +prettyBinding + :: Var v + => PrettyPrintEnv + -> HQ.HashQualified + -> Term2 v at ap v a + -> Pretty SyntaxText +prettyBinding n = prettyBinding0 n $ ac (-1) Block Map.empty MaybeDoc + +prettyBinding' :: + Var v => Int -> PrettyPrintEnv -> HQ.HashQualified -> Term v a -> ColorText +prettyBinding' width n v t = PP.render width $ PP.syntaxToColor $ prettyBinding n v t + +prettyBinding0 + :: Var v + => PrettyPrintEnv + -> AmbientContext + -> HQ.HashQualified + -> Term2 v at ap v a + -> Pretty SyntaxText +prettyBinding0 env a@AmbientContext { imports = im, docContext = doc } v term = go + (symbolic && isBinary term) + term + where + go infix' = \case + Ann' tm tp -> PP.lines + [ PP.group + (renderName v <> PP.hang (fmt S.TypeAscriptionColon " :") + (TypePrinter.pretty0 env im (-1) tp) + ) + , PP.group (prettyBinding0 env a v tm) + ] + (printAnnotate env -> LamsNamedMatch' vs branches) -> + PP.group + $ PP.group (defnLhs v vs <> fmt S.BindingEquals " =" <> " " <> fmt S.ControlKeyword "cases") + `PP.hang` printCase env im doc branches + LamsNamedOrDelay' vs body -> + let (im', uses) = calcImports im body' + -- In the case where we're being called from inside `pretty0`, this + -- call to printAnnotate is unfortunately repeating work we've already + -- done. 
+ body' = printAnnotate env body + in PP.group + $ PP.group (defnLhs v vs <> fmt S.BindingEquals " =") + `PP.hang` uses [pretty0 env (ac (-1) Block im' doc) body'] + t -> l "error: " <> l (show t) + where + defnLhs v vs + | infix' = case vs of + x : y : _ -> PP.sep + " " + [ fmt S.Var $ PP.text (Var.name x) + , styleHashQualified'' (fmt $ S.HashQualifier v) $ elideFQN im v + , fmt S.Var $ PP.text (Var.name y) + ] + _ -> l "error" + | null vs = renderName v + | otherwise = renderName v `PP.hang` args vs + args = PP.spacedMap $ fmt S.Var . PP.text . Var.name + renderName n = + let n' = elideFQN im n + in parenIfInfix n' NonInfix $ styleHashQualified'' (fmt $ S.HashQualifier n') n' + symbolic = isSymbolic v + isBinary = \case + Ann' tm _ -> isBinary tm + LamsNamedMatch' vs _ -> length vs == 1 + LamsNamedOrDelay' vs _ -> length vs == 2 + _ -> False -- unhittable + +isDocLiteral :: Term3 v PrintAnnotation -> Bool +isDocLiteral term = case term of + DD.DocJoin segs -> all isDocLiteral segs + DD.DocBlob _ -> True + DD.DocLink (DD.LinkTerm (TermLink' _)) -> True + DD.DocLink (DD.LinkType (TypeLink' _)) -> True + DD.DocSource (DD.LinkTerm (TermLink' _)) -> True + DD.DocSource (DD.LinkType (TypeLink' _)) -> True + DD.DocSignature (TermLink' _) -> True + DD.DocEvaluate (TermLink' _) -> True + Ref' _ -> True -- @[include] + _ -> False + +-- Similar to DisplayValues.displayDoc, but does not follow and expand references. 
+prettyDoc :: Var v => PrettyPrintEnv -> Imports -> Term3 v a -> Pretty SyntaxText +prettyDoc n im term = mconcat [ fmt S.DocDelimiter $ l "[: " + , go term + , spaceUnlessBroken + , fmt S.DocDelimiter $ l ":]"] + where + go (DD.DocJoin segs) = foldMap go segs + go (DD.DocBlob txt) = PP.paragraphyText (escaped txt) + go (DD.DocLink (DD.LinkTerm (TermLink' r))) = + (fmt S.DocDelimiter $ l "@") <> ((fmt $ S.Referent r) $ fmtTerm r) + go (DD.DocLink (DD.LinkType (TypeLink' r))) = + (fmt S.DocDelimiter $ l "@") <> ((fmt $ S.Reference r) $ fmtType r) + go (DD.DocSource (DD.LinkTerm (TermLink' r))) = + atKeyword "source" <> fmtTerm r + go (DD.DocSource (DD.LinkType (TypeLink' r))) = + atKeyword "source" <> fmtType r + go (DD.DocSignature (TermLink' r)) = + atKeyword "signature" <> fmtTerm r + go (DD.DocEvaluate (TermLink' r)) = + atKeyword "evaluate" <> fmtTerm r + go (Ref' r) = atKeyword "include" <> fmtTerm (Referent.Ref r) + go _ = l $ "(invalid doc literal: " ++ show term ++ ")" + fmtName s = styleHashQualified'' (fmt $ S.HashQualifier s) $ elideFQN im s + fmtTerm r = fmtName $ PrettyPrintEnv.termName n r + fmtType r = fmtName $ PrettyPrintEnv.typeName n r + atKeyword w = + (fmt S.DocDelimiter $ l "@[") <> + (fmt S.DocKeyword $ l w) <> + (fmt S.DocDelimiter $ l "] ") + escaped = Text.replace "@" "\\@" . 
Text.replace ":]" "\\:]" + spaceUnlessBroken = PP.orElse " " "" + +paren :: Bool -> Pretty SyntaxText -> Pretty SyntaxText +paren True s = PP.group $ fmt S.Parenthesis "(" <> s <> fmt S.Parenthesis ")" +paren False s = PP.group s + +parenIfInfix + :: HQ.HashQualified -> InfixContext -> (Pretty SyntaxText -> Pretty SyntaxText) +parenIfInfix name ic = + if isSymbolic name && ic == NonInfix then paren True else id + +l :: IsString s => String -> Pretty s +l = fromString + +isSymbolic :: HQ.HashQualified -> Bool +isSymbolic (HQ.NameOnly name) = isSymbolic' name +isSymbolic (HQ.HashQualified name _) = isSymbolic' name +isSymbolic (HQ.HashOnly _) = False + +isSymbolic' :: Name -> Bool +isSymbolic' name = case symbolyId . Name.toString $ name of + Right _ -> True + _ -> False + +isBlank :: String -> Bool +isBlank ('_' : rest) | (isJust ((readMaybe rest) :: Maybe Int)) = True +isBlank _ = False + +ac :: Int -> BlockContext -> Imports -> DocLiteralContext -> AmbientContext +ac prec bc im doc = AmbientContext prec bc NonInfix im doc + +fmt :: S.Element -> Pretty S.SyntaxText -> Pretty S.SyntaxText +fmt = PP.withSyntax + +{- + # FQN elision + + The term pretty-printer inserts `use` statements in some circumstances, to + avoid the need for using fully-qualified names (FQNs) everywhere. The + following is an explanation and specification, as developed in issue #285. + + As an example, instead of + + foo p q r = + if p then Util.bar q else Util.bar r + + we actually output the following. + + foo p q r = + use Util bar + if p then bar q else bar r + + Here, the `use` statement `use Util bar` has been inserted at the start of + the block statement containing the `if`. Within that scope, `Util.bar` can + be referred to just with `bar`. We say `Util` is the prefix, and `bar` is + the suffix. 
+ + When choosing where to place `use` statements, the pretty-printer tries to + - float them down, deeper into the syntax tree, to keep them visually close + to the use sites ('usages') of the names involved, but also tries to + - minimize the number of repetitions of `use` statements for the same names + by floating them up, towards the top of the syntax tree, so that one + `use` statement takes effect over more name usages. + + It avoids producing output like the following. + + foo p q r = + use My bar + if p then bar q else Your.bar r + + Here `My.bar` is imported with a `use` statement, but `Your.bar` is not. + We avoid this because it would be easy to misread `bar` as meaning + `Your.bar`. Instead both names are output fully qualified. + + This means that a `use` statement is only emitted for a name + when the suffix is unique, across all the names referenced in the scope of + the `use` statement. + + We don't emit a `use` statement for a name if it only occurs once within + the scope (unless it's an infix operator, since they look nicer without + a namespace qualifier.) + + The emitted code does not depend on Type-Driven Name Resolution (TDNR). + For example, we emit + foo = + use Nat + + 1 + 2 + even though TDNR means that `foo = 1 + 2` would have had the same + meaning. That avoids the reader having to run typechecker logic in their + head in order to know what functions are being called. + + Multi-level name qualification is allowed - like `Foo.Bar.baz`. The + pretty-printer tries to strip off as many sections of the prefix as + possible, without causing a clash with other names. If more sections + can be stripped off, further down the tree, then it does this too. 
+ + ## Specification + + We output a `use` statement for prefix P and suffix S at a given scope if + - the scope is a block statement (so the `use` is syntactically valid) + - the number of usages of the thing referred to by P.S within the scope + - is > 1, or + - is 1, and S is an infix operator + - [uniqueness] there is no other Q with Q.S used in that scope + - there is no longer prefix PP (and suffix s, with PP.s == P.S) which + satisfies uniqueness + - [narrowness] there is no block statement further down inside this one + which contains all of the usages. + + Use statements in a block statement are sorted alphabetically by prefix. + Suffixes covered by a single use statement are sorted alphabetically. + Note that each `use` line cannot be line-broken. Ideally they would + fit the available space by splitting into multiple separate `use` lines. + + ## Algorithm + + Bubbling up from the leaves of the syntax tree, we calculate for each + node, a `Map Suffix (Map Prefix Int)` (the 'usages map'), where the `Int` + is the number of usages of Prefix.Suffix at/under that node. (Note that + a usage of `A.B.c` corresponds to two entries in the outer map.) See + `printAnnotate`. + + Once we have this decoration on all the terms, we start pretty-printing. + As we recurse back down through the tree, we keep a `Map Name Suffix` (the + 'imports map'), to record the effect of all the `use` statements we've added + in the nodes above. When outputting names, we check this map to work out + how to render them, using any suffix we find, or else falling back to the + FQN. At each block statement, each suffix in that term's usages map is a + candidate to be imported with a use statement, subject to the various + rules in the specification. + + # Debugging + + Start by enabling the tracing in elideFQN in PrettyPrintEnv.hs. + + There's also tracing in allInSubBlock to help when the narrowness check + is playing up. 
+ + # Semantics of imports + + Here is some background on how imports work. + + `use XYZ blah` brings `XYZ.blah` into scope, bound to the name `blah`. More + generally, `use` is followed by a FQN prefix, then the local suffix. + Concatenate the FQN prefix with the local suffix, with a dot between them, + and you get the FQN, which is bound to the name equal to the local suffix. + + `use XYZ blah qux` is equivalent to the two statements (and this + generalizes for any N symbols): + use XYZ blah + use XYZ qux + + This syntax works the same even if XYZ or blah have dots in them, so: + `use Util.External My.Foo` brings `Util.External.My.Foo` into scope, bound + to the name `My.Foo`. + + That's it. No wildcard imports, imports that do renaming, etc. We can + consider adding some features like this later. +-} + +data PrintAnnotation = PrintAnnotation + { + -- For each suffix that appears in/under this term, the set of prefixes + -- used with that suffix, and how many times each occurs. + usages :: Map Suffix (Map Prefix Int) + } deriving (Show) + +instance Semigroup PrintAnnotation where + (PrintAnnotation { usages = a } ) <> (PrintAnnotation { usages = b } ) = + PrintAnnotation { usages = Map.unionWith f a b } where + f a' b' = Map.unionWith (+) a' b' + +instance Monoid PrintAnnotation where + mempty = PrintAnnotation { usages = Map.empty } + +suffixCounterTerm :: Var v => PrettyPrintEnv -> Term2 v at ap v a -> PrintAnnotation +suffixCounterTerm n = \case + Var' v -> countHQ $ HQ.unsafeFromVar v + Ref' r -> countHQ $ PrettyPrintEnv.termName n (Referent.Ref r) + Ref' r -> countHQ $ PrettyPrintEnv.termName n (Referent.Ref r) + Constructor' r _ | noImportRefs r -> mempty + Constructor' r i -> countHQ $ PrettyPrintEnv.termName n (Referent.Con r i CT.Data) + Request' r i -> countHQ $ PrettyPrintEnv.termName n (Referent.Con r i CT.Effect) + Ann' _ t -> countTypeUsages n t + Match' _ bs -> let pat (MatchCase p _ _) = p + in foldMap ((countPatternUsages n) . 
pat) bs + _ -> mempty + +suffixCounterType :: Var v => PrettyPrintEnv -> Type v a -> PrintAnnotation +suffixCounterType n = \case + Type.Var' v -> countHQ $ HQ.unsafeFromVar v + Type.Ref' r | noImportRefs r || r == Type.vectorRef -> mempty + Type.Ref' r -> countHQ $ PrettyPrintEnv.typeName n r + _ -> mempty + +printAnnotate :: (Var v, Ord v) => PrettyPrintEnv -> Term2 v at ap v a -> Term3 v PrintAnnotation +printAnnotate n tm = fmap snd (go (reannotateUp (suffixCounterTerm n) tm)) where + go :: Ord v => Term2 v at ap v b -> Term2 v () () v b + go = extraMap' id (const ()) (const ()) + +countTypeUsages :: (Var v, Ord v) => PrettyPrintEnv -> Type v a -> PrintAnnotation +countTypeUsages n t = snd $ annotation $ reannotateUp (suffixCounterType n) t + +countPatternUsages :: PrettyPrintEnv -> Pattern loc -> PrintAnnotation +countPatternUsages n p = Pattern.foldMap' f p where + f = \case + Pattern.Unbound _ -> mempty + Pattern.Var _ -> mempty + Pattern.Boolean _ _ -> mempty + Pattern.Int _ _ -> mempty + Pattern.Nat _ _ -> mempty + Pattern.Float _ _ -> mempty + Pattern.Text _ _ -> mempty + Pattern.Char _ _ -> mempty + Pattern.As _ _ -> mempty + Pattern.SequenceLiteral _ _ -> mempty + Pattern.SequenceOp _ _ _ _ -> mempty + Pattern.EffectPure _ _ -> mempty + Pattern.EffectBind _ r i _ _ -> countHQ $ PrettyPrintEnv.patternName n r i + Pattern.Constructor _ r i _ -> + if noImportRefs r then mempty + else countHQ $ PrettyPrintEnv.patternName n r i + +countHQ :: HQ.HashQualified -> PrintAnnotation +countHQ hq = fold $ fmap countName (HQ.toName $ hq) + +countName :: Name -> PrintAnnotation +countName n = let f = \(p, s) -> (s, Map.singleton p 1) + in PrintAnnotation { usages = Map.fromList $ map f $ splitName n} + +-- Generates all valid splits of a name into a prefix and suffix. +-- See examples in Unison.Test.TermPrinter +splitName :: Name -> [(Prefix, Suffix)] +splitName n = + let ns = NameSegment.toText <$> Name.segments n + in filter (not . Text.null . 
snd) $ inits ns `zip` map dotConcat (tails ns) + +joinName :: Prefix -> Suffix -> Name +joinName p s = Name.unsafeFromText $ dotConcat $ p ++ [s] + +dotConcat :: [Text] -> Text +dotConcat = Text.concat . (intersperse ".") + +-- This predicate is used to keep certain refs out of the FQN elision annotations, +-- so that we don't get `use` statements for them. +-- +-- Don't do `use () ()` or `use Pair Pair`. Tuple syntax generates ().() and Pair.Pair +-- under the covers anyway. This does mean that if someone is using Pair.Pair directly, +-- then they'll miss out on FQN elision for that. +-- +-- Don't do `use builtin.Doc Blob`, `use builtin.Link Term`, or similar. That avoids +-- unnecessary use statements above Doc literals and termLink/typeLink. +noImportRefs :: Reference -> Bool +noImportRefs r = + elem r + [ DD.pairRef + , DD.unitRef + , DD.docRef + , DD.linkRef + ] + +infixl 0 |> +(|>) :: a -> (a -> b) -> b +x |> f = f x + +-- This function gets used each time we start printing a new block statement. +-- It decides what extra imports to introduce (returning the full new set), and +-- determines some pretty-printed lines that looks like +-- use A x +-- use B y +-- providing a `[Pretty SyntaxText] -> Pretty SyntaxText` that prepends those +-- lines to the list of lines provided, and then concatenates them. +calcImports + :: (Var v, Ord v) + => Imports + -> Term3 v PrintAnnotation + -> (Imports, [Pretty SyntaxText] -> Pretty SyntaxText) +calcImports im tm = (im', render $ getUses result) + where + -- The guts of this function is a pipeline of transformations and filters, starting from the + -- PrintAnnotation we built up in printAnnotate. + -- In `result`, the Name matches Prefix ++ Suffix; and the Int is the number of usages in this scope. + -- `result` lists all the names we're going to import, and what Prefix we'll use for each. 
+ result :: Map Name (Prefix, Suffix, Int) + result = usages' + |> uniqueness + |> enoughUsages + |> groupAndCountLength + |> longestPrefix + |> avoidRepeatsAndClashes + |> narrowestPossible + usages' :: Map Suffix (Map Prefix Int) + usages' = usages $ annotation tm + -- Keep only names P.S where there is no other Q with Q.S also used in this scope. + uniqueness :: Map Suffix (Map Prefix Int) -> Map Suffix (Prefix, Int) + uniqueness m = m |> Map.filter (\ps -> (Map.size ps) == 1) + |> Map.map (\ps -> head $ Map.toList ps) + -- Keep only names where the number of usages in this scope + -- - is > 1, or + -- - is 1, and S is an infix operator. + -- Also drop names with an empty prefix. + lookupOrDie s m = fromMaybe msg (Map.lookup s m) where + msg = error $ "TermPrinter.enoughUsages " <> show (s, m) + + enoughUsages :: Map Suffix (Prefix, Int) -> Map Suffix (Prefix, Int) + enoughUsages m = (Map.keys m) |> filter (\s -> let (p, i) = lookupOrDie s m + in (i > 1 || isRight (symbolyId (unpack s))) && + (length p > 0)) + |> map (\s -> (s, lookupOrDie s m)) + |> Map.fromList + -- Group by `Prefix ++ Suffix`, and then by `length Prefix` + groupAndCountLength :: Map Suffix (Prefix, Int) -> Map (Name, Int) (Prefix, Suffix, Int) + groupAndCountLength m = Map.toList m |> map (\(s, (p, i)) -> let n = joinName p s + l = length p + in ((n, l), (p, s, i))) + |> Map.fromList + -- For each k1, choose the v with the largest k2. 
+ longestPrefix :: (Show k1, Show k2, Ord k1, Ord k2) => Map (k1, k2) v -> Map k1 v + longestPrefix m = let k1s = Set.map fst $ Map.keysSet m + k2s = k1s |> Map.fromSet (\k1' -> Map.keysSet m + |> Set.filter (\(k1, _) -> k1 == k1') + |> Set.map snd) + maxk2s = Map.map maximum k2s + err k1 k2 = error $ + "TermPrinter.longestPrefix not found " + <> show (k1,k2) + <> " in " <> show maxk2s + in Map.mapWithKey (\k1 k2 -> fromMaybe (err k1 k2) $ Map.lookup (k1, k2) m) maxk2s + -- Don't do another `use` for a name for which we've already done one, unless the + -- new suffix is shorter. + avoidRepeatsAndClashes :: Map Name (Prefix, Suffix, Int) -> Map Name (Prefix, Suffix, Int) + avoidRepeatsAndClashes = Map.filterWithKey $ + \n (_, s', _) -> case Map.lookup n im of + Just s -> (Text.length s') < (Text.length s) + Nothing -> True + -- Is there a strictly smaller block term underneath this one, containing all the usages + -- of some of the names? Skip emitting `use` statements for those, so we can do it + -- further down, closer to the use sites. + narrowestPossible :: Map Name (Prefix, Suffix, Int) -> Map Name (Prefix, Suffix, Int) + narrowestPossible m = m |> Map.filter (\(p, s, i) -> not $ allInSubBlock tm p s i) + -- `union` is left-biased, so this can replace existing imports. + im' = getImportMapAdditions result `Map.union` im + getImportMapAdditions :: Map Name (Prefix, Suffix, Int) -> Map Name Suffix + getImportMapAdditions = Map.map (\(_, s, _) -> s) + getUses :: Map Name (Prefix, Suffix, Int) -> Map Prefix (Set Suffix) + getUses m = Map.elems m |> map (\(p, s, _) -> (p, Set.singleton s)) + |> Map.fromListWith Set.union + render :: Map Prefix (Set Suffix) -> [Pretty SyntaxText] -> Pretty SyntaxText + render m rest = + let uses = Map.mapWithKey (\p ss -> (fmt S.UseKeyword $ l"use ") <> + (fmt S.UsePrefix (intercalateMap (l".") (l . unpack) p)) <> l" " <> + (fmt S.UseSuffix (intercalateMap (l" ") (l . 
unpack) (Set.toList ss)))) m + |> Map.toList + |> map snd + in PP.lines (uses ++ rest) + +-- Given a block term and a name (Prefix, Suffix) of interest, is there a strictly smaller +-- blockterm within it, containing all usages of that name? A blockterm is a place +-- where the syntax lets us put a use statement, like the branches of an if/then/else. +-- We traverse the block terms by traversing the whole subtree with ABT.find, and paying +-- attention to those subterms that look like a blockterm. This is complicated +-- by the fact that you can't always tell if a term is a blockterm just +-- by looking at it: in some cases you can only tell when you can see it in the context of +-- the wider term that contains it. So actually we traverse the tree, at each term +-- looking for child terms that are block terms, and see if any of those contain +-- all the usages of the name. +-- Cut out the occurrences of "const id $" to get tracing. +allInSubBlock :: (Var v, Ord v) => Term3 v PrintAnnotation -> Prefix -> Suffix -> Int -> Bool +allInSubBlock tm p s i = let found = concat $ ABT.find finder tm + result = any (/= tm) $ found + tr = const id $ trace ("\nallInSubBlock(" ++ show p ++ ", " ++ + show s ++ ", " ++ show i ++ "): returns " ++ + show result ++ "\nInput:\n" ++ show tm ++ + "\nFound: \n" ++ show found ++ "\n\n") + in tr result where + getUsages t = annotation t + |> usages + |> Map.lookup s + |> fmap (Map.lookup p) + |> join + |> fromMaybe 0 + finder t = let result = let i' = getUsages t + in if i' < i + then ABT.Prune + else + let found = filter hit $ immediateChildBlockTerms t + in if (i' == i) && (not $ null found) + then ABT.Found found + else ABT.Continue + children = concat (map (\t -> "child: " ++ show t ++ "\n") $ immediateChildBlockTerms t) + tr = const id $ trace ("\nfinder: returns " ++ show result ++ + "\n children:" ++ children ++ + "\n input: \n" ++ show t ++ "\n\n") + in tr $ result + hit t = (getUsages t) == i + +-- Return any blockterms at or 
immediately under this term. Has to match the places in the +-- syntax that get a call to `calcImports` in `pretty0`. AST nodes that do a calcImports in +-- pretty0, in order to try and emit a `use` statement, need to be emitted also by this +-- function, otherwise the `use` statement may come out at an enclosing scope instead. +immediateChildBlockTerms :: (Var vt, Var v) => Term2 vt at ap v a -> [Term2 vt at ap v a] +immediateChildBlockTerms = \case + Handle' handler body -> [handler, body] + If' _ t f -> [t, f] + LetBlock bs _ -> concat $ map doLet bs + Match' _ branches -> concat $ map doCase branches + _ -> [] + where + doCase (MatchCase _ _ (AbsN' _ body)) = [body] + doCase _ = error "bad match" [] + doLet (v, Ann' tm _) = doLet (v, tm) + doLet (v, LamsNamedOpt' _ body) = if isBlank $ Var.nameStr v + then [] + else [body] + doLet t = error (show t) [] + +pattern LetBlock bindings body <- (unLetBlock -> Just (bindings, body)) + +-- Collects nested let/let rec blocks into one minimally nested block. +-- Handy because `let` and `let rec` blocks get rendered the same way. +-- We preserve nesting when the inner block shadows definitions in the +-- outer block. 
+unLetBlock + :: Ord v + => Term2 vt at ap v a + -> Maybe ([(v, Term2 vt at ap v a)], Term2 vt at ap v a) +unLetBlock t = rec t where + dontIntersect v1s v2s = + all (`Set.notMember` v2set) (fst <$> v1s) where + v2set = Set.fromList (fst <$> v2s) + rec t = case unLetRecNamed t of + Nothing -> nonrec t + Just (_isTop, bindings, body) -> case rec body of + Just (innerBindings, innerBody) | dontIntersect bindings innerBindings -> + Just (bindings ++ innerBindings, innerBody) + _ -> Just (bindings, body) + nonrec t = case unLet t of + Nothing -> Nothing + Just (bindings0, body) -> + let bindings = [ (v,b) | (_,v,b) <- bindings0 ] in + case rec body of + Just (innerBindings, innerBody) | dontIntersect bindings innerBindings -> + Just (bindings ++ innerBindings, innerBody) + _ -> Just (bindings, body) diff --git a/parser-typechecker/src/Unison/TypeParser.hs b/parser-typechecker/src/Unison/TypeParser.hs new file mode 100644 index 0000000000..86c5f350e9 --- /dev/null +++ b/parser-typechecker/src/Unison/TypeParser.hs @@ -0,0 +1,115 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.TypeParser where + +import Unison.Prelude + +import qualified Text.Megaparsec as P +import qualified Unison.Lexer as L +import Unison.Parser +import Unison.Type (Type) +import qualified Unison.Type as Type +import Unison.Var (Var) +import qualified Unison.Builtin.Decls as DD +import qualified Unison.HashQualified as HQ +import qualified Unison.Name as Name +import qualified Unison.Names3 as Names +import qualified Data.Set as Set +import Control.Monad.Reader (asks) + +-- A parsed type is annotated with its starting and ending position in the +-- source text. 
+type TypeP v = P v (Type v Ann) + +-- Value types cannot have effects, unless those effects appear to +-- the right of a function arrow: +-- valueType ::= Int | Text | App valueType valueType | Arrow valueType computationType +valueType :: Var v => TypeP v +valueType = forall type1 <|> type1 + +-- Computation +-- computationType ::= [{effect*}] valueType +computationType :: Var v => TypeP v +computationType = effect <|> valueType + +valueTypeLeaf :: Var v => TypeP v +valueTypeLeaf = + tupleOrParenthesizedType valueType <|> typeAtom <|> sequenceTyp + +-- Examples: Optional, Optional#abc, woot, #abc +typeAtom :: Var v => TypeP v +typeAtom = hqPrefixId >>= \tok -> case L.payload tok of + HQ.NameOnly n -> pure $ Type.var (ann tok) (Name.toVar n) + hq -> do + names <- asks names + let matches = Names.lookupHQType hq names + if Set.size matches /= 1 + then P.customFailure (UnknownType tok matches) + else pure $ Type.ref (ann tok) (Set.findMin matches) + +type1 :: Var v => TypeP v +type1 = arrow type2a + +type2a :: Var v => TypeP v +type2a = delayed <|> type2 + +delayed :: Var v => TypeP v +delayed = do + q <- reserved "'" + t <- effect <|> type2a + pure $ Type.arrow (Ann (L.start q) (end $ ann t)) + (DD.unitType (ann q)) + t + +type2 :: Var v => TypeP v +type2 = do + hd <- valueTypeLeaf + tl <- many (effectList <|> valueTypeLeaf) + pure $ foldl' (\a b -> Type.app (ann a <> ann b) a b) hd tl + +-- ex : {State Text, IO} (Sequence Int) +effect :: Var v => TypeP v +effect = do + es <- effectList + t <- valueTypeLeaf + pure (Type.effect1 (ann es <> ann t) es t) + +effectList :: Var v => TypeP v +effectList = do + open <- openBlockWith "{" + es <- sepBy (reserved ",") valueType + close <- closeBlock + pure $ Type.effects (ann open <> ann close) es + +sequenceTyp :: Var v => TypeP v +sequenceTyp = do + open <- reserved "[" + t <- valueType + close <- reserved "]" + let a = ann open <> ann close + pure $ Type.app a (Type.vector a) t + +tupleOrParenthesizedType :: Var v => TypeP 
v -> TypeP v +tupleOrParenthesizedType rec = tupleOrParenthesized rec DD.unitType pair + where + pair t1 t2 = + let a = ann t1 <> ann t2 + in Type.app a (Type.app (ann t1) (DD.pairType a) t1) t2 + +-- valueType ::= ... | Arrow valueType computationType +arrow :: Var v => TypeP v -> TypeP v +arrow rec = + let eff = mkArr <$> optional effectList + mkArr Nothing a b = Type.arrow (ann a <> ann b) a b + mkArr (Just es) a b = Type.arrow (ann a <> ann b) a (Type.effect1 (ann es <> ann b) es b) + in chainr1 (effect <|> rec) (reserved "->" *> eff) + +-- "forall a b . List a -> List b -> Maybe Text" +forall :: Var v => TypeP v -> TypeP v +forall rec = do + kw <- reserved "forall" <|> reserved "∀" + vars <- fmap (fmap L.payload) . some $ prefixDefinitionName + _ <- matchToken $ L.SymbolyId "." Nothing + t <- rec + pure $ Type.foralls (ann kw <> ann t) vars t + diff --git a/parser-typechecker/src/Unison/TypePrinter.hs b/parser-typechecker/src/Unison/TypePrinter.hs new file mode 100644 index 0000000000..750fe6ccb2 --- /dev/null +++ b/parser-typechecker/src/Unison/TypePrinter.hs @@ -0,0 +1,185 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} + +module Unison.TypePrinter where + +import Unison.Prelude + +import qualified Data.Map as Map +import Unison.HashQualified (HashQualified) +import Unison.NamePrinter (styleHashQualified'') +import Unison.PrettyPrintEnv (PrettyPrintEnv, Imports, elideFQN) +import qualified Unison.PrettyPrintEnv as PrettyPrintEnv +import Unison.Reference (pattern Builtin) +import Unison.Type +import Unison.Util.Pretty (ColorText, Pretty) +import Unison.Util.ColorText (toPlain) +import qualified Unison.Util.SyntaxText as S +import Unison.Util.SyntaxText (SyntaxText) +import qualified Unison.Util.Pretty as PP +import Unison.Var (Var) +import qualified Unison.Var as Var +import qualified Unison.Builtin.Decls as DD + +pretty :: forall v a . (Var v) => PrettyPrintEnv -> Type v a -> Pretty ColorText +pretty ppe = PP.syntaxToColor . 
pretty0 ppe mempty (-1) + +pretty' :: Var v => Maybe Int -> PrettyPrintEnv -> Type v a -> String +pretty' (Just width) n t = toPlain $ PP.render width $ PP.syntaxToColor $ pretty0 n Map.empty (-1) t +pretty' Nothing n t = toPlain $ PP.render maxBound $ PP.syntaxToColor $ pretty0 n Map.empty (-1) t + +{- Explanation of precedence handling + + We illustrate precedence rules as follows. + + >=10 + 10f 10x + + This example shows that a type application f x is enclosed in parentheses + whenever the ambient precedence around it is >= 10, and that when printing + its two components, an ambient precedence of 10 is used in both places. + + The pretty-printer uses the following rules for printing types. + + >=10 + 10f 10x + { 0e } 10t + + >=0 + 0a -> 0b + +-} + +pretty0 + :: forall v a . (Var v) + => PrettyPrintEnv + -> Imports + -> Int + -> Type v a + -> Pretty SyntaxText +pretty0 n im p tp = prettyRaw n im p (cleanup (removePureEffects tp)) + +prettyRaw + :: forall v a . (Var v) + => PrettyPrintEnv + -> Imports + -> Int + -> Type v a + -> Pretty SyntaxText +-- p is the operator precedence of the enclosing context (a number from 0 to +-- 11, or -1 to avoid outer parentheses unconditionally). Function +-- application has precedence 10. +prettyRaw n im p tp = go n im p tp + where + go :: PrettyPrintEnv -> Imports -> Int -> Type v a -> Pretty SyntaxText + go n im p tp = case stripIntroOuters tp of + Var' v -> fmt S.Var $ PP.text (Var.name v) + DD.TupleType' xs | length xs /= 1 -> PP.parenthesizeCommas $ map (go n im 0) xs + -- Would be nice to use a different SyntaxHighlights color if the reference is an ability. 
+ Ref' r -> styleHashQualified'' (fmt $ S.Reference r) $ elideFQN im (PrettyPrintEnv.typeName n r) + Cycle' _ _ -> fromString "error: TypeParser does not currently emit Cycle" + Abs' _ -> fromString "error: TypeParser does not currently emit Abs" + Ann' _ _ -> fromString "error: TypeParser does not currently emit Ann" + App' (Ref' (Builtin "Sequence")) x -> + PP.group $ (fmt S.DelimiterChar "[") <> go n im 0 x <> (fmt S.DelimiterChar "]") + Apps' f xs -> PP.parenthesizeIf (p >= 10) $ go n im 9 f `PP.hang` PP.spaced + (go n im 10 <$> xs) + Effect1' e t -> + PP.parenthesizeIf (p >= 10) $ go n im 9 e <> " " <> go n im 10 t + Effects' es -> effects (Just es) + ForallsNamed' vs' body -> + let vs = filter (\v -> Var.name v /= "()") vs' + in if p < 0 && all Var.universallyQuantifyIfFree vs + then go n im p body + else paren (p >= 0) $ + let vformatted = PP.sep " " (fmt S.Var . PP.text . Var.name <$> vs) + in (fmt S.TypeOperator "∀ " <> vformatted <> fmt S.TypeOperator ".") + `PP.hang` go n im (-1) body + t@(Arrow' _ _) -> case t of + EffectfulArrows' (Ref' DD.UnitRef) rest -> arrows True True rest + EffectfulArrows' fst rest -> + case fst of + Var' v | Var.name v == "()" + -> fmt S.DelayForceChar "'" <> arrows False True rest + _ -> PP.parenthesizeIf (p >= 0) $ + go n im 0 fst <> arrows False False rest + _ -> "error" + _ -> "error" + effects Nothing = mempty + effects (Just es) = PP.group $ (fmt S.AbilityBraces "{") <> PP.commas (go n im 0 <$> es) <> (fmt S.AbilityBraces "}") + arrow delay first mes = + (if first then mempty else PP.softbreak <> (fmt S.TypeOperator "->")) + <> (if delay then (if first then (fmt S.DelayForceChar "'") else (fmt S.DelayForceChar " '")) else mempty) + <> effects mes + <> if (isJust mes) || (not delay) && (not first) then " " else mempty + + arrows delay first [(mes, Ref' DD.UnitRef)] = arrow delay first mes <> (fmt S.Unit "()") + arrows delay first ((mes, Ref' DD.UnitRef) : rest) = + arrow delay first mes <> (parenNoGroup delay $ arrows True 
True rest) + arrows delay first ((mes, arg) : rest) = + arrow delay first mes + <> ( parenNoGroup (delay && (not $ null rest)) + $ go n im 0 arg + <> arrows False False rest + ) + arrows False False [] = mempty + arrows False True [] = mempty -- not reachable + arrows True _ [] = mempty -- not reachable + + paren True s = PP.group $ ( fmt S.Parenthesis "(" ) <> s <> ( fmt S.Parenthesis ")" ) + paren False s = PP.group s + + parenNoGroup True s = ( fmt S.Parenthesis "(" ) <> s <> ( fmt S.Parenthesis ")" ) + parenNoGroup False s = s + +fmt :: S.Element -> Pretty S.SyntaxText -> Pretty S.SyntaxText +fmt = PP.withSyntax + +-- todo: provide sample output in comment +prettySignatures' + :: Var v => PrettyPrintEnv + -> [(HashQualified, Type v a)] + -> [Pretty ColorText] +prettySignatures' env ts = map PP.syntaxToColor $ PP.align + [ ( styleHashQualified'' (fmt $ S.HashQualifier name) name + , (fmt S.TypeAscriptionColon ": " <> pretty0 env Map.empty (-1) typ) + `PP.orElse` ( fmt S.TypeAscriptionColon ": " + <> PP.indentNAfterNewline 2 (pretty0 env Map.empty (-1) typ) + ) + ) + | (name, typ) <- ts + ] + +-- todo: provide sample output in comment; different from prettySignatures' +prettySignaturesAlt' + :: Var v => PrettyPrintEnv + -> [([HashQualified], Type v a)] + -> [Pretty ColorText] +prettySignaturesAlt' env ts = map PP.syntaxToColor $ PP.align + [ ( PP.commas . 
fmap (\name -> styleHashQualified'' (fmt $ S.HashQualifier name) name) $ names + , (fmt S.TypeAscriptionColon ": " <> pretty0 env Map.empty (-1) typ) + `PP.orElse` ( fmt S.TypeAscriptionColon ": " + <> PP.indentNAfterNewline 2 (pretty0 env Map.empty (-1) typ) + ) + ) + | (names, typ) <- ts + ] + +-- prettySignatures'' :: Var v => PrettyPrintEnv -> [(Name, Type v a)] -> [Pretty ColorText] +-- prettySignatures'' env ts = prettySignatures' env (first HQ.fromName <$> ts) + +prettySignatures + :: Var v + => PrettyPrintEnv + -> [(HashQualified, Type v a)] + -> Pretty ColorText +prettySignatures env ts = PP.lines $ + PP.group <$> prettySignatures' env ts + +prettySignaturesAlt + :: Var v + => PrettyPrintEnv + -> [([HashQualified], Type v a)] + -> Pretty ColorText +prettySignaturesAlt env ts = PP.lines $ + PP.group <$> prettySignaturesAlt' env ts diff --git a/parser-typechecker/src/Unison/Typechecker.hs b/parser-typechecker/src/Unison/Typechecker.hs new file mode 100644 index 0000000000..b0a131640e --- /dev/null +++ b/parser-typechecker/src/Unison/Typechecker.hs @@ -0,0 +1,351 @@ +{-# LANGUAGE OverloadedLists #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE ViewPatterns #-} + +-- | This module is the primary interface to the Unison typechecker +-- module Unison.Typechecker (admissibleTypeAt, check, check', checkAdmissible', equals, locals, subtype, isSubtype, synthesize, synthesize', typeAt, wellTyped) where + +module Unison.Typechecker where + +import Unison.Prelude + +import Control.Lens +import Control.Monad.Fail (fail) +import Control.Monad.State (State, StateT, execState, get, + modify) +import Control.Monad.Writer +import qualified Data.Map as Map +import qualified Data.Sequence.NonEmpty as NESeq (toSeq) +import qualified Data.Text as Text +import qualified Unison.ABT as ABT +import qualified Unison.Blank as B +import Unison.Referent (Referent) +import Unison.Result (pattern Result, Result, + ResultT, runResultT) +import qualified 
Unison.Result as Result +import Unison.Term (Term) +import qualified Unison.Term as Term +import Unison.Type (Type) +import qualified Unison.Typechecker.Context as Context +import qualified Unison.Typechecker.TypeVar as TypeVar +import Unison.Var (Var) +import qualified Unison.Var as Var +import qualified Unison.Typechecker.TypeLookup as TL +import Unison.Util.List ( uniqueBy ) + +type Name = Text + +data Notes v loc = Notes { + bugs :: Seq (Context.CompilerBug v loc), + errors :: Seq (Context.ErrorNote v loc), + infos :: Seq (Context.InfoNote v loc) +} + +instance Semigroup (Notes v loc) where + Notes bs es is <> Notes bs' es' is' = Notes (bs <> bs') (es <> es') (is <> is') + +instance Monoid (Notes v loc) where + mempty = Notes mempty mempty mempty + +convertResult :: Context.Result v loc a -> Result (Notes v loc) a +convertResult = \case + Context.Success is a -> Result (Notes mempty mempty is) (Just a) + Context.TypeError es is -> Result (Notes mempty (NESeq.toSeq es) is) Nothing + Context.CompilerBug bug es is -> Result (Notes [bug] es is) Nothing + +data NamedReference v loc = + NamedReference { fqn :: Name, fqnType :: Type v loc + , replacement :: Either v Referent } + deriving Show + +data Env v loc = Env + { _ambientAbilities :: [Type v loc] + , _typeLookup :: TL.TypeLookup v loc + , _unqualifiedTerms :: Map Name [NamedReference v loc] + } + +makeLenses ''Env + +-- -- | Compute the allowed type of a replacement for a given subterm. +-- -- Example, in @\g -> map g [1,2,3]@, @g@ has an admissible type of +-- -- @Int -> r@, where @r@ is an unbound universal type variable, which +-- -- means that an @Int -> Bool@, an @Int -> String@, etc could all be +-- -- substituted for @g@. +-- -- +-- -- Algorithm works by replacing the subterm, @e@ with +-- -- @(f e)@, where @f@ is a fresh function parameter. We then +-- -- read off the type of @e@ from the inferred result type of @f@. 
+-- admissibleTypeAt :: (Monad f, Var v) +-- => (Env v loc) +-- -> Path +-- -> Term v loc +-- -> f (Result v loc (Type v loc)) +-- admissibleTypeAt env path t = +-- let +-- f = ABT.v' "f" +-- shake (Type.Arrow' (Type.Arrow' _ tsub) _) = Type.generalize tsub +-- shake (Type.ForallNamed' _ t) = shake t +-- shake _ = error "impossible, f had better be a function" +-- in case Term.lam() f <$> Paths.modifyTerm (\t -> Term.app() (Term.var() (ABT.Free f)) (Term.wrapV t)) path t of +-- Nothing -> pure . failNote $ InvalidPath path t +-- Just t -> fmap shake <$> synthesize env t + +-- -- | Compute the type of the given subterm. +-- typeAt :: (Monad f, Var v) => Env v loc -> Path -> Term v loc -> f (Type v loc) +-- typeAt env [] t = synthesize env t +-- typeAt env path t = +-- let +-- f = ABT.v' "f" +-- remember e = Term.var() (ABT.Free f) `Term.app_` Term.wrapV e +-- shake (Type.Arrow' (Type.Arrow' tsub _) _) = Type.generalize tsub +-- shake (Type.ForallNamed' _ t) = shake t +-- shake _ = error "impossible, f had better be a function" +-- in case Term.lam() f <$> Paths.modifyTerm remember path t of +-- Nothing -> failNote $ InvalidPath path t +-- Just t -> pure . shake <$> synthesize env t +-- +-- -- | Return the type of all local variables in scope at the given location +-- locals :: (Monad f, Var v) => Env v loc -> Path -> Term v loc +-- -> f [(v, Type v loc)] +-- locals env path ctx | ABT.isClosed ctx = +-- zip (map ABT.unvar vars) <$> types +-- where +-- -- replace focus, x, with `let saved = f v1 v2 v3 ... vn in x`, +-- -- where `f` is fresh variable, then infer type of `f`, read off the +-- -- types of `v1`, `v2`, ... +-- vars = map ABT.Bound (Paths.inScopeAtTerm path ctx) +-- f = ABT.v' "f" +-- saved = ABT.v' "saved" +-- remember e = Term.let1_ [(saved, Term.var() (ABT.Free f) `Term.apps` map (((),) . 
Term.var()) vars)] (Term.wrapV e) +-- usingAllLocals = Term.lam() f (Paths.modifyTerm' remember path ctx) +-- types = if null vars then pure [] +-- else extract <$> typeAt env [] usingAllLocals +-- extract (Type.Arrow' i _) = extract1 i +-- extract (Type.ForallNamed' _ t) = extract t +-- extract t = error $ "expected function type, got: " ++ show t +-- extract1 (Type.Arrow' i o) = i : extract1 o +-- extract1 _ = [] +-- locals _ _ _ _ ctx = +-- -- need to call failNote multiple times +-- failNote <$> (uncurry UnknownSymbol <$> ABT.freeVarAnnotations ctx) + + +-- | Infer the type of a 'Unison.Term', using +-- a function to resolve the type of @Ref@ constructors +-- contained in that term. +synthesize + :: (Monad f, Var v, Ord loc) + => Env v loc + -> Term v loc + -> ResultT (Notes v loc) f (Type v loc) +synthesize env t = let + result = convertResult $ Context.synthesizeClosed + (TypeVar.liftType <$> view ambientAbilities env) + (view typeLookup env) + (TypeVar.liftTerm t) + in Result.hoist (pure . runIdentity) $ fmap TypeVar.lowerType result + +isSubtype :: Var v => Type v loc -> Type v loc -> Bool +isSubtype t1 t2 = + case Context.isSubtype (tvar $ void t1) (tvar $ void t2) of + Left bug -> error $ "compiler bug encountered: " ++ show bug + Right b -> b + where tvar = TypeVar.liftType + +isEqual :: Var v => Type v loc -> Type v loc -> Bool +isEqual t1 t2 = isSubtype t1 t2 && isSubtype t2 t1 + +type TDNR f v loc a = + StateT (Term v loc) (ResultT (Notes v loc) f) a + +data Resolution v loc = + Resolution { resolvedName :: Text + , inferredType :: Context.Type v loc + , resolvedLoc :: loc + , suggestions :: [Context.Suggestion v loc] + } + +-- | Infer the type of a 'Unison.Term', using type-directed name resolution +-- to attempt to resolve unknown symbols. +synthesizeAndResolve + :: (Monad f, Var v, Ord loc) => Env v loc -> TDNR f v loc (Type v loc) +synthesizeAndResolve env = do + tm <- get + (tp, notes) <- listen . 
lift $ synthesize env tm + typeDirectedNameResolution notes tp env + +compilerBug :: Context.CompilerBug v loc -> Result (Notes v loc) () +compilerBug bug = do + tell $ Notes [bug] mempty mempty + Control.Monad.Fail.fail "" + +typeError :: Context.ErrorNote v loc -> Result (Notes v loc) () +typeError note = do + tell $ Notes mempty [note] mempty + Control.Monad.Fail.fail "" + +btw :: Monad f => Context.InfoNote v loc -> ResultT (Notes v loc) f () +btw note = tell $ Notes mempty mempty [note] + +liftResult :: Monad f => Result (Notes v loc) a -> TDNR f v loc a +liftResult = lift . MaybeT . WriterT . pure . runIdentity . runResultT + +-- Resolve "solved blanks". If a solved blank's type and name matches the type +-- and unqualified name of a symbol that isn't imported, provide a note +-- suggesting the import. If the blank is ambiguous and only one typechecks, use +-- that one. Otherwise, provide an unknown symbol error to the user. +-- The cases we consider are: +-- 1. There exist names that match and their types match too. Tell the user +-- the fully qualified names of these terms, and their types. +-- 2. There's more than one name that matches, +-- but only one that typechecks. Substitute that one into the code. +-- 3. No match at all. Throw an unresolved symbol at the user. +typeDirectedNameResolution + :: forall v loc f + . (Monad f, Var v, Ord loc) + => Notes v loc + -> Type v loc + -> Env v loc + -> TDNR f v loc (Type v loc) +typeDirectedNameResolution oldNotes oldType env = do + -- Add typed components (local definitions) to the TDNR environment. + let tdnrEnv = execState (traverse_ addTypedComponent $ infos oldNotes) env + -- Resolve blanks in the notes and generate some resolutions + resolutions <- liftResult . traverse (resolveNote tdnrEnv) . toList $ infos + oldNotes + case catMaybes resolutions of + [] -> pure oldType + rs -> + let + goAgain = + any ((== 1) . length . dedupe . filter Context.isExact . 
suggestions) rs + in if goAgain + then do + traverse_ substSuggestion rs + synthesizeAndResolve tdnrEnv + else do + -- The type hasn't changed + liftResult $ suggest rs + pure oldType + where + addTypedComponent :: Context.InfoNote v loc -> State (Env v loc) () + addTypedComponent (Context.TopLevelComponent vtts) + = for_ vtts $ \(v, typ, _) -> + unqualifiedTerms %= Map.insertWith (<>) + (Var.unqualifiedName v) + [NamedReference (Var.name v) typ (Left v)] + addTypedComponent _ = pure () + + suggest :: [Resolution v loc] -> Result (Notes v loc) () + suggest = traverse_ + (\(Resolution name inferredType loc suggestions) -> + typeError $ Context.ErrorNote + (Context.UnknownTerm loc (Var.named name) (dedupe suggestions) inferredType) + [] + ) + guard x a = if x then Just a else Nothing + + substSuggestion :: Resolution v loc -> TDNR f v loc () + substSuggestion (Resolution name _ loc (filter Context.isExact -> + [Context.Suggestion _ _ replacement Context.Exact])) + = do + modify (substBlank (Text.unpack name) loc solved) + lift . btw $ Context.Decision (Var.named name) loc solved + where + solved = either (Term.var loc) (Term.fromReferent loc) replacement + substSuggestion _ = pure () + + -- Resolve a `Blank` to a term + substBlank :: String -> loc -> Term v loc -> Term v loc -> Term v loc + substBlank s a r = ABT.visitPure go + where + go t = guard (ABT.annotation t == a) $ ABT.visitPure resolve t + resolve (Term.Blank' (B.Recorded (B.Resolve loc name))) | name == s = + Just (const loc <$> r) + resolve _ = Nothing + + -- Returns Nothing for irrelevant notes + resolveNote + :: Env v loc + -> Context.InfoNote v loc + -> Result (Notes v loc) (Maybe (Resolution v loc)) + resolveNote env (Context.SolvedBlank (B.Resolve loc n) _ it) + = fmap (Just . Resolution (Text.pack n) it loc . dedupe . join) + . traverse (resolve it) + . join + . maybeToList + . 
Map.lookup (Text.pack n) + $ view unqualifiedTerms env + resolveNote _ n = btw n >> pure Nothing + dedupe :: [Context.Suggestion v loc] -> [Context.Suggestion v loc] + dedupe = uniqueBy Context.suggestionReplacement + resolve + :: Context.Type v loc + -> NamedReference v loc + -> Result (Notes v loc) [Context.Suggestion v loc] + resolve inferredType (NamedReference fqn foundType replace) = + -- We found a name that matches. See if the type matches too. + case Context.isSubtype (TypeVar.liftType foundType) inferredType of + Left bug -> const [] <$> compilerBug bug + -- Suggest the import if the type matches. + Right b -> pure + [ Context.Suggestion + fqn + (TypeVar.liftType foundType) + replace + (if b then Context.Exact else Context.WrongType) + ] + +-- | Check whether a term matches a type, using a +-- function to resolve the type of @Ref@ constructors +-- contained in the term. Returns @typ@ if successful, +-- and a note about typechecking failure otherwise. +check + :: (Monad f, Var v, Ord loc) + => Env v loc + -> Term v loc + -> Type v loc + -> ResultT (Notes v loc) f (Type v loc) +check env term typ = synthesize env (Term.ann (ABT.annotation term) term typ) +-- | `checkAdmissible' e t` tests that `(f : t -> r) e` is well-typed. +-- If `t` has quantifiers, these are moved outside, so if `t : forall a . a`, +-- this will check that `(f : forall a . a -> a) e` is well typed. 
+-- checkAdmissible' :: Var v => Term v -> Type v -> Either Note (Type v) +-- checkAdmissible' term typ = +-- synthesize' (Term.blank() `Term.ann_` tweak typ `Term.app_` term) +-- where +-- tweak (Type.ForallNamed' v body) = Type.forall() v (tweak body) +-- tweak t = Type.arrow() t t +-- | Returns `True` if the expression is well-typed, `False` otherwise +wellTyped :: (Monad f, Var v, Ord loc) => Env v loc -> Term v loc -> f Bool +wellTyped env term = go <$> runResultT (synthesize env term) + where go (may, _) = isJust may + +-- | @subtype a b@ is @Right b@ iff @f x@ is well-typed given +-- @x : a@ and @f : b -> t@. That is, if a value of type `a` +-- can be passed to a function expecting a `b`, then `subtype a b` +-- returns `Right b`. This function returns @Left note@ with information +-- about the reason for subtyping failure otherwise. +-- +-- Example: @subtype (forall a. a -> a) (Int -> Int)@ returns @Right (Int -> Int)@. +-- subtype :: Var v => Type v -> Type v -> Either Note (Type v) +-- subtype t1 t2 = error "todo" + -- let (t1', t2') = (ABT.vmap TypeVar.Universal t1, ABT.vmap TypeVar.Universal t2) + -- in case Context.runM (Context.subtype t1' t2') + -- (Context.MEnv Context.env0 [] Map.empty True) of + -- Left e -> Left e + -- Right _ -> Right t2 + +-- | Returns true if @subtype t1 t2@ returns @Right@, false otherwise +-- isSubtype :: Var v => Type v -> Type v -> Bool +-- isSubtype t1 t2 = case subtype t1 t2 of +-- Left _ -> False +-- Right _ -> True + +-- | Returns true if the two type are equal, up to alpha equivalence and +-- order of quantifier introduction. Note that alpha equivalence considers: +-- `forall b a . a -> b -> a` and +-- `forall a b . 
a -> b -> a` to be different types +-- equals :: Var v => Type v -> Type v -> Bool +-- equals t1 t2 = isSubtype t1 t2 && isSubtype t2 t1 diff --git a/parser-typechecker/src/Unison/Typechecker/Components.hs b/parser-typechecker/src/Unison/Typechecker/Components.hs new file mode 100644 index 0000000000..9cf283ba7a --- /dev/null +++ b/parser-typechecker/src/Unison/Typechecker/Components.hs @@ -0,0 +1,88 @@ +module Unison.Typechecker.Components (minimize, minimize') where + +import Unison.Prelude + +import Control.Arrow ((&&&)) +import Data.Bifunctor (first) +import Data.Function (on) +import Data.List (groupBy, sortBy) +import Data.List.NonEmpty (NonEmpty) +import qualified Data.List.NonEmpty as Nel +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Unison.ABT as ABT +import Unison.Term (Term') +import qualified Unison.Term as Term +import Unison.Var (Var) + +unordered :: Var v => [(v,Term' vt v a)] -> [[(v,Term' vt v a)]] +unordered = ABT.components + +ordered :: Var v => [(v,Term' vt v a)] -> [[(v,Term' vt v a)]] +ordered = ABT.orderedComponents + +-- | Algorithm for minimizing cycles of a `let rec`. This can +-- improve generalization during typechecking and may also be more +-- efficient for execution. +-- +-- For instance: +-- +-- minimize (let rec id x = x; g = id 42; y = id "hi" in g) +-- ==> +-- Just (let id x = x; g = id 42; y = id "hi" in g) +-- +-- Gets rid of the let rec and replaces it with an ordinary `let`, such +-- that `id` is suitably generalized. +-- +-- Fails on the left if there are duplicate definitions. +minimize + :: Var v + => Term' vt v a + -> Either (NonEmpty (v, [a])) (Maybe (Term' vt v a)) +minimize (Term.LetRecNamedAnnotatedTop' isTop ann bs e) = + let bindings = first snd <$> bs + group = map (fst . head &&& map (ABT.annotation . snd)) . groupBy ((==) `on` fst) . sortBy + (compare `on` fst) + grouped = group bindings + dupes = filter ((> 1) . length . 
snd) grouped + in if not $ null dupes + then Left $ Nel.fromList dupes + else + let cs0 = if isTop then unordered bindings else ordered bindings + -- within a cycle, we put the lambdas first, so + -- unguarded definitions can refer to these lambdas, example: + -- + -- foo x = blah + 1 + x + -- blah = foo 10 + -- + -- Here `foo` and `blah` are part of a cycle, but putting `foo` + -- first at least lets the program run (though it has an infinite + -- loop). + cs = sortOn (\(_,e) -> Term.arity e == 0) <$> cs0 + varAnnotations = Map.fromList ((\((a, v), _) -> (v, a)) <$> bs) + msg v = error $ "Components.minimize " <> show (v, Map.keys varAnnotations) + annotationFor v = fromMaybe (msg v) $ Map.lookup v varAnnotations + annotatedVar v = (annotationFor v, v) + -- When introducing a nested let/let rec, we use the annotation + -- of the variable that starts off that let/let rec + mklet [(hdv, hdb)] e + | Set.member hdv (ABT.freeVars hdb) = Term.letRec isTop + (annotationFor hdv) + [(annotatedVar hdv, hdb)] + e + | otherwise = Term.let1 isTop [(annotatedVar hdv, hdb)] e + mklet cycle@((hdv, _) : _) e = Term.letRec isTop + (annotationFor hdv) + (first annotatedVar <$> cycle) + e + mklet [] e = e + in + -- The outer annotation is going to be meaningful, so we make + -- sure to preserve it, whereas the annotations at intermediate Abs + -- nodes aren't necessarily meaningful + Right . Just . ABT.annotate ann . 
foldr mklet e $ cs +minimize _ = Right Nothing + +minimize' + :: Var v => Term' vt v a -> Either (NonEmpty (v,[a])) (Term' vt v a) +minimize' term = fromMaybe term <$> minimize term diff --git a/parser-typechecker/src/Unison/Typechecker/Context.hs b/parser-typechecker/src/Unison/Typechecker/Context.hs new file mode 100644 index 0000000000..521e19ff4c --- /dev/null +++ b/parser-typechecker/src/Unison/Typechecker/Context.hs @@ -0,0 +1,1801 @@ +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Typechecker.Context + ( synthesizeClosed + , ErrorNote(..) + , CompilerBug (..) + , InfoNote(..) + , Cause(..) + , Context(..) + , ActualArgCount + , ExpectedArgCount + , ConstructorId + , Element(..) + , PathElement(..) + , Term + , Type + , TypeVar + , Result(..) + , errorTerms + , innermostErrorTerm + , lookupAnn + , lookupSolved + , apply + , isEqual + , isSubtype + , isRedundant + , Suggestion(..) + , SuggestionMatch(..) 
+ , isExact + , typeErrors + , infoNotes + ) +where + +import Unison.Prelude + +import qualified Control.Monad.Fail as MonadFail +import Control.Monad.Reader.Class +import Control.Monad.State ( get + , put + , StateT + , runStateT + ) +import Data.Bifunctor ( first + , second + ) +import qualified Data.Foldable as Foldable +import Data.List +import Data.List.NonEmpty ( NonEmpty ) +import qualified Data.Map as Map +import qualified Data.Sequence as Seq +import Data.Sequence.NonEmpty ( NESeq ) +import qualified Data.Sequence.NonEmpty as NESeq +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Unison.ABT as ABT +import qualified Unison.Blank as B +import Unison.DataDeclaration ( DataDeclaration + , EffectDeclaration + ) +import qualified Unison.DataDeclaration as DD +import Unison.Pattern ( Pattern ) +import qualified Unison.Pattern as Pattern +import Unison.Reference ( Reference ) +import Unison.Referent ( Referent ) +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import Unison.Typechecker.Components ( minimize' ) +import qualified Unison.Typechecker.TypeLookup as TL +import qualified Unison.Typechecker.TypeVar as TypeVar +import Unison.Var ( Var ) +import qualified Unison.Var as Var +import qualified Unison.TypePrinter as TP + +type TypeVar v loc = TypeVar.TypeVar (B.Blank loc) v +type Type v loc = Type.Type (TypeVar v loc) loc +type Term v loc = Term.Term' (TypeVar v loc) v loc +type Monotype v loc = Type.Monotype (TypeVar v loc) loc +type RedundantTypeAnnotation = Bool + +pattern Universal v = Var (TypeVar.Universal v) +pattern Existential b v = Var (TypeVar.Existential b v) + +existential :: v -> Element v loc +existential = Existential B.Blank + +existential' :: Ord v => a -> B.Blank loc -> v -> Type.Type (TypeVar v loc) a +existential' a blank v = ABT.annotatedVar a (TypeVar.Existential blank v) + +existentialp :: Ord v => a -> v -> Type v a +existentialp a = existential' a B.Blank + +universal' 
:: Ord v => a -> v -> Type.Type (TypeVar v loc) a +universal' a v = ABT.annotatedVar a (TypeVar.Universal v) + +-- | Elements of an ordered algorithmic context +data Element v loc + = Var (TypeVar v loc) -- A variable declaration + | Solved (B.Blank loc) v (Monotype v loc) -- `v` is solved to some monotype + | Ann v (Type v loc) -- `v` has type `a`, maybe quantified + | Marker v -- used for scoping + +instance (Ord loc, Var v) => Eq (Element v loc) where + Var v == Var v2 = v == v2 + Solved _ v t == Solved _ v2 t2 = v == v2 && t == t2 + Ann v t == Ann v2 t2 = v == v2 && t == t2 + Marker v == Marker v2 = v == v2 + _ == _ = False + +data Env v loc = Env { freshId :: Word64, ctx :: Context v loc } + +type DataDeclarations v loc = Map Reference (DataDeclaration v loc) +type EffectDeclarations v loc = Map Reference (EffectDeclaration v loc) + +data Result v loc a = Success (Seq (InfoNote v loc)) a + | TypeError (NESeq (ErrorNote v loc)) (Seq (InfoNote v loc)) + | CompilerBug (CompilerBug v loc) + (Seq (ErrorNote v loc)) -- type errors before hitting the bug + (Seq (InfoNote v loc)) -- info notes before hitting the bug + deriving (Functor) + +instance Applicative (Result v loc) where + pure = Success mempty + CompilerBug bug es is <*> _ = CompilerBug bug es is + r <*> CompilerBug bug es' is' = CompilerBug bug (typeErrors r <> es') (infoNotes r <> is') + TypeError es is <*> r' = TypeError (es NESeq.|>< (typeErrors r')) (is <> infoNotes r') + Success is _ <*> TypeError es' is' = TypeError es' (is <> is') + Success is f <*> Success is' a = Success (is <> is') (f a) + +instance Monad (Result v loc) where + s@(Success _ a) >>= f = s *> f a + TypeError es is >>= _ = TypeError es is + CompilerBug bug es is >>= _ = CompilerBug bug es is + +btw' :: InfoNote v loc -> Result v loc () +btw' note = Success (Seq.singleton note) () + +typeError :: Cause v loc -> Result v loc a +typeError cause = TypeError (pure $ ErrorNote cause mempty) mempty + +compilerBug :: CompilerBug v loc -> 
Result v loc a +compilerBug bug = CompilerBug bug mempty mempty + +typeErrors :: Result v loc a -> Seq (ErrorNote v loc) +typeErrors = \case + TypeError es _ -> NESeq.toSeq es + CompilerBug _ es _ -> es + Success _ _ -> mempty + +infoNotes :: Result v loc a -> Seq (InfoNote v loc) +infoNotes = \case + TypeError _ is -> is + CompilerBug _ _ is -> is + Success is _ -> is + +mapErrors :: (ErrorNote v loc -> ErrorNote v loc) -> Result v loc a -> Result v loc a +mapErrors f r = case r of + TypeError es is -> TypeError (f <$> es) is + CompilerBug bug es is -> CompilerBug bug (f <$> es) is + s@(Success _ _) -> s + +newtype MT v loc f a = MT { + runM :: MEnv v loc -> f (a, Env v loc) +} + +-- | Typechecking monad +type M v loc = MT v loc (Result v loc) + +-- | Typechecking computation that, unless it crashes +-- with a compiler bug, always produces a value. +type TotalM v loc = MT v loc (Either (CompilerBug v loc)) + +liftResult :: Result v loc a -> M v loc a +liftResult r = MT (\m -> (, env m) <$> r) + +liftTotalM :: TotalM v loc a -> M v loc a +liftTotalM (MT m) = MT $ \menv -> case m menv of + Left bug -> CompilerBug bug mempty mempty + Right a -> Success mempty a + +-- errorNote :: Cause v loc -> M v loc () +-- errorNote = liftResult . errorNote + +btw :: InfoNote v loc -> M v loc () +btw = liftResult . btw' + +modEnv :: (Env v loc -> Env v loc) -> M v loc () +modEnv f = modEnv' $ ((), ) . f + +modEnv' :: (Env v loc -> (a, Env v loc)) -> M v loc a +modEnv' f = MT (\menv -> pure . 
f $ env menv) + +data Unknown = Data | Effect deriving Show + +data CompilerBug v loc + = UnknownDecl Unknown Reference (Map Reference (DataDeclaration v loc)) + | UnknownConstructor Unknown Reference Int (DataDeclaration v loc) + | UndeclaredTermVariable v (Context v loc) + | RetractFailure (Element v loc) (Context v loc) + | EmptyLetRec (Term v loc) -- the body of the empty let rec + | PatternMatchFailure + | EffectConstructorHadMultipleEffects (Type v loc) + | FreeVarsInTypeAnnotation (Set (TypeVar v loc)) + | UnannotatedReference Reference + | MalformedPattern (Pattern loc) + | UnknownTermReference Reference + | UnknownExistentialVariable v (Context v loc) + -- `IllegalContextExtension ctx elem msg` + -- extending `ctx` with `elem` would make `ctx` ill-formed, as explained by `msg` + | IllegalContextExtension (Context v loc) (Element v loc) String + | OtherBug String + deriving Show + +data PathElement v loc + = InSynthesize (Term v loc) + | InSubtype (Type v loc) (Type v loc) + | InCheck (Term v loc) (Type v loc) + | InInstantiateL v (Type v loc) + | InInstantiateR (Type v loc) v + | InSynthesizeApp (Type v loc) (Term v loc) Int + | InFunctionCall [v] (Term v loc) (Type v loc) [Term v loc] + | InAndApp + | InOrApp + | InIfCond + | InIfBody loc -- location of `then` expression + | InVectorApp loc -- location of 1st vector element + | InMatch loc -- location of 1st case body + | InMatchGuard + | InMatchBody + deriving Show + +type ExpectedArgCount = Int +type ActualArgCount = Int +type ConstructorId = Int + +data SuggestionMatch = Exact | WrongType | WrongName + deriving (Ord, Eq, Show) + +data Suggestion v loc = + Suggestion { suggestionName :: Text + , suggestionType :: Type v loc + , suggestionReplacement :: Either v Referent + , suggestionMatch :: SuggestionMatch + } + deriving (Eq, Show) + +isExact :: Suggestion v loc -> Bool +isExact Suggestion {..} = suggestionMatch == Exact + +data ErrorNote v loc = ErrorNote { + cause :: Cause v loc, + path :: Seq 
(PathElement v loc) +} deriving Show + +-- `Decision v loc fqn` is a decision to replace the name v at location loc +-- with the fully qualified name fqn. +data InfoNote v loc + = SolvedBlank (B.Recorded loc) v (Type v loc) + | Decision v loc (Term.Term v loc) + | TopLevelComponent [(v, Type.Type v loc, RedundantTypeAnnotation)] + deriving (Show) + +data Cause v loc + = TypeMismatch (Context v loc) + | IllFormedType (Context v loc) + | UnknownSymbol loc v + | UnknownTerm loc v [Suggestion v loc] (Type v loc) + | AbilityCheckFailure [Type v loc] [Type v loc] (Context v loc) -- ambient, requested + | EffectConstructorWrongArgCount ExpectedArgCount ActualArgCount Reference ConstructorId + | MalformedEffectBind (Type v loc) (Type v loc) [Type v loc] -- type of ctor, type of ctor result + -- Type of ctor, number of arguments we got + | PatternArityMismatch loc (Type v loc) Int + -- A variable is defined twice in the same block + | DuplicateDefinitions (NonEmpty (v, [loc])) + -- A let rec where things that aren't guarded cyclicly depend on each other + | UnguardedLetRecCycle [v] [(v, Term v loc)] + | ConcatPatternWithoutConstantLength loc (Type v loc) + | HandlerOfUnexpectedType loc (Type v loc) + deriving Show + +errorTerms :: ErrorNote v loc -> [Term v loc] +errorTerms n = Foldable.toList (path n) >>= \e -> case e of + InCheck e _ -> [e] + InSynthesizeApp _ e _ -> [e] + InSynthesize e -> [e] + _ -> [ ] + +innermostErrorTerm :: ErrorNote v loc -> Maybe (Term v loc) +innermostErrorTerm n = listToMaybe $ errorTerms n + +solveBlank :: B.Recorded loc -> v -> Type v loc -> M v loc () +solveBlank blank v typ = btw $ SolvedBlank blank v typ + +-- Add `p` onto the end of the `path` of this `ErrorNote` +scope' :: PathElement v loc -> ErrorNote v loc -> ErrorNote v loc +scope' p (ErrorNote cause path) = ErrorNote cause (path `mappend` pure p) + +-- Add `p` onto the end of the `path` of any `ErrorNote`s emitted by the action +scope :: PathElement v loc -> M v loc a -> M v loc a 
+scope p (MT m) = MT (mapErrors (scope' p) . m) + +-- | The typechecking environment +data MEnv v loc = MEnv { + env :: Env v loc, -- The typechecking state + abilities :: [Type v loc], -- Allowed ambient abilities + dataDecls :: DataDeclarations v loc, -- Data declarations in scope + effectDecls :: EffectDeclarations v loc, -- Effect declarations in scope + -- Types for which ability check should be skipped. + -- See abilityCheck function for how this is used. + skipAbilityCheck :: [Type v loc] +} + +newtype Context v loc = Context [(Element v loc, Info v loc)] + +data Info v loc = + Info { existentialVars :: Set v -- set of existentials seen so far + , solvedExistentials :: Map v (Monotype v loc) -- `v` is solved to some monotype + , universalVars :: Set v -- set of universals seen so far + , termVarAnnotations :: Map v (Type v loc) + , allVars :: Set v -- all variables seen so far + , previouslyTypecheckedVars :: Set v -- term vars already typechecked + } + +-- | The empty context +context0 :: Context v loc +context0 = Context [] + +-- | Focuses on the first element in the list that satisfies the predicate. +-- Returns `(prefix, focusedElem, suffix)`, where `prefix` is in reverse order. +focusAt :: (a -> Bool) -> [a] -> Maybe ([a], a, [a]) +focusAt p xs = go [] xs where + go _ [] = Nothing + go l (h:t) = if p h then Just (l, h, t) else go (h:l) t + +-- | Delete from the end of this context up to and including +-- the given `Element`. Returns `Nothing` if the element is not found. 
+retract0 :: (Var v, Ord loc) => Element v loc -> Context v loc -> Maybe (Context v loc, [Element v loc])
+retract0 e (Context ctx) = case focusAt (\(e',_) -> e' == e) ctx of
+  Just (discarded, _, remaining) ->
+    -- note: no need to recompute used variables; any suffix of the
+    -- context snoc list is also a valid context
+    Just (Context remaining, map fst discarded)
+  Nothing -> Nothing
+
+-- | Adds a marker to the end of the context, runs the `body` and then discards
+-- from the end of the context up to and including the marker. Returns the result
+-- of `body` and the discarded context (not including the marker), respectively.
+-- Freshened `markerHint` is used to create the marker.
+markThenRetract :: (Var v, Ord loc) => v -> M v loc a -> M v loc (a, [Element v loc])
+markThenRetract markerHint body = do
+  v <- freshenVar markerHint
+  extendContext (Marker v)
+  a <- body
+  (a,) <$> doRetract (Marker v)
+  where
+    -- Retract the context through `e`; any `Recorded` blanks among the
+    -- discarded elements (both solved and still-existential ones) are
+    -- reported via `solveBlank` before the truncated context is restored.
+    -- Crashes with `RetractFailure` if `e` is not present in the context.
+    doRetract :: (Var v, Ord loc) => Element v loc -> M v loc [Element v loc]
+    doRetract e = do
+      ctx <- getContext
+      case retract0 e ctx of
+        Nothing -> compilerCrash (RetractFailure e ctx)
+        Just (t, discarded) -> do
+          let solved =
+                [ (b, v, inst $ Type.getPolytype sa)
+                | Solved (B.Recorded b) v sa <- discarded
+                ]
+              unsolved =
+                [ (b, v, inst $ existential' (B.loc b) b' v)
+                | Existential b'@(B.Recorded b) v <- discarded
+                ]
+              go (b, v, sa) = solveBlank b v sa
+              inst = apply ctx
+          Foldable.traverse_ go (solved ++ unsolved)
+          setContext t
+          pure discarded
+
+-- | Like 'markThenRetract', but discards both the result of `body` and the
+-- retracted elements.
+markThenRetract0 :: (Var v, Ord loc) => v -> M v loc a -> M v loc ()
+markThenRetract0 markerHint body = () <$ markThenRetract markerHint body
+
+-- unsolved' :: Context v loc -> [(B.Blank loc, v)]
+-- unsolved' (Context ctx) = [(b,v) | (Existential b v, _) <- ctx]
+
+-- | Replace the element `e` in the context with the elements `focus`:
+-- splits the context at `e` (via `breakAt`), then re-extends the prefix with
+-- `focus` followed by the retained suffix. If `e` is not found the context
+-- is returned unchanged.
+replace :: (Var v, Ord loc) => Element v loc -> [Element v loc] -> Context v loc -> M v loc (Context v loc)
+replace e focus ctx =
+  case breakAt e ctx of
+    Just (l, _, r) -> l `extendN` (focus <> r)
+    Nothing -> pure ctx
+
+breakAt
:: (Var v, Ord loc) + => Element v loc + -> Context v loc + -> Maybe (Context v loc, Element v loc, [Element v loc]) +breakAt m (Context xs) = + case focusAt (\(e,_) -> e === m) xs of + Just (r, m, l) -> + -- l is a suffix of xs and is already a valid context + Just (Context l, fst m, map fst r) + Nothing -> Nothing + where + Existential _ v === Existential _ v2 | v == v2 = True + Universal v === Universal v2 | v == v2 = True + Marker v === Marker v2 | v == v2 = True + _ === _ = False + + +-- | ordered Γ α β = True <=> Γ[α^][β^] +ordered :: (Var v, Ord loc) => Context v loc -> v -> v -> Bool +ordered ctx v v2 = Set.member v (existentials (retract' (existential v2) ctx)) + where + -- Like `retract`, but returns the empty context if retracting would remove + -- all elements. + retract' + :: (Var v, Ord loc) => Element v loc -> Context v loc -> Context v loc + retract' e ctx = maybe context0 fst $ retract0 e ctx + +-- env0 :: Env v loc +-- env0 = Env 0 context0 + +debugEnabled :: Bool +debugEnabled = False + +debugPatternsEnabled :: Bool +debugPatternsEnabled = False + +_logContext :: (Ord loc, Var v) => String -> M v loc () +_logContext msg = when debugEnabled $ do + ctx <- getContext + let !_ = trace ("\n"++msg ++ ": " ++ show ctx) () + setContext ctx + +usedVars :: Ord v => Context v loc -> Set v +usedVars = allVars . info + +fromMEnv :: (MEnv v loc -> a) -> M v loc a +fromMEnv f = f <$> ask + +getContext :: M v loc (Context v loc) +getContext = fromMEnv $ ctx . 
env
+
+-- | Overwrite the `Context` stored in the typechecking environment.
+setContext :: Context v loc -> M v loc ()
+setContext ctx = modEnv (\e -> e { ctx = ctx })
+
+-- | Effectfully modify the current `Context` with `f`.
+modifyContext :: (Context v loc -> M v loc (Context v loc)) -> M v loc ()
+modifyContext f = do
+  c <- getContext
+  c <- f c
+  setContext c
+
+-- | Add the given elements onto the end of the current context, one at a
+-- time via `extendContext` (so each one gets the reservation check).
+appendContext :: (Var v, Ord loc) => [Element v loc] -> M v loc ()
+appendContext = traverse_ extendContext
+
+-- | Add a single element onto the end of the current context, but only if
+-- its variable is already reserved by the environment's fresh-id counter;
+-- otherwise crash with `IllegalContextExtension`.
+extendContext :: Var v => Element v loc -> M v loc ()
+extendContext e = isReserved (varOf e) >>= \case
+  True -> modifyContext (extend e)
+  False -> getContext >>= \ctx -> compilerCrash $
+    IllegalContextExtension ctx e $
+    "Extending context with a variable that is not reserved by the typechecking environment." <>
+    " That means `freshenVar` is allowed to return it as a fresh variable, which would be wrong."
+
+-- | Replace `elem` in the current context with `replacement`, first checking
+-- that every variable in `replacement` is reserved; crashes on the first
+-- unreserved one found.
+replaceContext :: (Var v, Ord loc) => Element v loc -> [Element v loc] -> M v loc ()
+replaceContext elem replacement =
+  fromMEnv (\menv -> find (not . (`isReservedIn` env menv) . varOf) replacement) >>= \case
+    Nothing -> modifyContext (replace elem replacement)
+    Just e -> getContext >>= \ctx -> compilerCrash $
+      IllegalContextExtension ctx e $
+      "Extending context with a variable that is not reserved by the typechecking environment." <>
+      " That means `freshenVar` is allowed to return it as a fresh variable, which would be wrong."
+
+-- | The variable introduced by a context element.
+varOf :: Element v loc -> v
+varOf (Var tv) = TypeVar.underlying tv
+varOf (Solved _ v _) = v
+varOf (Ann v _) = v
+varOf (Marker v) = v
+
+isReserved :: Var v => v -> M v loc Bool
+isReserved v = fromMEnv $ (v `isReservedIn`) . env
+
+-- | A variable is "reserved" when its fresh id is strictly below the
+-- environment's fresh-id counter, so `freshenVar` can never produce it again.
+isReservedIn :: Var v => v -> Env v loc -> Bool
+isReservedIn v e = freshId e > Var.freshId v
+
+-- | The set of universal variables recorded in the context's `Info`.
+universals :: Ord v => Context v loc -> Set v
+universals = universalVars . info
+
+-- | The set of existential variables recorded in the context's `Info`.
+existentials :: Ord v => Context v loc -> Set v
+existentials = existentialVars . info
+
+-- | "Reserves" the given variables in this typechecking environment,
+-- i.e. ensures that they won't be returned from `freshenVar` as fresh.
+reserveAll :: (Var v, Foldable t) => t v -> M v loc () +reserveAll vs = + let maxId = foldr (max . Var.freshId) 0 vs + in modEnv (\e -> e { freshId = freshId e `max` maxId + 1}) + +freshenVar :: Var v => v -> M v0 loc v +freshenVar v = modEnv' + (\e -> + let id = freshId e in (Var.freshenId id v, e { freshId = freshId e + 1 }) + ) + +freshenTypeVar :: Var v => TypeVar v loc -> M v loc v +freshenTypeVar v = modEnv' + (\e -> + let id = freshId e + in (Var.freshenId id (TypeVar.underlying v), e { freshId = id + 1 }) + ) + +isClosed :: Var v => Term v loc -> M v loc Bool +isClosed e = Set.null <$> freeVars e + +freeVars :: Var v => Term v loc -> M v loc (Set v) +freeVars e = do + ctx <- getContext + pure $ ABT.freeVars e `Set.difference` previouslyTypecheckedVars (info ctx) + +-- todo: do we want this to return a location for the aspect of the type that was not well formed +-- todo: or maybe a note / list of notes, or an M +-- | Check that the type is well formed wrt the given `Context`, see Figure 7 of paper +wellformedType :: Var v => Context v loc -> Type v loc -> Bool +wellformedType c t = case t of + Type.Var' (TypeVar.Existential _ v) -> Set.member v (existentials c) + Type.Var' (TypeVar.Universal v) -> Set.member v (universals c) + Type.Ref' _ -> True + Type.Arrow' i o -> wellformedType c i && wellformedType c o + Type.Ann' t' _ -> wellformedType c t' + Type.App' x y -> wellformedType c x && wellformedType c y + Type.Effect1' e a -> wellformedType c e && wellformedType c a + Type.Effects' es -> all (wellformedType c) es + Type.IntroOuterNamed' _ t -> wellformedType c t + Type.Forall' t' -> + let (v,ctx2) = extendUniversal c + in wellformedType ctx2 (ABT.bind t' (universal' (ABT.annotation t) v)) + _ -> error $ "Match failure in wellformedType: " ++ show t + where + -- | Extend this `Context` with a single variable, guaranteed fresh + extendUniversal ctx = + let v = Var.freshIn (usedVars ctx) (Var.named "var") + Right ctx' = extend' (Universal v) ctx + in (v, 
ctx') + +-- | Return the `Info` associated with the last element of the context, or the zero `Info`. +info :: Ord v => Context v loc -> Info v loc +info (Context []) = Info mempty mempty mempty mempty mempty mempty +info (Context ((_,i):_)) = i + +-- | Add an element onto the end of this `Context`. Takes `O(log N)` time, +-- including updates to the accumulated `Info` value. +-- Fail if the new context is not well formed (see Figure 7 of paper). +extend' :: Var v => Element v loc -> Context v loc -> Either (CompilerBug v loc) (Context v loc) +extend' e c@(Context ctx) = Context . (:ctx) . (e,) <$> i' where + Info es ses us uas vs pvs = info c + -- see figure 7 + i' = case e of + Var v -> case v of + -- UvarCtx - ensure no duplicates + TypeVar.Universal v -> if Set.notMember v vs + then pure $ Info es ses (Set.insert v us) uas (Set.insert v vs) pvs + else crash $ "variable " <> show v <> " already defined in the context" + -- EvarCtx - ensure no duplicates, and that this existential is not solved earlier in context + TypeVar.Existential _ v -> if Set.notMember v vs + then pure $ Info (Set.insert v es) ses us uas (Set.insert v vs) pvs + else crash $ "variable " <> show v <> " already defined in the context" + -- SolvedEvarCtx - ensure `v` is fresh, and the solution is well-formed wrt the context + Solved _ v sa@(Type.getPolytype -> t) + | Set.member v vs -> crash $ "variable " <> show v <> " already defined in the context" + | not (wellformedType c t) -> crash $ "type " <> show t <> " is not well-formed wrt the context" + | otherwise -> pure $ + Info (Set.insert v es) (Map.insert v sa ses) us uas (Set.insert v vs) pvs + -- VarCtx - ensure `v` is fresh, and annotation is well-formed wrt the context + Ann v t + | Set.member v vs -> crash $ "variable " <> show v <> " already defined in the context" + | not (wellformedType c t) -> crash $ "type " <> show t <> " is not well-formed wrt the context" + | otherwise -> pure $ + Info es ses us (Map.insert v t uas) (Set.insert v 
vs) + ((if Set.null (Type.freeVars t) then Set.insert v else id) pvs) + -- MarkerCtx - note that since a Marker is always the first mention of a variable, suffices to + -- just check that `v` is not previously mentioned + Marker v -> if Set.notMember v vs + then pure $ Info es ses us uas (Set.insert v vs) pvs + else crash $ "marker variable " <> show v <> " already defined in the context" + crash reason = Left $ IllegalContextExtension c e reason + +extend :: Var v => Element v loc -> Context v loc -> M v loc (Context v loc) +extend e c = either compilerCrash pure $ extend' e c + +-- | Add the given elements onto the end of the given `Context`. +-- Fail if the new context is not well-formed. +extendN :: Var v => Context v loc -> [Element v loc] -> M v loc (Context v loc) +extendN ctx es = foldM (flip extend) ctx es + +-- | doesn't combine notes +orElse :: M v loc a -> M v loc a -> M v loc a +orElse m1 m2 = MT go where + go menv = runM m1 menv <|> runM m2 menv + s@(Success _ _) <|> _ = s + TypeError _ _ <|> r = r + CompilerBug _ _ _ <|> r = r -- swallowing bugs for now: when checking whether a type annotation + -- is redundant, typechecking without that annotation might result in + -- a CompilerBug that we want `orElse` to recover from + +-- getMaybe :: Result v loc a -> Result v loc (Maybe a) +-- getMaybe = hoistMaybe Just + +-- hoistMaybe :: (Maybe a -> Maybe b) -> Result v loc a -> Result v loc b +-- hoistMaybe f (Result es is a) = Result es is (f a) + +getDataDeclarations :: M v loc (DataDeclarations v loc) +getDataDeclarations = fromMEnv dataDecls + +getEffectDeclarations :: M v loc (EffectDeclarations v loc) +getEffectDeclarations = fromMEnv effectDecls + +getAbilities :: M v loc [Type v loc] +getAbilities = fromMEnv abilities + +shouldPerformAbilityCheck :: (Ord loc, Var v) => Type v loc -> M v loc Bool +shouldPerformAbilityCheck t = do + skip <- fromMEnv skipAbilityCheck + skip <- traverse applyM skip + t <- applyM t + pure $ all (/= t) skip + +compilerCrash 
:: CompilerBug v loc -> M v loc a +compilerCrash bug = liftResult $ compilerBug bug + +failWith :: Cause v loc -> M v loc a +failWith cause = liftResult $ typeError cause + +compilerCrashResult :: CompilerBug v loc -> Result v loc a +compilerCrashResult bug = CompilerBug bug mempty mempty + +getDataDeclaration :: Reference -> M v loc (DataDeclaration v loc) +getDataDeclaration r = do + decls <- getDataDeclarations + case Map.lookup r decls of + Nothing -> compilerCrash (UnknownDecl Data r decls) + Just decl -> pure decl + +getEffectDeclaration :: Reference -> M v loc (EffectDeclaration v loc) +getEffectDeclaration r = do + decls <- getEffectDeclarations + case Map.lookup r decls of + Nothing -> compilerCrash (UnknownDecl Effect r (DD.toDataDecl <$> decls)) + Just decl -> pure decl + +getDataConstructorType :: (Var v, Ord loc) => Reference -> Int -> M v loc (Type v loc) +getDataConstructorType = getConstructorType' Data getDataDeclaration + +getEffectConstructorType :: (Var v, Ord loc) => Reference -> Int -> M v loc (Type v loc) +getEffectConstructorType = getConstructorType' Effect go where + go r = DD.toDataDecl <$> getEffectDeclaration r + +-- Encountered an unknown constructor in the typechecker; unknown constructors +-- should have been detected earlier though. 
+getConstructorType' :: Var v
+                    => Unknown
+                    -> (Reference -> M v loc (DataDeclaration v loc))
+                    -> Reference
+                    -> Int
+                    -> M v loc (Type v loc)
+getConstructorType' kind get r cid = do
+  decl <- get r
+  -- `drop cid` rather than `!!` keeps the lookup total: an out-of-range
+  -- constructor id yields [] and a `UnknownConstructor` compiler crash.
+  case drop cid (DD.constructors decl) of
+    [] -> compilerCrash $ UnknownConstructor kind r cid decl
+    (_v, typ) : _ -> pure $ ABT.vmap TypeVar.Universal typ
+
+-- | Freshen `v` and append it to the context as a universal variable,
+-- returning the freshened name.
+extendUniversal :: (Var v) => v -> M v loc v
+extendUniversal v = do
+  v' <- freshenVar v
+  extendContext (Universal v')
+  pure v'
+
+-- | Freshen `v` and append it to the context as an (unrecorded, `B.Blank`)
+-- existential variable, returning the freshened name.
+extendExistential :: (Var v) => v -> M v loc v
+extendExistential v = do
+  v' <- freshenVar v
+  extendContext (Existential B.Blank v')
+  pure v'
+
+-- | Like 'extendExistential', but wraps the result as a `TypeVar`.
+extendExistentialTV :: Var v => v -> M v loc (TypeVar v loc)
+extendExistentialTV v =
+  TypeVar.Existential B.Blank <$> extendExistential v
+
+-- | True when `v` occurs in `s` neither as a universal nor as a blank
+-- existential type variable.
+notMember :: (Var v, Ord loc) => v -> Set (TypeVar v loc) -> Bool
+notMember v s =
+  Set.notMember (TypeVar.Universal v) s &&
+  Set.notMember (TypeVar.Existential B.Blank v) s
+
+-- | Replace any existentials with their solution in the context
+apply :: (Var v, Ord loc) => Context v loc -> Type v loc -> Type v loc
+apply ctx = apply' (solvedExistentials .
info $ ctx) + +-- | Replace any existentials with their solution in the context (given as a list of elements) +applyCtx :: (Var v, Ord loc) => [Element v loc] -> Type v loc -> Type v loc +applyCtx elems = apply' $ Map.fromList [ (v, sa) | Solved _ v sa <- elems ] + +apply' :: (Var v, Ord loc) => Map v (Monotype v loc) -> Type v loc -> Type v loc +apply' _ t | Set.null (Type.freeVars t) = t +apply' solvedExistentials t = go t where + go t = case t of + Type.Var' (TypeVar.Universal _) -> t + Type.Ref' _ -> t + Type.Var' (TypeVar.Existential _ v) -> + maybe t (\(Type.Monotype t') -> go t') (Map.lookup v solvedExistentials) + Type.Arrow' i o -> Type.arrow a (go i) (go o) + Type.App' x y -> Type.app a (go x) (go y) + Type.Ann' v k -> Type.ann a (go v) k + Type.Effect1' e t -> Type.effect1 a (go e) (go t) + Type.Effects' es -> Type.effects a (map go es) + Type.ForallNamed' v t' -> Type.forall a v (go t') + Type.IntroOuterNamed' v t' -> Type.introOuter a v (go t') + _ -> error $ "Match error in Context.apply': " ++ show t + where a = ABT.annotation t + +loc :: ABT.Term f v loc -> loc +loc = ABT.annotation + +-- Prepends the provided abilities onto the existing ambient for duration of `m` +withEffects :: [Type v loc] -> M v loc a -> M v loc a +withEffects abilities' m = + MT (\menv -> runM m (menv { abilities = abilities' ++ abilities menv })) + +-- Replaces the ambient abilities with the provided for duration of `m` +withEffects0 :: [Type v loc] -> M v loc a -> M v loc a +withEffects0 abilities' m = + MT (\menv -> runM m (menv { abilities = abilities' })) + + +synthesizeApps :: (Foldable f, Var v, Ord loc) => Type v loc -> f (Term v loc) -> M v loc (Type v loc) +synthesizeApps ft args = + foldM go ft $ Foldable.toList args `zip` [1..] + where go ft arg = do + ctx <- getContext + synthesizeApp (apply ctx ft) arg + +-- | Synthesize the type of the given term, `arg` given that a function of +-- the given type `ft` is being applied to `arg`. 
Update the context in +-- the process. +-- e.g. in `(f:t) x` -- finds the type of (f x) given t and x. +synthesizeApp :: (Var v, Ord loc) => Type v loc -> (Term v loc, Int) -> M v loc (Type v loc) +synthesizeApp ft arg | debugEnabled && traceShow ("synthesizeApp"::String, ft, arg) False = undefined +synthesizeApp (Type.stripIntroOuters -> Type.Effect'' es ft) argp@(arg, argNum) = + scope (InSynthesizeApp ft arg argNum) $ abilityCheck es >> go ft + where + go (Type.Forall' body) = do -- Forall1App + v <- ABT.freshen body freshenTypeVar + appendContext [existential v] + let ft2 = ABT.bindInheritAnnotation body (existential' () B.Blank v) + synthesizeApp ft2 argp + go (Type.Arrow' i o) = do -- ->App + let (es, _) = Type.stripEffect o + abilityCheck es + o <$ check arg i + go (Type.Var' (TypeVar.Existential b a)) = do -- a^App + [i,e,o] <- traverse freshenVar [Var.named "i", Var.named "synthsizeApp-refined-effect", Var.named "o"] + let it = existential' (loc ft) B.Blank i + ot = existential' (loc ft) B.Blank o + et = existential' (loc ft) B.Blank e + soln = Type.Monotype (Type.arrow (loc ft) + it + (Type.effect (loc ft) [et] ot)) + ctxMid = [existential o, existential e, + existential i, Solved b a soln] + replaceContext (existential a) ctxMid + synthesizeApp (Type.getPolytype soln) argp + go _ = getContext >>= \ctx -> failWith $ TypeMismatch ctx +synthesizeApp _ _ = error "unpossible - Type.Effect'' pattern always succeeds" + +-- For arity 3, creates the type `∀ a . a -> a -> a -> Sequence a` +-- For arity 2, creates the type `∀ a . 
a -> a -> Sequence a` +vectorConstructorOfArity :: (Var v, Ord loc) => loc -> Int -> M v loc (Type v loc) +vectorConstructorOfArity loc arity = do + let elementVar = Var.named "elem" + args = replicate arity (loc, Type.var loc elementVar) + resultType = Type.app loc (Type.vector loc) (Type.var loc elementVar) + vt = Type.forall loc elementVar (Type.arrows args resultType) + pure vt + +generalizeAndUnTypeVar :: Var v => Type v a -> Type.Type v a +generalizeAndUnTypeVar t = + Type.cleanup . ABT.vmap TypeVar.underlying . Type.generalize (Set.toList $ ABT.freeVars t) $ t + +generalizeExistentials' + :: Var v => Type v a -> Type v a +generalizeExistentials' t = + Type.generalize (filter isExistential . Set.toList $ ABT.freeVars t) t + where + isExistential (TypeVar.Existential _ _) = True + isExistential _ = False + +noteTopLevelType + :: (Ord loc, Var v) + => ABT.Subst f v a + -> Term v loc + -> Type v loc + -> M v loc () +noteTopLevelType e binding typ = case binding of + Term.Ann' strippedBinding _ -> do + inferred <- (Just <$> synthesize strippedBinding) `orElse` pure Nothing + case inferred of + Nothing -> btw $ TopLevelComponent + [(Var.reset (ABT.variable e), generalizeAndUnTypeVar typ, False)] + Just inferred -> do + redundant <- isRedundant typ inferred + btw $ TopLevelComponent + [(Var.reset (ABT.variable e), generalizeAndUnTypeVar typ, redundant)] + -- The signature didn't exist, so was definitely redundant + _ -> btw $ TopLevelComponent + [(Var.reset (ABT.variable e), generalizeAndUnTypeVar typ, True)] + +-- | Synthesize the type of the given term, updating the context in the process. +-- | Figure 11 from the paper +synthesize :: forall v loc . 
(Var v, Ord loc) => Term v loc -> M v loc (Type v loc) +synthesize e | debugEnabled && traceShow ("synthesize"::String, e) False = undefined +synthesize e = scope (InSynthesize e) $ + case minimize' e of + Left es -> failWith (DuplicateDefinitions es) + Right e -> do + Type.Effect'' es t <- go e + abilityCheck es + pure t + where + l = loc e + go :: (Var v, Ord loc) => Term v loc -> M v loc (Type v loc) + go (Term.Var' v) = getContext >>= \ctx -> case lookupAnn ctx v of -- Var + Nothing -> compilerCrash $ UndeclaredTermVariable v ctx + Just t -> pure t + go (Term.Blank' blank) = do + v <- freshenVar Var.blank + appendContext [Existential blank v] + pure $ existential' l blank v -- forall (TypeVar.Universal v) (Type.universal v) + go (Term.Ann' (Term.Ref' _) t) = case ABT.freeVars t of + s | Set.null s -> + -- innermost Ref annotation assumed to be correctly provided by `synthesizeClosed` + existentializeArrows t + s -> compilerCrash $ FreeVarsInTypeAnnotation s + go (Term.Ref' h) = compilerCrash $ UnannotatedReference h + go (Term.Constructor' r cid) = do + t <- getDataConstructorType r cid + existentializeArrows t + go (Term.Request' r cid) = do + t <- ungeneralize =<< getEffectConstructorType r cid + existentializeArrows t + go (Term.Ann' e t) = do + t <- existentializeArrows t + t <$ checkScoped e t + go (Term.Float' _) = pure $ Type.float l -- 1I=> + go (Term.Int' _) = pure $ Type.int l -- 1I=> + go (Term.Nat' _) = pure $ Type.nat l -- 1I=> + go (Term.Boolean' _) = pure $ Type.boolean l + go (Term.Text' _) = pure $ Type.text l + go (Term.Char' _) = pure $ Type.char l + go (Term.TermLink' _) = pure $ Type.termLink l + go (Term.TypeLink' _) = pure $ Type.typeLink l + go (Term.Apps' f args) = do -- ->EEEEE + ft <- synthesize f + ctx <- getContext + (vs, ft) <- ungeneralize' ft + scope (InFunctionCall vs f ft args) $ synthesizeApps (apply ctx ft) args + go (Term.Sequence' v) = do + ft <- vectorConstructorOfArity (loc e) (Foldable.length v) + case Foldable.toList v 
of + [] -> pure ft + v1 : _ -> + scope (InVectorApp (ABT.annotation v1)) $ synthesizeApps ft v + go (Term.Let1Top' top binding e) = do + isClosed <- isClosed binding + -- note: no need to freshen binding, it can't refer to v + (t, ctx2) <- markThenRetract Var.inferOther $ do + _ <- extendExistential Var.inferOther + synthesize binding + -- If the binding has no free variables, we generalize over its existentials + tbinding <- + if isClosed then pure $ generalizeExistentials ctx2 t + else applyM . applyCtx ctx2 $ t + v' <- ABT.freshen e freshenVar + appendContext [Ann v' tbinding] + t <- applyM =<< synthesize (ABT.bindInheritAnnotation e (Term.var() v')) + when top $ noteTopLevelType e binding tbinding + -- doRetract $ Ann v' tbinding + pure t + go (Term.Lam' body) = do -- ->I=> (Full Damas Milner rule) + -- arya: are there more meaningful locations we could put into and pull out of the abschain?) + [arg, i, e, o] <- sequence [ ABT.freshen body freshenVar + , freshenVar (ABT.variable body) + , freshenVar Var.inferAbility + , freshenVar Var.inferOutput ] + let it = existential' l B.Blank i + ot = existential' l B.Blank o + et = existential' l B.Blank e + appendContext $ + [existential i, existential e, existential o, Ann arg it] + body' <- pure $ ABT.bindInheritAnnotation body (Term.var() arg) + if Term.isLam body' then withEffects0 [] $ check body' ot + else withEffects0 [et] $ check body' ot + ctx <- getContext + let t = Type.arrow l it (Type.effect l (apply ctx <$> [et]) ot) + pure t + go (Term.LetRecNamed' [] body) = synthesize body + go (Term.LetRecTop' isTop letrec) = do + (t, ctx2) <- markThenRetract (Var.named "let-rec-marker") $ do + e <- annotateLetRecBindings isTop letrec + synthesize e + pure $ generalizeExistentials ctx2 t + go (Term.If' cond t f) = do + scope InIfCond $ check cond (Type.boolean l) + scope (InIfBody $ ABT.annotation t) $ synthesizeApps (Type.iff2 l) [t, f] + go (Term.And' a b) = + scope InAndApp $ synthesizeApps (Type.andor' l) [a, b] + 
go (Term.Or' a b) = + scope InOrApp $ synthesizeApps (Type.andor' l) [a, b] + go (Term.Match' scrutinee cases) = do + scrutineeType <- synthesize scrutinee + outputTypev <- freshenVar (Var.named "match-output") + let outputType = existential' l B.Blank outputTypev + appendContext [existential outputTypev] + case cases of -- only relevant with 2 or more cases, but 1 is safe too. + [] -> pure () + Term.MatchCase _ _ t : _ -> scope (InMatch (ABT.annotation t)) $ + Foldable.traverse_ (checkCase scrutineeType outputType) cases + ctx <- getContext + pure $ apply ctx outputType + go (Term.Handle' h body) = do + -- To synthesize a handle block, we first synthesize the handler h, + -- then push its allowed abilities onto the current ambient set when + -- checking the body. Assuming that works, we also verify that the + -- handler only uses abilities in the current ambient set. + ht <- synthesize h >>= applyM >>= ungeneralize + ctx <- getContext + case ht of + -- common case, like `h : Request {Remote} a -> b`, brings + -- `Remote` into ambient when checking `body` + Type.Arrow' (Type.Apps' (Type.Ref' ref) [et,i]) o | ref == Type.effectRef -> do + let es = Type.flattenEffects et + withEffects es $ check body i + o <- applyM o + let (oes, o') = Type.stripEffect o + abilityCheck oes + pure o' + -- degenerate case, like `handle x -> 10 in ...` + Type.Arrow' (i@(Type.Var' (TypeVar.Existential _ v@(lookupSolved ctx -> Nothing)))) o -> do + e <- extendExistential v + withEffects [existentialp (loc i) e] $ check body i + o <- applyM o + let (oes, o') = Type.stripEffect o + abilityCheck oes + pure o' + _ -> failWith $ HandlerOfUnexpectedType (loc h) ht + go _e = compilerCrash PatternMatchFailure + +checkCase :: forall v loc . 
(Var v, Ord loc) + => Type v loc + -> Type v loc + -> Term.MatchCase loc (Term v loc) + -> M v loc () +checkCase scrutineeType outputType (Term.MatchCase pat guard rhs) = do + scrutineeType <- applyM scrutineeType + outputType <- applyM outputType + markThenRetract0 Var.inferOther $ do + let peel t = case t of + ABT.AbsN' vars bod -> (vars, bod) + _ -> ([], t) + (rhsvs, rhsbod) = peel rhs + mayGuard = snd . peel <$> guard + (substs, remains) <- runStateT (checkPattern scrutineeType pat) rhsvs + unless (null remains) $ compilerCrash (MalformedPattern pat) + let subst = ABT.substsInheritAnnotation (second (Term.var ()) <$> substs) + rhs' = subst rhsbod + guard' = subst <$> mayGuard + for_ guard' $ \g -> scope InMatchGuard $ check g (Type.boolean (loc g)) + outputType <- applyM outputType + scope InMatchBody $ check rhs' outputType + +checkPattern + :: (Var v, Ord loc) + => Type v loc + -> Pattern loc + -> StateT [v] (M v loc) [(v, v)] +checkPattern tx ty | (debugEnabled || debugPatternsEnabled) && traceShow ("checkPattern"::String, tx, ty) False = undefined +checkPattern scrutineeType0 p = + lift (ungeneralize scrutineeType0) >>= \scrutineeType -> case p of + Pattern.Unbound _ -> pure [] + Pattern.Var _loc -> do + v <- getAdvance p + v' <- lift $ freshenVar v + lift . appendContext $ [Ann v' scrutineeType] + pure [(v, v')] + Pattern.SequenceLiteral loc ps -> do + vt <- lift $ do + v <- freshenVar Var.inferOther + let vt = existentialp loc v + appendContext [existential v] + subtype (Type.app loc (Type.vector loc) vt) scrutineeType + applyM vt + join <$> traverse (checkPattern vt) ps + Pattern.SequenceOp loc l op r -> do + let (locL, locR) = (Pattern.loc l, Pattern.loc r) + vt <- lift $ do + v <- freshenVar Var.inferOther + let vt = existentialp loc v + appendContext [existential v] + -- todo: `Type.vector loc` is super-probably wrong; + -- I'm thinking it should be Ann.Intrinsic, but we don't + -- have access to that here. 
+ subtype (Type.app loc (Type.vector loc) vt) scrutineeType + applyM vt + case op of + Pattern.Cons -> do + lvs <- checkPattern vt l + -- todo: same `Type.vector loc` thing + rvs <- checkPattern (Type.app locR (Type.vector locR) vt) r + pure $ lvs ++ rvs + Pattern.Snoc -> do + -- todo: same `Type.vector loc` thing + lvs <- checkPattern (Type.app locL (Type.vector locL) vt) l + rvs <- checkPattern vt r + pure $ lvs ++ rvs + Pattern.Concat -> + case (l, r) of + (p, _) | isConstLen p -> f + (_, p) | isConstLen p -> f + (_, _) -> lift . failWith $ + ConcatPatternWithoutConstantLength loc (Type.app loc (Type.vector loc) vt) + where + f = liftA2 (++) (g locL l) (g locR r) + -- todo: same `Type.vector loc` thing + g l p = checkPattern (Type.app l (Type.vector l) vt) p + + -- Only pertains to sequences, returns False if not a sequence + isConstLen :: Pattern loc -> Bool + isConstLen p = case p of + Pattern.SequenceLiteral _ _ -> True + Pattern.SequenceOp _ l op r -> case op of + Pattern.Snoc -> isConstLen l + Pattern.Cons -> isConstLen r + Pattern.Concat -> isConstLen l && isConstLen r + Pattern.As _ p -> isConstLen p + _ -> False + -- TODO: provide a scope here for giving a good error message + Pattern.Boolean loc _ -> + lift $ subtype (Type.boolean loc) scrutineeType $> mempty + Pattern.Int loc _ -> + lift $ subtype (Type.int loc) scrutineeType $> mempty + Pattern.Nat loc _ -> + lift $ subtype (Type.nat loc) scrutineeType $> mempty + Pattern.Float loc _ -> + lift $ subtype (Type.float loc) scrutineeType $> mempty + Pattern.Text loc _ -> + lift $ subtype (Type.text loc) scrutineeType $> mempty + Pattern.Char loc _ -> + lift $ subtype (Type.char loc) scrutineeType $> mempty + Pattern.Constructor loc ref cid args -> do + dct <- lift $ getDataConstructorType ref cid + udct <- lift $ ungeneralize dct + unless (Type.arity udct == length args) + . lift + . 
failWith + $ PatternArityMismatch loc dct (length args) + let step (Type.Arrow' i o, vso) pat = + (\vso' -> (o, vso ++ vso')) <$> checkPattern i pat + step _ _ = + lift . failWith $ PatternArityMismatch loc dct (length args) + (overall, vs) <- foldM step (udct, []) args + st <- lift $ applyM scrutineeType + lift $ subtype overall st + pure vs + Pattern.As _loc p' -> do + v <- getAdvance p + v' <- lift $ freshenVar v + lift . appendContext $ [Ann v' scrutineeType] + ((v, v') :) <$> checkPattern scrutineeType p' + Pattern.EffectPure loc p -> do + vt <- lift $ do + v <- freshenVar Var.inferPatternPureV + e <- freshenVar Var.inferPatternPureE + let vt = existentialp loc v + let et = existentialp loc e + appendContext [existential v, existential e] + subtype (Type.effectV loc (loc, et) (loc, vt)) scrutineeType + applyM vt + checkPattern vt p + Pattern.EffectBind loc ref cid args k -> do + -- scrutineeType should be a supertype of `Effect e vt` + -- for fresh existentials `e` and `vt` + e <- lift $ extendExistential Var.inferPatternBindE + v <- lift $ extendExistential Var.inferPatternBindV + let evt = Type.effectV loc (loc, existentialp loc e) + (loc, existentialp loc v) + lift $ subtype evt scrutineeType + ect <- lift $ getEffectConstructorType ref cid + uect <- lift $ ungeneralize ect + unless (Type.arity uect == length args) + . lift + . failWith + . PatternArityMismatch loc ect + $ length args + let step (Type.Arrow' i o, vso) pat = + (\vso' -> (o, vso ++ vso')) <$> checkPattern i pat + step _ _ = + lift . failWith $ PatternArityMismatch loc ect (length args) + (ctorOutputType, vs) <- foldM step (uect, []) args + case ctorOutputType of + -- an effect ctor should have exactly 1 effect! + Type.Effect'' [et] it -> do + -- expecting scrutineeType to be `Effect et vt` + st <- lift $ applyM scrutineeType + case st of + Type.App' _ vt -> + let kt = Type.arrow (Pattern.loc k) + it + (Type.effect (Pattern.loc k) [et] vt) + in (vs ++) <$> checkPattern kt k + _ -> lift . 
compilerCrash $ PatternMatchFailure + _ -> lift . compilerCrash $ EffectConstructorHadMultipleEffects + ctorOutputType + where + + getAdvance :: Pattern loc -> StateT [v] (M v loc) v + getAdvance p = do + vs <- get + case vs of + [] -> lift $ compilerCrash (MalformedPattern p) + (v : vs) -> do + put vs + pure v + +applyM :: (Var v, Ord loc) => Type v loc -> M v loc (Type v loc) +applyM t = (`apply` t) <$> getContext + +lookupAnn :: Ord v => Context v loc -> v -> Maybe (Type v loc) +lookupAnn ctx v = Map.lookup v (termVarAnnotations . info $ ctx) + +lookupSolved :: Ord v => Context v loc -> v -> Maybe (Monotype v loc) +lookupSolved ctx v = Map.lookup v (solvedExistentials . info $ ctx) + +resetContextAfter :: a -> M v loc a -> M v loc a +resetContextAfter x a = do + ctx <- getContext + a <- a `orElse` pure x + setContext ctx + pure a + +-- | Synthesize and generalize the type of each binding in a let rec. +-- Updates the context so that all bindings are annotated with +-- their type. Also returns the freshened version of `body`. +-- See usage in `synthesize` and `check` for `LetRec'` case. +annotateLetRecBindings + :: (Var v, Ord loc) + => Term.IsTop + -> ((v -> M v loc v) -> M v loc ([(v, Term v loc)], Term v loc)) + -> M v loc (Term v loc) +annotateLetRecBindings isTop letrec = + -- If this is a top-level letrec, then emit a TopLevelComponent note, + -- which asks if the user-provided type annotations were needed. + if isTop + then do + -- First, typecheck (using annotateLetRecBindings') the bindings with any + -- user-provided annotations. + (body, vts) <- annotateLetRecBindings' True + -- Then, try typechecking again, but ignoring any user-provided annotations. + -- This will infer whatever type. 
If it altogether fails to typecheck here + -- then, ...(1) + withoutAnnotations <- + resetContextAfter Nothing $ Just <$> annotateLetRecBindings' False + -- convert from typechecker TypeVar back to regular `v` vars + let unTypeVar (v, t) = (v, generalizeAndUnTypeVar t) + case withoutAnnotations of + Just (_, vts') -> do + r <- and <$> zipWithM isRedundant (fmap snd vts) (fmap snd vts') + btw $ TopLevelComponent ((\(v,b) -> (Var.reset v, b,r)) . unTypeVar <$> vts) + -- ...(1) we'll assume all the user-provided annotations were needed + Nothing -> btw + $ TopLevelComponent ((\(v, b) -> (Var.reset v, b, False)) . unTypeVar <$> vts) + pure body + -- If this isn't a top-level letrec, then we don't have to do anything special + else fst <$> annotateLetRecBindings' True + where + annotateLetRecBindings' useUserAnnotations = do + (bindings, body) <- letrec freshenVar + let vs = map fst bindings + ((bindings, bindingTypes), ctx2) <- markThenRetract Var.inferOther $ do + let f (v, binding) = case binding of + -- If user has provided an annotation, we use that + Term.Ann' e t | useUserAnnotations -> do + -- Arrows in `t` with no ability lists get an attached fresh + -- existential to allow inference of required abilities + t2 <- existentializeArrows =<< applyM t + pure (Term.ann (loc binding) e t2, t2) + -- If we're not using an annotation, we make one up. There's 2 cases: + + lam@(Term.Lam' _) -> + -- If `e` is a lambda of arity K, we immediately refine the + -- existential to `a1 ->{e1} a2 ... ->{eK} r`. This gives better + -- inference of the lambda's ability variables in conjunction with + -- handling of lambdas in `check` judgement. 
+ (lam,) <$> existentialFunctionTypeFor lam + e -> do + -- Anything else, just make up a fresh existential + -- which will be refined during typechecking of the binding + vt <- extendExistential v + pure $ (e, existential' (loc binding) B.Blank vt) + (bindings, bindingTypes) <- unzip <$> traverse f bindings + appendContext (zipWith Ann vs bindingTypes) + -- check each `bi` against its type + Foldable.for_ (zip bindings bindingTypes) $ \(b, t) -> + -- note: elements of a cycle have to be pure, otherwise order of effects + -- is unclear and chaos ensues + withEffects0 [] (checkScoped b t) + ensureGuardedCycle (vs `zip` bindings) + pure (bindings, bindingTypes) + -- compute generalized types `gt1, gt2 ...` for each binding `b1, b2...`; + -- add annotations `v1 : gt1, v2 : gt2 ...` to the context + let bindingArities = Term.arity <$> bindings + gen bindingType _arity = generalizeExistentials ctx2 bindingType + bindingTypesGeneralized = zipWith gen bindingTypes bindingArities + annotations = zipWith Ann vs bindingTypesGeneralized + appendContext annotations + pure (body, vs `zip` bindingTypesGeneralized) + +ensureGuardedCycle :: Var v => [(v, Term v loc)] -> M v loc () +ensureGuardedCycle bindings = let + -- We make sure that nonLambdas can depend only on lambdas, not on each other + nonLambdas = Set.fromList [ v | (v, b) <- bindings, Term.arity b == 0 ] + (notok, ok) = partition f bindings + f (v, b) = + if Set.member v nonLambdas then + not $ Set.null (ABT.freeVars b `Set.intersection` nonLambdas) + else False + in if length ok == length bindings then pure () + else failWith $ UnguardedLetRecCycle (fst <$> notok) bindings + +existentialFunctionTypeFor :: Var v => Term v loc -> M v loc (Type v loc) +existentialFunctionTypeFor lam@(Term.LamNamed' v body) = do + v <- extendExistential v + e <- extendExistential Var.inferAbility + o <- existentialFunctionTypeFor body + pure $ Type.arrow (loc lam) + (existentialp (loc lam) v) + (Type.effect (loc lam) [existentialp (loc 
lam) e] o) +existentialFunctionTypeFor e = do + v <- extendExistential Var.inferOutput + pure $ existentialp (loc e) v + +existentializeArrows :: Var v => Type v loc -> M v loc (Type v loc) +existentializeArrows t = do + t <- Type.existentializeArrows (extendExistentialTV Var.inferAbility) t + pure t + +ungeneralize :: (Var v, Ord loc) => Type v loc -> M v loc (Type v loc) +ungeneralize t = snd <$> ungeneralize' t + +ungeneralize' :: (Var v, Ord loc) => Type v loc -> M v loc ([v], Type v loc) +ungeneralize' (Type.Forall' t) = do + v <- ABT.freshen t freshenTypeVar + appendContext [existential v] + t <- pure $ ABT.bindInheritAnnotation t (existential' () B.Blank v) + first (v:) <$> ungeneralize' t +ungeneralize' t = pure ([], t) + +-- | Apply the context to the input type, then convert any unsolved existentials +-- to universals. +generalizeExistentials :: (Var v, Ord loc) => [Element v loc] -> Type v loc -> Type v loc +generalizeExistentials ctx t = + foldr gen (applyCtx ctx t) unsolvedExistentials + where + unsolvedExistentials = [ v | Var (TypeVar.Existential _ v) <- ctx ] + gen e t = + if TypeVar.Existential B.Blank e `ABT.isFreeIn` t + -- location of the forall is just the location of the input type + -- and the location of each quantified variable is just inherited from + -- its source location + then Type.forall (loc t) + (TypeVar.Universal e) + (ABT.substInheritAnnotation + (TypeVar.Existential B.Blank e) + (universal' () e) + t) + else t -- don't bother introducing a forall if type variable is unused + +-- This checks `e` against the type `t`, but if `t` is a `∀`, any ∀-quantified +-- variables are freshened and substituted into `e`. This should be called whenever +-- a term is being checked against a type due to a user-provided signature on `e`. +-- See its usage in `synthesize` and `annotateLetRecBindings`. +checkScoped :: forall v loc . 
(Var v, Ord loc) => Term v loc -> Type v loc -> M v loc () +checkScoped e t = case t of + Type.Forall' body -> do -- ForallI + v <- ABT.freshen body freshenTypeVar + markThenRetract0 v $ do + x <- extendUniversal v + let e' = Term.substTypeVar (ABT.variable body) (universal' () x) e + checkScoped e' (ABT.bindInheritAnnotation body (universal' () x)) + _ -> check e t + +-- | Check that under the given context, `e` has type `t`, +-- updating the context in the process. +check :: forall v loc . (Var v, Ord loc) => Term v loc -> Type v loc -> M v loc () +check e t | debugEnabled && traceShow ("check" :: String, e, t) False = undefined +check e0 t0 = scope (InCheck e0 t0) $ do + ctx <- getContext + let Type.Effect'' es t = t0 + let e = minimize' e0 + case e of + Left e -> failWith $ DuplicateDefinitions e + Right e -> + if wellformedType ctx t0 + then case t of + -- expand existentials before checking + t@(Type.Var' (TypeVar.Existential _ _)) -> abilityCheck es >> go e (apply ctx t) + t -> go e (Type.stripIntroOuters t) + else failWith $ IllFormedType ctx + where + go :: Term v loc -> Type v loc -> M v loc () + go e (Type.Forall' body) = do -- ForallI + v <- ABT.freshen body freshenTypeVar + markThenRetract0 v $ do + x <- extendUniversal v + check e (ABT.bindInheritAnnotation body (universal' () x)) + go (Term.Lam' body) (Type.Arrow' i o) = do -- =>I + x <- ABT.freshen body freshenVar + markThenRetract0 x $ do + extendContext (Ann x i) + let Type.Effect'' es ot = o + body' <- pure $ ABT.bindInheritAnnotation body (Term.var() x) + withEffects0 es $ check body' ot + go (Term.Let1' binding e) t = do + v <- ABT.freshen e freshenVar + tbinding <- synthesize binding + markThenRetract0 v $ do + extendContext (Ann v tbinding) + check (ABT.bindInheritAnnotation e (Term.var () v)) t + go (Term.LetRecNamed' [] e) t = check e t + go (Term.LetRecTop' isTop letrec) t = + markThenRetract0 (Var.named "let-rec-marker") $ do + e <- annotateLetRecBindings isTop letrec + check e t + go e t 
= do -- Sub + a <- synthesize e + ctx <- getContext + subtype (apply ctx a) (apply ctx t) + +-- | `subtype ctx t1 t2` returns successfully if `t1` is a subtype of `t2`. +-- This may have the effect of altering the context. +subtype :: forall v loc . (Var v, Ord loc) => Type v loc -> Type v loc -> M v loc () +subtype tx ty | debugEnabled && traceShow ("subtype"::String, tx, ty) False = undefined +subtype tx ty = scope (InSubtype tx ty) $ do + ctx <- getContext + go (ctx :: Context v loc) (Type.stripIntroOuters tx) (Type.stripIntroOuters ty) + where -- Rules from figure 9 + go :: Context v loc -> Type v loc -> Type v loc -> M v loc () + go _ (Type.Ref' r) (Type.Ref' r2) | r == r2 = pure () -- `Unit` + go ctx t1@(Type.Var' (TypeVar.Universal v1)) t2@(Type.Var' (TypeVar.Universal v2)) -- `Var` + | v1 == v2 && wellformedType ctx t1 && wellformedType ctx t2 + = pure () + go ctx t1@(Type.Var' (TypeVar.Existential _ v1)) t2@(Type.Var' (TypeVar.Existential _ v2)) -- `Exvar` + | v1 == v2 && wellformedType ctx t1 && wellformedType ctx t2 + = pure () + go _ (Type.Arrow' i1 o1) (Type.Arrow' i2 o2) = do -- `-->` + subtype i2 i1; ctx' <- getContext + subtype (apply ctx' o1) (apply ctx' o2) + go _ (Type.App' x1 y1) (Type.App' x2 y2) = do -- analogue of `-->` + subtype x1 x2 + -- We don't know the variance of the type argument, so we assume + -- (conservatively) that it's invariant, see + -- discussion https://github.com/unisonweb/unison/issues/512 + y1 <- applyM y1; y2 <- applyM y2 + subtype y1 y2 + y1 <- applyM y1; y2 <- applyM y2 + -- performing the subtype check in both directions means the types must be equal + subtype y2 y1 + go _ t (Type.Forall' t2) = do + v <- ABT.freshen t2 freshenTypeVar + markThenRetract0 v $ do + v' <- extendUniversal v + t2 <- pure $ ABT.bindInheritAnnotation t2 (universal' () v') + subtype t t2 + go _ (Type.Forall' t) t2 = do + v0 <- ABT.freshen t freshenTypeVar + markThenRetract0 v0 $ do + v <- extendExistential v0 + t <- pure $ 
ABT.bindInheritAnnotation t (existential' () B.Blank v) + t1 <- applyM t + subtype t1 t2 + go _ (Type.Effect1' e1 a1) (Type.Effect1' e2 a2) = do + subtype e1 e2 + ctx <- getContext + subtype (apply ctx a1) (apply ctx a2) + go _ a (Type.Effect1' _e2 a2) = subtype a a2 + go _ (Type.Effect1' es a) a2 = do + subtype es (Type.effects (loc es) []) + subtype a a2 + go ctx (Type.Var' (TypeVar.Existential b v)) t -- `InstantiateL` + | Set.member v (existentials ctx) && notMember v (Type.freeVars t) = + instantiateL b v t + go ctx t (Type.Var' (TypeVar.Existential b v)) -- `InstantiateR` + | Set.member v (existentials ctx) && notMember v (Type.freeVars t) = + instantiateR t b v + go _ (Type.Effects' es1) (Type.Effects' es2) = do + ctx <- getContext + let es1' = map (apply ctx) es1 + es2' = map (apply ctx) es2 + if all (`elem` es2') es1' then pure () else abilityCheck' es2' es1' + go _ t t2@(Type.Effects' _) | expand t = subtype (Type.effects (loc t) [t]) t2 + go _ t@(Type.Effects' _) t2 | expand t2 = subtype t (Type.effects (loc t2) [t2]) + go ctx _ _ = failWith $ TypeMismatch ctx + + expand :: Type v loc -> Bool + expand t = case t of + Type.Var' (TypeVar.Existential _ _) -> True + Type.App' _ _ -> True + Type.Ref' _ -> True + _ -> False + + +-- | Instantiate the given existential such that it is +-- a subtype of the given type, updating the context +-- in the process. 
+instantiateL :: (Var v, Ord loc) => B.Blank loc -> v -> Type v loc -> M v loc () +instantiateL _ v t | debugEnabled && traceShow ("instantiateL"::String, v, t) False = undefined +instantiateL blank v (Type.stripIntroOuters -> t) = scope (InInstantiateL v t) $ + getContext >>= \ctx -> case Type.monotype t of + Just t -> solve ctx v t >>= \case + Just ctx -> setContext ctx -- InstLSolve + Nothing -> go ctx + Nothing -> go ctx + where + go ctx = case t of + Type.Var' (TypeVar.Existential _ v2) | ordered ctx v v2 -> -- InstLReach (both are existential, set v2 = v) + solve ctx v2 (Type.Monotype (existentialp (loc t) v)) >>= + maybe (failWith $ TypeMismatch ctx) setContext + Type.Arrow' i o -> do -- InstLArr + [i',o'] <- traverse freshenVar [nameFrom Var.inferInput i, nameFrom Var.inferOutput o] + let s = Solved blank v (Type.Monotype (Type.arrow (loc t) + (existentialp (loc i) i') + (existentialp (loc o) o'))) + replaceContext (existential v) + [existential o', existential i', s] + instantiateR i B.Blank i' -- todo: not sure about this, could also be `blank` + applyM o >>= instantiateL B.Blank o' + Type.App' x y -> do -- analogue of InstLArr + [x', y'] <- traverse freshenVar [nameFrom Var.inferTypeConstructor x, nameFrom Var.inferTypeConstructorArg y] + let s = Solved blank v (Type.Monotype (Type.app (loc t) + (existentialp (loc x) x') + (existentialp (loc y) y'))) + replaceContext (existential v) + [existential y', existential x', s] + applyM x >>= instantiateL B.Blank x' + applyM y >>= instantiateL B.Blank y' + Type.Effect1' es vt -> do + es' <- freshenVar Var.inferAbility + vt' <- freshenVar Var.inferOther + let t' = Type.effect1 (loc t) (existentialp (loc es) es') + (existentialp (loc vt) vt') + s = Solved blank v (Type.Monotype t') + replaceContext (existential v) + [existential es', existential vt', s] + applyM es >>= instantiateL B.Blank es' + applyM vt >>= instantiateL B.Blank vt' + Type.Effects' es -> do + es' <- traverse (\e -> freshenVar (nameFrom 
Var.inferAbility e)) es + let locs = loc <$> es + t' = Type.effects (loc t) (uncurry existentialp <$> locs `zip` es') + s = Solved blank v $ Type.Monotype t' + replaceContext (existential v) + ((existential <$> es') ++ [s]) + Foldable.for_ (es' `zip` es) $ \(e',e) -> + applyM e >>= instantiateL B.Blank e' + Type.Forall' body -> do -- InstLIIL + v0 <- ABT.freshen body freshenTypeVar + markThenRetract0 v0 $ do + v <- extendUniversal v0 + instantiateL B.Blank v (ABT.bindInheritAnnotation body (universal' () v)) + _ -> failWith $ TypeMismatch ctx + +nameFrom :: Var v => v -> Type v loc -> v +nameFrom _ (Type.Var' v) = TypeVar.underlying (Var.reset v) +nameFrom ifNotVar _ = ifNotVar + +-- | Instantiate the given existential such that it is +-- a supertype of the given type, updating the context +-- in the process. +instantiateR :: (Var v, Ord loc) => Type v loc -> B.Blank loc -> v -> M v loc () +instantiateR t _ v | debugEnabled && traceShow ("instantiateR"::String, t, v) False = undefined +instantiateR (Type.stripIntroOuters -> t) blank v = scope (InInstantiateR t v) $ + getContext >>= \ctx -> case Type.monotype t of + Just t -> solve ctx v t >>= \case + Just ctx -> setContext ctx -- InstRSolve + Nothing -> go ctx + Nothing -> go ctx + where + go ctx = case t of + Type.Var' (TypeVar.Existential _ v2) | ordered ctx v v2 -> -- InstRReach (both are existential, set v2 = v) + solve ctx v2 (Type.Monotype (existentialp (loc t) v)) >>= + maybe (failWith $ TypeMismatch ctx) setContext + Type.Arrow' i o -> do -- InstRArrow + [i', o'] <- traverse freshenVar [nameFrom Var.inferInput i, nameFrom Var.inferOutput o] + let s = Solved blank v (Type.Monotype + (Type.arrow (loc t) + (existentialp (loc i) i') + (existentialp (loc o) o'))) + replaceContext (existential v) + [existential o', existential i', s] + ctx <- instantiateL B.Blank i' i >> getContext + instantiateR (apply ctx o) B.Blank o' + Type.App' x y -> do -- analogue of InstRArr + -- example foo a <: v' will + -- 1. 
create foo', a', add these to the context + -- 2. add v' = foo' a' to the context + -- 3. recurse to refine the types of foo' and a' + [x', y'] <- traverse freshenVar [nameFrom Var.inferTypeConstructor x, nameFrom Var.inferTypeConstructorArg y] + let s = Solved blank v (Type.Monotype (Type.app (loc t) (existentialp (loc x) x') (existentialp (loc y) y'))) + replaceContext (existential v) [existential y', existential x', s] + applyM x >>= \x -> instantiateR x B.Blank x' + applyM y >>= \y -> instantiateR y B.Blank y' + Type.Effect1' es vt -> do + es' <- freshenVar (nameFrom Var.inferAbility es) + vt' <- freshenVar (nameFrom Var.inferTypeConstructorArg vt) + let t' = Type.effect1 (loc t) (existentialp (loc es) es') + (existentialp (loc vt) vt') + s = Solved blank v (Type.Monotype t') + replaceContext (existential v) + [existential es', existential vt', s] + applyM es >>= \es -> instantiateR es B.Blank es' + applyM vt >>= \vt -> instantiateR vt B.Blank vt' + Type.Effects' es -> do + es' <- traverse (\e -> freshenVar (nameFrom Var.inferAbility e)) es + let locs = loc <$> es + t' = Type.effects (loc t) (uncurry existentialp <$> locs `zip` es') + s = Solved blank v $ Type.Monotype t' + replaceContext (existential v) + ((existential <$> es') ++ [s]) + Foldable.for_ (es `zip` es') $ \(e, e') -> do + ctx <- getContext + instantiateR (apply ctx e) B.Blank e' + Type.Forall' body -> do -- InstRAIIL + x' <- ABT.freshen body freshenTypeVar + markThenRetract0 x' $ do + appendContext [existential x'] + instantiateR (ABT.bindInheritAnnotation body (existential' () B.Blank x')) B.Blank v + _ -> failWith $ TypeMismatch ctx + +-- | solve (ΓL,α^,ΓR) α τ = (ΓL,α^ = τ,ΓR) +-- Solve the given existential variable to the given monotype. +-- If the given monotype is not well-formed at the context location +-- where the existential variable is introduced, return `Nothing`. +-- Fail with type mismatch if the existential is already solved to something else. 
+-- Fail with a compiler bug if the existential does not appear in the context at all. +solve :: (Var v, Ord loc) => Context v loc -> v -> Monotype v loc -> M v loc (Maybe (Context v loc)) +solve ctx v t = case lookupSolved ctx v of + Just t2 -> + -- okay to solve something again if it's to an identical type + if same t t2 then pure (Just ctx) + else failWith $ TypeMismatch ctx + where same t1 t2 = apply ctx (Type.getPolytype t1) == apply ctx (Type.getPolytype t2) + Nothing -> case breakAt (existential v) ctx of + Just (ctxL, Existential blank v, ctxR) -> + if wellformedType ctxL (Type.getPolytype t) + then Just <$> ctxL `extendN` ((Solved blank v t) : ctxR) + else pure Nothing + _ -> compilerCrash $ UnknownExistentialVariable v ctx + +abilityCheck' :: forall v loc . (Var v, Ord loc) => [Type v loc] -> [Type v loc] -> M v loc () +abilityCheck' [] [] = pure () +abilityCheck' ambient0 requested0 = go ambient0 requested0 where + go _ambient [] = pure () + go ambient0 (r:rs) = do + -- Note: if applyM returns an existential, it's unsolved + ambient <- traverse applyM ambient0 + r <- applyM r + -- 1. Look in ambient for exact match of head of `r` + case find (headMatch r) ambient of + -- 2a. If yes for `a` in ambient, do `subtype amb r` and done. + Just amb -> do + subtype amb r `orElse` die r + go ambient rs + -- 2b. 
If no: + Nothing -> case r of + -- It's an unsolved existential, instantiate it to all of ambient + Type.Var' (TypeVar.Existential b v) -> do + let et2 = Type.effects (loc r) ambient + -- instantiate it to `{}` if can't cover all of ambient + instantiateR et2 b v + `orElse` instantiateR (Type.effects (loc r) []) b v + `orElse` die1 + go ambient rs + _ -> -- find unsolved existential, 'e, that appears in ambient + let unsolveds = (ambient >>= Type.flattenEffects >>= vars) + vars (Type.Var' (TypeVar.Existential b v)) = [(b,v)] + vars _ = [] + in case listToMaybe unsolveds of + Just (b, e') -> do + -- introduce fresh existential 'e2 to context + e2' <- extendExistential e' + let et2 = Type.effects (loc r) [r, existentialp (loc r) e2'] + instantiateR et2 b e' `orElse` die r + go ambient rs + _ -> die r + + headMatch :: Type v loc -> Type v loc -> Bool + headMatch (Type.App' f _) (Type.App' f2 _) = headMatch f f2 + headMatch r r2 = r == r2 + + -- as a last ditch effort, if the request is an existential and there are + -- no remaining unbound existentials left in ambient, we try to instantiate + -- the request to the ambient effect list + die r = case r of + Type.Var' (TypeVar.Existential b v) -> + instantiateL b v (Type.effects (loc r) ambient0) `orElse` die1 + -- instantiateL b v (Type.effects (loc r) []) `orElse` die1 + _ -> die1 -- and if that doesn't work, then we're really toast + + die1 = do + ctx <- getContext + failWith $ AbilityCheckFailure (apply ctx <$> ambient0) + (apply ctx <$> requested0) + ctx + +abilityCheck :: (Var v, Ord loc) => [Type v loc] -> M v loc () +abilityCheck requested = do + ambient <- getAbilities + requested' <- filterM shouldPerformAbilityCheck requested + ctx <- getContext + abilityCheck' (apply ctx <$> ambient >>= Type.flattenEffects) + (apply ctx <$> requested' >>= Type.flattenEffects) + +verifyDataDeclarations :: (Var v, Ord loc) => DataDeclarations v loc -> Result v loc () +verifyDataDeclarations decls = forM_ (Map.toList decls) $ 
\(_ref, decl) -> do + let ctors = DD.constructors decl + forM_ ctors $ \(_ctorName,typ) -> verifyClosed typ id + +-- | public interface to the typechecker +synthesizeClosed + :: (Var v, Ord loc) + => [Type v loc] + -> TL.TypeLookup v loc + -> Term v loc + -> Result v loc (Type v loc) +synthesizeClosed abilities lookupType term0 = let + datas = TL.dataDecls lookupType + effects = TL.effectDecls lookupType + term = annotateRefs (TL.typeOfTerm' lookupType) term0 + in case term of + Left missingRef -> + compilerCrashResult (UnknownTermReference missingRef) + Right term -> run [] datas effects $ do + liftResult $ verifyDataDeclarations datas + *> verifyDataDeclarations (DD.toDataDecl <$> effects) + *> verifyClosedTerm term + synthesizeClosed' abilities term + +verifyClosedTerm :: forall v loc . Ord v => Term v loc -> Result v loc () +verifyClosedTerm t = do + ok1 <- verifyClosed t id + let freeTypeVars = Map.toList $ Term.freeTypeVarAnnotations t + reportError (v, locs) = for_ locs $ \loc -> + typeError (UnknownSymbol loc (TypeVar.underlying v)) + for_ freeTypeVars reportError + when (not ok1 || (not . 
null) freeTypeVars) $ compilerBug (OtherBug "impossible") + +verifyClosed :: (Traversable f, Ord v) => ABT.Term f v a -> (v -> v2) -> Result v2 a Bool +verifyClosed t toV2 = + let isBoundIn v t = Set.member v (snd (ABT.annotation t)) + loc t = fst (ABT.annotation t) + go t@(ABT.Var' v) | not (isBoundIn v t) = typeError (UnknownSymbol (loc t) $ toV2 v) + go _ = pure True + in all id <$> ABT.foreachSubterm go (ABT.annotateBound t) + +annotateRefs :: (Applicative f, Var v) + => (Reference -> f (Type.Type v loc)) + -> Term v loc + -> f (Term v loc) +annotateRefs synth = ABT.visit f where + f r@(Term.Ref' h) = Just (Term.ann ra (Term.ref ra h) <$> (ge <$> synth h)) + where ra = ABT.annotation r + ge t = ABT.vmap TypeVar.Universal $ t + f _ = Nothing + +run + :: (Var v, Ord loc, Functor f) + => [Type v loc] + -> DataDeclarations v loc + -> EffectDeclarations v loc + -> MT v loc f a + -> f a +run ambient datas effects m = + fmap fst + . runM m + $ MEnv (Env 1 context0) ambient datas effects [] + +synthesizeClosed' :: (Var v, Ord loc) + => [Type v loc] + -> Term v loc + -> M v loc (Type v loc) +synthesizeClosed' abilities term = do + -- save current context, for restoration when done + ctx0 <- getContext + setContext context0 + (t, ctx) <- markThenRetract (Var.named "start") $ do + -- retract will cause notes to be written out for + -- any `Blank`-tagged existentials passing out of scope + withEffects0 abilities (synthesize term) + setContext ctx0 -- restore the initial context + pure $ generalizeExistentials ctx t + +-- Check if the given typechecking action succeeds. +succeeds :: M v loc a -> TotalM v loc Bool +succeeds m = do + e <- ask + case runM m e of + Success _ _ -> pure True + TypeError _ _ -> pure False + CompilerBug bug _ _ -> MT (\_ -> Left bug) + +-- Check if `t1` is a subtype of `t2`. Doesn't update the typechecking context. 
+isSubtype' :: (Var v, Ord loc) => Type v loc -> Type v loc -> TotalM v loc Bool +isSubtype' type1 type2 = succeeds $ do + let vars = Set.toList $ Set.union (ABT.freeVars type1) (ABT.freeVars type2) + reserveAll (TypeVar.underlying <$> vars) + appendContext (Var <$> vars) + subtype type1 type2 + +-- `isRedundant userType inferredType` returns `True` if the `userType` +-- is equal "up to inferred abilities" to `inferredType`. +-- +-- Example: `userType` is `Nat -> Nat`, `inferredType` is `∀ a . a ->{IO} a`. +-- In this case, the signature isn't redundant, and we return +-- `False`. +-- Example: `userType` is (`∀ a . a -> a`) and inferred is `∀ z e . z ->{e} z`. +-- In this case, the signature IS redundant, and we return `True`. +isRedundant + :: (Var v, Ord loc) + => Type v loc + -> Type v loc + -> M v loc Bool +isRedundant userType0 inferredType0 = do + ctx0 <- getContext + -- the inferred type may have some unsolved existentials, which we generalize over + -- before doing the comparison, otherwise it will just test equal to any + -- concrete instantiation of those existentials. For instance, the + -- inferred type `a -> a` for a existential `a` should get generalized + -- to `∀ a . a -> a` before comparison to `Nat -> Nat`, otherwise the + -- typechecker will solve `a = Nat` and call the types equal! + userType <- existentializeArrows userType0 + inferredType <- generalizeExistentials' <$> applyM inferredType0 + -- We already know `inferred <: userType`, otherwise the user's given + -- type would have caused the program not to typecheck! Ex: if user writes + -- `: Nat -> Nat` when it has an inferred type of `a -> a`. So we only + -- need to check the other direction to determine redundancy. 
+ (liftTotalM $ isSubtype' userType inferredType) <* setContext ctx0 + +-- Public interface to `isSubtype` +isSubtype + :: (Var v, Ord loc) => Type v loc -> Type v loc -> Either (CompilerBug v loc) Bool +isSubtype t1 t2 = + run [] Map.empty Map.empty (isSubtype' t1 t2) + +isEqual + :: (Var v, Ord loc) => Type v loc -> Type v loc -> Either (CompilerBug v loc) Bool +isEqual t1 t2 = + (&&) <$> isSubtype t1 t2 <*> isSubtype t2 t1 + +instance (Var v) => Show (Element v loc) where + show (Var v) = case v of + TypeVar.Universal x -> "@" <> show x + TypeVar.Existential _ x -> "'" ++ show x + show (Solved _ v t) = "'"++Text.unpack (Var.name v)++" = "++TP.pretty' Nothing mempty (Type.getPolytype t) + show (Ann v t) = Text.unpack (Var.name v) ++ " : " ++ + TP.pretty' Nothing mempty t + show (Marker v) = "|"++Text.unpack (Var.name v)++"|" + +instance (Ord loc, Var v) => Show (Context v loc) where + show ctx@(Context es) = "Γ\n " ++ (intercalate "\n " . map (showElem ctx . fst)) (reverse es) + where + showElem _ctx (Var v) = case v of + TypeVar.Universal x -> "@" <> show x + TypeVar.Existential _ x -> "'" ++ show x + showElem ctx (Solved _ v (Type.Monotype t)) = "'"++Text.unpack (Var.name v)++" = "++ TP.pretty' Nothing mempty (apply ctx t) + showElem ctx (Ann v t) = Text.unpack (Var.name v) ++ " : " ++ TP.pretty' Nothing mempty (apply ctx t) + showElem _ (Marker v) = "|"++Text.unpack (Var.name v)++"|" + +-- MEnv v loc -> (Seq (ErrorNote v loc), (a, Env v loc)) +instance Monad f => Monad (MT v loc f) where + return a = MT (\menv -> pure (a, env menv)) + m >>= f = MT go where + go menv = do + (a, env1) <- runM m menv + runM (f a) (menv { env = env1 }) + +instance Monad f => MonadFail.MonadFail (MT v loc f) where + fail = error + +instance Monad f => Applicative (MT v loc f) where + pure a = MT (\menv -> pure (a, env menv)) + (<*>) = ap + +instance Functor f => Functor (MT v loc f) where + fmap f (MT m) = MT (\menv -> fmap (first f) (m menv)) + +instance Monad f => MonadReader 
(MEnv v loc) (MT v loc f) where + ask = MT (\e -> pure (e, env e)) + local f m = MT $ runM m . f diff --git a/parser-typechecker/src/Unison/Typechecker/Extractor.hs b/parser-typechecker/src/Unison/Typechecker/Extractor.hs new file mode 100644 index 0000000000..edd3db69b4 --- /dev/null +++ b/parser-typechecker/src/Unison/Typechecker/Extractor.hs @@ -0,0 +1,343 @@ +module Unison.Typechecker.Extractor where + +import Unison.Prelude hiding (whenM) + +import Control.Monad.Reader +import qualified Data.List as List +import Data.List.NonEmpty ( NonEmpty ) +import qualified Data.Set as Set +import Unison.Reference ( Reference ) +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import qualified Unison.Typechecker.Context as C +import Unison.Util.Monoid ( whenM ) +import qualified Unison.Blank as B +import Unison.Var (Var) +import qualified Unison.Var as Var +import Unison.Type (Type) + +type RedundantTypeAnnotation = Bool + +type Extractor e a = MaybeT (Reader e) a + +type ErrorExtractor v loc a = Extractor (C.ErrorNote v loc) a + +type InfoExtractor v loc a = Extractor (C.InfoNote v loc) a + +type PathExtractor v loc a = Extractor (C.PathElement v loc) a + +type SubseqExtractor v loc a = SubseqExtractor' (C.ErrorNote v loc) a + +extractor :: (e -> Maybe a) -> Extractor e a +extractor = MaybeT . reader + +extract :: Extractor e a -> e -> Maybe a +extract = runReader . 
runMaybeT + +subseqExtractor :: (C.ErrorNote v loc -> [Ranged a]) -> SubseqExtractor v loc a +subseqExtractor f = SubseqExtractor' f + +traceSubseq :: Show a => String -> SubseqExtractor' n a -> SubseqExtractor' n a +traceSubseq s ex = SubseqExtractor' $ \n -> + let rs = runSubseq ex n + in trace (if null s then show rs else s ++ ": " ++ show rs) rs + +traceNote + :: Show a => String -> ErrorExtractor v loc a -> ErrorExtractor v loc a +traceNote s ex = extractor $ \n -> + let result = extract ex n + in trace (if null s then show result else s ++ ": " ++ show result) result + +unique :: SubseqExtractor v loc a -> ErrorExtractor v loc a +unique ex = extractor $ \note -> case runSubseq ex note of + [Pure a ] -> Just a + [Ranged a _ _] -> Just a + _ -> Nothing + +data SubseqExtractor' n a = + SubseqExtractor' { runSubseq :: n -> [Ranged a] } + +data Ranged a + = Pure a + | Ranged { get :: a, start :: Int, end :: Int } + deriving (Functor, Show) + +-- | collects the regions where `xa` doesn't match / aka invert a set of intervals +-- unused, but don't want to delete it yet - Aug 30, 2018 +_no :: SubseqExtractor' n a -> SubseqExtractor' n () +_no xa = SubseqExtractor' $ \note -> + let as = runSubseq xa note + in if null [ a | Pure a <- as ] + then -- results are not full + if null as + then [Pure ()] -- results are empty, make them full + -- not full and not empty, find the negation + else reverse . fst $ foldl' go + ([], Nothing) + (List.sort $ fmap toPairs as) + else [] -- results were full, make them empty + where + toPairs :: Ranged a -> (Int, Int) + toPairs (Pure _ ) = error "this case should be avoided by the if!" 
+ toPairs (Ranged _ start end) = (start, end) + + go :: ([Ranged ()], Maybe Int) -> (Int, Int) -> ([Ranged ()], Maybe Int) + go ([] , Nothing) (0, r) = ([], Just (r + 1)) + go ([] , Nothing) (l, r) = ([Ranged () 0 (l - 1)], Just r) + go (_ : _, Nothing) _ = error "state machine bug in Extractor2.no" + go (rs, Just r0) (l, r) = + (if r0 + 1 <= l - 1 then Ranged () (r0 + 1) (l - 1) : rs else rs, Just r) + +-- unused / untested +_any :: SubseqExtractor v loc () +_any = _any' (\n -> pathLength n - 1) + where + pathLength :: C.ErrorNote v loc -> Int + pathLength = length . toList . C.path + +_any' :: (n -> Int) -> SubseqExtractor' n () +_any' getLast = SubseqExtractor' $ \note -> Pure () : do + let last = getLast note + start <- [0 .. last] + end <- [0 .. last] + pure $ Ranged () start end + +-- Kind of a newtype for Ranged.Ranged. +-- The Eq instance ignores the embedded value +data DistinctRanged a = DistinctRanged a Int Int +instance Eq (DistinctRanged a) where + DistinctRanged _ l r == DistinctRanged _ l' r' = l == l' && r == r' +instance Ord (DistinctRanged a) where + DistinctRanged _ l r <= DistinctRanged _ l' r' = + l < l' || (l == l' && r <= r') + +-- todo: this could return NonEmpty +some :: forall n a . SubseqExtractor' n a -> SubseqExtractor' n [a] +some xa = SubseqExtractor' $ \note -> + let as :: [Ranged a] + as = runSubseq xa note + -- Given a list of subseqs [Ranged a], find the adjacent groups [Ranged [a]]. + -- `Pure`s arguably can't be adjacent; not sure what to do with them. Currently ignored. + in fmap reverse <$> go Set.empty as + where + fromDistinct (DistinctRanged a l r) = Ranged a l r + go :: Set (DistinctRanged [a]) -> [Ranged a] -> [Ranged [a]] + go seen [] = fmap fromDistinct . toList $ seen + go seen (rh@(Ranged h start end) : t) = + let seen' :: Set (DistinctRanged [a]) + seen' = + Set.fromList . join . fmap (toList . consRange rh) . 
toList $ seen + in go (Set.insert (DistinctRanged [h] start end) seen `Set.union` seen') t + go seen (Pure _ : t) = go seen t + + consRange :: Ranged a -> DistinctRanged [a] -> Maybe (DistinctRanged [a]) + consRange new group@(DistinctRanged as start' _) = if isAdjacent group new + then Just (DistinctRanged (get new : as) start' (end new)) + else Nothing + + -- Returns true if inputs are adjacent Ranged regions + -- Question: Should a Pure be considered adjacent? + isAdjacent :: forall a b . DistinctRanged a -> Ranged b -> Bool + isAdjacent (DistinctRanged _ _ endA) (Ranged _ startB _) = endA + 1 == startB + isAdjacent _ _ = False + +pathStart :: SubseqExtractor' n () +pathStart = SubseqExtractor' $ \_ -> [Ranged () (-1) (-1)] + +-- Scopes -- +asPathExtractor :: (C.PathElement v loc -> Maybe a) -> SubseqExtractor v loc a +asPathExtractor = fromPathExtractor . extractor + where + fromPathExtractor :: PathExtractor v loc a -> SubseqExtractor v loc a + fromPathExtractor ex = + subseqExtractor $ join . fmap go . (`zip` [0 ..]) . toList . 
C.path + where + go (e, i) = case extract ex e of + Just a -> [Ranged a i i] + Nothing -> [] + +inSynthesize :: SubseqExtractor v loc (C.Term v loc) +inSynthesize = asPathExtractor $ \case + C.InSynthesize t -> Just t + _ -> Nothing + +inSubtype :: SubseqExtractor v loc (C.Type v loc, C.Type v loc) +inSubtype = asPathExtractor $ \case + C.InSubtype found expected -> Just (found, expected) + _ -> Nothing + +inCheck :: SubseqExtractor v loc (C.Term v loc, C.Type v loc) +inCheck = asPathExtractor $ \case + C.InCheck e t -> Just (e, t) + _ -> Nothing + +-- inInstantiateL +-- inInstantiateR + +inSynthesizeApp :: SubseqExtractor v loc (C.Type v loc, C.Term v loc, Int) +inSynthesizeApp = asPathExtractor $ \case + C.InSynthesizeApp t e n -> Just (t, e, n) + _ -> Nothing + +inFunctionCall + :: SubseqExtractor v loc ([v], C.Term v loc, C.Type v loc, [C.Term v loc]) +inFunctionCall = asPathExtractor $ \case + C.InFunctionCall vs f ft e -> case f of + Term.Ann' f _ -> Just (vs, f, ft, e) + f -> Just (vs, f, ft, e) + _ -> Nothing + +inAndApp, inOrApp, inIfCond, inMatchGuard, inMatchBody + :: SubseqExtractor v loc () +inAndApp = asPathExtractor $ \case + C.InAndApp -> Just () + _ -> Nothing +inOrApp = asPathExtractor $ \case + C.InOrApp -> Just () + _ -> Nothing +inIfCond = asPathExtractor $ \case + C.InIfCond -> Just () + _ -> Nothing +inMatchGuard = asPathExtractor $ \case + C.InMatchGuard -> Just () + _ -> Nothing +inMatchBody = asPathExtractor $ \case + C.InMatchBody -> Just () + _ -> Nothing + +inMatch, inVector, inIfBody :: SubseqExtractor v loc loc +inMatch = asPathExtractor $ \case + C.InMatch loc -> Just loc + _ -> Nothing +inVector = asPathExtractor $ \case + C.InVectorApp loc -> Just loc + _ -> Nothing +inIfBody = asPathExtractor $ \case + C.InIfBody loc -> Just loc + _ -> Nothing + +-- Causes -- +cause :: ErrorExtractor v loc (C.Cause v loc) +cause = extractor $ pure . 
C.cause + +duplicateDefinitions :: ErrorExtractor v loc (NonEmpty (v, [loc])) +duplicateDefinitions = cause >>= \case + C.DuplicateDefinitions vs -> pure vs + _ -> mzero + +typeMismatch :: ErrorExtractor v loc (C.Context v loc) +typeMismatch = cause >>= \case + C.TypeMismatch c -> pure c + _ -> mzero + +illFormedType :: ErrorExtractor v loc (C.Context v loc) +illFormedType = cause >>= \case + C.IllFormedType c -> pure c + _ -> mzero + +unknownSymbol :: ErrorExtractor v loc (loc, v) +unknownSymbol = cause >>= \case + C.UnknownSymbol loc v -> pure (loc, v) + _ -> mzero + +unknownTerm :: Var v => ErrorExtractor v loc (loc, v, [C.Suggestion v loc], C.Type v loc) +unknownTerm = cause >>= \case + C.UnknownTerm loc v suggestions expectedType -> do + let k = Var.Inference Var.Ability + cleanup = Type.cleanup . Type.removePureEffects . Type.generalize' k + pure (loc, v, suggestions, cleanup expectedType) + _ -> mzero + +abilityCheckFailure + :: ErrorExtractor v loc ([C.Type v loc], [C.Type v loc], C.Context v loc) +abilityCheckFailure = cause >>= \case + C.AbilityCheckFailure ambient requested ctx -> pure (ambient, requested, ctx) + _ -> mzero + +effectConstructorWrongArgCount + :: ErrorExtractor + v + loc + (C.ExpectedArgCount, C.ActualArgCount, Reference, C.ConstructorId) +effectConstructorWrongArgCount = cause >>= \case + C.EffectConstructorWrongArgCount expected actual r cid -> + pure (expected, actual, r, cid) + _ -> mzero + +malformedEffectBind + :: ErrorExtractor v loc (C.Type v loc, C.Type v loc, [C.Type v loc]) +malformedEffectBind = cause >>= \case + C.MalformedEffectBind ctor ctorResult es -> pure (ctor, ctorResult, es) + _ -> mzero + +solvedBlank :: InfoExtractor v loc (B.Recorded loc, v, C.Type v loc) +solvedBlank = extractor $ \n -> case n of + C.SolvedBlank b v t -> pure (b, v, t) + _ -> mzero + +-- Misc -- +errorNote :: ErrorExtractor v loc (C.ErrorNote v loc) +errorNote = extractor $ Just . 
id + +infoNote :: InfoExtractor v loc (C.InfoNote v loc) +infoNote = extractor $ Just . id + +innermostTerm :: ErrorExtractor v loc (C.Term v loc) +innermostTerm = extractor $ \n -> case C.innermostErrorTerm n of + Just e -> pure e + Nothing -> mzero + +path :: ErrorExtractor v loc [C.PathElement v loc] +path = extractor $ pure . toList . C.path + +-- Informational notes -- +topLevelComponent + :: InfoExtractor + v + loc + [(v, Type v loc, RedundantTypeAnnotation)] +topLevelComponent = extractor go + where + go (C.TopLevelComponent c) = Just c + go _ = Nothing + +instance Functor (SubseqExtractor' n) where + fmap = liftM + +instance Applicative (SubseqExtractor' n) where + pure = return + (<*>) = ap + +instance MonadFail (SubseqExtractor' n) where + fail _ = mzero + +instance Monad (SubseqExtractor' n) where + return a = SubseqExtractor' $ \_ -> [Pure a] + xa >>= f = SubseqExtractor' $ \note -> + let as = runSubseq xa note in do + ra <- as + case ra of + Pure a -> runSubseq (f a) note + Ranged a startA endA -> + let rbs = runSubseq (f a) note in do + rb <- rbs + case rb of + Pure b -> pure (Ranged b startA endA) + Ranged b startB endB -> + whenM (startB == endA + 1) (pure (Ranged b startA endB)) + +instance Alternative (SubseqExtractor' n) where + empty = mzero + (<|>) = mplus + +instance MonadPlus (SubseqExtractor' n) where + mzero = SubseqExtractor' $ \_ -> [] + mplus (SubseqExtractor' f1) (SubseqExtractor' f2) = + SubseqExtractor' (\n -> f1 n `mplus` f2 n) + +instance Monoid (SubseqExtractor' n a) where + mempty = mzero + mappend = mplus + +instance Semigroup (SubseqExtractor' n a) where + (<>) = mappend diff --git a/parser-typechecker/src/Unison/Typechecker/TypeError.hs b/parser-typechecker/src/Unison/Typechecker/TypeError.hs new file mode 100644 index 0000000000..8a6def305f --- /dev/null +++ b/parser-typechecker/src/Unison/Typechecker/TypeError.hs @@ -0,0 +1,298 @@ +{-# LANGUAGE BangPatterns #-} + +module Unison.Typechecker.TypeError where + +import 
import Unison.Prelude hiding (whenM)

import           Data.Bifunctor               (second)
import           Data.List.NonEmpty           (NonEmpty)
import           Prelude hiding               (all, and, or)
import qualified Unison.ABT                   as ABT
import qualified Unison.Type                  as Type
import qualified Unison.Typechecker.Context   as C
import qualified Unison.Typechecker.Extractor as Ex
import           Unison.Util.Monoid           (whenM)
import           Unison.Var                   (Var)
import           Unison.Type                  (Type)

-- | Which boolean-typed position had a non-boolean in it.
data BooleanMismatch = CondMismatch | AndMismatch | OrMismatch | GuardMismatch
  deriving Show

-- | Which construct required its branches/elements to share a type.
data ExistentialMismatch = IfBody | VectorBody | CaseBody
  deriving Show

-- | Structured typechecking errors, refined from raw 'C.ErrorNote's.
data TypeError v loc
  = Mismatch { foundType    :: C.Type v loc -- overallType1
             , expectedType :: C.Type v loc -- overallType2
             , foundLeaf    :: C.Type v loc -- leaf1
             , expectedLeaf :: C.Type v loc -- leaf2
             , mismatchSite :: C.Term v loc
             , note         :: C.ErrorNote v loc
             }
  | BooleanMismatch { getBooleanMismatch :: BooleanMismatch
                    , mismatchSite :: C.Term v loc
                    , foundType :: C.Type v loc
                    , note :: C.ErrorNote v loc
                    }
  | ExistentialMismatch { getExistentialMismatch :: ExistentialMismatch
                        , expectedType :: C.Type v loc
                        , expectedLoc :: loc
                        , foundType :: C.Type v loc
                        , mismatchSite :: C.Term v loc
                        , note :: C.ErrorNote v loc
                        }
  | FunctionApplication { f :: C.Term v loc
                        , ft :: C.Type v loc
                        , arg :: C.Term v loc
                        , argNum :: Int
                        , foundType :: C.Type v loc
                        , expectedType :: C.Type v loc
                        , leafs :: Maybe (C.Type v loc, C.Type v loc) -- found, expected
                        , solvedVars :: [(v, C.Type v loc)]
                        , note :: C.ErrorNote v loc
                        }
  | NotFunctionApplication { f :: C.Term v loc
                           , ft :: C.Type v loc
                           , note :: C.ErrorNote v loc
                           }
  | AbilityCheckFailure { ambient :: [C.Type v loc]
                        , requested :: [C.Type v loc]
                        , abilityCheckFailureSite :: loc
                        , note :: C.ErrorNote v loc
                        }
  | UnguardedLetRecCycle { cycle :: [v]
                         , cycleLocs :: [loc]
                         , note :: C.ErrorNote v loc }
  | UnknownType { unknownTypeV :: v
                , typeSite :: loc
                , note :: C.ErrorNote v loc
                }
  | UnknownTerm { unknownTermV :: v
                , termSite :: loc
                , suggestions :: [C.Suggestion v loc]
                , expectedType :: C.Type v loc
                , note :: C.ErrorNote v loc
                }
  | DuplicateDefinitions { defns :: NonEmpty (v, [loc])
                         , note :: C.ErrorNote v loc
                         }
  | Other (C.ErrorNote v loc)
  deriving (Show)

type RedundantTypeAnnotation = Bool

-- | Structured informational notes produced by typechecking.
data TypeInfo v loc =
  TopLevelComponent
    { definitions :: [(v, Type v loc, RedundantTypeAnnotation)] }
  deriving (Show)

type TypeNote v loc = Either (TypeError v loc) (TypeInfo v loc)

-- | Refine a raw error note; falls back to 'Other' when nothing matches.
typeErrorFromNote
  :: (Ord loc, Show loc, Var v) => C.ErrorNote v loc -> TypeError v loc
typeErrorFromNote n = case Ex.extract allErrors n of
  Just msg -> msg
  Nothing  -> Other n

-- | Refine a raw info note, if it is one we know how to present.
typeInfoFromNote
  :: (Ord loc, Show loc, Var v) => C.InfoNote v loc -> Maybe (TypeInfo v loc)
typeInfoFromNote n = case n of
  C.TopLevelComponent defs -> Just $ TopLevelComponent defs
  _                        -> Nothing

-- | All known refinements, tried in order; first success wins.
allErrors :: (Var v, Ord loc) => Ex.ErrorExtractor v loc (TypeError v loc)
allErrors = asum
  [ and
  , or
  , cond
  , matchGuard
  , ifBody
  , vectorBody
  , matchBody
  , applyingFunction
  , applyingNonFunction
  , generalMismatch
  , abilityCheckFailure
  , unguardedCycle
  , unknownType
  , unknownTerm
  , duplicateDefinitions
  ]

topLevelComponent :: Ex.InfoExtractor v a (TypeInfo v a)
topLevelComponent = do
  defs <- Ex.topLevelComponent
  pure $ TopLevelComponent defs

abilityCheckFailure :: Ex.ErrorExtractor v a (TypeError v a)
abilityCheckFailure = do
  (ambient, requested, _ctx) <- Ex.abilityCheckFailure
  e <- Ex.innermostTerm
  n <- Ex.errorNote
  pure $ AbilityCheckFailure ambient requested (ABT.annotation e) n

duplicateDefinitions :: Ex.ErrorExtractor v a (TypeError v a)
duplicateDefinitions = do
  vs <- Ex.duplicateDefinitions
  n  <- Ex.errorNote
  pure $ DuplicateDefinitions vs n

unknownType :: Ex.ErrorExtractor v loc (TypeError v loc)
unknownType = do
  (loc, v) <- Ex.unknownSymbol
  n        <- Ex.errorNote
  pure $ UnknownType v loc n

unknownTerm :: Var v => Ex.ErrorExtractor v loc (TypeError v loc)
unknownTerm = do
  (loc, v, suggs, typ) <- Ex.unknownTerm
  n                    <- Ex.errorNote
  pure $ UnknownTerm v loc suggs (Type.cleanup typ) n

generalMismatch
  :: (Var v, Ord loc) => Ex.ErrorExtractor v loc (TypeError v loc)
generalMismatch = do
  ctx <- Ex.typeMismatch
  let sub t = C.apply ctx t

      -- every InSubtype element along the note's path
      subtypes :: Ex.ErrorExtractor v loc [(C.Type v loc, C.Type v loc)]
      subtypes = do
        path <- Ex.path
        pure [ (t1, t2) | C.InSubtype t1 t2 <- path ]

      -- innermost (first) and outermost (last) subtype checks
      firstLastSubtype :: Ex.ErrorExtractor v loc ( (C.Type v loc, C.Type v loc)
                                                  , (C.Type v loc, C.Type v loc) )
      firstLastSubtype = subtypes >>= \case
        [] -> empty
        l  -> pure (head l, last l)
  n <- Ex.errorNote
  mismatchSite <- Ex.innermostTerm
  ((foundLeaf, expectedLeaf), (foundType, expectedType)) <- firstLastSubtype
  let [ft, et, fl, el] = Type.cleanups
        [sub foundType, sub expectedType, sub foundLeaf, sub expectedLeaf]
  pure $ Mismatch ft et fl el mismatchSite n

and, or, cond, matchGuard
  :: (Var v, Ord loc) => Ex.ErrorExtractor v loc (TypeError v loc)
and        = booleanMismatch0 AndMismatch (Ex.inSynthesizeApp >> Ex.inAndApp)
or         = booleanMismatch0 OrMismatch (Ex.inSynthesizeApp >> Ex.inOrApp)
cond       = booleanMismatch0 CondMismatch Ex.inIfCond
matchGuard = booleanMismatch0 GuardMismatch Ex.inMatchGuard

unguardedCycle :: Ex.ErrorExtractor v loc (TypeError v loc)
unguardedCycle = do
  n <- Ex.errorNote
  C.UnguardedLetRecCycle vs es <- Ex.cause
  let loc = ABT.annotation . snd <$> es
  pure $ UnguardedLetRecCycle vs loc n

-- | helper function to support `and` / `or` / `cond`
booleanMismatch0
  :: (Var v, Ord loc)
  => BooleanMismatch
  -> Ex.SubseqExtractor v loc ()
  -> Ex.ErrorExtractor v loc (TypeError v loc)
booleanMismatch0 b ex = do
  n   <- Ex.errorNote
  ctx <- Ex.typeMismatch
  let sub t = C.apply ctx t
  mismatchSite <- Ex.innermostTerm
  foundType    <- Ex.unique $ do
    Ex.pathStart
    (foundType, _, _) <- inSubtypes
    void $ Ex.some Ex.inCheck
    ex
    pure $ Type.cleanup foundType
  pure (BooleanMismatch b mismatchSite (sub foundType) n)

existentialMismatch0
  :: (Var v, Ord loc)
  => ExistentialMismatch
  -> Ex.SubseqExtractor v loc loc
  -> Ex.ErrorExtractor v loc (TypeError v loc)
existentialMismatch0 em getExpectedLoc = do
  n   <- Ex.errorNote
  ctx <- Ex.typeMismatch
  let sub t = C.apply ctx t
  mismatchSite <- Ex.innermostTerm
  ([foundType, expectedType], expectedLoc) <- Ex.unique $ do
    Ex.pathStart
    subtypes@(_ : _) <- Ex.some Ex.inSubtype
    let (foundType, expectedType) = last subtypes
    void $ Ex.some Ex.inCheck
    expectedLoc <- getExpectedLoc
    pure (Type.cleanups [foundType, expectedType], expectedLoc)
  pure $ ExistentialMismatch em (sub expectedType) expectedLoc
                                (sub foundType) mismatchSite
                                -- todo : save type leaves too
                                n

ifBody, vectorBody, matchBody
  :: (Var v, Ord loc) => Ex.ErrorExtractor v loc (TypeError v loc)
ifBody     = existentialMismatch0 IfBody (Ex.inSynthesizeApp >> Ex.inIfBody)
vectorBody = existentialMismatch0 VectorBody (Ex.inSynthesizeApp >> Ex.inVector)
matchBody  = existentialMismatch0 CaseBody (Ex.inMatchBody >> Ex.inMatch)

applyingNonFunction :: Var v => Ex.ErrorExtractor v loc (TypeError v loc)
applyingNonFunction = do
  _ <- Ex.typeMismatch
  n <- Ex.errorNote
  (f, ft) <- Ex.unique $ do
    Ex.pathStart
    (arity0Type, _arg, _argNum) <- Ex.inSynthesizeApp
    (_, f, ft, args)            <- Ex.inFunctionCall
    let expectedArgCount = Type.arity ft
        foundArgCount    = length args
        -- unexpectedArgLoc = ABT.annotation arg
    whenM (expectedArgCount < foundArgCount) $ pure (f, arity0Type)
  pure $ NotFunctionApplication f (Type.cleanup ft) n

-- | Want to collect this info:
-- The `n`th argument to `f` is `foundType`, but I was expecting `expectedType`.
--
--    30 |   asdf asdf asdf
--
-- If you're curious
-- `f` has type `blah`, where
--    `a` was chosen as `A`
--    `b` was chosen as `B`
--    `c` was chosen as `C`
-- (many colors / groups)
applyingFunction
  :: forall v loc . (Var v) => Ex.ErrorExtractor v loc (TypeError v loc)
applyingFunction = do
  n   <- Ex.errorNote
  ctx <- Ex.typeMismatch
  Ex.unique $ do
    Ex.pathStart
    -- todo: make a new extractor for (some inSubtype) that pulls out the
    -- head and tail and nothing in between?
    (found, expected, leafs) <- inSubtypes
    arg <- fst . head <$> Ex.some Ex.inCheck
    (_, _, argIndex)         <- Ex.inSynthesizeApp
    (typeVars, f, ft, _args) <- Ex.inFunctionCall
    let go :: v -> Maybe (v, C.Type v loc)
        go v = (v,) . Type.getPolytype <$> C.lookupSolved ctx v
        solvedVars = catMaybes (go <$> typeVars)
    let vm = Type.cleanupVarsMap $ [ft, found, expected]
               <> (fst <$> toList leafs)
               <> (snd <$> toList leafs)
               <> (snd <$> solvedVars)
        cleanup = Type.cleanupVars1' vm . Type.cleanupAbilityLists
    pure $ FunctionApplication f (cleanup ft)
                               arg argIndex
                               (cleanup found)
                               (cleanup expected)
                               ((\(a, b) -> (cleanup a, cleanup b)) <$> leafs)
                               (second cleanup <$> solvedVars)
                               n

-- | Innermost and outermost subtype checks on the path, plus the innermost
-- pair as "leaves" when more than one check occurred.
inSubtypes :: Ex.SubseqExtractor v loc ( C.Type v loc
                                       , C.Type v loc
                                       , Maybe (C.Type v loc, C.Type v loc))
inSubtypes = do
  subtypes <- Ex.some Ex.inSubtype
  let ((found, expected), leaves) = case subtypes of
        [] -> error "unpossible: Ex.some should only succeed on nonnull output"
        [(found, expected)] -> ((found, expected), Nothing)
        _ -> (last subtypes, Just $ head subtypes)
  pure (found, expected, leaves)

--------------------------------------------------------------------------
-- (patch boundary) new file:
-- parser-typechecker/src/Unison/Typechecker/TypeLookup.hs
--------------------------------------------------------------------------

module Unison.Typechecker.TypeLookup where

import Unison.Prelude

import           Unison.Reference       (Reference)
import           Unison.Referent        (Referent)
import           Unison.Type            (Type)
import qualified Data.Map               as Map
import qualified Unison.ConstructorType as CT
import qualified Unison.DataDeclaration as DD
import           Unison.DataDeclaration (EffectDeclaration, DataDeclaration)
import qualified Unison.Referent        as Referent

-- Used for typechecking.
-- | Type information needed during typechecking: the types of term
-- references plus the data and effect declarations in scope.
data TypeLookup v a =
  TypeLookup { typeOfTerms :: Map Reference (Type v a)
             , dataDecls   :: Map Reference (DataDeclaration v a)
             , effectDecls :: Map Reference (EffectDeclaration v a) }
  deriving Show

typeOfReferent :: TypeLookup v a -> Referent -> Maybe (Type v a)
typeOfReferent tl r = case r of
  Referent.Ref r             -> typeOfTerm tl r
  Referent.Con r cid CT.Data -> typeOfDataConstructor tl r cid
  Referent.Con r cid CT.Effect -> typeOfEffectConstructor tl r cid

-- bombs if not found
unsafeConstructorType :: TypeLookup v a -> Reference -> CT.ConstructorType
unsafeConstructorType tl r = fromMaybe
  (error $ "no constructor type for " <> show r)
  (constructorType tl r)

constructorType :: TypeLookup v a -> Reference -> Maybe CT.ConstructorType
constructorType tl r =
  (const CT.Data <$> Map.lookup r (dataDecls tl)) <|>
  (const CT.Effect <$> Map.lookup r (effectDecls tl))

typeOfDataConstructor :: TypeLookup v a -> Reference -> Int -> Maybe (Type v a)
typeOfDataConstructor tl r cid = go =<< Map.lookup r (dataDecls tl)
  where go dd = DD.typeOfConstructor dd cid

typeOfEffectConstructor :: TypeLookup v a -> Reference -> Int -> Maybe (Type v a)
typeOfEffectConstructor tl r cid = go =<< Map.lookup r (effectDecls tl)
  where go dd = DD.typeOfConstructor (DD.toDataDecl dd) cid

typeOfTerm :: TypeLookup v a -> Reference -> Maybe (Type v a)
typeOfTerm tl r = Map.lookup r (typeOfTerms tl)

-- | Like 'typeOfTerm', but reports the missing reference on the 'Left'.
typeOfTerm' :: TypeLookup v a -> Reference -> Either Reference (Type v a)
typeOfTerm' tl r = maybe (Left r) Right (typeOfTerm tl r)

-- Canonical instances: (<>) lives in Semigroup; Monoid's mappend defaults
-- to (<>). (Previously (<>) = mappend, which GHC flags as noncanonical.)
instance Semigroup (TypeLookup v a) where
  TypeLookup a b c <> TypeLookup a2 b2 c2 =
    TypeLookup (a <> a2) (b <> b2) (c <> c2)

instance Monoid (TypeLookup v a) where
  mempty = TypeLookup mempty mempty mempty

instance Functor (TypeLookup v) where
  fmap f tl =
    TypeLookup
      (fmap f <$> typeOfTerms tl)
      (fmap f <$> dataDecls tl)
      (fmap f <$> effectDecls tl)

--------------------------------------------------------------------------
-- (patch boundary) new file:
-- parser-typechecker/src/Unison/Typechecker/TypeVar.hs
--------------------------------------------------------------------------

{-# LANGUAGE PatternSynonyms #-}

module Unison.Typechecker.TypeVar where

import qualified Data.Set    as Set
import qualified Unison.ABT  as ABT
import qualified Unison.Term as Term
import           Unison.Term (Term, Term')
import           Unison.Type (Type)
import           Unison.Var  (Var)
import qualified Unison.Var  as Var

-- | A type variable tagged as universally or existentially quantified.
-- The @b@ payload on 'Existential' is ignored by Eq/Ord/Show.
data TypeVar b v = Universal v | Existential b v deriving (Functor)

instance Eq v => Eq (TypeVar b v) where
  Universal v == Universal v2 = v == v2
  Existential _ v == Existential _ v2 = v == v2
  _ == _ = False

instance Ord v => Ord (TypeVar b v) where
  Universal v `compare` Universal v2 = compare v v2
  Existential _ v `compare` Existential _ v2 = compare v v2
  Universal _ `compare` Existential _ _ = LT
  _ `compare` _ = GT

underlying :: TypeVar b v -> v
underlying (Universal v) = v
underlying (Existential _ v) = v

instance Show v => Show (TypeVar b v) where
  show (Universal v) = show v
  show (Existential _ v) = "'" ++ show v

instance ABT.Var v => ABT.Var (TypeVar b v) where
  freshIn s v = ABT.freshIn (Set.map underlying s) <$> v

instance Var v => Var (TypeVar b v) where
  typed t = Universal (Var.typed t)
  typeOf v = Var.typeOf (underlying v)
  freshId v = Var.freshId (underlying v)
  freshenId id v = Var.freshenId id <$> v

liftType :: Ord v => Type v a -> Type (TypeVar b v) a
liftType = ABT.vmap Universal

lowerType :: Ord v => Type (TypeVar b v) a -> Type v a
lowerType = ABT.vmap underlying

liftTerm :: Ord v => Term v a -> Term' (TypeVar b v) v a
liftTerm = Term.vtmap Universal

lowerTerm :: Ord v => Term' (TypeVar b v) v a -> Term v a
lowerTerm = Term.vtmap underlying

-- (patch boundary) new file: parser-typechecker/src/Unison/UnisonFile.hs
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE PatternSynonyms #-}

module Unison.UnisonFile where

import Unison.Prelude

import           Control.Lens
import           Data.Bifunctor          (second, first)
import qualified Data.Map                as Map
import qualified Data.Set                as Set
import qualified Unison.ABT              as ABT
import qualified Unison.ConstructorType  as CT
import           Unison.DataDeclaration  (DataDeclaration)
import           Unison.DataDeclaration  (EffectDeclaration (..))
import           Unison.DataDeclaration  (hashDecls)
import qualified Unison.DataDeclaration  as DD
import qualified Unison.Builtin.Decls    as DD
import qualified Unison.Name             as Name
import qualified Unison.Names3           as Names
import           Unison.Reference        (Reference)
import qualified Unison.Reference        as Reference
import qualified Unison.Referent         as Referent
import           Unison.Term             (Term)
import qualified Unison.Term             as Term
import           Unison.Type             (Type)
import qualified Unison.Type             as Type
import qualified Unison.Util.List        as List
import           Unison.Util.Relation    (Relation)
import qualified Unison.Util.Relation    as Relation
import           Unison.Var              (Var)
import qualified Unison.Var              as Var
import qualified Unison.Typechecker.TypeLookup as TL
import           Unison.Names3           (Names0)
import qualified Unison.LabeledDependency as LD
import           Unison.LabeledDependency (LabeledDependency)
-- import qualified Unison.Typechecker.Components as Components

-- | A parsed Unison file, prior to typechecking. Declarations are keyed by
-- the hash-derived 'Reference.Id' they were assigned.
data UnisonFile v a = UnisonFileId {
  dataDeclarationsId :: Map v (Reference.Id, DataDeclaration v a),
  effectDeclarationsId :: Map v (Reference.Id, EffectDeclaration v a),
  terms :: [(v, Term v a)],
  watches :: Map WatchKind [(v, Term v a)]
} deriving Show

-- View pattern presenting declarations with full 'Reference's.
pattern UnisonFile ds es tms ws <-
  UnisonFileId (fmap (first Reference.DerivedId) -> ds)
               (fmap (first Reference.DerivedId) -> es)
               tms
               ws
{-# COMPLETE UnisonFile #-}

dataDeclarations :: UnisonFile v a -> Map v (Reference, DataDeclaration v a)
dataDeclarations = fmap (first Reference.DerivedId) . dataDeclarationsId

effectDeclarations :: UnisonFile v a -> Map v (Reference, EffectDeclaration v a)
effectDeclarations = fmap (first Reference.DerivedId) . effectDeclarationsId

watchesOfKind :: WatchKind -> UnisonFile v a -> [(v, Term v a)]
watchesOfKind kind uf = Map.findWithDefault [] kind (watches uf)

watchesOfOtherKinds :: WatchKind -> UnisonFile v a -> [(v, Term v a)]
watchesOfOtherKinds kind uf =
  join [ ws | (k, ws) <- Map.toList (watches uf), k /= kind ]

allWatches :: UnisonFile v a -> [(v, Term v a)]
allWatches = join . Map.elems . watches

type WatchKind = Var.WatchKind
pattern RegularWatch = Var.RegularWatch
pattern TestWatch = Var.TestWatch

-- Converts a file to a single let rec with a body of `()`, for
-- purposes of typechecking.
typecheckingTerm :: (Var v, Monoid a) => UnisonFile v a -> Term v a
typecheckingTerm uf =
  Term.letRec' True (terms uf <> testWatches <> watchesOfOtherKinds TestWatch uf) $
    DD.unitTerm mempty
  where
  -- we make sure each test has type Test.Result
  f w = let wa = ABT.annotation w in Term.ann wa w (DD.testResultType wa)
  testWatches = map (second f) $ watchesOfKind TestWatch uf

-- Converts a file and a body to a single let rec with the given body.
uberTerm' :: (Var v, Monoid a) => UnisonFile v a -> Term v a -> Term v a
uberTerm' uf body =
  Term.letRec' True (terms uf <> allWatches uf) $ body

-- A UnisonFile after typechecking. Terms are split into groups by
-- cycle and the type of each term is known.
data TypecheckedUnisonFile v a =
  TypecheckedUnisonFileId {
    dataDeclarationsId' :: Map v (Reference.Id, DataDeclaration v a),
    effectDeclarationsId' :: Map v (Reference.Id, EffectDeclaration v a),
    topLevelComponents' :: [[(v, Term v a, Type v a)]],
    watchComponents :: [(WatchKind, [(v, Term v a, Type v a)])],
    hashTermsId :: Map v (Reference.Id, Term v a, Type v a)
  } deriving Show

-- backwards compatibility with the old data type
dataDeclarations' :: TypecheckedUnisonFile v a -> Map v (Reference, DataDeclaration v a)
dataDeclarations' = fmap (first Reference.DerivedId) . dataDeclarationsId'

effectDeclarations' :: TypecheckedUnisonFile v a -> Map v (Reference, EffectDeclaration v a)
effectDeclarations' = fmap (first Reference.DerivedId) . effectDeclarationsId'

hashTerms :: TypecheckedUnisonFile v a -> Map v (Reference, Term v a, Type v a)
hashTerms = fmap (over _1 Reference.DerivedId) . hashTermsId

{-# COMPLETE TypecheckedUnisonFile #-}
pattern TypecheckedUnisonFile ds es tlcs wcs hts <-
  TypecheckedUnisonFileId (fmap (first Reference.DerivedId) -> ds)
                          (fmap (first Reference.DerivedId) -> es)
                          tlcs
                          wcs
                          (fmap (over _1 Reference.DerivedId) -> hts)

-- todo: this is confusing, right?
-- currently: create a degenerate TypecheckedUnisonFile
-- multiple definitions of "top-level components" non-watch vs w/ watch
typecheckedUnisonFile :: Var v
                      => Map v (Reference.Id, DataDeclaration v a)
                      -> Map v (Reference.Id, EffectDeclaration v a)
                      -> [[(v, Term v a, Type v a)]]
                      -> [(WatchKind, [(v, Term v a, Type v a)])]
                      -> TypecheckedUnisonFile v a
typecheckedUnisonFile datas effects tlcs watches =
  file0 { hashTermsId = hashImpl file0 }
  where
  file0 = TypecheckedUnisonFileId datas effects tlcs watches mempty
  hashImpl file =
    let
      -- test watches are added to the codebase also
      -- todo: maybe other kinds of watches too
      components = topLevelComponents file
      types  = Map.fromList [ (v, t) | (v, _, t) <- join components ]
      terms0 = Map.fromList [ (v, e) | (v, e, _) <- join components ]
      hcs = Term.hashComponents terms0
    in Map.fromList [ (v, (r, e, t)) | (v, (r, e)) <- Map.toList hcs
                                     , Just t <- [Map.lookup v types] ]

lookupDecl :: Ord v => v -> TypecheckedUnisonFile v a
           -> Maybe (Reference.Id, DD.Decl v a)
lookupDecl v uf =
  over _2 Right <$> (Map.lookup v (dataDeclarationsId' uf)) <|>
  over _2 Left  <$> (Map.lookup v (effectDeclarationsId' uf))

allTerms :: Ord v => TypecheckedUnisonFile v a -> Map v (Term v a)
allTerms uf =
  Map.fromList [ (v, t) | (v, t, _) <- join $ topLevelComponents' uf ]

topLevelComponents :: TypecheckedUnisonFile v a -> [[(v, Term v a, Type v a)]]
topLevelComponents file =
  topLevelComponents' file
    ++ [ comp | (TestWatch, comp) <- watchComponents file ]

getDecl' :: Ord v => TypecheckedUnisonFile v a -> v -> Maybe (DD.Decl v a)
getDecl' uf v =
  (Right . snd <$> Map.lookup v (dataDeclarations' uf)) <|>
  (Left . snd <$> Map.lookup v (effectDeclarations' uf))

-- External type references that appear in the types of the file's terms
termSignatureExternalLabeledDependencies
  :: Ord v => TypecheckedUnisonFile v a -> Set LabeledDependency
termSignatureExternalLabeledDependencies
  (TypecheckedUnisonFile dataDeclarations' effectDeclarations' _ _ hashTerms) =
    Set.difference
      (Set.map LD.typeRef
        . foldMap Type.dependencies
        . fmap (\(_r, _e, t) -> t)
        . toList
        $ hashTerms)
      -- exclude any references that are defined in this file
      (Set.fromList $
        (map (LD.typeRef . fst) . toList) dataDeclarations' <>
        (map (LD.typeRef . fst) . toList) effectDeclarations')

-- Returns a relation for the dependencies of this file. The domain is
-- the dependent, and the range is its dependencies, thus:
-- `R.lookupDom r (dependencies file)` returns the set of dependencies
-- of the reference `r`.
dependencies'
  :: forall v a . Var v
  => TypecheckedUnisonFile v a -> Relation Reference.Id Reference
dependencies' file =
  let
    terms :: Map v (Reference.Id, Term v a, Type v a)
    terms = hashTermsId file
    decls :: Map v (Reference.Id, DataDeclaration v a)
    decls = dataDeclarationsId' file
              <> fmap (second toDataDecl) (effectDeclarationsId' file)
    termDeps = foldl' f Relation.empty $ toList terms
    allDeps  = foldl' g termDeps $ toList decls
    f acc (r, tm, tp) = acc <> tmDeps <> tpDeps
      where
      tmDeps = Relation.fromList [ (r, dep) | dep <- toList (Term.dependencies tm) ]
      tpDeps = Relation.fromList [ (r, dep) | dep <- toList (Type.dependencies tp) ]
    g acc (r, decl) = acc <> ctorDeps
      where
      ctorDeps = Relation.fromList
        [ (r, dep) | (_, _, tp) <- DD.constructors' decl
                   , dep <- toList (Type.dependencies tp) ]
  in allDeps

-- Returns the dependencies of the `UnisonFile` input. Needed so we can
-- load information about these dependencies before starting typechecking.
dependencies :: (Monoid a, Var v) => UnisonFile v a -> Set Reference
dependencies (UnisonFile ds es ts ws) =
  foldMap (DD.dependencies . snd) ds
    <> foldMap (DD.dependencies . DD.toDataDecl . snd) es
    <> foldMap (Term.dependencies . snd) ts
    <> foldMap (foldMap (Term.dependencies . snd)) ws

discardTypes :: TypecheckedUnisonFile v a -> UnisonFile v a
discardTypes (TypecheckedUnisonFileId datas effects terms watches _) =
  let watches' = g . mconcat <$> List.multimap watches
      g tup3s = [ (v, e) | (v, e, _t) <- tup3s ]
  in UnisonFileId datas effects [ (a, b) | (a, b, _) <- join terms ] watches'

declsToTypeLookup :: Var v => UnisonFile v a -> TL.TypeLookup v a
declsToTypeLookup uf = TL.TypeLookup mempty
                                     (wrangle (dataDeclarations uf))
                                     (wrangle (effectDeclarations uf))
  where wrangle = Map.fromList . Map.elems

toNames :: Var v => UnisonFile v a -> Names0
toNames uf = datas <> effects
  where
  datas   = foldMap DD.dataDeclToNames' (Map.toList (dataDeclarationsId uf))
  effects = foldMap DD.effectDeclToNames' (Map.toList (effectDeclarationsId uf))

typecheckedToNames0 :: Var v => TypecheckedUnisonFile v a -> Names0
typecheckedToNames0 uf = Names.names0 (terms <> ctors) types
  where
  terms = Relation.fromList
    [ (Name.fromVar v, Referent.Ref r)
    | (v, (r, _, _)) <- Map.toList $ hashTerms uf ]
  types = Relation.fromList
    [ (Name.fromVar v, r)
    | (v, r) <- Map.toList $ fmap fst (dataDeclarations' uf)
                          <> fmap fst (effectDeclarations' uf) ]
  ctors = Relation.fromMap
    . Map.mapKeys Name.fromVar
    . fmap (fmap Reference.DerivedId)
    . hashConstructors
    $ uf

typecheckedUnisonFile0 :: Ord v => TypecheckedUnisonFile v a
typecheckedUnisonFile0 =
  TypecheckedUnisonFileId Map.empty Map.empty mempty mempty mempty

-- Returns true if the file has any definitions or watches
nonEmpty :: TypecheckedUnisonFile v a -> Bool
nonEmpty uf =
  not (Map.null (dataDeclarations' uf))
    || not (Map.null (effectDeclarations' uf))
    || any (not . null) (topLevelComponents' uf)
    || any (not . null) (watchComponents uf)

hashConstructors
  :: forall v a . Ord v => TypecheckedUnisonFile v a -> Map v Referent.Id
hashConstructors file =
  let ctors1 = Map.elems (dataDeclarationsId' file) >>= \(ref, dd) ->
        [ (v, Referent.Con' ref i CT.Data)
        | (v, i) <- DD.constructorVars dd `zip` [0 ..] ]
      ctors2 = Map.elems (effectDeclarationsId' file) >>= \(ref, dd) ->
        [ (v, Referent.Con' ref i CT.Effect)
        | (v, i) <- DD.constructorVars (DD.toDataDecl dd) `zip` [0 ..] ]
  in Map.fromList (ctors1 ++ ctors2)

type CtorLookup = Map String (Reference, Int)

-- Substitutes free type and term variables occurring in the terms of this
-- `UnisonFile` using `externalNames`.
--
-- Hash-qualified names are substituted during parsing, but non-HQ names are
-- substituted at the end of parsing, since they can be locally bound. Example, in
-- `x -> x + math.sqrt 2`, we don't know if `math.sqrt` is locally bound until
-- we are done parsing, whereas `math.sqrt#abc` can be resolved immediately
-- as it can't refer to a local definition.
bindNames :: Var v
          => Names0
          -> UnisonFile v a
          -> Names.ResolutionResult v a (UnisonFile v a)
bindNames names (UnisonFileId d e ts ws) = do
  -- todo: consider having some kind of binding structure for terms & watches
  -- so that you don't weirdly have free vars to tiptoe around.
  -- The free vars should just be the things that need to be bound externally.
  let termVars = (fst <$> ts) ++ (Map.elems ws >>= map fst)
      termVarsSet = Set.fromList termVars
  -- todo: can we clean up this lambda using something like `second`
  ts' <- traverse (\(v, t) -> (v,) <$> Term.bindNames termVarsSet names t) ts
  ws' <- traverse (traverse (\(v, t) -> (v,) <$> Term.bindNames termVarsSet names t)) ws
  pure $ UnisonFileId d e ts' ws'

constructorType
  :: Var v => UnisonFile v a -> Reference -> Maybe CT.ConstructorType
constructorType = TL.constructorType . declsToTypeLookup

data Env v a = Env
  -- Data declaration name to hash and its fully resolved form
  { datasId :: Map v (Reference.Id, DataDeclaration v a)
  -- Effect declaration name to hash and its fully resolved form
  , effectsId :: Map v (Reference.Id, EffectDeclaration v a)
  -- Naming environment
  , names :: Names0
}

datas :: Env v a -> Map v (Reference, DataDeclaration v a)
datas = fmap (first Reference.DerivedId) . datasId

effects :: Env v a -> Map v (Reference, EffectDeclaration v a)
effects = fmap (first Reference.DerivedId) . effectsId

data Error v a
  -- A free type variable that couldn't be resolved
  = UnknownType v a
  -- A variable which is both a data and an ability declaration
  | DupDataAndAbility v a a
  deriving (Eq, Ord, Show)

-- This function computes hashes for data and effect declarations, and
-- also returns a function for resolving strings to (Reference, ConstructorId)
-- for parsing of pattern matching
--
-- If there are duplicate declarations, the duplicated names are returned on the
-- left.
environmentFor
  :: forall v a . Var v
  => Names0
  -> Map v (DataDeclaration v a)
  -> Map v (EffectDeclaration v a)
  -> Names.ResolutionResult v a (Either [Error v a] (Env v a))
environmentFor names dataDecls0 effectDecls0 = do
  let locallyBoundTypes = Map.keysSet dataDecls0 <> Map.keysSet effectDecls0
  -- data decls and effect decls may reference each other, and thus must be
  -- hashed together
  dataDecls :: Map v (DataDeclaration v a) <-
    traverse (DD.bindNames locallyBoundTypes names) dataDecls0
  effectDecls :: Map v (EffectDeclaration v a) <-
    traverse (DD.withEffectDeclM (DD.bindNames locallyBoundTypes names)) effectDecls0
  let allDecls0 :: Map v (DataDeclaration v a)
      allDecls0 = Map.union dataDecls (toDataDecl <$> effectDecls)
  hashDecls' :: [(v, Reference.Id, DataDeclaration v a)] <-
    hashDecls allDecls0
  -- then we have to pick out the dataDecls from the effectDecls
  let
    allDecls = Map.fromList [ (v, (r, de)) | (v, r, de) <- hashDecls' ]
    dataDecls' = Map.difference allDecls effectDecls
    effectDecls' = second EffectDeclaration <$> Map.difference allDecls dataDecls
    -- ctor and effect terms
    ctors = foldMap DD.dataDeclToNames' (Map.toList dataDecls')
    effects = foldMap DD.effectDeclToNames' (Map.toList effectDecls')
    names' = ctors <> effects
    overlaps =
      let
        w v dd (toDataDecl -> ed) =
          DupDataAndAbility v (DD.annotation dd) (DD.annotation ed)
      in Map.elems $ Map.intersectionWithKey w dataDecls effectDecls
    okVars = Map.keysSet allDecls0
    unknownTypeRefs = Map.elems allDecls0 >>= \dd ->
      let cts = DD.constructorTypes dd
      in cts >>= \ct ->
           [ UnknownType v a | (v, a) <- ABT.freeVarOccurrences mempty ct
                             , not (Set.member v okVars) ]
  pure $
    if null overlaps && null unknownTypeRefs
      then pure $ Env dataDecls' effectDecls' names'
      else Left (unknownTypeRefs ++ overlaps)

allVars :: Ord v => UnisonFile v a -> Set v
allVars (UnisonFile ds es ts ws) = Set.unions
  [ Map.keysSet ds
  , foldMap (DD.allVars .
snd) ds + , Map.keysSet es + , foldMap (DD.allVars . toDataDecl . snd) es + , Set.unions [ Set.insert v (Term.allVars t) | (v, t) <- ts ] + , Set.unions [ Set.insert v (Term.allVars t) | (v, t) <- join . Map.elems $ ws ] + ] diff --git a/parser-typechecker/src/Unison/Util/AnnotatedText.hs b/parser-typechecker/src/Unison/Util/AnnotatedText.hs new file mode 100644 index 0000000000..3b537516bf --- /dev/null +++ b/parser-typechecker/src/Unison/Util/AnnotatedText.hs @@ -0,0 +1,199 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE TypeFamilies #-} + +module Unison.Util.AnnotatedText where + +import Unison.Prelude + +import qualified Data.List as L +import qualified Data.Foldable as Foldable +import qualified Data.Map as Map +import Data.Sequence (Seq ((:|>), (:<|))) +import qualified Data.Sequence as Seq +import Data.Tuple.Extra (second) +import Unison.Lexer (Line, Pos (..)) +import Unison.Util.Monoid (intercalateMap) +import Unison.Util.Range (Range (..), inRange) +import qualified Data.ListLike as LL + +-- type AnnotatedText a = AnnotatedText (Maybe a) + +newtype AnnotatedText a = AnnotatedText (Seq (String, Maybe a)) + deriving (Eq, Functor, Foldable, Show) + +instance Semigroup (AnnotatedText a) where + AnnotatedText (as :|> ("", _)) <> bs = AnnotatedText as <> bs + as <> AnnotatedText (("", _) :<| bs) = as <> AnnotatedText bs + AnnotatedText as <> AnnotatedText bs = AnnotatedText (as <> bs) + +instance Monoid (AnnotatedText a) where + mempty = AnnotatedText Seq.empty + +instance LL.FoldableLL (AnnotatedText a) Char where + foldl' f z (AnnotatedText at) = Foldable.foldl' f' z at where + f' z (str, _) = L.foldl' f z str + foldl = LL.foldl + foldr f z (AnnotatedText at) = Foldable.foldr f' z at where + f' (str, _) z = L.foldr f z str + +instance LL.ListLike (AnnotatedText a) Char where + singleton ch = fromString [ch] + uncons (AnnotatedText at) = case at of + 
(s,a) :<| tl -> case L.uncons s of + Nothing -> LL.uncons (AnnotatedText tl) + Just (hd,s) -> Just (hd, AnnotatedText $ (s,a) :<| tl) + Seq.Empty -> Nothing + break f at = (LL.takeWhile (not . f) at, LL.dropWhile (not . f) at) + takeWhile f (AnnotatedText at) = case at of + Seq.Empty -> AnnotatedText Seq.Empty + (s,a) :<| tl -> let s' = L.takeWhile f s in + if length s' == length s then + AnnotatedText (pure (s,a)) <> LL.takeWhile f (AnnotatedText tl) + else + AnnotatedText (pure (s',a)) + dropWhile f (AnnotatedText at) = case at of + Seq.Empty -> AnnotatedText Seq.Empty + (s,a) :<| tl -> case L.dropWhile f s of + [] -> LL.dropWhile f (AnnotatedText tl) + s -> AnnotatedText $ (s,a) :<| tl + take n (AnnotatedText at) = case at of + Seq.Empty -> AnnotatedText Seq.Empty + (s,a) :<| tl -> + if n <= length s then AnnotatedText $ pure (take n s, a) + else AnnotatedText (pure (s,a)) <> + LL.take (n - length s) (AnnotatedText tl) + drop n (AnnotatedText at) = case at of + Seq.Empty -> AnnotatedText Seq.Empty + (s,a) :<| tl -> + if n <= length s then AnnotatedText $ (drop n s, a) :<| tl + else LL.drop (n - length s) (AnnotatedText tl) + null (AnnotatedText at) = all (null . fst) at + + -- Quoted text (indented, with source line numbers) with annotated portions. 
+data AnnotatedExcerpt a = AnnotatedExcerpt + { lineOffset :: Line + , text :: String + , annotations :: Map Range a + } deriving (Functor) + +annotate' :: Maybe b -> AnnotatedText a -> AnnotatedText b +annotate' a (AnnotatedText at) = + AnnotatedText $ (\(s,_) -> (s, a)) <$> at + +deannotate :: AnnotatedText a -> AnnotatedText b +deannotate = annotate' Nothing + +-- Replace the annotation (whether existing or no) with the given annotation +annotate :: a -> AnnotatedText a -> AnnotatedText a +annotate a (AnnotatedText at) = + AnnotatedText $ (\(s,_) -> (s,Just a)) <$> at + +annotateMaybe :: AnnotatedText (Maybe a) -> AnnotatedText a +annotateMaybe (AnnotatedText s) = AnnotatedText (fmap (second join) s) + +trailingNewLine :: AnnotatedText a -> Bool +trailingNewLine (AnnotatedText (init :|> (s,_))) = + case lastMay s of + Just '\n' -> True + Just _ -> False + _ -> trailingNewLine (AnnotatedText init) +trailingNewLine _ = False + +markup :: AnnotatedExcerpt a -> Map Range a -> AnnotatedExcerpt a +markup a r = a { annotations = r `Map.union` annotations a } + +-- renderTextUnstyled :: AnnotatedText a -> Rendered Void +-- renderTextUnstyled (AnnotatedText chunks) = foldl' go mempty chunks +-- where go r (text, _) = r <> fromString text + +textLength :: AnnotatedText a -> Int +textLength (AnnotatedText chunks) = foldl' go 0 chunks + where go len (text, _a) = len + length text + +textEmpty :: AnnotatedText a -> Bool +textEmpty = (==0) . textLength + +condensedExcerptToText :: Int -> AnnotatedExcerpt a -> AnnotatedText a +condensedExcerptToText margin e = + intercalateMap " .\n" excerptToText $ snipWithContext margin e + +excerptToText :: forall a. 
AnnotatedExcerpt a -> AnnotatedText a +excerptToText e = + track (Pos line1 1) [] (Map.toList $ annotations e) (renderLineNumber line1) (text e) + where + line1 :: Int + line1 = lineOffset e + renderLineNumber :: Int -> AnnotatedText a + renderLineNumber n = fromString $ " " ++ spaces ++ sn ++ " | " + where sn = show n + spaces = replicate (lineNumberWidth - length sn) ' ' + lineNumberWidth = 4 + + -- step through the source characters and annotations + track _ _ _ rendered "" = rendered + track _ _ _ rendered "\n" = rendered <> "\n" + track pos@(Pos line col) stack annotations rendered _input@(c:rest) = + let + (poppedAnnotations, remainingAnnotations) = span (inRange pos . fst) annotations + -- drop any stack entries that will be closed after this char + -- and add new stack entries + stack' = foldl' pushColor stack0 poppedAnnotations + where pushColor s (Range _ end, style) = (style, end) : s + stack0 = dropWhile ((<=pos) . snd) stack + maybeColor = fst <$> headMay stack' + -- on new line, advance pos' vertically and set up line header + -- additions :: AnnotatedText (Maybe a) + pos' :: Pos + (additions, pos') = + if c == '\n' + then ("\n" <> renderLineNumber (line + 1), Pos (line + 1) 1) + else (annotate' maybeColor (fromString [c]), Pos line (col + 1)) + in track pos' stack' remainingAnnotations (rendered <> additions) rest + +snipWithContext :: Int -> AnnotatedExcerpt a -> [AnnotatedExcerpt a] +snipWithContext margin source = + case foldl' whileWithinMargin + (Nothing, mempty, mempty) + (Map.toList $ annotations source) of + (Nothing, _, _) -> [] + (Just (Range (Pos startLine' _) (Pos endLine' _)), group', rest') -> + let dropLineCount = startLine' - lineOffset source + takeLineCount = endLine' - startLine' + 1 + text', text2' :: [String] + (text', text2') = + splitAt takeLineCount (drop dropLineCount (lines (text source))) + in AnnotatedExcerpt startLine' (unlines text') group' + : snipWithContext + margin (AnnotatedExcerpt (endLine' + 1) (unlines text2') 
rest') + where + withinMargin :: Range -> Range -> Bool + withinMargin (Range _start1 (Pos end1 _)) (Range (Pos start2 _) _end2) = + end1 + margin >= start2 + + whileWithinMargin :: (Maybe Range, Map Range a, Map Range a) + -> (Range, a) + -> (Maybe Range, Map Range a, Map Range a) + whileWithinMargin (r0, taken, rest) (r1,a1) = + case r0 of + Nothing -> -- haven't processed any annotations yet + (Just r1, Map.singleton r1 a1, mempty) + Just r0 -> + -- if all annotations so far can be joined without .. separations + if null rest + -- if this one can be joined to the new region without .. separation + then if withinMargin r0 r1 + -- add it to the first set and grow the compare region + then (Just $ r0 <> r1, Map.insert r1 a1 taken, mempty) + -- otherwise add it to the second set + else (Just r0, taken, Map.singleton r1 a1) + -- once we've added to the second set, anything more goes there too + else (Just r0, taken, Map.insert r1 a1 rest) + +instance IsString (AnnotatedText a) where + fromString s = AnnotatedText . pure $ (s, Nothing) + +instance IsString (AnnotatedExcerpt a) where + fromString s = AnnotatedExcerpt 1 s mempty diff --git a/parser-typechecker/src/Unison/Util/Bytes.hs b/parser-typechecker/src/Unison/Util/Bytes.hs new file mode 100644 index 0000000000..4c9dffeecb --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Bytes.hs @@ -0,0 +1,96 @@ +{-# Language ViewPatterns #-} +{-# OPTIONS_GHC -fno-warn-orphans #-} + +module Unison.Util.Bytes where + +import Unison.Prelude hiding (empty) + +import Data.Monoid (Sum(..)) +import Prelude hiding (drop) +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL +import qualified Data.FingerTree as T + +-- Bytes type represented as a finger tree of ByteStrings. +-- Can be efficiently sliced and indexed, using the byte count +-- annotation at each subtree. 
+newtype Bytes = Bytes (T.FingerTree (Sum Int) B.ByteString) + +null :: Bytes -> Bool +null (Bytes bs) = T.null bs + +empty :: Bytes +empty = Bytes mempty + +fromByteString :: B.ByteString -> Bytes +fromByteString = snoc empty + +toByteString :: Bytes -> B.ByteString +toByteString b = B.concat (chunks b) + +size :: Bytes -> Int +size (Bytes bs) = getSum (T.measure bs) + +chunks :: Bytes -> [B.ByteString] +chunks (Bytes b) = toList b + +cons :: B.ByteString -> Bytes -> Bytes +cons b bs | B.null b = bs +cons b (Bytes bs) = Bytes (b T.<| bs) + +snoc :: Bytes -> B.ByteString -> Bytes +snoc bs b | B.null b = bs +snoc (Bytes bs) b = Bytes (bs T.|> b) + +flatten :: Bytes -> Bytes +flatten b = snoc mempty (B.concat (chunks b)) + +take :: Int -> Bytes -> Bytes +take n (Bytes bs) = go (T.split (> Sum n) bs) where + go (ok, s) = Bytes $ case T.viewl s of + last T.:< _ -> + if T.measure ok == Sum n then ok + else ok T.|> B.take (n - getSum (T.measure ok)) last + _ -> ok + +drop :: Int -> Bytes -> Bytes +drop n b0@(Bytes bs) = go (T.dropUntil (> Sum n) bs) where + go s = Bytes $ case T.viewl s of + head T.:< tail -> + if (size b0 - getSum (T.measure s)) == n then s + else B.drop (n - (size b0 - getSum (T.measure s))) head T.<| tail + _ -> s + +at :: Int -> Bytes -> Maybe Word8 +at i bs = case drop i bs of + Bytes (T.viewl -> hd T.:< _) -> Just (B.head hd) + _ -> Nothing + +toWord8s :: Bytes -> [Word8] +toWord8s bs = catMaybes [ at i bs | i <- [0..(size bs - 1)] ] + +fromWord8s :: [Word8] -> Bytes +fromWord8s bs = fromByteString (B.pack bs) + +instance Monoid Bytes where + mempty = Bytes mempty + mappend (Bytes b1) (Bytes b2) = Bytes (b1 `mappend` b2) + +instance Semigroup Bytes where (<>) = mappend + +instance T.Measured (Sum Int) B.ByteString where + measure b = Sum (B.length b) + +instance Show Bytes where + show bs = show (toWord8s bs) + +instance Eq Bytes where + b1 == b2 | size b1 == size b2 = go b1 b2 + where + go b1 b2 = BL.fromChunks (chunks b1) == BL.fromChunks (chunks 
b2) + _ == _ = False + +-- Lexicographical ordering +instance Ord Bytes where + b1 `compare` b2 = + BL.fromChunks (chunks b1) `compare` BL.fromChunks (chunks b2) diff --git a/parser-typechecker/src/Unison/Util/Cache.hs b/parser-typechecker/src/Unison/Util/Cache.hs new file mode 100644 index 0000000000..499d75f806 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Cache.hs @@ -0,0 +1,90 @@ +module Unison.Util.Cache where + +import Prelude hiding (lookup) +import Unison.Prelude +import UnliftIO (newTVarIO, modifyTVar', writeTVar, atomically, readTVar, readTVarIO) +import qualified Data.Map as Map + +data Cache m k v = + Cache { lookup :: k -> m (Maybe v) + , insert :: k -> v -> m () + } + +-- Create a cache of unbounded size. +cache :: (MonadIO m, Ord k) => m (Cache m k v) +cache = do + t <- newTVarIO Map.empty + let + lookup k = Map.lookup k <$> readTVarIO t + insert k v = do + m <- readTVarIO t + case Map.lookup k m of + Nothing -> atomically $ modifyTVar' t (Map.insert k v) + _ -> pure () + + pure $ Cache lookup insert + +nullCache :: (MonadIO m, Ord k) => m (Cache m k v) +nullCache = pure $ Cache (const (pure Nothing)) (\_ _ -> pure ()) + +-- Create a cache of bounded size. Once the cache +-- reaches a size of `maxSize`, older unused entries +-- are evicted from the cache. Unlike LRU caching, +-- where cache hits require updating LRU info, +-- cache hits here are read-only and contention free. 
+semispaceCache :: (MonadIO m, Ord k) => Word -> m (Cache m k v) +semispaceCache 0 = nullCache +semispaceCache maxSize = do + -- Analogous to semispace GC, keep 2 maps: gen0 and gen1 + -- `insert k v` is done in gen0 + -- if full, gen1 = gen0; gen0 = Map.empty + -- `lookup k` is done in gen0; then gen1 + -- if found in gen0, return immediately + -- if found in gen1, `insert k v`, then return + -- Thus, older keys not recently looked up are forgotten + gen0 <- newTVarIO Map.empty + gen1 <- newTVarIO Map.empty + let + lookup k = readTVarIO gen0 >>= \m0 -> + case Map.lookup k m0 of + Nothing -> readTVarIO gen1 >>= \m1 -> + case Map.lookup k m1 of + Nothing -> pure Nothing + Just v -> insert k v $> Just v + just -> pure just + insert k v = atomically $ do + modifyTVar' gen0 (Map.insert k v) + m0 <- readTVar gen0 + when (fromIntegral (Map.size m0) >= maxSize) $ do + writeTVar gen1 m0 + writeTVar gen0 Map.empty + pure $ Cache lookup insert + +-- Cached function application: if a key `k` is not in the cache, +-- calls `f` and inserts `f k` results in the cache. +apply :: Monad m => Cache m k v -> (k -> m v) -> k -> m v +apply c f k = lookup c k >>= \case + Just v -> pure v + Nothing -> do + v <- f k + insert c k v + pure v + +-- Cached function application which only caches values for +-- which `f k` is non-empty. For instance, if `g` is `Maybe`, +-- and `f x` returns `Nothing`, this won't be cached. +-- +-- Useful when we think that missing results for `f` may be +-- later filled in so we don't want to cache missing results. 
+applyDefined :: (Monad m, Applicative g, Traversable g) + => Cache m k v + -> (k -> m (g v)) + -> k + -> m (g v) +applyDefined c f k = lookup c k >>= \case + Just v -> pure (pure v) + Nothing -> do + v <- f k + -- only populate the cache if f returns a non-empty result + for_ v $ \v -> insert c k v + pure v diff --git a/parser-typechecker/src/Unison/Util/ColorText.hs b/parser-typechecker/src/Unison/Util/ColorText.hs new file mode 100644 index 0000000000..5cfd21b23c --- /dev/null +++ b/parser-typechecker/src/Unison/Util/ColorText.hs @@ -0,0 +1,129 @@ +module Unison.Util.ColorText ( + ColorText, Color(..), style, toANSI, toPlain, toHTML, defaultColors, + black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, underline, + module Unison.Util.AnnotatedText) +where + +import Unison.Prelude + +import qualified System.Console.ANSI as ANSI +import Unison.Util.AnnotatedText (AnnotatedText(..), annotate) +import qualified Unison.Util.SyntaxText as ST hiding (toPlain) + +type ColorText = AnnotatedText Color + +data Color + = Black | Red | Green | Yellow | Blue | Purple | Cyan | White + | HiBlack| HiRed | HiGreen | HiYellow | HiBlue | HiPurple | HiCyan | HiWhite + | Bold | Underline + deriving (Eq, Ord, Bounded, Enum, Show, Read) + +black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, underline :: ColorText -> ColorText +black = style Black +red = style Red +green = style Green +yellow = style Yellow +blue = style Blue +purple = style Purple +cyan = style Cyan +white = style White +hiBlack = style HiBlack +hiRed = style HiRed +hiGreen = style HiGreen +hiYellow = style HiYellow +hiBlue = style HiBlue +hiPurple = style HiPurple +hiCyan = style HiCyan +hiWhite = style HiWhite +bold = style Bold +underline = style Underline + +style :: Color -> ColorText -> ColorText +style = annotate + +toHTML :: String -> ColorText -> String 
+toHTML cssPrefix (AnnotatedText at) = toList at >>= \case + (s, color) -> wrap color (s >>= newlineToBreak) + where + newlineToBreak '\n' = "
\n" + newlineToBreak ch = [ch] + wrap Nothing s = "" <> s <> "" + wrap (Just c) s = + "" <> s <> "" + colorName c = "\"" <> cssPrefix <> "-" <> show c <> "\"" + +-- Convert a `ColorText` to a `String`, ignoring colors +toPlain :: ColorText -> String +toPlain (AnnotatedText at) = join (toList $ fst <$> at) + +-- Convert a `ColorText` to a `String`, using ANSI codes to produce colors +toANSI :: ColorText -> String +toANSI (AnnotatedText chunks) = + join . toList $ snd (foldl' go (Nothing, mempty) chunks) <> resetANSI + where + go + :: (Maybe Color, Seq String) + -> (String, Maybe Color) + -> (Maybe Color, Seq String) + go (prev, r) (text, new) = if prev == new + then (prev, r <> pure text) + else + ( new + , case new of + Nothing -> r <> resetANSI <> pure text + Just style -> r <> resetANSI <> toANSI style <> pure text + ) + resetANSI = pure . ANSI.setSGRCode $ [ANSI.Reset] + toANSI :: Color -> Seq String + toANSI c = pure . ANSI.setSGRCode $ case c of + Black -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Black] + Red -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Red] + Green -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Green] + Yellow -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Yellow] + Blue -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Blue] + Purple -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Magenta] + Cyan -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.Cyan] + White -> [ANSI.SetColor ANSI.Foreground ANSI.Dull ANSI.White] + HiBlack -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Black] + HiRed -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Red] + HiGreen -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Green] + HiYellow -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Yellow] + HiBlue -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Blue] + HiPurple -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Magenta] + HiCyan -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid ANSI.Cyan] + HiWhite -> [ANSI.SetColor ANSI.Foreground ANSI.Vivid 
ANSI.White] + Bold -> [ANSI.SetConsoleIntensity ANSI.BoldIntensity] + Underline -> [ANSI.SetUnderlining ANSI.SingleUnderline] + +defaultColors :: ST.Element -> Maybe Color +defaultColors = \case + ST.NumericLiteral -> Nothing + ST.TextLiteral -> Nothing + ST.CharLiteral -> Nothing + ST.BooleanLiteral -> Nothing + ST.Blank -> Nothing + ST.Var -> Nothing + ST.Reference _ -> Nothing + ST.Referent _ -> Nothing + ST.Op _ -> Nothing + ST.Unit -> Nothing + ST.Constructor -> Nothing + ST.Request -> Nothing + ST.AbilityBraces -> Just HiBlack + ST.ControlKeyword -> Just Bold + ST.LinkKeyword -> Just HiBlack + ST.TypeOperator -> Just HiBlack + ST.BindingEquals -> Nothing + ST.TypeAscriptionColon -> Just Blue + ST.DataTypeKeyword -> Nothing + ST.DataTypeParams -> Nothing + ST.DataTypeModifier -> Nothing + ST.UseKeyword -> Just HiBlack + ST.UsePrefix -> Just HiBlack + ST.UseSuffix -> Just HiBlack + ST.HashQualifier _ -> Just HiBlack + ST.DelayForceChar -> Just Yellow + ST.DelimiterChar -> Nothing + ST.Parenthesis -> Nothing + ST.DocDelimiter -> Just Green + ST.DocKeyword -> Just Bold diff --git a/parser-typechecker/src/Unison/Util/CycleTable.hs b/parser-typechecker/src/Unison/Util/CycleTable.hs new file mode 100644 index 0000000000..9792636555 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/CycleTable.hs @@ -0,0 +1,39 @@ +module Unison.Util.CycleTable where + +import Data.HashTable.IO (BasicHashTable) +import Data.Hashable (Hashable) +import qualified Data.HashTable.IO as HT +import qualified Data.Mutable as M + +-- A hash table along with a unique number which gets incremented on +-- each insert. This is used as an implementation detail by `CyclicEq`, +-- `CyclicOrd`, etc to be able to compare, hash, or serialize cyclic structures. 
+ +data CycleTable k v = + CycleTable { + table :: BasicHashTable k v, + sizeRef :: M.IOPRef Int + } + +new :: Int -> IO (CycleTable k v) +new size = do + t <- HT.newSized size + r <- M.newRef 0 + pure (CycleTable t r) + +lookup :: (Hashable k, Eq k) => k -> CycleTable k v -> IO (Maybe v) +lookup k t = HT.lookup (table t) k + +insert :: (Hashable k, Eq k) => k -> v -> CycleTable k v -> IO () +insert k v t = do + HT.insert (table t) k v + M.modifyRef (sizeRef t) (1 +) + +size :: CycleTable k v -> IO Int +size h = M.readRef (sizeRef h) + +insertEnd :: (Hashable k, Eq k) => k -> CycleTable k Int -> IO () +insertEnd k t = do + n <- size t + insert k n t + diff --git a/parser-typechecker/src/Unison/Util/CyclicEq.hs b/parser-typechecker/src/Unison/Util/CyclicEq.hs new file mode 100644 index 0000000000..46cb72c6bd --- /dev/null +++ b/parser-typechecker/src/Unison/Util/CyclicEq.hs @@ -0,0 +1,60 @@ +{-# Language BangPatterns #-} +{-# Language Strict #-} +{-# Language StrictData #-} + +module Unison.Util.CyclicEq where + +import Unison.Prelude + +import Data.Vector (Vector) +import qualified Data.Vector as V +import qualified Data.Sequence as S +import qualified Unison.Util.CycleTable as CT + +{- + Typeclass used for comparing potentially cyclic types for equality. + Cyclic types may refer to themselves indirectly, so something is needed to + prevent an infinite loop in these cases. The basic idea: when a subexpression + is first examined, its "id" (represented as some `Int`) may be added to the + mutable hash table along with its position. The next time that same id is + encountered, it will be compared based on this position. 
+ -} +class CyclicEq a where + -- Map from `Ref` ID to position in the stream + -- If a ref is encountered again, we use its mapped ID + cyclicEq :: CT.CycleTable Int Int -> CT.CycleTable Int Int -> a -> a -> IO Bool + +bothEq' :: (Eq a, CyclicEq b) => CT.CycleTable Int Int -> CT.CycleTable Int Int + -> a -> a -> b -> b -> IO Bool +bothEq' h1 h2 a1 a2 b1 b2 = + if a1 == a2 then cyclicEq h1 h2 b1 b2 + else pure False + +bothEq :: + (CyclicEq a, CyclicEq b) => CT.CycleTable Int Int -> CT.CycleTable Int Int + -> a -> a -> b -> b -> IO Bool +bothEq h1 h2 a1 a2 b1 b2 = cyclicEq h1 h2 a1 a2 >>= \b -> + if b then cyclicEq h1 h2 b1 b2 + else pure False + +instance CyclicEq a => CyclicEq [a] where + cyclicEq h1 h2 (x:xs) (y:ys) = bothEq h1 h2 x y xs ys + cyclicEq _ _ [] [] = pure True + cyclicEq _ _ _ _ = pure False + +instance CyclicEq a => CyclicEq (S.Seq a) where + cyclicEq h1 h2 xs ys = + if S.length xs == S.length ys then cyclicEq h1 h2 (toList xs) (toList ys) + else pure False + +instance CyclicEq a => CyclicEq (Vector a) where + cyclicEq h1 h2 xs ys = + if V.length xs /= V.length ys then pure False + else go 0 h1 h2 xs ys + where + go !i !h1 !h2 !xs !ys = + if i >= V.length xs then pure True + else do + b <- cyclicEq h1 h2 (xs V.! i) (ys V.! i) + if b then go (i + 1) h1 h2 xs ys + else pure False diff --git a/parser-typechecker/src/Unison/Util/CyclicOrd.hs b/parser-typechecker/src/Unison/Util/CyclicOrd.hs new file mode 100644 index 0000000000..6896110ed7 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/CyclicOrd.hs @@ -0,0 +1,54 @@ +{-# Language BangPatterns #-} +{-# Language Strict #-} +{-# Language StrictData #-} + +module Unison.Util.CyclicOrd where + +import Unison.Prelude + +import Data.Vector (Vector) +import Unison.Util.CycleTable (CycleTable) +import qualified Data.Vector as V +import qualified Data.Sequence as S +import qualified Unison.Util.CycleTable as CT + +-- Same idea as `CyclicEq`, but for ordering. 
+class CyclicOrd a where + -- Map from `Ref` ID to position in the stream + -- If a ref is encountered again, we use its mapped ID + cyclicOrd :: CycleTable Int Int -> CycleTable Int Int -> a -> a -> IO Ordering + +bothOrd' :: + (Ord a, CyclicOrd b) => CT.CycleTable Int Int -> CT.CycleTable Int Int + -> a -> a -> b -> b -> IO Ordering +bothOrd' h1 h2 a1 a2 b1 b2 = case compare a1 a2 of + EQ -> cyclicOrd h1 h2 b1 b2 + c -> pure c + +bothOrd :: + (CyclicOrd a, CyclicOrd b) => CT.CycleTable Int Int -> CT.CycleTable Int Int + -> a -> a -> b -> b -> IO Ordering +bothOrd h1 h2 a1 a2 b1 b2 = cyclicOrd h1 h2 a1 a2 >>= \b -> + if b == EQ then cyclicOrd h1 h2 b1 b2 + else pure b + +instance CyclicOrd a => CyclicOrd [a] where + cyclicOrd h1 h2 (x:xs) (y:ys) = bothOrd h1 h2 x y xs ys + cyclicOrd _ _ [] [] = pure EQ + cyclicOrd _ _ [] _ = pure LT + cyclicOrd _ _ _ [] = pure GT + +instance CyclicOrd a => CyclicOrd (S.Seq a) where + cyclicOrd h1 h2 xs ys = cyclicOrd h1 h2 (toList xs) (toList ys) + +instance CyclicOrd a => CyclicOrd (Vector a) where + cyclicOrd h1 h2 xs ys = go 0 h1 h2 xs ys + where + go !i !h1 !h2 !xs !ys = + if i >= V.length xs && i >= V.length ys then pure EQ + else if i >= V.length xs then pure LT + else if i >= V.length ys then pure GT + else do + b <- cyclicOrd h1 h2 (xs V.! i) (ys V.! i) + if b == EQ then go (i + 1) h1 h2 xs ys + else pure b diff --git a/parser-typechecker/src/Unison/Util/EnumContainers.hs b/parser-typechecker/src/Unison/Util/EnumContainers.hs new file mode 100644 index 0000000000..a00134a21c --- /dev/null +++ b/parser-typechecker/src/Unison/Util/EnumContainers.hs @@ -0,0 +1,115 @@ +{-# language DeriveTraversable #-} +{-# language GeneralizedNewtypeDeriving #-} + +module Unison.Util.EnumContainers + ( EnumMap + , EnumSet + , EnumKey(..) 
+ , mapFromList + , setFromList + , mapSingleton + , setSingleton + , mapInsert + , unionWith + , keys + , restrictKeys + , withoutKeys + , member + , lookup + , lookupWithDefault + , foldMapWithKey + , mapToList + , (!) + , findMin + ) where + +import Prelude hiding (lookup) + +import Data.Bifunctor +import Data.Word (Word64,Word16) + +import qualified Data.IntSet as IS +import qualified Data.IntMap.Strict as IM + +class EnumKey k where + keyToInt :: k -> Int + intToKey :: Int -> k + +instance EnumKey Word64 where + keyToInt e = fromIntegral e + intToKey i = fromIntegral i + +instance EnumKey Word16 where + keyToInt e = fromIntegral e + intToKey i = fromIntegral i + +newtype EnumMap k a = EM (IM.IntMap a) + deriving + ( Monoid + , Semigroup + , Functor + , Foldable + , Traversable + , Show + , Eq + , Ord + ) + +newtype EnumSet k = ES IS.IntSet + deriving + ( Monoid + , Semigroup + , Show + , Eq + , Ord + ) + +mapFromList :: EnumKey k => [(k, a)] -> EnumMap k a +mapFromList = EM . IM.fromList . fmap (first keyToInt) + +setFromList :: EnumKey k => [k] -> EnumSet k +setFromList = ES . IS.fromList . fmap keyToInt + +mapSingleton :: EnumKey k => k -> a -> EnumMap k a +mapSingleton e a = EM $ IM.singleton (keyToInt e) a + +setSingleton :: EnumKey k => k -> EnumSet k +setSingleton e = ES . IS.singleton $ keyToInt e + +mapInsert :: EnumKey k => k -> a -> EnumMap k a -> EnumMap k a +mapInsert e x (EM m) = EM $ IM.insert (keyToInt e) x m + +unionWith + :: EnumKey k => EnumKey k + => (a -> a -> a) -> EnumMap k a -> EnumMap k a -> EnumMap k a +unionWith f (EM l) (EM r) = EM $ IM.unionWith f l r + +keys :: EnumKey k => EnumMap k a -> [k] +keys (EM m) = fmap intToKey . 
IM.keys $ m + +restrictKeys :: EnumKey k => EnumMap k a -> EnumSet k -> EnumMap k a +restrictKeys (EM m) (ES s) = EM $ IM.restrictKeys m s + +withoutKeys :: EnumKey k => EnumMap k a -> EnumSet k -> EnumMap k a +withoutKeys (EM m) (ES s) = EM $ IM.withoutKeys m s + +member :: EnumKey k => k -> EnumSet k -> Bool +member e (ES s) = IS.member (keyToInt e) s + +lookup :: EnumKey k => k -> EnumMap k a -> Maybe a +lookup e (EM m) = IM.lookup (keyToInt e) m + +lookupWithDefault :: EnumKey k => a -> k -> EnumMap k a -> a +lookupWithDefault d e (EM m) = IM.findWithDefault d (keyToInt e) m + +foldMapWithKey :: EnumKey k => Monoid m => (k -> a -> m) -> EnumMap k a -> m +foldMapWithKey f (EM m) = IM.foldMapWithKey (f . intToKey) m + +mapToList :: EnumKey k => EnumMap k a -> [(k, a)] +mapToList (EM m) = first intToKey <$> IM.toList m + +(!) :: EnumKey k => EnumMap k a -> k -> a +EM m ! e = m IM.! keyToInt e + +findMin :: EnumKey k => EnumSet k -> k +findMin (ES s) = intToKey $ IS.findMin s diff --git a/parser-typechecker/src/Unison/Util/Exception.hs b/parser-typechecker/src/Unison/Util/Exception.hs new file mode 100644 index 0000000000..c4db7c2f7a --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Exception.hs @@ -0,0 +1,16 @@ +module Unison.Util.Exception where + +import Unison.Prelude + +import Control.Concurrent.Async (withAsync, waitCatch) + +-- These are adapted from: https://github.com/snoyberg/classy-prelude/blob/ccd19f2c62882c69d5dcdd3da5c0df1031334c5a/classy-prelude/ClassyPrelude.hs#L320 +-- License is MIT: https://github.com/snoyberg/classy-prelude/blob/ccd19f2c62882c69d5dcdd3da5c0df1031334c5a/classy-prelude/LICENSE + +-- Catch all exceptions except asynchronous exceptions. +tryAny :: MonadIO m => IO a -> m (Either SomeException a) +tryAny action = liftIO $ withAsync action waitCatch + +-- Catch all exceptions except asynchronous exceptions. 
+catchAny :: IO a -> (SomeException -> IO a) -> IO a +catchAny action onE = tryAny action >>= either onE return diff --git a/parser-typechecker/src/Unison/Util/Find.hs b/parser-typechecker/src/Unison/Util/Find.hs new file mode 100644 index 0000000000..089e44f850 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Find.hs @@ -0,0 +1,188 @@ +{-# LANGUAGE ViewPatterns #-} + +module Unison.Util.Find ( + fuzzyFinder, simpleFuzzyFinder, simpleFuzzyScore, fuzzyFindInBranch, fuzzyFindMatchArray, prefixFindInBranch + ) where + +import Unison.Prelude + +import qualified Data.Char as Char +import qualified Data.List as List +import qualified Data.Text as Text +-- http://www.serpentine.com/blog/2007/02/27/a-haskell-regular-expression-tutorial/ +-- https://www.stackage.org/haddock/lts-13.9/regex-base-0.93.2/Text-Regex-Base-Context.html -- re-exported by TDFA +-- https://www.stackage.org/haddock/lts-13.9/regex-tdfa-1.2.3.1/Text-Regex-TDFA.html +import qualified Text.Regex.TDFA as RE +import Unison.Codebase.SearchResult (SearchResult) +import qualified Unison.Codebase.SearchResult as SR +import Unison.HashQualified' (HashQualified) +import qualified Unison.HashQualified' as HQ +import qualified Unison.Name as Name +import qualified Unison.Names2 as Names +import Unison.Names2 ( Names0 ) +import Unison.NamePrinter (prettyHashQualified') +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import qualified Unison.ShortHash as SH +import Unison.Util.Monoid (intercalateMap) +import qualified Unison.Util.Pretty as P +import qualified Unison.Util.Relation as R + + +fuzzyFinder :: forall a. + String -> [a] -> (a -> String) -> [(a, P.Pretty P.ColorText)] +fuzzyFinder query items render = + sortAndCleanup $ fuzzyFindMatchArray query items render + where + sortAndCleanup = List.map snd . List.sortOn fst + +simpleFuzzyFinder :: forall a. 
+ String -> [a] -> (a -> String) -> [(a, P.Pretty P.ColorText)] +simpleFuzzyFinder query items render = + sortAndCleanup $ do + a <- items + let s = render a + score <- toList (simpleFuzzyScore query s) + pure ((a, hi s), score) + where + hi = highlightSimple query + sortAndCleanup = List.map fst . List.sortOn snd + +-- highlights `query` if it is a prefix of `s`, or if it +-- appears in the final segement of s (after the final `.`) +highlightSimple :: String -> String -> P.Pretty P.ColorText +highlightSimple "" = P.string +highlightSimple query = go where + go [] = mempty + go s@(h:t) | query `List.isPrefixOf` s = hiQuery <> go (drop len s) + | otherwise = P.string [h] <> go t + len = length query + hiQuery = P.hiBlack (P.string query) + +simpleFuzzyScore :: String -> String -> Maybe Int +simpleFuzzyScore query s + | query `List.isPrefixOf` s = Just (bonus s 2) + | query `List.isSuffixOf` s = Just (bonus s 1) + | query `List.isInfixOf` s = Just (bonus s 3) + | lowerquery `List.isInfixOf` lowers = Just (bonus s 4) + | otherwise = Nothing + where + -- prefer relative names + bonus ('.':_) n = n*10 + bonus _ n = n + lowerquery = Char.toLower <$> query + lowers = Char.toLower <$> s + +-- This logic was split out of fuzzyFinder because the `RE.MatchArray` has an +-- `Ord` instance that helps us sort the fuzzy matches in a nice way. (see +-- comment below.) `Editor.fuzzyNameDistance` uses this `Ord` instance. +fuzzyFindMatchArray :: forall a. + String -> [a] -> (a -> String) + -> [(RE.MatchArray, (a, P.Pretty P.ColorText))] +fuzzyFindMatchArray query items render = + scoreAndHighlight $ items + where + scoreAndHighlight = catMaybes . List.map go + go :: a -> Maybe (RE.MatchArray, (a, P.Pretty P.ColorText)) + go a = + let string = render a + text = Text.pack string + matches = RE.matchOnce regex string + addContext matches = + let highlighted = highlight P.bold text . tail . 
toList $ matches + in (matches, (a, highlighted)) + in addContext <$> matches + -- regex "Foo" = "(\\F).*(\\o).*(\\o)" + regex :: RE.Regex + regex = let + s = if null query then ".*" + else intercalateMap ".*" esc query where esc c = "(\\" <> [c] <> ")" + in RE.makeRegexOpts + RE.defaultCompOpt { RE.caseSensitive = False + -- newSyntax = False, otherwise "\<" and "\>" + -- matches word boundaries instead of literal < and > + , RE.newSyntax = False + } + RE.defaultExecOpt + s + -- Sort on: + -- a. length of match group to find the most compact match + -- b. start position of the match group to find the earliest match + -- c. the item itself for alphabetical ranking + -- Ord MatchArray already provides a. and b. todo: c. + +prefixFindInBranch :: + Names0 -> HashQualified -> [(SearchResult, P.Pretty P.ColorText)] +prefixFindInBranch b hq = fmap getName $ + case HQ.toName hq of + -- query string includes a name component, so do a prefix find on that + (Name.toString -> n) -> + filter (filterName n) (candidates b hq) + where + filterName n sr = + fromString n `Name.isPrefixOf` (HQ.toName . SR.name) sr + +-- only search before the # before the # and after the # after the # +fuzzyFindInBranch :: Names0 + -> HashQualified + -> [(SearchResult, P.Pretty P.ColorText)] +fuzzyFindInBranch b hq = + case HQ.toName hq of + (Name.toString -> n) -> + simpleFuzzyFinder n (candidates b hq) + (Name.toString . HQ.toName . SR.name) + +getName :: SearchResult -> (SearchResult, P.Pretty P.ColorText) +getName sr = (sr, P.syntaxToColor $ prettyHashQualified' (SR.name sr)) + +candidates :: Names.Names' Name.Name -> HashQualified -> [SearchResult] +candidates b hq = typeCandidates <> termCandidates + where + -- filter branch by hash + typeCandidates = + fmap typeResult . filterTypes . R.toList . Names.types $ b + termCandidates = + fmap termResult . filterTerms . R.toList . Names.terms $ b + filterTerms = case HQ.toHash hq of + Just sh -> List.filter $ SH.isPrefixOf sh . Referent.toShortHash . 
snd + Nothing -> id + filterTypes = case HQ.toHash hq of + Just sh -> List.filter $ SH.isPrefixOf sh . Reference.toShortHash. snd + Nothing -> id + typeResult (n, r) = SR.typeResult (Names._hqTypeName b n r) r + (Names._hqTypeAliases b n r) + termResult (n, r) = SR.termResult (Names._hqTermName b n r) r + (Names._hqTermAliases b n r) + +type Pos = Int +type Len = Int +-- This [(Pos, Len)] type is the same as `tail . toList` of a regex MatchArray +highlight :: (P.Pretty P.ColorText -> P.Pretty P.ColorText) + -> Text + -> [(Pos, Len)] + -> P.Pretty P.ColorText +highlight on = highlight' on id + +highlight' :: (P.Pretty P.ColorText -> P.Pretty P.ColorText) + -> (P.Pretty P.ColorText -> P.Pretty P.ColorText) + -> Text + -> [(Pos, Len)] + -> P.Pretty P.ColorText +highlight' on off t groups = case groups of + [] -> (off . P.text) t + (0,_) : _ -> go groups + (start,_) : _ -> (off . P.text . Text.take start) t <> go groups + where + go = \case + [] -> error "unpossible I think" + (start, len) : (start2, len2) : groups + | start + len == start2 -> + -- avoid an on/off since there's no gap between groups + go ((start, len + len2) : groups) + (start, len) : groups -> + let (selected, remaining) = Text.splitAt len . Text.drop start $ t + in (on . P.text) selected <> case groups of + [] -> (off . P.text) remaining + (start2, _) : _ -> + (off . P.text . Text.drop (start + len) . Text.take start2 $ t) + <> go groups diff --git a/parser-typechecker/src/Unison/Util/Free.hs b/parser-typechecker/src/Unison/Util/Free.hs new file mode 100644 index 0000000000..f10e8c3cd4 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Free.hs @@ -0,0 +1,68 @@ +{-# Language ExistentialQuantification, Rank2Types #-} + +module Unison.Util.Free where + +import Unison.Prelude hiding (fold) + +-- We would use another package for this if we knew of one. 
+-- Neither http://hackage.haskell.org/package/free +-- nor http://hackage.haskell.org/package/free-functors +-- nor http://hackage.haskell.org/package/freer +-- appear to have this. + +data Free f a = Pure a | forall x . Bind (f x) (x -> Free f a) + +eval :: f a -> Free f a +eval fa = Bind fa Pure + +-- unfold :: (v -> f (Either a v)) -> v -> Free f a + +fold :: Monad m => (forall x. f x -> m x) -> Free f a -> m a +fold f m = case m of + Pure a -> pure a + Bind x k -> f x >>= fold f . k + +unfold :: (v -> Either a (f v)) -> v -> Free f a +unfold f seed = case f seed of + Left a -> Pure a + Right fv -> Bind fv (unfold f) + +unfold' :: (v -> Free f (Either a v)) -> v -> Free f a +unfold' f seed = f seed >>= either Pure (unfold' f) + +unfoldM :: (Traversable f, Applicative m, Monad m) + => (b -> m (Either a (f b))) -> b -> m (Free f a) +unfoldM f seed = do + e <- f seed + case e of + Left a -> pure (Pure a) + Right fb -> free <$> traverse (unfoldM f) fb + +free :: Traversable f => f (Free f a) -> Free f a +free = go . sequence + where go (Pure fa) = Bind fa Pure + go (Bind fi f) = Bind fi (go . f) + + +foldWithIndex :: forall f m a . Monad m => (forall x. Int -> f x -> m x) -> Free f a -> m a +foldWithIndex f m = go 0 f m + where go :: Int -> (forall x. Int -> f x -> m x) -> Free f a -> m a + go starting f m = case m of + Pure a -> pure a + Bind x k -> (f starting x) >>= (go $ starting + 1) f . 
k + + +instance Functor (Free f) where + fmap = liftM + +instance Monad (Free f) where + return = Pure + Pure a >>= f = f a + Bind fx f >>= g = Bind fx (f >=> g) + + +instance Applicative (Free f) where + pure = Pure + (<*>) = ap + +instance MonadTrans Free where lift = eval diff --git a/parser-typechecker/src/Unison/Util/Less.hs b/parser-typechecker/src/Unison/Util/Less.hs new file mode 100644 index 0000000000..145a20cfb7 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Less.hs @@ -0,0 +1,25 @@ +module Unison.Util.Less where + +import System.Process +import System.IO (hPutStr, hClose) +import Control.Exception.Extra (ignore) +import Unison.Prelude (void) + +less :: String -> IO () +less str = do + let args = ["--no-init" -- don't clear the screen on exit + ,"--raw-control-chars" -- pass through colors and stuff + ,"--prompt=[less] Use space/arrow keys to navigate, or 'q' to return to ucm:" + ,"--quit-if-one-screen" -- self-explanatory + ] + (Just stdin, _stdout, _stderr, pid) + <- createProcess (proc "less" args) { std_in = CreatePipe } + + -- If `less` exits before consuming all of stdin, `hPutStr` will crash. + ignore $ hPutStr stdin str + + -- If `less` has already exited, hClose throws an exception. + ignore $ hClose stdin + + -- Wait for `less` to exit. + void $ waitForProcess pid diff --git a/parser-typechecker/src/Unison/Util/Logger.hs b/parser-typechecker/src/Unison/Util/Logger.hs new file mode 100644 index 0000000000..762f16234a --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Logger.hs @@ -0,0 +1,109 @@ +-- | Small logging library. Typical usage, import qualified: +-- +-- import qualified Unison.Util.Logger as L +-- +-- do +-- logger <- L.atomic . L.atInfo . L.scope "worker" . L.toHandle $ stderr +-- L.warn logger "WARNING!!!" 
+-- L.debug logger "Debug message, will be ignored" +-- let logger2 = L.atDebug logger +-- L.debug logger2 "Debug message, will be printed" +-- logger' <- L.at L.warnLevel +-- +module Unison.Util.Logger where + +import Unison.Prelude + +import Control.Concurrent (forkIO) +import Control.Concurrent.MVar +import Control.Exception (bracket) +import Data.List +import System.IO (Handle, hPutStrLn, hGetLine, stdout, stderr) +import System.IO.Error (isEOFError) + +type Level = Int +type Scope = [String] + +data Logger = + Logger { getScope :: !Scope + , prefix :: String -> String + , getLevel :: !Level + , raw :: String -> IO () } + +-- | Ensure at most one message is logged at the same time +atomic :: Logger -> IO Logger +atomic logger = do + lock <- newMVar () + pure $ + let raw' msg = bracket (takeMVar lock) (\_ -> putMVar lock ()) (\_ -> raw logger msg) + in logger { raw = raw' } + +toHandle :: Handle -> Logger +toHandle h = logger (hPutStrLn h) + +toStandardError :: Logger +toStandardError = toHandle stderr + +toStandardOut :: Logger +toStandardOut = toHandle stdout + +logHandleAt :: Logger -> Level -> Handle -> IO () +logHandleAt logger lvl h + | lvl > getLevel logger = pure () + | otherwise = void . 
forkIO $ loop where + loop = do + line <- try (hGetLine h) + case line of + Left ioe | isEOFError ioe -> logAt (scope "logHandleAt" logger) 3 "EOF" + | otherwise -> logAt (scope "logHandleAt" logger) 2 (show ioe) + Right line -> logAt logger lvl line >> loop + +logAt' :: Logger -> Level -> IO String -> IO () +logAt' logger lvl msg | lvl <= getLevel logger = msg >>= \msg -> raw logger (prefix logger msg) + | otherwise = pure () + +logAt :: Logger -> Level -> String -> IO () +logAt logger lvl msg | lvl <= getLevel logger = raw logger (prefix logger msg) + | otherwise = pure () + +scope :: String -> Logger -> Logger +scope s (Logger s0 _ lvl raw) = Logger s' prefix' lvl raw where + prefix' msg = prefix ++ msg + prefix = "[" ++ intercalate " " s' ++ "] " + s' = s:s0 + +scope' :: [String] -> Logger -> Logger +scope' s l = foldr scope l s + +logger :: (String -> IO ()) -> Logger +logger log = Logger [] id 0 log + +error, warn, info, debug, trace :: Logger -> String -> IO () +error l = logAt l errorLevel +warn l = logAt l warnLevel +info l = logAt l infoLevel +debug l = logAt l debugLevel +trace l = logAt l traceLevel + +error', warn', info', debug', trace' :: Logger -> IO String -> IO () +error' l = logAt' l errorLevel +warn' l = logAt' l warnLevel +info' l = logAt' l infoLevel +debug' l = logAt' l debugLevel +trace' l = logAt' l traceLevel + +errorLevel, warnLevel, infoLevel, debugLevel, traceLevel :: Level +(errorLevel, warnLevel, infoLevel, debugLevel, traceLevel) = (1,2,3,4,5) + +at :: Level -> Logger -> Logger +at lvl logger = logger { getLevel = lvl } + +atError, atWarn, atInfo, atDebug, atTrace :: Logger -> Logger +(atError, atWarn, atInfo, atDebug, atTrace) = + (at errorLevel, at warnLevel, at infoLevel, at debugLevel, at traceLevel) + +increment :: Logger -> Logger +increment (Logger s p n l) = Logger s p (n+1) l + +decrement :: Logger -> Logger +decrement (Logger s p n l) = Logger s p (n-1) l diff --git a/parser-typechecker/src/Unison/Util/Map.hs 
b/parser-typechecker/src/Unison/Util/Map.hs new file mode 100644 index 0000000000..4df2bc54e6 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Map.hs @@ -0,0 +1,16 @@ +module Unison.Util.Map + ( unionWithM + ) where + +import qualified Control.Monad as Monad +import qualified Data.Map as Map + +import Unison.Prelude + +unionWithM :: forall m k a. + (Monad m, Ord k) => (a -> a -> m a) -> Map k a -> Map k a -> m (Map k a) +unionWithM f m1 m2 = Monad.foldM go m1 $ Map.toList m2 where + go :: Map k a -> (k, a) -> m (Map k a) + go m1 (k, a2) = case Map.lookup k m1 of + Just a1 -> do a <- f a1 a2; pure $ Map.insert k a m1 + Nothing -> pure $ Map.insert k a2 m1 diff --git a/parser-typechecker/src/Unison/Util/Menu.hs b/parser-typechecker/src/Unison/Util/Menu.hs new file mode 100644 index 0000000000..90a49a907d --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Menu.hs @@ -0,0 +1,286 @@ +{-# LANGUAGE OverloadedLists #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Util.Menu (menu1, menuN, groupMenuN) where + +import Unison.Prelude + +import Data.List (find, isPrefixOf) +import qualified Data.Set as Set +import Data.Strings (strPadLeft) +import qualified Text.Read as Read +import Unison.Util.AnnotatedText (textEmpty) +import Unison.Util.ColorText (ColorText, toANSI) +import Unison.Util.Monoid (intercalateMap) +-- utility - command line menus + +type Caption = ColorText +type Stylized = ColorText +type Keyword = String +type Console = IO String + +renderChoices :: forall a mc + . (a -> Stylized) + -> (mc -> Stylized) + -> [([Keyword], [a])] + -> [([Keyword], mc)] + -> (Keyword -> Bool) + -> Stylized +renderChoices render renderMeta groups metas isSelected = + showGroups <> showMetas + where + showGroups = intercalateMap "\n" format numberedGroups <> + if (not.null) groups && (not.null) metas then "\n\n" else "" + showMetas = intercalateMap "\n" (("["<>) . (<>"]") . renderMeta . 
snd) metas + numberedGroups :: [(([Keyword], [a]), Int)] + numberedGroups = zip groups [1..] + numberWidth = (1+) . floor @Double . logBase 10 . fromIntegral $ length groups + format :: (([Keyword], [a]), Int) -> Stylized + format ((keywords, as), number) = + intercalateMap + "\n" + (format1 number (length as) (any isSelected keywords)) + (zip as [0..]) + format1 :: Int -> Int -> Bool -> (a, Int) -> Stylized + format1 groupNumber groupSize isSelected (a, index) = + header <> bracket <> render a + where + header :: (Semigroup s, IsString s) => s + header = + (if representativeRow + then (if isSelected then "*" else " ") + <> fromString (strPadLeft ' ' numberWidth (show groupNumber)) + <> ". " + else fromString $ replicate (numberWidth + 3) ' ') + representativeRow :: Bool + representativeRow = index == (groupSize - 1) `div` 2 + bracket :: IsString s => s + bracket = + if maxGroupSize > 1 then + if groupSize == 1 then "╶" + else if index == 0 then "┌" + else if index < groupSize - 1 then "│" + else "└" + else "" + maxGroupSize = maximum (length . snd <$> groups) + + +{- + + + 1 ping + pong + 2 foo + 3 bar + + [cancel] + [help] + + >> ping + + -} + +menu1 :: forall a mc + . Console + -> Caption + -> (a -> Stylized) + -> (mc -> Stylized) + -> [(Keyword, a)] + -> [(Keyword, mc)] + -> Maybe Keyword + -> IO (Maybe (Either mc a)) +menu1 console caption render renderMeta groups metas initial = do + let groups' = [ ([k], [a]) | (k, a) <- groups ] + metas' = [ ([k], mc) | (k, mc) <- metas ] + groupMenu1 console caption render renderMeta groups' metas' initial >>= \case + Just (Right [a]) -> pure (Just (Right a)) + Just (Left mc) -> pure (Just (Left mc)) + Nothing -> pure Nothing + _ -> error "unpossible; by construction we should only get singleton lists back" + +_repeatMenu1 :: forall a mc + . 
Console + -> Caption + -> (a -> Stylized) + -> (mc -> Stylized) + -> [([Keyword], [a])] + -> [([Keyword], mc)] + -> Maybe Keyword + -> IO (Either mc [a]) +_repeatMenu1 console caption render renderMeta groups metas initial = + groupMenu1 console caption render renderMeta groups metas initial >>= \case + Just x -> pure x + Nothing -> _repeatMenu1 console caption render renderMeta groups metas initial + +groupMenu1 :: forall a mc + . Console + -> Caption + -> (a -> Stylized) + -> (mc -> Stylized) + -> [([Keyword], [a])] + -> [([Keyword], mc)] + -> Maybe Keyword + -> IO (Maybe (Either mc [a])) +groupMenu1 console caption render renderMeta groups metas initial = do + when ((not . textEmpty) caption) $ do + print . toANSI $ caption + putStrLn "" + print . toANSI $ renderChoices render renderMeta groups metas (`elem` initial) + resume + where + restart = groupMenu1 console caption render renderMeta groups metas initial + -- restart with an updated caption + restart' caption groups metas initial = + groupMenu1 console caption render renderMeta groups metas initial + resume = do + putStr "\n>> " + input <- console + case words input of + [] -> useExistingSelections groups initial + input : _ -> case Read.readMaybe input of + Just i -> pickGroupByNumber i + Nothing -> pickGroupByPrefix input + where + pickGroupByNumber :: Int -> IO (Maybe (Either mc [a])) + pickGroupByNumber i = case atMay groups (i-1) of + Nothing -> do + putStrLn $ "Please pick a number from 1 to " ++ + show (length groups) ++ "." + restart + Just (_keywords, as) -> pure (Just (Right as)) + pickGroupByPrefix :: String -> IO (Maybe (Either mc [a])) + pickGroupByPrefix s = case matchingItems groups metas s of + ([],[]) -> do + putStrLn $ "Sorry, '" ++ s ++ "' didn't match anything." 
+ resume + ([(_, as)],[]) -> pure (Just (Right as)) + ([], [(_, mc)]) -> pure (Just (Left mc)) + (groups, metas) -> + restart' + "Please clarify your selection, or press Enter to back up:" + groups metas Nothing >>= \case + Nothing -> restart + x -> pure x + matchingItems :: + forall a mc. [([Keyword], [a])] -> [([Keyword], mc)] -> String + -> ([([Keyword], [a])], [([Keyword], mc)]) + matchingItems groups metas s = + (filter (any (s `isPrefixOf`) . fst) groups + ,filter (any (s `isPrefixOf`) . fst) metas) + useExistingSelections :: + [([Keyword], [a])] -> Maybe Keyword -> IO (Maybe (Either mc [a])) + useExistingSelections groups initial = case initial of + Nothing -> pure Nothing + Just initial -> + case findMatchingGroup [initial] groups of + Just group -> pure (Just (Right group)) + Nothing -> error $ + "Default selection \"" ++ show initial ++ "\"" ++ + " not found in choice groups:\n" ++ show (fst <$> groups) + findMatchingGroup :: forall a. [Keyword] -> [([Keyword], [a])] -> Maybe [a] + findMatchingGroup initials groups = + snd <$> find (\(keywords, _as) -> any (`elem` keywords) initials) groups + + +{- + + + 1 ping + pong + 2 foo + 3 bar + + [all] + [cancel] + [help] + + >> 1 3 + >> * + + -} +menuN :: Console + -> Caption + -> (a -> Stylized) + -> (mc -> Stylized) + -> [([Keyword], [a])] + -> [([Keyword], mc)] + -> [Keyword] + -> IO (Either mc [[a]]) +menuN _console _caption _render _renderMeta _groups _metas _initials = pure (Right []) + +groupMenuN :: forall a mc. Ord a + => Console + -> Caption + -> (a -> Stylized) + -> (mc -> Stylized) + -> [([Keyword], [a])] + -> [([Keyword], mc)] + -> [[Keyword]] + -> IO (Either mc [[a]]) +groupMenuN console caption render renderMeta groups metas initials = + groupMenuN' console caption render renderMeta groups metas (Set.fromList initials) + +groupMenuN' :: forall a mc. 
Ord a + => Console + -> Caption + -> (a -> Stylized) + -> (mc -> Stylized) + -> [([Keyword], [a])] + -> [([Keyword], mc)] + -> Set [Keyword] + -> IO (Either mc [[a]]) +groupMenuN' console caption render renderMeta groups metas initials = do + when ((not . textEmpty) caption) $ do + print . toANSI $ caption + putStrLn "" + print . toANSI $ renderChoices render renderMeta groups metas ((`any` initials) . elem) + resume initials + where + restart initials = groupMenuN' console caption render renderMeta groups metas initials + -- restart with an updated caption + restart' caption groups metas initials = + groupMenuN' console caption render renderMeta groups metas initials + resume :: Set [Keyword] -> IO (Either mc [[a]]) + resume initials = do + putStr "\n>> " + input <- console + case words input of + [] -> useExistingSelections groups initials + input : _ -> case Read.readMaybe input of + Just i -> pickGroupByNumber i + Nothing -> pickGroupByPrefix input + where + pickGroupByNumber :: Int -> IO (Either mc [[a]]) + pickGroupByNumber i = case atMay groups (i-1) of + Nothing -> do + putStrLn $ "Please pick a number from 1 to " ++ + show (length groups) ++ "." + restart initials + Just (kw, _) -> restart (Set.insert kw initials) + pickGroupByPrefix :: String -> IO (Either mc [[a]]) + pickGroupByPrefix s = case matchingItems groups metas s of + ([],[]) -> do + putStrLn $ "Sorry, '" ++ s ++ "' didn't match anything." + resume initials + ([], [(_, mc)]) -> pure (Left mc) + ([(kw, _)],[]) -> restart (Set.insert kw initials) + (_, _) -> + restart' + "Your prefix matched both groups and commands; please choose by number or use a longer prefix:" + groups metas initials + matchingItems :: + forall a mc. [([Keyword], [a])] -> [([Keyword], mc)] -> String + -> ([([Keyword], [a])], [([Keyword], mc)]) + matchingItems groups metas s = + (filter (any (s `isPrefixOf`) . fst) groups + ,filter (any (s `isPrefixOf`) . 
fst) metas) + useExistingSelections :: + [([Keyword], [a])] -> Set [Keyword] -> IO (Either mc [[a]]) + useExistingSelections groups initials = pure . pure $ + foldr go [] initials where + go kws selections = case findMatchingGroup kws groups of + Just as -> as : selections + Nothing -> selections + findMatchingGroup :: forall a. [Keyword] -> [([Keyword], [a])] -> Maybe [a] + findMatchingGroup initials groups = + snd <$> find (\(keywords, _as) -> any (`elem` keywords) initials) groups diff --git a/parser-typechecker/src/Unison/Util/PinBoard.hs b/parser-typechecker/src/Unison/Util/PinBoard.hs new file mode 100644 index 0000000000..f7482f94a4 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/PinBoard.hs @@ -0,0 +1,143 @@ +{-# LANGUAGE OverloadedStrings #-} + +-- | A utility type for saving memory in the presence of many duplicate ByteStrings, etc. If you have data that may be +-- a redundant duplicate, try pinning it to a pin board, and use the result of that operation instead. +-- +-- Without a pin board: +-- +-- x ───── "38dce848c8c829c62" +-- y ───── "38dce848c8c829c62" +-- z ───── "d2518f260535b927b" +-- +-- With a pin board: +-- +-- x ───── "38dce848c8c829c62" ┄┄┄┄┄┐ +-- y ────────┘ board +-- z ───── "d2518f260535b927b" ┄┄┄┄┄┘ +-- +-- ... and after x is garbage collected: +-- +-- "38dce848c8c829c62" ┄┄┄┄┄┐ +-- y ────────┘ board +-- z ───── "d2518f260535b927b" ┄┄┄┄┄┘ +-- +-- ... 
and after y is garbage collected: +-- +-- board +-- z ───── "d2518f260535b927b" ┄┄┄┄┄┘ +module Unison.Util.PinBoard + ( PinBoard, + new, + pin, + + -- * For debugging + debugDump, + debugSize, + ) +where + +import Control.Concurrent.MVar +import Data.Foldable (find, foldlM) +import Data.Functor.Compose +import Data.Hashable (Hashable, hash) +import qualified Data.IntMap as IntMap +import Data.IntMap.Strict (IntMap) +import qualified Data.Text as Text +import qualified Data.Text.IO as Text +import Data.Tuple (swap) +import System.Mem.Weak (Weak, deRefWeak, mkWeakPtr) +import Unison.Prelude + +-- | A "pin board" is a place to pin values; semantically, it's a set, but differs in a few ways: +-- +-- * Pinned values aren't kept alive by the pin board, they might be garbage collected at any time. +-- * If you try to pin a value that's already pinned (per its Eq instance), the pinned one will be returned +-- instead. +-- * It has a small API: just 'new' and 'pin'. +newtype PinBoard a + = PinBoard (MVar (IntMap (Bucket a))) + +new :: MonadIO m => m (PinBoard a) +new = + liftIO (PinBoard <$> newMVar IntMap.empty) + +pin :: forall a m. (Eq a, Hashable a, MonadIO m) => PinBoard a -> a -> m a +pin (PinBoard boardVar) x = liftIO do + modifyMVar boardVar \board -> + swap <$> getCompose (IntMap.alterF alter n board) + where + -- Pin to pin board at a hash key: either there's nothing there (ifMiss), or there's a nonempty bucket (ifHit). + alter :: Maybe (Bucket a) -> Compose IO ((,) a) (Maybe (Bucket a)) + alter = + Compose . maybe ifMiss ifHit + -- Pin a new value: create a new singleton bucket. + ifMiss :: IO (a, Maybe (Bucket a)) + ifMiss = + (x,) . Just <$> newBucket x finalizer + -- Possibly pin a new value: if it already exists in the bucket, return that one instead. Otherwise, insert it. + ifHit :: Bucket a -> IO (a, Maybe (Bucket a)) + ifHit bucket = + bucketFind bucket x >>= \case + -- Hash collision: the bucket has things in it, but none are the given value. Insert. 
+ Nothing -> (x,) . Just <$> bucketAdd bucket x finalizer + -- The thing being inserted already exists; return it. + Just y -> pure (y, Just bucket) + -- When each thing pinned here is garbage collected, compact its bucket. + finalizer :: IO () + finalizer = + modifyMVar_ boardVar (IntMap.alterF (maybe (pure Nothing) bucketCompact) n) + n :: Int + n = + hash x + +debugDump :: MonadIO m => (a -> Text) -> PinBoard a -> m () +debugDump f (PinBoard boardVar) = liftIO do + board <- readMVar boardVar + contents <- (traverse . traverse) bucketToList (IntMap.toList board) + Text.putStrLn (Text.unlines ("PinBoard" : map row contents)) + where + row (n, xs) = + Text.pack (show n) <> " => " <> Text.pack (show (map f xs)) + +debugSize :: PinBoard a -> IO Int +debugSize (PinBoard boardVar) = do + board <- readMVar boardVar + foldlM step 0 board + where + step :: Int -> Bucket a -> IO Int + step acc = + bucketToList >=> \xs -> pure (acc + length xs) + +-- | A bucket of weak pointers to different values that all share a hash. +newtype Bucket a + = Bucket [Weak a] -- Invariant: non-empty list + +-- | A singleton bucket. +newBucket :: a -> IO () -> IO (Bucket a) +newBucket = + bucketAdd (Bucket []) + +-- | Add a value to a bucket. +bucketAdd :: Bucket a -> a -> IO () -> IO (Bucket a) +bucketAdd (Bucket weaks) x finalizer = do + weak <- mkWeakPtr x (Just finalizer) + pure (Bucket (weak : weaks)) + +-- | Drop all garbage-collected values from a bucket. If none remain, returns Nothing. +bucketCompact :: Bucket a -> IO (Maybe (Bucket a)) +bucketCompact (Bucket weaks) = + bucketFromList <$> mapMaybeM (\w -> (w <$) <$> deRefWeak w) weaks + +-- | Look up a value in a bucket per its Eq instance. 
+bucketFind :: Eq a => Bucket a -> a -> IO (Maybe a) +bucketFind bucket x = + find (== x) <$> bucketToList bucket + +bucketFromList :: [Weak a] -> Maybe (Bucket a) +bucketFromList = \case + [] -> Nothing + weaks -> Just (Bucket weaks) + +bucketToList :: Bucket a -> IO [a] +bucketToList (Bucket weaks) = + mapMaybeM deRefWeak weaks diff --git a/parser-typechecker/src/Unison/Util/Pretty.hs b/parser-typechecker/src/Unison/Util/Pretty.hs new file mode 100644 index 0000000000..e9621257ee --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Pretty.hs @@ -0,0 +1,903 @@ +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Util.Pretty ( + Pretty, + ColorText, + align, + align', + alternations, + backticked, + backticked', + boxForkLeft, + boxLeft, + boxLeftM, + boxRight, + boxRightM, + bulleted, + bracket, + -- breakable + callout, + excerptSep, + excerptSep', + excerptColumn2, + excerptColumn2Headed, + warnCallout, blockedCallout, fatalCallout, okCallout, + column2, + column2sep, + column2Header, + column2M, + column2UnzippedM, + column3, + column3M, + column3UnzippedM, + column3sep, + commas, + commented, + oxfordCommas, + oxfordCommasWith, + plural, + dashed, + flatMap, + group, + hang', + hang, + hangUngrouped', + hangUngrouped, + indent, + indentAfterNewline, + indentN, + indentNonEmptyN, + indentNAfterNewline, + isMultiLine, + leftPad, + lines, + linesNonEmpty, + linesSpaced, + lit, + map, + mayColumn2, + nest, + num, + newline, + lineSkip, + nonEmpty, + numbered, + numberedColumn2, + numberedColumn2Header, + numberedList, + orElse, + orElses, + paragraphyText, + parenthesize, + parenthesizeCommas, + parenthesizeIf, + render, + renderUnbroken, + rightPad, + sep, + sepNonEmpty, + sepSpaced, + shown, + softbreak, + spaceIfBreak, + spaced, + spacedMap, + spacesIfBreak, + string, + surroundCommas, + syntaxToColor, + text, + toANSI, + toAnsiUnbroken, + toHTML, + toPlain, + toPlainUnbroken, + underline, + 
withSyntax, + wrap, + wrapColumn2, + wrapString, + black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, + border, + Width, + -- * Exported for testing + delta, + Delta, + ) where + +import Unison.Prelude + +import Data.Bifunctor ( second ) +import Data.Char ( isSpace ) +import Data.List ( intersperse ) +import Prelude hiding ( lines , map ) +import Unison.Util.AnnotatedText ( annotateMaybe ) +import qualified Unison.Util.ColorText as CT +import qualified Unison.Util.SyntaxText as ST +import Unison.Util.Monoid ( intercalateMap ) +import qualified Data.ListLike as LL +import qualified Data.Sequence as Seq +import qualified Data.Text as Text +import Control.Monad.Identity (runIdentity, Identity(..)) + +type Width = Int +type ColorText = CT.ColorText + +data Pretty s = Pretty { delta :: Delta, out :: F s (Pretty s) } deriving Eq + +instance Functor Pretty where + fmap f (Pretty d o) = Pretty d (mapLit f $ fmap (fmap f) o) + +data F s r + = Empty + -- | A group adds a level of breaking. Layout tries not to break a group + -- unless needed to fit in available width. Breaking is done "outside in". 
+ -- + -- (a | b) <> (c | d) will try (a <> c), then (b <> d) + -- + -- (a | b) <> group (c | d) will try (a <> c), then (b <> c), then (b <> d) + | Group r + | Lit s + | Wrap (Seq r) + | OrElse r r + | Append (Seq r) + deriving (Eq, Show, Foldable, Traversable, Functor) + +mapLit :: (s -> t) -> F s r -> F t r +mapLit f (Lit s) = Lit (f s) +mapLit _ Empty = Empty +mapLit _ (Group r) = Group r +mapLit _ (Wrap s) = Wrap s +mapLit _ (OrElse r s) = OrElse r s +mapLit _ (Append s) = Append s + +lit :: (IsString s, LL.ListLike s Char) => s -> Pretty s +lit s = lit' (foldMap chDelta $ LL.toList s) s + +lit' :: Delta -> s -> Pretty s +lit' d s = Pretty d (Lit s) + +orElse :: Pretty s -> Pretty s -> Pretty s +orElse p1 p2 = Pretty (delta p1) (OrElse p1 p2) + +orElses :: [Pretty s] -> Pretty s +orElses [] = mempty +orElses ps = foldr1 orElse ps + +wrapImpl :: IsString s => [Pretty s] -> Pretty s +wrapImpl [] = mempty +wrapImpl (p:ps) = wrap_ . Seq.fromList $ + p : fmap (\p -> (" " <> p) `orElse` (newline <> p)) ps + +wrapImplPreserveSpaces :: (LL.ListLike s Char, IsString s) => [Pretty s] -> Pretty s +wrapImplPreserveSpaces = \case + [] -> mempty + (p:ps) -> wrap_ . Seq.fromList $ p : fmap f ps + where + startsWithSpace p = case out p of + (Lit s) -> fromMaybe False (fmap (isSpaceNotNewline . fst) $ LL.uncons s) + _ -> False + f p | startsWithSpace p = p `orElse` newline + f p = p + +isSpaceNotNewline :: Char -> Bool +isSpaceNotNewline c = isSpace c && not (c == '\n') + +wrapString :: (LL.ListLike s Char, IsString s) => String -> Pretty s +wrapString s = wrap (lit $ fromString s) + +-- Wrap text, preserving whitespace (apart from at the wrap points.) +-- Used in particular for viewing/displaying doc literals. +-- Should be understood in tandem with TermParser.docNormalize. +-- See also unison-src/transcripts/doc-formatting.md. +paragraphyText :: (LL.ListLike s Char, IsString s) => Text -> Pretty s +paragraphyText = sep "\n" . fmap (wrapPreserveSpaces . text) . 
Text.splitOn "\n" + +wrap :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s +wrap p = wrapImpl (toLeaves [p]) where + toLeaves [] = [] + toLeaves (hd:tl) = case out hd of + Empty -> toLeaves tl + Lit s -> wordify s ++ toLeaves tl + Group _ -> hd : toLeaves tl + OrElse a _ -> toLeaves (a:tl) + Wrap _ -> hd : toLeaves tl + Append hds -> toLeaves (toList hds ++ tl) + wordify s0 = let s = LL.dropWhile isSpace s0 in + if LL.null s then [] + else case LL.break isSpace s of (word1, s) -> lit word1 : wordify s + +-- Does not insert spaces where none were present, and does not collapse +-- sequences of spaces into one. +-- It'd be a bit painful to just replace wrap with the following version, because +-- lots of OutputMessages code depends on wrap's behaviour of sometimes adding +-- extra spaces. +wrapPreserveSpaces :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s +wrapPreserveSpaces p = wrapImplPreserveSpaces (toLeaves [p]) where + toLeaves [] = [] + toLeaves (hd:tl) = case out hd of + Empty -> toLeaves tl + Lit s -> (fmap lit $ alternations isSpaceNotNewline s) ++ toLeaves tl + Group _ -> hd : toLeaves tl + OrElse a _ -> toLeaves (a:tl) + Wrap _ -> hd : toLeaves tl + Append hds -> toLeaves (toList hds ++ tl) + +-- Cut a list every time a predicate changes. Produces a list of +-- non-empty lists. 
+alternations :: (LL.ListLike s c) => (c -> Bool) -> s -> [s] +alternations p s = reverse $ go True s [] where + go _ s acc | LL.null s = acc + go w s acc = go (not w) rest acc' where + (t, rest) = LL.span p' s + p' = if w then p else (\x -> not (p x)) + acc' = if (LL.null t) then acc else t : acc + +wrap_ :: Seq (Pretty s) -> Pretty s +wrap_ ps = Pretty (foldMap delta ps) (Wrap ps) + +group :: Pretty s -> Pretty s +group p = Pretty (delta p) (Group p) + +toANSI :: Width -> Pretty CT.ColorText -> String +toANSI avail p = CT.toANSI (render avail p) + +toAnsiUnbroken :: Pretty ColorText -> String +toAnsiUnbroken p = CT.toANSI (renderUnbroken p) + +toPlain :: Width -> Pretty CT.ColorText -> String +toPlain avail p = CT.toPlain (render avail p) + +toHTML :: String -> Width -> Pretty CT.ColorText -> String +toHTML cssPrefix avail p = CT.toHTML cssPrefix (render avail p) + +toPlainUnbroken :: Pretty ColorText -> String +toPlainUnbroken p = CT.toPlain (renderUnbroken p) + +syntaxToColor :: Pretty ST.SyntaxText -> Pretty ColorText +syntaxToColor = fmap $ annotateMaybe . fmap CT.defaultColors + +-- set the syntax, overriding any present syntax +withSyntax :: ST.Element -> Pretty ST.SyntaxText -> Pretty ST.SyntaxText +withSyntax e = fmap $ ST.syntax e + +renderUnbroken :: (Monoid s, IsString s) => Pretty s -> s +renderUnbroken = render maxBound + +render :: (Monoid s, IsString s) => Width -> Pretty s -> s +render availableWidth p = go mempty [Right p] where + go _ [] = mempty + go cur (p:rest) = case p of + Right p -> -- `p` might fit, let's try it! 
+ if p `fits` cur then flow p <> go (cur <> delta p) rest + else go cur (Left p : rest) -- nope, switch to breaking mode + Left p -> case out p of -- `p` requires breaking + Append ps -> go cur ((Left <$> toList ps) <> rest) + Empty -> go cur rest + Group p -> go cur (Right p : rest) + -- Note: literals can't be broken further so they're + -- added to output unconditionally + Lit l -> l <> go (cur <> delta p) rest + OrElse _ p -> go cur (Right p : rest) + Wrap ps -> go cur ((Right <$> toList ps) <> rest) + + flow p = case out p of + Append ps -> foldMap flow ps + Empty -> mempty + Group p -> flow p + Lit s -> s + OrElse p _ -> flow p + Wrap ps -> foldMap flow ps + + fits p cur = + maxCol (surgery cur <> delta p) < availableWidth + where + -- Surgically modify 'cur' to pretend it has not exceeded availableWidth. + -- This is necessary because sometimes things cannot be split and *must* + -- exceed availableWidth; in this case, we do not want to entirely "blame" + -- the new proposed (cur <> delta p) for this overflow. + -- + -- For example, when appending + -- + -- availableWidth + -- | + -- xxx | + -- yyyyyy + -- zz | + -- + -- with + -- + -- aa | + -- bb | + -- + -- we want to end up with + -- + -- xxx | + -- yyyyyy + -- zzaa| + -- bb | + -- + surgery = \case + SingleLine c -> SingleLine (min c (availableWidth-1)) + MultiLine fc lc mc -> MultiLine fc lc (min mc (availableWidth-1)) + +newline :: IsString s => Pretty s +newline = "\n" + +lineSkip :: IsString s => Pretty s +lineSkip = newline <> newline + +spaceIfBreak :: IsString s => Pretty s +spaceIfBreak = "" `orElse` " " + +spacesIfBreak :: IsString s => Int -> Pretty s +spacesIfBreak n = "" `orElse` fromString (replicate n ' ') + +softbreak :: IsString s => Pretty s +softbreak = " " `orElse` newline + +spaced :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s +spaced = intercalateMap softbreak id + +spacedMap :: (Foldable f, IsString s) => (a -> Pretty s) -> f a -> Pretty s +spacedMap f as = spaced . 
fmap f $ toList as + +commas :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s +commas = intercalateMap ("," <> softbreak) id + +oxfordCommas :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s +oxfordCommas = oxfordCommasWith "" + +-- Like `oxfordCommas`, but attaches `end` at the end (without a space). +-- For example, `oxfordCommasWith "."` will attach a period. +oxfordCommasWith + :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s +oxfordCommasWith end xs = case toList xs of + [] -> "" + [x] -> group (x <> end) + [x, y] -> x <> " and " <> group (y <> end) + xs -> + intercalateMap ("," <> softbreak) id (init xs) + <> "," + <> softbreak + <> "and" + <> softbreak + <> group (last xs <> end) + +parenthesizeCommas :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s +parenthesizeCommas = surroundCommas "(" ")" + +surroundCommas + :: (Foldable f, IsString s) + => Pretty s + -> Pretty s + -> f (Pretty s) + -> Pretty s +surroundCommas start stop fs = + group + $ start + <> spaceIfBreak + <> intercalateMap ("," <> softbreak <> align) id fs + <> stop + where align = spacesIfBreak (preferredWidth start + 1) + +sepSpaced :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s +sepSpaced between = sep (between <> softbreak) + +sep :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s +sep between = intercalateMap between id + +sepNonEmpty :: (Foldable f, IsString s) => Pretty s -> f (Pretty s) -> Pretty s +sepNonEmpty between ps = sep between (nonEmpty ps) + +-- if list is too long, adds `... 22 more` to the end +excerptSep :: IsString s => Maybe Int -> Pretty s -> [Pretty s] -> Pretty s +excerptSep maxCount = + excerptSep' maxCount (\i -> group ("... 
" <> shown i <> " more")) + +excerptSep' + :: IsString s + => Maybe Int + -> (Int -> Pretty s) + -> Pretty s + -> [Pretty s] + -> Pretty s +excerptSep' maxCount summarize s ps = case maxCount of + Just max | length ps > max -> + sep s (take max ps) <> summarize (length ps - max) + _ -> sep s ps + +nonEmpty :: (Foldable f, IsString s) => f (Pretty s) -> [Pretty s] +nonEmpty (toList -> l) = case l of + (out -> Empty) : t -> nonEmpty t + h : t -> h : nonEmpty t + [] -> [] + +parenthesize :: IsString s => Pretty s -> Pretty s +parenthesize p = group $ "(" <> p <> ")" + +parenthesizeIf :: IsString s => Bool -> Pretty s -> Pretty s +parenthesizeIf False s = s +parenthesizeIf True s = parenthesize s + +lines :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s +lines = intercalateMap (append newline) id where + append p = Pretty (delta p) (Append $ Seq.singleton p) + +linesNonEmpty :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s +linesNonEmpty = lines . nonEmpty + +linesSpaced :: (Foldable f, IsString s) => f (Pretty s) -> Pretty s +linesSpaced ps = lines (intersperse "" $ toList ps) + +prefixed :: (Foldable f, LL.ListLike s Char, IsString s) + => Pretty s -> Pretty s -> f (Pretty s) -> Pretty s +prefixed first rest = + intercalateMap newline (\b -> first <> indentAfterNewline rest b) + +bulleted + :: (Foldable f, LL.ListLike s Char, IsString s) => f (Pretty s) -> Pretty s +bulleted = prefixed "* " " " + +dashed + :: (Foldable f, LL.ListLike s Char, IsString s) => f (Pretty s) -> Pretty s +dashed = prefixed "- " " " + +commented + :: (Foldable f, LL.ListLike s Char, IsString s) => f (Pretty s) -> Pretty s +commented = prefixed "-- " "-- " + +numbered + :: (Foldable f, LL.ListLike s Char, IsString s) + => (Int -> Pretty s) + -> f (Pretty s) + -> Pretty s +numbered num ps = column2 (fmap num [1 ..] 
`zip` toList ps) + +numberedHeader + :: (Foldable f, LL.ListLike s Char, IsString s) + => (Maybe Int -> Pretty s) + -> f (Pretty s) + -> Pretty s +numberedHeader num ps = column2 (fmap num (Nothing : fmap Just [1 ..]) `zip` toList ps) + +-- Like `column2` but with the lines numbered. For instance: +-- +-- 1. one thing : this is a thing +-- 2. another thing : this is another thing +-- 3. and another : yet one more thing +numberedColumn2 + :: (Foldable f, LL.ListLike s Char, IsString s) + => (Int -> Pretty s) + -> f (Pretty s, Pretty s) + -> Pretty s +numberedColumn2 num ps = numbered num (align $ toList ps) + +numberedColumn2Header + :: (Foldable f, LL.ListLike s Char, IsString s) + => (Int -> Pretty s) + -> f (Pretty s, Pretty s) + -> Pretty s +numberedColumn2Header num ps = numberedHeader (maybe mempty num) (align $ toList ps) + +-- Opinionated `numbered` that uses bold numbers in front +numberedList :: Foldable f => f (Pretty ColorText) -> Pretty ColorText +numberedList = numbered (\i -> hiBlack . fromString $ show i <> ".") + +leftPad, rightPad :: IsString s => Int -> Pretty s -> Pretty s +leftPad n p = + let rem = n - preferredWidth p + in if rem > 0 then fromString (replicate rem ' ') <> p else p +rightPad n p = + let rem = n - preferredWidth p + in if rem > 0 then p <> fromString (replicate rem ' ') else p + +excerptColumn2Headed + :: (LL.ListLike s Char, IsString s) + => Maybe Int + -> (Pretty s, Pretty s) + -> [(Pretty s, Pretty s)] + -> Pretty s +excerptColumn2Headed max hd cols = case max of + Just max | len > max -> + lines [column2 (hd : take max cols), "... " <> shown (len - max) <> " more"] + _ -> column2 (hd : cols) + where len = length cols + +excerptColumn2 + :: (LL.ListLike s Char, IsString s) + => Maybe Int + -> [(Pretty s, Pretty s)] + -> Pretty s +excerptColumn2 max cols = case max of + Just max | len > max -> lines [column2 cols, "... 
" <> shown (len - max)] + _ -> column2 cols + where len = length cols + +column2 + :: (LL.ListLike s Char, IsString s) => [(Pretty s, Pretty s)] -> Pretty s +column2 = column2sep "" + +column2Header + :: Pretty ColorText -> Pretty ColorText -> [(Pretty ColorText, Pretty ColorText)] -> Pretty ColorText +column2Header left right = column2sep " " . ((fmap CT.hiBlack left, fmap CT.hiBlack right):) + +column2sep + :: (LL.ListLike s Char, IsString s) => Pretty s -> [(Pretty s, Pretty s)] -> Pretty s +column2sep sep rows = lines . (group <$>) . align $ [(a, sep <> b) | (a, b) <- rows] + +column2M + :: (Applicative m, LL.ListLike s Char, IsString s) + => [m (Pretty s, Pretty s)] + -> m (Pretty s) +column2M = fmap column2 . sequenceA + +mayColumn2 + :: (LL.ListLike s Char, IsString s) + => [(Pretty s, Maybe (Pretty s))] + -> Pretty s +mayColumn2 = lines . (group <$>) . ((uncurry (<>)) <$>) . align' + +column3 + :: (LL.ListLike s Char, IsString s) + => [(Pretty s, Pretty s, Pretty s)] + -> Pretty s +column3 = column3sep "" + +column3M + :: (LL.ListLike s Char, IsString s, Monad m) + => [m (Pretty s, Pretty s, Pretty s)] + -> m (Pretty s) +column3M = fmap column3 . sequence + +column3UnzippedM + :: forall m s . (LL.ListLike s Char, IsString s, Monad m) + => Pretty s + -> [m (Pretty s)] + -> [m (Pretty s)] + -> [m (Pretty s)] + -> m (Pretty s) +column3UnzippedM bottomPadding left mid right = let + rowCount = maximum (fmap length [left, mid, right]) + pad :: [m (Pretty s)] -> [m (Pretty s)] + pad a = a ++ replicate (rowCount - length a) (pure bottomPadding) + (pleft, pmid, pright) = (pad left, pad mid, pad right) + in column3M $ zipWith3 (liftA3 (,,)) pleft pmid pright + +column2UnzippedM + :: forall m s . 
(LL.ListLike s Char, IsString s, Monad m) + => Pretty s + -> [m (Pretty s)] + -> [m (Pretty s)] + -> m (Pretty s) +column2UnzippedM bottomPadding left right = let + rowCount = length left `max` length right + pad :: [m (Pretty s)] -> [m (Pretty s)] + pad a = a ++ replicate (rowCount - length a) (pure bottomPadding) + sep :: [m (Pretty s)] -> [m (Pretty s)] + sep = fmap (fmap (" " <>)) + (pleft, pright) = (pad left, sep $ pad right) + in column2M $ zipWith (liftA2 (,)) pleft pright + +column3sep + :: (LL.ListLike s Char, IsString s) => Pretty s -> [(Pretty s, Pretty s, Pretty s)] -> Pretty s +column3sep sep rows = let + bc = align [(b,sep <> c) | (_,b,c) <- rows ] + abc = group <$> align [(a,sep <> bc) | ((a,_,_),bc) <- rows `zip` bc ] + in lines abc + +wrapColumn2 :: + (LL.ListLike s Char, IsString s) => [(Pretty s, Pretty s)] -> Pretty s +wrapColumn2 rows = lines (align rows) where + align rows = let lwidth = foldl' max 0 (preferredWidth . fst <$> rows) + 2 + in [ group (rightPad lwidth l <> indentNAfterNewline lwidth (wrap r)) + | (l, r) <- rows] + +align + :: (LL.ListLike s Char, IsString s) => [(Pretty s, Pretty s)] -> [Pretty s] +align rows = (((uncurry (<>)) <$>) . align') (second Just <$> rows) + +-- [("foo", Just "bar") +-- ,("barabaz", Nothing) +-- ,("qux","quux")] +-- +-- results in: +-- +-- [("foo ", "bar"), +-- [("barabaz", ""), +-- [("qux ", "quuxbill")] +-- +-- The first component has padding added, sufficient to align the second +-- component. The second component has whitespace added after its +-- newlines, again sufficient to line it up in a second column. 
+align' + :: (LL.ListLike s Char, IsString s) + => [(Pretty s, Maybe (Pretty s))] + -> [(Pretty s, Pretty s)] +align' rows = alignedRows + where + col0Width = foldl' max 0 [ preferredWidth col1 | (col1, Just _) <- rows ] + 1 + alignedRows = + [ case col1 of + Just s -> + (rightPad col0Width col0, indentNAfterNewline col0Width s) + Nothing -> (col0, mempty) + | (col0, col1) <- rows + ] + +text :: IsString s => Text -> Pretty s +text t = fromString (Text.unpack t) + +num :: (Show n, Num n, IsString s) => n -> Pretty s +num n = fromString (show n) + +string :: IsString s => String -> Pretty s +string = fromString + +shown :: (Show a, IsString s) => a -> Pretty s +shown = fromString . show + +hang' + :: (LL.ListLike s Char, IsString s) + => Pretty s + -> Pretty s + -> Pretty s + -> Pretty s +hang' from by p = group $ if isMultiLine p + then from <> "\n" <> group (indent by p) + else (from <> " " <> group p) `orElse` (from <> "\n" <> group (indent by p)) + +hangUngrouped' + :: (LL.ListLike s Char, IsString s) + => Pretty s + -> Pretty s + -> Pretty s + -> Pretty s +hangUngrouped' from by p = if isMultiLine p + then from <> "\n" <> indent by p + else (from <> " " <> p) `orElse` (from <> "\n" <> indent by p) + +hangUngrouped + :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s +hangUngrouped from = hangUngrouped' from " " + +hang :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s +hang from = hang' from " " + +nest :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s +nest = hang' "" + +indent :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s +indent by p = by <> indentAfterNewline by p + +indentN :: (LL.ListLike s Char, IsString s) => Width -> Pretty s -> Pretty s +indentN by = indent (fromString $ replicate by ' ') + +indentNonEmptyN :: (LL.ListLike s Char, IsString s) => Width -> Pretty s -> Pretty s +indentNonEmptyN _ (out -> Empty) = mempty +indentNonEmptyN by p = indentN by p + 
+indentNAfterNewline + :: (LL.ListLike s Char, IsString s) => Width -> Pretty s -> Pretty s +indentNAfterNewline by = indentAfterNewline (fromString $ replicate by ' ') + +indentAfterNewline + :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s -> Pretty s +indentAfterNewline by = flatMap f + where + f s0 = case LL.break (== '\n') s0 of + (hd, s) -> if LL.null s + then lit s0 + -- use `take` and `drop` to preserve annotations or + -- or other extra info attached to the original `s` + else lit (LL.take (LL.length hd) s0) <> "\n" <> by <> f (LL.drop 1 s) + +instance IsString s => IsString (Pretty s) where + fromString s = lit' (foldMap chDelta s) (fromString s) + +instance Semigroup (Pretty s) where (<>) = mappend +instance Monoid (Pretty s) where + mempty = Pretty mempty Empty + mappend p1 p2 = Pretty (delta p1 <> delta p2) . + Append $ case (out p1, out p2) of + (Append ps1, Append ps2) -> ps1 <> ps2 + (Append ps1, _) -> ps1 <> pure p2 + (_, Append ps2) -> pure p1 <> ps2 + (_,_) -> pure p1 <> pure p2 + +data Delta = + -- | The number of columns. + SingleLine !Width + -- | The number of columns in the first, last, and longest lines. 
+ | MultiLine !Width !Width !Width + deriving stock (Eq, Ord, Show) + +instance Semigroup Delta where + SingleLine c <> SingleLine c2 = SingleLine (c + c2) + SingleLine c <> MultiLine fc lc mc = + let fc' = c + fc + in MultiLine fc' lc (max fc' mc) + MultiLine fc lc mc <> SingleLine c = + let lc' = lc + c + in MultiLine fc lc' (max lc' mc) + MultiLine fc lc mc <> MultiLine fc2 lc2 mc2 = + MultiLine fc lc2 (max mc (max mc2 (lc + fc2))) + +instance Monoid Delta where + mempty = SingleLine 0 + mappend = (<>) + +maxCol :: Delta -> Width +maxCol = \case + SingleLine c -> c + MultiLine _ _ c -> c + +lastCol :: Delta -> Width +lastCol = \case + SingleLine c -> c + MultiLine _ c _ -> c + +chDelta :: Char -> Delta +chDelta '\n' = MultiLine 0 0 0 +chDelta _ = SingleLine 1 + +preferredWidth :: Pretty s -> Width +preferredWidth p = lastCol (delta p) + +isMultiLine :: Pretty s -> Bool +isMultiLine p = + case delta p of + SingleLine{} -> False + MultiLine{} -> True + +black, red, green, yellow, blue, purple, cyan, white, hiBlack, hiRed, hiGreen, hiYellow, hiBlue, hiPurple, hiCyan, hiWhite, bold, underline + :: Pretty CT.ColorText -> Pretty CT.ColorText +black = map CT.black +red = map CT.red +green = map CT.green +yellow = map CT.yellow +blue = map CT.blue +purple = map CT.purple +cyan = map CT.cyan +white = map CT.white +hiBlack = map CT.hiBlack +hiRed = map CT.hiRed +hiGreen = map CT.hiGreen +hiYellow = map CT.hiYellow +hiBlue = map CT.hiBlue +hiPurple = map CT.hiPurple +hiCyan = map CT.hiCyan +hiWhite = map CT.hiWhite +bold = map CT.bold +underline = map CT.underline + +plural :: Foldable f + => f a -> Pretty ColorText -> Pretty ColorText +plural f p = case length f of + 0 -> mempty + 1 -> p + -- todo: consider use of plural package + _ -> p <> case reverse (toPlainUnbroken p) of + 's' : _ -> "es" + _ -> "s" + +border :: (LL.ListLike s Char, IsString s) => Int -> Pretty s -> Pretty s +border n p = "\n" <> indentN n p <> "\n" + +callout :: (LL.ListLike s Char, IsString s) => 
Pretty s -> Pretty s -> Pretty s +callout header p = header <> "\n\n" <> p + +bracket :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s +bracket = indent " " + +boxForkLeft, boxLeft, boxRight :: + forall s . (LL.ListLike s Char, IsString s) => [Pretty s] -> [Pretty s] +boxForkLeft = boxLeft' lBoxStyle1 +boxLeft = boxLeft' lBoxStyle2 +boxRight = boxRight' rBoxStyle2 + +boxLeft', boxRight' :: (LL.ListLike s Char, IsString s) + => BoxStyle s -> [Pretty s] -> [Pretty s] +boxLeft' style = fmap runIdentity . boxLeftM' style . fmap Identity +boxRight' style = fmap runIdentity . boxRightM' style . fmap Identity + +type BoxStyle s = + ( (Pretty s, Pretty s) -- first (start, continue) + , (Pretty s, Pretty s) -- middle + , (Pretty s, Pretty s) -- last + , (Pretty s, Pretty s) -- singleton + ) +lBoxStyle1, lBoxStyle2, rBoxStyle2 :: IsString s => BoxStyle s +lBoxStyle1 = (("┌ ", "│ ") -- first + ,("├ ", "│ ") -- middle + ,("└ ", " ") -- last + ,("" , "" )) -- singleton +lBoxStyle2 = (("┌ "," ") + ,("│ "," ") + ,("└ "," ") + ,("" ,"" )) +rBoxStyle2 = ((" ┐", " │") + ,(" │", " │") + ,(" ┘", " ") + ,(" ", " ")) + +boxLeftM, boxRightM :: forall m s . (Monad m, LL.ListLike s Char, IsString s) + => [m (Pretty s)] -> [m (Pretty s)] +boxLeftM = boxLeftM' lBoxStyle2 +boxRightM = boxRightM' rBoxStyle2 + +boxLeftM' :: forall m s . (Monad m, LL.ListLike s Char, IsString s) + => BoxStyle s -> [m (Pretty s)] -> [m (Pretty s)] +boxLeftM' (first, middle, last, singleton) ps = go (Seq.fromList ps) where + go Seq.Empty = [] + go (p Seq.:<| Seq.Empty) = [decorate singleton <$> p] + go (a Seq.:<| (mid Seq.:|> b)) = + [decorate first <$> a] + ++ toList (fmap (decorate middle) <$> mid) + ++ [decorate last <$> b] + decorate (first, mid) p = first <> indentAfterNewline mid p + +-- this implementation doesn't work for multi-line inputs, +-- because i dunno how to inspect multi-line inputs + + +boxRightM' :: forall m s. 
(Monad m, LL.ListLike s Char, IsString s) + => BoxStyle s -> [m (Pretty s)] -> [m (Pretty s)] +boxRightM' (first, middle, last, singleton) ps = go (Seq.fromList ps) where + go :: Seq.Seq (m (Pretty s)) -> [m (Pretty s)] + go Seq.Empty = [] + go (p Seq.:<| Seq.Empty) = [decorate singleton <$> p] + go (a Seq.:<| (mid Seq.:|> b)) = + [decorate first <$> a] + ++ toList (fmap (decorate middle) <$> mid) + ++ [decorate last <$> b] + decorate (first, _mid) p = p <> first + +warnCallout, blockedCallout, fatalCallout, okCallout + :: (LL.ListLike s Char, IsString s) => Pretty s -> Pretty s +warnCallout = callout "⚠️" +fatalCallout = callout "❗️" +okCallout = callout "✅" +blockedCallout = callout "🚫" + +backticked :: IsString s => Pretty s -> Pretty s +backticked p = group ("`" <> p <> "`") + +-- |Attach some punctuation after the closing backtick. +backticked' :: IsString s => Pretty s -> Pretty s -> Pretty s +backticked' p end = group ("`" <> p <> "`" <> end) + +instance Show s => Show (Pretty s) where + show p = render 80 (metaPretty p) + +metaPretty :: Show s => Pretty s -> Pretty String +metaPretty = go (0::Int) where + go prec p = case out p of + Lit s -> parenthesizeIf (prec > 0) $ "Lit" `hang` lit (show s) + Empty -> "Empty" + Group g -> parenthesizeIf (prec > 0) $ "Group" `hang` go 1 g + Wrap s -> parenthesizeIf (prec > 0) $ "Wrap" `hang` + surroundCommas "[" "]" (go 1 <$> s) + OrElse a b -> parenthesizeIf (prec > 0) $ + "OrElse" `hang` spaced [go 1 a, go 1 b] + Append s -> surroundCommas "[" "]" (go 1 <$> s) + +map :: LL.ListLike s2 Char => (s -> s2) -> Pretty s -> Pretty s2 +map f p = case out p of + Append ps -> foldMap (map f) ps + Empty -> mempty + Group p -> group (map f p) + Lit s -> lit' (foldMap chDelta $ LL.toList s2) s2 where s2 = f s + OrElse p1 p2 -> orElse (map f p1) (map f p2) + Wrap p -> wrap_ (map f <$> p) + +flatMap :: (s -> Pretty s2) -> Pretty s -> Pretty s2 +flatMap f p = case out p of + Append ps -> foldMap (flatMap f) ps + Empty -> mempty + 
Group p -> group (flatMap f p) + Lit s -> f s + OrElse p1 p2 -> orElse (flatMap f p1) (flatMap f p2) + Wrap p -> wrap_ (flatMap f <$> p) diff --git a/parser-typechecker/src/Unison/Util/Range.hs b/parser-typechecker/src/Unison/Util/Range.hs new file mode 100644 index 0000000000..e2377bc027 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Range.hs @@ -0,0 +1,27 @@ +module Unison.Util.Range where + +import Unison.Lexer (Pos(..)) + +-- | True if `_x` contains `_y` +contains :: Range -> Range -> Bool +_x@(Range a b) `contains` _y@(Range c d) = a <= c && d <= b + +overlaps :: Range -> Range -> Bool +overlaps (Range a b) (Range c d) = a < d && c < b + +inRange :: Pos -> Range -> Bool +inRange p (Range a b) = p >= a && p < b + +isMultiLine :: Range -> Bool +isMultiLine (Range (Pos startLine _) (Pos endLine _)) = startLine < endLine + +data Range = Range { start :: Pos, end :: Pos } deriving (Eq, Ord, Show) + +startingLine :: Range -> Range +startingLine r@(Range start@(Pos startLine _) (Pos stopLine _)) = + if stopLine == startLine then r + else Range start (Pos (startLine+1) 0) + +instance Semigroup Range where + (Range start end) <> (Range start2 end2) = + Range (min start start2) (max end end2) diff --git a/parser-typechecker/src/Unison/Util/Star3.hs b/parser-typechecker/src/Unison/Util/Star3.hs new file mode 100644 index 0000000000..491b4bfb59 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Star3.hs @@ -0,0 +1,214 @@ +{-# LANGUAGE RecordWildCards #-} + +module Unison.Util.Star3 where + +import Unison.Prelude + +import Unison.Util.Relation (Relation) +import qualified Data.Set as Set +import qualified Unison.Hashable as H +import qualified Unison.Util.Relation as R + +-- Represents a set of (fact, d1, d2, d3), but indexed using a star schema so +-- it can be efficiently queried from any of the dimensions. 
+data Star3 fact d1 d2 d3 + = Star3 { fact :: Set fact + , d1 :: Relation fact d1 + , d2 :: Relation fact d2 + , d3 :: Relation fact d3 } deriving (Eq,Ord,Show) + +-- Produce the cross-product across all the dimensions +toList :: (Ord fact, Ord d1, Ord d2, Ord d3) + => Star3 fact d1 d2 d3 + -> [(fact, d1, d2, d3)] +toList s = [ (f, x, y, z) | f <- Set.toList (fact s) + , x <- Set.toList (R.lookupDom f (d1 s)) + , y <- Set.toList (R.lookupDom f (d2 s)) + , z <- Set.toList (R.lookupDom f (d3 s)) ] + +-- `difference a b` contains only the facts from `a` that are absent from `b` +-- or differ along any of the dimensions `d1..d3`. +difference + :: (Ord fact, Ord d1, Ord d2, Ord d3) + => Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +difference a b = Star3 facts d1s d2s d3s + where + d1s = R.difference (d1 a) (d1 b) + d2s = R.difference (d2 a) (d2 b) + d3s = R.difference (d3 a) (d3 b) + facts = R.dom d1s <> R.dom d2s <> R.dom d3s + +d23s :: (Ord fact, Ord d2, Ord d3) + => Star3 fact d1 d2 d3 + -> [(fact, d2, d3)] +d23s s = [ (f, x, y) | f <- Set.toList (fact s) + , x <- Set.toList (R.lookupDom f (d2 s)) + , y <- Set.toList (R.lookupDom f (d3 s)) ] + +d23s' :: (Ord fact, Ord d2, Ord d3) + => Star3 fact d1 d2 d3 + -> [(d2, d3)] +d23s' s = [ (x, y) | f <- Set.toList (fact s) + , x <- Set.toList (R.lookupDom f (d2 s)) + , y <- Set.toList (R.lookupDom f (d3 s)) ] + +d12s :: (Ord fact, Ord d1, Ord d2) + => Star3 fact d1 d2 d3 + -> [(fact, d1, d2)] +d12s s = [ (f, x, y) | f <- Set.toList (fact s) + , x <- Set.toList (R.lookupDom f (d1 s)) + , y <- Set.toList (R.lookupDom f (d2 s)) ] + +d13s :: (Ord fact, Ord d1, Ord d3) + => Star3 fact d1 d2 d3 + -> [(fact, d1, d3)] +d13s s = [ (f, x, y) | f <- Set.toList (fact s) + , x <- Set.toList (R.lookupDom f (d1 s)) + , y <- Set.toList (R.lookupDom f (d3 s)) ] + +mapD1 :: (Ord fact, Ord d1, Ord d1a) => (d1 -> d1a) -> Star3 fact d1 d2 d3 -> Star3 fact d1a d2 d3 +mapD1 f s = s { d1 = R.mapRan f (d1 s) } + +mapD2 :: 
(Ord fact, Ord d2, Ord d2a) => (d2 -> d2a) -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2a d3 +mapD2 f s = s { d2 = R.mapRan f (d2 s) } + +mapD3 :: (Ord fact, Ord d3, Ord d3a) => (d3 -> d3a) -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3a +mapD3 f s = s { d3 = R.mapRan f (d3 s) } + +fromList :: (Ord fact, Ord d1, Ord d2, Ord d3) + => [(fact, d1, d2, d3)] -> Star3 fact d1 d2 d3 +fromList = foldl' (flip insert) mempty + +selectFact + :: (Ord fact, Ord d1, Ord d2, Ord d3) + => Set fact + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +selectFact fs s = Star3 fact' d1' d2' d3' where + fact' = Set.intersection fs (fact s) + d1' = fs R.<| d1 s + d2' = fs R.<| d2 s + d3' = fs R.<| d3 s + +select1D3 + :: (Ord fact, Ord d1, Ord d2, Ord d3) + => d3 -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 +select1D3 = selectD3 . Set.singleton + +selectD3 + :: (Ord fact, Ord d1, Ord d2, Ord d3) + => Set d3 + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +selectD3 d3s s = Star3 fact' d1' d2' d3' where + fact' = Set.intersection (R.dom d3') (fact s) + d1' = R.dom d3' R.<| d1 s + d2' = R.dom d3' R.<| d2 s + d3' = d3 s R.|> d3s + +-- Deletes tuples of the form (fact, d1, _, _). +-- If no other (fact, dk, _, _) tuples exist for any other dk, then +-- `fact` is removed from the `fact` set and from the other dimensions as well, +-- that is, (fact, d1) is treated as a primary key. 
+deletePrimaryD1 :: (Ord fact, Ord d1, Ord d2, Ord d3) + => (fact, d1) -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 +deletePrimaryD1 (f, x) s = let + d1' = R.delete f x (d1 s) + otherX = R.lookupDom f d1' + in if Set.null otherX then + Star3 (Set.delete f (fact s)) d1' (R.deleteDom f (d2 s)) (R.deleteDom f (d3 s)) + else s { d1 = d1' } + +lookupD1 :: (Ord fact, Ord d1) => d1 -> Star3 fact d1 d2 d3 -> Set fact +lookupD1 x s = R.lookupRan x (d1 s) + +insertD1 + :: (Ord fact, Ord d1) + => (fact, d1) + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +insertD1 (f,x) s = s { fact = Set.insert f (fact s) + , d1 = R.insert f x (d1 s) } + +memberD1 :: (Ord fact, Ord d1) => (fact,d1) -> Star3 fact d1 d2 d3 -> Bool +memberD1 (f, x) s = R.member f x (d1 s) + +memberD2 :: (Ord fact, Ord d2) => (fact,d2) -> Star3 fact d1 d2 d3 -> Bool +memberD2 (f, x) s = R.member f x (d2 s) + +memberD3 :: (Ord fact, Ord d3) => (fact,d3) -> Star3 fact d1 d2 d3 -> Bool +memberD3 (f, x) s = R.member f x (d3 s) + +insert :: (Ord fact, Ord d1, Ord d2, Ord d3) + => (fact, d1, d2, d3) + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +insert (f, d1i, d2i, d3i) s = Star3 fact' d1' d2' d3' where + fact' = Set.insert f (fact s) + d1' = R.insert f d1i (d1 s) + d2' = R.insert f d2i (d2 s) + d3' = R.insert f d3i (d3 s) + +insertD23 :: (Ord fact, Ord d1, Ord d2, Ord d3) + => (fact, d2, d3) + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +insertD23 (f, x, y) s = Star3 fact' (d1 s) d2' d3' where + fact' = Set.insert f (fact s) + d2' = R.insert f x (d2 s) + d3' = R.insert f y (d3 s) + +deleteD3 :: (Ord fact, Ord d1, Ord d2, Ord d3) + => (fact, d3) + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +deleteD3 (f, x) s = Star3 (fact s) (d1 s) (d2 s) d3' where + d3' = R.delete f x (d3 s) + +deleteD2 :: (Ord fact, Ord d1, Ord d2, Ord d3) + => (fact, d2) + -> Star3 fact d1 d2 d3 + -> Star3 fact d1 d2 d3 +deleteD2 (f, x) s = Star3 (fact s) (d1 s) d2' (d3 s) where + d2' = R.delete f x (d2 s) + +deleteFact :: (Ord 
fact, Ord d1, Ord d2, Ord d3) + => Set fact -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 +deleteFact facts Star3{..} = + Star3 (fact `Set.difference` facts) + (facts R.<|| d1) + (facts R.<|| d2) + (facts R.<|| d3) + +replaceFact :: (Ord fact, Ord d1, Ord d2, Ord d3) + => fact -> fact -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 +replaceFact f f' Star3{..} = + let updateFact fact = + if Set.member f fact + then (Set.insert f' . Set.delete f) fact + else fact + in Star3 (updateFact fact) + (R.replaceDom f f' d1) + (R.replaceDom f f' d2) + (R.replaceDom f f' d3) + +instance (Ord fact, Ord d1, Ord d2, Ord d3) => Semigroup (Star3 fact d1 d2 d3) where + (<>) = mappend + +instance (Ord fact, Ord d1, Ord d2, Ord d3) => Monoid (Star3 fact d1 d2 d3) where + mempty = Star3 mempty mempty mempty mempty + s1 `mappend` s2 = Star3 fact' d1' d2' d3' where + fact' = fact s1 <> fact s2 + d1' = d1 s1 <> d1 s2 + d2' = d2 s1 <> d2 s2 + d3' = d3 s1 <> d3 s2 + +instance (H.Hashable fact, H.Hashable d1, H.Hashable d2, H.Hashable d3) + => H.Hashable (Star3 fact d1 d2 d3) where + tokens s = + [ H.accumulateToken (fact s) + , H.accumulateToken (d1 s) + , H.accumulateToken (d2 s) + , H.accumulateToken (d3 s) ] diff --git a/parser-typechecker/src/Unison/Util/SyntaxText.hs b/parser-typechecker/src/Unison/Util/SyntaxText.hs new file mode 100644 index 0000000000..e2fcfb6c36 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/SyntaxText.hs @@ -0,0 +1,62 @@ +module Unison.Util.SyntaxText where + +import Unison.Prelude +import Unison.Reference (Reference) +import Unison.Referent (Referent) +import Unison.HashQualified (HashQualified) +import Unison.Pattern (SeqOp) + +import Unison.Util.AnnotatedText ( AnnotatedText(..), annotate ) + +type SyntaxText = AnnotatedText Element + +-- The elements of the Unison grammar, for syntax highlighting purposes +data Element = NumericLiteral + | TextLiteral + | CharLiteral + | BooleanLiteral + | Blank + | Var + | Reference Reference + | Referent Referent + | 
Op SeqOp + | Constructor + | Request + | AbilityBraces + -- let|handle|in|where|match|with|cases|->|if|then|else|and|or + | ControlKeyword + -- forall|-> + | TypeOperator + | BindingEquals + | TypeAscriptionColon + -- type|ability + | DataTypeKeyword + | DataTypeParams + | Unit + -- unique + | DataTypeModifier + -- `use Foo bar` is keyword, prefix, suffix + | UseKeyword + | UsePrefix + | UseSuffix + | HashQualifier HashQualified + | DelayForceChar + -- ? , ` [ ] @ | + -- Currently not all commas in the pretty-print output are marked up as DelimiterChar - we miss + -- out characters emitted by Pretty.hs helpers like Pretty.commas. + | DelimiterChar + -- ! ' + | Parenthesis + | LinkKeyword -- `typeLink` and `termLink` + -- [: :] @[] + | DocDelimiter + -- the 'include' in @[include], etc + | DocKeyword + deriving (Eq, Ord, Show) + +syntax :: Element -> SyntaxText -> SyntaxText +syntax = annotate + +-- Convert a `SyntaxText` to a `String`, ignoring syntax markup +toPlain :: SyntaxText -> String +toPlain (AnnotatedText at) = join (toList $ fst <$> at) diff --git a/parser-typechecker/src/Unison/Util/TQueue.hs b/parser-typechecker/src/Unison/Util/TQueue.hs new file mode 100644 index 0000000000..e088b13e85 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/TQueue.hs @@ -0,0 +1,89 @@ +module Unison.Util.TQueue where + +import Unison.Prelude + +import UnliftIO (MonadUnliftIO) +import UnliftIO.STM hiding (TQueue) +import qualified UnliftIO.Async as Async + +import qualified Data.Sequence as S +import Data.Sequence (Seq((:<|)), (|>)) + +data TQueue a = TQueue (TVar (Seq a)) (TVar Word64) + +newIO :: MonadIO m => m (TQueue a) +newIO = TQueue <$> newTVarIO mempty <*> newTVarIO 0 + +size :: TQueue a -> STM Int +size (TQueue q _) = S.length <$> readTVar q + +-- Waits for this queue to reach a size <= target. +-- Consumes no elements; it's expected there is some +-- other thread which is consuming elements from the queue. 
+awaitSize :: Int -> TQueue a -> STM () +awaitSize target q = size q >>= \n -> + if n <= target then pure () + else retrySTM + +peek :: TQueue a -> STM a +peek (TQueue v _) = readTVar v >>= \case + a :<| _ -> pure a + _ -> retrySTM + +dequeue :: TQueue a -> STM a +dequeue (TQueue v _) = readTVar v >>= \case + a :<| as -> writeTVar v as *> pure a + _ -> retrySTM + +undequeue :: TQueue a -> a -> STM () +undequeue (TQueue v _) a = readTVar v >>= \ + as -> writeTVar v (a :<| as) + +tryDequeue :: TQueue a -> STM (Maybe a) +tryDequeue (TQueue v _) = readTVar v >>= \case + a :<| as -> writeTVar v as *> pure (Just a) + _ -> pure Nothing + +dequeueN :: TQueue a -> Int -> STM [a] +dequeueN (TQueue v _) n = readTVar v >>= \s -> + if length s >= n then writeTVar v (S.drop n s) $> toList (S.take n s) + else retrySTM + +-- return the number of enqueues over the life of the queue +enqueueCount :: TQueue a -> STM Word64 +enqueueCount (TQueue _ count) = readTVar count + +flush :: TQueue a -> STM [a] +flush (TQueue v _) = do + s <- readTVar v + writeTVar v mempty + pure . toList $ s + +enqueue :: TQueue a -> a -> STM () +enqueue (TQueue v count) a = do + modifyTVar' v (|> a) + modifyTVar' count (+1) + +raceIO :: MonadUnliftIO m => STM a -> STM b -> m (Either a b) +raceIO a b = do + aa <- Async.async $ atomically a + ab <- Async.async $ atomically b + Async.waitEitherCancel aa ab + +-- take all elements up to but not including the first not satisfying cond +tryPeekWhile :: (a -> Bool) -> TQueue a -> STM [a] +tryPeekWhile cond (TQueue v _) = toList . 
S.takeWhileL cond <$> readTVar v + +-- block until at least one element is enqueued not satisfying cond, +-- then return the prefix before that +takeWhile :: (a -> Bool) -> TQueue a -> STM [a] +takeWhile cond (TQueue v _) = readTVar v >>= \s -> let + (left, right) = S.spanl cond s in + if null right then retrySTM + else writeTVar v right $> toList left + +peekWhile :: (a -> Bool) -> TQueue a -> STM [a] +peekWhile cond (TQueue v _) = readTVar v >>= \s -> let + (left, right) = S.spanl cond s in + if null right then retrySTM + else pure $ toList left diff --git a/parser-typechecker/src/Unison/Util/Timing.hs b/parser-typechecker/src/Unison/Util/Timing.hs new file mode 100644 index 0000000000..40faed90b1 --- /dev/null +++ b/parser-typechecker/src/Unison/Util/Timing.hs @@ -0,0 +1,41 @@ +{-# LANGUAGE BangPatterns #-} + +module Unison.Util.Timing where + +import System.CPUTime (getCPUTime) +import System.IO.Unsafe (unsafePerformIO) +import UnliftIO (MonadIO, liftIO) +import Data.Time.Clock.System (getSystemTime, systemToTAITime) +import Data.Time.Clock.TAI (diffAbsoluteTime) +import Data.Time.Clock (picosecondsToDiffTime) + +enabled :: Bool +enabled = False + +time :: MonadIO m => String -> m a -> m a +time _ ma | not enabled = ma +time label ma = do + systemStart <- liftIO getSystemTime + cpuPicoStart <- liftIO getCPUTime + liftIO $ putStrLn $ "Timing " ++ label ++ "..." 
+ a <- ma + cpuPicoEnd <- liftIO getCPUTime + systemEnd <- liftIO getSystemTime + let systemDiff = diffAbsoluteTime (systemToTAITime systemEnd) (systemToTAITime systemStart) + let cpuDiff = picosecondsToDiffTime (cpuPicoEnd - cpuPicoStart) + liftIO $ putStrLn $ "Finished " ++ label ++ " in " ++ show cpuDiff ++ " (cpu), " ++ show systemDiff ++ " (system)" + pure a + +unsafeTime :: Monad m => String -> m a -> m a +unsafeTime _ ma | not enabled = ma +unsafeTime label ma = do + let !systemStart = unsafePerformIO getSystemTime + !cpuPicoStart = unsafePerformIO getCPUTime + !_ = unsafePerformIO $ putStrLn $ "Timing " ++ label ++ "..." + a <- ma + let !cpuPicoEnd = unsafePerformIO getCPUTime + !systemEnd = unsafePerformIO getSystemTime + let systemDiff = diffAbsoluteTime (systemToTAITime systemEnd) (systemToTAITime systemStart) + let cpuDiff = picosecondsToDiffTime (cpuPicoEnd - cpuPicoStart) + let !_ = unsafePerformIO $ putStrLn $ "Finished " ++ label ++ " in " ++ show cpuDiff ++ " (cpu), " ++ show systemDiff ++ " (system)" + pure a diff --git a/parser-typechecker/src/Unison/Util/TransitiveClosure.hs b/parser-typechecker/src/Unison/Util/TransitiveClosure.hs new file mode 100644 index 0000000000..1c865f2ebf --- /dev/null +++ b/parser-typechecker/src/Unison/Util/TransitiveClosure.hs @@ -0,0 +1,31 @@ +module Unison.Util.TransitiveClosure where + +import Unison.Prelude + +import Data.Functor.Identity (runIdentity) +import qualified Data.Set as Set + +transitiveClosure :: forall m a. (Monad m, Ord a) + => (a -> m (Set a)) + -> Set a + -> m (Set a) +transitiveClosure getDependencies open = + let go :: Set a -> [a] -> m (Set a) + go closed [] = pure closed + go closed (h:t) = + if Set.member h closed + then go closed t + else do + deps <- getDependencies h + go (Set.insert h closed) (toList deps ++ t) + in go Set.empty (toList open) + +transitiveClosure' :: Ord a => (a -> Set a) -> Set a -> Set a +transitiveClosure' f as = runIdentity $ transitiveClosure (pure . 
f) as + +transitiveClosure1 :: forall m a. (Monad m, Ord a) + => (a -> m (Set a)) -> a -> m (Set a) +transitiveClosure1 f a = transitiveClosure f (Set.singleton a) + +transitiveClosure1' :: Ord a => (a -> Set a) -> a -> Set a +transitiveClosure1' f a = runIdentity $ transitiveClosure1 (pure . f) a diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs new file mode 100644 index 0000000000..df763cd654 --- /dev/null +++ b/parser-typechecker/tests/Suite.hs @@ -0,0 +1,87 @@ +{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-} + +module Main where + +import EasyTest +import System.Environment (getArgs) +import System.IO +import qualified Unison.Core.Test.Name as Name +import qualified Unison.Test.ABT as ABT +import qualified Unison.Test.Cache as Cache +import qualified Unison.Test.Codebase as Codebase +import qualified Unison.Test.Codebase.Causal as Causal +import qualified Unison.Test.Codebase.FileCodebase as FileCodebase +import qualified Unison.Test.Codebase.Path as Path +import qualified Unison.Test.ColorText as ColorText +import qualified Unison.Test.DataDeclaration as DataDeclaration +import qualified Unison.Test.FileParser as FileParser +import qualified Unison.Test.Git as Git +import qualified Unison.Test.Lexer as Lexer +import qualified Unison.Test.IO as TestIO +import qualified Unison.Test.Range as Range +import qualified Unison.Test.Referent as Referent +import qualified Unison.Test.Term as Term +import qualified Unison.Test.TermParser as TermParser +import qualified Unison.Test.TermPrinter as TermPrinter +import qualified Unison.Test.Type as Type +import qualified Unison.Test.TypePrinter as TypePrinter +import qualified Unison.Test.Typechecker as Typechecker +import qualified Unison.Test.Typechecker.Context as Context +import qualified Unison.Test.Typechecker.TypeError as TypeError +import qualified Unison.Test.UnisonSources as UnisonSources +import qualified Unison.Test.UriParser as UriParser +import qualified 
Unison.Test.Util.Bytes as Bytes +import qualified Unison.Test.Util.PinBoard as PinBoard +import qualified Unison.Test.Util.Pretty as Pretty +import qualified Unison.Test.Var as Var +import qualified Unison.Test.ANF as ANF +import qualified Unison.Test.MCode as MCode +import qualified Unison.Test.VersionParser as VersionParser + +test :: Bool -> Test () +test rt = tests + [ Cache.test + , Lexer.test + , Term.test + , TermParser.test + , TermPrinter.test + , Type.test + , TypeError.test + , TypePrinter.test + , UnisonSources.test rt + , FileParser.test + , DataDeclaration.test + , Range.test + , ColorText.test + , Bytes.test + , Path.test + , Causal.test + , Referent.test + , FileCodebase.test + , ABT.test + , ANF.test + , MCode.test + , Var.test + , Codebase.test + , Typechecker.test + , UriParser.test + , Context.test + , Git.test + , TestIO.test rt + , Name.test + , VersionParser.test + , Pretty.test + , PinBoard.test + ] + +main :: IO () +main = do + args0 <- getArgs + let (rt, args) + | "--new-runtime":rest <- args0 = (True, rest) + | otherwise = (False, args0) + mapM_ (`hSetEncoding` utf8) [stdout, stdin, stderr] + case args of + [] -> runOnly "" (test rt) + [prefix] -> runOnly prefix (test rt) + [seed, prefix] -> rerunOnly (read seed) prefix (test rt) diff --git a/parser-typechecker/tests/Unison/Core/Test/Name.hs b/parser-typechecker/tests/Unison/Core/Test/Name.hs new file mode 100644 index 0000000000..e16c07ee7f --- /dev/null +++ b/parser-typechecker/tests/Unison/Core/Test/Name.hs @@ -0,0 +1,29 @@ +{-# Language OverloadedStrings #-} + +module Unison.Core.Test.Name where + +import EasyTest +import Unison.Name as Name +import Unison.NameSegment as NameSegment +import Data.List ( intercalate ) +import Data.Text ( pack ) + +test :: Test () +test = scope "name" $ tests + [ scope "suffixes" $ tests + [ scope "empty" $ expectEqual (suffixes "") [] + , scope "one namespace" $ expectEqual (suffixes "bar") ["bar"] + , scope "two namespaces" + $ expectEqual (suffixes 
"foo.bar") ["foo.bar", "bar"] + , scope "multiple namespaces" + $ expectEqual (suffixes "foo.bar.baz") ["foo.bar.baz", "bar.baz", "baz"] + , scope "terms named `.`" $ expectEqual (suffixes "base..") ["base..", "."] + ] + , scope "segments" $ do + numDots <- int' 0 10 + numSegs <- int' 0 10 + n <- int' 0 10 + segs <- listOf n . pick $ replicate numDots "." ++ replicate numSegs "foo" + expectEqual (segments $ Name . pack $ intercalate "." segs) + (NameSegment . pack <$> segs) + ] diff --git a/parser-typechecker/tests/Unison/Test/ABT.hs b/parser-typechecker/tests/Unison/Test/ABT.hs new file mode 100644 index 0000000000..2f36c15450 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/ABT.hs @@ -0,0 +1,44 @@ +{-# Language OverloadedStrings #-} + +module Unison.Test.ABT where + +import Data.Set as Set +import EasyTest +import Unison.ABT as ABT +import Unison.Symbol (Symbol(..)) +import Unison.Var as Var +import Unison.Codebase.Serialization ( getFromBytes, putBytes ) +import qualified Unison.Codebase.Serialization.V1 as V1 + +test :: Test () +test = scope "abt" $ tests [ + scope "freshInBoth" $ + let + t1 = var 1 "a" + t2 = var 0 "a" + fresh = ABT.freshInBoth t1 t2 $ symbol 0 "a" + in tests + [ scope "first" $ expect (not $ Set.member fresh (ABT.freeVars t1)) + , scope "second" $ expect (not $ Set.member fresh (ABT.freeVars t2)) + ], + scope "rename" $ do + -- rename x to a in \a -> [a, x] should yield + -- \a1 -> [a1, a] + let t1 = ABT.abs (symbol 0 "a") (ABT.tm [var 0 "a", var 0 "x"]) + t2 = ABT.rename (symbol 0 "x") (symbol 0 "a") t1 + fvs = toList . 
ABT.freeVars $ t2 + -- make sure the variable wasn't captured + expectEqual fvs [symbol 0 "a"] + -- make sure the resulting term is alpha equiv to \a1 -> [a1, a] + expectEqual t2 (ABT.abs (symbol 0 "b") (ABT.tm [var 0 "b", var 0 "a"])), + + -- confirmation of fix for https://github.com/unisonweb/unison/issues/1388 + -- where symbols with nonzero freshIds did not round trip + scope "putSymbol" $ let + v = Symbol 10 (User "hi") + v' = getFromBytes V1.getSymbol (putBytes V1.putSymbol v) + in expectEqual (Just v) v' + ] + where + symbol i n = Symbol i (Var.User n) + var i n = ABT.var $ symbol i n diff --git a/parser-typechecker/tests/Unison/Test/ANF.hs b/parser-typechecker/tests/Unison/Test/ANF.hs new file mode 100644 index 0000000000..3bca0b4812 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/ANF.hs @@ -0,0 +1,199 @@ +{-# language BangPatterns #-} +{-# language PatternGuards #-} + +module Unison.Test.ANF where + +import EasyTest + +import Unison.ABT.Normalized (Term(TAbs)) +import qualified Unison.Pattern as P +import Unison.Reference (Reference) +import Unison.Runtime.ANF as ANF +import Unison.Runtime.MCode (emitCombs) +import Unison.Type as Ty +import Unison.Var as Var + +import Unison.Util.EnumContainers as EC + +import qualified Data.Set as Set +import qualified Data.Map as Map + +import qualified Unison.Term as Term +import qualified Unison.ABT as ABT +import Unison.Test.Common (tm) + +import Control.Monad.Reader (ReaderT(..)) +import Control.Monad.State (evalState) + +-- testSNF s = ok +-- where +-- t0 = tm s +-- snf = toSuperNormal (const 0) t0 + +simpleRefs :: Reference -> RTag +simpleRefs r + | r == Ty.natRef = 0 + | r == Ty.intRef = 1 + | r == Ty.floatRef = 2 + | r == Ty.booleanRef = 3 + | r == Ty.textRef = 4 + | r == Ty.charRef = 5 + | otherwise = 100 + +runANF :: Var v => ANFM v a -> a +runANF m = evalState (runReaderT m env) (0, []) + where + env = (Set.empty, const 0, simpleRefs) + +testANF :: String -> Test () +testANF s + | t0 == 
denormalize anf = ok + | otherwise = crash $ show $ denormalize anf + where + t0 = const () `Term.amap` tm s + anf = runANF $ anfTerm t0 + +testLift :: String -> Test () +testLift s = case cs of (!_, !_, _) -> ok + where + cs = emitCombs 0 . superNormalize (const 0) (const 0) . lamLift $ tm s + +denormalize :: Var v => ANormal v -> Term.Term0 v +denormalize (TVar v) = Term.var () v +denormalize (TLit l) = case l of + I i -> Term.int () i + N n -> Term.nat () n + F f -> Term.float () f + T t -> Term.text () t + C c -> Term.char () c + LM r -> Term.termLink () r + LY r -> Term.typeLink () r +denormalize (THnd _ _ _) + = error "denormalize handler" + -- = Term.match () (denormalize b) $ denormalizeHandler h +denormalize (TShift _ _ _) + = error "denormalize shift" +denormalize (TLet v _ bn bo) + | typeOf v == ANFBlank = ABT.subst v dbn dbo + | otherwise = Term.let1_ False [(v, dbn)] dbo + where + dbn = denormalize $ TTm bn + dbo = denormalize bo +denormalize (TName _ _ _ _) + = error "can't denormalize by-name bindings" +denormalize (TMatch v cs) + = Term.match () (ABT.var v) $ denormalizeMatch cs +denormalize (TApp f args) + | FCon rt 0 <- f + , r <- denormalizeRef rt + , r `elem` [Ty.natRef, Ty.intRef] + , [v] <- args + = Term.var () v +denormalize (TApp f args) = Term.apps' df (Term.var () <$> args) + where + df = case f of + FVar v -> Term.var () v + FComb _ -> error "FComb" + FCon r n -> + Term.constructor () (denormalizeRef r) (fromIntegral $ rawTag n) + FReq r n -> + Term.request () (denormalizeRef r) (fromIntegral $ rawTag n) + FPrim _ -> error "FPrim" + FCont _ -> error "denormalize FCont" +denormalize (TFrc _) = error "denormalize TFrc" + +denormalizeRef :: RTag -> Reference +denormalizeRef r + | 0 <- rawTag r = Ty.natRef + | 1 <- rawTag r = Ty.intRef + | 2 <- rawTag r = Ty.floatRef + | 3 <- rawTag r = Ty.booleanRef + | 4 <- rawTag r = Ty.textRef + | 5 <- rawTag r = Ty.charRef + | otherwise = error "denormalizeRef" + +backReference :: RTag -> Reference 
+backReference _ = error "backReference" + +denormalizeMatch + :: Var v => Branched (ANormal v) -> [Term.MatchCase () (Term.Term0 v)] +denormalizeMatch b + | MatchEmpty <- b = [] + | MatchIntegral m df <- b + = (dcase (ipat Ty.intRef) <$> mapToList m) ++ dfcase df + | MatchText m df <- b + = (dcase (const $ P.Text ()) <$> Map.toList m) ++ dfcase df + | MatchData r cs Nothing <- b + , [(0, ([UN], zb))] <- mapToList cs + , TAbs i (TMatch j (MatchIntegral m df)) <- zb + , i == j + = (dcase (ipat r) <$> mapToList m) ++ dfcase df + | MatchData r m df <- b + = (dcase (dpat r) . fmap snd <$> mapToList m) ++ dfcase df + | MatchRequest hs df <- b = denormalizeHandler hs df + | MatchSum _ <- b = error "MatchSum not a compilation target" + where + dfcase (Just d) + = [Term.MatchCase (P.Unbound ()) Nothing $ denormalize d] + dfcase Nothing = [] + + dcase p (t, br) = Term.MatchCase (p n t) Nothing dbr + where (n, dbr) = denormalizeBranch br + + ipat r _ i + | r == Ty.natRef = P.Nat () $ fromIntegral i + | otherwise = P.Int () $ fromIntegral i + dpat r n t = P.Constructor () r (fromEnum t) (replicate n $ P.Var ()) + +denormalizeBranch (TAbs v br) = (n+1, ABT.abs v dbr) + where (n, dbr) = denormalizeBranch br +denormalizeBranch tm = (0, denormalize tm) + +denormalizeHandler + :: Var v + => EnumMap RTag (EnumMap CTag ([Mem], ANormal v)) + -> ANormal v + -> [Term.MatchCase () (Term.Term0 v)] +denormalizeHandler cs df = dcs + where + dcs = foldMapWithKey rf cs <> dfc + dfc = [ Term.MatchCase + (P.EffectPure () (P.Var ())) + Nothing + db + ] + where (_, db) = denormalizeBranch df + rf r rcs = foldMapWithKey (cf $ backReference r) rcs + cf r t b = [ Term.MatchCase + (P.EffectBind () r (fromEnum t) + (replicate n $ P.Var ()) (P.Var ())) + Nothing + db + ] + where (n, db) = denormalizeBranch (snd b) + +test :: Test () +test = scope "anf" . tests $ + [ scope "lift" . 
tests $ + [ testLift "let\n\ + \ g = m x -> ##Nat.+ x m\n\ + \ m -> g m m" + , testLift "m n -> let\n\ + \ f acc i = match i with\n\ + \ 0 -> acc\n\ + \ _ -> f (##Nat.+ acc n) (##Nat.sub i 1)\n\ + \ f 0 m" + ] + , scope "denormalize" . tests $ + [ testANF "1" + , testANF "1 + 2" + , testANF "match x with\n\ + \ +1 -> foo\n\ + \ +2 -> bar\n\ + \ +3 -> baz" + , testANF "1 + match x with\n\ + \ +1 -> foo\n\ + \ +2 -> bar" + , testANF "(match x with +3 -> foo) + (match x with +2 -> foo)" + ] + ] diff --git a/parser-typechecker/tests/Unison/Test/Cache.hs b/parser-typechecker/tests/Unison/Test/Cache.hs new file mode 100644 index 0000000000..fafd6459f8 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Cache.hs @@ -0,0 +1,80 @@ +module Unison.Test.Cache where + +import EasyTest +import Control.Monad +import Control.Concurrent.STM +import Control.Concurrent.Async +import qualified Unison.Util.Cache as Cache + +test :: Test () +test = scope "util.cache" $ tests [ + scope "ex1" $ fits Cache.cache + , scope "ex2" $ fits (Cache.semispaceCache n) + , scope "ex3" $ doesn'tFit (Cache.semispaceCache n) + , scope "ex4" $ do + replicateM_ 10 $ concurrent (Cache.semispaceCache n) + ok + ] + where + n :: Word + n = 1000 + + -- This checks that items properly expire from the cache + doesn'tFit mkCache = do + cache <- io $ mkCache + misses <- io $ newTVarIO 0 + let f x = do + atomically $ modifyTVar misses (+1) + pure x + -- populate the cache, all misses (n*2), but first 1-n will have expired by the end + results1 <- io $ traverse (Cache.apply cache f) [1..n*2] + -- should be half hits, so an additional `n` misses + results2 <- io $ traverse (Cache.apply cache f) (reverse [1..n*2]) + misses <- io $ readTVarIO misses + expect' (results1 == [1..n*2]) + expect' (results2 == reverse [1..n*2]) + expect (misses == n * 3) + + -- This checks the simple case that everything fits in the cache + fits mkCache = do + cache <- io $ mkCache + misses <- io $ newTVarIO 0 + let f x = do + 
atomically $ modifyTVar misses (+1) + pure x + -- populate the cache + results1 <- io $ traverse (Cache.apply cache f) [1..n] + -- should be all hits + results2 <- io $ traverse (Cache.apply cache f) [1..n] + misses <- io $ readTVarIO misses + expect' (results1 == [1..n]) + expect' (results2 == [1..n]) + expect (misses == n) + + -- A simple smoke test of concurrent access. The cache doesn't + -- try to linearize all reads / writes so the number of misses + -- during concurrent access is unpredictable, but once the cache is + -- fully populated, concurrent reads should generate no further misses + concurrent mkCache = do + cache <- io $ mkCache + misses <- io $ newTVarIO 0 + let f x = do + atomically $ modifyTVar misses (+1) + pure x + -- we're populating the cache in parallel + results1 <- io $ async $ traverse (Cache.apply cache f) [1 .. (n `div` 2)] + results2 <- io $ async $ traverse (Cache.apply cache f) [(n `div` 2 + 1) .. n] + (results1, results2) <- io $ waitBoth results1 results2 + -- now the cache should be fully populated, so no further misses + misses1 <- io $ readTVarIO misses + + -- these should be all hits + results3 <- io $ async $ traverse (Cache.apply cache f) [1 .. (n `div` 2)] + results4 <- io $ async $ traverse (Cache.apply cache f) [(n `div` 2 + 1) .. n] + (results3, results4) <- io $ waitBoth results3 results4 + + misses2 <- io $ readTVarIO misses + expect' (results1 ++ results2 == [1..n]) + expect' (results3 ++ results4 == [1..n]) + expect' (misses1 == misses2) + diff --git a/parser-typechecker/tests/Unison/Test/Codebase.hs b/parser-typechecker/tests/Unison/Test/Codebase.hs new file mode 100644 index 0000000000..ad46c853b6 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Codebase.hs @@ -0,0 +1,40 @@ +{-# Language OverloadedStrings #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Test.Codebase where + +import Data.Functor.Identity +import qualified Data.Map as Map +import Data.Map ( (!) 
) +import EasyTest +import qualified Unison.Codebase as Codebase +import Unison.Codebase.CodeLookup ( CodeLookup(..) ) +import qualified Unison.Hash as Hash +import qualified Unison.Reference as R +import Unison.Symbol ( Symbol ) +import qualified Unison.Term as Term +import qualified Unison.UnisonFile as UF +import qualified Unison.Var as Var + +test :: Test () +test = scope "codebase" $ tests + [ scope "makeSelfContained" $ + let h = Hash.unsafeFromBase32Hex "abcd" + ref = R.Derived h 0 1 + v1 = Var.refNamed @Symbol ref + foo = Var.named "foo" + -- original binding: `foo = \v1 -> ref` + binding = (foo, Term.lam () v1 (Term.ref () ref)) + uf = UF.UnisonFileId mempty mempty [binding] mempty + code :: CodeLookup Symbol Identity () + code = CodeLookup + { getTerm = \rid -> pure $ + if R.DerivedId rid == ref then Just (Term.int () 42) + else Nothing + , getTypeDeclaration = \_ -> pure Nothing + } + -- expected binding after makeSelfContained: `foo = \v1 -> v2`, where `v2 /= v1` + UF.UnisonFile _ _ (Map.fromList -> bindings) _ = runIdentity $ Codebase.makeSelfContained' code uf + Term.LamNamed' _ (Term.Var' v2) = bindings ! foo + in expect $ v2 /= v1 + ] diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Causal.hs b/parser-typechecker/tests/Unison/Test/Codebase/Causal.hs new file mode 100644 index 0000000000..2aa192a949 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Codebase/Causal.hs @@ -0,0 +1,318 @@ +{-# LANGUAGE PartialTypeSignatures #-} + +module Unison.Test.Codebase.Causal where + +import EasyTest +import Unison.Codebase.Causal ( Causal(Cons, Merge) + , RawHash(..) 
+ , one + , currentHash + , before + ) +import qualified Unison.Codebase.Causal as Causal +import Control.Monad.Trans.State (State, state, put) +import Data.Int (Int64) +import qualified Data.Map as Map +import qualified Data.Set as Set +import Control.Monad (replicateM_) +import Control.Monad.Extra (ifM) +import Control.Applicative (liftA2) +import Data.List (foldl1') +import Data.Functor ((<&>)) +import Unison.Hashable (Hashable) +import Data.Set (Set) +import Data.Functor.Identity +import Unison.Hash (Hash) +import Unison.CommandLine (beforeHash) + +c :: M (Causal M Int64 [Int64]) +c = merge (foldr cons (one [1]) t1) + (foldr cons (foldr cons (one [1]) t2) t3) + where + t1, t2, t3 :: [[Int64]] + t1 = fmap pure [5,4..2] + t2 = fmap pure [100..105] + t3 = fmap pure [999,998] + +c2 :: M (Causal M Int64 [Int64]) +c2 = merge (foldr cons (one [1]) t1) + (foldr cons (foldr cons (one [1]) t2) t3) + where + t1, t2, t3 :: [[Int64]] + t1 = fmap pure [5,4..2] + t2 = fmap pure [10,9..2] + t3 = fmap pure [999,998] + +{- +λ> show Unison.Test.Codebase.Causal.c +"Identity Merge 4gP [999,5] [\"3rG\",\"58U\"]" +λ> runIdentity Unison.Test.Codebase.Causal.result +step a=fromList [1,10] seen=[] rest=fromList [Merge 4gP [999,5] ["3rG","58U"]] +step a=fromList [1,10] seen=["4gP"] rest=fromList [Cons 3rG [999] 4LX,Cons 58U [5] 4vC] +step a=fromList [1,10] seen=["3rG","4gP"] rest=fromList [Cons 58U [5] 4vC,Cons 4LX [998] 26J] +step a=fromList [1,10] seen=["3rG","4gP","58U"] rest=fromList [Cons 4LX [998] 26J,Cons 4vC [4] yFt] +step a=fromList [1,10] seen=["3rG","4LX","4gP","58U"] rest=fromList [Cons 4vC [4] yFt,Cons 26J [100] 4FR] +step a=fromList [1,10] seen=["3rG","4LX","4gP","4vC","58U"] rest=fromList [Cons 26J [100] 4FR,Cons yFt [3] 3So] +step a=fromList [1,10] seen=["26J","3rG","4LX","4gP","4vC","58U"] rest=fromList [Cons yFt [3] 3So,Cons 4FR [101] 4az] +step a=fromList [1,10] seen=["yFt","26J","3rG","4LX","4gP","4vC","58U"] rest=fromList [Cons 4FR [101] 4az,Cons 3So [2] 5Lu] +step 
a=fromList [1,10] seen=["yFt","26J","3rG","4FR","4LX","4gP","4vC","58U"] rest=fromList [Cons 3So [2] 5Lu,Cons 4az [102] 2V3] +step a=fromList [1,10] seen=["yFt","26J","3So","3rG","4FR","4LX","4gP","4vC","58U"] rest=fromList [Cons 4az [102] 2V3,One 5Lu [1]] +step a=fromList [1,10] seen=["yFt","26J","3So","3rG","4FR","4LX","4az","4gP","4vC","58U"] rest=fromList [One 5Lu [1],Cons 2V3 [103] 5pS] +step a=fromList [10] seen=["yFt","26J","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu"] rest=fromList [Cons 2V3 [103] 5pS] +step a=fromList [10] seen=["yFt","26J","2V3","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu"] rest=fromList [Cons 5pS [104] 2tq] +step a=fromList [10] seen=["yFt","26J","2V3","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu","5pS"] rest=fromList [Cons 2tq [105] 5Lu] +step a=fromList [10] seen=["yFt","26J","2V3","2tq","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu","5pS"] rest=fromList [One 5Lu [1]] +step a=fromList [10] seen=["yFt","26J","2V3","2tq","3So","3rG","4FR","4LX","4az","4gP","4vC","58U","5Lu","5pS"] rest=fromList [] +Unsatisfied (fromList [10]) + +λ> runIdentity Unison.Test.Codebase.Causal.result (with c2) +step a=fromList [1,10] seen=[] rest=fromList [Cons 2tg [999] 3AW] +step a=fromList [1,10] seen=["2tg"] rest=fromList [Cons 3AW [998] 33b] +step a=fromList [1,10] seen=["2tg","3AW"] rest=fromList [Cons 33b [10] 2NF] +step a=fromList [1] seen=["2tg","33b","3AW"] rest=fromList [Cons 2NF [9] 57i] +step a=fromList [1] seen=["2NF","2tg","33b","3AW"] rest=fromList [Cons 57i [8] ipV] +step a=fromList [1] seen=["2NF","2tg","33b","3AW","57i"] rest=fromList [Cons ipV [7] 3BZ] +step a=fromList [1] seen=["ipV","2NF","2tg","33b","3AW","57i"] rest=fromList [Cons 3BZ [6] 58U] +step a=fromList [1] seen=["ipV","2NF","2tg","33b","3AW","3BZ","57i"] rest=fromList [Cons 58U [5] 4vC] +step a=fromList [1] seen=["ipV","2NF","2tg","33b","3AW","3BZ","57i","58U"] rest=fromList [Cons 4vC [4] yFt] +step a=fromList [1] 
seen=["ipV","2NF","2tg","33b","3AW","3BZ","4vC","57i","58U"] rest=fromList [Cons yFt [3] 3So] +step a=fromList [1] seen=["ipV","yFt","2NF","2tg","33b","3AW","3BZ","4vC","57i","58U"] rest=fromList [Cons 3So [2] 5Lu] +step a=fromList [1] seen=["ipV","yFt","2NF","2tg","33b","3AW","3BZ","3So","4vC","57i","58U"] rest=fromList [One 5Lu [1]] +Satisfied (fromList []) +λ> + +-} + +test :: Test () +test = + scope "causal" + . tests + $ [ scope "threeWayMerge.ex1" + . expect + $ Causal.head testThreeWay + == Set.fromList [3, 4] + , scope "threeWayMerge.idempotent" + . expect + $ testIdempotent oneCausal -- == oneCausal + -- $ prop_mergeIdempotent + + , scope "threeWayMerge.identity" + . expect + $ testIdentity oneCausal emptyCausal + -- $ prop_mergeIdentity + , scope "threeWayMerge.commutative" + . expect + $ testCommutative (Set.fromList [3,4]) oneRemoved + -- $ prop_mergeCommutative + {- , scope "threeWayMerge.commonAncestor" + . expect + $ testCommonAncestor + -- $ prop_mergeCommonAncestor --} + , scope "lca.hasLca" lcaPairTest + , scope "lca.noLca" noLcaPairTest + , scope "beforeHash" $ beforeHashTests + ] + +beforeHashTests :: Test () +beforeHashTests = do + -- c1 and c2 have unrelated histories + c1 <- pure $ Causal.one (0 :: Int64) + c2 <- pure $ Causal.one (1 :: Int64) + -- c1' and c2' are extension of c1 and c2, respectively + c1' <- pure $ Causal.cons 2 c1 + c2' <- pure $ Causal.cons 3 c2 + c12 <- Causal.threeWayMerge sillyMerge c1' c2' + + -- verifying basic properties of `before` for these examples + expect' =<< before c1 c1 + expect' =<< before c1 c12 + expect' =<< before c2 c2 + expect' =<< before c2 c12 + expect' =<< before c2 c2' + expect' =<< before c1 c1' + expect' . not =<< before c1 c2 + expect' . not =<< before c2 c1 + + -- make sure the search cutoff works - + -- even though both start with `Causal.one 0`, that's + -- more than 10 steps back from `longCausal 1000`, so we + -- want this to be false + expect' . 
not =<< before c1 (longCausal (1000 :: Int64)) + ok + where + before h c = beforeHash 10 (Causal.currentHash h) c + sillyMerge _lca l _r = pure l + longCausal 0 = Causal.one 0 + longCausal n = Causal.cons n (longCausal (n - 1)) + +int64 :: Test Int64 +int64 = random + +extend + :: Int + -> Causal Identity Hash Int64 + -> Test (Causal Identity Hash Int64) +extend 0 ca = pure ca +extend n ca = do + i <- int64 + extend (n-1) (Causal.cons i ca) + +lcaPair :: Test (Causal Identity Hash Int64, Causal Identity Hash Int64) +lcaPair = do + base <- one <$> int64 + ll <- int' 0 20 + lr <- int' 0 20 + (,) <$> extend ll base <*> extend lr base + +lcaPairTest :: Test () +lcaPairTest = replicateM_ 50 test >> ok + where + test = runIdentity . uncurry Causal.lca <$> lcaPair >>= \case + Just _ -> pure () + Nothing -> crash "expected lca" + +noLcaPair + :: Test (Causal Identity Hash Int64, Causal Identity Hash Int64) +noLcaPair = do + basel <- one <$> int64 + baser <- one <$> int64 + ll <- int' 0 20 + lr <- int' 0 20 + (,) <$> extend ll basel <*> extend lr baser + +noLcaPairTest :: Test () +noLcaPairTest = replicateM_ 50 test >> ok + where + test = runIdentity . 
uncurry Causal.lca <$> noLcaPair >>= \case + Nothing -> pure () + Just _ -> crash "expected no lca" + +oneRemoved :: Causal Identity Hash (Set Int64) +oneRemoved = foldr Causal.cons + (one (Set.singleton 1)) + (Set.fromList <$> [[2, 3, 4], [1, 2, 3, 4], [1, 2]]) + +twoRemoved :: Causal Identity Hash (Set Int64) +twoRemoved = foldr Causal.cons + (one (Set.singleton 1)) + (Set.fromList <$> [[1, 3, 4], [1, 2, 3], [1, 2]]) + +testThreeWay :: Causal Identity Hash (Set Int64) +testThreeWay = runIdentity + $ threeWayMerge' oneRemoved twoRemoved + +setCombine :: Applicative m => Ord a => Set a -> Set a -> m (Set a) +setCombine a b = pure $ a <> b + +setDiff :: Applicative m => Ord a => Set a -> Set a -> m (Set a, Set a) +setDiff old new = pure (Set.difference new old, Set.difference old new) + +setPatch :: Applicative m => Ord a => Set a -> (Set a, Set a) -> m (Set a) +setPatch s (added, removed) = pure (added <> Set.difference s removed) + +-- merge x x == x, should not add a new head, and also the value at the head should be the same of course +testIdempotent :: Causal Identity Hash (Set Int64) -> Bool -- Causal Identity Hash (Set Int64) +testIdempotent causal = + runIdentity (threeWayMerge' causal causal) + == causal + +-- prop_mergeIdempotent :: Bool +-- prop_mergeIdempotent = and (map testIdempotent (take 1000 generateRandomCausals)) + +oneCausal :: Causal Identity Hash (Set Int64) +oneCausal = Causal.one (Set.fromList [1]) + +-- generateRandomCausals :: Causal Identity Hash (Set Int64) +-- generateRandomCausals = undefined + +easyCombine + :: (Monad m, Semigroup d) + => (e -> e -> m e) + -> (e -> e -> m d) + -> (e -> d -> m e) + -> (Maybe e -> e -> e -> m e) +easyCombine comb _ _ Nothing l r = comb l r +easyCombine _ diff appl (Just ca) l r = do + dl <- diff ca l + dr <- diff ca r + appl ca (dl <> dr) + +threeWayMerge' + :: Causal Identity Hash (Set Int64) + -> Causal Identity Hash (Set Int64) + -> Identity (Causal Identity Hash (Set Int64)) +threeWayMerge' = 
Causal.threeWayMerge (easyCombine setCombine setDiff setPatch) + +-- merge x mempty == x, merge mempty x == x +testIdentity :: Causal Identity Hash (Set Int64) -> Causal Identity Hash (Set Int64) -> Bool +testIdentity causal mempty = + (threeWayMerge' causal mempty) + == (threeWayMerge' mempty causal) + +emptyCausal :: Causal Identity Hash (Set Int64) +emptyCausal = one (Set.empty) + +-- merge (cons hd tl) tl == cons hd tl, merge tl (cons hd tl) == cons hd tl +testCommutative :: Set Int64 -> Causal Identity Hash (Set Int64) -> Bool +testCommutative hd tl = (threeWayMerge' (Causal.cons hd tl) tl) + == (threeWayMerge' tl (Causal.cons hd tl)) + + +{- +testCommonAncestor :: +testCommonAncestor = +-} + + + +-- [ scope "foldHistoryUntil" . expect $ execState c mempty == Set.fromList [3,2,1]] + +--result :: M (Causal.FoldHistoryResult (Set Int64)) +--result = Causal.foldHistoryUntil f (Set.fromList [10, 1]) =<< c2 where +-- f s e = let s' = Set.difference s (Set.fromList e) in (s', Set.null s') + +result, result2 :: M (Causal.FoldHistoryResult (Set Int64)) +(result, result2) = + (Causal.foldHistoryUntil f (Set.fromList [10, 1]) =<< (do c' <- c; put mempty ; pure c') + ,Causal.foldHistoryUntil f (Set.fromList [10, 1]) =<< (do c' <- c2; put mempty ; pure c')) + where f s e = let s' = Set.difference s (Set.fromList e) in (s', Set.null s') + +---- special cons and merge that mess with state monad for logging +type M = State [[Int64]] +cons :: [Int64] + -> Causal M h [Int64] + -> Causal M h [Int64] + +merge :: Causal M h [Int64] + -> Causal M h [Int64] + -> M (Causal M h [Int64]) + +(cons, merge) = (cons'' pure, merge'' pure) + where + pure :: Causal m h [Int64] -> M (Causal m h [Int64]) + pure c = state (\s -> (c, Causal.head c : s)) + +cons'' :: Hashable e1 + => (Causal m1 h e2 -> m2 (Causal m2 h e1)) + -> e1 -> Causal m1 h e2 -> Causal m2 h e1 +cons'' pure e tl = + Cons (RawHash $ Causal.hash [Causal.hash e, unRawHash . 
currentHash $ tl]) e (currentHash tl, pure tl) + +merge'' :: (Monad m, Semigroup e) + => (Causal m h e -> m (Causal m h e)) + -> Causal m h e -> Causal m h e -> m (Causal m h e) +merge'' pure a b = + ifM (before a b) (pure b) . ifM (before b a) (pure a) $ case (a, b) of + (Merge _ _ tls, Merge _ _ tls2) -> merge0 $ Map.union tls tls2 + (Merge _ _ tls, b) -> merge0 $ Map.insert (currentHash b) (pure b) tls + (b, Merge _ _ tls) -> merge0 $ Map.insert (currentHash b) (pure b) tls + (a, b) -> + merge0 $ Map.fromList [(currentHash a, pure a), (currentHash b, pure b)] + where + merge0 m = + let e = if Map.null m + then error "Causal.merge0 empty map" + else foldl1' (liftA2 (<>)) (fmap Causal.head <$> Map.elems m) + h = Causal.hash (Map.keys m) -- sorted order + in e <&> \e -> Merge (RawHash h) e m + diff --git a/parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs b/parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs new file mode 100644 index 0000000000..147477b48c --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs @@ -0,0 +1,48 @@ +module Unison.Test.Codebase.FileCodebase where + +import EasyTest +import Unison.Codebase.FileCodebase.Common (encodeFileName, decodeFileName) +import qualified Data.Set as Set +import Data.Char as Char +import Data.Foldable (toList) + +test :: Test () +test = scope "FileCodebase" . tests $ + [ scope "encode/decodeFileName" . tests $ + [ encodeDecode "abc" + , encodeDecode "👍" + , encodeDecode "\xfff" + , tests $ encodeDecode . (:[]) <$> ['!'..'~'] + , encodeDecode ("Universal." ++ ['!'..'~']) + , specialEncode "." + , specialEncode ".." + , tests $ map specialEncodeChar (toList specificallyBadChars) + , specialEncodeChar '👍' + , specialEncodeChar '\xfff' + ] + ] + +specialEncode :: String -> Test () +specialEncode s = + scope (" " <> s <> " gets special encoding") $ expect (encodeFileName s /= s) + +specialEncodeChar :: Char -> Test () +specialEncodeChar = specialEncode . 
pure + +encodeDecode :: String -> Test () +encodeDecode s = + let e = encodeFileName s + d = decodeFileName e + in scope s $ expect $ d == s && all isSafeChar e + +-- In the past we had considered a much smaller set of safe chars: +-- [0-9,a-z,A-Z,-._] from https://superuser.com/a/748264 +-- Currently we are going by https://superuser.com/a/358861 +isSafeChar :: Char -> Bool +isSafeChar c = Set.notMember c specificallyBadChars + && Char.isPrint c + && Char.isAscii c + +specificallyBadChars :: Set.Set Char +specificallyBadChars = Set.fromList "\\/:*?\"<>|" + diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Path.hs b/parser-typechecker/tests/Unison/Test/Codebase/Path.hs new file mode 100644 index 0000000000..e775fb489f --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Codebase/Path.hs @@ -0,0 +1,66 @@ +{-# LANGUAGE OverloadedStrings, OverloadedLists #-} + +module Unison.Test.Codebase.Path where + +import EasyTest +import Unison.Codebase.Path +import Data.Sequence +import Data.Text +import Unison.NameSegment +import Data.Either +import qualified Unison.HashQualified' as HQ' +import qualified Unison.ShortHash as SH + +test :: Test () +test = scope "path" . tests $ + [ scope "parsePathImpl'" . tests $ + [ let s = "foo.bar.baz.34" in scope s . expect $ parsePathImpl' s == Right (relative ["foo","bar","baz"], "34") + , let s = "foo.bar.baz" in scope s . expect $ parsePathImpl' s == Right (relative ["foo", "bar"], "baz") + , let s = "baz" in scope s . expect $ parsePathImpl' s == Right (relative [], "baz") + , let s = "-" in scope s . expect $ parsePathImpl' s == Right (relative [], "-") + , let s = "34" in scope s . pending . expect $ parsePathImpl' s == Right (relative [], "34") + , let s = "foo.bar.baz#a8fj" in scope s . expect $ isLeft $ parsePathImpl' s + ] + , scope "parseSplit'" . tests $ + [ scope "wordyNameSegment" . tests $ + [ let s = "foo.bar.baz" in scope s . 
expect $ + parseSplit' wordyNameSegment s == Right (relative ["foo", "bar"], NameSegment "baz") + + , let s = "foo.bar.baz#abc" in scope s . expect $ isLeft $ parseSplit' wordyNameSegment s + + , let s = "foo.bar.+" in scope s . expect $ + isLeft $ parseSplit' wordyNameSegment s + ] + + , scope "definitionNameSegment" . tests $ + [ let s = "foo.bar.+" in scope s . expect $ + parseSplit' definitionNameSegment s == Right (relative ["foo", "bar"], NameSegment "+") + ] + ] + , scope "parseShortHashOrHQSplit'" . tests $ + [ let s = "foo.bar#34" in scope s . expect $ + parseShortHashOrHQSplit' s == + (Right . Right) + (relative ["foo"], HQ'.HashQualified (NameSegment "bar") (SH.unsafeFromText "#34")) + + , let s = "foo.bar.+" in scope s . expect $ + parseShortHashOrHQSplit' s == + (Right . Right) + (relative ["foo", "bar"], HQ'.NameOnly (NameSegment "+")) + + , let s = "#123" in scope s . expect $ + parseShortHashOrHQSplit' s == + (Right . Left) (SH.unsafeFromText "#123") + ] + , scope "parseHQ'Split'" . tests $ + [ let s = "foo.bar#34" in scope s . expect $ + parseHQSplit' s == Right (relative ["foo"], HQ'.HashQualified (NameSegment "bar") (SH.unsafeFromText "#34")) + , let s = "foo.bar.+" in scope s . expect $ + parseHQSplit' s == Right (relative ["foo", "bar"], HQ'.NameOnly (NameSegment "+")) + , let s = "#123" in scope s . expect $ isLeft $ parseHQSplit' s + ] + ] + + +relative :: Seq Text -> Path' +relative = Path' . Right . Relative . Path . 
fmap NameSegment diff --git a/parser-typechecker/tests/Unison/Test/ColorText.hs b/parser-typechecker/tests/Unison/Test/ColorText.hs new file mode 100644 index 0000000000..55375a9728 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/ColorText.hs @@ -0,0 +1,84 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE QuasiQuotes #-} +module Unison.Test.ColorText where + +-- import EasyTest +import qualified Data.Map as Map +import EasyTest +import Text.RawString.QQ +import Unison.Lexer (Pos (..)) +import Unison.Util.AnnotatedText (AnnotatedExcerpt (..), + condensedExcerptToText, markup) +import Unison.Util.ColorText (Color (..), toANSI) +import qualified Unison.Util.ColorText as ColorText +import Unison.Util.Range (Range (..)) + +test :: Test () +test = scope "colortext" . tests $ [ + -- commented out because they don't render exactly the same escape sequences, but they're equivalent4 as of this writing + -- scope "inclusive-exclusive range" . expect . trace ("ex4e: " ++ show (rawRender ex4e) ++ "\n" ++ "ex4t: " ++ show (rawRender ex4t) ++ "\n")$ ex4e == ex4t + ] + +ex4e :: String +ex4e = toANSI . condensedExcerptToText 1 $ markup "abc" m + where m = Map.singleton (Range (Pos 1 2) (Pos 1 3)) Red + +ex4t :: String +ex4t = toANSI $ " 1 | " <> "a" <> ColorText.style Red "b" <> "c" <> "\n" + + +ex2 :: AnnotatedExcerpt Color +ex2 = markup ex (Map.fromList + [ (Range (Pos 3 1) (Pos 3 5), Red) -- SCENE + , (Range (Pos 5 9) (Pos 5 14), Blue) -- Master + , (Range (Pos 5 22) (Pos 5 30), Blue) -- Boatswain + , (Range (Pos 25 1) (Pos 25 6), Red) -- ALONSO + , (Range (Pos 12 30) (Pos 13 27), Green) -- fall ... aground. + ]) + +renderEx2 :: String +renderEx2 = toANSI . condensedExcerptToText 3 $ ex2 + +ex3 :: AnnotatedExcerpt Color +ex3 = markup "Hello, world!" $ Map.fromList + [ (Range (Pos 1 8) (Pos 1 12), Blue) + , (Range (Pos 1 1) (Pos 1 5), Green) ] + +ex4 :: AnnotatedExcerpt Color +ex4 = markup "Hello,\nworld!" 
$ Map.fromList + [ (Range (Pos 2 1) (Pos 2 5), Blue) + , (Range (Pos 1 1) (Pos 1 5), Green) ] + +ex :: Ord a => AnnotatedExcerpt a +ex = [r|The Tempest | Act 1, Scene 1 + +SCENE I. On a ship at sea: a tempestuous noise +of thunder and lightning heard. +Enter a Master and a Boatswain + +Master +Boatswain! +Boatswain +Here, master: what cheer? +Master +Good, speak to the mariners: fall to't, yarely, +or we run ourselves aground: bestir, bestir. +Exit + +Enter Mariners + +Boatswain +Heigh, my hearts! cheerly, cheerly, my hearts! +yare, yare! Take in the topsail. Tend to the +master's whistle. Blow, till thou burst thy wind, +if room enough! +Enter ALONSO, SEBASTIAN, ANTONIO, FERDINAND, GONZALO, and others + +ALONSO +Good boatswain, have care. Where's the master? +Play the men. +Boatswain +I pray now, keep below. +|] + +-- test = scope "colortext.snipWithContext" . expect $ diff --git a/parser-typechecker/tests/Unison/Test/Common.hs b/parser-typechecker/tests/Unison/Test/Common.hs new file mode 100644 index 0000000000..2c078fdb3b --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Common.hs @@ -0,0 +1,75 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Test.Common + ( hqLength + , t + , tm + , parseAndSynthesizeAsFile + , parsingEnv + ) where + +import Data.Sequence (Seq) +import qualified Data.Text as Text +import qualified Unison.Builtin as B +import qualified Unison.FileParsers as FP +import Unison.Parser (Ann(..)) +import Unison.PrintError ( prettyParseError ) +import Unison.Result (Result, Note) +import Unison.Symbol (Symbol) +import Unison.Var (Var) +import Unison.UnisonFile (TypecheckedUnisonFile) +import qualified Unison.ABT as ABT +import qualified Unison.Lexer as L +import qualified Unison.Parser as Parser +import qualified Unison.Term as Term +import qualified Unison.TermParser as TermParser +import qualified Unison.Type as Type +import qualified Unison.TypeParser as TypeParser +import qualified Unison.Util.Pretty as Pr +import qualified 
Text.Megaparsec.Error as MPE +import qualified Unison.Names3 + + +type Term v = Term.Term v Ann +type Type v = Type.Type v Ann + +hqLength :: Int +hqLength = 10 + +t :: String -> Type Symbol +t s = ABT.amap (const Intrinsic) + -- . either (error . show ) id + -- . Type.bindSomeNames B.names0 + . either (error . showParseError s) tweak + $ Parser.run (Parser.root TypeParser.valueType) s parsingEnv + where tweak = Type.generalizeLowercase mempty + +tm :: String -> Term Symbol +tm s = either (error . show) id + -- . Term.bindSomeNames mempty B.names0 + -- . either (error . showParseError s) id + $ Parser.run (Parser.root TermParser.term) s parsingEnv + +showParseError :: Var v + => String + -> MPE.ParseError (L.Token L.Lexeme) (Parser.Error v) + -> String +showParseError s = Pr.toANSI 60 . prettyParseError s + +parseAndSynthesizeAsFile + :: Var v + => [Type v] + -> FilePath + -> String + -> Result + (Seq (Note v Ann)) + (Either Unison.Names3.Names0 (TypecheckedUnisonFile v Ann)) +parseAndSynthesizeAsFile ambient filename s = FP.parseAndSynthesizeFile + ambient + (\_deps -> pure B.typeLookup) + parsingEnv + filename + (Text.pack s) + +parsingEnv :: Parser.ParsingEnv +parsingEnv = Parser.ParsingEnv mempty B.names diff --git a/parser-typechecker/tests/Unison/Test/DataDeclaration.hs b/parser-typechecker/tests/Unison/Test/DataDeclaration.hs new file mode 100644 index 0000000000..40824ecb50 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/DataDeclaration.hs @@ -0,0 +1,121 @@ +{-# LANGUAGE OverloadedStrings, QuasiQuotes #-} + +module Unison.Test.DataDeclaration where + +import qualified Data.Map as Map +import Data.Map ( Map, (!) 
) +import EasyTest +import Text.RawString.QQ +import qualified Unison.DataDeclaration as DD +import Unison.DataDeclaration ( DataDeclaration(..), Decl, hashDecls ) +import qualified Unison.Hash as Hash +import Unison.Parser ( Ann ) +import Unison.Parsers ( unsafeParseFile ) +import qualified Unison.Reference as R +import Unison.Symbol ( Symbol ) +import qualified Unison.Test.Common as Common +import qualified Unison.Type as Type +import Unison.UnisonFile ( UnisonFile(..) ) +import qualified Unison.Var as Var + +test :: Test () +test = scope "datadeclaration" $ + let Right hashes = hashDecls . (snd <$>) . dataDeclarationsId $ file + hashMap = Map.fromList $ fmap (\(a,b,_) -> (a,b)) hashes + hashOf k = Map.lookup (Var.named k) hashMap + in tests [ + scope "Bool == Bool'" . expect $ hashOf "Bool" == hashOf "Bool'", + scope "Bool != Option'" . expect $ hashOf "Bool" /= hashOf "Option'", + scope "Option == Option'" . expect $ hashOf "Option" == hashOf "Option'", + scope "List == List'" . expect $ hashOf "List" == hashOf "List'", + scope "List != SnocList" . expect $ hashOf "List" /= hashOf "SnocList", + scope "Ping != Pong" . expect $ hashOf "Ping" /= hashOf "Pong", + scope "Ping == Ling'" . expect $ hashOf "Ping" == hashOf "Ling'", + scope "Pong == Long'" . expect $ hashOf "Pong" == hashOf "Long'", + scope "unhashComponent" unhashComponentTest + ] + +file :: UnisonFile Symbol Ann +file = flip unsafeParseFile Common.parsingEnv $ [r| + +type Bool = True | False +type Bool' = False | True + +type Option a = Some a | None +type Option' b = Nothing | Just b + +type List a = Nil | Cons a (List a) +type List' b = Prepend b (List' b) | Empty +type SnocList a = Snil | Snoc (List a) a + +type ATree a = Tree a (List (ATree a)) | Leaf (Option a) + +type Ping a = Ping a (Pong a) +type Pong a = Pnong | Pong (Ping a) + +type Long' a = Long' (Ling' a) | Lnong +type Ling' a = Ling' a (Long' a) +|] + + +-- faketest = scope "termparser" . tests . 
map parses $ +-- ["x" +-- , "match x with\n" ++ +-- " {Pair x y} -> 1\n" ++ +-- " {State.set 42 -> k} -> k 42\n" +-- ] +-- +-- builtins = Map.fromList +-- [("Pair", (R.Builtin "Pair", 0)), +-- ("State.set", (R.Builtin "State", 0))] +-- +-- parses s = scope s $ do +-- let p = unsafeParseTerm s builtins :: Term Symbol +-- noteScoped $ "parsing: " ++ s ++ "\n " ++ show p +-- ok + +unhashComponentTest :: Test () +unhashComponentTest = tests + [ scope "invented-vars-are-fresh" inventedVarsFreshnessTest + ] + where + inventedVarsFreshnessTest = + let + var = Type.var () + app = Type.app () + forall = Type.forall () + (-->) = Type.arrow () + h = Hash.unsafeFromBase32Hex "abcd" + ref = R.Derived h 0 1 + a = Var.refNamed ref + b = Var.named "b" + nil = Var.named "Nil" + cons = Var.refNamed ref + listRef = ref + listType = Type.ref () listRef + listDecl = DataDeclaration { + modifier = DD.Structural, + annotation = (), + bound = [], + constructors' = + [ ((), nil, forall a (listType `app` var a)) + , ((), cons, forall b (var b --> listType `app` var b --> listType `app` var b)) + ] + } + component :: Map R.Reference (Decl Symbol ()) + component = Map.singleton listRef (Right listDecl) + component' :: Map R.Reference (Symbol, Decl Symbol ()) + component' = DD.unhashComponent component + (listVar, Right listDecl') = component' ! listRef + listType' = var listVar + constructors = Map.fromList $ DD.constructors listDecl' + nilType' = constructors ! nil + z = Var.named "z" + in tests + [ -- check that `nil` constructor's type did not collapse to `forall a. 
a a`, + -- which would happen if the var invented for `listRef` was simply `Var.refNamed listRef` + expectEqual (forall z (listType' `app` var z)) nilType' + , -- check that the variable assigned to `listRef` is different from `cons`, + -- which would happen if the var invented for `listRef` was simply `Var.refNamed listRef` + expectNotEqual cons listVar + ] diff --git a/parser-typechecker/tests/Unison/Test/FileParser.hs b/parser-typechecker/tests/Unison/Test/FileParser.hs new file mode 100644 index 0000000000..f45a6298a6 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/FileParser.hs @@ -0,0 +1,136 @@ +{-# Language BangPatterns, OverloadedStrings #-} + +module Unison.Test.FileParser where + + import EasyTest + import Data.List (uncons) + import Data.Set (elems) + import qualified Text.Megaparsec.Error as MPE + import Unison.FileParser (file) + import qualified Unison.Parser as P + import Unison.Parsers (unsafeGetRightFrom, unsafeParseFileBuiltinsOnly) + import Unison.Symbol (Symbol) + import Unison.UnisonFile (UnisonFile) + import Unison.Var (Var) + import qualified Unison.Test.Common as Common + + test1 :: Test () + test1 = scope "test1" . tests . 
map parses $ + [ + -- , "type () = ()\n()" + "type Pair a b = Pair a b\n" + , "type Optional a = Just a | Nothing\n" + , unlines + ["type Optional2 a" + ," = Just a" + ," | Nothing\n"] + ------ -- ,unlines + ------ -- ["type Optional a b c where" + ------ -- ," Just : a -> Optional a" + ------ -- ," Nothing : Optional Int"] + ------ -- , unlines + ------ -- ["type Optional" + ------ -- ," a" + ------ -- ," b" + ------ -- ," c where" + ------ -- ," Just : a -> Optional a" + ------ -- ," Nothing : Optional Int"] + , unlines -- NB: this currently fails because we don't have type AST or parser for effect types yet + ["ability State s where" + ," get : {State s} s" + ," set : s -> {State s} ()" + ] + , unlines + ["ping x = pong (x + 1)" + ,"pong x = ping (x - 1)" + ] + ] + + test2 :: Test () + test2 = scope "test2" $ + (io $ unsafeParseFileBuiltinsOnly "unison-src/test1.u") *> ok + + test :: Test () + test = scope "fileparser" . tests $ + [test1 + , emptyWatchTest + , signatureNeedsAccompanyingBodyTest + , emptyBlockTest + , expectedBlockOpenTest + , unknownDataConstructorTest + , unknownAbilityConstructorTest + ] + + expectFileParseFailure :: String -> (P.Error Symbol -> Test ()) -> Test () + expectFileParseFailure s expectation = scope s $ do + let result = P.run (P.rootFile file) s Common.parsingEnv + case result of + Right _ -> crash "Parser succeeded" + Left (MPE.FancyError _ sets) -> + case (fmap (fst) . uncons . 
elems) sets of + Just (MPE.ErrorCustom e) -> expectation e + Just _ -> crash "Error encountered was not custom" + Nothing -> crash "No error found" + Left e -> crash ("Parser failed with an error which was a trivial parser error: " ++ show e) + + emptyWatchTest :: Test () + emptyWatchTest = scope "emptyWatchTest" $ + expectFileParseFailure ">" expectation + where + expectation :: Var e => P.Error e -> Test () + expectation e = case e of + P.EmptyWatch -> ok + _ -> crash "Error wasn't EmptyWatch" + + signatureNeedsAccompanyingBodyTest :: Test () + signatureNeedsAccompanyingBodyTest = scope "signatureNeedsAccompanyingBodyTest" $ + expectFileParseFailure (unlines ["f : Nat -> Nat", "", "g a = a + 1"]) expectation + where + expectation :: Var e => P.Error e -> Test () + expectation e = case e of + P.SignatureNeedsAccompanyingBody _ -> ok + _ -> crash "Error wasn't SignatureNeedsAccompanyingBody" + + emptyBlockTest :: Test () + emptyBlockTest = scope "emptyBlockTest" $ + expectFileParseFailure (unlines ["f a =", "", "> 1 + 1"]) expectation + where + expectation :: Var e => P.Error e -> Test () + expectation e = case e of + P.EmptyBlock _ -> ok + _ -> crash "Error wasn't EmptyBlock" + + expectedBlockOpenTest :: Test () + expectedBlockOpenTest = scope "expectedBlockOpenTest" $ + expectFileParseFailure "f a b = match a b" expectation + where + expectation :: Var e => P.Error e -> Test () + expectation e = case e of + P.ExpectedBlockOpen _ _ -> ok + _ -> crash "Error wasn't ExpectedBlockOpen" + + unknownDataConstructorTest :: Test () + unknownDataConstructorTest = scope "unknownDataConstructorTest" $ + expectFileParseFailure "m a = match a with A -> 1" expectation + where + expectation :: Var e => P.Error e -> Test () + expectation e = case e of + P.UnknownDataConstructor _ _ -> ok + _ -> crash "Error wasn't UnknownDataConstructor" + + unknownAbilityConstructorTest :: Test () + unknownAbilityConstructorTest = scope "unknownAbilityConstructorTest" $ + expectFileParseFailure 
"f e = match e with {E t -> u} -> 1" expectation + where + expectation :: Var e => P.Error e -> Test () + expectation e = case e of + P.UnknownAbilityConstructor _ _ -> ok + _ -> crash "Error wasn't UnknownAbilityConstructor" + + parses :: String -> Test () + parses s = scope s $ do + let + p :: UnisonFile Symbol P.Ann + !p = unsafeGetRightFrom s $ + P.run (P.rootFile file) s Common.parsingEnv + pure p >> ok diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs new file mode 100644 index 0000000000..f938ce75b9 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Git.hs @@ -0,0 +1,523 @@ +{-# Language OverloadedStrings #-} +{-# Language QuasiQuotes #-} + +module Unison.Test.Git where + +import EasyTest +import Data.List (intercalate) +import Data.List.Split (splitOn) +import qualified Data.Sequence as Seq +import Data.String.Here (iTrim) +import Unison.Prelude +import qualified Data.Text as Text +import qualified System.IO.Temp as Temp +import Shellmet () +import System.FilePath (()) +import System.Directory (doesFileExist, removeDirectoryRecursive, removeFile) + +import Unison.Codebase (BuiltinAnnotation, Codebase, CodebasePath) +import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.FileCodebase as FC +import qualified Unison.Codebase.Serialization.V1 as V1 +import qualified Unison.Codebase.SyncMode as SyncMode +import qualified Unison.Codebase.TranscriptParser as TR +import Unison.Codebase.Path (Path(..)) +import Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex as SlimCopyRegenerateIndex +import Unison.Codebase.FileCodebase.Common (SyncToDir, formatAnn) +import Unison.Parser (Ann) +import Unison.Symbol (Symbol) +import qualified Unison.Util.Cache as Cache +import Unison.Var (Var) + +test :: Test () +test = scope "git" . 
tests $ + [ testPull + , testPush + , syncComplete + , syncTestResults + ] + +traceTranscriptOutput :: Bool +traceTranscriptOutput = False + +-- | make sure that a definition present in the target dir doesn't prevent +-- syncing of its dependencies +syncComplete :: Test () +syncComplete = scope "syncComplete" $ do + tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "syncComplete" + + targetDir <- io $ Temp.createTempDirectory tmp "target" + let + delete = io . traverse_ removeFile . fmap (targetDir ) + observe title expectation files = scope title . for_ files $ \path -> + scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expectation + + cache <- io Cache.nullCache + codebase <- io $ snd <$> initCodebase cache tmp "codebase" + + runTranscript_ tmp codebase cache [iTrim| +```ucm:hide +.builtin> alias.type ##Nat Nat +.builtin> alias.term ##Nat.+ Nat.+ +``` +```unison +pushComplete.a.x = 3 +pushComplete.b.c.y = x + 1 +``` +```ucm +.> add +.> history pushComplete.b +``` +|] + + -- sync pushComplete.b to targetDir + -- observe that pushComplete.b.c and x exist + b <- io (Codebase.getRootBranch codebase) + >>= either (crash.show) + (pure . Branch.getAt' (Path $ Seq.fromList ["pushComplete", "b"] )) + io $ Codebase.syncToDirectory codebase targetDir SyncMode.ShortCircuit b + observe "initial" expect files + + -- delete pushComplete.b.c (#5lk9autjd5) + -- delete x (#msp7bv40rv) + -- observe that pushComplete.b.c and x are now gone + delete files + observe "deleted" (expect . not) files + + -- sync again with ShortCircuit + -- observe that pushComplete.b.c and x are still missing. + -- `c` is short-circuited at `b`, and `x` is short-circuited + -- at both `pushComplete` and `y`. + io $ Codebase.syncToDirectory codebase targetDir SyncMode.ShortCircuit b + observe "short-circuited" (expect . 
not) files + + -- sync again with Complete + -- observe that pushComplete.b.c and x are back + io $ Codebase.syncToDirectory codebase targetDir SyncMode.Complete b + observe "complete" expect files + + -- if we haven't crashed, clean up! + io $ removeDirectoryRecursive tmp + + where + files = + [ ".unison/v1/paths/5lk9autjd5911i8m52vsvf3si8ckino03gqrks1fokd9lf9kvc4id9gmuudjk4q06j3rkhi83o9g47mde5amchc1leqlskjs391m7fg.ub" + , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/type.ub" + , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/compiled.ub" + ] + +syncTestResults :: Test () +syncTestResults = scope "syncTestResults" $ do + -- put all our junk into here + tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "syncTestResults" + + targetDir <- io $ Temp.createTempDirectory tmp "target" + cache <- io Cache.nullCache + codebase <- io $ snd <$> initCodebase cache tmp "codebase" + + runTranscript_ tmp codebase cache [iTrim| +```ucm +.> builtins.merge +``` +```unison +test> tests.x = [Ok "Great!"] +``` +```ucm +.> add +``` +|] + +{- + .> history tests + ⊙ #0bnfrk7cu4 + .> debug.file + tests.x#2c2hpa2jm1 + .> +-} + + b <- io (Codebase.getRootBranch codebase) >>= \case + Left e -> crash $ show e + Right b -> pure b + + io $ Codebase.syncToDirectory codebase targetDir SyncMode.ShortCircuit + (Branch.getAt' (Path $ pure "tests") b) + + scope "target-should-have" $ + for targetShouldHave $ \path -> + scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expect + + -- if we haven't crashed, clean up! 
+ io $ removeDirectoryRecursive tmp + where + targetShouldHave = + [ ".unison/v1/paths/0bnfrk7cu44q0vvaj7a0osl90huv6nj01nkukplcsbgn3i09h6ggbthhrorm01gpqc088673nom2i491fh9rtbqcc6oud6iqq6oam88.ub" + , ".unison/v1/terms/#2c2hpa2jm1101sq10k4jqhpmv5cvvgtqm8sf9710kl8mlrum5b6i2d0rdtrrpg3k1ned5ljna1rvomjte7rcbpd9ouaqcsit1n1np3o/type.ub" + , ".unison/v1/terms/#2c2hpa2jm1101sq10k4jqhpmv5cvvgtqm8sf9710kl8mlrum5b6i2d0rdtrrpg3k1ned5ljna1rvomjte7rcbpd9ouaqcsit1n1np3o/compiled.ub" + , ".unison/v1/watches/test/#2c2hpa2jm1101sq10k4jqhpmv5cvvgtqm8sf9710kl8mlrum5b6i2d0rdtrrpg3k1ned5ljna1rvomjte7rcbpd9ouaqcsit1n1np3o.ub" + ] + +-- goal of this test is to make sure that pull doesn't grab a ton of unneeded +-- dependencies +testPull :: Test () +testPull = scope "pull" $ do + branchCache <- io $ Branch.boundedCache 4096 + -- let's push a broader set of stuff, pull a narrower one (to a fresh codebase) + -- and verify that we have the definitions we expected and don't have some of + -- the ones we didn't expect. 
+ + -- put all our junk into here + tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" + + -- initialize author and user codebases + authorCodebase <- io $ snd <$> initCodebase branchCache tmp "author" + (userDir, userCodebase) <- io $ initCodebase branchCache tmp "user" + + -- initialize git repo + let repo = tmp "repo.git" + io $ "git" ["init", "--bare", Text.pack repo] + + -- run author/push transcript + runTranscript_ tmp authorCodebase branchCache [iTrim| +```ucm:hide +.builtin> alias.type ##Nat Nat +.builtin> alias.term ##Nat.+ Nat.+ +``` +```unison +unique type outside.A = A Nat +unique type outside.B = B Nat Nat +outside.c = 3 +outside.d = 4 + +unique type inside.X = X outside.A +inside.y = c + c +``` +```ucm +.myLib> debug.file +.myLib> add +.myLib> push ${repo} +``` +|] + + -- check out the resulting repo so we can inspect it + io $ "git" ["clone", Text.pack repo, Text.pack $ tmp "repo" ] + + scope "git-should-have" $ + for gitShouldHave $ \path -> + scope (makeTitle path) $ io (doesFileExist $ tmp "repo" path) >>= expect + + -- run user/pull transcript + runTranscript_ tmp userCodebase branchCache [iTrim| +```ucm:hide +.builtin> alias.type ##Nat Nat +.builtin> alias.term ##Nat.+ Nat.+ +``` +```ucm +.yourLib> pull ${repo}:.inside +``` + |] + + -- inspect user codebase + scope "user-should-have" $ + for userShouldHave $ \path -> + scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect + scope "user-should-not-have" $ -- this definitely won't pass with current implementation + for userShouldNotHave $ \path -> + scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect . not + + -- if we haven't crashed, clean up! 
+ io $ removeDirectoryRecursive tmp + + where + gitShouldHave = userShouldHave ++ userShouldNotHave ++ + [ ".unison/v1/paths/p8ahoj90hkdjpvlcu60f6ks7q2is1uqbn1e74k5qn4jt1qmrhk0a62e9b2gamm6qmjdii478la2fha5pnnuvhit2b1mp439od7mrqmg.ub" + ] + userShouldHave = + [ ".unison/v1/type-mentions-index/_builtin/Nat/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" + , ".unison/v1/type-mentions-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo" + , ".unison/v1/type-mentions-index/_builtin/Nat/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" + , ".unison/v1/type-mentions-index/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" + , ".unison/v1/type-mentions-index/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" + , ".unison/v1/type-mentions-index/#2po5mnhi28fbs9fecf4ceq4q9htbfcgkl3ljnkhmhq30ec7m5h77fpl1ec96it21690ju6gnhkj8sqr2entn0cu1gfvl8rfddohk6ug/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" + , ".unison/v1/type-mentions-index/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" + , ".unison/v1/type-mentions-index/#k1lik85h1sgcpqura4riuipjq3mtkkuu5slida6q2lkg028fd7jn12kufrk2sqrtbftq3snteeh8l9o984mhnurmo3arr5j4d7hg5oo/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" + , 
".unison/v1/types/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/compiled.ub" + , ".unison/v1/types/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0/compiled.ub" + , ".unison/v1/dependents/_builtin/Nat.+/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" + , ".unison/v1/dependents/_builtin/Nat/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" + , ".unison/v1/dependents/_builtin/Nat/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0" + , ".unison/v1/dependents/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo" + , ".unison/v1/dependents/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0" + , ".unison/v1/dependents/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" + , ".unison/v1/terms/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg/type.ub" + , ".unison/v1/terms/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg/compiled.ub" + , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/type.ub" + , ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/compiled.ub" + , 
".unison/v1/type-index/_builtin/Nat/#omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg" + , ".unison/v1/type-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo" + , ".unison/v1/type-index/#2po5mnhi28fbs9fecf4ceq4q9htbfcgkl3ljnkhmhq30ec7m5h77fpl1ec96it21690ju6gnhkj8sqr2entn0cu1gfvl8rfddohk6ug/#p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0#d0" + , ".unison/v1/type-index/#k1lik85h1sgcpqura4riuipjq3mtkkuu5slida6q2lkg028fd7jn12kufrk2sqrtbftq3snteeh8l9o984mhnurmo3arr5j4d7hg5oo/#19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0#d0" + , ".unison/v1/paths/esvotl1kr2aqo4tkq7p6lp2chkepmg7n3im1t6hqgd93slk97kops8idp7fj7i57pakvg6lhk0efsco6s2vvtql0jffomm8tvngogd0.ub" + , ".unison/v1/paths/ucnhqspklepn3ihu1o3ph2or9hsrhcpoav93v4gi1v97ttoc2vuup173mcophp8r90r0j3k5mg2knlqr85gdq1dseh8mt5t94c4am4o.ub" + ] + userShouldNotHave = + [ ".unison/v1/type-mentions-index/_builtin/Nat/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" + , ".unison/v1/type-mentions-index/_builtin/Nat/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg" + , ".unison/v1/type-mentions-index/#ap7kd0rc80kp7vjosb0im9j365kgbqhqhj3fv4ufs7bv5b3ed0d4jleqqulu74lj60fuht1oqr117u17jnp1ql8te67vjit95p7k80o/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" + , ".unison/v1/type-mentions-index/#7krpfrn5gm7m3beiho9jmar3dojnj7mrksnjbmh8i0p9hbmekqv21kqrtsr5lq4rr4n0sako6e7lmt8k2a39senua9efjfo7214s3q8/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" + , 
".unison/v1/type-mentions-index/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" + , ".unison/v1/types/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58/compiled.ub" + , ".unison/v1/dependents/_builtin/Nat/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg" + , ".unison/v1/dependents/_builtin/Nat/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58" + , ".unison/v1/terms/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg/type.ub" + , ".unison/v1/terms/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg/compiled.ub" + , ".unison/v1/type-index/_builtin/Nat/#52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg" + , ".unison/v1/type-index/#ap7kd0rc80kp7vjosb0im9j365kgbqhqhj3fv4ufs7bv5b3ed0d4jleqqulu74lj60fuht1oqr117u17jnp1ql8te67vjit95p7k80o/#aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58#d0" + , ".unison/v1/paths/000fqlrbs84nui3o3sp04s32vsbq39iv9foqvs4c38ajki3re86v72s0j5deqtcdqqml9r8e50lcmld2j8ncj7a1fqnqb4pvcaphcu0.ub" + , ".unison/v1/paths/d8ercjm1ol1htu82nmr37ejru1lt7lrl03d5j0u0dp0g2a98nl6n8abdjpf2jkvjuoq4u2qrhn99ps6fiqqn60b0tni7nkp7o593sr0.ub" + , ".unison/v1/paths/bih5ebeug86npp1n0mp51vi7a902ma6m1r3s1ehhfhpc0m71le2fdge8nftte5fuambfo2r753bjnguq5e3p6mip7incmghkho643pg.ub" + ] +-- path "[inside]." 
esvotl1kr2aqo4tkq7p6lp2chkepmg7n3im1t6hqgd93slk97kops8idp7fj7i57pakvg6lhk0efsco6s2vvtql0jffomm8tvngogd0 +-- path "[inside].X" ucnhqspklepn3ihu1o3ph2or9hsrhcpoav93v4gi1v97ttoc2vuup173mcophp8r90r0j3k5mg2knlqr85gdq1dseh8mt5t94c4am4o.ub +-- type outside.A #19lkp9i61r793nmuup58b7g3ssmdip8e76ko3n1r0pjk4ld71euh2prdidhnllkt5lsk0tnpn8lv44t7h7q04eeaqvvh75dg4gi77h0 +-- type outside.B #aocoefu4taepnvd3gsbtgo5rc6a5oa109e0mfqjfg91m422he1m6nugnq1hb4nedvh32r244v6t0a7jq8k30nt92109466udv78cf58 +-- outside.c #msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo +-- outside.d #52addbrohuv4kimiv8n6v00vsv46g3pig4imoor34lojgla9bo2tdcumh07pasuo4lmfnab53s1ulj9toai7963spt2jkk5h1qfdnlg +-- type inside.X #p8f8gc2lehvr6ddq6ggittuo3t330q2pkou9gr1408r7o7r33is5cffstl5p916rbui2sa53iqnppsgsuskgodvd5003550roflmvn0 +-- inside.y #omqnfettvjqrjmpl2mn7s30g94gogjjoi6hd3ob6394r71mkidbg0kqtgtbkjkmhbqvipqed9ql4b0o7kp68c560e3onb0v3lbv6bjg +-- paths: esvot|ucnhq +-- want: A, c, X, y: 19lkp|msp7b|p8f8g|omqnf +-- no: B, d: aocoe|52add| + +-- initialize a fresh codebase +initCodebaseDir :: Branch.Cache IO -> FilePath -> String -> IO CodebasePath +initCodebaseDir branchCache tmpDir name = fst <$> initCodebase branchCache tmpDir name + +initCodebase :: Branch.Cache IO -> FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) +initCodebase branchCache tmpDir name = do + let codebaseDir = tmpDir name + c <- FC.initCodebase branchCache codebaseDir + pure (codebaseDir, c) + +-- run a transcript on an existing codebase +runTranscript_ :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> Branch.Cache IO -> String -> m () +runTranscript_ tmpDir c branchCache transcript = do + let configFile = tmpDir ".unisonConfig" + -- transcript runner wants a "current directory" for I guess writing scratch files? 
+ let cwd = tmpDir "cwd" + let err err = error $ "Parse error: \n" <> show err + + -- parse and run the transcript + flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> + void . liftIO $ TR.run Nothing cwd configFile stanzas c branchCache >>= + when traceTranscriptOutput . traceM . Text.unpack + +-- goal of this test is to make sure that push works correctly: +-- the destination should contain the right definitions from the namespace, +-- unnamed transitive dependencies (terms and types), +-- dependents, type, and type mentions indices. +testPush :: Test () +testPush = scope "push" $ do + branchCache <- io $ Branch.boundedCache 4096 + tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-push" + + -- initialize a fresh codebase named "c" + (codebasePath, c) <- io $ initCodebase branchCache tmp "c" + + -- Run the "setup transcript" to do the adds and updates; everything short of + -- pushing. + runTranscript_ tmp c branchCache setupTranscript + + -- now we'll try pushing multiple ways. + for_ pushImplementations $ \(implName, impl) -> scope implName $ do + -- initialize git repo + let repoGit = tmp (implName ++ ".git") + io $ "git" ["init", "--bare", Text.pack repoGit] + + -- push one way! + codebase <- io $ FC.codebase1' impl branchCache V1.formatSymbol formatAnn codebasePath + runTranscript_ tmp codebase branchCache (pushTranscript repoGit) + + -- check out the resulting repo so we can inspect it + io $ "git" ["clone", Text.pack repoGit, Text.pack $ tmp implName ] + + -- inspect it + for_ groups $ \(group, list) -> scope group $ + for_ list $ \(title, path) -> scope title $ + io (doesFileExist $ tmp implName path) >>= expect + + for_ notGroups $ \(group, list) -> scope group $ + for_ list $ \(title, path) -> scope title $ + io (fmap not . doesFileExist $ tmp implName path) >>= expect + + -- if we haven't crashed, clean up! 
+ io $ removeDirectoryRecursive tmp + + where + setupTranscript = [iTrim| + ```ucm + .> builtins.merge + ``` + ```unison:hide + --#0n4pbd0q9u + type outside.A = A Nat outside.B + + --#muulibntaq + type outside.B = B Int + + --#msp7bv40rv + outside.c = 3 + + --#6cdi7g1oi2 + outside.d = c < (p + 1) + + --#4idrjau939 + type inside.M = M outside.A + + --#fiupm7pl7o + inside.p = c + + --#l5pndeifuh + inside.q x = x + p * p + + inside.r = d + ``` + ```ucm + .foo> add + ``` + ```unison:hide + r = false + ``` + ```ucm + .foo.inside> update + ``` + |] + pushTranscript repo = [iTrim| + ```ucm + .foo.inside> push ${repo} + ``` + |] + + pushImplementations :: (MonadIO m, Var v, BuiltinAnnotation a) + => [(String, SyncToDir m v a)] + pushImplementations = + [ ("SlimCopyRegenerateIndex", SlimCopyRegenerateIndex.syncToDirectory) + ] + + groups = + [ ("types", types) + , ("terms", terms) + , ("branches", branches) + , ("patches", patches) + , ("dependentsIndex", dependentsIndex) + , ("typeIndex", typeIndex) + , ("typeMentionsIndex", typeMentionsIndex) ] + + notGroups = + [ ("notBranches", notBranches) ] + + types = + [ ("M", ".unison/v1/types/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg/compiled.ub") + , ("A", ".unison/v1/types/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/compiled.ub") + , ("B", ".unison/v1/types/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/compiled.ub") + ] + + terms = + [ ("p (type)", ".unison/v1/terms/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/type.ub") + , ("p (compiled)", ".unison/v1/terms/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/compiled.ub") + , ("c (type)", 
".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/type.ub") + , ("c (compiled)", ".unison/v1/terms/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/compiled.ub") + , ("d (type)", ".unison/v1/terms/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do/type.ub") + , ("d (compiled)", ".unison/v1/terms/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do/compiled.ub") + , ("q (type)", ".unison/v1/terms/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8/type.ub") + , ("q (compiled)", ".unison/v1/terms/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8/compiled.ub") + , ("r (type)", ".unison/v1/terms/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8/type.ub") + , ("r (compiled)", ".unison/v1/terms/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8/compiled.ub") + , ("r' (type)", ".unison/v1/terms/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo/type.ub") + , ("r' (compiled)", ".unison/v1/terms/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo/compiled.ub") + ] + + branches = + [ ("_head", ".unison/v1/paths/_head/pciob2qnondela4h4u1dtk9pvbc9up7qed0j311lkomordjah2lliddis7tdl76h5mdbs5ja10tm8kh2o3sni1bu2kdsqtm4fkv5288") + , (".foo.inside", ".unison/v1/paths/pciob2qnondela4h4u1dtk9pvbc9up7qed0j311lkomordjah2lliddis7tdl76h5mdbs5ja10tm8kh2o3sni1bu2kdsqtm4fkv5288.ub") + , (".foo.inside'", ".unison/v1/paths/0ufjqqmabderbejfhrled8i4lirgpqgimejbkdnk1m9t90ibj25oi7g1h2adougdqhv72sv939eq67ur77n3qciajh0reiuqs68th00.ub") + , 
(".foo.inside.M", ".unison/v1/paths/i2p08iv1l50fc934gh6kea181kvjnt3kdgiid5c4r5016kjuliesji43u4j4mjvsne3qvmq43puk9dkm61nuc542n7pchsvg6t0v55o.ub") + , ("", ".unison/v1/paths/7asfbtqmoj56pq7b053v2jc1spgb8g5j4cg1tj97ausi3scveqa50ktv4b2ofoclnkqmnl18vnt5d83jrh85qd43nnrsh6qetbksb70.ub") + ] + + notBranches = + [ (".", ".unison/v1/paths/9r7l4k8ks1tog088fg96evunq1ednlsskf2lh0nacpe5n00khcrl8f1g5sevm7cqd3s64cj22ukvkh2fflm3rhhkn2hh2rj1n20mnm8.ub") + , (".'", ".unison/v1/paths/llton7oiormlimkdmqjdr8tja12i6tebii7cmfd7545b7mt1sb02f9usjqnjd6iaisnn1ngpsl76hfg024l8dlult3s6stkt28j42sg.ub") + , (".''", ".unison/v1/paths/givahf3f6fu8vv07kglsofdcoem7q5dm4rracr78a5didjc4pq2djh2rfdo5sn7nld2757oi02a4a07cv9rk4peafhh76nllcp8l1n8.ub") + , (".foo", ".unison/v1/paths/a8dt4i16905fql2d4fbmtipmj35tj6qmkq176dlnsn6klh0josr255eobn0d3f0aku360h0em6oit9ftjpq3vhcdap8bgpqr79qne58.ub") + , (".foo'", ".unison/v1/paths/l3r86dvdmbe2lsinh213tp9upm5qjtk17iep3n5mah7qg5bupj1e7ikpv1iqbgegp895r0krlo0u2c4nclvfvch3e6kspu766th6tqo.ub") + , (".foo.outside", ".unison/v1/paths/s6iquav10f69pvrpj6rtm7vcp6fs6hgnnmjb1qs00n594ljugbf2qtls93oc4lvb3kjro8fpakoua05gqido4haj4m520rip2gu2hvo.ub") + , (".foo.outside.A", ".unison/v1/paths/2i1lh7pntl3rqrtn4c10ajdg4m3al1rqm6u6ak5ak6urgsaf6nhqn2olt3rjqj5kcj042h8lqseguk3opp019hc7g8ncukds25t9r40.ub") + , (".foo.outside.B", ".unison/v1/paths/jag86haq235jmifji4n8nff8dg1ithenefs2uk5ms6b4qgj9pfa9g40vs4kdn3uhm066ni0bvfb7ib9tqtdgqcn90eadl7282nqqbc0.ub") + ] + + patches = + [ ("patch", ".unison/v1/patches/96b419pm6l896ncmef9kqkpj29gq205amsl6prsl2num29thpn9fej8v8ndcmubadv5hehege4s43n3ljbifsnna92lpeuacq9fm3qo.up") ] + + dependentsIndex = + [ ("Nat <- A", ".unison/v1/dependents/_builtin/Nat/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8") + , ("B <- A", 
".unison/v1/dependents/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8") + , ("Int <- B", ".unison/v1/dependents/_builtin/Int/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0") + , ("Nat <- c", ".unison/v1/dependents/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo") + , ("Boolean <- d", ".unison/v1/dependents/_builtin/Boolean/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("Nat <- d", ".unison/v1/dependents/_builtin/Nat/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("Nat.+ <- d", ".unison/v1/dependents/_builtin/Nat.+/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("Universal.< <- d",".unison/v1/dependents/_builtin/Universal.$less-than$/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("c <- d", ".unison/v1/dependents/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("p <- d", ".unison/v1/dependents/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("A <- M", ".unison/v1/dependents/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg") + , ("Nat <- p", 
".unison/v1/dependents/_builtin/Nat/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") + , ("c <- p", ".unison/v1/dependents/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") + , ("Nat <- q", ".unison/v1/dependents/_builtin/Nat/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") + , ("Nat.* <- q", ".unison/v1/dependents/_builtin/Nat.$star$/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") + , ("Nat.+ <- q", ".unison/v1/dependents/_builtin/Nat.+/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") + , ("p <- q", ".unison/v1/dependents/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") + , ("Boolean <- r", ".unison/v1/dependents/_builtin/Boolean/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") + , ("d <- r", ".unison/v1/dependents/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") + , ("Boolean <- r'", ".unison/v1/dependents/_builtin/Boolean/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo") + ] + + typeIndex = + [ ("(Nat -> B -> A) <- 
A#0",".unison/v1/type-index/#6n4ih159cqcvr52285qj3899ft380ao9l8is9louoen4ea6thgmq8hu38fmblo3tl6gjp0f6nrifplbh6d7770o96adr3d71i913aco/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") + , ("(Int -> B) <- B#0", ".unison/v1/type-index/#vjftvem4n0os6pnuko48ld67v7av3hq23r2gqvj7o536tfb1ctsci2fcgmmplj9b6slsege96onv4c2q8a0n8iadpe56mm4bc90muh8/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") + , ("Nat <- c", ".unison/v1/type-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo") + , ("Boolean <- d", ".unison/v1/type-index/_builtin/Boolean/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("(A -> M) <- M#0", ".unison/v1/type-index/#735ugfihokh6o8ob9akhe1ei05ocsfncdrj76bdomeue5rb9td82q7m4a72e68bpgl3np562fehe9uio4vfcs07ib0mss1o5m08plk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") + , ("Nat <- p", ".unison/v1/type-index/_builtin/Nat/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") + -- note: typeForIndexing = Type.removeAllEffectVars typ + , ("(Nat -> Nat) <- q", ".unison/v1/type-index/#29pbek54phqkda8dp4erqn9u6etr8dm74h3sbg431kdvrt23l3c2a7eh01qpnc4kqq6i8fu1g0r5dsc08qqofnrlvfhpqs4cb6snls0/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") + , ("Boolean <- r", ".unison/v1/type-index/_builtin/Boolean/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") + , ("Boolean <- r'", ".unison/v1/type-index/_builtin/Boolean/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo") + ] + + typeMentionsIndex = + [ ("(Nat -> B -> A) <- 
A#0",".unison/v1/type-mentions-index/#6n4ih159cqcvr52285qj3899ft380ao9l8is9louoen4ea6thgmq8hu38fmblo3tl6gjp0f6nrifplbh6d7770o96adr3d71i913aco/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") + , ("(B -> A) <- A#0", ".unison/v1/type-mentions-index/#7u2a6hguqo74e3aq141fvopo9snclmfbg149k6e51j96hebi23q0tjq2dqjme76smull2r2lkap58ph0pcvpqn0dv1rk1ssfdt20cvo/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") + , ("Nat <- A#0", ".unison/v1/type-mentions-index/_builtin/Nat/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") + , ("B <- A#0", ".unison/v1/type-mentions-index/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") + , ("A <- A#0", ".unison/v1/type-mentions-index/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8#d0") + , ("(Int -> B) <- B#0", ".unison/v1/type-mentions-index/#vjftvem4n0os6pnuko48ld67v7av3hq23r2gqvj7o536tfb1ctsci2fcgmmplj9b6slsege96onv4c2q8a0n8iadpe56mm4bc90muh8/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") + , ("Int <- B#0", ".unison/v1/type-mentions-index/_builtin/Int/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") + , ("B <- B#0", ".unison/v1/type-mentions-index/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0/#muulibntaqdk8hn0qjdnf9jn2qjgsh9bbtsrp626dianupo25llnecke6lhgv01vdenra45hor9u855kiiitu3ua60dg1bk4teb4ba0#d0") + , ("Nat <- c", 
".unison/v1/type-mentions-index/_builtin/Nat/#msp7bv40rvjd2o8022ti44497ft2hohrg347pu0pfn75vt1s0qh2v8n9ttmmpv23s90fo2v2qpr8o5nl2jelt0cev6pi1sls79kgdoo") + , ("Boolean <- d", ".unison/v1/type-mentions-index/_builtin/Boolean/#6cdi7g1oi2lro3d6n9qg8v8fe3l2clc194qnb507oi72d5ap08gs0v9m80qbe0nc1keui9r03jnb48is0lttbsk336ehetlc2cs37do") + , ("(A -> M) <- M#0", ".unison/v1/type-mentions-index/#735ugfihokh6o8ob9akhe1ei05ocsfncdrj76bdomeue5rb9td82q7m4a72e68bpgl3np562fehe9uio4vfcs07ib0mss1o5m08plk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") + , ("A <- M#0", ".unison/v1/type-mentions-index/#0n4pbd0q9uh78eurgn28gkqk44gdtgttv9uuvusvm1fg6dvapdn76ui86lsn761lop466vo8m80m4is9n5qukg80vr4k8fibpo58rk8/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") + , ("M <- M#0", ".unison/v1/type-mentions-index/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg/#4idrjau9395kb8lsvielcjkli6dd7kkgalsfsgq4hq1k62n3vgpd2uejfuldmnutn1uch2292cj6ebr4ebvgqopucrp2j6pmv0s5uhg#d0") + , ("Nat <- p", ".unison/v1/type-mentions-index/_builtin/Nat/#fiupm7pl7o6ffitqatr174po7rdoh8ajqtcj7nirbeb9nqm4qd5qg9uvf1hic7lsm7b9qs38ka9lqv1iksmd6mothe816di0vcs0500") + , ("(Nat -> Nat) <- q", ".unison/v1/type-mentions-index/#29pbek54phqkda8dp4erqn9u6etr8dm74h3sbg431kdvrt23l3c2a7eh01qpnc4kqq6i8fu1g0r5dsc08qqofnrlvfhpqs4cb6snls0/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") + , ("Nat <- q", ".unison/v1/type-mentions-index/_builtin/Nat/#l5pndeifuhmue9a204v77h8kgff6lt8i5rnujkv3u74bjqukokol9vj45t291i7grneso95i3jctnr8a1nes523m1gb8jqir3o1k6h8") + , ("Boolean <- r", ".unison/v1/type-mentions-index/_builtin/Boolean/#im2kiu2hmnfdvv5fbfc5lhaakebbs69074hjrb3ptkjnrh6dpkcp1rnnq99mhson2gr6g8uduppvpelpq4jvq1rg5p3f9jpiplpk9u8") + , ("Boolean <- r'", 
".unison/v1/type-mentions-index/_builtin/Boolean/#gi015he0n17ji9sl5hgh1q8tjas74341p48h719kkgajj75d6qapakq993gu2duvit32b7qhqac1odk6jhvad0ku8ajcj7sup6t6mbo") + ] + +-- a helper to try turning these repo path names into test titles, by +-- limiting each path segment to 20 chars. may produce duplicate names since +-- it ends up dropping reference cycles suffixes, constructor ids, etc. +makeTitle :: String -> String +makeTitle = intercalate "/" . map (take 20) . drop 2 . splitOn "/" diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs new file mode 100644 index 0000000000..de07354e03 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/IO.hs @@ -0,0 +1,114 @@ +{-# Language OverloadedStrings #-} +{-# Language QuasiQuotes #-} + +module Unison.Test.IO where + +import Unison.Prelude +import EasyTest +import qualified System.IO.Temp as Temp +import qualified Data.Text as Text +import qualified Data.Text.IO as TextIO +import Shellmet () +import Data.String.Here (iTrim) +import System.FilePath (()) +import System.Directory (removeDirectoryRecursive) + +import Unison.Codebase (Codebase, CodebasePath) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.FileCodebase as FC +import qualified Unison.Codebase.TranscriptParser as TR +import Unison.Parser (Ann) +import Unison.Symbol (Symbol) + +-- * IO Tests + +test :: Bool -> Test () +test newRt = scope "IO" . tests $ [ testHandleOps newRt ] + +-- * Implementation + +-- | Test reading from and writing to a handle +-- +-- The transcript writes expectedText to a file, reads the same file and +-- writes the read text to the result file which is then checked by the haskell. +testHandleOps :: Bool -> Test () +testHandleOps newRt = + withScopeAndTempDir "handleOps" $ \workdir codebase cache -> do + let myFile = workdir "handleOps.txt" + resultFile = workdir "handleOps.result" + expectedText = "Good Job!" 
:: Text.Text + runTranscript_ newRt workdir codebase cache [iTrim| +```ucm:hide +.> builtins.mergeio +``` + +```unison +use io IO + +main : '{IO} () +main = 'let + fp = ${Text.pack myFile} + res = ${Text.pack resultFile} + expected = ${expectedText} + + -- Write to myFile + h1 = builtins.io.openFile (FilePath fp) Write + putText h1 expected + builtins.io.closeFile h1 + + -- Read from myFile + h2 = builtins.io.openFile (FilePath fp) Read + myC = getText h2 + builtins.io.closeFile h2 + + -- Write what we read from myFile to resultFile + h3 = builtins.io.openFile (FilePath res) Write + putText h3 myC + builtins.io.closeFile h3 +``` + +```ucm +.> run main +``` +|] + + res <- io $ TextIO.readFile (resultFile) + if res == expectedText + then ok + else crash $ "Failed to read expectedText from file: " ++ show myFile + +-- * Utilities + +initCodebase :: Branch.Cache IO -> FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) +initCodebase branchCache tmpDir name = do + let codebaseDir = tmpDir name + c <- FC.initCodebase branchCache codebaseDir + pure (codebaseDir, c) + +-- run a transcript on an existing codebase +runTranscript_ + :: MonadIO m + => Bool + -> FilePath + -> Codebase IO Symbol Ann + -> Branch.Cache IO + -> String + -> m () +runTranscript_ newRt tmpDir c branchCache transcript = do + let configFile = tmpDir ".unisonConfig" + let cwd = tmpDir "cwd" + let err err = error $ "Parse error: \n" <> show err + + -- parse and run the transcript + flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> + void . liftIO $ + TR.run (Just newRt) cwd configFile stanzas c branchCache + >>= traceM . 
Text.unpack + +withScopeAndTempDir :: String -> (FilePath -> Codebase IO Symbol Ann -> Branch.Cache IO -> Test ()) -> Test () +withScopeAndTempDir name body = scope name $ do + tmp <- io (Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory name) + cache <- io $ Branch.boundedCache 4096 + (_, codebase) <- io $ initCodebase cache tmp "user" + body tmp codebase cache + io $ removeDirectoryRecursive tmp diff --git a/parser-typechecker/tests/Unison/Test/Lexer.hs b/parser-typechecker/tests/Unison/Test/Lexer.hs new file mode 100644 index 0000000000..c9d3c34155 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Lexer.hs @@ -0,0 +1,207 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Test.Lexer where + +import EasyTest +import Unison.Lexer +import qualified Unison.ShortHash as ShortHash + +test :: Test () +test = + scope "lexer" + . tests + $ [ t "1" [Numeric "1"] + , t "+1" [Numeric "+1"] + , t "-1" [Numeric "-1"] + , t "-1.0" [Numeric "-1.0"] + , t "+1.0" [Numeric "+1.0"] + + , t "1e3" [Numeric "1e3"] + , t "1e+3" [Numeric "1e+3"] + , t "1e-3" [Numeric "1e-3"] + , t "+1e3" [Numeric "+1e3"] + , t "+1e+3" [Numeric "+1e+3"] + , t "+1e-3" [Numeric "+1e-3"] + , t "-1e3" [Numeric "-1e3"] + , t "-1e+3" [Numeric "-1e+3"] + , t "-1e-3" [Numeric "-1e-3"] + , t "1.2e3" [Numeric "1.2e3"] + , t "1.2e+3" [Numeric "1.2e+3"] + , t "1.2e-3" [Numeric "1.2e-3"] + , t "+1.2e3" [Numeric "+1.2e3"] + , t "+1.2e+3" [Numeric "+1.2e+3"] + , t "+1.2e-3" [Numeric "+1.2e-3"] + , t "-1.2e3" [Numeric "-1.2e3"] + , t "-1.2e+3" [Numeric "-1.2e+3"] + , t "-1.2e-3" [Numeric "-1.2e-3"] + , t "1E3" [Numeric "1e3"] + , t "1E+3" [Numeric "1e+3"] + , t "1E-3" [Numeric "1e-3"] + , t "+1E3" [Numeric "+1e3"] + , t "+1E+3" [Numeric "+1e+3"] + , t "+1E-3" [Numeric "+1e-3"] + , t "-1E3" [Numeric "-1e3"] + , t "-1E+3" [Numeric "-1e+3"] + , t "-1E-3" [Numeric "-1e-3"] + , t "1.2E3" [Numeric "1.2e3"] + , t "1.2E+3" [Numeric "1.2e+3"] + , t "1.2E-3" [Numeric "1.2e-3"] + , t "+1.2E3" 
[Numeric "+1.2e3"] + , t "+1.2E+3" [Numeric "+1.2e+3"] + , t "+1.2E-3" [Numeric "+1.2e-3"] + , t "-1.2E3" [Numeric "-1.2e3"] + , t "-1.2E+3" [Numeric "-1.2e+3"] + , t "-1.2E-3" [Numeric "-1.2e-3"] + + , t "1-1" [Numeric "1", simpleSymbolyId "-", Numeric "1"] + , t "1+1" [Numeric "1", simpleSymbolyId "+", Numeric "1"] + , t "1 +1" [Numeric "1", Numeric "+1"] + , t "1+ 1" [Numeric "1", simpleSymbolyId "+", Numeric "1"] + , t "x+y" [simpleWordyId "x", simpleSymbolyId "+", simpleWordyId "y"] + , t "++;++" [simpleSymbolyId "++", Semi False, simpleSymbolyId "++"] + , t "++; woot" [simpleSymbolyId "++", Semi False, simpleWordyId "woot"] + , t "woot;woot" [simpleWordyId "woot", Semi False, simpleWordyId "woot"] + , t "woot;(woot)" [simpleWordyId "woot", Semi False, Open "(", simpleWordyId "woot", Close] + , t + "[+1,+1]" + [Reserved "[", Numeric "+1", Reserved ",", Numeric "+1", Reserved "]"] + , t + "[ +1 , +1 ]" + [Reserved "[", Numeric "+1", Reserved ",", Numeric "+1", Reserved "]"] + , t "-- a comment 1.0" [] + , t "\"woot\" -- a comment 1.0" [Textual "woot"] + , t "0:Int" [Numeric "0", Reserved ":", simpleWordyId "Int"] + , t "0 : Int" [Numeric "0", Reserved ":", simpleWordyId "Int"] + , t + ".Foo Foo . .foo.bar.baz" + [ simpleWordyId ".Foo" + , simpleWordyId "Foo" + , simpleSymbolyId "." 
+ , simpleWordyId ".foo.bar.baz" + ] + , t ".Foo.Bar.+" [simpleSymbolyId ".Foo.Bar.+"] + + -- idents with hashes + , t "foo#bar" [WordyId "foo" (Just (ShortHash.unsafeFromText "#bar"))] + , t "+#bar" [SymbolyId "+" (Just (ShortHash.unsafeFromText "#bar"))] + + -- note - these are all the same, just with different spacing + , let ex1 = "if x then y else z" + ex2 = unlines ["if", " x", "then", " y", "else z"] + ex3 = unlines ["if", " x", " then", " y", "else z"] + ex4 = unlines ["if", " x", " then", " y", "else z"] + expected = + [ Open "if" + , simpleWordyId "x" + , Close + , Open "then" + , simpleWordyId "y" + , Close + , Open "else" + , simpleWordyId "z" + , Close + ] + + -- directly close empty = block + in tests $ map (`t` expected) [ex1, ex2, ex3, ex4] + , let ex = unlines ["test =", "", "x = 1"] + + -- directly close nested empty blocks + in t + ex + [ simpleWordyId "test" + , Open "=" + , Close + , (Semi True) + , simpleWordyId "x" + , Open "=" + , Numeric "1" + , Close + ] + , let ex = unlines ["test =", " test2 =", "", "x = 1"] + in t + ex + [ simpleWordyId "test" + , Open "=" + , simpleWordyId "test2" + , Open "=" + , Close + , Close + , (Semi True) + , simpleWordyId "x" + , Open "=" + , Numeric "1" + , Close + ] + , let + ex = unlines + ["if a then b", "else if c then d", "else if e then f", "else g"] -- close of the three `else` blocks + + -- In an empty `then` clause, the `else` is interpreted as a `Reserved` token + in t + ex + [ Open "if" + , simpleWordyId "a" + , Close + , Open "then" + , simpleWordyId "b" + , Close + , Open "else" + , Open "if" + , simpleWordyId "c" + , Close + , Open "then" + , simpleWordyId "d" + , Close + , Open "else" + , Open "if" + , simpleWordyId "e" + , Close + , Open "then" + , simpleWordyId "f" + , Close + , Open "else" + , simpleWordyId "g" + , Close + , Close + , Close + ] + , t + "if x then else" + [ Open "if" + , simpleWordyId "x" + , Close + , Open "then" + , Reserved "else" + , Close + ] + -- Empty `else` clause + , 
t + "if x then 1 else" + [ Open "if" + , simpleWordyId "x" + , Close + , Open "then" + , Numeric "1" + , Close + , Open "else" + , Close + ] + -- Test string literals + , t "\"simple string without escape characters\"" + [Textual "simple string without escape characters"] + , t "\"test escaped quotes \\\"in quotes\\\"\"" + [Textual "test escaped quotes \"in quotes\""] + , t "\"\\n \\t \\b \\a\"" [Textual "\n \t \b \a"] + ] + +t :: String -> [Lexeme] -> Test () +t s expected = + let actual0 = payload <$> lexer "ignored filename" s + actual = take (length actual0 - 2) . drop 1 $ actual0 + in scope s $ if actual == expected + then ok + else do + note $ "expected: " ++ show expected + note $ "actual : " ++ show actual + crash "actual != expected" diff --git a/parser-typechecker/tests/Unison/Test/MCode.hs b/parser-typechecker/tests/Unison/Test/MCode.hs new file mode 100644 index 0000000000..fc29af1f2f --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/MCode.hs @@ -0,0 +1,105 @@ +{-# language PatternGuards #-} +{-# language TypeApplications #-} +{-# language OverloadedStrings #-} + +module Unison.Test.MCode where + +import EasyTest + +import qualified Data.Map.Strict as Map + +import Data.Bits (bit) +import Data.Word (Word64) + +import Unison.Util.EnumContainers as EC + +import Unison.Term (unannotate) +import Unison.Symbol (Symbol) +import Unison.Reference (Reference(Builtin)) +import Unison.Runtime.Pattern +import Unison.Runtime.ANF + ( superNormalize + , lamLift + ) +import Unison.Runtime.MCode + ( Section(..) + , Instr(..) + , Args(..) + , Comb(..) + , Branch(..) 
+ , emitComb + , emitCombs + ) +import Unison.Runtime.Builtin +import Unison.Runtime.Machine + ( SEnv(..), eval0 ) + +import Unison.Test.Common (tm) + +testEval0 :: EnumMap Word64 Comb -> Section -> Test () +testEval0 env sect = do + io $ eval0 (SEnv env builtinForeigns mempty mempty) sect + ok + +builtins :: Reference -> Word64 +builtins r + | Builtin "todo" <- r = bit 64 + | Just i <- Map.lookup r builtinTermNumbering = i + | otherwise = error $ "builtins: " ++ show r + +cenv :: EnumMap Word64 Comb +cenv = fmap (emitComb mempty) $ numberedTermLookup @Symbol + +env :: EnumMap Word64 Comb -> EnumMap Word64 Comb +env m = m <> mapInsert (bit 64) (Lam 0 1 2 1 asrt) cenv + +asrt :: Section +asrt = Ins (Unpack 0) + $ Match 0 + $ Test1 1 (Yield ZArgs) + (Die "assertion failed") + +multRec :: String +multRec + = "let\n\ + \ n = 5\n\ + \ f acc i = match i with\n\ + \ 0 -> acc\n\ + \ _ -> f (##Nat.+ acc n) (##Nat.sub i 1)\n\ + \ ##todo (##Nat.== (f 0 1000) 5000)" + +dataSpec :: DataSpec +dataSpec = mempty + +testEval :: String -> Test () +testEval s = testEval0 (env aux) main + where + (Lam 0 0 _ _ main, aux, _) + = emitCombs (bit 24) + . superNormalize builtins (builtinTypeNumbering Map.!) + . lamLift + . splitPatterns dataSpec + . unannotate + $ tm s + +nested :: String +nested + = "let\n\ + \ x = match 2 with\n\ + \ 0 -> ##Nat.+ 0 1\n\ + \ m@n -> n\n\ + \ ##todo (##Nat.== x 2)" + +test :: Test () +test = scope "mcode" . 
tests $ + [ scope "2=2" $ testEval "##todo (##Nat.== 2 2)" + , scope "2=1+1" $ testEval "##todo (##Nat.== 2 (##Nat.+ 1 1))" + , scope "2=3-1" $ testEval "##todo (##Nat.== 2 (##Nat.sub 3 1))" + , scope "5*5=25" + $ testEval "##todo (##Nat.== (##Nat.* 5 5) 25)" + , scope "5*1000=5000" + $ testEval "##todo (##Nat.== (##Nat.* 5 1000) 5000)" + , scope "5*1000=5000 rec" $ testEval multRec + , scope "nested" + $ testEval nested + ] diff --git a/parser-typechecker/tests/Unison/Test/Range.hs b/parser-typechecker/tests/Unison/Test/Range.hs new file mode 100644 index 0000000000..f0b521ef79 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Range.hs @@ -0,0 +1,33 @@ +module Unison.Test.Range where + +import EasyTest +import Unison.Lexer (Pos (..)) +import Unison.Util.Range + + +test :: Test () +test = scope "range" . tests $ + [ scope "contains 11 11" . expect $ contains zero zero + , antisymmetric "contains 11 12" (not . uncurry contains) $ (zero, one) + , scope "contains 12 23" . expect . not $ contains one one' + , scope "contains 23 12" . expect . not $ contains one' one + + , symmetric "overlaps 11 11" (not . uncurry overlaps) $ (zero, zero) + , symmetric "overlaps 12 11" (not . uncurry overlaps) $ (one, zero) + , symmetric "overlaps 12 23" (not . uncurry overlaps) $ (one, one') + , symmetric "overlaps 12 13" (uncurry overlaps) $ (one, two) + , symmetric "overlaps 23 13" (uncurry overlaps) $ (one', two) + + , scope "inrange 1 12" . expect $ inRange (Pos 1 1) (Range (Pos 1 1) (Pos 1 2)) + , scope "inrange 2 12" . expect . not $ inRange (Pos 1 2) (Range (Pos 1 1) (Pos 1 2)) + ] + where symmetric s f (a,b) = + tests [ scope s . expect $ f (a, b) + , scope (s ++ " (symmetric)") . expect $ f (b, a)] + antisymmetric s f (a,b) = + tests [ scope s . expect $ f (a, b) + , scope (s ++ " (antisymmetric)") . expect . 
not $ f (b, a)] + zero = Range (Pos 1 1) (Pos 1 1) + one = Range (Pos 1 1) (Pos 1 2) + one' = Range (Pos 1 2) (Pos 1 3) + two = Range (Pos 1 1) (Pos 1 3) diff --git a/parser-typechecker/tests/Unison/Test/Referent.hs b/parser-typechecker/tests/Unison/Test/Referent.hs new file mode 100644 index 0000000000..9c9dfb51db --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Referent.hs @@ -0,0 +1,82 @@ +{-# Language OverloadedStrings #-} + +module Unison.Test.Referent where + +import Data.Text (Text) +import qualified Data.Text as Text +import qualified Unison.Referent as R +import qualified Unison.ShortHash as SH +import qualified Unison.Reference as Rf +import EasyTest + +test :: Test () +test = scope "hashparsing" . tests $ + [ + scope "Reference" $ tests + [ ref h + , ref (h <> "." <> suffix1) + , ref (h <> "." <> suffix2) ], + + scope "Referent" $ tests + [ r h + , r $ h <> "." <> suffix1 + , r $ h <> "#d10" + , r $ h <> "#a0" + , r $ h <> "." <> suffix2 <> "#d6" + , r $ h <> "." <> suffix1 <> "#a9" ], + + scope "ShortHash" $ tests + [ sh h + , sh "#abcd" + , sh $ "#abcd." <> suffix1 + , sh "#abcd#d10" + , sh "#abcd#a3" + , sh $ "#abcd." 
<> suffix2 <> "#d10" + , sh $ "#abcd.y6#a5" + , scope "builtin" $ + expect (SH.fromText "##Text.take" == Just (SH.Builtin "Text.take")) + , pending $ scope "builtins don't have CIDs" $ + expect (SH.fromText "##FileIO#3" == Nothing) + , scope "term ref, no cycle" $ + expect (SH.fromText "#2tWjVAuc7" == + Just (SH.ShortHash "2tWjVAuc7" Nothing Nothing)) + , scope "term ref, part of cycle" $ + expect (SH.fromText "#y9ycWkiC1.y9" == + Just (SH.ShortHash "y9ycWkiC1" (Just "y9") Nothing)) + , scope "constructor" $ + expect (SH.fromText "#cWkiC1x89#1" == + Just (SH.ShortHash "cWkiC1x89" Nothing (Just "1"))) + , scope "constructor of a type in a cycle" $ + expect (SH.fromText "#DCxrnCAPS.WD#0" == + Just (SH.ShortHash "DCxrnCAPS" (Just "WD") (Just "0"))) + , scope "Anything to the left of the first # is ignored" $ + expect (SH.fromText "foo#abc" == + Just (SH.ShortHash "abc" Nothing Nothing)) + , pending $ scope "Anything including and following a third # is rejected" $ + expect (SH.fromText "foo#abc#2#hello" == Nothing) + , scope "Anything after a second . 
before a second # is ignored" $ + expect (SH.fromText "foo#abc.1f.x" == + Just (SH.ShortHash "abc" (Just "1f") Nothing)) + ] + ] + where + h = "#1tdqrgl90qnmqvrff0j76kg2rnajq7n8j54e9cbk4p8pdi41q343bnh8h2rv6nadhlin8teg8371d445pvo0as7j2sav8k401d2s3no" + suffix1 = Rf.showSuffix 0 10 + suffix2 = Rf.showSuffix 3 6 + ref txt = scope (Text.unpack txt) $ case Rf.fromText txt of + Left e -> fail e + Right r1 -> case Rf.fromText (Rf.toText r1) of + Left e -> fail e + Right r2 -> expect (r1 == r2) + r :: Text -> Test () + r txt = scope (Text.unpack txt) $ case R.fromText txt of + Nothing -> fail "oh noes" + Just referent -> case R.fromText (R.toText referent) of + Nothing -> fail "oh noes" + Just referent2 -> expect (referent == referent2) + sh :: Text -> Test () + sh txt = scope (Text.unpack txt) $ case SH.fromText txt of + Nothing -> fail "oh noes" + Just shorthash -> case SH.fromText (SH.toText shorthash) of + Nothing -> fail "oh noes" + Just shorthash2 -> expect (shorthash == shorthash2) diff --git a/parser-typechecker/tests/Unison/Test/Term.hs b/parser-typechecker/tests/Unison/Test/Term.hs new file mode 100644 index 0000000000..5afdad551d --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Term.hs @@ -0,0 +1,53 @@ +{-# Language OverloadedStrings #-} + +module Unison.Test.Term where + +import EasyTest +import qualified Data.Map as Map +import Data.Map ( (!) 
) +import qualified Unison.Hash as Hash +import qualified Unison.Reference as R +import Unison.Symbol ( Symbol ) +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import qualified Unison.Var as Var + +test :: Test () +test = scope "term" $ tests + [ scope "Term.substTypeVar" $ do + -- check that capture avoidance works in substTypeVar + let v s = Var.nameds s :: Symbol + tv s = Type.var() (v s) + v1 s = Var.freshenId 1 (v s) + tm :: Term.Term Symbol () + tm = Term.ann() (Term.ann() + (Term.nat() 42) + (Type.introOuter() (v "a") $ + Type.arrow() (tv "a") (tv "x"))) + (Type.forall() (v "a") (tv "a")) + tm' = Term.substTypeVar (v "x") (tv "a") tm + expected = + Term.ann() (Term.ann() + (Term.nat() 42) + (Type.introOuter() (v1 "a") $ + Type.arrow() (Type.var() $ v1 "a") (tv "a"))) + (Type.forall() (v1 "a") (Type.var() $ v1 "a")) + note $ show tm' + note $ show expected + expect $ tm == tm + expect $ tm' == tm' + expect $ tm' == expected + ok + , scope "Term.unhashComponent" $ + let h = Hash.unsafeFromBase32Hex "abcd" + ref = R.Derived h 0 1 + v1 = Var.refNamed @Symbol ref + -- input component: `ref = \v1 -> ref` + component = Map.singleton ref (Term.lam () v1 (Term.ref () ref)) + component' = Term.unhashComponent component + -- expected unhashed component: `v2 = \v1 -> v2`, where `v2 /= v1`, + -- i.e. `v2` cannot be just `ref` converted to a ref-named variable, + -- since that would collide with `v1` + (v2, _) = component' ! 
ref + in expect $ v2 /= v1 + ] diff --git a/parser-typechecker/tests/Unison/Test/TermParser.hs b/parser-typechecker/tests/Unison/Test/TermParser.hs new file mode 100644 index 0000000000..93227022b0 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/TermParser.hs @@ -0,0 +1,227 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE QuasiQuotes #-} + +module Unison.Test.TermParser where + +import Control.Applicative +import Control.Monad (join) +import EasyTest +import qualified Text.Megaparsec as P +import Text.RawString.QQ +import Unison.Parser +import qualified Unison.Parsers as Ps +import Unison.PrintError (renderParseErrorAsANSI) +import Unison.Symbol (Symbol) +import qualified Unison.TermParser as TP +import qualified Unison.Test.Common as Common + +test1 :: Test () +test1 = scope "termparser" . tests . map parses $ + [ "1" + , "1.0" + , "+1" + , "-1" + , "+1.0" + , "-1.0" + + , "1e3" + , "1e+3" + , "1e-3" + , "+1e3" + , "+1e+3" + , "+1e-3" + , "-1e3" + , "-1e+3" + , "-1e-3" + , "1.2e3" + , "1.2e+3" + , "1.2e-3" + , "+1.2e3" + , "+1.2e+3" + , "+1.2e-3" + , "-1.2e3" + , "-1.2e+3" + , "-1.2e-3" + + , "-4th" + , "()" + , "(0)" + , "forty" + , "forty two" + , "\"forty two\"" + , "[1,2,3]" + , "\"abc\"" + , "?x" + , "?\\n" + , "x + 1" + , "1 + 1" + , "1 Nat.+ 1" + , "( x + 1 )" + , "foo 42" + , "1 Nat.== 1" + , "x Nat.== y" + , "if 1 Nat.== 1 then 1 else 1" + , "if 1 Nat.== x then 1 else 1" + , "if x Nat.== 1 then 1 else 1" + , "if x == 1 then 1 else 1" + , "if x Nat.== x then 1 else 1" + -- + -- Block tests + , "let x = 1\n" ++ + " x" + , "let\n" ++ + " y = 1\n" ++ + " x" + , unlines [ + "let y = 1 ", + " x = 2 ", + " x + y"] + , "(let \n" ++ + " x = 23 + 42\n" ++ + " x + 1 )" + -- + -- Handlers + , "handle\n" ++ + " x = 23 + 42\n" ++ + " x + foo 8 102.0 +4\n" ++ + "with foo" + , "handle\n" ++ + " x = 1\n" ++ + " x\n" ++ + "with foo" + , "handle x with foo" + , "handle foo with cases\n" ++ + " { x } -> x" + + -- Patterns + , "match x with x -> x" + , "match x 
with 0 -> 1" + , "match x with\n" ++ + " 0 -> 1" + , "match +0 with\n" ++ + " +0 -> -1" + , "match x with\n" ++ + " x -> 1\n" ++ + " 2 -> 7\n" ++ + " _ -> 3\n" ++ + " Tuple.Cons x y -> x + y\n" ++ + " Tuple.Cons (Tuple.Cons x y) _ -> x + y \n" + , "match x with\n" ++ + " {Tuple.Cons x y} -> 1\n" ++ + " {Optional.Some 42 -> k} -> k 42\n" + , "match x with\n" ++ + " 0 ->\n" ++ + " z = 0\n" ++ + " z" + , "match x with\n" ++ + " 0 | 1 == 2 -> 123" + , "match x with\n" ++ + " [] -> 0\n" ++ + " [1] -> 1\n" ++ + " 2 +: _ -> 2\n" ++ + " _ :+ 3 -> 3\n" ++ + " [4] ++ _ -> 4\n" ++ + " _ ++ [5] -> 5\n" ++ + " _ -> -1" + , "cases x -> x" + , "cases\n" ++ + " [] -> 0\n" ++ + " [x] -> 1\n" ++ + " _ -> 2" + , "cases\n" ++ + " 0 ->\n" ++ + " z = 0\n" ++ + " z" + + -- Conditionals + , "if x then y else z" + , "-- if test 1\n" ++ + "if\n" ++ + " s = 0\n" ++ + " s > 0\n" ++ + "then\n" ++ + " s = 0\n" ++ + " s + 1\n" ++ + "else\n" ++ + " s = 0\n" ++ + " s + 2\n" + , "-- if test 2\n" ++ + "if\n" ++ + " s = 0\n" ++ + " s > 0\n" ++ + "then\n" ++ + " s: Int\n" ++ + " s = (0: Int)\n" ++ + " s + 1\n" ++ + "else\n" ++ + " s = 0\n" ++ + " s + 2\n" + , "-- if test 3\n" ++ + "if\n" ++ + " s = 0\n" ++ + " s > 0\n" ++ + "then\n" ++ + " s: Int\n" ++ + " s = (0 : Int)\n" ++ + " s + 1\n" ++ + "else\n" ++ + " s = 0\n" ++ + " s + 2\n" + , "x && y" + , "x || y" + , [r|--let r1 + let r1 : Nat + r1 = match Optional.Some 3 with + x -> 1 + 42 |] + , [r|let + increment = (Nat.+) 1 + + (|>) : forall a . a -> (a -> b) -> b + a |> f = f a + + Stream.fromInt -3 + |> Stream.take 10 + |> Stream.foldLeft 0 increment + |] + ] + +test2 :: Test () +test2 = scope "fiddle" . 
tests $ unitTests + +test :: Test () +test = test1 <|> test2 + +unitTests :: [Test ()] +unitTests = + [ t w "hi" + , t s "foo.+" + , t (w <|> s) "foo.+" + , t (w *> w) "foo bar" + , t (P.try (w *> w) <|> (w *> s)) "foo +" + , t TP.term "x -> x" + , t (TP.lam TP.term) "x y z -> 1 + 1" + , t (sepBy s w) "" + , t (sepBy s w) "uno" + , t (sepBy s w) "uno + dos" + , t (sepBy s w) "uno + dos * tres" + , t (openBlockWith "(" *> sepBy s w <* closeBlock) "(uno + dos + tres)" + , t TP.term "( 0 )" + ] + where + -- type TermP v = P v (AnnotatedTerm v Ann) + t :: P Symbol a -> String -> Test () + t = parseWith + w = wordyDefinitionName + s = symbolyDefinitionName + +parses :: String -> Test () +parses = parseWith TP.term + +parseWith :: P Symbol a -> String -> Test () +parseWith p s = scope (join . take 1 $ lines s) $ + case Ps.parse @ Symbol p s Common.parsingEnv of + Left e -> do + note $ renderParseErrorAsANSI 60 s e + crash $ renderParseErrorAsANSI 60 s e + Right _ -> ok diff --git a/parser-typechecker/tests/Unison/Test/TermPrinter.hs b/parser-typechecker/tests/Unison/Test/TermPrinter.hs new file mode 100755 index 0000000000..80fb635ace --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/TermPrinter.hs @@ -0,0 +1,586 @@ +{-# Language OverloadedStrings #-} + +module Unison.Test.TermPrinter (test) where + +import EasyTest +import qualified Data.Text as Text +import Unison.ABT (annotation) +import qualified Unison.HashQualified as HQ +import Unison.Term (Term) +import qualified Unison.Term as Term +import Unison.TermPrinter +import qualified Unison.Type as Type +import Unison.Symbol (Symbol, symbol) +import qualified Unison.Builtin +import Unison.Parser (Ann(..)) +import qualified Unison.Util.Pretty as PP +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Util.ColorText as CT +import Unison.Test.Common (t, tm) +import qualified Unison.Test.Common as Common + +getNames :: PPE.PrettyPrintEnv +getNames = PPE.fromNames Common.hqLength 
Unison.Builtin.names + +-- Test the result of the pretty-printer. Expect the pretty-printer to +-- produce output that differs cosmetically from the original code we parsed. +-- Check also that re-parsing the pretty-printed code gives us the same ABT. +-- (Skip that latter check if rtt is false.) +-- Note that this does not verify the position of the PrettyPrint Break elements. +tcDiffRtt :: Bool -> String -> String -> Int -> Test () +tcDiffRtt rtt s expected width + = let + inputTerm = tm s :: Term Symbol Ann + prettied = CT.toPlain <$> pretty getNames inputTerm + actual = if width == 0 + then PP.renderUnbroken prettied + else PP.render width prettied + actualReparsed = tm actual + in + scope s $ tests + [ if actual == expected + then ok + else do + note $ "expected:\n" ++ expected + note $ "actual:\n" ++ actual + note $ "show(input) : " ++ show inputTerm + -- note $ "prettyprint : " ++ show prettied + crash "actual != expected" + , if not rtt || (inputTerm == actualReparsed) + then ok + else do + note "round trip test..." + note $ "single parse: " ++ show inputTerm + note $ "double parse: " ++ show actualReparsed + note $ "prettyprint : " ++ show prettied + crash "single parse != double parse" + ] + +-- As above, but do the round-trip test unconditionally. +tcDiff :: String -> String -> Test () +tcDiff s expected = tcDiffRtt True s expected 0 + +-- As above, but expect not even cosmetic differences between the input string +-- and the pretty-printed version. +tc :: String -> Test () +tc s = tcDiff s s + +-- Use renderBroken to render the output to some maximum width. 
+tcBreaksDiff :: Int -> String -> String -> Test () +tcBreaksDiff width s expected = tcDiffRtt True s expected width + +tcBreaks :: Int -> String -> Test () +tcBreaks width s = tcDiffRtt True s s width + +tcBinding :: Int -> String -> Maybe String -> String -> String -> Test () +tcBinding width v mtp tm expected + = let + baseTerm = + Unison.Test.Common.tm tm :: Term Symbol Ann + inputType = fmap Unison.Test.Common.t mtp :: Maybe (Type.Type Symbol Ann) + inputTerm (Just tp) = Term.ann (annotation tp) baseTerm tp + inputTerm Nothing = baseTerm + varV = symbol $ Text.pack v + prettied = fmap CT.toPlain $ PP.syntaxToColor $ prettyBinding + getNames + (HQ.unsafeFromVar varV) + (inputTerm inputType) + actual = if width == 0 + then PP.renderUnbroken prettied + else PP.render width prettied + in + scope expected $ tests + [ if actual == expected + then ok + else do + note $ "expected: " ++ show expected + note $ "actual : " ++ show actual + note $ "show(input) : " ++ show (inputTerm inputType) + note $ "prettyprint : " ++ show prettied + crash "actual != expected" + ] + +test :: Test () +test = scope "termprinter" $ tests + [ scope "splitName" $ tests + [ scope "x" $ expectEqual (splitName "x") [([], "x")] + , scope "A.x" $ expectEqual (splitName "A.x") [([],"A.x"),(["A"],"x")] + , scope "A.B.x" + $ expectEqual (splitName "A.B.x") [([],"A.B.x"),(["A"],"B.x"),(["A","B"],"x")] + ] + , tc "if true then +2 else -2" + , tc "[2, 3, 4]" + , tc "[2]" + , tc "[]" + , tc "true && false" + , tc "false || false" + , tc "g ((true || false) && (f x y))" + , tc "if _something then _foo else _blah" + , tc "3.14159" + , tc "+0" + , tc "\"some text\"" + , tc "\"they said \\\"hi\\\"\"" + , pending $ tc "\'they said \\\'hi\\\'\'" -- TODO lexer doesn't support strings with single quotes in + , tc "Rúnar" + , pending $ tc "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" -- TODO lexer does not like classics! 
+ , tc "古池や蛙飛びこむ水の音" + , tc "2 : Nat" + , tc "x -> x && false" + , tc "x y -> x && y" + , tc "x y z -> x && y" + , tc "x y y -> x && y" + , tc "()" + , tc "Cons" + , tc "foo" + , tc "List.empty" + , tc "None" + , tc "Optional.None" + , tc "handle foo with bar" + , tc "Cons 1 1" + , tc "let\n\ + \ x = 1\n\ + \ x" + , tcBreaks 50 "let\n\ + \ x = 1\n\ + \ x" + , tcBreaks 50 "let\n\ + \ x = 1\n\ + \ y = 2\n\ + \ f x y" + , tc "let\n\ + \ f = cases\n\ + \ 0 -> 0\n\ + \ x -> x\n\ + \ f y" + , tc "let\n\ + \ f z = cases\n\ + \ 0 -> z\n\ + \ y -> g y\n\ + \ f \"\" 1" + , tc "let\n\ + \ f _ = cases\n\ + \ 0 -> 0\n\ + \ x -> x\n\ + \ !f 1" + , pending $ tc "match x with Pair t 0 -> foo t" -- TODO hitting UnknownDataConstructor when parsing pattern + , pending $ tc "match x with Pair t 0 | pred t -> foo t" -- ditto + , pending $ tc "match x with Pair t 0 | pred t -> foo t; Pair t 0 -> foo' t; Pair t u -> bar;" -- ditto + , tc "match x with () -> foo" + , tc "match x with _ -> foo" + , tc "match x with y -> y" + , tc "match x with 1 -> foo" + , tc "match x with +1 -> foo" + , tc "match x with -1 -> foo" + , tc "match x with 3.14159 -> foo" + , tcDiffRtt False "match x with\n\ + \ true -> foo\n\ + \ false -> bar" + "match x with\n\ + \ true -> foo\n\ + \ false -> bar" + 0 + , tcBreaks 50 "match x with\n\ + \ true -> foo\n\ + \ false -> bar" + , tc "match x with false -> foo" + , tc "match x with y@() -> y" + , tc "match x with a@(b@(c@())) -> c" + , tc "match e with { a } -> z" + , pending $ tc "match e with { () -> k } -> z" -- TODO doesn't parse since 'many leaf' expected before the "-> k" + -- need an actual effect constructor to test this with + , tc "cases x -> x" + , tc "cases\n\ + \ [] -> 0\n\ + \ [x] -> 1\n\ + \ _ -> 2" + , tc "if a then if b then c else d else e" + , tc "handle handle foo with bar with baz" + , tcBreaks 16 "match (if a then\n\ + \ b\n\ + \else c) with\n\ + \ 112 -> x" -- dodgy layout. 
note #517 and #518 + , tc "handle bar with Pair 1 1" + , tc "handle bar with x -> foo" + , tcDiffRtt True "let\n\ + \ x = (1 : Int)\n\ + \ (x : Int)" + "let\n\ + \ x : Int\n\ + \ x = 1\n\ + \ (x : Int)" 50 + , tc "match x with 12 -> (y : Int)" + , tc "if a then (b : Int) else (c : Int)" + , tc "match x with 12 -> if a then b else c" + , tc "match x with 12 -> x -> f x" + , tcDiff "match x with (12) -> x" "match x with 12 -> x" + , tcDiff "match (x) with 12 -> x" "match x with 12 -> x" + , tc "match x with 12 -> x" + , tcDiffRtt True "match x with\n\ + \ 12 -> x" + "match x with 12 -> x" 50 + , tcBreaks 15 "match x with\n\ + \ 12 -> x\n\ + \ 13 -> y\n\ + \ 14 -> z" + , tcBreaks 21 "match x with\n\ + \ 12 | p x -> x\n\ + \ 13 | q x -> y\n\ + \ 14 | r x y -> z" + , tcBreaks 9 "match x with\n\ + \ 112 ->\n\ + \ x\n\ + \ 113 ->\n\ + \ y\n\ + \ 114 ->\n\ + \ z" + , pending $ tcBreaks 19 "match\n\ + \ myFunction\n\ + \ argument1\n\ + \ argument2\n\ + \with\n\ + \ 112 -> x" -- TODO, 'unexpected semi' before 'of' - should the parser accept this? + , tc "if c then x -> f x else x -> g x" + , tc "(f x) : Int" + , tc "(f x) : Pair Int Int" + , tcBreaks 50 "let\n\ + \ x = if a then b else c\n\ + \ if x then y else z" + , tc "f x y" + , tc "f x y z" + , tc "f (g x) y" + , tcDiff "(f x) y" "f x y" + , pending $ tc "1.0e-19" -- TODO parser throws UnknownLexeme + , pending $ tc "-1.0e19" -- ditto + , tc "0.0" + , tc "-0.0" + , pending $ tcDiff "+0.0" "0.0" -- TODO parser throws "Prelude.read: no parse" - should it? Note +0 works for UInt. 
+ , tcBreaksDiff 21 "match x with 12 -> if a then b else c" + "match x with\n\ + \ 12 ->\n\ + \ if a then b\n\ + \ else c" + , tcDiffRtt True "if foo\n\ + \then\n\ + \ true && true\n\ + \ 12\n\ + \else\n\ + \ namespace baz where\n\ + \ f : Int -> Int\n\ + \ f x = x\n\ + \ 13" + "if foo then\n\ + \ true && true\n\ + \ 12\n\ + \else\n\ + \ baz.f : Int -> Int\n\ + \ baz.f x = x\n\ + \ 13" 50 + , tcBreaks 50 "if foo then\n\ + \ true && true\n\ + \ 12\n\ + \else\n\ + \ baz.f : Int -> Int\n\ + \ baz.f x = x\n\ + \ 13" + , tcBreaks 90 "handle\n\ + \ a = 5\n\ + \ b =\n\ + \ c = 3\n\ + \ true\n\ + \ false\n\ + \with foo" + , tcBreaks 50 "match x with\n\ + \ true ->\n\ + \ d = 1\n\ + \ false\n\ + \ false ->\n\ + \ f x = x + 1\n\ + \ true" + , pending $ tcBreaks 50 "x -> e = 12\n\ + \ x + 1" -- TODO parser looks like lambda body should be a block, but we hit 'unexpected =' + , tc "x + y" + , tc "x ~ y" + , tcDiff "x `foo` y" "foo x y" + , tc "x + (y + z)" + , tc "x + y + z" + , tc "x + y * z" -- i.e. (x + y) * z ! 
+ , tc "x \\ y == z ~ a" + , tc "foo x (y + z)" + , tc "foo (x + y) z" + , tc "foo x y + z" + , tc "foo p q + r + s" + , tc "foo (p + q) r + s" + , tc "foo (p + q + r) s" + , tc "p + q + r + s" + , tcDiffRtt False "(foo.+) x y" "x foo.+ y" 0 + , tc "x + y + f a b c" + , tc "x + y + foo a b" + , tc "foo x y p + z" + , tc "foo p q a + r + s" + , tc "foo (p + q) r a + s" + , tc "foo (x + y) (p - q)" + , tc "x -> x + y" + , tc "if p then x + y else a - b" + , tc "(x + y) : Int" + , tc "!foo" + , tc "!(foo a b)" + , tc "!f a" + , tcDiff "f () a ()" "!(!f a)" + , tcDiff "f a b ()" "!(f a b)" + , tcDiff "!f ()" "!(!f)" + , tc "!(!foo)" + , tc "'bar" + , tc "'(bar a b)" + , tc "'('bar)" + , tc "!('bar)" + , tc "'(!foo)" + , tc "x -> '(y -> 'z)" + , tc "'(x -> '(y -> z))" + , tc "(\"a\", 2)" + , tc "(\"a\", 2, 2.0)" + , tcDiff "(2)" "2" + , pending $ tcDiff "Pair \"2\" (Pair 2 ())" "(\"2\", 2)" -- TODO parser produced + -- Pair "2" (Pair 2 ()#0) + -- instead of + -- Pair#0 "2" (Pair#0 2 ()#0) + -- Maybe because in this context the + -- parser can't distinguish between a constructor + -- called 'Pair' and a function called 'Pair'. 
+ , pending $ tc "Pair 2 ()" -- unary tuple; fails for same reason as above + , tc "match x with (a, b) -> a" + , tc "match x with () -> foo" + , pending $ tc "match x with [a, b] -> a" -- issue #266 + , pending $ tc "match x with [a] -> a" -- ditto + , pending $ tc "match x with [] -> a" -- ditto + , tc "match x with Optional.Some (Optional.Some _) -> ()" -- Issue #695 + -- need an actual effect constructor to test the following + , pending $ tc "match x with { SomeRequest (Optional.Some _) -> k } -> ()" + , tcBinding 50 "foo" (Just "Int") "3" "foo : Int\n\ + \foo = 3" + , tcBinding 50 "foo" Nothing "3" "foo = 3" + , tcBinding 50 "foo" (Just "Int -> Int") "n -> 3" "foo : Int -> Int\n\ + \foo n = 3" + , tcBinding 50 "foo" Nothing "n -> 3" "foo n = 3" + , tcBinding 50 "foo" Nothing "n m -> 3" "foo n m = 3" + , tcBinding 9 "foo" Nothing "n m -> 3" "foo n m =\n\ + \ 3" + , tcBinding 50 "+" (Just "Int -> Int -> Int") "a b -> foo a b" "(+) : Int -> Int -> Int\n\ + \a + b = foo a b" + , tcBinding 50 "+" (Just "Int -> Int -> Int -> Int") "a b c -> foo a b c" "(+) : Int -> Int -> Int -> Int\n\ + \(+) a b c = foo a b c" + , tcBinding 50 "+" Nothing "a b -> foo a b" "a + b = foo a b" + , tcBinding 50 "+" Nothing "a b c -> foo a b c" "(+) a b c = foo a b c" + , tcBinding 50 "." Nothing "f g x -> f (g x)" "(.) 
f g x = f (g x)" + , tcBreaks 32 "let\n\ + \ go acc a b =\n\ + \ match List.at 0 a with\n\ + \ Optional.None -> 0\n\ + \ Optional.Some hd1 -> 0\n\ + \ go [] a b" + , tcBreaks 30 "match x with\n\ + \ (Optional.None, _) -> foo" + , tcBreaks 50 "if true then match x with 12 -> x else x" + , tcBreaks 50 "if true then x else match x with 12 -> x" + , pending $ tcBreaks 80 "x -> (if c then t else f)" -- TODO 'unexpected )', surplus parens + , tcBreaks 80 "'let\n\ + \ foo = bar\n\ + \ baz foo" + , tcBreaks 80 "!let\n\ + \ foo = bar\n\ + \ baz foo" + , tcDiffRtt True "foo let\n\ + \ a = 1\n\ + \ b" + "foo\n\ + \ let\n\ + \ a = 1\n\ + \ b" 80 + , tcBreaks 80 "if\n\ + \ a = b\n\ + \ a then foo else bar" -- missing break before 'then', issue #518 + , tcBreaks 80 "Stream.foldLeft 0 (+) t" + , tcBreaks 80 "let\n\ + \ delay = 'isEven\n\ + \ ()" + , tcBreaks 80 "let\n\ + \ a = ()\n\ + \ b = ()\n\ + \ c = (1, 2)\n\ + \ ()" + , tcBreaks 80 "let\n\ + \ a = [: escaped: \\@ :]\n\ + \ ()" + +-- FQN elision tests + , tcBreaks 12 "if foo then\n\ + \ use A x\n\ + \ f x x\n\ + \else\n\ + \ use B y\n\ + \ f y y" + , tcBreaks 12 "if foo then\n\ + \ use A x\n\ + \ f x x\n\ + \else\n\ + \ use B x\n\ + \ f x x" + , tcBreaks 80 "let\n\ + \ a =\n\ + \ use A x\n\ + \ if foo then f x x else g x x\n\ + \ bar" + , tcBreaks 80 "if foo then f A.x B.x else f A.x B.x" + , tcBreaks 80 "if foo then f A.x A.x B.x else y" + , tcBreaks 80 "if foo then A.f x else y" + , tcBreaks 13 "if foo then\n\ + \ use A +\n\ + \ x + y\n\ + \else y" + , tcBreaks 20 "if p then\n\ + \ use A x\n\ + \ use B y z\n\ + \ f z z y y x x\n\ + \else q" + , tcBreaks 30 "if foo then\n\ + \ use A.X c\n\ + \ use AA.PP.QQ e\n\ + \ f c c e e\n\ + \else\n\ + \ use A.B X.d Y.d\n\ + \ use A.B.X f\n\ + \ g X.d X.d Y.d Y.d f f" + , tcBreaks 30 "if foo then\n\ + \ use A.X c\n\ + \ f c c\n\ + \else\n\ + \ use A X.c YY.c\n\ + \ g X.c X.c YY.c YY.c" + , tcBreaks 20 "handle\n\ + \ if foo then\n\ + \ use A.X c\n\ + \ f c c\n\ + \ else\n\ + \ use A.Y 
c\n\ + \ g c c\n\ + \with bar" + , tcBreaks 20 "let\n\ + \ a = 2\n\ + \ handle baz\n\ + \ with\n\ + \ use A.X c\n\ + \ if foo then\n\ + \ f c c\n\ + \ else g c c" + , tcBreaks 28 "if foo then\n\ + \ f (x : (∀ t. Pair t t))\n\ + \else\n\ + \ f (x : (∀ t. Pair t t))" + , tcBreaks 15 "handle\n\ + \ use A x\n\ + \ if f x x then\n\ + \ x\n\ + \ else y\n\ + \with foo" -- missing break before 'then', issue #518 + , tcBreaks 20 "match x with\n\ + \ () ->\n\ + \ use A y\n\ + \ f y y" + , tcBreaks 12 "let\n\ + \ use A x\n\ + \ f x x\n\ + \ c = g x x\n\ + \ h x x" + , tcBreaks 15 "handle\n\ + \ use A x\n\ + \ f x x\n\ + \with foo" + , tcBreaks 15 "let\n\ + \ c =\n\ + \ use A x\n\ + \ f x x\n\ + \ g c" + , tcBreaks 20 "if foo then\n\ + \ f x x A.x A.x\n\ + \else g" + , tcBreaks 27 "match t with\n\ + \ () ->\n\ + \ a =\n\ + \ use A B.x\n\ + \ f B.x B.x\n\ + \ handle\n\ + \ q =\n\ + \ use A.B.D x\n\ + \ h x x\n\ + \ foo\n\ + \ with foo\n\ + \ bar\n\ + \ _ ->\n\ + \ b =\n\ + \ use A.C x\n\ + \ g x x\n\ + \ bar" + , tcBreaks 20 "let\n\ + \ a =\n\ + \ handle\n\ + \ use A x\n\ + \ f x x\n\ + \ with foo\n\ + \ bar" + , tcBreaks 16 "let\n\ + \ a =\n\ + \ b =\n\ + \ use A x\n\ + \ f x x\n\ + \ foo\n\ + \ bar" + , tcBreaks 20 "let\n\ + \ a =\n\ + \ match x with\n\ + \ () ->\n\ + \ use A x\n\ + \ f x x\n\ + \ bar" + , tcBreaks 20 "let\n\ + \ a =\n\ + \ use A x\n\ + \ b = f x x\n\ + \ c = g x x\n\ + \ foo\n\ + \ bar" + , tcBreaks 13 "let\n\ + \ a =\n\ + \ use A p q r\n\ + \ f p p\n\ + \ f q q\n\ + \ f r r\n\ + \ foo" + -- The following behaviour is possibly not ideal. Note how the `use A B.x` + -- would have the same effect if it was under the `c =`. It doesn't actually + -- need to be above the `b =`, because all the usages of A.B.X in that tree are + -- covered by another use statement, the `use A.B x`. Fixing this would + -- probably require another annotation pass over the AST, to place 'candidate' + -- use statements, to then push some of them down on the next pass. 
+ -- Not worth it! + , tcBreaks 20 "let\n\ + \ a =\n\ + \ use A B.x\n\ + \ b =\n\ + \ use A.B x\n\ + \ f x x\n\ + \ c =\n\ + \ g B.x B.x\n\ + \ h A.D.x\n\ + \ foo\n\ + \ bar" + , tcBreaks 80 "let\n\ + \ use A x\n\ + \ use A.T.A T1\n\ + \ g = T1 +3\n\ + \ h = T1 +4\n\ + \ i : T -> T -> Int\n\ + \ i p q =\n\ + \ g' = T1 +3\n\ + \ h' = T1 +4\n\ + \ +2\n\ + \ if true then x else x" + ] diff --git a/parser-typechecker/tests/Unison/Test/Type.hs b/parser-typechecker/tests/Unison/Test/Type.hs new file mode 100644 index 0000000000..f0042d3539 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Type.hs @@ -0,0 +1,33 @@ +{-# Language OverloadedStrings #-} + +module Unison.Test.Type where + +import EasyTest +import Unison.Type +import Unison.Symbol (Symbol) +import qualified Unison.Var as Var +import qualified Unison.Typechecker as Typechecker + +infixr 1 --> + +(-->) :: Ord v => Type v () -> Type v () -> Type v () +(-->) a b = arrow() a b + +test :: Test () +test = scope "type" $ tests [ + scope "unArrows" $ + let x = arrow() (builtin() "a") (builtin() "b") :: Type Symbol () + in case x of + Arrows' [i,o] -> + expect (i == builtin() "a" && o == builtin() "b") + _ -> crash "unArrows (a -> b) did not return a spine of [a,b]" + , + scope "subtype" $ do + let v = Var.named "a" + v2 = Var.named "b" + vt = var() v + vt2 = var() v2 + x = forall() v (nat() --> effect() [vt, builtin() "eff"] (nat())) :: Type Symbol () + y = forall() v2 (nat() --> effect() [vt2] (nat())) :: Type Symbol () + expect . 
not $ Typechecker.isSubtype x y + ] diff --git a/parser-typechecker/tests/Unison/Test/TypePrinter.hs b/parser-typechecker/tests/Unison/Test/TypePrinter.hs new file mode 100755 index 0000000000..5b157ba064 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/TypePrinter.hs @@ -0,0 +1,170 @@ +module Unison.Test.TypePrinter where + +import EasyTest +import qualified Data.Map as Map +import Unison.TypePrinter +import qualified Unison.Builtin +import Unison.Util.ColorText (toPlain) +import qualified Unison.Util.Pretty as PP +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.Test.Common as Common + + +-- Test the result of the pretty-printer. Expect the pretty-printer to +-- produce output that differs cosmetically from the original code we parsed. +-- Check also that re-parsing the pretty-printed code gives us the same ABT. +-- (Skip that latter check if rtt is false.) +-- Note that this does not verify the position of the PrettyPrint Break elements. +tc_diff_rtt :: Bool -> String -> String -> Int -> Test () +tc_diff_rtt rtt s expected width = + let input_type = Common.t s + get_names = PPE.fromNames Common.hqLength Unison.Builtin.names + prettied = fmap toPlain $ PP.syntaxToColor $ prettyRaw get_names Map.empty (-1) input_type + actual = if width == 0 + then PP.renderUnbroken $ prettied + else PP.render width $ prettied + actual_reparsed = Common.t actual + in scope s $ tests [( + if actual == expected then ok + else do note $ "expected: " ++ show expected + note $ "actual : " ++ show actual + note $ "expectedS:\n" ++ expected + note $ "actualS:\n" ++ actual + note $ "show(input) : " ++ show input_type + note $ "prettyprint : " ++ show prettied + crash "actual != expected" + ), ( + if (not rtt) || (input_type == actual_reparsed) then ok + else do note $ "round trip test..." 
+ note $ "single parse: " ++ show input_type + note $ "double parse: " ++ show actual_reparsed + note $ "prettyprint : " ++ show prettied + crash "single parse != double parse" + )] + +-- As above, but do the round-trip test unconditionally. +tc_diff :: String -> String -> Test () +tc_diff s expected = tc_diff_rtt True s expected 0 + +-- As above, but expect not even cosmetic differences between the input string +-- and the pretty-printed version. +tc :: String -> Test () +tc s = tc_diff s s + +-- Use renderBroken to render the output to some maximum width. +tc_breaks :: String -> Int -> String -> Test () +tc_breaks s width expected = tc_diff_rtt True s expected width + +test :: Test () +test = scope "typeprinter" . tests $ + [ tc "a -> b" + , tc "()" + , tc "Pair" + , tc "Pair a b" + , tc "Pair a a" + , tc_diff "((a))" $ "a" + , tc "Pair a ()" -- unary tuple + , tc "(a, a)" + , tc "(a, a, a)" + , tc "(a, b, c, d)" + , tc "Pair a (Pair a a)" + , tc "Pair (Pair a a) a" + , tc "{} (Pair a a)" + , tc "a ->{} b" + , tc "a ->{e1} b" + , tc "a ->{e1, e2} b -> c ->{} d" + , tc "a ->{e1, e2} b ->{} c -> d" + , tc "a -> b -> c ->{} d" + , tc "a -> b ->{} c -> d" + , tc "{e1, e2} (Pair a a)" + , tc "Pair (a -> b) (c -> d)" + , tc "Pair a b ->{e1, e2} Pair a b ->{} Pair (a -> b) d -> Pair c d" + , tc "[Pair a a]" + , tc "'a" + , tc "'Pair a a" + , tc "a -> 'b" + , tc "'(a -> b)" + , tc "(a -> b) -> c" + , tc "'a -> b" + , tc "∀ A. A -> A" + , tc "∀ foo.A. foo.A -> foo.A" + , tc "∀ A B. A -> B -> (A, B)" + , tc "a -> 'b -> c" + , tc "a -> (b -> c) -> d" + , tc "(a -> b) -> c -> d" + , tc "((a -> b) -> c) -> d" + , tc "(∀ a. 'a) -> ()" + , tc "(∀ a. (∀ b. 'b) -> a) -> ()" + , tc_diff "∀ a. 
'a" $ "'a" + , tc "a -> '(b -> c)" + , tc "a -> b -> c -> d" + , tc "a -> 'Pair b c" + , tc "a -> b -> 'c" + , tc "a ->{e} 'b" + , tc "a -> '{e} b" + , tc "a -> '{e} b -> c" + , tc "a -> '{e} b ->{f} c" + , tc "a -> '{e} (b -> c)" + , tc "a -> '{e} (b ->{f} c)" + , tc "a -> 'b" + , tc "a -> '('b)" + , tc "a -> '('(b -> c))" + , tc "a -> '('('(b -> c)))" + , tc "a -> '{e} ('('(b -> c)))" + , tc "a -> '('{e} ('(b -> c)))" + , tc "a -> '('('{e} (b -> c)))" + , tc "a -> 'b ->{f} c" + , tc "a -> '(b -> c)" + , tc "a -> '(b ->{f} c)" + , tc "a -> '{e} ('b)" + , pending $ tc "a -> '{e} 'b" -- issue #249 + , pending $ tc "a -> '{e} '{f} b" -- issue #249 + , tc "a -> '{e} ('b)" + , tc_diff "a -> () ->{e} () -> b -> c" $ "a -> '{e} ('(b -> c))" + , tc "a -> '{e} ('(b -> c))" + , tc_diff "a ->{e} () ->{f} b" $ "a ->{e} '{f} b" + , tc "a ->{e} '{f} b" + , tc_diff "a -> () ->{e} () ->{f} b" $ "a -> '{e} ('{f} b)" + , tc "a -> '{e} ('{f} b)" + , tc "a -> '{e} () ->{f} b" + , tc "a -> '{e} ('{f} (b -> c))" + , tc "a ->{e} '(b -> c)" + , tc "a -> '{e} (b -> c)" + , tc_diff "a -> () ->{e} () -> b" $ "a -> '{e} ('b)" + , tc "'{e} a" + , tc "'{e} (a -> b)" + , tc "'{e} (a ->{f} b)" + , pending $ tc "Pair a '{e} b" -- parser hits unexpected ' + , tc_diff_rtt False "Pair a ('{e} b)" "Pair a '{e} b" 80 -- no RTT due to the above + , tc "'(a -> 'a)" + , tc "'()" + , tc "'('a)" + , tc_diff "''a" "'('a)" + , tc_diff "'''a" "'('('a))" + , tc_diff "∀ a . a" $ "a" + , tc_diff "∀ a. a" $ "a" + , tc_diff "∀ a . 'a" $ "'a" + , pending $ tc_diff "∀a . a" $ "a" -- lexer doesn't accept, treats ∀a as one lexeme - feels like it should work + , pending $ tc_diff "∀ A . 'A" $ "'A" -- 'unknown parse error' - should this be accepted? 
+ + , tc_diff_rtt False "a -> b -> c -> d" -- hitting 'unexpected Semi' in the reparse + "a\n\ + \-> b\n\ + \-> c\n\ + \-> d" 10 + + , tc_diff_rtt False "a -> Pair b c -> d" -- ditto, and extra line breaks that seem superfluous in Pair + "a\n\ + \-> Pair b c\n\ + \-> d" 14 + + , tc_diff_rtt False "Pair (forall a. (a -> a -> a)) b" -- as above, and TODO not nesting under Pair + "Pair\n\ + \ (∀ a. a -> a -> a) b" 24 + + , tc_diff_rtt False "Pair (forall a. (a -> a -> a)) b" -- as above, and TODO not breaking under forall + "Pair\n\ + \ (∀ a. a -> a -> a)\n\ + \ b" 21 + + ] diff --git a/parser-typechecker/tests/Unison/Test/Typechecker.hs b/parser-typechecker/tests/Unison/Test/Typechecker.hs new file mode 100644 index 0000000000..d7254ff54c --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Typechecker.hs @@ -0,0 +1,33 @@ +{-# Language OverloadedStrings #-} + +module Unison.Test.Typechecker where + +import EasyTest +import Unison.Symbol ( Symbol(..) ) +import qualified Unison.Type as Type +import qualified Unison.Typechecker as Typechecker +import qualified Unison.Var as Var + +test :: Test () +test = scope "typechecker" $ tests + [ scope "isSubtype" isSubtypeTest + ] + +isSubtypeTest :: Test () +isSubtypeTest = + let + symbol i n = Symbol i (Var.User n) + forall v t = Type.forall () v t + var v = Type.var () v + + a = symbol 0 "a" + a_ i = symbol i "a" + lhs = forall a (var a) -- ∀a. a + rhs_ i = var (a_ i) -- a_i + in + -- check that `∀a. a <: a_i` (used to fail for i = 2, 3) + tests [ expectSubtype lhs (rhs_ i) | i <- [0 .. 
5] ] + where + expectSubtype t1 t2 = + scope ("isSubtype (" <> show t1 <> ") (" <> show t2 <> ")") + (expect $ Typechecker.isSubtype t1 t2) diff --git a/parser-typechecker/tests/Unison/Test/Typechecker/Components.hs b/parser-typechecker/tests/Unison/Test/Typechecker/Components.hs new file mode 100644 index 0000000000..327a51d510 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Typechecker/Components.hs @@ -0,0 +1,35 @@ +module Unison.Test.Typechecker.Components where + +-- import Control.Monad +import EasyTest +-- import Unison.Parsers (unsafeParseTerm) +-- import qualified Unison.Note as Note +-- import qualified Unison.Test.Common as Common +-- import qualified Unison.Typechecker.Components as Components + +test :: Test () +test = scope "Typechecker.Components" $ ok + -- [ + -- -- simple case, no minimization done + -- t "{ id x = x; g = id 42; y = id id g; y }" + -- "{ id x = x; g = id 42; y = id id g; y }" + -- -- check that we get let generalization + -- , t "{ id x = x; g = id 42; y = id id g; y }" + -- "{ id x = x; g = id 42; y = id id g; y }" + -- -- check that we preserve order of components as much as possible + -- , t "{ id2 x = x; id1 x = x; id3 x = x; id3 }" + -- "{ id2 x = x; id1 x = x; id3 x = x; id3 }" + -- -- check that we reorder according to dependencies + -- , t "{ g = id 42; y = id id g; id x = x; y }" + -- "{ id x = x; g = id 42; y = id id g; y }" + -- -- insane example, checks for: generalization, reordering, + -- -- preservation of order when possible + -- , t "{ g = id 42; y = id id g; ping x = pong x; pong x = id (ping x); id x = x; y }" + -- "{ id x = x; g = id 42; y = id id g ; ({ ping x = pong x; pong x = id (ping x) ; y })}" + -- ] + -- where + -- t before after = scope (before ++ " ⟹ " ++ after) $ do + -- let term = unsafeParseTerm before + -- let after' = Components.minimize' term + -- guard $ Common.typechecks' after' + -- expect (unsafeParseTerm after == after') diff --git 
a/parser-typechecker/tests/Unison/Test/Typechecker/Context.hs b/parser-typechecker/tests/Unison/Test/Typechecker/Context.hs new file mode 100644 index 0000000000..a759708341 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Typechecker/Context.hs @@ -0,0 +1,41 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Test.Typechecker.Context ( test ) +where + +import Data.Foldable ( for_ ) +import EasyTest +import Unison.Symbol ( Symbol ) +import qualified Unison.Typechecker.Context as Context +import qualified Unison.Term as Term +import qualified Unison.Type as Type +import qualified Unison.Var as Var + +test :: Test () +test = scope "context" $ tests + [ scope "verifyClosedTerm" verifyClosedTermTest + ] + +type TV = Context.TypeVar Symbol () + +verifyClosedTermTest :: Test () +verifyClosedTermTest = tests + [ scope "report-all-free-vars" $ + let + a = Var.named @Symbol "a" + b = Var.named @Symbol "b" + a' = Var.named @TV "a'" + b' = Var.named @TV "b'" + -- (a : a')(b : b') + t = Term.app() + (Term.ann() (Term.var() a) (Type.var() a')) + (Term.ann() (Term.var() b) (Type.var() b')) + res = Context.synthesizeClosed [] mempty t + errors = Context.typeErrors res + expectUnknownSymbol (Context.ErrorNote cause _) = case cause of + Context.UnknownSymbol _ _ -> ok + e -> crash $ "Unexpected type error " <> show e + in do + expectEqual 4 (length errors) -- there are 4 unknown symbols: a, a', b, b' + for_ errors expectUnknownSymbol + ] diff --git a/parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs b/parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs new file mode 100644 index 0000000000..062975322b --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs @@ -0,0 +1,57 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Test.Typechecker.TypeError where + +import Data.Foldable (toList) +import Data.Maybe (isJust) +import EasyTest +import Unison.Parser (Ann) +import Unison.Result (pattern Result) +import qualified 
Unison.Result as Result +import Unison.Symbol (Symbol) +import qualified Unison.Typechecker.Context as C +import Unison.Typechecker.Extractor (ErrorExtractor) +import qualified Unison.Typechecker.Extractor as Ex +import qualified Unison.Typechecker.TypeError as Err +import Unison.Var (Var) +import qualified Unison.Test.Common as Common + +test :: Test () +test = scope "> extractor" . tests $ + [ y "> true && 3" Err.and + , y "> true || 3" Err.or + , y "> if 3 then 1 else 2" Err.cond + , y "> if true then 1 else \"surprise\"" Err.ifBody + , y "> match 3 with 3 | 3 -> 3" Err.matchGuard + , y "> match 3 with\n 3 -> 3\n 4 -> \"surprise\"" Err.matchBody + -- , y "> match 3 with true -> true" Err. + , y "> [1, +1]" Err.vectorBody + , n "> true && ((x -> x + 1) true)" Err.and + , n "> true || ((x -> x + 1) true)" Err.or + , n "> if ((x -> x + 1) true) then 1 else 2" Err.cond + , n "> match 3 with 3 | 3 -> 3" Err.matchBody + , y "> 1 1" Err.applyingNonFunction + , y "> 1 Int.+ 1" Err.applyingFunction + , y ( "ability Abort where\n" ++ + " abort : {Abort} a\n" ++ + "\n" ++ + "xyz : t -> Request Abort t -> t\n" ++ + "xyz default abort = match abort with\n" ++ + " {a} -> 3\n" ++ + " {Abort.abort -> k} ->\n" ++ + " handle k 100 with xyz default\n" + ) Err.matchBody + ] + where y, n :: String -> ErrorExtractor Symbol Ann a -> Test () + y s ex = scope s $ expect $ yieldsError s ex + n s ex = scope s $ expect $ noYieldsError s ex + +noYieldsError :: Var v => String -> ErrorExtractor v Ann a -> Bool +noYieldsError s ex = not $ yieldsError s ex + +yieldsError :: forall v a. Var v => String -> ErrorExtractor v Ann a -> Bool +yieldsError s ex = let + Result notes (Just _) = Common.parseAndSynthesizeAsFile [] "> test" s + notes' :: [C.ErrorNote v Ann] + notes' = [ n | Result.TypeError n <- toList notes ] + in any (isJust . 
Ex.extract ex) notes' diff --git a/parser-typechecker/tests/Unison/Test/UnisonSources.hs b/parser-typechecker/tests/Unison/Test/UnisonSources.hs new file mode 100644 index 0000000000..35452d9b36 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/UnisonSources.hs @@ -0,0 +1,195 @@ +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RankNTypes #-} + +module Unison.Test.UnisonSources where + +import Control.Lens ( view ) +import Control.Lens.Tuple ( _5 ) +import Control.Monad (void) +import Control.Monad.IO.Class (liftIO) +import qualified Data.Map as Map +import Data.Sequence (Seq) +import Data.Text (unpack) +import Data.Text.IO (readFile) +import EasyTest +import System.FilePath (joinPath, splitPath, replaceExtension) +import System.FilePath.Find (always, extension, find, (==?)) +import System.Directory ( doesFileExist ) +import qualified Unison.ABT as ABT +import qualified Unison.Builtin as Builtin +import Unison.Codebase.Runtime ( Runtime, evaluateWatches ) +import Unison.Codebase.Serialization ( getFromBytes, putBytes ) +import qualified Unison.Codebase.Serialization.V1 as V1 +import Unison.DataDeclaration (EffectDeclaration, DataDeclaration) +import Unison.Parser as Parser +import qualified Unison.Parsers as Parsers +import qualified Unison.PrettyPrintEnv as PPE +import qualified Unison.PrintError as PrintError +import Unison.Reference ( Reference ) +import Unison.Result (pattern Result, Result) +import qualified Unison.Result as Result +import qualified Unison.Runtime.Rt1IO as RT +import qualified Unison.Runtime.Interface as RTI +import Unison.Symbol (Symbol) +import qualified Unison.Term as Term +import Unison.Term ( Term ) +import Unison.Test.Common (parseAndSynthesizeAsFile, parsingEnv) +import Unison.Type ( Type ) +import qualified Unison.UnisonFile as UF +import Unison.Util.Monoid (intercalateMap) +import qualified Unison.Var as Var +import qualified Unison.Test.Common as Common +import qualified Unison.Names3 + +type Note = Result.Note Symbol Parser.Ann 
+ +type TFile = UF.TypecheckedUnisonFile Symbol Ann +type SynthResult = + Result (Seq Note) + (Either Unison.Names3.Names0 TFile) + +type EitherResult = Either String TFile + + +ppEnv :: PPE.PrettyPrintEnv +ppEnv = PPE.fromNames Common.hqLength Builtin.names + +expectRight' :: Either String a -> Test a +expectRight' (Left e) = crash e +expectRight' (Right a) = ok >> pure a + +good :: EitherResult -> Test TFile +good = expectRight' + +bad :: EitherResult -> Test TFile +bad r = EasyTest.expectLeft r >> done + +test :: Bool -> Test () +test new = do + rt <- if new then io RTI.startRuntime else pure RT.runtime + scope "unison-src" + . tests + $ [ go rt shouldPassNow good + , go rt shouldFailNow bad + , go rt shouldPassLater (pending . bad) + , go rt shouldFailLater (pending . good) + ] + +shouldPassPath, shouldFailPath :: String +shouldPassPath = "unison-src/tests" +shouldFailPath = "unison-src/errors" + +shouldPassNow :: IO [FilePath] +shouldPassNow = find always (extension ==? ".u") shouldPassPath + +shouldFailNow :: IO [FilePath] +shouldFailNow = find always (extension ==? ".u") shouldFailPath + +shouldPassLater :: IO [FilePath] +shouldPassLater = find always (extension ==? ".uu") shouldPassPath + +shouldFailLater :: IO [FilePath] +shouldFailLater = find always (extension ==? 
".uu") shouldFailPath + +go :: Runtime Symbol -> IO [FilePath] -> (EitherResult -> Test TFile) -> Test () +go rt files how = do + files' <- liftIO files + tests (makePassingTest rt how <$> files') + +showNotes :: Foldable f => String -> PrintError.Env -> f Note -> String +showNotes source env = + intercalateMap "\n\n" $ PrintError.renderNoteAsANSI 60 env source + +decodeResult + :: String -> SynthResult -> EitherResult-- String (UF.TypecheckedUnisonFile Symbol Ann) +decodeResult source (Result notes Nothing) = + Left $ showNotes source ppEnv notes +decodeResult source (Result notes (Just (Left errNames))) = + Left $ showNotes + source + (PPE.fromNames Common.hqLength + (Unison.Names3.shadowing errNames Builtin.names)) + notes +decodeResult _source (Result _notes (Just (Right uf))) = + Right uf + +makePassingTest + :: Runtime Symbol -> (EitherResult -> Test TFile) -> FilePath -> Test () +makePassingTest rt how filepath = scope (shortName filepath) $ do + uf <- typecheckingTest how filepath + resultTest rt uf filepath *> serializationTest uf + +shortName :: FilePath -> FilePath +shortName = joinPath . drop 1 . splitPath + +typecheckingTest :: (EitherResult -> Test TFile) -> FilePath -> Test TFile +typecheckingTest how filepath = scope "typecheck" $ do + source <- io $ unpack <$> Data.Text.IO.readFile filepath + how . 
decodeResult source $ parseAndSynthesizeAsFile [] (shortName filepath) source + +resultTest + :: Runtime Symbol -> TFile -> FilePath -> Test () +resultTest rt uf filepath = do + let valueFile = replaceExtension filepath "ur" + rFileExists <- io $ doesFileExist valueFile + if rFileExists + then scope "result" $ do + values <- io $ unpack <$> Data.Text.IO.readFile valueFile + let untypedFile = UF.discardTypes uf + let term = Parsers.parseTerm values parsingEnv + (bindings, watches) <- io $ either undefined id <$> + evaluateWatches Builtin.codeLookup + mempty + (const $ pure Nothing) + rt + untypedFile + case term of + Right tm -> do + -- compare the the watch expression from the .u with the expr in .ur + let [watchResult] = view _5 <$> Map.elems watches + tm' = Term.letRec' False bindings watchResult + -- note . show $ tm' + -- note . show $ Term.amap (const ()) tm + expect $ tm' == Term.amap (const ()) tm + Left e -> crash $ show e + else pure () + +serializationTest :: TFile -> Test () +serializationTest uf = scope "serialization" . tests . 
concat $ + [ map testDataDeclaration (Map.toList $ UF.dataDeclarations' uf) + , map testEffectDeclaration (Map.toList $ UF.effectDeclarations' uf) + , map testTerm (Map.toList $ UF.hashTerms uf) + ] + where + putUnit :: Monad m => () -> m () + putUnit () = pure () + getUnit :: Monad m => m () + getUnit = pure () + testDataDeclaration :: (Symbol, (Reference, DataDeclaration Symbol Ann)) -> Test () + testDataDeclaration (name, (_, decl)) = scope (Var.nameStr name) $ + let decl' :: DataDeclaration Symbol () + decl' = void decl + bytes = putBytes (V1.putDataDeclaration V1.putSymbol putUnit) decl' + decl'' = getFromBytes (V1.getDataDeclaration V1.getSymbol getUnit) bytes + in expectEqual decl'' (Just decl') + testEffectDeclaration :: (Symbol, (Reference, EffectDeclaration Symbol Ann)) -> Test () + testEffectDeclaration (name, (_, decl)) = scope (Var.nameStr name) $ + let decl' :: EffectDeclaration Symbol () + decl' = void decl + bytes = putBytes (V1.putEffectDeclaration V1.putSymbol putUnit) decl' + decl'' = getFromBytes (V1.getEffectDeclaration V1.getSymbol getUnit) bytes + in expectEqual decl'' (Just decl') + testTerm :: (Symbol, (Reference, Term Symbol Ann, Type Symbol Ann)) -> Test () + testTerm (name, (_, tm, tp)) = scope (Var.nameStr name) $ + let tm' :: Term Symbol () + tm' = Term.amap (const ()) tm + tp' :: Type Symbol () + tp' = ABT.amap (const ()) tp + tmBytes = putBytes (V1.putTerm V1.putSymbol putUnit) tm' + tpBytes = putBytes (V1.putType V1.putSymbol putUnit) tp' + tm'' = getFromBytes (V1.getTerm V1.getSymbol getUnit) tmBytes + tp'' = getFromBytes (V1.getType V1.getSymbol getUnit) tpBytes + in tests + [ scope "type" $ expectEqual tp'' (Just tp') + , scope "term" $ expectEqual tm'' (Just tm') + ] diff --git a/parser-typechecker/tests/Unison/Test/UriParser.hs b/parser-typechecker/tests/Unison/Test/UriParser.hs new file mode 100644 index 0000000000..fbea77318a --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/UriParser.hs @@ -0,0 +1,84 @@ +{-# LANGUAGE 
OverloadedStrings #-} + +module Unison.Test.UriParser where + +import EasyTest +import Unison.Codebase.Editor.RemoteRepo (RemoteRepo(..)) +import Unison.Codebase.Path (Path(..)) +import qualified Unison.Codebase.Path as Path +import qualified Text.Megaparsec as P +import qualified Unison.Codebase.Editor.UriParser as UriParser +import qualified Data.Sequence as Seq +import Unison.Codebase.ShortBranchHash (ShortBranchHash(..)) +import Data.Text (Text) +import Unison.NameSegment (NameSegment(..)) +import qualified Data.Text as Text + +test :: Test () +test = scope "uriparser" . tests $ [ testAugmented ] + +testAugmented:: Test () +testAugmented = scope "augmented" . tests $ +-- Local Protocol +-- $ git clone /srv/git/project.git +-- $ git clone /srv/git/project.git[:treeish][:[#hash][.path]] + [ scope "local-protocol" . tests . map parseAugmented $ + [ ("/srv/git/project.git", + (GitRepo "/srv/git/project.git" Nothing, Nothing, Path.empty)) + , ("/srv/git/project.git:abc:#def.hij.klm", + (GitRepo "/srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) + , ("srv/git/project.git", + (GitRepo "srv/git/project.git" Nothing, Nothing, Path.empty)) + , ("srv/git/project.git:abc:#def.hij.klm", + (GitRepo "srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) + ], +-- File Protocol +-- $ git clone file:///srv/git/project.git[:treeish][:[#hash][.path]] <- imagined + scope "file-protocol" . tests . 
map parseAugmented $ + [ ("file:///srv/git/project.git", + (GitRepo "file:///srv/git/project.git" Nothing, Nothing, Path.empty)) + , ("file:///srv/git/project.git:abc:#def.hij.klm", + (GitRepo "file:///srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) + , ("file://srv/git/project.git", + (GitRepo "file://srv/git/project.git" Nothing, Nothing, Path.empty)) + , ("file://srv/git/project.git:abc:#def.hij.klm", + (GitRepo "file://srv/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) + ], +-- Smart / Dumb HTTP protocol +-- $ git clone https://example.com/gitproject.git[:treeish][:[#hash][.path]] <- imagined + scope "http-protocol" . tests . map parseAugmented $ + [ ("https://example.com/git/project.git", + (GitRepo "https://example.com/git/project.git" Nothing, Nothing, Path.empty)) + , ("https://user@example.com/git/project.git:abc:#def.hij.klm]", + (GitRepo "https://user@example.com/git/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) + ], +-- SSH Protocol +-- $ git clone ssh://[user@]server/project.git[:treeish][:[#hash][.path]] + scope "ssh-protocol" . tests . map parseAugmented $ + [ ("ssh://git@8.8.8.8:222/user/project.git", + (GitRepo "ssh://git@8.8.8.8:222/user/project.git" Nothing, Nothing, Path.empty)) + , ("ssh://git@github.com/user/project.git:abc:#def.hij.klm", + (GitRepo "ssh://git@github.com/user/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) + ], +-- $ git clone [user@]server:project.git[:treeish][:[#hash][.path]] + scope "scp-protocol" . tests . 
map parseAugmented $ + [ ("git@github.com:user/project.git", + (GitRepo "git@github.com:user/project.git" Nothing, Nothing, Path.empty)) + , ("github.com:user/project.git", + (GitRepo "github.com:user/project.git" Nothing, Nothing, Path.empty)) + , ("git@github.com:user/project.git:abc:#def.hij.klm", + (GitRepo "git@github.com:user/project.git" (Just "abc"), sbh "def", path ["hij", "klm"])) + ] + ] + +parseAugmented :: (Text, (RemoteRepo, Maybe ShortBranchHash, Path)) -> Test () +parseAugmented (s, r) = scope (Text.unpack s) $ + case P.parse UriParser.repoPath "test case" s of + Left x -> crash $ show x + Right x -> expectEqual x r + +path :: [Text] -> Path +path = Path . Seq.fromList . fmap NameSegment + +sbh :: Text -> Maybe ShortBranchHash +sbh = Just . ShortBranchHash diff --git a/parser-typechecker/tests/Unison/Test/Util/Bytes.hs b/parser-typechecker/tests/Unison/Test/Util/Bytes.hs new file mode 100644 index 0000000000..549a5eb949 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Util/Bytes.hs @@ -0,0 +1,62 @@ +module Unison.Test.Util.Bytes where + +import EasyTest +import Control.Monad +import Data.List (foldl') +import qualified Unison.Util.Bytes as Bytes +import qualified Data.ByteString as BS + +test :: Test () +test = scope "util.bytes" . tests $ [ + scope "empty ==" . expect $ Bytes.empty == Bytes.empty, + + scope "empty `compare`" . expect $ Bytes.empty `compare` Bytes.empty == EQ, + + scope "==" . expect $ + Bytes.fromWord8s [0,1,2,3,4,5] <> Bytes.fromWord8s [6,7,8,9] + == + Bytes.fromWord8s [0,1,2,3,4,5,6,7,8,9], + + scope "consistency with ByteString" $ do + forM_ [(1::Int)..100] $ \_ -> do + n <- int' 0 50 + m <- int' 0 50 + k <- int' 0 (n + m) + o <- int' 0 50 + b1 <- BS.pack <$> replicateM n word8 + b2 <- BS.pack <$> replicateM m word8 + b3 <- BS.pack <$> replicateM o word8 + let [b1s, b2s, b3s] = Bytes.fromByteString <$> [b1, b2, b3] + scope "associtivity" . 
expect' $ + b1s <> (b2s <> b3s) == (b1s <> b2s) <> b3s + scope "<>" . expect' $ + Bytes.toByteString (b1s <> b2s <> b3s) == b1 <> b2 <> b3 + scope "Ord" . expect' $ + (b1 <> b2 <> b3) `compare` b3 == + (b1s <> b2s <> b3s) `compare` b3s + scope "take" . expect' $ + Bytes.toByteString (Bytes.take k (b1s <> b2s)) == BS.take k (b1 <> b2) + scope "drop" . expect' $ + Bytes.toByteString (Bytes.drop k (b1s <> b2s)) == BS.drop k (b1 <> b2) + scope "at" $ + let bs = b1s <> b2s <> b3s + b = b1 <> b2 <> b3 + in forM_ [0 .. (BS.length b - 1)] $ \ind -> + expect' $ Just (BS.index b ind) == Bytes.at ind bs + ok, + + scope "lots of chunks" $ do + forM_ [(0::Int)..100] $ \i -> do + n <- int' 0 50 + k <- int' 0 i + chunks <- replicateM n (replicateM k word8) + let b1 = foldMap Bytes.fromWord8s chunks + b2 = foldr (<>) mempty (Bytes.fromWord8s <$> chunks) + b3 = foldl' (<>) mempty (Bytes.fromWord8s <$> chunks) + b = BS.concat (BS.pack <$> chunks) + expect' $ b1 == b2 && b2 == b3 + expect' $ Bytes.toByteString b1 == b + expect' $ Bytes.toByteString b2 == b + expect' $ Bytes.toByteString b3 == b + ok + ] diff --git a/parser-typechecker/tests/Unison/Test/Util/PinBoard.hs b/parser-typechecker/tests/Unison/Test/Util/PinBoard.hs new file mode 100644 index 0000000000..dd8888d598 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Util/PinBoard.hs @@ -0,0 +1,52 @@ +{-# LANGUAGE MagicHash #-} +{-# LANGUAGE UnboxedTuples #-} + +module Unison.Test.Util.PinBoard + ( test, + ) +where + +import qualified Data.ByteString as ByteString +import EasyTest +import GHC.Exts (isTrue#, reallyUnsafePtrEquality#, touch#) +import GHC.IO (IO (IO)) +import System.Mem (performGC) +import qualified Unison.Util.PinBoard as PinBoard + +test :: Test () +test = + scope "util.pinboard" . 
tests $ + [ scope "pinning equal values stores only one" $ do + let b0 = ByteString.singleton 0 + let b1 = ByteString.copy b0 + + board <- PinBoard.new + + -- pinning a thing for the first time returns it + b0' <- PinBoard.pin board b0 + expectSamePointer b0 b0' + + -- pinning an equal thing returns the first + b1' <- PinBoard.pin board b1 + expectSamePointer b0 b1' + + -- the board should only have one value in it + expect' . (== 1) <$> io (PinBoard.debugSize board) + + -- keep b0 alive until here + touch b0 + + -- observe that the board doesn't keep its value alive + io performGC + expect' . (== 0) <$> io (PinBoard.debugSize board) + + ok + ] + +expectSamePointer :: a -> a -> Test () +expectSamePointer x y = + expect' (isTrue# (reallyUnsafePtrEquality# x y)) + +touch :: a -> Test () +touch x = + io (IO \s -> (# touch# x s, () #)) diff --git a/parser-typechecker/tests/Unison/Test/Util/Pretty.hs b/parser-typechecker/tests/Unison/Test/Util/Pretty.hs new file mode 100644 index 0000000000..6859adc3f6 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Util/Pretty.hs @@ -0,0 +1,33 @@ +module Unison.Test.Util.Pretty + ( test + ) where + +import Control.Monad +import Data.String (fromString) +import EasyTest +import qualified Unison.Util.Pretty as Pretty + +test :: Test () +test = + scope "util.pretty" . 
tests $ [ + scope "Delta.Semigroup.<>.associative" $ do + replicateM_ 100 $ do + d1 <- randomDelta + d2 <- randomDelta + d3 <- randomDelta + expect' $ (d1 <> d2) <> d3 == d1 <> (d2 <> d3) + ok + ] + +randomDelta :: Test Pretty.Delta +randomDelta = + Pretty.delta <$> randomPretty + + where + randomPretty :: Test (Pretty.Pretty String) + randomPretty = + fromString <$> randomString + + randomString :: Test String + randomString = + replicateM 3 (pick ['x', 'y', 'z', '\n']) diff --git a/parser-typechecker/tests/Unison/Test/Var.hs b/parser-typechecker/tests/Unison/Test/Var.hs new file mode 100644 index 0000000000..938bcb1e8c --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Var.hs @@ -0,0 +1,23 @@ +module Unison.Test.Var where + +import EasyTest +import Unison.Symbol (Symbol) +import Unison.Var as Var + +test :: Test () +test = scope "var" $ tests [ + scope "free synthetic vars are universally quantifiable" $ tests + [ scope (Var.nameStr v) + (expect $ Var.universallyQuantifyIfFree @Symbol v) + | v <- [ Var.inferAbility + , Var.inferInput + , Var.inferOutput + , Var.inferPatternPureE + , Var.inferPatternPureV + , Var.inferPatternBindE + , Var.inferPatternBindV + , Var.inferTypeConstructor + , Var.inferTypeConstructorArg + ] + ] + ] diff --git a/parser-typechecker/tests/Unison/Test/VersionParser.hs b/parser-typechecker/tests/Unison/Test/VersionParser.hs new file mode 100644 index 0000000000..64b5741a75 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/VersionParser.hs @@ -0,0 +1,26 @@ +{-# LANGUAGE OverloadedStrings #-} +module Unison.Test.VersionParser where + +import EasyTest +import Data.Text +import Unison.Codebase.Editor.VersionParser +import qualified Unison.Codebase.Path as Path +import Control.Error.Safe (rightMay) +import Unison.Codebase.Editor.RemoteRepo +import Text.Megaparsec + +test :: Test () +test = scope "versionparser" . tests . 
fmap makeTest $ + [ ("release/M1j", "releases._M1j") + , ("release/M1j.2", "releases._M1j") + , ("devel/M1k", "trunk") + ] + +makeTest :: (Text, Text) -> Test () +makeTest (version, path) = + scope (unpack version) $ expectEqual + (rightMay $ runParser defaultBaseLib "versionparser" version) + (Just + ( GitRepo "https://github.com/unisonweb/base" Nothing + , Nothing + , Path.fromText path )) diff --git a/parser-typechecker/transcripts/Transcripts.hs b/parser-typechecker/transcripts/Transcripts.hs new file mode 100644 index 0000000000..81fd7a4e06 --- /dev/null +++ b/parser-typechecker/transcripts/Transcripts.hs @@ -0,0 +1,87 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Main where + +import Unison.Prelude +import EasyTest +import Shellmet (($|)) +import System.Directory +import System.FilePath ( () + , takeExtensions + , takeBaseName + ) +import System.Process ( readProcessWithExitCode ) + +import Data.Text ( pack + , unpack + ) +import Data.List + +type TestBuilder = FilePath -> FilePath -> String -> Test () + +testBuilder :: FilePath -> FilePath -> String -> Test () +testBuilder ucm dir transcript = scope transcript $ do + io $ fromString ucm ["transcript", pack (dir transcript)] + ok + +testBuilder' :: FilePath -> FilePath -> String -> Test () +testBuilder' ucm dir transcript = scope transcript $ do + let input = pack (dir transcript) + let output = dir takeBaseName transcript <> ".output.md" + io $ runAndCaptureError ucm ["transcript", input] output + ok + where + -- Given a command and arguments, run it and capture the standard error to a file + -- regardless of success or failure. + runAndCaptureError :: FilePath -> [Text] -> FilePath -> IO () + runAndCaptureError cmd args outfile = do + t <- readProcessWithExitCode cmd (map unpack args) "" + let output = (\(_, _, stderr) -> stderr) t + writeUtf8 outfile $ (pack . 
dropRunMessage) output + + -- Given the standard error, drops the part in the end that changes each run + dropRunMessage :: String -> String + dropRunMessage = unlines . reverse . drop 3 . reverse . lines + + +buildTests :: TestBuilder -> FilePath -> Test () +buildTests testBuilder dir = do + io + . putStrLn + . unlines + $ [ "" + , "Searching for transcripts to run in: " ++ dir + ] + files <- io $ listDirectory dir + let transcripts = sort . filter (\f -> takeExtensions f == ".md") $ files + ucm <- io $ unpack <$> "stack" $| ["exec", "--", "which", "unison"] -- todo: what is it in windows? + tests (testBuilder ucm dir <$> transcripts) + +-- Transcripts that exit successfully get cleaned-up by the transcript parser. +-- Any remaining folders matching "transcript-.*" are output directories +-- of failed transcripts and should be moved under the "test-output" folder +cleanup :: Test () +cleanup = do + files' <- io $ listDirectory "." + let dirs = filter ("transcript-" `isPrefixOf`) files' + + -- if any such codebases remain they are moved under test-output + unless (null dirs) $ do + io $ createDirectoryIfMissing True "test-output" + io $ for_ dirs (\d -> renameDirectory d ("test-output" d)) + io + . putStrLn + . unlines + $ [ "" + , "NOTE: All transcript codebases have been moved into" + , "the `test-output` directory. Feel free to delete it." 
+ ] + +test :: Test () +test = do + buildTests testBuilder $"unison-src" "transcripts" + buildTests testBuilder' $"unison-src" "transcripts" "errors" + cleanup + +main :: IO () +main = run test diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal new file mode 100644 index 0000000000..b793819511 --- /dev/null +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -0,0 +1,385 @@ +cabal-version: 2.2 +name: unison-parser-typechecker +category: Compiler +version: 0.1 +license: MIT +license-file: LICENSE +author: Unison Computing, public benefit corp +maintainer: Paul Chiusano , Runar Bjarnason , Arya Irani +stability: provisional +homepage: http://unisonweb.org +bug-reports: https://github.com/unisonweb/unison/issues +copyright: Copyright (C) 2013-2018 Unison Computing, PBC and contributors +synopsis: Parser and typechecker for the Unison language +description: + +build-type: Simple +extra-source-files: +data-files: + +source-repository head + type: git + location: git://github.com/unisonweb/unison.git + +-- `cabal install -foptimized` enables optimizations +flag optimized + manual: True + default: False + +flag quiet + manual: True + default: False + +-- NOTE: Keep in sync throughout repo. 
+common unison-common + default-language: Haskell2010 + default-extensions: + ApplicativeDo, + BlockArguments, + DeriveFunctor, + DerivingStrategies, + DoAndIfThenElse, + FlexibleContexts, + FlexibleInstances, + LambdaCase, + MultiParamTypeClasses, + ScopedTypeVariables, + TupleSections, + TypeApplications + +library + import: unison-common + + hs-source-dirs: src + + exposed-modules: + Unison.Builtin + Unison.Builtin.Decls + Unison.Codecs + Unison.Codebase + Unison.Codebase.Branch + Unison.Codebase.Branch.Dependencies + Unison.Codebase.BranchDiff + Unison.Codebase.BranchUtil + Unison.Codebase.Causal + Unison.Codebase.Classes + Unison.Codebase.CodeLookup + Unison.Codebase.Editor.AuthorInfo + Unison.Codebase.Editor.Command + Unison.Codebase.Editor.DisplayThing + Unison.Codebase.Editor.Git + Unison.Codebase.Editor.HandleInput + Unison.Codebase.Editor.HandleCommand + Unison.Codebase.Editor.Input + Unison.Codebase.Editor.Output + Unison.Codebase.Editor.Output.BranchDiff + Unison.Codebase.Editor.Propagate + Unison.Codebase.Editor.RemoteRepo + Unison.Codebase.Editor.SearchResult' + Unison.Codebase.Editor.SlurpResult + Unison.Codebase.Editor.SlurpComponent + Unison.Codebase.Editor.TodoOutput + Unison.Codebase.Editor.UriParser + Unison.Codebase.Editor.VersionParser + Unison.Codebase.FileCodebase + Unison.Codebase.FileCodebase.Common + Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex + Unison.Codebase.GitError + Unison.Codebase.Metadata + Unison.Codebase.NameEdit + Unison.Codebase.Path + Unison.Codebase.Patch + Unison.Codebase.Reflog + Unison.Codebase.Runtime + Unison.Codebase.SearchResult + Unison.Codebase.Serialization + Unison.Codebase.Serialization.PutT + Unison.Codebase.Serialization.V1 + Unison.Codebase.ShortBranchHash + Unison.Codebase.SyncMode + Unison.Codebase.TermEdit + Unison.Codebase.TranscriptParser + Unison.Codebase.TypeEdit + Unison.Codebase.Watch + Unison.Codebase.Execute + Unison.Codebase.MainTerm + Unison.CommandLine + Unison.CommandLine.DisplayValues 
+ Unison.CommandLine.InputPattern + Unison.CommandLine.InputPatterns + Unison.CommandLine.Main + Unison.CommandLine.OutputMessages + Unison.DeclPrinter + Unison.FileParser + Unison.FileParsers + Unison.Lexer + Unison.NamePrinter + Unison.Parser + Unison.Parsers + Unison.Path + Unison.PrettyPrintEnv + Unison.PrettyTerminal + Unison.PrintError + Unison.Result + Unison.Runtime.ANF + Unison.Runtime.Builtin + Unison.Runtime.Debug + Unison.Runtime.Decompile + Unison.Runtime.Exception + Unison.Runtime.Foreign + Unison.Runtime.Foreign.Function + Unison.Runtime.Interface + Unison.Runtime.IR + Unison.Runtime.MCode + Unison.Runtime.Machine + Unison.Runtime.Pattern + Unison.Runtime.Rt1 + Unison.Runtime.Rt1IO + Unison.Runtime.IOSource + Unison.Runtime.Vector + Unison.Runtime.SparseVector + Unison.Runtime.Stack + Unison.TermParser + Unison.TermPrinter + Unison.TypeParser + Unison.TypePrinter + Unison.Typechecker + Unison.Typechecker.Components + Unison.Typechecker.Context + Unison.Typechecker.Extractor + Unison.Typechecker.TypeError + Unison.Typechecker.TypeLookup + Unison.Typechecker.TypeVar + Unison.UnisonFile + Unison.Util.AnnotatedText + Unison.Util.Bytes + Unison.Util.Cache + Unison.Util.ColorText + Unison.Util.EnumContainers + Unison.Util.Exception + Unison.Util.Free + Unison.Util.Find + Unison.Util.Less + Unison.Util.Logger + Unison.Util.Map + Unison.Util.Menu + Unison.Util.PinBoard + Unison.Util.Pretty + Unison.Util.Range + Unison.Util.Star3 + Unison.Util.SyntaxText + Unison.Util.Timing + Unison.Util.TQueue + Unison.Util.TransitiveClosure + Unison.Util.CycleTable + Unison.Util.CyclicEq + Unison.Util.CyclicOrd + + build-depends: + ansi-terminal, + async, + base, + base16 >= 0.2.1.0, + bifunctors, + bytes, + bytestring, + cereal, + containers, + comonad, + concurrent-supply, + configurator, + cryptonite, + directory, + guid, + data-memocombinators, + edit-distance, + errors, + exceptions, + extra, + filepath, + filepattern, + fingertree, + free, + fsnotify, + 
generic-monoid, + hashable, + hashtables, + haskeline, + io-streams, + lens, + ListLike, + megaparsec >= 5.0.0 && < 7.0.0, + memory, + mmorph, + monad-loops, + mtl, + murmur-hash, + mutable-containers, + network, + network-simple, + nonempty-containers, + process, + primitive, + random, + raw-strings-qq, + regex-base, + regex-tdfa, + safe, + shellmet, + split, + stm, + strings, + tagged, + terminal-size, + text, + time, + transformers, + unison-core, + unliftio, + util, + vector, + unicode-show + + ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures + + if flag(optimized) + ghc-options: -funbox-strict-fields -O2 + + if flag(quiet) + ghc-options: -v0 + +executable unison + import: unison-common + main-is: Main.hs + hs-source-dirs: unison + ghc-options: -Wall -threaded -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -rtsopts -with-rtsopts=-I0 -optP-Wno-nonportable-include-path + other-modules: + System.Path + Version + build-depends: + base, + containers, + configurator, + directory, + errors, + filepath, + megaparsec, + safe, + shellmet, + template-haskell, + temporary, + text, + unison-core, + unison-parser-typechecker + if !os(windows) + build-depends: + unix + +executable prettyprintdemo + import: unison-common + main-is: Main.hs + hs-source-dirs: prettyprintdemo + ghc-options: -Wall -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures + build-depends: + base, + safe, + text, + unison-parser-typechecker + +executable tests + import: unison-common + main-is: Suite.hs + hs-source-dirs: tests + ghc-options: -W -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts "-with-rtsopts=-N -T" -v0 + build-depends: + base, + easytest + other-modules: + Unison.Test.ABT + Unison.Test.ANF + Unison.Test.Cache + Unison.Test.Codebase + Unison.Test.Codebase.Causal + Unison.Test.Codebase.FileCodebase + Unison.Test.Codebase.Path + Unison.Test.ColorText + 
Unison.Test.Common + Unison.Test.DataDeclaration + Unison.Test.FileParser + Unison.Test.Git + Unison.Test.Lexer + Unison.Test.IO + Unison.Test.MCode + Unison.Test.Range + Unison.Test.Referent + Unison.Test.Term + Unison.Test.TermParser + Unison.Test.TermPrinter + Unison.Test.Type + Unison.Test.TypePrinter + Unison.Test.Typechecker + Unison.Test.Typechecker.Components + Unison.Test.Typechecker.Context + Unison.Test.Typechecker.TypeError + Unison.Test.UnisonSources + Unison.Test.UriParser + Unison.Test.Util.Bytes + Unison.Test.Util.PinBoard + Unison.Test.Util.Pretty + Unison.Test.Var + Unison.Test.VersionParser + Unison.Core.Test.Name + + build-depends: + async, + base, + bytestring, + containers, + directory, + easytest, + errors, + extra, + filepath, + filemanip, + here, + lens, + megaparsec, + mtl, + raw-strings-qq, + stm, + shellmet, + split, + temporary, + text, + transformers, + unison-core, + unison-parser-typechecker + +executable transcripts + import: unison-common + main-is: Transcripts.hs + ghc-options: -W -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts -with-rtsopts=-N -v0 + hs-source-dirs: transcripts + other-modules: + build-depends: + base, + directory, + easytest, + filepath, + shellmet, + process, + text, + unison-core, + unison-parser-typechecker + +benchmark runtime + type: exitcode-stdio-1.0 + main-is: Main.hs + ghc-options: -O2 + hs-source-dirs: benchmarks/runtime + build-depends: + base, + criterion, + containers, + unison-core, + unison-parser-typechecker diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs new file mode 100644 index 0000000000..375c88e819 --- /dev/null +++ b/parser-typechecker/unison/Main.hs @@ -0,0 +1,317 @@ +{-# Language OverloadedStrings #-} +{-# Language PartialTypeSignatures #-} +{-# LANGUAGE CPP #-} +{-# OPTIONS_GHC -Wno-partial-type-signatures #-} + +module Main where + +import Unison.Prelude +import Control.Concurrent ( mkWeakThreadId, myThreadId ) 
+import Control.Error.Safe (rightMay) +import Control.Exception ( throwTo, AsyncException(UserInterrupt) ) +import Data.Configurator.Types ( Config ) +import System.Directory ( getCurrentDirectory, removeDirectoryRecursive ) +import System.Environment ( getArgs, getProgName ) +import System.Mem.Weak ( deRefWeak ) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Editor.VersionParser as VP +import Unison.Codebase.Execute ( execute ) +import qualified Unison.Codebase.FileCodebase as FileCodebase +import Unison.Codebase.FileCodebase.Common ( codebasePath ) +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) +import Unison.Codebase.Runtime ( Runtime ) +import Unison.CommandLine ( watchConfig ) +import qualified Unison.CommandLine.Main as CommandLine +import qualified Unison.Runtime.Rt1IO as Rt1 +import qualified Unison.Runtime.Interface as RTI +import Unison.Symbol ( Symbol ) +import qualified Unison.Codebase.Path as Path +import qualified Unison.Util.Cache as Cache +import qualified Version +import qualified Unison.Codebase.TranscriptParser as TR +import qualified System.Path as Path +import qualified System.FilePath as FP +import qualified System.IO.Temp as Temp +import qualified System.Exit as Exit +import System.IO.Error (catchIOError) +import qualified Unison.Codebase.Editor.Input as Input +import qualified Unison.Util.Pretty as P +import qualified Unison.PrettyTerminal as PT +import qualified Data.Text as Text +import qualified Data.Configurator as Config +import Text.Megaparsec (runParser) + +#if defined(mingw32_HOST_OS) +import qualified GHC.ConsoleHandler as WinSig +#else +import qualified System.Posix.Signals as Sig +#endif + +usage :: String -> P.Pretty P.ColorText +usage executableStr = P.callout "🌻" $ P.lines [ + P.bold "Usage instructions for the Unison Codebase Manager", + "You are running version: " <> P.string Version.gitDescribe, + "", + P.bold executable, + P.wrap "Starts Unison interactively, using the 
codebase in the home directory.", + "", + P.bold $ executable <> " -codebase path/to/codebase", + P.wrap "Starts Unison interactively, using the specified codebase. This flag can also be set for any of the below commands.", + "", + P.bold $ executable <> " run .mylib.mymain", + P.wrap "Executes the definition `.mylib.mymain` from the codebase, then exits.", + "", + P.bold $ executable <> " run.file foo.u mymain", + P.wrap "Executes the definition called `mymain` in `foo.u`, then exits.", + "", + P.bold $ executable <> " run.pipe mymain", + P.wrap "Executes the definition called `mymain` from a `.u` file read from the standard input, then exits.", + "", + P.bold $ executable <> " transcript mytranscript.md", + P.wrap $ "Executes the `mytranscript.md` transcript and creates" + <> "`mytranscript.output.md` if successful. Exits after completion, and deletes" + <> "the temporary directory created." + <> "Multiple transcript files may be provided; they are processed in sequence" + <> "starting from the same codebase.", + "", + P.bold $ executable <> " transcript -save-codebase mytranscript.md", + P.wrap $ "Executes the `mytranscript.md` transcript and creates" + <> "`mytranscript.output.md` if successful. Exits after completion, and saves" + <> "the resulting codebase to a new directory on disk." + <> "Multiple transcript files may be provided; they are processed in sequence" + <> "starting from the same codebase.", + "", + P.bold $ executable <> " transcript.fork mytranscript.md", + P.wrap $ "Executes the `mytranscript.md` transcript in a copy of the current codebase" + <> "and creates `mytranscript.output.md` if successful. Exits after completion." 
+ <> "Multiple transcript files may be provided; they are processed in sequence" + <> "starting from the same codebase.", + "", + P.bold $ executable <> " transcript.fork -save-codebase mytranscript.md", + P.wrap $ "Executes the `mytranscript.md` transcript in a copy of the current codebase" + <> "and creates `mytranscript.output.md` if successful. Exits after completion," + <> "and saves the resulting codebase to a new directory on disk." + <> "Multiple transcript files may be provided; they are processed in sequence" + <> "starting from the same codebase.", + "", + P.bold $ executable <> " version", + "Prints version of Unison then quits.", + "", + P.bold $ executable <> " help", + "Prints this help."] + where executable = (P.text . Text.pack) executableStr + +installSignalHandlers :: IO () +installSignalHandlers = do + main_thread <- myThreadId + wtid <- mkWeakThreadId main_thread + + let interrupt = do + r <- deRefWeak wtid + case r of + Nothing -> return () + Just t -> throwTo t UserInterrupt + +#if defined(mingw32_HOST_OS) + let sig_handler WinSig.ControlC = interrupt + sig_handler WinSig.Break = interrupt + sig_handler _ = return () + _ <- WinSig.installHandler (WinSig.Catch sig_handler) +#else + _ <- Sig.installHandler Sig.sigQUIT (Sig.Catch interrupt) Nothing + _ <- Sig.installHandler Sig.sigINT (Sig.Catch interrupt) Nothing +#endif + + return () + +main :: IO () +main = do + args <- getArgs + progName <- getProgName + -- hSetBuffering stdout NoBuffering -- cool + + _ <- installSignalHandlers + -- We need to know whether the program was invoked with -codebase for + -- certain messages. 
Therefore we keep a Maybe FilePath - mcodepath + -- rather than just deciding on whether to use the supplied path or + -- the home directory here and throwing away that bit of information + let (mcodepath, restargs0) = case args of + "-codebase" : codepath : restargs -> (Just codepath, restargs) + _ -> (Nothing, args) + (mNewRun, restargs) = case restargs0 of + "--new-runtime" : rest -> (Just True, rest) + _ -> (Nothing, restargs0) + currentDir <- getCurrentDirectory + configFilePath <- getConfigFilePath mcodepath + config@(config_, _cancelConfig) <- + catchIOError (watchConfig configFilePath) $ \_ -> + Exit.die "Your .unisonConfig could not be loaded. Check that it's correct!" + branchCacheSize :: Word <- Config.lookupDefault 4096 config_ "NamespaceCacheSize" + branchCache <- Cache.semispaceCache branchCacheSize + case restargs of + [] -> do + theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath + launch currentDir mNewRun config theCodebase branchCache [] + [version] | isFlag "version" version -> + putStrLn $ progName ++ " version: " ++ Version.gitDescribe + [help] | isFlag "help" help -> PT.putPrettyLn (usage progName) + ["init"] -> FileCodebase.initCodebaseAndExit mcodepath + "run" : [mainName] -> do + theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath + runtime <- join . getStartRuntime mNewRun $ fst config + execute theCodebase runtime mainName + "run.file" : file : [mainName] | isDotU file -> do + e <- safeReadUtf8 file + case e of + Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I couldn't find that file or it is for some reason unreadable." 
+ Right contents -> do + theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath + let fileEvent = Input.UnisonFileChanged (Text.pack file) contents + launch currentDir mNewRun config theCodebase branchCache [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] + "run.pipe" : [mainName] -> do + e <- safeReadUtf8StdIn + case e of + Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I had trouble reading this input." + Right contents -> do + theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath + let fileEvent = Input.UnisonFileChanged (Text.pack "") contents + launch + currentDir mNewRun config theCodebase branchCache + [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] + "transcript" : args' -> + case args' of + "-save-codebase" : transcripts -> runTranscripts mNewRun branchCache False True mcodepath transcripts + _ -> runTranscripts mNewRun branchCache False False mcodepath args' + "transcript.fork" : args' -> + case args' of + "-save-codebase" : transcripts -> runTranscripts mNewRun branchCache True True mcodepath transcripts + _ -> runTranscripts mNewRun branchCache True False mcodepath args' + _ -> do + PT.putPrettyLn (usage progName) + Exit.exitWith (Exit.ExitFailure 1) + +prepareTranscriptDir :: Branch.Cache IO -> Bool -> Maybe FilePath -> IO FilePath +prepareTranscriptDir branchCache inFork mcodepath = do + tmp <- Temp.getCanonicalTemporaryDirectory >>= (`Temp.createTempDirectory` "transcript") + unless inFork $ do + PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." + _ <- FileCodebase.initCodebase branchCache tmp + pure() + + when inFork $ FileCodebase.getCodebaseOrExit branchCache mcodepath >> do + path <- FileCodebase.getCodebaseDir mcodepath + PT.putPrettyLn $ P.lines [ + P.wrap "Transcript will be run on a copy of the codebase at: ", "", + P.indentN 2 (P.string path) + ] + Path.copyDir (path FP. codebasePath) (tmp FP. 
codebasePath) + + pure tmp + +runTranscripts' + :: Maybe Bool + -> Branch.Cache IO + -> Maybe FilePath + -> FilePath + -> [String] + -> IO Bool +runTranscripts' mNewRun branchCache mcodepath transcriptDir args = do + currentDir <- getCurrentDirectory + theCodebase <- FileCodebase.getCodebaseOrExit branchCache $ Just transcriptDir + case args of + args@(_:_) -> do + for_ args $ \arg -> case arg of + md | isMarkdown md -> do + parsed <- TR.parseFile arg + case parsed of + Left err -> + PT.putPrettyLn $ P.callout "❓" ( + P.lines [ + P.indentN 2 "A parsing error occurred while reading a file:", "", + P.indentN 2 $ P.string err]) + Right stanzas -> do + configFilePath <- getConfigFilePath mcodepath + mdOut <- TR.run mNewRun transcriptDir configFilePath stanzas theCodebase branchCache + let out = currentDir FP. + FP.addExtension (FP.dropExtension arg ++ ".output") + (FP.takeExtension md) + writeUtf8 out mdOut + putStrLn $ "💾 Wrote " <> out + wat -> + PT.putPrettyLn $ P.callout "❓" ( + P.lines [ + P.indentN 2 "Unrecognized command, skipping:", "", + P.indentN 2 $ P.string wat]) + pure True + [] -> + pure False + +runTranscripts + :: Maybe Bool + -> Branch.Cache IO + -> Bool + -> Bool + -> Maybe FilePath + -> [String] + -> IO () +runTranscripts mNewRun branchCache inFork keepTemp mcodepath args = do + progName <- getProgName + transcriptDir <- prepareTranscriptDir branchCache inFork mcodepath + completed <- + runTranscripts' mNewRun branchCache (Just transcriptDir) transcriptDir args + when completed $ do + unless keepTemp $ removeDirectoryRecursive transcriptDir + when keepTemp $ PT.putPrettyLn $ + P.callout "🌸" ( + P.lines [ + "I've finished running the transcript(s) in this codebase:", "", + P.indentN 2 (P.string transcriptDir), "", + P.wrap $ "You can run" + <> P.backticked (P.string progName <> " -codebase " <> P.string transcriptDir) + <> "to do more work with it."]) + + unless completed $ do + unless keepTemp $ removeDirectoryRecursive transcriptDir + PT.putPrettyLn 
(usage progName) + Exit.exitWith (Exit.ExitFailure 1) + +initialPath :: Path.Absolute +initialPath = Path.absoluteEmpty + +getStartRuntime :: Maybe Bool -> Config -> IO (IO (Runtime Symbol)) +getStartRuntime newRun config = do + b <- maybe (Config.lookupDefault False config "new-runtime") pure newRun + pure $ if b then RTI.startRuntime else pure Rt1.runtime + +launch + :: FilePath + -> Maybe Bool + -> (Config, IO ()) + -> _ + -> Branch.Cache IO + -> [Either Input.Event Input.Input] + -> IO () +launch dir newRun config code branchCache inputs = do + startRuntime <- getStartRuntime newRun $ fst config + CommandLine.main dir defaultBaseLib initialPath config inputs startRuntime code branchCache Version.gitDescribe + +isMarkdown :: String -> Bool +isMarkdown md = case FP.takeExtension md of + ".md" -> True + ".markdown" -> True + _ -> False + +isDotU :: String -> Bool +isDotU file = FP.takeExtension file == ".u" + +-- so we can do `ucm --help`, `ucm -help` or `ucm help` (I hate +-- having to remember which one is supported) +isFlag :: String -> String -> Bool +isFlag f arg = arg == f || arg == "-" ++ f || arg == "--" ++ f + +getConfigFilePath :: Maybe FilePath -> IO FilePath +getConfigFilePath mcodepath = (FP. 
".unisonConfig") <$> FileCodebase.getCodebaseDir mcodepath + +defaultBaseLib :: Maybe RemoteNamespace +defaultBaseLib = rightMay $ + runParser VP.defaultBaseLib "version" (Text.pack Version.gitDescribe) diff --git a/parser-typechecker/unison/System/Path.hs b/parser-typechecker/unison/System/Path.hs new file mode 100644 index 0000000000..df8ac1b1a4 --- /dev/null +++ b/parser-typechecker/unison/System/Path.hs @@ -0,0 +1,106 @@ +-- Copied from +-- +-- +-- +-- because: +-- +-- * base <4.7 upper bound would require patching, but lib hasn't been updated +-- in 8 years +-- * according to Arya, this code will not be necessary soon +-- +-- License file (MIT) was dropped in deps/fsutils + +-- | A collection of file system utilities that appear to be missing from +-- Directory, FilePath, Prelude, etc. Some of these may overlap with MissingH +-- but the versions here will probably be more simplistic. Furthermore, this +-- library is focused on this one thing and not a whole bunch of things. +module System.Path + ( mtreeList + , fileList + , walkDir + , copyDir + , replaceRoot + , removeRoot + , Directory + , dirPath + , subDirs + , files + , createDir + , filterUseless + ) where + +import Control.Monad (filterM, forM_) +import System.Directory +import System.FilePath ((), addTrailingPathSeparator) +import Data.List ((\\)) + +-- | Remove useless paths from a list of paths. +filterUseless :: [FilePath] -> [FilePath] +filterUseless = (\\ [".", ".."]) + +-- | Returns a list of nodes in a tree via a depth-first walk. +mtreeList :: Monad m => (a -> m [a]) -> a -> m [a] +mtreeList children root = do + xs <- children root + subChildren <- mapM (mtreeList children) xs + return $ root : concat subChildren + +-- | Get a list of files in path, but not recursively. Removes '.' and '..'. +topFileList :: FilePath -> IO [FilePath] +topFileList path = + fmap (map (path ) . filterUseless) $ getDirectoryContents path + +-- | Recursively list the contents of a directory. Depth-first. 
+fileList :: FilePath -> IO [FilePath] +fileList = mtreeList children + where children path = do + directory <- doesDirectoryExist path + if directory + then topFileList path + else return [] + +-- | We can use this data type to represent the pieces of a directory. +data Directory = Directory + { -- | The path of the directory itself. + dirPath :: FilePath + -- | All subdirectories of this directory. + , subDirs :: [FilePath] + -- | All files contained in this directory. + , files :: [FilePath] + } + deriving (Show) + +-- | Creates a Directory instance from a FilePath. +createDir :: FilePath -> IO Directory +createDir path = do + contents <- topFileList path + subdirs <- filterM doesDirectoryExist contents + files <- filterM doesFileExist contents + return (Directory path subdirs files) + +-- | Walk a directory depth-first. Similar to Python's os.walk and fs.core/walk +-- from the fs Clojure library. +walkDir :: FilePath -> IO [Directory] +walkDir root = createDir root >>= mtreeList children + where children path = do + let dirs = subDirs path + mapM createDir dirs + +-- | Given a root (prefix), remove it from a path. This is useful +-- for getting the filename and subdirs of a path inside of a root. +removeRoot :: FilePath -> FilePath -> FilePath +removeRoot prefix = drop . length $ addTrailingPathSeparator prefix + +-- | Given a root path, a new root path, and a path to be changed, +-- removes the old root from the path and replaces it with to. +replaceRoot :: FilePath -> FilePath -> FilePath -> FilePath +replaceRoot root to path = to removeRoot root path + +-- | Copy a directory recursively. Moves every file, creates every directory. +copyDir :: FilePath -> FilePath -> IO () +copyDir from to = do + createDirectoryIfMissing True to + walked <- walkDir from + forM_ walked $ \(Directory _ dirs files) -> do + mapM_ (createDirectoryIfMissing True . 
replaceRoot from to) dirs + forM_ files $ \path -> copyFile path (replaceRoot from to path) diff --git a/parser-typechecker/unison/Version.hs b/parser-typechecker/unison/Version.hs new file mode 100644 index 0000000000..da45288409 --- /dev/null +++ b/parser-typechecker/unison/Version.hs @@ -0,0 +1,16 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE TemplateHaskell #-} + +module Version where + +import Language.Haskell.TH (runIO) +import Language.Haskell.TH.Syntax (Exp(LitE), Lit(StringL)) +import Shellmet +import Data.Text + +gitDescribe :: String +gitDescribe = $( fmap (LitE . StringL . unpack) . runIO $ + "git" $| ["describe", "--tags", "--always", "--dirty='"] + $? pure "unknown" + ) + diff --git a/unison-core/LICENSE b/unison-core/LICENSE new file mode 100644 index 0000000000..cca9c4376c --- /dev/null +++ b/unison-core/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013, Paul Chiusano and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/unison-core/src/Unison/ABT.hs b/unison-core/src/Unison/ABT.hs new file mode 100644 index 0000000000..4ba958704e --- /dev/null +++ b/unison-core/src/Unison/ABT.hs @@ -0,0 +1,715 @@ +-- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.ABT where + +import Unison.Prelude + +import Control.Lens (Lens', use, (.=)) +import Control.Monad.State (MonadState,evalState) +import Data.Functor.Identity (runIdentity) +import Data.List hiding (cycle) +import Data.Vector ((!)) +import Prelude hiding (abs,cycle) +import Prelude.Extras (Eq1(..), Show1(..), Ord1(..)) +import Unison.Hashable (Accumulate,Hashable1,hash1) +import qualified Data.Foldable as Foldable +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Vector as Vector +import qualified Unison.Hashable as Hashable +import qualified Unison.Util.Components as Components + +data ABT f v r + = Var v + | Cycle r + | Abs v r + | Tm (f r) deriving (Functor, Foldable, Traversable) + +-- | At each level in the tree, we store the set of free variables and +-- a value of type `a`. Variables are of type `v`. +data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a) } + +-- | A class for variables. 
+-- +-- * `Set.notMember (freshIn vs v) vs`: +-- `freshIn` returns a variable not used in the `Set` +class Ord v => Var v where + freshIn :: Set v -> v -> v + +data V v = Free v | Bound v deriving (Eq,Ord,Show,Functor) + +unvar :: V v -> v +unvar (Free v) = v +unvar (Bound v) = v + +instance Var v => Var (V v) where + freshIn s v = freshIn (Set.map unvar s) <$> v + +newtype Path s t a b m = Path { focus :: s -> Maybe (a, b -> Maybe t, m) } + +here :: Monoid m => Path s t s t m +here = Path $ \s -> Just (s, Just, mempty) + +instance Semigroup (Path s t a b m) where + (<>) = mappend + +instance Monoid (Path s t a b m) where + mempty = Path (const Nothing) + mappend (Path p1) (Path p2) = Path p3 where + p3 s = p1 s <|> p2 s + +type Path' f g m = forall a v . Var v => Path (Term f v a) (Term f (V v) a) (Term g v a) (Term g (V v) a) m + +compose :: Monoid m => Path s t a b m -> Path a b a' b' m -> Path s t a' b' m +compose (Path p1) (Path p2) = Path p3 where + p3 s = do + (get1,set1,m1) <- p1 s + (get2,set2,m2) <- p2 get1 + pure (get2, set2 >=> set1, m1 `mappend` m2) + +at :: Path s t a b m -> s -> Maybe a +at p s = (\(a,_,_) -> a) <$> focus p s + +modify' :: Path s t a b m -> (m -> a -> b) -> s -> Maybe t +modify' p f s = focus p s >>= \(get,set,m) -> set (f m get) + +wrap :: (Functor f, Foldable f, Var v) => v -> Term f (V v) a -> (V v, Term f (V v) a) +wrap v t = + if Set.member (Free v) (freeVars t) + then let v' = fresh t (Bound v) in (v', rename (Bound v) v' t) + else (Bound v, t) + +wrap' :: (Functor f, Foldable f, Var v) + => v -> Term f (V v) a -> (V v -> Term f (V v) a -> c) -> c +wrap' v t f = uncurry f (wrap v t) + +-- | Return the list of all variables bound by this ABT +bound' :: Foldable f => Term f v a -> [v] +bound' t = case out t of + Abs v t -> v : bound' t + Cycle t -> bound' t + Tm f -> Foldable.toList f >>= bound' + _ -> [] + +annotateBound' :: (Ord v, Functor f, Foldable f) => Term f v a0 -> Term f v [v] +annotateBound' t = snd <$> annotateBound'' 
t + +-- Annotate the tree with the set of bound variables at each node. +annotateBound :: (Ord v, Foldable f, Functor f) => Term f v a -> Term f v (a, Set v) +annotateBound = go Set.empty where + go bound t = let a = (annotation t, bound) in case out t of + Var v -> annotatedVar a v + Cycle body -> cycle' a (go bound body) + Abs x body -> abs' a x (go (Set.insert x bound) body) + Tm body -> tm' a (go bound <$> body) + +annotateBound'' :: (Ord v, Functor f, Foldable f) => Term f v a -> Term f v (a, [v]) +annotateBound'' = go [] where + go env t = let a = (annotation t, env) in case out t of + Abs v body -> abs' a v (go (v : env) body) + Cycle body -> cycle' a (go env body) + Tm f -> tm' a (go env <$> f) + Var v -> annotatedVar a v + +-- | Return the set of all variables bound by this ABT +bound :: (Ord v, Foldable f) => Term f v a -> Set v +bound t = Set.fromList (bound' t) + +-- | `True` if the term has no free variables, `False` otherwise +isClosed :: Term f v a -> Bool +isClosed t = Set.null (freeVars t) + +-- | `True` if `v` is a member of the set of free variables of `t` +isFreeIn :: Ord v => v -> Term f v a -> Bool +isFreeIn v t = Set.member v (freeVars t) + +-- | Replace the annotation with the given argument. +annotate :: a -> Term f v a -> Term f v a +annotate a (Term fvs _ out) = Term fvs a out + +vmap :: (Functor f, Foldable f, Ord v2) => (v -> v2) -> Term f v a -> Term f v2 a +vmap f (Term _ a out) = case out of + Var v -> annotatedVar a (f v) + Tm fa -> tm' a (fmap (vmap f) fa) + Cycle r -> cycle' a (vmap f r) + Abs v body -> abs' a (f v) (vmap f body) + +amap :: (Functor f, Foldable f, Ord v) => (a -> a2) -> Term f v a -> Term f v a2 +amap = amap' . 
const + +amap' :: (Functor f, Foldable f, Ord v) => (Term f v a -> a -> a2) -> Term f v a -> Term f v a2 +amap' f t@(Term _ a out) = case out of + Var v -> annotatedVar (f t a) v + Tm fa -> tm' (f t a) (fmap (amap' f) fa) + Cycle r -> cycle' (f t a) (amap' f r) + Abs v body -> abs' (f t a) v (amap' f body) + +-- | Modifies the annotations in this tree +instance Functor f => Functor (Term f v) where + fmap f (Term fvs a sub) = Term fvs (f a) (fmap (fmap f) sub) + +extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a +extraMap p (Term fvs a sub) = Term fvs a (go p sub) where + go :: Functor g => (forall k . f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) + go p = \case + Var v -> Var v + Cycle r -> Cycle (extraMap p r) + Abs v r -> Abs v (extraMap p r) + Tm x -> Tm (fmap (extraMap p) (p x)) + +pattern Var' v <- Term _ _ (Var v) +pattern Cycle' vs t <- Term _ _ (Cycle (AbsN' vs t)) +-- pattern Abs' v body <- Term _ _ (Abs v body) +pattern Abs' subst <- (unabs1 -> Just subst) +pattern AbsN' vs body <- (unabs -> (vs, body)) +pattern Tm' f <- Term _ _ (Tm f) +pattern CycleA' a avs t <- Term _ a (Cycle (AbsNA' avs t)) +pattern AbsNA' avs body <- (unabsA -> (avs, body)) +pattern Abs1NA' avs body <- (unabs1A -> Just (avs, body)) + +unabsA :: Term f v a -> ([(a,v)], Term f v a) +unabsA (Term _ a (Abs hd body)) = + let (tl, body') = unabsA body in ((a,hd) : tl, body') +unabsA t = ([], t) + +unabs1A :: Term f v a -> Maybe ([(a,v)], Term f v a) +unabs1A t = case unabsA t of + ([], _) -> Nothing + x -> Just x + +var :: v -> Term f v () +var = annotatedVar () + +annotatedVar :: a -> v -> Term f v a +annotatedVar a v = Term (Set.singleton v) a (Var v) + +abs :: Ord v => v -> Term f v () -> Term f v () +abs = abs' () + +abs' :: Ord v => a -> v -> Term f v a -> Term f v a +abs' a v body = Term (Set.delete v (freeVars body)) a (Abs v body) + +absr :: (Functor f, Foldable f, Var v) => v -> Term f (V v) () -> Term f (V v) () +absr = absr' () + +-- | 
Rebuild an `abs`, renaming `v` to avoid capturing any `Free v` in `body`. +absr' :: (Functor f, Foldable f, Var v) => a -> v -> Term f (V v) a -> Term f (V v) a +absr' a v body = wrap' v body $ \v body -> abs' a v body + +absChain :: Ord v => [v] -> Term f v () -> Term f v () +absChain vs t = foldr abs t vs + +absCycle :: Ord v => [v] -> Term f v () -> Term f v () +absCycle vs t = cycle $ absChain vs t + +absChain' :: Ord v => [(a, v)] -> Term f v a -> Term f v a +absChain' vs t = foldr (\(a,v) t -> abs' a v t) t vs + +tm :: (Foldable f, Ord v) => f (Term f v ()) -> Term f v () +tm = tm' () + +tm' :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a +tm' a t = + Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) + +cycle :: Term f v () -> Term f v () +cycle = cycle' () + +cycle' :: a -> Term f v a -> Term f v a +cycle' a t = Term (freeVars t) a (Cycle t) + +cycler' :: (Functor f, Foldable f, Var v) => a -> [v] -> Term f (V v) a -> Term f (V v) a +cycler' a vs t = cycle' a $ foldr (absr' a) t vs + +cycler :: (Functor f, Foldable f, Var v) => [v] -> Term f (V v) () -> Term f (V v) () +cycler = cycler' () + +into :: (Foldable f, Ord v) => ABT f v (Term f v ()) -> Term f v () +into = into' () + +into' :: (Foldable f, Ord v) => a -> ABT f v (Term f v a) -> Term f v a +into' a abt = case abt of + Var x -> annotatedVar a x + Cycle t -> cycle' a t + Abs v r -> abs' a v r + Tm t -> tm' a t + +-- | renames `old` to `new` in the given term, ignoring subtrees that bind `old` +rename :: (Foldable f, Functor f, Var v) => v -> v -> Term f v a -> Term f v a +rename old new t0@(Term fvs ann t) = + if Set.notMember old fvs then t0 + else case t of + Var v -> if v == old then annotatedVar ann new else t0 + Cycle body -> cycle' ann (rename old new body) + Abs v body -> + -- v shadows old, so skip this subtree + if v == old then abs' ann v body + + -- the rename would capture new, freshen this Abs + -- to make that no longer true, then proceed with + -- renaming 
`old` to `new` + else if v == new then + let v' = freshIn (Set.fromList [new,old] <> freeVars body) v + in abs' ann v' (rename old new (rename v v' body)) + + -- nothing special, just rename inside body of Abs + else abs' ann v (rename old new body) + Tm v -> tm' ann (fmap (rename old new) v) + +changeVars :: (Foldable f, Functor f, Var v) => Map v v -> Term f v a -> Term f v a +changeVars m t = case out t of + Abs v body -> case Map.lookup v m of + Nothing -> abs' (annotation t) v (changeVars m body) + Just v' -> abs' (annotation t) v' (changeVars m body) + Cycle body -> cycle' (annotation t) (changeVars m body) + Var v -> case Map.lookup v m of + Nothing -> t + Just v -> annotatedVar (annotation t) v + Tm v -> tm' (annotation t) (changeVars m <$> v) + +-- | Produce a variable which is free in both terms +freshInBoth :: Var v => Term f v a -> Term f v a -> v -> v +freshInBoth t1 t2 = freshIn $ Set.union (freeVars t1) (freeVars t2) + +fresh :: Var v => Term f v a -> v -> v +fresh t = freshIn (freeVars t) + +freshEverywhere :: (Foldable f, Var v) => Term f v a -> v -> v +freshEverywhere t = freshIn . Set.fromList $ allVars t + +allVars :: Foldable f => Term f v a -> [v] +allVars t = case out t of + Var v -> [v] + Cycle body -> allVars body + Abs v body -> v : allVars body + Tm v -> Foldable.toList v >>= allVars + +freshes :: Var v => Term f v a -> [v] -> [v] +freshes = freshes' . freeVars + +freshes' :: Var v => Set v -> [v] -> [v] +freshes' used vs = evalState (traverse freshenS vs) used + +-- | Freshens the given variable wrt. the set of used variables +-- tracked by state. Adds the result to the set of used variables. +freshenS :: (Var v, MonadState (Set v) m) => v -> m v +freshenS = freshenS' id + +-- | A more general version of `freshenS` that uses a lens +-- to focus on used variables inside state. 
+freshenS' :: (Var v, MonadState s m) => Lens' s (Set v) -> v -> m v +freshenS' uvLens v = do + usedVars <- use uvLens + let v' = freshIn usedVars v + uvLens .= Set.insert v' usedVars + pure v' + +-- | `subst v e body` substitutes `e` for `v` in `body`, avoiding capture by +-- renaming abstractions in `body` +subst + :: (Foldable f, Functor f, Var v) + => v + -> Term f v a + -> Term f v a + -> Term f v a +subst v r = subst' (const r) v (freeVars r) + +-- Slightly generalized version of `subst`, the replacement action is handled +-- by the function `replace`, which is given the annotation `a` at the point +-- of replacement. `r` should be the set of free variables contained in the +-- term returned by `replace`. See `substInheritAnnotation` for an example usage. +subst' :: (Foldable f, Functor f, Var v) => (a -> Term f v a) -> v -> Set v -> Term f v a -> Term f v a +subst' replace v r t2@(Term fvs ann body) + | Set.notMember v fvs = t2 -- subtrees not containing the var can be skipped + | otherwise = case body of + Var v' | v == v' -> replace ann -- var match; perform replacement + | otherwise -> t2 -- var did not match one being substituted; ignore + Cycle body -> cycle' ann (subst' replace v r body) + Abs x _ | x == v -> t2 -- x shadows v; ignore subtree + Abs x e -> abs' ann x' e' + where x' = freshIn (fvs `Set.union` r) x + -- rename x to something that cannot be captured by `r` + e' = if x /= x' then subst' replace v r (rename x x' e) + else subst' replace v r e + Tm body -> tm' ann (fmap (subst' replace v r) body) + +-- Like `subst`, but the annotation of the replacement is inherited from +-- the previous annotation at each replacement point. 
+substInheritAnnotation :: (Foldable f, Functor f, Var v) + => v -> Term f v b -> Term f v a -> Term f v a +substInheritAnnotation v r = + subst' (\ann -> const ann <$> r) v (freeVars r) + +substsInheritAnnotation + :: (Foldable f, Functor f, Var v) + => [(v, Term f v b)] + -> Term f v a + -> Term f v a +substsInheritAnnotation replacements body = + foldr (uncurry substInheritAnnotation) body (reverse replacements) + +-- | `substs [(t1,v1), (t2,v2), ...] body` performs multiple simultaneous +-- substitutions, avoiding capture +substs + :: (Foldable f, Functor f, Var v) + => [(v, Term f v a)] + -> Term f v a + -> Term f v a +substs replacements body = foldr (uncurry subst) body (reverse replacements) + +-- Count the number times the given variable appears free in the term +occurrences :: (Foldable f, Var v) => v -> Term f v a -> Int +occurrences v t | not (v `isFreeIn` t) = 0 +occurrences v t = case out t of + Var v2 -> if v == v2 then 1 else 0 + Cycle t -> occurrences v t + Abs v2 t -> if v == v2 then 0 else occurrences v t + Tm t -> foldl' (\s t -> s + occurrences v t) 0 $ Foldable.toList t + +rebuildUp :: (Ord v, Foldable f, Functor f) + => (f (Term f v a) -> f (Term f v a)) + -> Term f v a + -> Term f v a +rebuildUp f (Term _ ann body) = case body of + Var v -> annotatedVar ann v + Cycle body -> cycle' ann (rebuildUp f body) + Abs x e -> abs' ann x (rebuildUp f e) + Tm body -> tm' ann (f $ fmap (rebuildUp f) body) + +rebuildUp' :: (Ord v, Foldable f, Functor f) + => (Term f v a -> Term f v a) + -> Term f v a + -> Term f v a +rebuildUp' f (Term _ ann body) = case body of + Var v -> f (annotatedVar ann v) + Cycle body -> f $ cycle' ann (rebuildUp' f body) + Abs x e -> f $ abs' ann x (rebuildUp' f e) + Tm body -> f $ tm' ann (fmap (rebuildUp' f) body) + +freeVarOccurrences :: (Traversable f, Ord v) => Set v -> Term f v a -> [(v, a)] +freeVarOccurrences except t = + [ (v, a) | (v,a) <- go $ annotateBound t, not (Set.member v except) ] + where + go e = case out e of 
+ Var v -> if Set.member v (snd $ annotation e) + then [] + else [(v, fst $ annotation e)] + Cycle body -> go body + Abs _ body -> go body + Tm body -> foldMap go body + +foreachSubterm + :: (Traversable f, Applicative g, Ord v) + => (Term f v a -> g b) + -> Term f v a + -> g [b] +foreachSubterm f e = case out e of + Var _ -> pure <$> f e + Cycle body -> (:) <$> f e <*> foreachSubterm f body + Abs _ body -> (:) <$> f e <*> foreachSubterm f body + Tm body -> + (:) + <$> f e + <*> (join . Foldable.toList <$> traverse (foreachSubterm f) body) + +subterms :: (Ord v, Traversable f) => Term f v a -> [Term f v a] +subterms t = runIdentity $ foreachSubterm pure t + +-- | `visit f t` applies an effectful function to each subtree of +-- `t` and sequences the results. When `f` returns `Nothing`, `visit` +-- descends into the children of the current subtree. When `f` returns +-- `Just t2`, `visit` replaces the current subtree with `t2`. Thus: +-- `visit (const Nothing) t == pure t` and +-- `visit (const (Just (pure t2))) t == pure t2` +visit + :: (Traversable f, Applicative g, Ord v) + => (Term f v a -> Maybe (g (Term f v a))) + -> Term f v a + -> g (Term f v a) +visit f t = flip fromMaybe (f t) $ case out t of + Var _ -> pure t + Cycle body -> cycle' (annotation t) <$> visit f body + Abs x e -> abs' (annotation t) x <$> visit f e + Tm body -> tm' (annotation t) <$> traverse (visit f) body + +-- | Apply an effectful function to an ABT tree top down, sequencing the results. +visit' :: (Traversable f, Applicative g, Monad g, Ord v) + => (f (Term f v a) -> g (f (Term f v a))) + -> Term f v a + -> g (Term f v a) +visit' f t = case out t of + Var _ -> pure t + Cycle body -> cycle' (annotation t) <$> visit' f body + Abs x e -> abs' (annotation t) x <$> visit' f e + Tm body -> f body >>= (fmap (tm' (annotation t)) . traverse (visit' f)) + +-- | `visit` specialized to the `Identity` effect. 
+visitPure :: (Traversable f, Ord v) + => (Term f v a -> Maybe (Term f v a)) -> Term f v a -> Term f v a +visitPure f = runIdentity . visit (fmap pure . f) + +rewriteDown :: (Traversable f, Ord v) + => (Term f v a -> Term f v a) + -> Term f v a + -> Term f v a +rewriteDown f t = let t' = f t in case out t' of + Var _ -> t' + Cycle body -> cycle' (annotation t) (rewriteDown f body) + Abs x e -> abs' (annotation t) x (rewriteDown f e) + Tm body -> tm' (annotation t) (rewriteDown f `fmap` body) + +data Subst f v a = + Subst { freshen :: forall m v' . Monad m => (v -> m v') -> m v' + , bind :: Term f v a -> Term f v a + , bindInheritAnnotation :: forall b . Term f v b -> Term f v a + , variable :: v } + +unabs1 :: (Foldable f, Functor f, Var v) => Term f v a -> Maybe (Subst f v a) +unabs1 (Term _ _ (Abs v body)) = Just (Subst freshen bind bindInheritAnnotation v) where + freshen f = f v + bind x = subst v x body + bindInheritAnnotation x = substInheritAnnotation v x body +unabs1 _ = Nothing + +unabs :: Term f v a -> ([v], Term f v a) +unabs (Term _ _ (Abs hd body)) = + let (tl, body') = unabs body in (hd : tl, body') +unabs t = ([], t) + +reabs :: Ord v => [v] -> Term f v () -> Term f v () +reabs vs t = foldr abs t vs + +transform :: (Ord v, Foldable g, Functor f) + => (forall a. 
f a -> g a) -> Term f v a -> Term g v a +transform f tm = case out tm of + Var v -> annotatedVar (annotation tm) v + Abs v body -> abs' (annotation tm) v (transform f body) + Tm subterms -> + let subterms' = fmap (transform f) subterms + in tm' (annotation tm) (f subterms') + Cycle body -> cycle' (annotation tm) (transform f body) + +-- Rebuild the tree annotations upward, starting from the leaves, +-- using the Monoid to choose the annotation at intermediate nodes +reannotateUp :: (Ord v, Foldable f, Functor f, Monoid b) + => (Term f v a -> b) + -> Term f v a + -> Term f v (a, b) +reannotateUp g t = case out t of + Var v -> annotatedVar (annotation t, g t) v + Cycle body -> + let body' = reannotateUp g body + in cycle' (annotation t, snd (annotation body')) body' + Abs v body -> + let body' = reannotateUp g body + in abs' (annotation t, snd (annotation body')) v body' + Tm body -> + let + body' = reannotateUp g <$> body + ann = g t <> foldMap (snd . annotation) body' + in tm' (annotation t, ann) body' + +-- Find all subterms that match a predicate. Prune the search for speed. +-- (Some patterns of pruning can cut the complexity of the search.) 
+data FindAction x = Found x | Prune | Continue deriving Show +find :: (Ord v, Foldable f, Functor f) + => (Term f v a -> FindAction x) + -> Term f v a + -> [x] +find p t = case p t of + Found x -> x : go + Prune -> [] + Continue -> go + where go = case out t of + Var _ -> [] + Cycle body -> Unison.ABT.find p body + Abs _ body -> Unison.ABT.find p body + Tm body -> Foldable.concat (Unison.ABT.find p <$> body) + +find' :: (Ord v, Foldable f, Functor f) + => (Term f v a -> Bool) + -> Term f v a + -> [Term f v a] +find' p = Unison.ABT.find (\t -> if p t then Found t else Continue) + +instance (Foldable f, Functor f, Eq1 f, Var v) => Eq (Term f v a) where + -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable + t1 == t2 = go (out t1) (out t2) where + go (Var v) (Var v2) | v == v2 = True + go (Cycle t1) (Cycle t2) = t1 == t2 + go (Abs v1 body1) (Abs v2 body2) = + if v1 == v2 then body1 == body2 + else let v3 = freshInBoth body1 body2 v1 + in rename v1 v3 body1 == rename v2 v3 body2 + go (Tm f1) (Tm f2) = f1 ==# f2 + go _ _ = False + +instance (Foldable f, Functor f, Ord1 f, Var v) => Ord (Term f v a) where + -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable + t1 `compare` t2 = go (out t1) (out t2) where + go (Var v) (Var v2) = v `compare` v2 + go (Cycle t1) (Cycle t2) = t1 `compare` t2 + go (Abs v1 body1) (Abs v2 body2) = + if v1 == v2 then body1 `compare` body2 + else let v3 = freshInBoth body1 body2 v1 + in rename v1 v3 body1 `compare` rename v2 v3 body2 + go (Tm f1) (Tm f2) = compare1 f1 f2 + go t1 t2 = tag t1 `compare` tag t2 + tag (Var _) = 0 :: Word + tag (Tm _) = 1 + tag (Abs _ _) = 2 + tag (Cycle _) = 3 + +components :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] +components = Components.components freeVars + +-- Converts to strongly connected components while preserving the +-- order of definitions. Satisfies `join (orderedComponents bs) == bs`. 
+orderedComponents' :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] +orderedComponents' tms = go [] Set.empty tms + where + go [] _ [] = [] + go [] deps (hd:rem) = go [hd] (deps <> freeVars (snd hd)) rem + go cur deps rem = case findIndex isDep rem of + Nothing -> reverse cur : let (hd,tl) = splitAt 1 rem + in go hd (depsFor hd) tl + Just i -> go (reverse newMembers ++ cur) deps' (drop (i+1) rem) + where deps' = deps <> depsFor newMembers + newMembers = take (i+1) rem + where + depsFor = foldMap (freeVars . snd) + isDep (v, _) = Set.member v deps + +-- Like `orderedComponents'`, but further break up cycles and move +-- cyclic subcycles before other components in the same cycle. +-- Tweak suggested by @aryairani. +-- +-- Example: given `[[x],[ping,r,s,pong]]`, where `ping` and `pong` +-- are mutually recursive but `r` and `s` are uninvolved, this produces: +-- `[[x], [ping,pong], [r], [s]]`. +orderedComponents :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] +orderedComponents bs0 = tweak =<< orderedComponents' bs0 where + tweak :: Var v => [(v,Term f v a)] -> [[(v,Term f v a)]] + tweak bs@(_:_:_) = case takeWhile isCyclic (components bs) of + [] -> [bs] + cycles -> cycles <> orderedComponents rest + where + rest = [ (v,b) | (v,b) <- bs, Set.notMember v cycleVars ] + cycleVars = Set.fromList (fst <$> join cycles) + tweak bs = [bs] -- any cycle with < 2 bindings is left alone + isCyclic [(v,b)] = Set.member v (freeVars b) + isCyclic bs = length bs > 1 + +-- Hash a strongly connected component and sort its definitions into a canonical order. 
+hashComponent :: + (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) + => Map.Map v (Term f v a) -> (h, [(v, Term f v a)]) +hashComponent byName = let + ts = Map.toList byName + embeds = [ (v, void (transform Embed t)) | (v,t) <- ts ] + vs = fst <$> ts + tms = [ (v, absCycle vs (tm $ Component (snd <$> embeds) (var v))) | v <- vs ] + hashed = [ ((v,t), hash t) | (v,t) <- tms ] + sortedHashed = sortOn snd hashed + overallHash = Hashable.accumulate (Hashable.Hashed . snd <$> sortedHashed) + in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) + +-- Group the definitions into strongly connected components and hash +-- each component. Substitute the hash of each component into subsequent +-- components (using the `termFromHash` function). Requires that the +-- overall component has no free variables. +hashComponents + :: (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) + => (h -> Word64 -> Word64 -> Term f v ()) + -> Map.Map v (Term f v a) + -> [(h, [(v, Term f v a)])] +hashComponents termFromHash termsByName = let + bound = Set.fromList (Map.keys termsByName) + escapedVars = Set.unions (freeVars <$> Map.elems termsByName) `Set.difference` bound + sccs = components (Map.toList termsByName) + go _ [] = [] + go prevHashes (component : rest) = let + sub = substsInheritAnnotation (Map.toList prevHashes) + (h, sortedComponent) = hashComponent $ Map.fromList [ (v, sub t) | (v, t) <- component ] + size = fromIntegral (length sortedComponent) + curHashes = Map.fromList [ (v, termFromHash h i size) | ((v, _),i) <- sortedComponent `zip` [0..]] + newHashes = prevHashes `Map.union` curHashes + newHashesL = Map.toList newHashes + sortedComponent' = [ (v, substsInheritAnnotation newHashesL t) | (v, t) <- sortedComponent ] + in (h, sortedComponent') : go newHashes rest + in if Set.null escapedVars then go Map.empty sccs + else error $ "can't hashComponents if bindings have free 
variables:\n " + ++ show (map show (Set.toList escapedVars)) + ++ "\n " ++ show (map show (Map.keys termsByName)) + +-- Implementation detail of hashComponent +data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) + +instance (Hashable1 f, Functor f) => Hashable1 (Component f) where + hash1 hashCycle hash c = case c of + Component as a -> let + (hs, hash) = hashCycle as + toks = Hashable.Hashed <$> hs + in Hashable.accumulate $ (Hashable.Tag 1 : toks) ++ [Hashable.Hashed (hash a)] + Embed fa -> Hashable.hash1 hashCycle hash fa + +-- | We ignore annotations in the `Term`, as these should never affect the +-- meaning of the term. +hash :: forall f v a h . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) + => Term f v a -> h +hash = hash' [] where + hash' :: [Either [v] v] -> Term f v a -> h + hash' env (Term _ _ t) = case t of + Var v -> maybe die hashInt ind + where lookup (Left cycle) = v `elem` cycle + lookup (Right v') = v == v' + ind = findIndex lookup env + hashInt :: Int -> h + hashInt i = Hashable.accumulate [Hashable.Nat $ fromIntegral i] + die = error $ "unknown var in environment: " ++ show v + ++ " environment = " ++ show env + Cycle (AbsN' vs t) -> hash' (Left vs : env) t + Cycle t -> hash' env t + Abs v t -> hash' (Right v : env) t + Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t + + hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) + hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = + let + permute p xs = case Vector.fromList xs of xs -> map (xs !) p + hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] ts) + pt = fst <$> sortOn snd hashed + (p,ts') = unzip pt + in case map Right (permute p cycle) ++ envTl of + env -> (map (hash' env) ts', hash' env) + hashCycle env ts = (map (hash' env) ts, hash' env) + +-- | Use the `hash` function to efficiently remove duplicates from the list, preserving order. +distinct :: forall f v h a proxy . 
(Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) + => proxy h + -> [Term f v a] -> [Term f v a] +distinct _ ts = fst <$> sortOn snd m + where m = Map.elems (Map.fromList (hashes `zip` (ts `zip` [0 :: Int .. 1]))) + hashes = map hash ts :: [h] + +-- | Use the `hash` function to remove elements from `t1s` that exist in `t2s`, preserving order. +subtract :: forall f v h a proxy . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) + => proxy h + -> [Term f v a] -> [Term f v a] -> [Term f v a] +subtract _ t1s t2s = + let skips = Set.fromList (map hash t2s :: [h]) + in filter (\t -> Set.notMember (hash t) skips) t1s + +instance (Show1 f, Show v) => Show (Term f v a) where + -- annotations not shown + showsPrec p (Term _ _ out) = case out of + Var v -> showParen (p>=9) $ \x -> "Var " ++ show v ++ x + Cycle body -> ("Cycle " ++) . showsPrec p body + Abs v body -> showParen True $ (show v ++) . showString ". " . showsPrec p body + Tm f -> showsPrec1 p f diff --git a/unison-core/src/Unison/ABT/Normalized.hs b/unison-core/src/Unison/ABT/Normalized.hs new file mode 100644 index 0000000000..b5e606c83f --- /dev/null +++ b/unison-core/src/Unison/ABT/Normalized.hs @@ -0,0 +1,134 @@ +{-# language GADTs #-} +{-# language RankNTypes #-} +{-# language ViewPatterns #-} +{-# language DeriveFunctor #-} +{-# language PatternGuards #-} +{-# language DeriveFoldable #-} +{-# language PatternSynonyms #-} +{-# language DeriveTraversable #-} + +{-# language UndecidableInstances #-} +{-# language QuantifiedConstraints #-} + +module Unison.ABT.Normalized + ( ABT(..) 
+ , Term(.., TAbs, TTm, TAbss) + , renames + , rename + , transform + ) + where + +import Data.Bifunctor +import Data.Bifoldable +-- import Data.Bitraversable + +import Data.Set (Set) +import qualified Data.Set as Set + +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map + +import Unison.ABT (Var(..)) + +-- ABTs with support for 'normalized' structure where only variables +-- may occur at some positions. This is accomplished by passing the +-- variable type to the base functor. +data ABT f v + = Abs v (Term f v) + | Tm (f v (Term f v)) + +data Term f v = Term + { freeVars :: Set v + , out :: ABT f v + } + +instance (forall a b. Show a => Show b => Show (f a b), Show v) + => Show (ABT f v) + where + showsPrec p a = showParen (p >= 9) $ case a of + Abs v tm + -> showString "Abs " . showsPrec 10 v + . showString " " . showsPrec 10 tm + Tm e -> showString "Tm " . showsPrec 10 e + +instance (forall a b. Show a => Show b => Show (f a b), Show v) + => Show (Term f v) + where + showsPrec p (Term _ e) + = showParen (p >= 9) $ showString "Term " . showsPrec 10 e + +pattern TAbs :: Var v => v -> Term f v -> Term f v +pattern TAbs u bd <- Term _ (Abs u bd) + where TAbs u bd = Term (Set.delete u (freeVars bd)) (Abs u bd) + +pattern TTm :: (Var v, Bifoldable f) => f v (Term f v) -> Term f v +pattern TTm bd <- Term _ (Tm bd) + where TTm bd = Term (bifoldMap Set.singleton freeVars bd) (Tm bd) + +{-# complete TAbs, TTm #-} + +unabss :: Var v => Term f v -> ([v], Term f v) +unabss (TAbs v (unabss -> (vs, bd))) = (v:vs, bd) +unabss bd = ([], bd) + +pattern TAbss :: Var v => [v] -> Term f v -> Term f v +pattern TAbss vs bd <- (unabss -> (vs, bd)) + where TAbss vs bd = foldr TAbs bd vs + +{-# complete TAbss #-} + +-- Simultaneous variable renaming. +-- +-- subvs0 counts the number of variables being renamed to a particular +-- variable +-- +-- rnv0 is the variable renaming map. 
+renames + :: (Var v, Ord v, Bifunctor f, Bifoldable f) + => Map v Int -> Map v v -> Term f v -> Term f v +renames subvs0 rnv0 tm = case tm of + TAbs u body + | not $ Map.null rnv' -> TAbs u' (renames subvs' rnv' body) + where + rnv' = Map.alter (const $ adjustment) u rnv + -- if u is in the set of variables we're substituting in, it + -- needs to be renamed to avoid capturing things. + u' | u `Map.member` subvs = freshIn (fvs `Set.union` Map.keysSet subvs) u + | otherwise = u + + -- if u needs to be renamed to avoid capturing subvs + -- and u actually occurs in the body, then add it to + -- the substitutions + (adjustment, subvs') + | u /= u' && u `Set.member` fvs = (Just u', Map.insertWith (+) u' 1 subvs) + | otherwise = (Nothing, subvs) + + TTm body + | not $ Map.null rnv + -> TTm $ bimap (\u -> Map.findWithDefault u u rnv) (renames subvs rnv) body + + _ -> tm + where + fvs = freeVars tm + + -- throw out irrelevant renamings + rnv = Map.restrictKeys rnv0 fvs + + -- decrement the variable usage counts for the renamings we threw away + subvs = Map.foldl' decrement subvs0 $ Map.withoutKeys rnv0 fvs + decrement sv v = Map.update drop v sv + drop n | n <= 1 = Nothing + | otherwise = Just (n-1) + +rename + :: (Var v, Ord v, Bifunctor f, Bifoldable f) + => v -> v -> Term f v -> Term f v +rename old new = renames (Map.singleton new 1) (Map.singleton old new) + +transform + :: (Var v, Bifunctor g, Bifoldable f, Bifoldable g) + => (forall a b. f a b -> g a b) + -> Term f v -> Term g v +transform phi (TTm body) = TTm . 
second (transform phi) $ phi body +transform phi (TAbs u body) = TAbs u $ transform phi body diff --git a/unison-core/src/Unison/Blank.hs b/unison-core/src/Unison/Blank.hs new file mode 100644 index 0000000000..d10199db95 --- /dev/null +++ b/unison-core/src/Unison/Blank.hs @@ -0,0 +1,22 @@ +module Unison.Blank where + +loc :: Recorded loc -> loc +loc (Placeholder loc _) = loc +loc (Resolve loc _) = loc + +nameb :: Blank loc -> Maybe String +nameb (Recorded (Placeholder _ n)) = Just n +nameb (Recorded (Resolve _ n)) = Just n +nameb _ = Nothing + +data Recorded loc + -- A user-provided named placeholder + = Placeholder loc String + -- A name to be resolved with type-directed name resolution. + | Resolve loc String + deriving (Show, Eq, Ord, Functor) + +data Blank loc = Blank | Recorded (Recorded loc) + deriving (Show, Eq, Ord, Functor) + + diff --git a/unison-core/src/Unison/ConstructorType.hs b/unison-core/src/Unison/ConstructorType.hs new file mode 100644 index 0000000000..4a1796bc31 --- /dev/null +++ b/unison-core/src/Unison/ConstructorType.hs @@ -0,0 +1,8 @@ +module Unison.ConstructorType where + +import Unison.Hashable (Hashable, Token(Tag), tokens) + +data ConstructorType = Data | Effect deriving (Eq, Ord, Show, Enum) + +instance Hashable ConstructorType where + tokens b = [Tag . 
fromIntegral $ fromEnum b] diff --git a/unison-core/src/Unison/DataDeclaration.hs b/unison-core/src/Unison/DataDeclaration.hs new file mode 100644 index 0000000000..8f6bad3f67 --- /dev/null +++ b/unison-core/src/Unison/DataDeclaration.hs @@ -0,0 +1,413 @@ +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# Language DeriveFoldable #-} +{-# Language DeriveTraversable #-} +{-# Language OverloadedStrings #-} +{-# Language PatternSynonyms #-} +{-# Language ViewPatterns #-} + +module Unison.DataDeclaration where + +import Unison.Prelude + +import Control.Lens (_3, over) +import Control.Monad.State (evalState) + +import Data.Bifunctor (first, second, bimap) +import qualified Unison.Util.Relation as Rel +import Unison.Hash ( Hash ) +import qualified Data.Map as Map +import qualified Data.Set as Set +import Prelude hiding ( cycle ) +import Prelude.Extras ( Show1 ) +import qualified Unison.ABT as ABT +import Unison.Hashable ( Accumulate + , Hashable1 + ) +import qualified Unison.Hashable as Hashable +import qualified Unison.Name as Name +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import qualified Unison.Reference.Util as Reference.Util +import qualified Unison.Referent as Referent +import qualified Unison.Term as Term +import Unison.Term ( Term ) +import Unison.Type ( Type ) +import qualified Unison.Type as Type +import Unison.Var ( Var ) +import qualified Unison.Var as Var +import Unison.Names3 (Names0) +import qualified Unison.Names3 as Names +import qualified Unison.Pattern as Pattern +import qualified Unison.ConstructorType as CT + +type ConstructorId = Term.ConstructorId + +type Decl v a = Either (EffectDeclaration v a) (DataDeclaration v a) + +data DeclOrBuiltin v a = + Builtin CT.ConstructorType | Decl (Decl v a) + deriving (Eq, Show) + +asDataDecl :: Decl v a -> DataDeclaration v a +asDataDecl = either toDataDecl id + +declDependencies :: Ord v => Decl v a -> Set Reference +declDependencies = either 
(dependencies . toDataDecl) dependencies + +constructorType :: Decl v a -> CT.ConstructorType +constructorType = \case + Left{} -> CT.Effect + Right{} -> CT.Data + +data Modifier = Structural | Unique Text -- | Opaque (Set Reference) + deriving (Eq, Ord, Show) + +data DataDeclaration v a = DataDeclaration { + modifier :: Modifier, + annotation :: a, + bound :: [v], + constructors' :: [(a, v, Type v a)] +} deriving (Eq, Show, Functor) + +newtype EffectDeclaration v a = EffectDeclaration { + toDataDecl :: DataDeclaration v a +} deriving (Eq,Show,Functor) + +withEffectDecl + :: (DataDeclaration v a -> DataDeclaration v' a') + -> (EffectDeclaration v a -> EffectDeclaration v' a') +withEffectDecl f e = EffectDeclaration (f . toDataDecl $ e) + +withEffectDeclM :: Functor f + => (DataDeclaration v a -> f (DataDeclaration v' a')) + -> EffectDeclaration v a + -> f (EffectDeclaration v' a') +withEffectDeclM f = fmap EffectDeclaration . f . toDataDecl + +generateConstructorRefs + :: (Reference -> ConstructorId -> Reference) + -> Reference.Id + -> Int + -> [(ConstructorId, Reference)] +generateConstructorRefs hashCtor rid n = + (\i -> (i, hashCtor (Reference.DerivedId rid) i)) <$> [0 .. n] + +generateRecordAccessors + :: (Semigroup a, Var v) + => [(v, a)] + -> v + -> Reference + -> [(v, Term v a)] +generateRecordAccessors fields typename typ = + join [ tm t i | (t, i) <- fields `zip` [(0::Int)..] 
] + where + argname = Var.uncapitalize typename + tm (fname, ann) i = + [(Var.namespaced [typename, fname], get), + (Var.namespaced [typename, fname, Var.named "set"], set), + (Var.namespaced [typename, fname, Var.named "modify"], modify)] + where + -- example: `point -> case point of Point x _ -> x` + get = Term.lam ann argname $ Term.match ann + (Term.var ann argname) + [Term.MatchCase pat Nothing rhs] + where + pat = Pattern.Constructor ann typ 0 cargs + cargs = [ if j == i then Pattern.Var ann else Pattern.Unbound ann + | (_, j) <- fields `zip` [0..]] + rhs = ABT.abs' ann fname (Term.var ann fname) + -- example: `x point -> case point of Point _ y -> Point x y` + set = Term.lam' ann [fname', argname] $ Term.match ann + (Term.var ann argname) + [Term.MatchCase pat Nothing rhs] + where + fname' = Var.named . Var.name $ + Var.freshIn (Set.fromList $ [argname] <> (fst <$> fields)) fname + pat = Pattern.Constructor ann typ 0 cargs + cargs = [ if j == i then Pattern.Unbound ann else Pattern.Var ann + | (_, j) <- fields `zip` [0..]] + rhs = foldr (ABT.abs' ann) (Term.constructor ann typ 0 `Term.apps'` vargs) + [ f | ((f, _), j) <- fields `zip` [0..], j /= i ] + vargs = [ if j == i then Term.var ann fname' else Term.var ann v + | ((v, _), j) <- fields `zip` [0..]] + -- example: `f point -> case point of Point x y -> Point (f x) y` + modify = Term.lam' ann [fname', argname] $ Term.match ann + (Term.var ann argname) + [Term.MatchCase pat Nothing rhs] + where + fname' = Var.named . 
Var.name $ + Var.freshIn (Set.fromList $ [argname] <> (fst <$> fields)) + (Var.named "f") + pat = Pattern.Constructor ann typ 0 cargs + cargs = replicate (length fields) $ Pattern.Var ann + rhs = foldr (ABT.abs' ann) (Term.constructor ann typ 0 `Term.apps'` vargs) + (fst <$> fields) + vargs = [ if j == i + then Term.apps' (Term.var ann fname') [Term.var ann v] + else Term.var ann v + | ((v, _), j) <- fields `zip` [0..]] + +-- Returns references to the constructors, +-- along with the terms for those references and their types. +constructorTerms + :: (Reference -> ConstructorId -> Reference) + -> (a -> Reference -> ConstructorId -> Term v a) + -> Reference.Id + -> DataDeclaration v a + -> [(Reference.Id, Term v a, Type v a)] +constructorTerms hashCtor f rid dd = + (\((a, _, t), (i, re@(Reference.DerivedId r))) -> (r, f a re i, t)) <$> zip + (constructors' dd) + (generateConstructorRefs hashCtor rid (length $ constructors dd)) + +dataConstructorTerms + :: Ord v + => Reference.Id + -> DataDeclaration v a + -> [(Reference.Id, Term v a, Type v a)] +dataConstructorTerms = constructorTerms Term.hashConstructor Term.constructor + +effectConstructorTerms + :: Ord v + => Reference.Id + -> EffectDeclaration v a + -> [(Reference.Id, Term v a, Type v a)] +effectConstructorTerms rid ed = + constructorTerms Term.hashRequest Term.request rid $ toDataDecl ed + +constructorTypes :: DataDeclaration v a -> [Type v a] +constructorTypes = (snd <$>) . constructors + +declFields :: Var v => Decl v a -> Either [Int] [Int] +declFields = bimap cf cf . first toDataDecl + where + cf = fmap fields . 
constructorTypes + fields (Type.ForallsNamed' _ ty) = fields ty + fields (Type.Arrows' spine) = length spine - 1 + fields _ = 0 + +typeOfConstructor :: DataDeclaration v a -> ConstructorId -> Maybe (Type v a) +typeOfConstructor dd i = constructorTypes dd `atMay` i + +constructors :: DataDeclaration v a -> [(v, Type v a)] +constructors (DataDeclaration _ _ _ ctors) = [(v,t) | (_,v,t) <- ctors ] + +constructorVars :: DataDeclaration v a -> [v] +constructorVars dd = fst <$> constructors dd + +constructorNames :: Var v => DataDeclaration v a -> [Text] +constructorNames dd = Var.name <$> constructorVars dd + +declConstructorReferents :: Reference.Id -> Decl v a -> [Referent.Id] +declConstructorReferents rid decl = + [ Referent.Con' rid i ct | i <- constructorIds (asDataDecl decl) ] + where ct = constructorType decl + +constructorIds :: DataDeclaration v a -> [Int] +constructorIds dd = [0 .. length (constructors dd) - 1] + +-- | All variables mentioned in the given data declaration. +-- Includes both term and type variables, both free and bound. +allVars :: Ord v => DataDeclaration v a -> Set v +allVars (DataDeclaration _ _ bound ctors) = Set.unions $ + Set.fromList bound : [ Set.insert v (Set.fromList $ ABT.allVars tp) | (_,v,tp) <- ctors ] + +-- | All variables mentioned in the given declaration. +-- Includes both term and type variables, both free and bound. +allVars' :: Ord v => Decl v a -> Set v +allVars' = allVars . 
either toDataDecl id + +bindNames :: Var v + => Set v + -> Names0 + -> DataDeclaration v a + -> Names.ResolutionResult v a (DataDeclaration v a) +bindNames keepFree names (DataDeclaration m a bound constructors) = do + constructors <- for constructors $ \(a, v, ty) -> + (a,v,) <$> Type.bindNames keepFree names ty + pure $ DataDeclaration m a bound constructors + +dependencies :: Ord v => DataDeclaration v a -> Set Reference +dependencies dd = + Set.unions (Type.dependencies <$> constructorTypes dd) + +third :: (a -> b) -> (x,y,a) -> (x,y,b) +third f (x,y,a) = (x, y, f a) + +-- implementation of dataDeclToNames and effectDeclToNames +toNames0 :: Var v => CT.ConstructorType -> v -> Reference.Id -> DataDeclaration v a -> Names0 +toNames0 ct typeSymbol (Reference.DerivedId -> r) dd = + -- constructor names + foldMap names (constructorVars dd `zip` [0 ..]) + -- name of the type itself + <> Names.names0 mempty (Rel.singleton (Name.fromVar typeSymbol) r) + where + names (ctor, i) = + Names.names0 (Rel.singleton (Name.fromVar ctor) (Referent.Con r i ct)) mempty + +dataDeclToNames :: Var v => v -> Reference.Id -> DataDeclaration v a -> Names0 +dataDeclToNames = toNames0 CT.Data + +effectDeclToNames :: Var v => v -> Reference.Id -> EffectDeclaration v a -> Names0 +effectDeclToNames typeSymbol r ed = toNames0 CT.Effect typeSymbol r $ toDataDecl ed + +dataDeclToNames' :: Var v => (v, (Reference.Id, DataDeclaration v a)) -> Names0 +dataDeclToNames' (v,(r,d)) = dataDeclToNames v r d + +effectDeclToNames' :: Var v => (v, (Reference.Id, EffectDeclaration v a)) -> Names0 +effectDeclToNames' (v, (r, d)) = effectDeclToNames v r d + +mkEffectDecl' + :: Modifier -> a -> [v] -> [(a, v, Type v a)] -> EffectDeclaration v a +mkEffectDecl' m a b cs = EffectDeclaration (DataDeclaration m a b cs) + +mkEffectDecl :: Modifier -> [v] -> [(v, Type v ())] -> EffectDeclaration v () +mkEffectDecl m b cs = mkEffectDecl' m () b $ map (\(v, t) -> ((), v, t)) cs + +mkDataDecl' + :: Modifier -> a -> [v] 
-> [(a, v, Type v a)] -> DataDeclaration v a +mkDataDecl' = DataDeclaration + +mkDataDecl :: Modifier -> [v] -> [(v, Type v ())] -> DataDeclaration v () +mkDataDecl m b cs = mkDataDecl' m () b $ map (\(v,t) -> ((),v,t)) cs + +constructorArities :: DataDeclaration v a -> [Int] +constructorArities (DataDeclaration _ _a _bound ctors) = + Type.arity . (\(_,_,t) -> t) <$> ctors + +data F a + = Type (Type.F a) + | LetRec [a] a + | Constructors [a] + | Modified Modifier a + deriving (Functor, Foldable, Show, Show1) + +instance Hashable1 F where + hash1 hashCycle hash e = + let (tag, hashed) = (Hashable.Tag, Hashable.Hashed) + -- Note: start each layer with leading `2` byte, to avoid collisions with + -- terms, which start each layer with leading `1`. See `Hashable1 Term.F` + in Hashable.accumulate $ tag 2 : case e of + Type t -> [tag 0, hashed $ Hashable.hash1 hashCycle hash t] + LetRec bindings body -> + let (hashes, hash') = hashCycle bindings + in [tag 1] ++ map hashed hashes ++ [hashed $ hash' body] + Constructors cs -> + let (hashes, _) = hashCycle cs + in tag 2 : map hashed hashes + Modified m t -> + [tag 3, Hashable.accumulateToken m, hashed $ hash t] + +instance Hashable.Hashable Modifier where + tokens Structural = [Hashable.Tag 0] + tokens (Unique txt) = [Hashable.Tag 1, Hashable.Text txt] + +{- + type UpDown = Up | Down + + type List a = Nil | Cons a (List a) + + type Ping p = Ping (Pong p) + type Pong p = Pong (Ping p) + + type Foo a f = Foo Int (Bar a) + type Bar a f = Bar Long (Foo a) +-} + +hash :: (Eq v, Var v, Ord h, Accumulate h) + => [(v, ABT.Term F v ())] -> [(v, h)] +hash recursiveDecls = zip (fst <$> recursiveDecls) hashes where + hashes = ABT.hash <$> toLetRec recursiveDecls + +toLetRec :: Ord v => [(v, ABT.Term F v ())] -> [ABT.Term F v ()] +toLetRec decls = do1 <$> vs + where + (vs, decls') = unzip decls + -- we duplicate this letrec once (`do1`) + -- for each of the mutually recursive types + do1 v = ABT.cycle (ABT.absChain vs . 
ABT.tm $ LetRec decls' (ABT.var v)) + +unsafeUnwrapType :: (Var v) => ABT.Term F v a -> Type v a +unsafeUnwrapType typ = ABT.transform f typ + where f (Type t) = t + f _ = error $ "Tried to unwrap a type that wasn't a type: " ++ show typ + +toABT :: Var v => DataDeclaration v () -> ABT.Term F v () +toABT dd = ABT.tm $ Modified (modifier dd) dd' + where + dd' = ABT.absChain (bound dd) $ ABT.cycle + (ABT.absChain + (fst <$> constructors dd) + (ABT.tm . Constructors $ ABT.transform Type <$> constructorTypes dd)) + +updateDependencies :: Ord v => Map Reference Reference -> Decl v a -> Decl v a +updateDependencies typeUpdates decl = back $ dataDecl + { constructors' = over _3 (Type.updateDependencies typeUpdates) + <$> constructors' dataDecl + } + where + dataDecl = either toDataDecl id decl + back = either (const $ Left . EffectDeclaration) (const Right) decl + + +-- This converts `Reference`s it finds that are in the input `Map` +-- back to free variables +unhashComponent + :: forall v a. Var v => Map Reference (Decl v a) -> Map Reference (v, Decl v a) +unhashComponent m + = let + usedVars = foldMap allVars' m + m' :: Map Reference (v, Decl v a) + m' = evalState (Map.traverseWithKey assignVar m) usedVars where + assignVar r d = (,d) <$> ABT.freshenS (Var.refNamed r) + unhash1 = ABT.rebuildUp' go + where + go e@(Type.Ref' r) = case Map.lookup r m' of + Nothing -> e + Just (v,_) -> Type.var (ABT.annotation e) v + go e = e + unhash2 (Right dd@DataDeclaration{}) = Right $ unhash3 dd + unhash2 (Left (EffectDeclaration dd)) = + Left . 
EffectDeclaration $ unhash3 dd + unhash3 dd@DataDeclaration {..} = + dd { constructors' = fmap (over _3 unhash1) constructors' } + in + second unhash2 <$> m' + +-- Implementation detail of `hashDecls`, works with unannotated data decls +hashDecls0 :: (Eq v, Var v) => Map v (DataDeclaration v ()) -> [(v, Reference.Id)] +hashDecls0 decls = + let abts = toABT <$> decls + ref r = ABT.tm (Type (Type.Ref (Reference.DerivedId r))) + cs = Reference.Util.hashComponents ref abts + in [ (v, r) | (v, (r, _)) <- Map.toList cs ] + +-- | compute the hashes of these user defined types and update any free vars +-- corresponding to these decls with the resulting hashes +-- +-- data List a = Nil | Cons a (List a) +-- becomes something like +-- (List, #xyz, [forall a. #xyz a, forall a. a -> (#xyz a) -> (#xyz a)]) +-- +-- NOTE: technical limitation, this implementation gives diff results if ctors +-- have the same FQN as one of the types. TODO: assert this and bomb if not +-- satisfied, or else do local mangling and unmangling to ensure this doesn't +-- affect the hash. 
+hashDecls + :: (Eq v, Var v) + => Map v (DataDeclaration v a) + -> Names.ResolutionResult v a [(v, Reference.Id, DataDeclaration v a)] +hashDecls decls = do + -- todo: make sure all other external references are resolved before calling this + let varToRef = hashDecls0 (void <$> decls) + varToRef' = second Reference.DerivedId <$> varToRef + decls' = bindTypes <$> decls + bindTypes dd = dd { constructors' = over _3 (Type.bindExternal varToRef') <$> constructors' dd } + typeNames0 = Names.names0 mempty + $ Rel.fromList (first Name.fromVar <$> varToRef') + -- normalize the order of the constructors based on a hash of their types + sortCtors dd = dd { constructors' = sortOn hash3 $ constructors' dd } + hash3 (_, _, typ) = ABT.hash typ :: Hash + decls' <- fmap sortCtors <$> traverse (bindNames mempty typeNames0) decls' + pure [ (v, r, dd) | (v, r) <- varToRef, Just dd <- [Map.lookup v decls'] ] diff --git a/unison-core/src/Unison/Hash.hs b/unison-core/src/Unison/Hash.hs new file mode 100644 index 0000000000..d6738540a3 --- /dev/null +++ b/unison-core/src/Unison/Hash.hs @@ -0,0 +1,109 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Hash (Hash, toBytes, base32Hex, base32Hexs, fromBase32Hex, fromBytes, unsafeFromBase32Hex, showBase32Hex, validBase32HexChars) where + +import Unison.Prelude + +import Data.ByteString.Builder (doubleBE, word64BE, int64BE, toLazyByteString) +import qualified Data.ByteArray as BA + +import qualified Crypto.Hash as CH +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as BL + +import qualified Unison.Hashable as H +import qualified Codec.Binary.Base32Hex as Base32Hex +import qualified Data.Text as Text +import qualified Data.Set as Set + +-- | Hash which uniquely identifies a Unison type or term +newtype Hash = Hash { toBytes :: ByteString } deriving (Eq,Ord,Generic) + +instance Show Hash where + show h = take 999 $ Text.unpack (base32Hex h) + +instance H.Hashable Hash where + 
tokens h = [H.Bytes (toBytes h)] + +fromBytesImpl :: ByteString -> Hash +fromBytesImpl = fromBytes + +toBytesImpl :: Hash -> ByteString +toBytesImpl = toBytes + +instance H.Accumulate Hash where + accumulate = fromBytes . BA.convert . CH.hashFinalize . go CH.hashInit where + go :: CH.Context CH.SHA3_512 -> [H.Token Hash] -> CH.Context CH.SHA3_512 + go acc tokens = CH.hashUpdates acc (tokens >>= toBS) + toBS (H.Tag b) = [B.singleton b] + toBS (H.Bytes bs) = [encodeLength $ B.length bs, bs] + toBS (H.Int i) = BL.toChunks . toLazyByteString . int64BE $ i + toBS (H.Nat i) = BL.toChunks . toLazyByteString . word64BE $ i + toBS (H.Double d) = BL.toChunks . toLazyByteString . doubleBE $ d + toBS (H.Text txt) = + let tbytes = encodeUtf8 txt + in [encodeLength (B.length tbytes), tbytes] + toBS (H.Hashed h) = [toBytes h] + encodeLength :: Integral n => n -> B.ByteString + encodeLength = BL.toStrict . toLazyByteString . word64BE . fromIntegral + fromBytes = fromBytesImpl + toBytes = toBytesImpl + +-- | Return the lowercase unpadded base32Hex encoding of this 'Hash'. +-- Multibase prefix would be 'v', see https://github.com/multiformats/multibase +base32Hex :: Hash -> Text +base32Hex (Hash h) = + -- we're using an uppercase encoder that adds padding, so we drop the + -- padding and convert it to lowercase + Text.toLower . Text.dropWhileEnd (== '=') . decodeUtf8 $ + Base32Hex.encode h + +validBase32HexChars :: Set Char +validBase32HexChars = Set.fromList $ ['0' .. '9'] ++ ['a' .. 'v'] + +-- | Produce a 'Hash' from a base32hex-encoded version of its binary representation +fromBase32Hex :: Text -> Maybe Hash +fromBase32Hex txt = case Base32Hex.decode (encodeUtf8 $ Text.toUpper txt <> paddingChars) of + Left (_, _rem) -> Nothing + Right h -> pure $ Hash h + where + -- The decoder we're using is a base32 uppercase decoder that expects padding, + -- so we provide it with the appropriate number of padding characters for the + -- expected hash length. 
+ -- + -- The decoder requires 40 bit (8 5-bit characters) chunks, so if the number + -- of characters of the input is not a multiple of 8, we add '=' padding chars + -- until it is. + -- + -- See https://tools.ietf.org/html/rfc4648#page-8 + paddingChars :: Text + paddingChars = case Text.length txt `mod` 8 of + 0 -> "" + n -> Text.replicate (8 - n) "=" + + hashLength :: Int + hashLength = 512 + + _paddingChars :: Text + _paddingChars = case hashLength `mod` 40 of + 0 -> "" + 8 -> "======" + 16 -> "====" + 24 -> "===" + 32 -> "=" + i -> error $ "impossible hash length `mod` 40 not in {0,8,16,24,32}: " <> show i + +base32Hexs :: Hash -> String +base32Hexs = Text.unpack . base32Hex + +unsafeFromBase32Hex :: Text -> Hash +unsafeFromBase32Hex txt = + fromMaybe (error $ "invalid base32Hex value: " ++ Text.unpack txt) $ fromBase32Hex txt + +fromBytes :: ByteString -> Hash +fromBytes = Hash + +showBase32Hex :: H.Hashable t => t -> String +showBase32Hex = base32Hexs . H.accumulate' + diff --git a/unison-core/src/Unison/HashQualified'.hs b/unison-core/src/Unison/HashQualified'.hs new file mode 100644 index 0000000000..23816e06eb --- /dev/null +++ b/unison-core/src/Unison/HashQualified'.hs @@ -0,0 +1,126 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.HashQualified' where + +import Unison.Prelude + +import qualified Data.Text as Text +import Prelude hiding ( take ) +import Unison.Name ( Name ) +import qualified Unison.Name as Name +import Unison.NameSegment ( NameSegment ) +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import Unison.Referent ( Referent ) +import qualified Unison.Referent as Referent +import Unison.ShortHash ( ShortHash ) +import qualified Unison.ShortHash as SH +import qualified Unison.HashQualified as HQ + +data HashQualified' n = NameOnly n | HashQualified n ShortHash + deriving (Eq, Functor) + +type HQSegment = HashQualified' NameSegment + +type HashQualified = HashQualified' Name + +toHQ :: HashQualified' 
n -> HQ.HashQualified' n +toHQ = \case + NameOnly n -> HQ.NameOnly n + HashQualified n sh -> HQ.HashQualified n sh + +fromHQ :: HQ.HashQualified' n -> Maybe (HashQualified' n) +fromHQ = \case + HQ.NameOnly n -> Just $ NameOnly n + HQ.HashQualified n sh -> Just $ HashQualified n sh + HQ.HashOnly{} -> Nothing + +-- Like fromHQ, but turns hashes into hash-qualified empty names +fromHQ' :: Monoid n => HQ.HashQualified' n -> HashQualified' n +fromHQ' = \case + HQ.NameOnly n -> NameOnly n + HQ.HashQualified n sh -> HashQualified n sh + HQ.HashOnly h -> HashQualified mempty h + +toName :: HashQualified' n -> n +toName = \case + NameOnly name -> name + HashQualified name _ -> name + +nameLength :: HashQualified' Name -> Int +nameLength = Text.length . toText + +take :: Int -> HashQualified' n -> HashQualified' n +take i = \case + n@(NameOnly _) -> n + HashQualified n s -> if i == 0 then NameOnly n else HashQualified n (SH.take i s) + +toNameOnly :: HashQualified' n -> HashQualified' n +toNameOnly = fromName . toName + +toHash :: HashQualified' n -> Maybe ShortHash +toHash = \case + NameOnly _ -> Nothing + HashQualified _ sh -> Just sh + +toString :: Show n => HashQualified' n -> String +toString = Text.unpack . toText + +-- Parses possibly-hash-qualified into structured type. +fromText :: Text -> Maybe HashQualified +fromText t = case Text.breakOn "#" t of + (name, "" ) -> + Just $ NameOnly (Name.unsafeFromText name) -- safe bc breakOn # + (name, hash) -> + HashQualified (Name.unsafeFromText name) <$> SH.fromText hash + +unsafeFromText :: Text -> HashQualified +unsafeFromText txt = fromMaybe msg (fromText txt) where + msg = error ("HashQualified'.unsafeFromText " <> show txt) + +fromString :: String -> Maybe HashQualified +fromString = fromText . Text.pack + +toText :: Show n => HashQualified' n -> Text +toText = \case + NameOnly name -> Text.pack (show name) + HashQualified name hash -> Text.pack (show name) <> SH.toText hash + +-- Returns the full referent in the hash. 
Use HQ.take to just get a prefix +fromNamedReferent :: n -> Referent -> HashQualified' n +fromNamedReferent n r = HashQualified n (Referent.toShortHash r) + +-- Returns the full reference in the hash. Use HQ.take to just get a prefix +fromNamedReference :: n -> Reference -> HashQualified' n +fromNamedReference n r = HashQualified n (Reference.toShortHash r) + +fromName :: n -> HashQualified' n +fromName = NameOnly + +matchesNamedReferent :: Eq n => n -> Referent -> HashQualified' n -> Bool +matchesNamedReferent n r = \case + NameOnly n' -> n' == n + HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Referent.toShortHash r + +matchesNamedReference :: Eq n => n -> Reference -> HashQualified' n -> Bool +matchesNamedReference n r = \case + NameOnly n' -> n' == n + HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Reference.toShortHash r + +-- Use `requalify hq . Referent.Ref` if you want to pass in a `Reference`. +requalify :: HashQualified -> Referent -> HashQualified +requalify hq r = case hq of + NameOnly n -> fromNamedReferent n r + HashQualified n _ -> fromNamedReferent n r + +instance Ord n => Ord (HashQualified' n) where + compare a b = case compare (toName a) (toName b) of + EQ -> compare (toHash a) (toHash b) + o -> o + +instance IsString HashQualified where + fromString = unsafeFromText . Text.pack + + +instance Show n => Show (HashQualified' n) where + show = Text.unpack . 
toText diff --git a/unison-core/src/Unison/HashQualified.hs b/unison-core/src/Unison/HashQualified.hs new file mode 100644 index 0000000000..8fafec1e12 --- /dev/null +++ b/unison-core/src/Unison/HashQualified.hs @@ -0,0 +1,169 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.HashQualified where + +import Unison.Prelude hiding (fromString) + +import qualified Data.Text as Text +import Prelude hiding ( take ) +import Unison.Name ( Name ) +import qualified Unison.Name as Name +import Unison.Reference ( Reference ) +import qualified Unison.Reference as Reference +import Unison.Referent ( Referent ) +import qualified Unison.Referent as Referent +import Unison.ShortHash ( ShortHash ) +import qualified Unison.ShortHash as SH +import Unison.Var ( Var ) +import qualified Unison.Var as Var + +data HashQualified' n + = NameOnly n | HashOnly ShortHash | HashQualified n ShortHash + deriving (Eq, Functor, Show) + +type HashQualified = HashQualified' Name + +stripNamespace :: Text -> HashQualified -> HashQualified +stripNamespace namespace hq = case hq of + NameOnly name -> NameOnly $ strip name + HashQualified name sh -> HashQualified (strip name) sh + ho -> ho + where + strip name = + fromMaybe name $ Name.stripNamePrefix (Name.unsafeFromText namespace) name + +toName :: HashQualified' n -> Maybe n +toName = \case + NameOnly name -> Just name + HashQualified name _ -> Just name + HashOnly _ -> Nothing + +-- Sort the list of names by length of segments: smaller number of +-- segments is listed first. 
NameOnly < Hash qualified < Hash only +-- +-- Examples: +-- [foo.bar.baz, bar.baz] -> [bar.baz, foo.bar.baz] +-- [#a29dj2k91, foo.bar.baz] -> [foo.bar.baz, #a29dj2k91] +-- [foo.bar#abc, foo.bar] -> [foo.bar, foo.bar#abc] +-- [.foo.bar, foo.bar] -> [foo.bar, .foo.bar] +sortByLength :: [HashQualified' Name] -> [HashQualified' Name] +sortByLength hs = sortOn f hs where + f (NameOnly n) = (countDots n, 0, Left n) + f (HashQualified n _h) = (countDots n, 1, Left n) + f (HashOnly h) = (maxBound, 0, Right h) + countDots n = Text.count "." (Text.dropEnd 1 (Name.toText n)) + +hasName, hasHash :: HashQualified -> Bool +hasName = isJust . toName +hasHash = isJust . toHash + +toHash :: HashQualified' n -> Maybe ShortHash +toHash = \case + NameOnly _ -> Nothing + HashQualified _ sh -> Just sh + HashOnly sh -> Just sh + +-- partial: assumes either a name or hash is provided (or both) +fromNameHash :: Maybe Name -> Maybe ShortHash -> HashQualified +fromNameHash n h = case n of + Just name -> case h of + Just hash -> HashQualified name hash + Nothing -> NameOnly name + Nothing -> case h of + Just hash -> HashOnly hash + Nothing -> error "bad HQ construction" + +take :: Int -> HashQualified' n -> HashQualified' n +take i = \case + n@(NameOnly _) -> n + HashOnly s -> HashOnly (SH.take i s) + HashQualified n s -> if i == 0 then NameOnly n else HashQualified n (SH.take i s) + +toString :: Show n => HashQualified' n -> String +toString = Text.unpack . toText + +fromString :: String -> Maybe HashQualified +fromString = fromText . Text.pack + +unsafeFromString :: String -> HashQualified +unsafeFromString s = fromMaybe msg . fromString $ s where + msg = error $ "HashQualified.unsafeFromString " <> show s + +-- Parses possibly-hash-qualified into structured type. +-- Doesn't validate against base58 or the codebase. 
+fromText :: Text -> Maybe HashQualified +fromText t = case Text.breakOn "#" t of -- breakOn leaves the '#' on the RHS + (name, "" ) -> Just $ NameOnly (Name.unsafeFromText name) -- safe bc breakOn # + ("" , hash) -> HashOnly <$> SH.fromText hash + (name, hash) -> HashQualified (Name.unsafeFromText name) <$> SH.fromText hash + +-- Won't crash as long as SH.unsafeFromText doesn't crash on any input that +-- starts with '#', which is true as of the time of this writing, but not great. +unsafeFromText :: Text -> HashQualified +unsafeFromText txt = fromMaybe msg . fromText $ txt where + msg = error $ "HashQualified.unsafeFromText " <> show txt + +toText :: Show n => HashQualified' n -> Text +toText = \case + NameOnly name -> Text.pack (show name) + HashQualified name hash -> Text.pack (show name) <> SH.toText hash + HashOnly hash -> SH.toText hash + +-- Returns the full referent in the hash. Use HQ.take to just get a prefix +fromNamedReferent :: n -> Referent -> HashQualified' n +fromNamedReferent n r = HashQualified n (Referent.toShortHash r) + +-- Returns the full reference in the hash. Use HQ.take to just get a prefix +fromNamedReference :: n -> Reference -> HashQualified' n +fromNamedReference n r = HashQualified n (Reference.toShortHash r) + +fromReferent :: Referent -> HashQualified +fromReferent = HashOnly . Referent.toShortHash + +fromReference :: Reference -> HashQualified +fromReference = HashOnly . Reference.toShortHash + +fromPattern :: Reference -> Int -> HashQualified +fromPattern r cid = HashOnly $ Referent.patternShortHash r cid + +fromName :: n -> HashQualified' n +fromName = NameOnly + +unsafeFromVar :: Var v => v -> HashQualified +unsafeFromVar = unsafeFromText . Var.name + +fromVar :: Var v => v -> Maybe HashQualified +fromVar = fromText . Var.name + +toVar :: Var v => HashQualified -> v +toVar = Var.named . 
toText + +-- todo: find this logic elsewhere and replace with call to this +matchesNamedReferent :: Name -> Referent -> HashQualified -> Bool +matchesNamedReferent n r = \case + NameOnly n' -> n' == n + HashOnly sh -> sh `SH.isPrefixOf` Referent.toShortHash r + HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Referent.toShortHash r + +matchesNamedReference :: Name -> Reference -> HashQualified -> Bool +matchesNamedReference n r = \case + NameOnly n' -> n' == n + HashOnly sh -> sh `SH.isPrefixOf` Reference.toShortHash r + HashQualified n' sh -> n' == n && sh `SH.isPrefixOf` Reference.toShortHash r + +-- Use `requalify hq . Referent.Ref` if you want to pass in a `Reference`. +requalify :: HashQualified -> Referent -> HashQualified +requalify hq r = case hq of + NameOnly n -> fromNamedReferent n r + HashQualified n _ -> fromNamedReferent n r + HashOnly _ -> fromReferent r + +-- this implementation shows HashOnly before the others, because None < Some. +-- Flip it around carefully if HashOnly should come last. +instance Ord n => Ord (HashQualified' n) where + compare a b = case compare (toName a) (toName b) of + EQ -> compare (toHash a) (toHash b) + o -> o + +--instance Show n => Show (HashQualified' n) where +-- show = Text.unpack . toText diff --git a/unison-core/src/Unison/Hashable.hs b/unison-core/src/Unison/Hashable.hs new file mode 100644 index 0000000000..45b06a05ea --- /dev/null +++ b/unison-core/src/Unison/Hashable.hs @@ -0,0 +1,94 @@ +module Unison.Hashable where + +import Unison.Prelude + +import qualified Data.Map as Map +import qualified Data.Set as Set + +data Token h + = Tag !Word8 + | Bytes !ByteString + | Int !Int64 + | Text !Text + | Double !Double + | Hashed !h + | Nat !Word64 + +class Accumulate h where + accumulate :: [Token h] -> h + fromBytes :: ByteString -> h + toBytes :: h -> ByteString + +accumulateToken :: (Accumulate h, Hashable t) => t -> Token h +accumulateToken = Hashed . 
accumulate' + +accumulate' :: (Accumulate h, Hashable t) => t -> h +accumulate' = accumulate . tokens + +class Hashable t where + tokens :: Accumulate h => t -> [Token h] + +instance Hashable a => Hashable [a] where + tokens = map accumulateToken + +instance (Hashable a, Hashable b) => Hashable (a,b) where + tokens (a,b) = [accumulateToken a, accumulateToken b] + +instance (Hashable a) => Hashable (Set.Set a) where + tokens = tokens . Set.toList + +instance (Hashable k, Hashable v) => Hashable (Map.Map k v) where + tokens = tokens . Map.toList + +class Functor f => Hashable1 f where + -- | Produce a hash for an `f a`, given a hashing function for `a`. + -- If there is a notion of order-independence in some aspect of a subterm + -- of `f`, then the first argument (`hashUnordered :: [a] -> ([h], a -> h)`) + -- should be used to impose an order, and then apply that order in further hashing. + -- Otherwise the second argument (`hash :: a -> h`) should be used. + -- + -- Example 1: A simple functor with no unordered components. Hashable1 instance + -- just uses `hash`: + -- + -- data T a = One a | Two a a deriving Functor + -- + -- instance Hashable1 T where + -- hash1 _ hash t = case t of + -- One a -> accumulate [Tag 0, Hashed (hash a)] + -- Two a a2 -> accumulate [Tag 1, Hashed (hash a), Hashed (hash a2)] + -- + -- Example 2: A functor with unordered components. 
For hashing, we need to + -- pick a canonical ordering of the unordered components, so we + -- use `hashUnordered`: + -- + -- data U a = U { unordered :: [a], uno :: a, dos :: a } deriving Functor + -- + -- instance Hashable1 U where + -- hash1 hashUnordered _ (U unordered uno dos) = + -- let (hs, hash) = hashUnordered unordered + -- in accumulate $ map Hashed hs ++ [Hashed (hash uno), Hashed (hash dos)] + hash1 :: (Ord h, Accumulate h) => ([a] -> ([h], a -> h)) -> (a -> h) -> f a -> h + +instance Hashable () where + tokens _ = [] + +instance Hashable Double where + tokens d = [Double d] + +instance Hashable Text where + tokens s = [Text s] + +instance Hashable Char where + tokens c = [Nat $ fromIntegral $ fromEnum c] + +instance Hashable ByteString where + tokens bs = [Bytes bs] + +instance Hashable Word64 where + tokens w = [Nat w] + +instance Hashable Int64 where + tokens w = [Int w] + +instance Hashable Bool where + tokens b = [Tag . fromIntegral $ fromEnum b] diff --git a/unison-core/src/Unison/Kind.hs b/unison-core/src/Unison/Kind.hs new file mode 100644 index 0000000000..531ff42268 --- /dev/null +++ b/unison-core/src/Unison/Kind.hs @@ -0,0 +1,15 @@ +{-# LANGUAGE DeriveGeneric #-} + +module Unison.Kind where + +import Unison.Prelude + +import Unison.Hashable (Hashable) +import qualified Unison.Hashable as Hashable + +data Kind = Star | Arrow Kind Kind deriving (Eq,Ord,Read,Show,Generic) + +instance Hashable Kind where + tokens k = case k of + Star -> [Hashable.Tag 0] + Arrow k1 k2 -> (Hashable.Tag 1 : Hashable.tokens k1) ++ Hashable.tokens k2 diff --git a/unison-core/src/Unison/LabeledDependency.hs b/unison-core/src/Unison/LabeledDependency.hs new file mode 100644 index 0000000000..13f5a858a1 --- /dev/null +++ b/unison-core/src/Unison/LabeledDependency.hs @@ -0,0 +1,56 @@ +{-# LANGUAGE PatternSynonyms #-} + +module Unison.LabeledDependency + ( derivedTerm + , derivedType + , termRef + , typeRef + , referent + , dataConstructor + , effectConstructor + , fold + 
, referents + , toReference + , LabeledDependency + , partition + ) where + +import Unison.Prelude hiding (fold) + +import Unison.ConstructorType (ConstructorType(Data, Effect)) +import Unison.Reference (Reference(DerivedId), Id) +import Unison.Referent (Referent, pattern Ref, pattern Con, Referent'(Ref', Con')) +import qualified Data.Set as Set + +-- dumb constructor name is private +newtype LabeledDependency = X (Either Reference Referent) deriving (Eq, Ord, Show) + +derivedType, derivedTerm :: Id -> LabeledDependency +typeRef, termRef :: Reference -> LabeledDependency +referent :: Referent -> LabeledDependency +dataConstructor :: Reference -> Int -> LabeledDependency +effectConstructor :: Reference -> Int -> LabeledDependency + +derivedType = X . Left . DerivedId +derivedTerm = X . Right . Ref . DerivedId +typeRef = X . Left +termRef = X . Right . Ref +referent = X . Right +dataConstructor r cid = X . Right $ Con r cid Data +effectConstructor r cid = X . Right $ Con r cid Effect + +referents :: Foldable f => f Referent -> Set LabeledDependency +referents rs = Set.fromList (map referent $ toList rs) + +fold :: (Reference -> a) -> (Referent -> a) -> LabeledDependency -> a +fold f g (X e) = either f g e + +partition :: Foldable t => t LabeledDependency -> ([Reference], [Referent]) +partition = partitionEithers . map (\(X e) -> e) . 
toList + +-- | Left TypeRef | Right TermRef +toReference :: LabeledDependency -> Either Reference Reference +toReference = \case + X (Left r) -> Left r + X (Right (Ref' r)) -> Right r + X (Right (Con' r _ _)) -> Left r diff --git a/unison-core/src/Unison/Name.hs b/unison-core/src/Unison/Name.hs new file mode 100644 index 0000000000..80bd2ddaac --- /dev/null +++ b/unison-core/src/Unison/Name.hs @@ -0,0 +1,173 @@ +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Name + ( Name(Name) + , fromString + , isPrefixOf + , joinDot + , makeAbsolute + , parent + , sortNames + , sortNamed + , sortNamed' + , stripNamePrefix + , stripPrefixes + , segments + , segments' + , suffixes + , toString + , toText + , toVar + , unqualified + , unqualified' + , unsafeFromText + , unsafeFromString + , fromSegment + , fromVar + ) +where + +import Unison.Prelude +import qualified Unison.NameSegment as NameSegment +import Unison.NameSegment ( NameSegment(NameSegment) + , segments' + ) + +import Control.Lens ( unsnoc ) +import qualified Control.Lens as Lens +import qualified Data.Text as Text +import qualified Unison.Hashable as H +import Unison.Var ( Var ) +import qualified Unison.Var as Var +import qualified Data.RFC5051 as RFC5051 +import Data.List ( sortBy, tails ) + +newtype Name = Name { toText :: Text } deriving (Eq, Ord, Monoid, Semigroup) + +sortNames :: [Name] -> [Name] +sortNames = sortNamed id + +sortNamed :: (a -> Name) -> [a] -> [a] +sortNamed by as = let + as' = [ (a, Text.unpack (toText (by a))) | a <- as ] + comp (_,s) (_,s2) = RFC5051.compareUnicode s s2 + in fst <$> sortBy comp as' + +-- | Like sortNamed, but takes an additional backup comparison function if two +-- names are equal. 
+sortNamed' :: (a -> Name) -> (a -> a -> Ordering) -> [a] -> [a] +sortNamed' by by2 as = let + as' = [ (a, Text.unpack (toText (by a))) | a <- as ] + comp (a,s) (a2,s2) = RFC5051.compareUnicode s s2 <> by2 a a2 + in fst <$> sortBy comp as' + +unsafeFromText :: Text -> Name +unsafeFromText t = + if Text.any (== '#') t then error $ "not a name: " <> show t else Name t + +unsafeFromString :: String -> Name +unsafeFromString = unsafeFromText . Text.pack + +toVar :: Var v => Name -> v +toVar (Name t) = Var.named t + +fromVar :: Var v => v -> Name +fromVar = unsafeFromText . Var.name + +toString :: Name -> String +toString = Text.unpack . toText + +isPrefixOf :: Name -> Name -> Bool +a `isPrefixOf` b = toText a `Text.isPrefixOf` toText b + +-- stripTextPrefix a.b. a.b.c = Just c +-- stripTextPrefix a.b a.b.c = Just .c; you probably don't want to do this +-- stripTextPrefix x.y. a.b.c = Nothing +-- stripTextPrefix "" a.b.c = undefined +_stripTextPrefix :: Text -> Name -> Maybe Name +_stripTextPrefix prefix name = + Name <$> Text.stripPrefix prefix (toText name) + +-- stripNamePrefix a.b a.b.c = Just c +-- stripNamePrefix a.b. a.b.c = undefined, "a.b." isn't a valid name IMO +-- stripNamePrefix x.y a.b.c = Nothing, x.y isn't a prefix of a.b.c +-- stripNamePrefix "" a.b.c = undefined, "" isn't a valid name IMO +-- stripNamePrefix . .Nat = Just Nat +stripNamePrefix :: Name -> Name -> Maybe Name +stripNamePrefix prefix name = + Name <$> Text.stripPrefix (toText prefix <> mid) (toText name) + where + mid = if toText prefix == "." then "" else "." + +-- a.b.c.d -> d +stripPrefixes :: Name -> Name +stripPrefixes = fromSegment . last . segments + +joinDot :: Name -> Name -> Name +joinDot prefix suffix = + if toText prefix == "." then Name (toText prefix <> toText suffix) + else Name (toText prefix <> "." <> toText suffix) + +unqualified :: Name -> Name +unqualified = unsafeFromText . unqualified' . toText + +-- parent . 
-> Nothing +-- parent + -> Nothing +-- parent foo -> Nothing +-- parent foo.bar -> foo +-- parent foo.bar.+ -> foo.bar +parent :: Name -> Maybe Name +parent n = case unsnoc (NameSegment.toText <$> segments n) of + Nothing -> Nothing + Just ([] , _) -> Nothing + Just (init, _) -> Just $ Name (Text.intercalate "." init) + +-- suffixes "" -> [] +-- suffixes bar -> [bar] +-- suffixes foo.bar -> [foo.bar, bar] +-- suffixes foo.bar.baz -> [foo.bar.baz, bar.baz, baz] +-- suffixes ".base.." -> [base.., .] +suffixes :: Name -> [Name] +suffixes (Name "") = [] +suffixes (Name n ) = fmap up . filter (not . null) . tails $ segments' n + where up ns = Name (Text.intercalate "." ns) + +unqualified' :: Text -> Text +unqualified' = last . segments' + +makeAbsolute :: Name -> Name +makeAbsolute n | toText n == "." = Name ".." + | Text.isPrefixOf "." (toText n) = n + | otherwise = Name ("." <> toText n) + +instance Show Name where + show = toString + +instance IsString Name where + fromString = unsafeFromText . Text.pack + +instance H.Hashable Name where + tokens s = [H.Text (toText s)] + +fromSegment :: NameSegment -> Name +fromSegment = unsafeFromText . NameSegment.toText + +-- Smarter segmentation than `text.splitOn "."` +-- e.g. split `base..` into `[base,.]` +segments :: Name -> [NameSegment] +segments (Name n) = NameSegment <$> segments' n + +instance Lens.Snoc Name Name NameSegment NameSegment where + _Snoc = Lens.prism snoc unsnoc + where + snoc :: (Name, NameSegment) -> Name + snoc (n, s) = joinDot n (fromSegment s) + unsnoc :: Name -> Either Name (Name, NameSegment) + unsnoc n@(segments -> ns) = case Lens.unsnoc (NameSegment.toText <$> ns) of + Nothing -> Left n + Just ([], _) -> Left n + Just (init, last) -> + Right (Name (Text.intercalate "." 
init), NameSegment last) diff --git a/unison-core/src/Unison/NameSegment.hs b/unison-core/src/Unison/NameSegment.hs new file mode 100644 index 0000000000..d220ebfabc --- /dev/null +++ b/unison-core/src/Unison/NameSegment.hs @@ -0,0 +1,41 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.NameSegment where + +import Unison.Prelude + +import qualified Data.Text as Text +import qualified Unison.Hashable as H + +-- Represents the parts of a name between the `.`s +newtype NameSegment = NameSegment { toText :: Text } deriving (Eq, Ord) + +-- Split text into segments. A smarter version of `Text.splitOn` that handles +-- the name `.` properly. +segments' :: Text -> [Text] +segments' n = go split + where + split = Text.splitOn "." n + go [] = [] + go ("" : "" : z) = "." : go z + go ("" : z) = go z + go (x : y) = x : go y + +instance H.Hashable NameSegment where + tokens s = [H.Text (toText s)] + +isEmpty :: NameSegment -> Bool +isEmpty ns = toText ns == mempty + +isPrefixOf :: NameSegment -> NameSegment -> Bool +isPrefixOf n1 n2 = Text.isPrefixOf (toText n1) (toText n2) + +toString :: NameSegment -> String +toString = Text.unpack . toText + +instance Show NameSegment where + show = Text.unpack . toText + +instance IsString NameSegment where + fromString = NameSegment . 
Text.pack + diff --git a/unison-core/src/Unison/Names2.hs b/unison-core/src/Unison/Names2.hs new file mode 100644 index 0000000000..6c43f612f1 --- /dev/null +++ b/unison-core/src/Unison/Names2.hs @@ -0,0 +1,334 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Names2 + ( Names0 + , Names'(Names) + , Names + , addTerm + , addType + , allReferences + , conflicts + , contains + , difference + , filter + , filterByHQs + , filterBySHs + , filterTypes + , hqName + , hqTermName + , hqTypeName + , hqTermName' + , hqTypeName' + , _hqTermName + , _hqTypeName + , _hqTermAliases + , _hqTypeAliases + , names0ToNames + , prefix0 + , restrictReferences + , refTermsNamed + , terms + , types + , termReferences + , termReferents + , typeReferences + , termsNamed + , typesNamed + , unionLeft + , unionLeftName + , namesForReference + , namesForReferent + ) +where + +import Unison.Prelude + +import qualified Data.Set as Set +import Prelude hiding (filter) +import Unison.HashQualified' (HashQualified) +import qualified Unison.HashQualified' as HQ +import Unison.Name (Name) +import qualified Unison.Name as Name +import Unison.Reference (Reference) +import qualified Unison.Reference as Reference +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import Unison.Util.Relation (Relation) +import qualified Unison.Util.Relation as R +import qualified Unison.ShortHash as SH +import Unison.ShortHash (ShortHash) + +-- This will support the APIs of both PrettyPrintEnv and the old Names. +-- For pretty-printing, we need to look up names for References; they may have +-- some hash-qualification, depending on the context. 
+-- For parsing (both .u files and command-line args) +data Names' n = Names + { terms :: Relation n Referent + , types :: Relation n Reference + } deriving (Eq,Ord) + +type Names = Names' HashQualified +type Names0 = Names' Name + +names0ToNames :: Names0 -> Names +names0ToNames names0 = Names terms' types' where + terms' = R.map doTerm (terms names0) + types' = R.map doType (types names0) + length = numHashChars names0 + doTerm (n, r) = + if Set.size (R.lookupDom n (terms names0)) > 1 + then (HQ.take length $ HQ.fromNamedReferent n r, r) + else (HQ.NameOnly n, r) + doType (n, r) = + if Set.size (R.lookupDom n (types names0)) > 1 + then (HQ.take length $ HQ.fromNamedReference n r, r) + else (HQ.NameOnly n, r) + +termReferences, typeReferences, allReferences :: Names' n -> Set Reference +termReferences Names{..} = Set.map Referent.toReference $ R.ran terms +typeReferences Names{..} = R.ran types +allReferences n = termReferences n <> typeReferences n + +termReferents :: Names' n -> Set Referent +termReferents Names{..} = R.ran terms + +restrictReferences :: Ord n => Set Reference -> Names' n -> Names' n +restrictReferences refs Names{..} = Names terms' types' where + terms' = R.filterRan ((`Set.member` refs) . Referent.toReference) terms + types' = R.filterRan (`Set.member` refs) types + +-- | Guide to unionLeft* +-- Is it ok to create new aliases for parsing? +-- Sure. +-- +-- Is it ok to create name conflicts for parsing? +-- It's okay but not great. The user will have to hash-qualify to disambiguate. +-- +-- Is it ok to create new aliases for pretty-printing? +-- Not helpful, we need to choose a name to show. +-- We'll just have to choose one at random if there are aliases. +-- Is it ok to create name conflicts for pretty-printing? +-- Still okay but not great. The pretty-printer will have to hash-qualify +-- to disambiguate. +-- +-- Thus, for parsing: +-- unionLeftName is good if the name `n` on the left is the only `n` the +-- user will want to reference. 
It allows the rhs to add aliases. +-- unionLeftRef allows new conflicts but no new aliases. Lame? +-- (<>) is ok for parsing if we expect to add some conflicted names, +-- e.g. from history +-- +-- For pretty-printing: +-- Probably don't want to add new aliases, unless we don't know which +-- `Names` is higher priority. So if we do have a preferred `Names`, +-- don't use `unionLeftName` or (<>). +-- You don't want to create new conflicts either if you have a preferred +-- `Names`. So in this case, don't use `unionLeftRef` either. +-- I guess that leaves `unionLeft`. +-- +-- Not sure if the above is helpful or correct! + +-- unionLeft two Names, including new aliases, but excluding new name conflicts. +-- e.g. unionLeftName [foo -> #a, bar -> #a, cat -> #c] +-- [foo -> #b, baz -> #c] +-- = [foo -> #a, bar -> #a, baz -> #c, cat -> #c)] +-- Btw, it's ok to create name conflicts for parsing environments, if you don't +-- mind disambiguating. +unionLeftName :: Ord n => Names' n -> Names' n -> Names' n +unionLeftName = unionLeft' $ const . R.memberDom + +-- unionLeft two Names, including new name conflicts, but excluding new aliases. +-- e.g. unionLeftRef [foo -> #a, bar -> #a, cat -> #c] +-- [foo -> #b, baz -> #c] +-- = [foo -> #a, bar -> #a, foo -> #b, cat -> #c] +_unionLeftRef :: Ord n => Names' n -> Names' n -> Names' n +_unionLeftRef = unionLeft' $ const R.memberRan + +-- unionLeft two Names, but don't create new aliases or new name conflicts. +-- e.g. unionLeft [foo -> #a, bar -> #a, cat -> #c] +-- [foo -> #b, baz -> #c] +-- = [foo -> #a, bar -> #a, cat -> #c] +unionLeft :: Ord n => Names' n -> Names' n -> Names' n +unionLeft = unionLeft' go + where go n r acc = R.memberDom n acc || R.memberRan r acc + +-- implementation detail of the above +unionLeft' + :: Ord n + => (forall a b . 
(Ord a, Ord b) => a -> b -> Relation a b -> Bool) + -> Names' n + -> Names' n + -> Names' n +unionLeft' p a b = Names terms' types' + where + terms' = foldl' go (terms a) (R.toList $ terms b) + types' = foldl' go (types a) (R.toList $ types b) + go :: (Ord a, Ord b) => Relation a b -> (a, b) -> Relation a b + go acc (n, r) = if p n r acc then acc else R.insert n r acc + +-- could move this to a read-only field in Names +-- todo: kill this function and pass thru an Int from the codebase, I suppose +numHashChars :: Names' n -> Int +numHashChars b = lenFor hashes + where lenFor _hashes = 3 + hashes = foldl' f (foldl' g mempty (R.ran $ types b)) (R.ran $ terms b) + g s r = Set.insert r s + f s r = Set.insert (Referent.toReference r) s + +termsNamed :: Ord n => Names' n -> n -> Set Referent +termsNamed = flip R.lookupDom . terms + +refTermsNamed :: Ord n => Names' n -> n -> Set Reference +refTermsNamed names n = + Set.fromList [ r | Referent.Ref r <- toList $ termsNamed names n ] + +typesNamed :: Ord n => Names' n -> n -> Set Reference +typesNamed = flip R.lookupDom . types + +namesForReferent :: Names' n -> Referent -> Set n +namesForReferent names r = R.lookupRan r (terms names) + +namesForReference :: Names' n -> Reference -> Set n +namesForReference names r = R.lookupRan r (types names) + +termAliases :: Ord n => Names' n -> n -> Referent -> Set n +termAliases names n r = Set.delete n $ namesForReferent names r + +typeAliases :: Ord n => Names' n -> n -> Reference -> Set n +typeAliases names n r = Set.delete n $ namesForReference names r + +addType :: Ord n => n -> Reference -> Names' n -> Names' n +addType n r = (<> fromTypes [(n, r)]) + +addTerm :: Ord n => n -> Referent -> Names' n -> Names' n +addTerm n r = (<> fromTerms [(n, r)]) + +-- | Like hqTermName and hqTypeName, but considers term and type names to +-- conflict with each other (so will hash-qualify if there is e.g. both a term +-- and a type named "foo"). 
+-- +-- This is useful in contexts such as printing branch diffs. Example: +-- +-- - Deletes: +-- +-- foo +-- foo +-- +-- We want to append the hash regardless of whether or not one is a term and the +-- other is a type. +hqName :: Ord n => Names' n -> n -> Either Reference Referent -> HQ.HashQualified' n +hqName b n = \case + Left r -> if ambiguous then _hqTypeName' b n r else HQ.fromName n + Right r -> if ambiguous then _hqTermName' b n r else HQ.fromName n + where + ambiguous = Set.size (termsNamed b n) + Set.size (typesNamed b n) > 1 + +-- Conditionally apply hash qualifier to term name. +-- Should be the same as the input name if the Names0 is unconflicted. +hqTermName :: Ord n => Int -> Names' n -> n -> Referent -> HQ.HashQualified' n +hqTermName hqLen b n r = if Set.size (termsNamed b n) > 1 + then hqTermName' hqLen n r + else HQ.fromName n + +hqTypeName :: Ord n => Int -> Names' n -> n -> Reference -> HQ.HashQualified' n +hqTypeName hqLen b n r = if Set.size (typesNamed b n) > 1 + then hqTypeName' hqLen n r + else HQ.fromName n + +_hqTermName :: Ord n => Names' n -> n -> Referent -> HQ.HashQualified' n +_hqTermName b n r = if Set.size (termsNamed b n) > 1 + then _hqTermName' b n r + else HQ.fromName n + +_hqTypeName :: Ord n => Names' n -> n -> Reference -> HQ.HashQualified' n +_hqTypeName b n r = if Set.size (typesNamed b n) > 1 + then _hqTypeName' b n r + else HQ.fromName n + +_hqTypeAliases :: + Ord n => Names' n -> n -> Reference -> Set (HQ.HashQualified' n) +_hqTypeAliases b n r = Set.map (flip (_hqTypeName b) r) (typeAliases b n r) + +_hqTermAliases :: Ord n => Names' n -> n -> Referent -> Set (HQ.HashQualified' n) +_hqTermAliases b n r = Set.map (flip (_hqTermName b) r) (termAliases b n r) + +-- Unconditionally apply hash qualifier long enough to distinguish all the +-- References in this Names0. 
+hqTermName' :: Int -> n -> Referent -> HQ.HashQualified' n +hqTermName' hqLen n r = + HQ.take hqLen $ HQ.fromNamedReferent n r + +hqTypeName' :: Int -> n -> Reference -> HQ.HashQualified' n +hqTypeName' hqLen n r = + HQ.take hqLen $ HQ.fromNamedReference n r + +_hqTermName' :: Names' n -> n -> Referent -> HQ.HashQualified' n +_hqTermName' b n r = + HQ.take (numHashChars b) $ HQ.fromNamedReferent n r + +_hqTypeName' :: Names' n -> n -> Reference -> HQ.HashQualified' n +_hqTypeName' b n r = + HQ.take (numHashChars b) $ HQ.fromNamedReference n r + +fromTerms :: Ord n => [(n, Referent)] -> Names' n +fromTerms ts = Names (R.fromList ts) mempty + +fromTypes :: Ord n => [(n, Reference)] -> Names' n +fromTypes ts = Names mempty (R.fromList ts) + +prefix0 :: Name -> Names0 -> Names0 +prefix0 n (Names terms types) = Names terms' types' where + terms' = R.mapDom (Name.joinDot n) terms + types' = R.mapDom (Name.joinDot n) types + +filter :: Ord n => (n -> Bool) -> Names' n -> Names' n +filter f (Names terms types) = Names (R.filterDom f terms) (R.filterDom f types) + +-- currently used for filtering before a conditional `add` +filterByHQs :: Set HashQualified -> Names0 -> Names0 +filterByHQs hqs Names{..} = Names terms' types' where + terms' = R.filter f terms + types' = R.filter g types + f (n, r) = any (HQ.matchesNamedReferent n r) hqs + g (n, r) = any (HQ.matchesNamedReference n r) hqs + +filterBySHs :: Set ShortHash -> Names0 -> Names0 +filterBySHs shs Names{..} = Names terms' types' where + terms' = R.filter f terms + types' = R.filter g types + f (_n, r) = any (`SH.isPrefixOf` Referent.toShortHash r) shs + g (_n, r) = any (`SH.isPrefixOf` Reference.toShortHash r) shs + +filterTypes :: Ord n => (n -> Bool) -> Names' n -> Names' n +filterTypes f (Names terms types) = Names terms (R.filterDom f types) + +difference :: Ord n => Names' n -> Names' n -> Names' n +difference a b = Names (R.difference (terms a) (terms b)) + (R.difference (types a) (types b)) + +contains :: 
Names' n -> Reference -> Bool +contains names r = + -- this check makes `contains` O(n) instead of O(log n) + (Set.member r . Set.map Referent.toReference . R.ran) (terms names) + || R.memberRan r (types names) + +-- | filters out everything from the domain except what's conflicted +conflicts :: Ord n => Names' n -> Names' n +conflicts Names{..} = Names (R.filterManyDom terms) (R.filterManyDom types) + +instance Ord n => Semigroup (Names' n) where (<>) = mappend + +instance Ord n => Monoid (Names' n) where + mempty = Names mempty mempty + Names e1 t1 `mappend` Names e2 t2 = + Names (e1 <> e2) (t1 <> t2) + +instance Show n => Show (Names' n) where + show (Names terms types) = "Terms:\n" ++ + foldMap (\(n, r) -> " " ++ show n ++ " -> " ++ show r ++ "\n") (R.toList terms) ++ "\n" ++ + "Types:\n" ++ + foldMap (\(n, r) -> " " ++ show n ++ " -> " ++ show r ++ "\n") (R.toList types) ++ "\n" + diff --git a/unison-core/src/Unison/Names3.hs b/unison-core/src/Unison/Names3.hs new file mode 100644 index 0000000000..fba95b72a7 --- /dev/null +++ b/unison-core/src/Unison/Names3.hs @@ -0,0 +1,240 @@ +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Names3 where + +import Unison.Prelude + +import Data.List.Extra (nubOrd) +import Unison.HashQualified (HashQualified) +import qualified Unison.HashQualified as HQ +import qualified Unison.HashQualified' as HQ' +import Unison.Name (Name) +import Unison.Reference as Reference +import Unison.Referent as Referent +import Unison.Util.Relation (Relation) +import qualified Data.Set as Set +import qualified Data.Map as Map +import qualified Unison.Name as Name +import qualified Unison.Names2 +import qualified Unison.Names2 as Names +import qualified Unison.Util.List as List +import qualified Unison.Util.Relation as R +import qualified Unison.ConstructorType as CT + +data Names = Names { currentNames :: Names0, oldNames :: Names0 } deriving Show + +type Names0 = 
Unison.Names2.Names0 +pattern Names0 terms types = Unison.Names2.Names terms types + +data ResolutionFailure v a + = TermResolutionFailure v a (Set Referent) + | TypeResolutionFailure v a (Set Reference) + deriving (Eq,Ord,Show) + +type ResolutionResult v a r = Either (Seq (ResolutionFailure v a)) r + +-- For all names in `ns`, (ex: foo.bar.baz), generate the list of suffixes +-- of that name [[foo.bar.baz], [bar.baz], [baz]]. Insert these suffixes +-- into a multimap map along with their corresponding refs. Any suffix +-- which is unique is added as an entry to `ns`. +suffixify0 :: Names0 -> Names0 +suffixify0 ns = ns <> suffixNs + where + suffixNs = names0 (R.fromList uniqueTerms) (R.fromList uniqueTypes) + terms = List.multimap [ (n,ref) | (n0,ref) <- R.toList (terms0 ns), n <- Name.suffixes n0 ] + types = List.multimap [ (n,ref) | (n0,ref) <- R.toList (types0 ns), n <- Name.suffixes n0 ] + uniqueTerms = [ (n,ref) | (n, nubOrd -> [ref]) <- Map.toList terms ] + uniqueTypes = [ (n,ref) | (n, nubOrd -> [ref]) <- Map.toList types ] + +suffixify :: Names -> Names +suffixify ns = Names (suffixify0 (currentNames ns)) (oldNames ns) + +filterTypes :: (Name -> Bool) -> Names0 -> Names0 +filterTypes = Unison.Names2.filterTypes + +-- Simple 2 way diff, has the property that: +-- addedNames (diff0 n1 n2) == removedNames (diff0 n2 n1) +-- +-- `addedNames` are names in `n2` but not `n1` +-- `removedNames` are names in `n1` but not `n2` +diff0 :: Names0 -> Names0 -> Diff +diff0 n1 n2 = Diff n1 added removed where + added = Names0 (terms0 n2 `R.difference` terms0 n1) + (types0 n2 `R.difference` types0 n1) + removed = Names0 (terms0 n1 `R.difference` terms0 n2) + (types0 n1 `R.difference` types0 n2) + +data Diff = + Diff { originalNames :: Names0 + , addedNames :: Names0 + , removedNames :: Names0 + } deriving Show + +isEmptyDiff :: Diff -> Bool +isEmptyDiff d = isEmpty0 (addedNames d) && isEmpty0 (removedNames d) + +isEmpty0 :: Names0 -> Bool +isEmpty0 n = R.null (terms0 n) && 
R.null (types0 n) + +-- Add `n1` to `currentNames`, shadowing anything with the same name and +-- moving shadowed definitions into `oldNames` so they can can still be +-- referenced hash qualified. +push :: Names0 -> Names -> Names +push n1 ns = Names (unionLeft0 n1 cur) (oldNames ns <> shadowed) where + cur = currentNames ns + shadowed = names0 terms' types' where + terms' = R.dom (terms0 n1) R.<| (terms0 cur `R.difference` terms0 n1) + types' = R.dom (types0 n1) R.<| (types0 cur `R.difference` types0 n1) + unionLeft0 :: Names0 -> Names0 -> Names0 + unionLeft0 n1 n2 = names0 terms' types' where + terms' = terms0 n1 <> R.subtractDom (R.dom $ terms0 n1) (terms0 n2) + types' = types0 n1 <> R.subtractDom (R.dom $ types0 n1) (types0 n2) + +unionLeft0 :: Names0 -> Names0 -> Names0 +unionLeft0 = Unison.Names2.unionLeft + +unionLeftName0 :: Names0 -> Names0 -> Names0 +unionLeftName0 = Unison.Names2.unionLeftName + +map0 :: (Name -> Name) -> Names0 -> Names0 +map0 f (Names.Names terms types) = Names.Names terms' types' where + terms' = R.mapDom f terms + types' = R.mapDom f types + +names0 :: Relation Name Referent -> Relation Name Reference -> Names0 +names0 = Unison.Names2.Names + +types0 :: Names0 -> Relation Name Reference +types0 = Names.types + +terms0 :: Names0 -> Relation Name Referent +terms0 = Names.terms + +-- if I push an existing name, the pushed reference should be the thing +-- if I push a different name for the same thing, i suppose they should coexist +-- thus, `unionLeftName0`. +shadowing :: Names0 -> Names -> Names +shadowing prio (Names current old) = + Names (prio `unionLeftName0` current) (current <> old) + +makeAbsolute0:: Names0 -> Names0 +makeAbsolute0 = map0 Name.makeAbsolute + +-- do a prefix match on currentNames and, if no match, then check oldNames. 
+lookupHQType :: HashQualified -> Names -> Set Reference +lookupHQType hq Names{..} = case hq of + HQ.NameOnly n -> R.lookupDom n (Names.types currentNames) + HQ.HashQualified n sh -> case matches sh currentNames of + s | (not . null) s -> s + | otherwise -> matches sh oldNames + where + matches sh ns = Set.filter (Reference.isPrefixOf sh) (R.lookupDom n $ Names.types ns) + HQ.HashOnly sh -> case matches sh currentNames of + s | (not . null) s -> s + | otherwise -> matches sh oldNames + where + matches sh ns = Set.filter (Reference.isPrefixOf sh) (R.ran $ Names.types ns) + +hasTermNamed :: Name -> Names -> Bool +hasTermNamed n ns = not (Set.null $ lookupHQTerm (HQ.NameOnly n) ns) + +hasTypeNamed :: Name -> Names -> Bool +hasTypeNamed n ns = not (Set.null $ lookupHQType (HQ.NameOnly n) ns) + +lookupHQTerm :: HashQualified -> Names -> Set Referent +lookupHQTerm hq Names{..} = case hq of + HQ.NameOnly n -> R.lookupDom n (Names.terms currentNames) + HQ.HashQualified n sh -> case matches sh currentNames of + s | (not . null) s -> s + | otherwise -> matches sh oldNames + where + matches sh ns = Set.filter (Referent.isPrefixOf sh) (R.lookupDom n $ Names.terms ns) + HQ.HashOnly sh -> case matches sh currentNames of + s | (not . null) s -> s + | otherwise -> matches sh oldNames + where + matches sh ns = Set.filter (Referent.isPrefixOf sh) (R.ran $ Names.terms ns) + +-- If `r` is in "current" names, look up each of its names, and hash-qualify +-- them if they are conflicted names. If `r` isn't in "current" names, look up +-- each of its "old" names and hash-qualify them. +typeName :: Int -> Reference -> Names -> Set HQ'.HashQualified +typeName length r Names{..} = + if R.memberRan r . Names.types $ currentNames + then Set.map (\n -> if isConflicted n then hq n else HQ'.fromName n) + (R.lookupRan r . Names.types $ currentNames) + else Set.map hq (R.lookupRan r . 
Names.types $ oldNames) + where hq n = HQ'.take length (HQ'.fromNamedReference n r) + isConflicted n = R.manyDom n (Names.types currentNames) + +termName :: Int -> Referent -> Names -> Set HQ'.HashQualified +termName length r Names{..} = + if R.memberRan r . Names.terms $ currentNames + then Set.map (\n -> if isConflicted n then hq n else HQ'.fromName n) + (R.lookupRan r . Names.terms $ currentNames) + else Set.map hq (R.lookupRan r . Names.terms $ oldNames) + where hq n = HQ'.take length (HQ'.fromNamedReferent n r) + isConflicted n = R.manyDom n (Names.terms currentNames) + +-- Set HashQualified -> Branch m -> Action' m v Names +-- Set HashQualified -> Branch m -> Free (Command m i v) Names +-- Set HashQualified -> Branch m -> Command m i v Names +-- populate historical names +lookupHQPattern :: HQ.HashQualified -> Names -> Set (Reference, Int) +lookupHQPattern hq names = Set.fromList + [ (r, cid) | Referent.Con r cid _ <- toList $ lookupHQTerm hq names ] + +-- Finds all the constructors for the given type in the `Names0` +constructorsForType0 :: Reference -> Names0 -> [(Name,Referent)] +constructorsForType0 r ns = let + -- rather than searching all of names, we use the known possible forms + -- that the constructors can take + possibleDatas = [ Referent.Con r cid CT.Data | cid <- [0..] ] + possibleEffects = [ Referent.Con r cid CT.Effect | cid <- [0..] ] + trim [] = [] + trim (h:t) = case R.lookupRan h (terms0 ns) of + s | Set.null s -> [] + | otherwise -> [ (n,h) | n <- toList s ] ++ trim t + in trim possibleEffects ++ trim possibleDatas + +-- Given a mapping from name to qualified name, update a `Names`, +-- so for instance if the input has [(Some, Optional.Some)], +-- and `Optional.Some` is a constructor in the input `Names`, +-- the alias `Some` will map to that same constructor and shadow +-- anything else that is currently called `Some`. +-- +-- Only affects `currentNames`. 
+importing :: [(Name, Name)] -> Names -> Names +importing shortToLongName ns = + ns { currentNames = importing0 shortToLongName (currentNames ns) } + +importing0 :: [(Name, Name)] -> Names0 -> Names0 +importing0 shortToLongName ns = + Names.Names + (foldl' go (terms0 ns) shortToLongName) + (foldl' go (types0 ns) shortToLongName) + where + go :: (Show a, Ord a, Ord b) => Relation a b -> (a, a) -> Relation a b + go m (shortname, qname) = case R.lookupDom qname m of + s | Set.null s -> m + | otherwise -> R.insertManyRan shortname s (R.deleteDom shortname m) + +-- Converts a wildcard import into a list of explicit imports, of the form +-- [(suffix, full)]. Example: if `io` contains two functions, `foo` and +-- `bar`, then `expandWildcardImport io` will produce +-- `[(foo, io.foo), (bar, io.bar)]`. +expandWildcardImport :: Name -> Names0 -> [(Name,Name)] +expandWildcardImport prefix ns = + [ (suffix, full) | Just (suffix,full) <- go <$> R.toList (terms0 ns) ] <> + [ (suffix, full) | Just (suffix,full) <- go <$> R.toList (types0 ns) ] + where + go (full, _) = case Name.stripNamePrefix prefix full of + Nothing -> Nothing + Just suffix -> Just (suffix, full) + +deleteTerms0 :: [Name] -> Names0 -> Names0 +deleteTerms0 ns n0 = names0 terms' (types0 n0) + where + terms' = R.subtractDom (Set.fromList ns) (terms0 n0) diff --git a/unison-core/src/Unison/Paths.hs b/unison-core/src/Unison/Paths.hs new file mode 100644 index 0000000000..13ca4645e4 --- /dev/null +++ b/unison-core/src/Unison/Paths.hs @@ -0,0 +1,204 @@ +{-# Language DeriveGeneric #-} + +module Unison.Paths where + +import Unison.Prelude + +import Data.List +import Unison.ABT (V) +import Unison.Var (Var) +import qualified Data.Sequence as Sequence +import qualified Unison.ABT as ABT +import qualified Unison.Term as E +import qualified Unison.Type as T + +type Type v = T.Type v () +type Term v = E.Term v () + +data Target v + = Term (Term v) + | Type (Type v) + | Var v + | Declaration v (Term v) deriving Generic + -- 
Metadata + +vmap :: Ord v2 => (v -> v2) -> Target v -> Target v2 +vmap f (Var v) = Var (f v) +vmap f (Declaration v b) = Declaration (f v) (E.vmap f b) +vmap f (Term t) = Term (E.vmap f t) +vmap f (Type t) = Type (ABT.vmap f t) + +data PathElement + = Fn -- ^ Points at function in a function/type application + | Arg -- ^ Points at the argument of a function/type application + | Body -- ^ Points at the body of a lambda, let, binding, forall, or annotation + | Bound -- ^ Points at the symbol bound by a `let`, `lambda` or `forall` binder + | Binding !Int -- ^ Points at a particular binding in a let + | Index !Int -- ^ Points at the index of a vector + | Annotation -- ^ Points into the annotation + | Input -- ^ Points at the left of an `Arrow` + | Output -- ^ Points at the right of an `Arrow` + deriving (Eq,Ord,Show,Generic) + +focus1 + :: Var v + => PathElement + -> ABT.Path (Target v) (Target (V v)) (Target v) (Target (V v)) [v] +focus1 e = ABT.Path go' + where + go' t = go e t + w = E.vmap ABT.Bound + wt = ABT.vmap ABT.Bound + go Fn (Term (E.App' fn arg)) = Just + (Term fn, \fn -> Term <$> (E.app () <$> asTerm fn <*> pure (w arg)), []) + go Fn (Type (T.App' fn arg)) = + Just + (Type fn, \fn -> Type <$> (T.app () <$> asType fn <*> pure (wt arg)), []) + go Arg (Term (E.App' fn arg)) = + Just (Term arg, \arg -> Term <$> (E.app () (w fn) <$> asTerm arg), []) + go Arg (Type (T.App' fn arg)) = + Just (Type arg, \arg -> Type <$> (T.app () (wt fn) <$> asType arg), []) + go Body (Term (E.LamNamed' v body)) = Just + (Term body, \t -> Term . set <$> asTerm t, [v]) + where set body = ABT.tm (E.Lam (ABT.absr v body)) + go Body (Term (E.Let1NamedTop' top v b body)) = Just + (Term body, \t -> Term . set <$> asTerm t, [v]) + where set body = ABT.tm (E.Let top (w b) (ABT.absr v body)) + go p (Term (ABT.Cycle' vs (ABT.Tm' (E.LetRec top bs body)))) = case p of + Body -> Just (Term body, \body -> Term . 
set <$> asTerm body, vs) + where set body = ABT.cycler vs (ABT.tm (E.LetRec top (map w bs) body)) + Binding i | i >= 0 && i < length bs -> Just + ( Declaration (vs !! i) (bs !! i) + , \b -> Term . set <$> asDeclaration b + , vs + ) + where + replace f i a vs = map f (take i vs) ++ [a] ++ map f (drop (i + 1) vs) + set (v, b) = + let tm0 = ABT.tm (E.LetRec top (replace w i b bs) (w body)) + v0 = ABT.Bound (vs !! i) + tm = if v /= v0 then ABT.rename v0 v tm0 else tm + in ABT.cycler (replace id i (ABT.unvar v) vs) tm + _ -> Nothing + go Body (Type (T.ForallNamed' v body)) = Just + (Type body, \t -> Type . set <$> asType t, [v]) + where set body = ABT.tm (T.Forall (ABT.absr v body)) + go Body (Declaration v body) = + Just (Term body, \body -> Declaration (ABT.Bound v) <$> asTerm body, []) + go Bound (Declaration v body) = + Just (Var v, \v -> Declaration <$> asVar v <*> pure (w body), []) + go Bound (Term (E.LamNamed' v body)) = + Just (Var v, \v -> Term <$> (E.lam () <$> asVar v <*> pure (w body)), []) + go Bound (Term (E.Let1NamedTop' top v b body)) = Just + ( Var v + , \v -> (\v -> Term $ E.let1 top [(((), v), w b)] (w body)) <$> asVar v + , [] + ) + go Bound (Type (T.ForallNamed' v body)) = Just + (Var v, \v -> Type <$> (T.forall () <$> asVar v <*> pure (wt body)), []) + go (Index i) (Term (E.Sequence' vs)) | i < Sequence.length vs && i >= 0 = Just + ( Term (vs `Sequence.index` i) + , \e -> (\e -> Term $ E.seq' () $ Sequence.update i e (fmap w vs)) <$> asTerm e + , [] + ) + go (Binding i) (Term (E.Let1NamedTop' top v b body)) | i <= 0 = Just + (Declaration v b, set, []) + where + set (Declaration v b) = pure . Term $ E.let1 top [(((), v), b)] (w body) + set _ = Nothing + go Annotation (Term (E.Ann' e t)) = + Just (Type t, \t -> Term . E.ann () (w e) <$> asType t, []) + go Body (Term (E.Ann' body t)) = Just + (Term body, \body -> Term . 
flip (E.ann ()) (wt t) <$> asTerm body, []) + go Input (Type (T.Arrow' i o)) = Just + (Type i, \i -> Type <$> (T.arrow () <$> asType i <*> pure (wt o)), []) + go Output (Type (T.Arrow' i o)) = + Just (Type o, \o -> Type . T.arrow () (wt i) <$> asType o, []) + go _ _ = Nothing + +type Path = [PathElement] + +focus :: Var v => Path -> Target v -> Maybe (Target v, Target (V v) -> Maybe (Target v), [v]) +focus p t = tweak <$> ABT.focus (foldr ABT.compose ABT.here (map focus1 p)) t where + tweak (get, set, vs) = (get, \t -> vmap ABT.unvar <$> set t, vs) + +at :: Var v => Path -> Target v -> Maybe (Target v) +at path t = (\(a,_,_) -> a) <$> focus path t + +atTerm :: Var v => Path -> Term v -> Maybe (Term v) +atTerm path t = asTerm =<< at path (Term t) + +atType :: Var v => Path -> Type v -> Maybe (Type v) +atType path t = asType =<< at path (Type t) + +modify :: Var v => (Target v -> Target (V v)) -> Path -> Target v -> Maybe (Target v) +modify f path t = focus path t >>= \(at,set,_) -> set (f at) + +modifyTerm :: Var v => (Term v -> Term (V v)) -> Path -> Term v -> Maybe (Term v) +modifyTerm f p t = do + (at,set,_) <- focus p (Term t) + t <- asTerm at + asTerm =<< set (Term $ f t) + +modifyTerm' :: Var v => (Term v -> Term (V v)) -> Path -> Term v -> Term v +modifyTerm' f p t = fromMaybe t $ modifyTerm f p t + +modifyType :: Var v => (Type v -> Type (V v)) -> Path -> Type v -> Maybe (Type v) +modifyType f p t = do + (at,set,_) <- focus p (Type t) + t <- asType at + asType =<< set (Type $ f t) + +inScopeAt :: Var v => Path -> Target v -> [v] +inScopeAt p t = maybe [] (\(_,_,vs) -> vs) (focus p t) + +inScopeAtTerm :: Var v => Path -> Term v -> [v] +inScopeAtTerm p t = inScopeAt p (Term t) + +inScopeAtType :: Var v => Path -> Type v -> [v] +inScopeAtType p t = inScopeAt p (Type t) + +insertTerm :: Var v => Path -> Term v -> Maybe (Term v) +insertTerm at _ | null at = Nothing +insertTerm at ctx = do + let at' = init at + (parent,set,_) <- focus at' (Term ctx) + case parent 
of + Term (E.Sequence' vs) -> do + i <- listToMaybe [i | Index i <- [last at]] + let v2 = E.seq'() ((E.vmap ABT.Bound <$> Sequence.take (i+1) vs) `mappend` + pure (E.blank ()) `mappend` + (E.vmap ABT.Bound <$> Sequence.drop (i+1) vs)) + asTerm =<< set (Term v2) + _ -> Nothing -- todo - allow other types of insertions, like \x -> y to \x x2 -> y + +-- | Return the list of all prefixes of the input path +pathPrefixes :: Path -> [Path] +pathPrefixes = inits + +-- | Add an element onto the end of this 'Path' +pathExtend :: PathElement -> Path -> Path +pathExtend e p = p ++ [e] + +parent :: Path -> Maybe Path +parent [] = Nothing +parent p = Just (init p) + +parent' :: Path -> Path +parent' = fromMaybe [] . parent + +asTerm :: Target v -> Maybe (Term v) +asTerm (Term t) = Just t +asTerm _ = Nothing + +asType :: Target v -> Maybe (Type v) +asType (Type t) = Just t +asType _ = Nothing + +asVar :: Target v -> Maybe v +asVar (Var v) = Just v +asVar _ = Nothing + +asDeclaration :: Target v -> Maybe (v, Term v) +asDeclaration (Declaration v b) = Just (v,b) +asDeclaration _ = Nothing diff --git a/unison-core/src/Unison/Pattern.hs b/unison-core/src/Unison/Pattern.hs new file mode 100644 index 0000000000..687710430d --- /dev/null +++ b/unison-core/src/Unison/Pattern.hs @@ -0,0 +1,165 @@ +{-# Language DeriveTraversable, DeriveGeneric, PatternSynonyms, OverloadedStrings #-} + +module Unison.Pattern where + +import Unison.Prelude + +import Data.List (intercalate) +import Data.Foldable as Foldable hiding (foldMap') +import Unison.Reference (Reference) +import qualified Unison.Hashable as H +import qualified Unison.Type as Type +import qualified Data.Set as Set +import qualified Unison.LabeledDependency as LD +import Unison.LabeledDependency (LabeledDependency) + +type ConstructorId = Int + +data Pattern loc + = Unbound loc + | Var loc + | Boolean loc !Bool + | Int loc !Int64 + | Nat loc !Word64 + | Float loc !Double + | Text loc !Text + | Char loc !Char + | Constructor loc 
!Reference !Int [Pattern loc] + | As loc (Pattern loc) + | EffectPure loc (Pattern loc) + | EffectBind loc !Reference !Int [Pattern loc] (Pattern loc) + | SequenceLiteral loc [Pattern loc] + | SequenceOp loc (Pattern loc) !SeqOp (Pattern loc) + deriving (Ord,Generic,Functor,Foldable,Traversable) + +data SeqOp = Cons + | Snoc + | Concat + deriving (Eq, Show, Ord) + +instance H.Hashable SeqOp where + tokens Cons = [H.Tag 0] + tokens Snoc = [H.Tag 1] + tokens Concat = [H.Tag 2] + +instance Show (Pattern loc) where + show (Unbound _ ) = "Unbound" + show (Var _ ) = "Var" + show (Boolean _ x) = "Boolean " <> show x + show (Int _ x) = "Int " <> show x + show (Nat _ x) = "Nat " <> show x + show (Float _ x) = "Float " <> show x + show (Text _ t) = "Text " <> show t + show (Char _ c) = "Char " <> show c + show (Constructor _ r i ps) = + "Constructor " <> unwords [show r, show i, show ps] + show (As _ p) = "As " <> show p + show (EffectPure _ k) = "EffectPure " <> show k + show (EffectBind _ r i ps k) = + "EffectBind " <> unwords [show r, show i, show ps, show k] + show (SequenceLiteral _ ps) = "Sequence " <> intercalate ", " (fmap show ps) + show (SequenceOp _ ph op pt) = "Sequence " <> show ph <> " " <> show op <> " " <> show pt + +application :: Pattern loc -> Bool +application (Constructor _ _ _ (_ : _)) = True +application _ = False + +loc :: Pattern loc -> loc +loc p = head $ Foldable.toList p + +setLoc :: Pattern loc -> loc -> Pattern loc +setLoc p loc = case p of + EffectBind _ a b c d -> EffectBind loc a b c d + EffectPure _ a -> EffectPure loc a + As _ a -> As loc a + Constructor _ a b c -> Constructor loc a b c + SequenceLiteral _ ps -> SequenceLiteral loc ps + SequenceOp _ ph op pt -> SequenceOp loc ph op pt + x -> fmap (const loc) x + +instance H.Hashable (Pattern p) where + tokens (Unbound _) = [H.Tag 0] + tokens (Var _) = [H.Tag 1] + tokens (Boolean _ b) = H.Tag 2 : [H.Tag $ if b then 1 else 0] + tokens (Int _ n) = H.Tag 3 : [H.Int n] + tokens (Nat _ n) = H.Tag 
4 : [H.Nat n] + tokens (Float _ f) = H.Tag 5 : H.tokens f + tokens (Constructor _ r n args) = + [H.Tag 6, H.accumulateToken r, H.Nat $ fromIntegral n, H.accumulateToken args] + tokens (EffectPure _ p) = H.Tag 7 : H.tokens p + tokens (EffectBind _ r n args k) = + [H.Tag 8, H.accumulateToken r, H.Nat $ fromIntegral n, H.accumulateToken args, H.accumulateToken k] + tokens (As _ p) = H.Tag 9 : H.tokens p + tokens (Text _ t) = H.Tag 10 : H.tokens t + tokens (SequenceLiteral _ ps) = H.Tag 11 : concatMap H.tokens ps + tokens (SequenceOp _ l op r) = H.Tag 12 : H.tokens op ++ H.tokens l ++ H.tokens r + tokens (Char _ c) = H.Tag 13 : H.tokens c + +instance Eq (Pattern loc) where + Unbound _ == Unbound _ = True + Var _ == Var _ = True + Boolean _ b == Boolean _ b2 = b == b2 + Int _ n == Int _ m = n == m + Nat _ n == Nat _ m = n == m + Float _ f == Float _ g = f == g + Constructor _ r n args == Constructor _ s m brgs = r == s && n == m && args == brgs + EffectPure _ p == EffectPure _ q = p == q + EffectBind _ r ctor ps k == EffectBind _ r2 ctor2 ps2 k2 = r == r2 && ctor == ctor2 && ps == ps2 && k == k2 + As _ p == As _ q = p == q + Text _ t == Text _ t2 = t == t2 + SequenceLiteral _ ps == SequenceLiteral _ ps2 = ps == ps2 + SequenceOp _ ph op pt == SequenceOp _ ph2 op2 pt2 = ph == ph2 && op == op2 && pt == pt2 + _ == _ = False + +foldMap' :: Monoid m => (Pattern loc -> m) -> Pattern loc -> m +foldMap' f p = case p of + Unbound _ -> f p + Var _ -> f p + Boolean _ _ -> f p + Int _ _ -> f p + Nat _ _ -> f p + Float _ _ -> f p + Text _ _ -> f p + Char _ _ -> f p + Constructor _ _ _ ps -> f p <> foldMap (foldMap' f) ps + As _ p' -> f p <> foldMap' f p' + EffectPure _ p' -> f p <> foldMap' f p' + EffectBind _ _ _ ps p' -> f p <> foldMap (foldMap' f) ps <> foldMap' f p' + SequenceLiteral _ ps -> f p <> foldMap (foldMap' f) ps + SequenceOp _ p1 _ p2 -> f p <> foldMap' f p1 <> foldMap' f p2 + +generalizedDependencies + :: Ord r + => (Reference -> r) + -> (Reference -> ConstructorId -> 
r) + -> (Reference -> r) + -> (Reference -> ConstructorId -> r) + -> (Reference -> r) + -> Pattern loc + -> Set r +generalizedDependencies literalType dataConstructor dataType effectConstructor effectType + = Set.fromList . foldMap' + (\case + Unbound _ -> mempty + Var _ -> mempty + As _ _ -> mempty + Constructor _ r cid _ -> [dataType r, dataConstructor r cid] + EffectPure _ _ -> [effectType Type.effectRef] + EffectBind _ r cid _ _ -> + [effectType Type.effectRef, effectType r, effectConstructor r cid] + SequenceLiteral _ _ -> [literalType Type.vectorRef] + SequenceOp {} -> [literalType Type.vectorRef] + Boolean _ _ -> [literalType Type.booleanRef] + Int _ _ -> [literalType Type.intRef] + Nat _ _ -> [literalType Type.natRef] + Float _ _ -> [literalType Type.floatRef] + Text _ _ -> [literalType Type.textRef] + Char _ _ -> [literalType Type.charRef] + ) + +labeledDependencies :: Pattern loc -> Set LabeledDependency +labeledDependencies = generalizedDependencies LD.typeRef + LD.dataConstructor + LD.typeRef + LD.effectConstructor + LD.typeRef diff --git a/unison-core/src/Unison/PatternCompat.hs b/unison-core/src/Unison/PatternCompat.hs new file mode 100644 index 0000000000..31ee1c532d --- /dev/null +++ b/unison-core/src/Unison/PatternCompat.hs @@ -0,0 +1,30 @@ +{-# Language PatternSynonyms #-} + +module Unison.PatternCompat where + +import qualified Unison.Pattern as P + +type Pattern = P.Pattern () + +{-# COMPLETE Unbound, Var, Boolean, Int, Nat, Float, Text, Char, Constructor, As, EffectPure, EffectBind, SequenceLiteral, SequenceOp #-} + +pattern Unbound = P.Unbound () +pattern Var = P.Var () +pattern Boolean b = P.Boolean () b +pattern Int n = P.Int () n +pattern Nat n = P.Nat () n +pattern Float n = P.Float () n +pattern Text t = P.Text () t +pattern Char c = P.Char () c +pattern Constructor r cid ps = P.Constructor () r cid ps +pattern As p = P.As () p +pattern EffectPure p = P.EffectPure () p +pattern EffectBind r cid ps k = P.EffectBind () r cid ps k +pattern 
SequenceLiteral ps = P.SequenceLiteral () ps +pattern SequenceOp ph op pt = P.SequenceOp () ph op pt + +{-# COMPLETE Snoc, Cons, Concat #-} +type SeqOp = P.SeqOp +pattern Snoc = P.Snoc +pattern Cons = P.Cons +pattern Concat = P.Concat diff --git a/unison-core/src/Unison/Prelude.hs b/unison-core/src/Unison/Prelude.hs new file mode 100644 index 0000000000..2bea8d7106 --- /dev/null +++ b/unison-core/src/Unison/Prelude.hs @@ -0,0 +1,62 @@ +module Unison.Prelude + ( module X, readUtf8, safeReadUtf8, safeReadUtf8StdIn, writeUtf8, reportBug + ) where + +import Control.Applicative as X +import Control.Exception as X (Exception, SomeException, IOException, try) +import Control.Monad as X +import Control.Monad.Extra as X (ifM, mapMaybeM, unlessM, whenM) +import Control.Monad.IO.Class as X (MonadIO(liftIO)) +import Control.Monad.Trans as X (MonadTrans(lift)) +import Control.Monad.Trans.Maybe as X (MaybeT(MaybeT, runMaybeT)) +import Data.ByteString as X (ByteString) +import Data.Either as X +import Data.Either.Combinators as X (mapLeft, maybeToRight) +import Data.Foldable as X (asum, fold, foldl', for_, toList, traverse_) +import Data.Functor as X +import Data.Int as X +import Data.List as X (foldl1', sortOn) +import Data.Map as X (Map) +import Data.Maybe as X (catMaybes, fromMaybe, isJust, isNothing, listToMaybe, mapMaybe, maybeToList) +import Data.Sequence as X (Seq) +import Data.Set as X (Set) +import Data.String as X (IsString, fromString) +import Data.Text as X (Text) +import Data.Text.Encoding as X (encodeUtf8, decodeUtf8) +import Data.Traversable as X (for) +import Data.Word as X +import Debug.Trace as X +import GHC.Generics as X (Generic, Generic1) +import Safe as X (atMay, headMay, lastMay, readMay) +import Text.Read as X (readMaybe) + +import qualified Data.ByteString as BS + +-- Read an entire file strictly assuming UTF8 +readUtf8 :: FilePath -> IO Text +readUtf8 p = decodeUtf8 <$> BS.readFile p + +safeReadUtf8 :: FilePath -> IO (Either IOException Text) 
+safeReadUtf8 p = try (readUtf8 p) + +safeReadUtf8StdIn :: IO (Either IOException Text) +safeReadUtf8StdIn = try $ decodeUtf8 <$> BS.getContents + +writeUtf8 :: FilePath -> Text -> IO () +writeUtf8 p txt = BS.writeFile p (encodeUtf8 txt) + +reportBug :: String -> String -> String +reportBug bugId msg = unlines [ + "🐞", + "", + msg, + "", + "This is a Unison bug and you can report it here:", "", + "https://github.com/unisonweb/unison/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+" <> bugId <> "+", + "", + "Bug reference: " <> bugId, "", + "If there's already an issue with this reference, you can give a 👍", + "on the issue to let the team know you encountered it, and you can add", + "any additional details you know of to the issue." + ] + diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs new file mode 100644 index 0000000000..f007e2b764 --- /dev/null +++ b/unison-core/src/Unison/Reference.hs @@ -0,0 +1,179 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Reference + (Reference, + pattern Builtin, + pattern Derived, + pattern DerivedId, + Id(..), + derivedBase32Hex, + Component, members, + components, + groupByComponent, + componentFor, + unsafeFromText, + idFromText, + isPrefixOf, + fromShortHash, + fromText, + readSuffix, + showShort, + showSuffix, + toId, + toText, + unsafeId, + toShortHash) where + +import Unison.Prelude + +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Unison.Hash as H +import Unison.Hashable as Hashable +import Unison.ShortHash (ShortHash) +import qualified Unison.ShortHash as SH +import Data.Char (isDigit) + +data Reference + = Builtin Text.Text + -- `Derived` can be part of a strongly connected component. 
+ -- The `Pos` refers to a particular element of the component + -- and the `Size` is the number of elements in the component. + -- Using an ugly name so no one tempted to use this + | DerivedId Id deriving (Eq,Ord,Generic) + +pattern Derived h i n = DerivedId (Id h i n) + +-- A good idea, but causes a weird problem with view patterns in PatternP.hs in ghc 8.4.3 +--{-# COMPLETE Builtin, Derived #-} + +data Id = Id H.Hash Pos Size deriving (Eq,Ord,Generic) + +unsafeId :: Reference -> Id +unsafeId (Builtin b) = + error $ "Tried to get the hash of builtin " <> Text.unpack b <> "." +unsafeId (DerivedId x) = x + +-- todo: move these to ShortHash module? +-- but Show Reference currently depends on SH +toShortHash :: Reference -> ShortHash +toShortHash (Builtin b) = SH.Builtin b +toShortHash (Derived h _ 1) = SH.ShortHash (H.base32Hex h) Nothing Nothing +toShortHash (Derived h i n) = SH.ShortHash (H.base32Hex h) index Nothing + where + -- todo: remove `n` parameter; must also update readSuffix + index = Just $ showSuffix i n +toShortHash (DerivedId _) = error "this should be covered above" + +-- toShortHash . fromJust . fromShortHash == id and +-- fromJust . fromShortHash . toShortHash == id +-- but for arbitrary ShortHashes which may be broken at the wrong boundary, it +-- may not be possible to base32Hex decode them. These will return Nothing. 
+-- Also, ShortHashes that include constructor ids will return Nothing; +-- try Referent.fromShortHash +fromShortHash :: ShortHash -> Maybe Reference +fromShortHash (SH.Builtin b) = Just (Builtin b) +fromShortHash (SH.ShortHash prefix cycle Nothing) = do + h <- H.fromBase32Hex prefix + case cycle of + Nothing -> Just (Derived h 0 1) + Just t -> case Text.splitOn "c" t of + [i,n] -> Derived h <$> readMay (Text.unpack i) <*> readMay (Text.unpack n) + _ -> Nothing +fromShortHash (SH.ShortHash _prefix _cycle (Just _cid)) = Nothing + +-- (3,10) encoded as "3c10" +-- (0,93) encoded as "0c93" +showSuffix :: Pos -> Size -> Text +showSuffix i n = Text.pack $ show i <> "c" <> show n + +-- todo: don't read or return size; must also update showSuffix and fromText +readSuffix :: Text -> Either String (Pos, Size) +readSuffix t = case Text.breakOn "c" t of + (pos, Text.drop 1 -> size) | Text.all isDigit pos && Text.all isDigit size -> + Right (read (Text.unpack pos), read (Text.unpack size)) + _ -> Left "suffix decoding error" + +isPrefixOf :: ShortHash -> Reference -> Bool +isPrefixOf sh r = SH.isPrefixOf sh (toShortHash r) + +toText :: Reference -> Text +toText = SH.toText . toShortHash + +showShort :: Int -> Reference -> Text +showShort numHashChars = SH.toText . SH.take numHashChars . toShortHash + +type Pos = Word64 +type Size = Word64 + +newtype Component = Component { members :: Set Reference } + +-- Gives the component (dependency cycle) that the reference is a part of +componentFor :: Reference -> Component +componentFor b@(Builtin _ ) = Component (Set.singleton b) +componentFor ( DerivedId (Id h _ n)) = Component + (Set.fromList + [ DerivedId (Id h i n) | i <- take (fromIntegral n) [0 ..] 
] + ) + +derivedBase32Hex :: Text -> Pos -> Size -> Reference +derivedBase32Hex b32Hex i n = DerivedId (Id (fromMaybe msg h) i n) + where + msg = error $ "Reference.derivedBase32Hex " <> show h + h = H.fromBase32Hex b32Hex + +unsafeFromText :: Text -> Reference +unsafeFromText = either error id . fromText + +idFromText :: Text -> Maybe Id +idFromText s = case fromText s of + Left _ -> Nothing + Right (Builtin _) -> Nothing + Right (DerivedId id) -> pure id + +toId :: Reference -> Maybe Id +toId (DerivedId id) = Just id +toId Builtin{} = Nothing + +-- examples: +-- `##Text.take` — builtins don’t have cycles +-- `#2tWjVAuc7` — derived, no cycle +-- `#y9ycWkiC1.y9` — derived, part of cycle +-- todo: take a (Reference -> CycleSize) so that `readSuffix` doesn't have to parse the size from the text. +fromText :: Text -> Either String Reference +fromText t = case Text.split (=='#') t of + [_, "", b] -> Right (Builtin b) + [_, h] -> case Text.split (=='.') h of + [hash] -> Right (derivedBase32Hex hash 0 1) + [hash, suffix] -> uncurry (derivedBase32Hex hash) <$> readSuffix suffix + _ -> bail + _ -> bail + where bail = Left $ "couldn't parse a Reference from " <> Text.unpack t + +component :: H.Hash -> [k] -> [(k, Id)] +component h ks = let + size = fromIntegral (length ks) + in [ (k, (Id h i size)) | (k, i) <- ks `zip` [0..]] + +components :: [(H.Hash, [k])] -> [(k, Id)] +components sccs = uncurry component =<< sccs + +groupByComponent :: [(k, Reference)] -> [[(k, Reference)]] +groupByComponent refs = done $ foldl' insert Map.empty refs + where + insert m (k, r@(Derived h _ _)) = + Map.unionWith (<>) m (Map.fromList [(Right h, [(k,r)])]) + insert m (k, r) = + Map.unionWith (<>) m (Map.fromList [(Left r, [(k,r)])]) + done m = sortOn snd <$> toList m + +instance Show Id where show = SH.toString . SH.take 5 . toShortHash . DerivedId +instance Show Reference where show = SH.toString . SH.take 5 . 
toShortHash + +instance Hashable.Hashable Reference where + tokens (Builtin txt) = [Hashable.Tag 0, Hashable.Text txt] + tokens (DerivedId (Id h i n)) = [Hashable.Tag 1, Hashable.Bytes (H.toBytes h), Hashable.Nat i, Hashable.Nat n] diff --git a/unison-core/src/Unison/Reference/Util.hs b/unison-core/src/Unison/Reference/Util.hs new file mode 100644 index 0000000000..2d63d2d6b1 --- /dev/null +++ b/unison-core/src/Unison/Reference/Util.hs @@ -0,0 +1,22 @@ +module Unison.Reference.Util where + +import Unison.Prelude + +import Unison.Reference +import qualified Unison.Reference as Reference +import Unison.Hashable (Hashable1) +import Unison.ABT (Var) +import qualified Unison.ABT as ABT +import qualified Data.Map as Map + +hashComponents :: + (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v) + => (Reference.Id -> ABT.Term f v ()) + -> Map v (ABT.Term f v a) + -> Map v (Reference.Id, ABT.Term f v a) +hashComponents embedRef tms = + Map.fromList [ (v, (r,e)) | ((v,e), r) <- cs ] + where cs = components $ ABT.hashComponents ref tms + ref h i n = embedRef (Id h i n) + + diff --git a/unison-core/src/Unison/Referent.hs b/unison-core/src/Unison/Referent.hs new file mode 100644 index 0000000000..700e84ed02 --- /dev/null +++ b/unison-core/src/Unison/Referent.hs @@ -0,0 +1,128 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} + +module Unison.Referent where + +import Unison.Prelude + +import qualified Data.Char as Char +import qualified Data.Text as Text +import Unison.Hashable (Hashable) +import qualified Unison.Hashable as H +import Unison.Reference (Reference) +import qualified Unison.Reference as R +import Unison.ShortHash (ShortHash) +import qualified Unison.ShortHash as SH + +import Unison.ConstructorType (ConstructorType) +import qualified Unison.ConstructorType as CT + +-- Slightly odd naming. This is the "referent of term name in the codebase", +-- rather than the target of a Reference. 
+type Referent = Referent' Reference +pattern Ref :: Reference -> Referent +pattern Ref r = Ref' r +pattern Con :: Reference -> Int -> ConstructorType -> Referent +pattern Con r i t = Con' r i t +{-# COMPLETE Ref, Con #-} + +type Id = Referent' R.Id + +data Referent' r = Ref' r | Con' r Int ConstructorType + deriving (Show, Ord, Eq, Functor) + +type Pos = Word64 +type Size = Word64 + +-- referentToTerm moved to Term.fromReferent +-- termToReferent moved to Term.toReferent + +-- todo: move these to ShortHash module +toShortHash :: Referent -> ShortHash +toShortHash = \case + Ref r -> R.toShortHash r + Con r i _ -> patternShortHash r i + +toShortHashId :: Id -> ShortHash +toShortHashId = toShortHash . fromId + +-- also used by HashQualified.fromPattern +patternShortHash :: Reference -> Int -> ShortHash +patternShortHash r i = (R.toShortHash r) { SH.cid = Just . Text.pack $ show i } + +showShort :: Int -> Referent -> Text +showShort numHashChars = SH.toText . SH.take numHashChars . toShortHash + +toText :: Referent -> Text +toText = \case + Ref r -> R.toText r + Con r cid ct -> R.toText r <> "#" <> ctorTypeText ct <> Text.pack (show cid) + +ctorTypeText :: CT.ConstructorType -> Text +ctorTypeText CT.Effect = EffectCtor +ctorTypeText CT.Data = DataCtor + +pattern EffectCtor = "a" +pattern DataCtor = "d" + +toString :: Referent -> String +toString = Text.unpack . 
toText + +isConstructor :: Referent -> Bool +isConstructor Con{} = True +isConstructor _ = False + +toTermReference :: Referent -> Maybe Reference +toTermReference = \case + Ref r -> Just r + _ -> Nothing + +toReference :: Referent -> Reference +toReference = toReference' + +toReference' :: Referent' r -> r +toReference' = \case + Ref' r -> r + Con' r _i _t -> r + +fromId :: Id -> Referent +fromId = fmap R.DerivedId + +toTypeReference :: Referent -> Maybe Reference +toTypeReference = \case + Con r _i _t -> Just r + _ -> Nothing + +isPrefixOf :: ShortHash -> Referent -> Bool +isPrefixOf sh r = SH.isPrefixOf sh (toShortHash r) + +unsafeFromText :: Text -> Referent +unsafeFromText = fromMaybe (error "invalid referent") . fromText + +-- #abc[.xy][#cid] +fromText :: Text -> Maybe Referent +fromText t = either (const Nothing) Just $ + -- if the string has just one hash at the start, it's just a reference + if Text.length refPart == 1 then + Ref <$> R.fromText t + else if Text.all Char.isDigit cidPart then do + r <- R.fromText (Text.dropEnd 1 refPart) + ctorType <- ctorType + let cid = read (Text.unpack cidPart) + pure $ Con r cid ctorType + else + Left ("invalid constructor id: " <> Text.unpack cidPart) + where + ctorType = case Text.take 1 cidPart' of + EffectCtor -> Right CT.Effect + DataCtor -> Right CT.Data + _otherwise -> + Left ("invalid constructor type (expected '" + <> EffectCtor <> "' or '" <> DataCtor <> "'): " <> Text.unpack cidPart') + refPart = Text.dropWhileEnd (/= '#') t + cidPart' = Text.takeWhileEnd (/= '#') t + cidPart = Text.drop 1 cidPart' + +instance Hashable Referent where + tokens (Ref r) = [H.Tag 0] ++ H.tokens r + tokens (Con r i dt) = [H.Tag 2] ++ H.tokens r ++ H.tokens (fromIntegral i :: Word64) ++ H.tokens dt diff --git a/unison-core/src/Unison/Settings.hs b/unison-core/src/Unison/Settings.hs new file mode 100644 index 0000000000..883f7e0a0c --- /dev/null +++ b/unison-core/src/Unison/Settings.hs @@ -0,0 +1,18 @@ +module Unison.Settings where 
+ +debugNoteLoc,debugNoteSummary,debugRevealForalls :: Bool +debugNoteLoc = False +debugNoteSummary = False +debugRevealForalls = False + +renderTermMaxLength :: Int +renderTermMaxLength = 20 + +demoHideVarNumber :: Bool +demoHideVarNumber = False + +removePureEffects :: Bool +removePureEffects = True + +cleanupTypes :: Bool +cleanupTypes = True diff --git a/unison-core/src/Unison/ShortHash.hs b/unison-core/src/Unison/ShortHash.hs new file mode 100644 index 0000000000..7e97b7a722 --- /dev/null +++ b/unison-core/src/Unison/ShortHash.hs @@ -0,0 +1,92 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.ShortHash where + +import Unison.Prelude + +import qualified Data.Text as Text + +-- Arya created this type to be able to query the Codebase for anonymous definitions. The parsing functions can't fail, because they only try to pull apart the syntactic elements "#" and ".". They don't necessarily produce a meaningful reference; you'll figure that out during base58 decoding. We don't attempt base58 decoding here because the base58 prefix doesn't correspond to anything useful. We'll just compare strings against the codebase or namespace later. +-- None of the punctuation is stored here. +data ShortHash + = Builtin Text + | ShortHash { prefix :: Text, cycle :: Maybe Text, cid :: Maybe Text } + deriving (Eq, Ord, Show) + +-- currently unused +isConstructor :: ShortHash -> Bool +isConstructor = \case + ShortHash _ _ (Just _) -> True + _ -> False + +-- Parse a string like those described in Referent.fromText: +-- examples: +-- `##Text.take` — builtins don’t have cycles or cids +-- `#2tWjVAuc7` — term ref, no cycle +-- `#y9ycWkiC1.y9` — term ref, part of cycle +-- `#cWkiC1x89#1` — constructor +-- `#DCxrnCAPS.WD#0` — constructor of a type in a cycle +-- A constructor ID on a builtin is ignored: +-- e.g. ##FileIO#2 is parsed as ##FileIO +-- Anything to the left of the first # is +-- e.g. 
foo#abc is parsed as #abc +-- Anything including and following a third # is ignored. +-- e.g. foo#abc#2#hello is parsed as #abc#2 +-- Anything after a second . before a second # is ignored. +-- e.g. foo#abc.1f.x is parsed as #abc.1f +fromText :: Text -> Maybe ShortHash +fromText t = case Text.split (=='#') t of + [_, "", b] -> Just $ Builtin b -- builtin starts with ## + _ : "" : b : _ -> -- builtin with a CID todo: could be rejected + Just $ Builtin b + [_, h] -> Just $ uncurry ShortHash (getCycle h) Nothing + [_, h, c] -> Just $ uncurry ShortHash (getCycle h) (Just c) + _ : h : c : _garbage -> -- CID with more hash after todo: could be rejected + Just $ uncurry ShortHash (getCycle h) (Just c) + _ -> Nothing + where + getCycle :: Text -> (Text, Maybe Text) + getCycle h = case Text.split (=='.') h of + [] -> ("", Nothing) -- e.g. foo#.1j + [hash] -> (hash, Nothing) + hash : suffix : _garbage -> (hash, Just suffix) + +unsafeFromText :: Text -> ShortHash +unsafeFromText t = fromMaybe + (error . Text.unpack $ "can't parse ShortHash from: " <> t) + (fromText t) + +toText :: ShortHash -> Text +toText (Builtin b) = "##" <> b +toText (ShortHash p i cid) = "#" <> p <> i' <> c' where + i', c' :: Text + i' = maybe "" ("."<>) i + c' = maybe "" ("#" <>) cid + +toString :: ShortHash -> String +toString = Text.unpack . toText + +fromString :: String -> Maybe ShortHash +fromString = fromText . Text.pack + +take :: Int -> ShortHash -> ShortHash +take _ b@(Builtin _) = b +take i s@ShortHash{..} = s { prefix = Text.take i prefix } + +-- x `isPrefixOf` y is True iff x might be a shorter version of y +-- if a constructor id is provided on the right-hand side, the left-hand side +-- needs to match exactly (as of this commit). 
+isPrefixOf :: ShortHash -> ShortHash -> Bool +isPrefixOf (Builtin t) (Builtin t2) = t `Text.isPrefixOf` t2 +isPrefixOf (ShortHash h n cid) (ShortHash h2 n2 cid2) = + Text.isPrefixOf h h2 && maybePrefixOf n n2 && maybePrefixOf cid cid2 + where + Nothing `maybePrefixOf` Nothing = True + Nothing `maybePrefixOf` Just _ = False + Just _ `maybePrefixOf` Nothing = False + Just a `maybePrefixOf` Just b = a == b +isPrefixOf _ _ = False + +--instance Show ShortHash where +-- show = Text.unpack . toText diff --git a/unison-core/src/Unison/Symbol.hs b/unison-core/src/Unison/Symbol.hs new file mode 100644 index 0000000000..b1f7b200d3 --- /dev/null +++ b/unison-core/src/Unison/Symbol.hs @@ -0,0 +1,35 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Symbol where + +import Unison.Prelude + +import Unison.Var (Var(..)) +import qualified Data.Set as Set +import qualified Unison.ABT as ABT +import qualified Unison.Var as Var + +data Symbol = Symbol !Word64 Var.Type deriving (Generic) + +instance ABT.Var Symbol where + freshIn vs s | Set.null vs || Set.notMember s vs = s -- already fresh! 
+ freshIn vs s@(Symbol i n) = case Set.elemAt (Set.size vs - 1) vs of + Symbol i2 _ -> if i > i2 then s else Symbol (i2+1) n + +instance Var Symbol where + typed t = Symbol 0 t + typeOf (Symbol _ t) = t + freshId (Symbol id _) = id + freshenId id (Symbol _ n) = Symbol id n + +instance Eq Symbol where + Symbol id1 name1 == Symbol id2 name2 = id1 == id2 && name1 == name2 +instance Ord Symbol where + Symbol id1 name1 `compare` Symbol id2 name2 = (id1,name1) `compare` (id2,name2) +instance Show Symbol where + show (Symbol 0 n) = show n + show (Symbol id n) = show n ++ "-" ++ show id + +symbol :: Text -> Symbol +symbol = Var.named diff --git a/unison-core/src/Unison/Term.hs b/unison-core/src/Unison/Term.hs new file mode 100644 index 0000000000..ba00d9b277 --- /dev/null +++ b/unison-core/src/Unison/Term.hs @@ -0,0 +1,1123 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE UnicodeSyntax #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Term where + +import Unison.Prelude + +import Prelude hiding (and,or) +import Control.Monad.State (evalState) +import qualified Control.Monad.Writer.Strict as Writer +import Data.Bifunctor (second) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Data.Sequence as Sequence +import Prelude.Extras (Eq1(..), Show1(..)) +import Text.Show +import qualified Unison.ABT as ABT +import qualified Unison.Blank as B +import qualified Unison.Hash as Hash +import Unison.Hashable (Hashable1, accumulateToken) +import qualified Unison.Hashable as Hashable +import Unison.Names3 ( Names0 ) +import qualified Unison.Names3 as Names +import Unison.Pattern (Pattern) +import qualified Unison.Pattern as Pattern +import Unison.Reference (Reference, pattern Builtin) +import qualified Unison.Reference as Reference +import qualified 
Unison.Reference.Util as ReferenceUtil +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import Unison.Type (Type) +import qualified Unison.Type as Type +import qualified Unison.Util.Relation as Rel +import qualified Unison.ConstructorType as CT +import Unison.Util.List (multimap, validate) +import Unison.Var (Var) +import qualified Unison.Var as Var +import Unsafe.Coerce +import Unison.Symbol (Symbol) +import qualified Unison.Name as Name +import qualified Unison.LabeledDependency as LD +import Unison.LabeledDependency (LabeledDependency) + +-- This gets reexported; should maybe live somewhere other than Pattern, though. +type ConstructorId = Pattern.ConstructorId + +data MatchCase loc a = MatchCase (Pattern loc) (Maybe a) a + deriving (Show,Eq,Foldable,Functor,Generic,Generic1,Traversable) + +-- | Base functor for terms in the Unison language +-- We need `typeVar` because the term and type variables may differ. +data F typeVar typeAnn patternAnn a + = Int Int64 + | Nat Word64 + | Float Double + | Boolean Bool + | Text Text + | Char Char + | Blank (B.Blank typeAnn) + | Ref Reference + -- First argument identifies the data type, + -- second argument identifies the constructor + | Constructor Reference ConstructorId + | Request Reference ConstructorId + | Handle a a + | App a a + | Ann a (Type typeVar typeAnn) + | Sequence (Seq a) + | If a a a + | And a a + | Or a a + | Lam a + -- Note: let rec blocks have an outer ABT.Cycle which introduces as many + -- variables as there are bindings + | LetRec IsTop [a] a + -- Note: first parameter is the binding, second is the expression which may refer + -- to this let bound variable. 
Constructed as `Let b (abs v e)` + | Let IsTop a a + -- Pattern matching / eliminating data types, example: + -- case x of + -- Just n -> rhs1 + -- Nothing -> rhs2 + -- + -- translates to + -- + -- Match x + -- [ (Constructor 0 [Var], ABT.abs n rhs1) + -- , (Constructor 1 [], rhs2) ] + | Match a [MatchCase patternAnn a] + | TermLink Referent + | TypeLink Reference + deriving (Foldable,Functor,Generic,Generic1,Traversable) + +type IsTop = Bool + +-- | Like `Term v`, but with an annotation of type `a` at every level in the tree +type Term v a = Term2 v a a v a +-- | Allow type variables and term variables to differ +type Term' vt v a = Term2 vt a a v a +-- | Allow type variables, term variables, type annotations and term annotations +-- to all differ +type Term2 vt at ap v a = ABT.Term (F vt at ap) v a +-- | Like `Term v a`, but with only () for type and pattern annotations. +type Term3 v a = Term2 v () () v a + +-- | Terms are represented as ABTs over the base functor F, with variables in `v` +type Term0 v = Term v () +-- | Terms with type variables in `vt`, and term variables in `v` +type Term0' vt v = Term' vt v () + +-- bindExternals +-- :: forall v a b b2 +-- . Var v +-- => [(v, Term2 v b a v b2)] +-- -> [(v, Reference)] +-- -> Term2 v b a v a +-- -> Term2 v b a v a +-- bindBuiltins termBuiltins typeBuiltins = f . g +-- where +-- f :: Term2 v b a v a -> Term2 v b a v a +-- f = typeMap (Type.bindBuiltins typeBuiltins) +-- g :: Term2 v b a v a -> Term2 v b a v a +-- g = ABT.substsInheritAnnotation termBuiltins +bindNames + :: forall v a . 
Var v + => Set v + -> Names0 + -> Term v a + -> Names.ResolutionResult v a (Term v a) +-- bindNames keepFreeTerms _ _ | trace "Keep free terms:" False +-- || traceShow keepFreeTerms False = undefined +bindNames keepFreeTerms ns e = do + let freeTmVars = [ (v,a) | (v,a) <- ABT.freeVarOccurrences keepFreeTerms e ] + -- !_ = trace "free term vars: " () + -- !_ = traceShow $ fst <$> freeTmVars + freeTyVars = [ (v, a) | (v,as) <- Map.toList (freeTypeVarAnnotations e) + , a <- as ] + -- !_ = trace "free type vars: " () + -- !_ = traceShow $ fst <$> freeTyVars + okTm :: (v,a) -> Names.ResolutionResult v a (v, Term v a) + okTm (v,a) = case Rel.lookupDom (Name.fromVar v) (Names.terms0 ns) of + rs | Set.size rs == 1 -> + pure (v, fromReferent a $ Set.findMin rs) + | otherwise -> Left (pure (Names.TermResolutionFailure v a rs)) + okTy (v,a) = case Rel.lookupDom (Name.fromVar v) (Names.types0 ns) of + rs | Set.size rs == 1 -> pure (v, Type.ref a $ Set.findMin rs) + | otherwise -> Left (pure (Names.TypeResolutionFailure v a rs)) + termSubsts <- validate okTm freeTmVars + typeSubsts <- validate okTy freeTyVars + pure . substTypeVars typeSubsts . ABT.substsInheritAnnotation termSubsts $ e + +bindSomeNames + :: forall v a . Var v + => Names0 + -> Term v a + -> Names.ResolutionResult v a (Term v a) +-- bindSomeNames ns e | trace "Term.bindSome" False +-- || trace "Names =" False +-- || traceShow ns False +-- || trace "Free type vars:" False +-- || traceShow (freeTypeVars e) False +-- || traceShow e False +-- = undefined +bindSomeNames ns e = bindNames keepFree ns e where + keepFree = Set.difference (freeVars e) + (Set.map Name.toVar $ Rel.dom (Names.terms0 ns)) + +-- Prepare a term for type-directed name resolution by replacing +-- any remaining free variables with blanks to be resolved by TDNR +prepareTDNR :: Var v => ABT.Term (F vt b ap) v b -> ABT.Term (F vt b ap) v b +prepareTDNR t = fmap fst . 
ABT.visitPure f $ ABT.annotateBound t + where f (ABT.Term _ (a, bound) (ABT.Var v)) | Set.notMember v bound = + Just $ resolve (a, bound) a (Text.unpack $ Var.name v) + f _ = Nothing + +amap :: Ord v => (a -> a2) -> Term v a -> Term v a2 +amap f = fmap f . patternMap (fmap f) . typeMap (fmap f) + +patternMap :: (Pattern ap -> Pattern ap2) -> Term2 vt at ap v a -> Term2 vt at ap2 v a +patternMap f = go where + go (ABT.Term fvs a t) = ABT.Term fvs a $ case t of + ABT.Abs v t -> ABT.Abs v (go t) + ABT.Var v -> ABT.Var v + ABT.Cycle t -> ABT.Cycle (go t) + ABT.Tm (Match e cases) -> ABT.Tm (Match (go e) [ + MatchCase (f p) (go <$> g) (go a) | MatchCase p g a <- cases ]) + -- Safe since `Match` is only ctor that has embedded `Pattern ap` arg + ABT.Tm ts -> unsafeCoerce $ ABT.Tm (fmap go ts) + +vmap :: Ord v2 => (v -> v2) -> Term v a -> Term v2 a +vmap f = ABT.vmap f . typeMap (ABT.vmap f) + +vtmap :: Ord vt2 => (vt -> vt2) -> Term' vt v a -> Term' vt2 v a +vtmap f = typeMap (ABT.vmap f) + +typeMap + :: Ord vt2 + => (Type vt at -> Type vt2 at2) + -> Term2 vt at ap v a + -> Term2 vt2 at2 ap v a +typeMap f = go + where + go (ABT.Term fvs a t) = ABT.Term fvs a $ case t of + ABT.Abs v t -> ABT.Abs v (go t) + ABT.Var v -> ABT.Var v + ABT.Cycle t -> ABT.Cycle (go t) + ABT.Tm (Ann e t) -> ABT.Tm (Ann (go e) (f t)) + -- Safe since `Ann` is only ctor that has embedded `Type v` arg + -- otherwise we'd have to manually match on every non-`Ann` ctor + ABT.Tm ts -> unsafeCoerce $ ABT.Tm (fmap go ts) + +extraMap' + :: (Ord vt, Ord vt') + => (vt -> vt') + -> (at -> at') + -> (ap -> ap') + -> Term2 vt at ap v a + -> Term2 vt' at' ap' v a +extraMap' vtf atf apf = ABT.extraMap (extraMap vtf atf apf) + +extraMap + :: (Ord vt, Ord vt') + => (vt -> vt') + -> (at -> at') + -> (ap -> ap') + -> F vt at ap a + -> F vt' at' ap' a +extraMap vtf atf apf = \case + Int x -> Int x + Nat x -> Nat x + Float x -> Float x + Boolean x -> Boolean x + Text x -> Text x + Char x -> Char x + Blank x -> Blank 
(fmap atf x) + Ref x -> Ref x + Constructor x y -> Constructor x y + Request x y -> Request x y + Handle x y -> Handle x y + App x y -> App x y + Ann tm x -> Ann tm (ABT.amap atf (ABT.vmap vtf x)) + Sequence x -> Sequence x + If x y z -> If x y z + And x y -> And x y + Or x y -> Or x y + Lam x -> Lam x + LetRec x y z -> LetRec x y z + Let x y z -> Let x y z + Match tm l -> Match tm (map (matchCaseExtraMap apf) l) + TermLink r -> TermLink r + TypeLink r -> TypeLink r + +matchCaseExtraMap :: (loc -> loc') -> MatchCase loc a -> MatchCase loc' a +matchCaseExtraMap f (MatchCase p x y) = MatchCase (fmap f p) x y + +unannotate + :: forall vt at ap v a . Ord v => Term2 vt at ap v a -> Term0' vt v +unannotate = go + where + go :: Term2 vt at ap v a -> Term0' vt v + go (ABT.out -> ABT.Abs v body) = ABT.abs v (go body) + go (ABT.out -> ABT.Cycle body) = ABT.cycle (go body) + go (ABT.Var' v ) = ABT.var v + go (ABT.Tm' f ) = case go <$> f of + Ann e t -> ABT.tm (Ann e (void t)) + Match scrutinee branches -> + let unann (MatchCase pat guard body) = MatchCase (void pat) guard body + in ABT.tm (Match scrutinee (unann <$> branches)) + f' -> ABT.tm (unsafeCoerce f') + go _ = error "unpossible" + +wrapV :: Ord v => Term v a -> Term (ABT.V v) a +wrapV = vmap ABT.Bound + +-- | All variables mentioned in the given term. +-- Includes both term and type variables, both free and bound. 
+allVars :: Ord v => Term v a -> Set v +allVars tm = Set.fromList $ + ABT.allVars tm ++ [ v | tp <- allTypes tm, v <- ABT.allVars tp ] + where + allTypes tm = case tm of + Ann' e tp -> tp : allTypes e + _ -> foldMap allTypes $ ABT.out tm + +freeVars :: Term' vt v a -> Set v +freeVars = ABT.freeVars + +freeTypeVars :: Ord vt => Term' vt v a -> Set vt +freeTypeVars t = Map.keysSet $ freeTypeVarAnnotations t + +freeTypeVarAnnotations :: Ord vt => Term' vt v a -> Map vt [a] +freeTypeVarAnnotations e = multimap $ go Set.empty e where + go bound tm = case tm of + Var' _ -> mempty + Ann' e (Type.stripIntroOuters -> t1) -> let + bound' = case t1 of Type.ForallsNamed' vs _ -> bound <> Set.fromList vs + _ -> bound + in go bound' e <> ABT.freeVarOccurrences bound t1 + ABT.Tm' f -> foldMap (go bound) f + (ABT.out -> ABT.Abs _ body) -> go bound body + (ABT.out -> ABT.Cycle body) -> go bound body + _ -> error "unpossible" + +substTypeVars :: (Ord v, Var vt) + => [(vt, Type vt b)] + -> Term' vt v a + -> Term' vt v a +substTypeVars subs e = foldl' go e subs where + go e (vt, t) = substTypeVar vt t e + +-- Capture-avoiding substitution of a type variable inside a term. This +-- will replace that type variable wherever it appears in type signatures of +-- the term, avoiding capture by renaming ∀-binders. +substTypeVar + :: (Ord v, ABT.Var vt) + => vt + -> Type vt b + -> Term' vt v a + -> Term' vt v a +substTypeVar vt ty = go Set.empty where + go bound tm | Set.member vt bound = tm + go bound tm = let loc = ABT.annotation tm in case tm of + Var' _ -> tm + Ann' e t -> uncapture [] e (Type.stripIntroOuters t) where + fvs = ABT.freeVars ty + -- if the ∀ introduces a variable, v, which is free in `ty`, we pick a new + -- variable name for v which is unique, v', and rename v to v' in e. + uncapture vs e t@(Type.Forall' body) | Set.member (ABT.variable body) fvs = let + v = ABT.variable body + v2 = Var.freshIn (ABT.freeVars t) . 
Var.freshIn (Set.insert vt fvs) $ v + t2 = ABT.bindInheritAnnotation body (Type.var() v2) + in uncapture ((ABT.annotation t, v2):vs) (renameTypeVar v v2 e) t2 + uncapture vs e t0 = let + t = foldl (\body (loc,v) -> Type.forall loc v body) t0 vs + bound' = case Type.unForalls (Type.stripIntroOuters t) of + Nothing -> bound + Just (vs, _) -> bound <> Set.fromList vs + t' = ABT.substInheritAnnotation vt ty (Type.stripIntroOuters t) + in ann loc (go bound' e) (Type.freeVarsToOuters bound t') + ABT.Tm' f -> ABT.tm' loc (go bound <$> f) + (ABT.out -> ABT.Abs v body) -> ABT.abs' loc v (go bound body) + (ABT.out -> ABT.Cycle body) -> ABT.cycle' loc (go bound body) + _ -> error "unpossible" + +renameTypeVar :: (Ord v, ABT.Var vt) => vt -> vt -> Term' vt v a -> Term' vt v a +renameTypeVar old new = go Set.empty where + go bound tm | Set.member old bound = tm + go bound tm = let loc = ABT.annotation tm in case tm of + Var' _ -> tm + Ann' e t -> let + bound' = case Type.unForalls (Type.stripIntroOuters t) of + Nothing -> bound + Just (vs, _) -> bound <> Set.fromList vs + t' = ABT.rename old new (Type.stripIntroOuters t) + in ann loc (go bound' e) (Type.freeVarsToOuters bound t') + ABT.Tm' f -> ABT.tm' loc (go bound <$> f) + (ABT.out -> ABT.Abs v body) -> ABT.abs' loc v (go bound body) + (ABT.out -> ABT.Cycle body) -> ABT.cycle' loc (go bound body) + _ -> error "unpossible" + +-- Converts free variables to bound variables using forall or introOuter. Example: +-- +-- foo : x -> x +-- foo a = +-- r : x +-- r = a +-- r +-- +-- This becomes: +-- +-- foo : ∀ x . x -> x +-- foo a = +-- r : outer x . x -- FYI, not valid syntax +-- r = a +-- r +-- +-- More specifically: in the expression `e : t`, unbound lowercase variables in `t` +-- are bound with foralls, and any ∀-quantified type variables are made bound in +-- `e` and its subexpressions. 
The result is a term with no lowercase free +-- variables in any of its type signatures, with outer references represented +-- with explicit `introOuter` binders. The resulting term may have uppercase +-- free variables that are still unbound. +generalizeTypeSignatures :: (Var vt, Var v) => Term' vt v a -> Term' vt v a +generalizeTypeSignatures = go Set.empty where + go bound tm = let loc = ABT.annotation tm in case tm of + Var' _ -> tm + Ann' e (Type.generalizeLowercase bound -> t) -> let + bound' = case Type.unForalls t of + Nothing -> bound + Just (vs, _) -> bound <> Set.fromList vs + in ann loc (go bound' e) (Type.freeVarsToOuters bound t) + ABT.Tm' f -> ABT.tm' loc (go bound <$> f) + (ABT.out -> ABT.Abs v body) -> ABT.abs' loc v (go bound body) + (ABT.out -> ABT.Cycle body) -> ABT.cycle' loc (go bound body) + _ -> error "unpossible" + +-- nicer pattern syntax + +pattern Var' v <- ABT.Var' v +pattern Cycle' xs t <- ABT.Cycle' xs t +pattern Abs' subst <- ABT.Abs' subst +pattern Int' n <- (ABT.out -> ABT.Tm (Int n)) +pattern Nat' n <- (ABT.out -> ABT.Tm (Nat n)) +pattern Float' n <- (ABT.out -> ABT.Tm (Float n)) +pattern Boolean' b <- (ABT.out -> ABT.Tm (Boolean b)) +pattern Text' s <- (ABT.out -> ABT.Tm (Text s)) +pattern Char' c <- (ABT.out -> ABT.Tm (Char c)) +pattern Blank' b <- (ABT.out -> ABT.Tm (Blank b)) +pattern Ref' r <- (ABT.out -> ABT.Tm (Ref r)) +pattern TermLink' r <- (ABT.out -> ABT.Tm (TermLink r)) +pattern TypeLink' r <- (ABT.out -> ABT.Tm (TypeLink r)) +pattern Builtin' r <- (ABT.out -> ABT.Tm (Ref (Builtin r))) +pattern App' f x <- (ABT.out -> ABT.Tm (App f x)) +pattern Match' scrutinee branches <- (ABT.out -> ABT.Tm (Match scrutinee branches)) +pattern Constructor' ref n <- (ABT.out -> ABT.Tm (Constructor ref n)) +pattern Request' ref n <- (ABT.out -> ABT.Tm (Request ref n)) +pattern RequestOrCtor' ref n <- (unReqOrCtor -> Just (ref, n)) +pattern If' cond t f <- (ABT.out -> ABT.Tm (If cond t f)) +pattern And' x y <- (ABT.out -> ABT.Tm (And x 
y)) +pattern Or' x y <- (ABT.out -> ABT.Tm (Or x y)) +pattern Handle' h body <- (ABT.out -> ABT.Tm (Handle h body)) +pattern Apps' f args <- (unApps -> Just (f, args)) +-- begin pretty-printer helper patterns +pattern AppsPred' f args <- (unAppsPred -> Just (f, args)) +pattern BinaryApp' f arg1 arg2 <- (unBinaryApp -> Just (f, arg1, arg2)) +pattern BinaryApps' apps lastArg <- (unBinaryApps -> Just (apps, lastArg)) +pattern BinaryAppsPred' apps lastArg <- (unBinaryAppsPred -> Just (apps, lastArg)) +-- end pretty-printer helper patterns +pattern Ann' x t <- (ABT.out -> ABT.Tm (Ann x t)) +pattern Sequence' xs <- (ABT.out -> ABT.Tm (Sequence xs)) +pattern Lam' subst <- ABT.Tm' (Lam (ABT.Abs' subst)) +pattern LamNamed' v body <- (ABT.out -> ABT.Tm (Lam (ABT.Term _ _ (ABT.Abs v body)))) +pattern LamsNamed' vs body <- (unLams' -> Just (vs, body)) +pattern LamsNamedOpt' vs body <- (unLamsOpt' -> Just (vs, body)) +pattern LamsNamedPred' vs body <- (unLamsPred' -> Just (vs, body)) +pattern LamsNamedOrDelay' vs body <- (unLamsUntilDelay' -> Just (vs, body)) +pattern LamsNamedMatch' vs branches <- (unLamsMatch' -> Just (vs, branches)) +pattern Let1' b subst <- (unLet1 -> Just (_, b, subst)) +pattern Let1Top' top b subst <- (unLet1 -> Just (top, b, subst)) +pattern Let1Named' v b e <- (ABT.Tm' (Let _ b (ABT.out -> ABT.Abs v e))) +pattern Let1NamedTop' top v b e <- (ABT.Tm' (Let top b (ABT.out -> ABT.Abs v e))) +pattern Lets' bs e <- (unLet -> Just (bs, e)) +pattern LetRecNamed' bs e <- (unLetRecNamed -> Just (_,bs,e)) +pattern LetRecNamedTop' top bs e <- (unLetRecNamed -> Just (top,bs,e)) +pattern LetRec' subst <- (unLetRec -> Just (_, subst)) +pattern LetRecTop' top subst <- (unLetRec -> Just (top, subst)) +pattern LetRecNamedAnnotated' ann bs e <- (unLetRecNamedAnnotated -> Just (_, ann, bs,e)) +pattern LetRecNamedAnnotatedTop' top ann bs e <- + (unLetRecNamedAnnotated -> Just (top, ann, bs,e)) + +fresh :: Var v => Term0 v -> v -> v +fresh = ABT.fresh + +-- some smart 
constructors + +var :: a -> v -> Term2 vt at ap v a +var = ABT.annotatedVar + +var' :: Var v => Text -> Term0' vt v +var' = var() . Var.named + +ref :: Ord v => a -> Reference -> Term2 vt at ap v a +ref a r = ABT.tm' a (Ref r) + +refId :: Ord v => a -> Reference.Id -> Term2 vt at ap v a +refId a = ref a . Reference.DerivedId + +termLink :: Ord v => a -> Referent -> Term2 vt at ap v a +termLink a r = ABT.tm' a (TermLink r) + +typeLink :: Ord v => a -> Reference -> Term2 vt at ap v a +typeLink a r = ABT.tm' a (TypeLink r) + +builtin :: Ord v => a -> Text -> Term2 vt at ap v a +builtin a n = ref a (Reference.Builtin n) + +float :: Ord v => a -> Double -> Term2 vt at ap v a +float a d = ABT.tm' a (Float d) + +boolean :: Ord v => a -> Bool -> Term2 vt at ap v a +boolean a b = ABT.tm' a (Boolean b) + +int :: Ord v => a -> Int64 -> Term2 vt at ap v a +int a d = ABT.tm' a (Int d) + +nat :: Ord v => a -> Word64 -> Term2 vt at ap v a +nat a d = ABT.tm' a (Nat d) + +text :: Ord v => a -> Text -> Term2 vt at ap v a +text a = ABT.tm' a . Text + +char :: Ord v => a -> Char -> Term2 vt at ap v a +char a = ABT.tm' a . Char + +watch :: (Var v, Semigroup a) => a -> String -> Term v a -> Term v a +watch a note e = + apps' (builtin a "Debug.watch") [text a (Text.pack note), e] + +watchMaybe :: (Var v, Semigroup a) => Maybe String -> Term v a -> Term v a +watchMaybe Nothing e = e +watchMaybe (Just note) e = watch (ABT.annotation e) note e + +blank :: Ord v => a -> Term2 vt at ap v a +blank a = ABT.tm' a (Blank B.Blank) + +placeholder :: Ord v => a -> String -> Term2 vt a ap v a +placeholder a s = ABT.tm' a . Blank $ B.Recorded (B.Placeholder a s) + +resolve :: Ord v => at -> ab -> String -> Term2 vt ab ap v at +resolve at ab s = ABT.tm' at . 
Blank $ B.Recorded (B.Resolve ab s) + +constructor :: Ord v => a -> Reference -> ConstructorId -> Term2 vt at ap v a +constructor a ref n = ABT.tm' a (Constructor ref n) + +request :: Ord v => a -> Reference -> ConstructorId -> Term2 vt at ap v a +request a ref n = ABT.tm' a (Request ref n) + +-- todo: delete and rename app' to app +app_ :: Ord v => Term0' vt v -> Term0' vt v -> Term0' vt v +app_ f arg = ABT.tm (App f arg) + +app :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a +app a f arg = ABT.tm' a (App f arg) + +match :: Ord v => a -> Term2 vt at a v a -> [MatchCase a (Term2 vt at a v a)] -> Term2 vt at a v a +match a scrutinee branches = ABT.tm' a (Match scrutinee branches) + +handle :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a +handle a h block = ABT.tm' a (Handle h block) + +and :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a +and a x y = ABT.tm' a (And x y) + +or :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a +or a x y = ABT.tm' a (Or x y) + +seq :: Ord v => a -> [Term2 vt at ap v a] -> Term2 vt at ap v a +seq a es = seq' a (Sequence.fromList es) + +seq' :: Ord v => a -> Seq (Term2 vt at ap v a) -> Term2 vt at ap v a +seq' a es = ABT.tm' a (Sequence es) + +apps + :: Ord v + => Term2 vt at ap v a + -> [(a, Term2 vt at ap v a)] + -> Term2 vt at ap v a +apps = foldl' (\f (a, t) -> app a f t) + +apps' + :: (Ord v, Semigroup a) + => Term2 vt at ap v a + -> [Term2 vt at ap v a] + -> Term2 vt at ap v a +apps' = foldl' (\f t -> app (ABT.annotation f <> ABT.annotation t) f t) + +iff :: Ord v => a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a -> Term2 vt at ap v a +iff a cond t f = ABT.tm' a (If cond t f) + +ann_ :: Ord v => Term0' vt v -> Type vt () -> Term0' vt v +ann_ e t = ABT.tm (Ann e t) + +ann :: Ord v + => a + -> Term2 vt at ap v a + -> Type vt at + -> Term2 vt at ap v a +ann a e t = ABT.tm' a (Ann e t) + +-- 
arya: are we sure we want the two annotations to be the same? +lam :: Ord v => a -> v -> Term2 vt at ap v a -> Term2 vt at ap v a +lam a v body = ABT.tm' a (Lam (ABT.abs' a v body)) + +lam' :: Ord v => a -> [v] -> Term2 vt at ap v a -> Term2 vt at ap v a +lam' a vs body = foldr (lam a) body vs + +lam'' :: Ord v => [(a,v)] -> Term2 vt at ap v a -> Term2 vt at ap v a +lam'' vs body = foldr (uncurry lam) body vs + +isLam :: Term2 vt at ap v a -> Bool +isLam t = arity t > 0 + +arity :: Term2 vt at ap v a -> Int +arity (LamNamed' _ body) = 1 + arity body +arity (Ann' e _) = arity e +arity _ = 0 + +unLetRecNamedAnnotated + :: Term' vt v a + -> Maybe + (IsTop, a, [((a, v), Term' vt v a)], Term' vt v a) +unLetRecNamedAnnotated (ABT.CycleA' ann avs (ABT.Tm' (LetRec isTop bs e))) = + Just (isTop, ann, avs `zip` bs, e) +unLetRecNamedAnnotated _ = Nothing + +letRec' + :: (Ord v, Monoid a) + => Bool + -> [(v, Term' vt v a)] + -> Term' vt v a + -> Term' vt v a +letRec' isTop bindings body = + letRec isTop + (foldMap (ABT.annotation . snd) bindings <> ABT.annotation body) + [ ((ABT.annotation b, v), b) | (v,b) <- bindings ] + body + +letRec + :: Ord v + => Bool + -> a + -> [((a, v), Term' vt v a)] + -> Term' vt v a + -> Term' vt v a +letRec _ _ [] e = e +letRec isTop a bindings e = ABT.cycle' + a + (foldr (uncurry ABT.abs' . fst) z bindings) + where z = ABT.tm' a (LetRec isTop (map snd bindings) e) + + +-- | Smart constructor for let rec blocks. Each binding in the block may +-- reference any other binding in the block in its body (including itself), +-- and the output expression may also reference any binding in the block. +letRec_ :: Ord v => IsTop -> [(v, Term0' vt v)] -> Term0' vt v -> Term0' vt v +letRec_ _ [] e = e +letRec_ isTop bindings e = ABT.cycle (foldr (ABT.abs . fst) z bindings) + where + z = ABT.tm (LetRec isTop (map snd bindings) e) + +-- | Smart constructor for let blocks. 
Each binding in the block may +-- reference only previous bindings in the block, not including itself. +-- The output expression may reference any binding in the block. +-- todo: delete me +let1_ :: Ord v => IsTop -> [(v,Term0' vt v)] -> Term0' vt v -> Term0' vt v +let1_ isTop bindings e = foldr f e bindings + where + f (v,b) body = ABT.tm (Let isTop b (ABT.abs v body)) + +-- | annotations are applied to each nested Let expression +let1 + :: Ord v + => IsTop + -> [((a, v), Term2 vt at ap v a)] + -> Term2 vt at ap v a + -> Term2 vt at ap v a +let1 isTop bindings e = foldr f e bindings + where f ((ann, v), b) body = ABT.tm' ann (Let isTop b (ABT.abs' ann v body)) + +let1' + :: (Semigroup a, Ord v) + => IsTop + -> [(v, Term2 vt at ap v a)] + -> Term2 vt at ap v a + -> Term2 vt at ap v a +let1' isTop bindings e = foldr f e bindings + where + ann = ABT.annotation + f (v, b) body = ABT.tm' a (Let isTop b (ABT.abs' a v body)) + where a = ann b <> ann body + +-- let1' :: Var v => [(Text, Term0 vt v)] -> Term0 vt v -> Term0 vt v +-- let1' bs e = let1 [(ABT.v' name, b) | (name,b) <- bs ] e + +unLet1 + :: Var v + => Term' vt v a + -> Maybe (IsTop, Term' vt v a, ABT.Subst (F vt a a) v a) +unLet1 (ABT.Tm' (Let isTop b (ABT.Abs' subst))) = Just (isTop, b, subst) +unLet1 _ = Nothing + +-- | Satisfies `unLet (let' bs e) == Just (bs, e)` +unLet + :: Term2 vt at ap v a + -> Maybe ([(IsTop, v, Term2 vt at ap v a)], Term2 vt at ap v a) +unLet t = fixup (go t) + where + go (ABT.Tm' (Let isTop b (ABT.out -> ABT.Abs v t))) = case go t of + (env, t) -> ((isTop, v, b) : env, t) + go t = ([], t) + fixup ([], _) = Nothing + fixup bst = Just bst + +-- | Satisfies `unLetRec (letRec bs e) == Just (bs, e)` +unLetRecNamed + :: Term2 vt at ap v a + -> Maybe + ( IsTop + , [(v, Term2 vt at ap v a)] + , Term2 vt at ap v a + ) +unLetRecNamed (ABT.Cycle' vs (ABT.Tm' (LetRec isTop bs e))) + | length vs == length bs = Just (isTop, zip vs bs, e) +unLetRecNamed _ = Nothing + +unLetRec + :: (Monad m, Var v) 
+ => Term2 vt at ap v a + -> Maybe + ( IsTop + , (v -> m v) + -> m + ( [(v, Term2 vt at ap v a)] + , Term2 vt at ap v a + ) + ) +unLetRec (unLetRecNamed -> Just (isTop, bs, e)) = Just + ( isTop + , \freshen -> do + vs <- sequence [ freshen v | (v, _) <- bs ] + let sub = ABT.substsInheritAnnotation (map fst bs `zip` map ABT.var vs) + pure (vs `zip` [ sub b | (_, b) <- bs ], sub e) + ) +unLetRec _ = Nothing + +unApps + :: Term2 vt at ap v a + -> Maybe (Term2 vt at ap v a, [Term2 vt at ap v a]) +unApps t = unAppsPred (t, const True) + +-- Same as unApps but taking a predicate controlling whether we match on a given function argument. +unAppsPred :: (Term2 vt at ap v a, Term2 vt at ap v a -> Bool) -> + Maybe (Term2 vt at ap v a, [Term2 vt at ap v a]) +unAppsPred (t, pred) = case go t [] of [] -> Nothing; f:args -> Just (f,args) + where + go (App' i o) acc | pred o = go i (o:acc) + go _ [] = [] + go fn args = fn:args + +unBinaryApp :: Term2 vt at ap v a + -> Maybe (Term2 vt at ap v a, + Term2 vt at ap v a, + Term2 vt at ap v a) +unBinaryApp t = case unApps t of + Just (f, [arg1, arg2]) -> Just (f, arg1, arg2) + _ -> Nothing + +-- "((a1 `f1` a2) `f2` a3)" becomes "Just ([(a2, f2), (a1, f1)], a3)" +unBinaryApps + :: Term2 vt at ap v a + -> Maybe + ( [(Term2 vt at ap v a, Term2 vt at ap v a)] + , Term2 vt at ap v a + ) +unBinaryApps t = unBinaryAppsPred (t, const True) + +-- Same as unBinaryApps but taking a predicate controlling whether we match on a given binary function. 
+unBinaryAppsPred :: (Term2 vt at ap v a + ,Term2 vt at ap v a -> Bool) + -> Maybe ([(Term2 vt at ap v a, + Term2 vt at ap v a)], + Term2 vt at ap v a) +unBinaryAppsPred (t, pred) = case unBinaryApp t of + Just (f, x, y) | pred f -> case unBinaryAppsPred (x, pred) of + Just (as, xLast) -> Just ((xLast, f) : as, y) + Nothing -> Just ([(x, f)], y) + _ -> Nothing + +unLams' + :: Term2 vt at ap v a -> Maybe ([v], Term2 vt at ap v a) +unLams' t = unLamsPred' (t, const True) + +-- Same as unLams', but always matches. Returns an empty [v] if the term doesn't start with a +-- lambda extraction. +unLamsOpt' :: Term2 vt at ap v a -> Maybe ([v], Term2 vt at ap v a) +unLamsOpt' t = case unLams' t of + r@(Just _) -> r + Nothing -> Just ([], t) + +-- Same as unLams', but stops at any variable named `()`, which indicates a +-- delay (`'`) annotation which we want to preserve. +unLamsUntilDelay' + :: Var v + => Term2 vt at ap v a + -> Maybe ([v], Term2 vt at ap v a) +unLamsUntilDelay' t = case unLamsPred' (t, (/=) $ Var.named "()") of + r@(Just _) -> r + Nothing -> Just ([], t) + +-- Same as unLamsUntilDelay', but only matches if the lambda body is a match +-- expression, where the scrutinee is also the last argument of the lambda +unLamsMatch' + :: Var v + => Term2 vt at ap v a + -> Maybe ([v], [MatchCase ap (Term2 vt at ap v a)]) +unLamsMatch' t = case unLamsUntilDelay' t of + Just (reverse -> (v1:vs), Match' (Var' v1') branches) | + (v1 == v1') && not (Set.member v1' (Set.unions $ freeVars <$> branches)) -> + Just (reverse vs, branches) + _ -> Nothing + where + freeVars (MatchCase _ g rhs) = + let guardVars = (fromMaybe Set.empty $ ABT.freeVars <$> g) + rhsVars = (ABT.freeVars rhs) + in Set.union guardVars rhsVars + +-- Same as unLams' but taking a predicate controlling whether we match on a given binary function. 
+unLamsPred' :: (Term2 vt at ap v a, v -> Bool) -> + Maybe ([v], Term2 vt at ap v a) +unLamsPred' (LamNamed' v body, pred) | pred v = case unLamsPred' (body, pred) of + Nothing -> Just ([v], body) + Just (vs, body) -> Just (v:vs, body) +unLamsPred' _ = Nothing + +unReqOrCtor :: Term2 vt at ap v a -> Maybe (Reference, ConstructorId) +unReqOrCtor (Constructor' r cid) = Just (r, cid) +unReqOrCtor (Request' r cid) = Just (r, cid) +unReqOrCtor _ = Nothing + +-- Dependencies including referenced data and effect decls +dependencies :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference +dependencies t = Set.map (LD.fold id Referent.toReference) (labeledDependencies t) + +typeDependencies :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference +typeDependencies = + Set.fromList . mapMaybe (LD.fold Just (const Nothing)) . toList . labeledDependencies + +-- Gets the types to which this term contains references via patterns and +-- data constructors. +constructorDependencies + :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference +constructorDependencies = + Set.unions + . generalizedDependencies (const mempty) + (const mempty) + Set.singleton + (const . Set.singleton) + Set.singleton + (const . Set.singleton) + Set.singleton + +generalizedDependencies + :: (Ord v, Ord vt, Ord r) + => (Reference -> r) + -> (Reference -> r) + -> (Reference -> r) + -> (Reference -> ConstructorId -> r) + -> (Reference -> r) + -> (Reference -> ConstructorId -> r) + -> (Reference -> r) + -> Term2 vt at ap v a + -> Set r +generalizedDependencies termRef typeRef literalType dataConstructor dataType effectConstructor effectType + = Set.fromList . Writer.execWriter . 
ABT.visit' f where + f t@(Ref r) = Writer.tell [termRef r] $> t + f t@(TermLink r) = case r of + Referent.Ref r -> Writer.tell [termRef r] $> t + Referent.Con r id CT.Data -> Writer.tell [dataConstructor r id] $> t + Referent.Con r id CT.Effect -> Writer.tell [effectConstructor r id] $> t + f t@(TypeLink r) = Writer.tell [typeRef r] $> t + f t@(Ann _ typ) = + Writer.tell (map typeRef . toList $ Type.dependencies typ) $> t + f t@(Nat _) = Writer.tell [literalType Type.natRef] $> t + f t@(Int _) = Writer.tell [literalType Type.intRef] $> t + f t@(Float _) = Writer.tell [literalType Type.floatRef] $> t + f t@(Boolean _) = Writer.tell [literalType Type.booleanRef] $> t + f t@(Text _) = Writer.tell [literalType Type.textRef] $> t + f t@(Sequence _) = Writer.tell [literalType Type.vectorRef] $> t + f t@(Constructor r cid) = + Writer.tell [dataType r, dataConstructor r cid] $> t + f t@(Request r cid) = + Writer.tell [effectType r, effectConstructor r cid] $> t + f t@(Match _ cases) = traverse_ goPat cases $> t + f t = pure t + goPat (MatchCase pat _ _) = + Writer.tell . 
toList $ Pattern.generalizedDependencies literalType + dataConstructor + dataType + effectConstructor + effectType + pat + +labeledDependencies + :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set LabeledDependency +labeledDependencies = generalizedDependencies LD.termRef + LD.typeRef + LD.typeRef + LD.dataConstructor + LD.typeRef + LD.effectConstructor + LD.typeRef + +updateDependencies + :: Ord v + => Map Reference Reference + -> Map Reference Reference + -> Term v a + -> Term v a +updateDependencies termUpdates typeUpdates = ABT.rebuildUp go + where + -- todo: this function might need tweaking if we ever allow type replacements + -- would need to look inside pattern matching and constructor calls + go (Ref r ) = Ref (Map.findWithDefault r r termUpdates) + go (TermLink (Referent.Ref r)) = TermLink (Referent.Ref $ Map.findWithDefault r r termUpdates) + go (TypeLink r) = TypeLink (Map.findWithDefault r r typeUpdates) + go (Ann tm tp) = Ann tm $ Type.updateDependencies typeUpdates tp + go f = f + +-- | If the outermost term is a function application, +-- perform substitution of the argument into the body +betaReduce :: Var v => Term0 v -> Term0 v +betaReduce (App' (Lam' f) arg) = ABT.bind f arg +betaReduce e = e + +betaNormalForm :: Var v => Term0 v -> Term0 v +betaNormalForm (App' f a) = betaNormalForm (betaReduce (app() (betaNormalForm f) a)) +betaNormalForm e = e + +-- x -> f x => f +etaNormalForm :: Eq v => Term0 v -> Term0 v +etaNormalForm (LamNamed' v (App' f (Var' v'))) | v == v' = etaNormalForm f +etaNormalForm t = t + +-- This converts `Reference`s it finds that are in the input `Map` +-- back to free variables +unhashComponent :: forall v a. Var v + => Map Reference (Term v a) + -> Map Reference (v, Term v a) +unhashComponent m = let + usedVars = foldMap (Set.fromList . 
ABT.allVars) m + m' :: Map Reference (v, Term v a) + m' = evalState (Map.traverseWithKey assignVar m) usedVars where + assignVar r t = (,t) <$> ABT.freshenS (Var.refNamed r) + unhash1 = ABT.rebuildUp' go where + go e@(Ref' r) = case Map.lookup r m' of + Nothing -> e + Just (v, _) -> var (ABT.annotation e) v + go e = e + in second unhash1 <$> m' + +hashComponents + :: Var v => Map v (Term v a) -> Map v (Reference.Id, Term v a) +hashComponents = ReferenceUtil.hashComponents $ refId () + +-- The hash for a constructor +hashConstructor' + :: (Reference -> ConstructorId -> Term0 Symbol) -> Reference -> ConstructorId -> Reference +hashConstructor' f r cid = + let +-- this is a bit circuitous, but defining everything in terms of hashComponents +-- ensures that the hashing is always done in the same way + m = hashComponents (Map.fromList [(Var.named "_" :: Symbol, f r cid)]) + in case toList m of + [(r, _)] -> Reference.DerivedId r + _ -> error "unpossible" + +hashConstructor :: Reference -> ConstructorId -> Reference +hashConstructor = hashConstructor' $ constructor () + +hashRequest :: Reference -> ConstructorId -> Reference +hashRequest = hashConstructor' $ request () + +fromReferent :: Ord v + => a + -> Referent + -> Term2 vt at ap v a +fromReferent a = \case + Referent.Ref r -> ref a r + Referent.Con r i ct -> case ct of + CT.Data -> constructor a r i + CT.Effect -> request a r i + +instance Var v => Hashable1 (F v a p) where + hash1 hashCycle hash e + = let (tag, hashed, varint) = + (Hashable.Tag, Hashable.Hashed, Hashable.Nat . fromIntegral) + in + case e of + -- So long as `Reference.Derived` ctors are created using the same + -- hashing function as is used here, this case ensures that references + -- are 'transparent' wrt hash and hashing is unaffected by whether + -- expressions are linked. So for example `x = 1 + 1` and `y = x` hash + -- the same. 
+ Ref (Reference.Derived h 0 1) -> Hashable.fromBytes (Hash.toBytes h) + Ref (Reference.Derived h i n) -> Hashable.accumulate + [ tag 1 + , hashed $ Hashable.fromBytes (Hash.toBytes h) + , Hashable.Nat i + , Hashable.Nat n + ] + -- Note: start each layer with leading `1` byte, to avoid collisions + -- with types, which start each layer with leading `0`. + -- See `Hashable1 Type.F` + _ -> + Hashable.accumulate + $ tag 1 + : case e of + Nat i -> [tag 64, accumulateToken i] + Int i -> [tag 65, accumulateToken i] + Float n -> [tag 66, Hashable.Double n] + Boolean b -> [tag 67, accumulateToken b] + Text t -> [tag 68, accumulateToken t] + Char c -> [tag 69, accumulateToken c] + Blank b -> tag 1 : case b of + B.Blank -> [tag 0] + B.Recorded (B.Placeholder _ s) -> + [tag 1, Hashable.Text (Text.pack s)] + B.Recorded (B.Resolve _ s) -> + [tag 2, Hashable.Text (Text.pack s)] + Ref (Reference.Builtin name) -> [tag 2, accumulateToken name] + Ref Reference.Derived {} -> + error "handled above, but GHC can't figure this out" + App a a2 -> [tag 3, hashed (hash a), hashed (hash a2)] + Ann a t -> [tag 4, hashed (hash a), hashed (ABT.hash t)] + Sequence as -> tag 5 : varint (Sequence.length as) : map + (hashed . hash) + (toList as) + Lam a -> [tag 6, hashed (hash a)] + -- note: we use `hashCycle` to ensure result is independent of + -- let binding order + LetRec _ as a -> case hashCycle as of + (hs, hash) -> tag 7 : hashed (hash a) : map hashed hs + -- here, order is significant, so don't use hashCycle + Let _ b a -> [tag 8, hashed $ hash b, hashed $ hash a] + If b t f -> + [tag 9, hashed $ hash b, hashed $ hash t, hashed $ hash f] + Request r n -> [tag 10, accumulateToken r, varint n] + Constructor r n -> [tag 12, accumulateToken r, varint n] + Match e branches -> + tag 13 : hashed (hash e) : concatMap h branches + where + h (MatchCase pat guard branch) = concat + [ [accumulateToken pat] + , toList (hashed . 
hash <$> guard) + , [hashed (hash branch)] + ] + Handle h b -> [tag 15, hashed $ hash h, hashed $ hash b] + And x y -> [tag 16, hashed $ hash x, hashed $ hash y] + Or x y -> [tag 17, hashed $ hash x, hashed $ hash y] + TermLink r -> [tag 18, accumulateToken r] + TypeLink r -> [tag 19, accumulateToken r] + _ -> + error $ "unhandled case in hash: " <> show (void e) + +-- mostly boring serialization code below ... + +instance (Eq a, ABT.Var v) => Eq1 (F v a p) where (==#) = (==) +instance (Show v) => Show1 (F v a p) where showsPrec1 = showsPrec + +instance (ABT.Var vt, Eq at, Eq a) => Eq (F vt at p a) where + Int x == Int y = x == y + Nat x == Nat y = x == y + Float x == Float y = x == y + Boolean x == Boolean y = x == y + Text x == Text y = x == y + Char x == Char y = x == y + Blank b == Blank q = b == q + Ref x == Ref y = x == y + TermLink x == TermLink y = x == y + TypeLink x == TypeLink y = x == y + Constructor r cid == Constructor r2 cid2 = r == r2 && cid == cid2 + Request r cid == Request r2 cid2 = r == r2 && cid == cid2 + Handle h b == Handle h2 b2 = h == h2 && b == b2 + App f a == App f2 a2 = f == f2 && a == a2 + Ann e t == Ann e2 t2 = e == e2 && t == t2 + Sequence v == Sequence v2 = v == v2 + If a b c == If a2 b2 c2 = a == a2 && b == b2 && c == c2 + And a b == And a2 b2 = a == a2 && b == b2 + Or a b == Or a2 b2 = a == a2 && b == b2 + Lam a == Lam b = a == b + LetRec _ bs body == LetRec _ bs2 body2 = bs == bs2 && body == body2 + Let _ binding body == Let _ binding2 body2 = + binding == binding2 && body == body2 + Match scrutinee cases == Match s2 cs2 = scrutinee == s2 && cases == cs2 + _ == _ = False + + +instance (Show v, Show a) => Show (F v a0 p a) where + showsPrec = go + where + showConstructor r n = shows r <> s "#" <> shows n + go _ (Int n ) = (if n >= 0 then s "+" else s "") <> shows n + go _ (Nat n ) = shows n + go _ (Float n ) = shows n + go _ (Boolean True ) = s "true" + go _ (Boolean False) = s "false" + go p (Ann t k) = showParen (p > 1) $ shows t 
<> s ":" <> shows k + go p (App f x) = showParen (p > 9) $ showsPrec 9 f <> s " " <> showsPrec 10 x + go _ (Lam body ) = showParen True (s "λ " <> shows body) + go _ (Sequence vs ) = showListWith shows (toList vs) + go _ (Blank b ) = case b of + B.Blank -> s "_" + B.Recorded (B.Placeholder _ r) -> s ("_" ++ r) + B.Recorded (B.Resolve _ r) -> s r + go _ (Ref r) = s "Ref(" <> shows r <> s ")" + go _ (TermLink r) = s "TermLink(" <> shows r <> s ")" + go _ (TypeLink r) = s "TypeLink(" <> shows r <> s ")" + go _ (Let _ b body) = + showParen True (s "let " <> shows b <> s " in " <> shows body) + go _ (LetRec _ bs body) = showParen + True + (s "let rec" <> shows bs <> s " in " <> shows body) + go _ (Handle b body) = showParen + True + (s "handle " <> shows b <> s " in " <> shows body) + go _ (Constructor r n ) = showConstructor r n + go _ (Match scrutinee cases) = showParen + True + (s "case " <> shows scrutinee <> s " of " <> shows cases) + go _ (Text s ) = shows s + go _ (Char c ) = shows c + go _ (Request r n) = showConstructor r n + go p (If c t f) = + showParen (p > 0) + $ s "if " + <> shows c + <> s " then " + <> shows t + <> s " else " + <> shows f + go p (And x y) = + showParen (p > 0) $ s "and " <> shows x <> s " " <> shows y + go p (Or x y) = + showParen (p > 0) $ s "or " <> shows x <> s " " <> shows y + (<>) = (.) 
+ s = showString diff --git a/unison-core/src/Unison/Type.hs b/unison-core/src/Unison/Type.hs new file mode 100644 index 0000000000..3fb6beeaa6 --- /dev/null +++ b/unison-core/src/Unison/Type.hs @@ -0,0 +1,648 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Type where + +import Unison.Prelude + +import qualified Control.Monad.Writer.Strict as Writer +import Data.Functor.Identity (runIdentity) +import Data.Monoid (Any(..)) +import Data.List.Extra (nubOrd) +import qualified Data.Map as Map +import qualified Data.Set as Set +import Prelude.Extras (Eq1(..),Show1(..),Ord1(..)) +import qualified Unison.ABT as ABT +import Unison.Hashable (Hashable1) +import qualified Unison.Hashable as Hashable +import qualified Unison.Kind as K +import Unison.Reference (Reference) +import qualified Unison.Reference as Reference +import qualified Unison.Reference.Util as ReferenceUtil +import Unison.Var (Var) +import qualified Unison.Var as Var +import qualified Unison.Settings as Settings +import qualified Unison.Util.Relation as R +import qualified Unison.Names3 as Names +import qualified Unison.Name as Name +import qualified Unison.Util.List as List + +-- | Base functor for types in the Unison language +data F a + = Ref Reference + | Arrow a a + | Ann a K.Kind + | App a a + | Effect a a + | Effects [a] + | Forall a + | IntroOuter a -- binder like ∀, used to introduce variables that are + -- bound by outer type signatures, to support scoped type + -- variables + deriving (Foldable,Functor,Generic,Generic1,Eq,Ord,Traversable) + +instance Eq1 F where (==#) = (==) +instance Ord1 F where compare1 = compare +instance Show1 F where showsPrec1 = showsPrec + +-- | Types are represented as ABTs over the base functor F, with variables in `v` +type Type v a = ABT.Term F v a + +wrapV :: Ord v => 
Type v a -> Type (ABT.V v) a +wrapV = ABT.vmap ABT.Bound + +freeVars :: Type v a -> Set v +freeVars = ABT.freeVars + +bindExternal + :: ABT.Var v => [(v, Reference)] -> Type v a -> Type v a +bindExternal bs = ABT.substsInheritAnnotation [ (v, ref () r) | (v, r) <- bs ] + +bindNames + :: Var v + => Set v + -> Names.Names0 + -> Type v a + -> Names.ResolutionResult v a (Type v a) +bindNames keepFree ns t = let + fvs = ABT.freeVarOccurrences keepFree t + rs = [(v, a, R.lookupDom (Name.fromVar v) (Names.types0 ns)) | (v,a) <- fvs ] + ok (v, a, rs) = if Set.size rs == 1 then pure (v, Set.findMin rs) + else Left (pure (Names.TypeResolutionFailure v a rs)) + in List.validate ok rs <&> \es -> bindExternal es t + +newtype Monotype v a = Monotype { getPolytype :: Type v a } deriving Eq + +instance (Show v) => Show (Monotype v a) where + show = show . getPolytype + +-- Smart constructor which checks if a `Type` has no `Forall` quantifiers. +monotype :: ABT.Var v => Type v a -> Maybe (Monotype v a) +monotype t = Monotype <$> ABT.visit isMono t where + isMono (Forall' _) = Just Nothing + isMono _ = Nothing + +arity :: Type v a -> Int +arity (ForallNamed' _ body) = arity body +arity (Arrow' _ o) = 1 + arity o +arity (Ann' a _) = arity a +arity _ = 0 + +-- some smart patterns +pattern Ref' r <- ABT.Tm' (Ref r) +pattern Arrow' i o <- ABT.Tm' (Arrow i o) +pattern Arrows' spine <- (unArrows -> Just spine) +pattern EffectfulArrows' fst rest <- (unEffectfulArrows -> Just (fst, rest)) +pattern Ann' t k <- ABT.Tm' (Ann t k) +pattern App' f x <- ABT.Tm' (App f x) +pattern Apps' f args <- (unApps -> Just (f, args)) +pattern Pure' t <- (unPure -> Just t) +pattern Effects' es <- ABT.Tm' (Effects es) +-- Effect1' must match at least one effect +pattern Effect1' e t <- ABT.Tm' (Effect e t) +pattern Effect' es t <- (unEffects1 -> Just (es, t)) +pattern Effect'' es t <- (unEffect0 -> (es, t)) +-- Effect0' may match zero effects +pattern Effect0' es t <- (unEffect0 -> (es, t)) +pattern Forall' 
subst <- ABT.Tm' (Forall (ABT.Abs' subst)) +pattern IntroOuter' subst <- ABT.Tm' (IntroOuter (ABT.Abs' subst)) +pattern IntroOuterNamed' v body <- ABT.Tm' (IntroOuter (ABT.out -> ABT.Abs v body)) +pattern ForallsNamed' vs body <- (unForalls -> Just (vs, body)) +pattern ForallNamed' v body <- ABT.Tm' (Forall (ABT.out -> ABT.Abs v body)) +pattern Var' v <- ABT.Var' v +pattern Cycle' xs t <- ABT.Cycle' xs t +pattern Abs' subst <- ABT.Abs' subst + +unPure :: Ord v => Type v a -> Maybe (Type v a) +unPure (Effect'' [] t) = Just t +unPure (Effect'' _ _) = Nothing +unPure t = Just t + +unArrows :: Type v a -> Maybe [Type v a] +unArrows t = + case go t of [_] -> Nothing; l -> Just l + where go (Arrow' i o) = i : go o + go o = [o] + +unEffectfulArrows + :: Type v a -> Maybe (Type v a, [(Maybe [Type v a], Type v a)]) +unEffectfulArrows t = case t of + Arrow' i o -> Just (i, go o) + _ -> Nothing + where + go (Effect1' (Effects' es) (Arrow' i o)) = + (Just $ es >>= flattenEffects, i) : go o + go (Effect1' (Effects' es) t) = [(Just $ es >>= flattenEffects, t)] + go (Arrow' i o) = (Nothing, i) : go o + go t = [(Nothing, t)] + +unApps :: Type v a -> Maybe (Type v a, [Type v a]) +unApps t = case go t [] of + [] -> Nothing + [ _ ] -> Nothing + f : args -> Just (f, args) + where + go (App' i o) acc = go i (o : acc) + go fn args = fn : args + +unIntroOuters :: Type v a -> Maybe ([v], Type v a) +unIntroOuters t = go t [] + where go (IntroOuterNamed' v body) vs = go body (v:vs) + go _body [] = Nothing + go body vs = Just (reverse vs, body) + +-- Most code doesn't care about `introOuter` binders and is fine dealing with the +-- these outer variable references as free variables. This function strips out +-- one or more `introOuter` binders, so `outer a b . (a, b)` becomes `(a, b)`. 
+stripIntroOuters :: Type v a -> Type v a +stripIntroOuters t = case unIntroOuters t of + Just (_, t) -> t + Nothing -> t + +unForalls :: Type v a -> Maybe ([v], Type v a) +unForalls t = go t [] + where go (ForallNamed' v body) vs = go body (v:vs) + go _body [] = Nothing + go body vs = Just(reverse vs, body) + +unEffect0 :: Ord v => Type v a -> ([Type v a], Type v a) +unEffect0 (Effect1' e a) = (flattenEffects e, a) +unEffect0 t = ([], t) + +unEffects1 :: Ord v => Type v a -> Maybe ([Type v a], Type v a) +unEffects1 (Effect1' (Effects' es) a) = Just (es, a) +unEffects1 _ = Nothing + +-- | True if the given type is a function, possibly quantified +isArrow :: ABT.Var v => Type v a -> Bool +isArrow (ForallNamed' _ t) = isArrow t +isArrow (Arrow' _ _) = True +isArrow _ = False + +-- some smart constructors + +--vectorOf :: Ord v => a -> Type v a -> Type v +--vectorOf a t = vector `app` t + +ref :: Ord v => a -> Reference -> Type v a +ref a = ABT.tm' a . Ref + +refId :: Ord v => a -> Reference.Id -> Type v a +refId a = ref a . Reference.DerivedId + +termLink :: Ord v => a -> Type v a +termLink a = ABT.tm' a . Ref $ termLinkRef + +typeLink :: Ord v => a -> Type v a +typeLink a = ABT.tm' a . 
Ref $ typeLinkRef + +derivedBase32Hex :: Ord v => Reference -> a -> Type v a +derivedBase32Hex r a = ref a r + +-- derivedBase58' :: Text -> Reference +-- derivedBase58' base58 = Reference.derivedBase58 base58 0 1 + +intRef, natRef, floatRef, booleanRef, textRef, charRef, vectorRef, bytesRef, effectRef, termLinkRef, typeLinkRef :: Reference +intRef = Reference.Builtin "Int" +natRef = Reference.Builtin "Nat" +floatRef = Reference.Builtin "Float" +booleanRef = Reference.Builtin "Boolean" +textRef = Reference.Builtin "Text" +charRef = Reference.Builtin "Char" +vectorRef = Reference.Builtin "Sequence" +bytesRef = Reference.Builtin "Bytes" +effectRef = Reference.Builtin "Effect" +termLinkRef = Reference.Builtin "Link.Term" +typeLinkRef = Reference.Builtin "Link.Type" + +builtinIORef, fileHandleRef, threadIdRef, socketRef :: Reference +builtinIORef = Reference.Builtin "IO" +fileHandleRef = Reference.Builtin "Handle" +threadIdRef = Reference.Builtin "ThreadId" +socketRef = Reference.Builtin "Socket" + +mvarRef :: Reference +mvarRef = Reference.Builtin "MVar" + +builtin :: Ord v => a -> Text -> Type v a +builtin a = ref a . 
Reference.Builtin + +int :: Ord v => a -> Type v a +int a = ref a intRef + +nat :: Ord v => a -> Type v a +nat a = ref a natRef + +float :: Ord v => a -> Type v a +float a = ref a floatRef + +boolean :: Ord v => a -> Type v a +boolean a = ref a booleanRef + +text :: Ord v => a -> Type v a +text a = ref a textRef + +char :: Ord v => a -> Type v a +char a = ref a charRef + +fileHandle :: Ord v => a -> Type v a +fileHandle a = ref a fileHandleRef + +threadId :: Ord v => a -> Type v a +threadId a = ref a threadIdRef + +builtinIO :: Ord v => a -> Type v a +builtinIO a = ref a builtinIORef + +socket :: Ord v => a -> Type v a +socket a = ref a socketRef + +vector :: Ord v => a -> Type v a +vector a = ref a vectorRef + +bytes :: Ord v => a -> Type v a +bytes a = ref a bytesRef + +effectType :: Ord v => a -> Type v a +effectType a = ref a $ effectRef + +app :: Ord v => a -> Type v a -> Type v a -> Type v a +app a f arg = ABT.tm' a (App f arg) + +-- `f x y z` means `((f x) y) z` and the annotation paired with `y` is the one +-- meant for `app (f x) y` +apps :: Ord v => Type v a -> [(a, Type v a)] -> Type v a +apps = foldl' go where go f (a, t) = app a f t + +app' :: (Ord v, Semigroup a) => Type v a -> Type v a -> Type v a +app' f arg = app (ABT.annotation f <> ABT.annotation arg) f arg + +apps' :: (Semigroup a, Ord v) => Type v a -> [Type v a] -> Type v a +apps' = foldl app' + +arrow :: Ord v => a -> Type v a -> Type v a -> Type v a +arrow a i o = ABT.tm' a (Arrow i o) + +arrow' :: (Semigroup a, Ord v) => Type v a -> Type v a -> Type v a +arrow' i o = arrow (ABT.annotation i <> ABT.annotation o) i o + +ann :: Ord v => a -> Type v a -> K.Kind -> Type v a +ann a e t = ABT.tm' a (Ann e t) + +forall :: Ord v => a -> v -> Type v a -> Type v a +forall a v body = ABT.tm' a (Forall (ABT.abs' a v body)) + +introOuter :: Ord v => a -> v -> Type v a -> Type v a +introOuter a v body = ABT.tm' a (IntroOuter (ABT.abs' a v body)) + +iff :: Var v => Type v () +iff = forall () aa $ arrows (f 
<$> [boolean(), a, a]) a + where aa = Var.named "a" + a = var () aa + f x = ((), x) + +iff' :: Var v => a -> Type v a +iff' loc = forall loc aa $ arrows (f <$> [boolean loc, a, a]) a + where aa = Var.named "a" + a = var loc aa + f x = (loc, x) + +iff2 :: Var v => a -> Type v a +iff2 loc = forall loc aa $ arrows (f <$> [a, a]) a + where aa = Var.named "a" + a = var loc aa + f x = (loc, x) + +andor :: Ord v => Type v () +andor = arrows (f <$> [boolean(), boolean()]) $ boolean() + where f x = ((), x) + +andor' :: Ord v => a -> Type v a +andor' a = arrows (f <$> [boolean a, boolean a]) $ boolean a + where f x = (a, x) + +var :: Ord v => a -> v -> Type v a +var = ABT.annotatedVar + +v' :: Var v => Text -> Type v () +v' s = ABT.var (Var.named s) + +-- Like `v'`, but creates an annotated variable given an annotation +av' :: Var v => a -> Text -> Type v a +av' a s = ABT.annotatedVar a (Var.named s) + +forall' :: Var v => a -> [Text] -> Type v a -> Type v a +forall' a vs body = foldr (forall a) body (Var.named <$> vs) + +foralls :: Ord v => a -> [v] -> Type v a -> Type v a +foralls a vs body = foldr (forall a) body vs + +-- Note: `a -> b -> c` parses as `a -> (b -> c)` +-- the annotation associated with `b` will be the annotation for the `b -> c` +-- node +arrows :: Ord v => [(a, Type v a)] -> Type v a -> Type v a +arrows ts result = foldr go result ts where + go = uncurry arrow + +-- The types of effectful computations +effect :: Ord v => a -> [Type v a] -> Type v a -> Type v a +effect a es (Effect1' fs t) = + let es' = (es >>= flattenEffects) ++ flattenEffects fs + in ABT.tm' a (Effect (ABT.tm' a (Effects es')) t) +effect a es t = ABT.tm' a (Effect (ABT.tm' a (Effects es)) t) + +effects :: Ord v => a -> [Type v a] -> Type v a +effects a es = ABT.tm' a (Effects $ es >>= flattenEffects) + +effect1 :: Ord v => a -> Type v a -> Type v a -> Type v a +effect1 a es (Effect1' fs t) = + let es' = flattenEffects es ++ flattenEffects fs + in ABT.tm' a (Effect (ABT.tm' a (Effects 
es')) t) +effect1 a es t = ABT.tm' a (Effect es t) + +flattenEffects :: Type v a -> [Type v a] +flattenEffects (Effects' es) = es >>= flattenEffects +flattenEffects es = [es] + +-- The types of first-class effect values +-- which get deconstructed in effect handlers. +effectV :: Ord v => a -> (a, Type v a) -> (a, Type v a) -> Type v a +effectV builtinA e t = apps (builtin builtinA "Effect") [e, t] + +-- Strips effects from a type. E.g. `{e} a` becomes `a`. +stripEffect :: Ord v => Type v a -> ([Type v a], Type v a) +stripEffect (Effect' e t) = case stripEffect t of (ei, t) -> (e ++ ei, t) +stripEffect t = ([], t) + +-- The type of the flipped function application operator: +-- `(a -> (a -> b) -> b)` +flipApply :: Var v => Type v () -> Type v () +flipApply t = forall() b $ arrow() (arrow() t (var() b)) (var() b) + where b = ABT.fresh t (Var.named "b") + +generalize' :: Var v => Var.Type -> Type v a -> Type v a +generalize' k t = generalize vsk t where + vsk = [ v | v <- Set.toList (freeVars t), Var.typeOf v == k ] + +-- | Bind the given variables with an outer `forall`, if they are used in `t`. +generalize :: Ord v => [v] -> Type v a -> Type v a +generalize vs t = foldr f t vs + where + f v t = + if Set.member v (ABT.freeVars t) then forall (ABT.annotation t) v t else t + +unforall :: Type v a -> Type v a +unforall (ForallsNamed' _ t) = t +unforall t = t + +unforall' :: Type v a -> ([v], Type v a) +unforall' (ForallsNamed' vs t) = (vs, t) +unforall' t = ([], t) + +dependencies :: Ord v => Type v a -> Set Reference +dependencies t = Set.fromList . Writer.execWriter $ ABT.visit' f t + where f t@(Ref r) = Writer.tell [r] $> t + f t = pure t + +updateDependencies :: Ord v => Map Reference Reference -> Type v a -> Type v a +updateDependencies typeUpdates = ABT.rebuildUp go + where + go (Ref r) = Ref (Map.findWithDefault r r typeUpdates) + go f = f + +usesEffects :: Ord v => Type v a -> Bool +usesEffects t = getAny . 
getConst $ ABT.visit go t where + go (Effect1' _ _) = Just (Const (Any True)) + go _ = Nothing + +-- Returns free effect variables in the given type, for instance, in: +-- +-- ∀ e3 . a ->{e,e2} b ->{e3} c +-- +-- This function would return the set {e, e2}, but not `e3` since `e3` +-- is bound by the enclosing forall. +freeEffectVars :: Ord v => Type v a -> Set v +freeEffectVars t = + Set.fromList . join . runIdentity $ + ABT.foreachSubterm go (snd <$> ABT.annotateBound t) + where + go t@(Effects' es) = + let frees = Set.fromList [ v | Var' v <- es >>= flattenEffects ] + in pure . Set.toList $ frees `Set.difference` ABT.annotation t + go t@(Effect1' e _) = + let frees = Set.fromList [ v | Var' v <- flattenEffects e ] + in pure . Set.toList $ frees `Set.difference` ABT.annotation t + go _ = pure [] + +existentializeArrows :: (Ord v, Monad m) => m v -> Type v a -> m (Type v a) +existentializeArrows freshVar = ABT.visit go + where + go t@(Arrow' a b) = case b of + Effect1' _ _ -> Nothing + _ -> Just $ do + e <- freshVar + a <- existentializeArrows freshVar a + b <- existentializeArrows freshVar b + let ann = ABT.annotation t + pure $ arrow ann a (effect ann [var ann e] b) + go _ = Nothing + +-- Remove free effect variables from the type that are in the set +removeEffectVars :: ABT.Var v => Set v -> Type v a -> Type v a +removeEffectVars removals t = + let z = effects () [] + t' = ABT.substsInheritAnnotation ((,z) <$> Set.toList removals) t + -- leave explicitly empty `{}` alone + removeEmpty (Effect1' (Effects' []) v) = Just (ABT.visitPure removeEmpty v) + removeEmpty t@(Effect1' e v) = + case flattenEffects e of + [] -> Just (ABT.visitPure removeEmpty v) + es -> Just (effect (ABT.annotation t) es $ ABT.visitPure removeEmpty v) + removeEmpty t@(Effects' es) = + Just $ effects (ABT.annotation t) (es >>= flattenEffects) + removeEmpty _ = Nothing + in ABT.visitPure removeEmpty t' + +-- Remove all effect variables from the type. 
+-- Used for type-based search, we apply this transformation to both the +-- indexed type and the query type, so the user can supply `a -> b` that will +-- match `a ->{e} b` (but not `a ->{IO} b`). +removeAllEffectVars :: ABT.Var v => Type v a -> Type v a +removeAllEffectVars t = let + allEffectVars = foldMap go (ABT.subterms t) + go (Effects' vs) = Set.fromList [ v | Var' v <- vs] + go (Effect1' (Var' v) _) = Set.singleton v + go _ = mempty + (vs, tu) = unforall' t + in generalize vs (removeEffectVars allEffectVars tu) + +removePureEffects :: ABT.Var v => Type v a -> Type v a +removePureEffects t | not Settings.removePureEffects = t + | otherwise = + generalize vs $ removeEffectVars (Set.filter isPure fvs) tu + where + (vs, tu) = unforall' t + fvs = freeEffectVars tu `Set.difference` ABT.freeVars t + -- If an effect variable is mentioned only once, it is on + -- an arrow `a ->{e} b`. Generalizing this to + -- `∀ e . a ->{e} b` gives us the pure arrow `a -> b`. + isPure v = ABT.occurrences v tu <= 1 + +editFunctionResult + :: forall v a + . Ord v + => (Type v a -> Type v a) + -> Type v a + -> Type v a +editFunctionResult f = go + where + go :: Type v a -> Type v a + go (ABT.Term s a t) = case t of + ABT.Tm (Forall t) -> + (\x -> ABT.Term (s <> freeVars x) a . ABT.Tm $ Forall x) $ go t + ABT.Tm (Arrow i o) -> + (\x -> ABT.Term (s <> freeVars x) a . ABT.Tm $ Arrow i x) $ go o + ABT.Abs v r -> + (\x -> ABT.Term (s <> freeVars x) a $ ABT.Abs v x) $ go r + _ -> f (ABT.Term s a t) + +functionResult :: Type v a -> Maybe (Type v a) +functionResult = go False + where + go inArr (ForallNamed' _ body) = go inArr body + go _inArr (Arrow' _i o ) = go True o + go inArr t = if inArr then Just t else Nothing + + +-- | Bind all free variables (not in `except`) that start with a lowercase +-- letter and are unqualified with an outer `forall`. +-- `a -> a` becomes `∀ a . 
a -> a` +-- `B -> B` becomes `B -> B` (not changed) +-- `.foo -> .foo` becomes `.foo -> .foo` (not changed) +-- `.foo.bar -> blarrg.woot` becomes `.foo.bar -> blarrg.woot` (unchanged) +generalizeLowercase :: Var v => Set v -> Type v a -> Type v a +generalizeLowercase except t = foldr (forall (ABT.annotation t)) t vars + where + vars = + [ v | v <- Set.toList (ABT.freeVars t `Set.difference` except), Var.universallyQuantifyIfFree v ] + +-- Convert all free variables in `allowed` to variables bound by an `introOuter`. +freeVarsToOuters :: Ord v => Set v -> Type v a -> Type v a +freeVarsToOuters allowed t = foldr (introOuter (ABT.annotation t)) t vars + where vars = Set.toList $ ABT.freeVars t `Set.intersection` allowed + +-- | This function removes all variable shadowing from the types and reduces +-- fresh ids to the minimum possible to avoid ambiguity. Useful when showing +-- two different types. +cleanupVars :: Var v => [Type v a] -> [Type v a] +cleanupVars ts | not Settings.cleanupTypes = ts +cleanupVars ts = let + changedVars = cleanupVarsMap ts + in cleanupVars1' changedVars <$> ts + +-- Compute a variable replacement map from a collection of types, which +-- can be passed to `cleanupVars1'`. This is used to cleanup variable ids +-- for multiple related types, like when reporting a type error. +cleanupVarsMap :: Var v => [Type v a] -> Map.Map v v +cleanupVarsMap ts = let + varsByName = foldl' step Map.empty (ts >>= ABT.allVars) + step m v = Map.insertWith (++) (Var.name $ Var.reset v) [v] m + changedVars = Map.fromList [ (v, Var.freshenId i v) + | (_, vs) <- Map.toList varsByName + , (v,i) <- nubOrd vs `zip` [0..]] + in changedVars + +cleanupVars1' :: Var v => Map.Map v v -> Type v a -> Type v a +cleanupVars1' = ABT.changeVars + +-- | This function removes all variable shadowing from the type and reduces +-- fresh ids to the minimum possible to avoid ambiguity. 
+cleanupVars1 :: Var v => Type v a -> Type v a +cleanupVars1 t | not Settings.cleanupTypes = t +cleanupVars1 t = let [t'] = cleanupVars [t] in t' + +-- This removes duplicates and normalizes the order of ability lists +cleanupAbilityLists :: Var v => Type v a -> Type v a +cleanupAbilityLists = ABT.visitPure go + where + -- leave explicitly empty `{}` alone + go (Effect1' (Effects' []) _v) = Nothing + go t@(Effect1' e v) = + let es = Set.toList . Set.fromList $ flattenEffects e + in case es of + [] -> Just (ABT.visitPure go v) + _ -> Just (effect (ABT.annotation t) es $ ABT.visitPure go v) + go _ = Nothing + +cleanups :: Var v => [Type v a] -> [Type v a] +cleanups ts = cleanupVars $ map cleanupAbilityLists ts + +cleanup :: Var v => Type v a -> Type v a +cleanup t | not Settings.cleanupTypes = t +cleanup t = cleanupVars1 . cleanupAbilityLists $ t + +toReference :: (ABT.Var v, Show v) => Type v a -> Reference +toReference (Ref' r) = r +-- a bit of normalization - any unused type parameters aren't part of the hash +toReference (ForallNamed' v body) | not (Set.member v (ABT.freeVars body)) = toReference body +toReference t = Reference.Derived (ABT.hash t) 0 1 + +toReferenceMentions :: (ABT.Var v, Show v) => Type v a -> Set Reference +toReferenceMentions ty = + let (vs, _) = unforall' ty + gen ty = generalize (Set.toList (freeVars ty)) $ generalize vs ty + in Set.fromList $ toReference . gen <$> ABT.subterms ty + +hashComponents + :: Var v => Map v (Type v a) -> Map v (Reference.Id, Type v a) +hashComponents = ReferenceUtil.hashComponents $ refId () + +instance Hashable1 F where + hash1 hashCycle hash e = + let + (tag, hashed) = (Hashable.Tag, Hashable.Hashed) + -- Note: start each layer with leading `0` byte, to avoid collisions with + -- terms, which start each layer with leading `1`. 
See `Hashable1 Term.F` + in Hashable.accumulate $ tag 0 : case e of + Ref r -> [tag 0, Hashable.accumulateToken r] + Arrow a b -> [tag 1, hashed (hash a), hashed (hash b) ] + App a b -> [tag 2, hashed (hash a), hashed (hash b) ] + Ann a k -> [tag 3, hashed (hash a), Hashable.accumulateToken k ] + -- Example: + -- a) {Remote, Abort} (() -> {Remote} ()) should hash the same as + -- b) {Abort, Remote} (() -> {Remote} ()) but should hash differently from + -- c) {Remote, Abort} (() -> {Abort} ()) + Effects es -> let + (hs, _) = hashCycle es + in tag 4 : map hashed hs + Effect e t -> [tag 5, hashed (hash e), hashed (hash t)] + Forall a -> [tag 6, hashed (hash a)] + IntroOuter a -> [tag 7, hashed (hash a)] + +instance Show a => Show (F a) where + showsPrec = go where + go _ (Ref r) = shows r + go p (Arrow i o) = + showParen (p > 0) $ showsPrec (p+1) i <> s" -> " <> showsPrec p o + go p (Ann t k) = + showParen (p > 1) $ shows t <> s":" <> shows k + go p (App f x) = + showParen (p > 9) $ showsPrec 9 f <> s" " <> showsPrec 10 x + go p (Effects es) = showParen (p > 0) $ + s"{" <> shows es <> s"}" + go p (Effect e t) = showParen (p > 0) $ + showParen True $ shows e <> s" " <> showsPrec p t + go p (Forall body) = case p of + 0 -> showsPrec p body + _ -> showParen True $ s"∀ " <> shows body + go p (IntroOuter body) = case p of + 0 -> showsPrec p body + _ -> showParen True $ s"outer " <> shows body + (<>) = (.) + s = showString + diff --git a/unison-core/src/Unison/Util/Components.hs b/unison-core/src/Unison/Util/Components.hs new file mode 100644 index 0000000000..13a049e799 --- /dev/null +++ b/unison-core/src/Unison/Util/Components.hs @@ -0,0 +1,48 @@ +module Unison.Util.Components where + +import Unison.Prelude + +import qualified Data.Graph as Graph +import qualified Data.Map as Map +import qualified Data.Set as Set + +-- | Order bindings by dependencies and group into components. 
+-- Each component consists of > 1 bindings, each of which depends +-- transitively on all other bindings in the component. +-- +-- 1-element components may or may not depend on themselves. +-- +-- The order is such that a component at index i will not depend +-- on components and indexes > i. But a component at index i does not +-- _necessarily_ depend on any components at earlier indices. +-- +-- Example: +-- +-- let rec +-- ping n = pong (n + 1); +-- pong n = ping (n + 1); +-- g = id 42; +-- y = id "hi" +-- id x = x; +-- in ping g +-- +-- `components` would produce `[[ping,pong], [id], [g], [y]]` +-- Notice that `id` comes before `g` and `y` in the output, since +-- both `g` and `y` depend on `id`. +-- +-- Uses Tarjan's algorithm: +-- https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm +components :: Ord v => (t -> Set v) -> [(v, t)] -> [[(v, t)]] +components freeVars bs = + let varIds = + Map.fromList (map fst bs `zip` reverse [(1 :: Int) .. length bs]) + -- something horribly wrong if this bombs + msg = error "Components.components bug" + varId v = fromMaybe msg $ Map.lookup v varIds + + -- use ints as keys for graph to preserve original source order as much as + -- possible + graph = [ ((v, b), varId v, deps b) | (v, b) <- bs ] + vars = Set.fromList (map fst bs) + deps b = varId <$> Set.toList (Set.intersection vars (freeVars b)) + in Graph.flattenSCC <$> Graph.stronglyConnComp graph diff --git a/unison-core/src/Unison/Util/List.hs b/unison-core/src/Unison/Util/List.hs new file mode 100644 index 0000000000..dff640a4a1 --- /dev/null +++ b/unison-core/src/Unison/Util/List.hs @@ -0,0 +1,65 @@ +module Unison.Util.List where + +import Unison.Prelude + +import qualified Data.List as List +import qualified Data.Set as Set +import qualified Data.Map as Map + +multimap :: Foldable f => Ord k => f (k, v) -> Map k [v] +multimap kvs = + -- preserve the order of the values from the original list + reverse <$> foldl' step Map.empty kvs + where 
+ step m (k,v) = Map.insertWith (++) k [v] m + +groupBy :: (Foldable f, Ord k) => (v -> k) -> f v -> Map k [v] +groupBy f vs = reverse <$> foldl' step Map.empty vs + where step m v = Map.insertWith (++) (f v) [v] m + +-- returns the subset of `f a` which maps to unique `b`s. +-- prefers earlier copies, if many `a` map to some `b`. +uniqueBy, nubOrdOn :: (Foldable f, Ord b) => (a -> b) -> f a -> [a] +uniqueBy f as = wrangle' (toList as) Set.empty where + wrangle' [] _ = [] + wrangle' (a:as) seen = + if Set.member b seen + then wrangle' as seen + else a : wrangle' as (Set.insert b seen) + where b = f a +nubOrdOn = uniqueBy + +-- prefers later copies +uniqueBy' :: (Foldable f, Ord b) => (a -> b) -> f a -> [a] +uniqueBy' f = reverse . uniqueBy f . reverse . toList + +safeHead :: Foldable f => f a -> Maybe a +safeHead = headMay . toList + +validate :: (Semigroup e, Foldable f) => (a -> Either e b) -> f a -> Either e [b] +validate f as = case partitionEithers (f <$> toList as) of + ([], bs) -> Right bs + (e:es, _) -> Left (foldl' (<>) e es) + +-- Intercalate a list with separators determined by inspecting each +-- adjacent pair. +intercalateMapWith :: (a -> a -> b) -> (a -> b) -> [a] -> [b] +intercalateMapWith sep f xs = result where + xs' = map f xs + pairs = filter (\p -> length p == 2) $ map (take 2) $ List.tails xs + seps = (flip map) pairs $ \case + x1 : x2 : _ -> sep x1 x2 + _ -> error "bad list length" + paired = zipWith (\sep x -> [sep, x]) seps (drop 1 xs') + result = (take 1 xs') ++ mconcat paired + +-- Take runs of consecutive occurrences of r within a list, +-- and in each run, overwrite all but the first occurrence of r with w. +quenchRuns :: Eq a => a -> a -> [a] -> [a] +quenchRuns r w = reverse . 
(go False r w []) where + go inRun r w acc = \case + [] -> acc + h : tl -> + if h == r + then go True r w ((if inRun then w else r) : acc) tl + else go False r w (h : acc) tl diff --git a/unison-core/src/Unison/Util/Monoid.hs b/unison-core/src/Unison/Util/Monoid.hs new file mode 100644 index 0000000000..1c95bcf1fd --- /dev/null +++ b/unison-core/src/Unison/Util/Monoid.hs @@ -0,0 +1,27 @@ +module Unison.Util.Monoid where + +import Unison.Prelude hiding (whenM) + +import Data.List (intersperse) + +-- List.intercalate extended to any monoid +-- "The type that intercalate should have had to begin with." +intercalateMap :: (Foldable t, Monoid a) => a -> (b -> a) -> t b -> a +intercalateMap separator renderer elements = + mconcat $ intersperse separator (renderer <$> toList elements) + +fromMaybe :: Monoid a => Maybe a -> a +fromMaybe Nothing = mempty +fromMaybe (Just a) = a + +whenM, unlessM :: Monoid a => Bool -> a -> a +whenM True a = a +whenM False _ = mempty +unlessM = whenM . not + +isEmpty, nonEmpty :: (Eq a, Monoid a) => a -> Bool +isEmpty a = a == mempty +nonEmpty = not . isEmpty + +foldMapM :: (Monad m, Foldable f, Monoid b) => (a -> m b) -> f a -> m b +foldMapM f as = foldM (\b a -> fmap (b <>) (f a)) mempty as diff --git a/unison-core/src/Unison/Util/Relation.hs b/unison-core/src/Unison/Util/Relation.hs new file mode 100644 index 0000000000..48d2a3c91c --- /dev/null +++ b/unison-core/src/Unison/Util/Relation.hs @@ -0,0 +1,496 @@ +{-# LANGUAGE ViewPatterns #-} +module Unison.Util.Relation where + +import Unison.Prelude hiding (empty, toList) + +import Prelude hiding ( null, map, filter ) +import Data.Bifunctor ( first, second ) +import qualified Data.List as List +import qualified Data.Map as M +import qualified Data.Set as S +import qualified Data.Map as Map +import qualified Unison.Hashable as H + +-- | +-- This implementation avoids using @"Set (a,b)"@ because +-- it it is necessary to search for an item without knowing both @D@ and @R@. 
+--
+-- In "Set", you must know both values to search.
+--
+-- Thus, there are two maps to be updated together.
+--
+-- 1. Always be careful with the associated set of the key.
+--
+-- 2. If you union two relations, apply union to the set of values.
+--
+-- 3. If you subtract, take care when handling the set of values.
+--
+-- As a multi-map, each key is associated with a Set of values v.
+--
+-- We do not allow the associations with the 'empty' Set.
+--
+
+data Relation a b = Relation { domain :: M.Map a (Set b)
+ , range :: M.Map b (Set a)
+ }
+ deriving (Eq, Ord)
+
+instance (Show a, Show b) => Show (Relation a b) where
+ show = show . toList
+
+-- * Functions about relations
+
+difference :: (Ord a, Ord b) => Relation a b -> Relation a b -> Relation a b
+difference a b = fromList . S.toList $ diffSet where
+ diffSet = S.difference seta setb
+ seta = S.fromList . toList $ a
+ setb = S.fromList . toList $ b
+
+-- The size is calculated using the domain.
+-- | @size r@ returns the number of tuples in the relation.
+size :: Relation a b -> Int
+size r = M.foldr' ((+) . S.size) 0 (domain r)
+
+-- | Construct a relation with no elements.
+empty :: Relation a b
+empty = Relation M.empty M.empty
+
+-- |
+-- The list must be formatted like: [(k1, v1), (k2, v2),..,(kn, vn)].
+fromList :: (Ord a, Ord b) => [(a, b)] -> Relation a b
+fromList xs = Relation
+ { domain = M.fromListWith S.union $ snd2Set xs
+ , range = M.fromListWith S.union $ flipAndSet xs
+ }
+ where
+ snd2Set = List.map (\(x, y) -> (x, S.singleton y))
+ flipAndSet = List.map (\(x, y) -> (y, S.singleton x))
+
+-- |
+-- Builds a List from a Relation.
+toList :: Relation a b -> [(a, b)]
+toList r =
+ concatMap (\(x, y) -> zip (repeat x) (S.toList y)) (M.toList . domain $ r)
+
+-- | Builds a Set from a Relation
+toSet :: (Ord a, Ord b) => Relation a b -> S.Set (a, b)
+toSet = S.fromList . toList
+
+-- |
+-- Builds a 'Relation' consisting of an association between: @x@ and @y@.
+singleton :: a -> b -> Relation a b +singleton x y = Relation + { domain = M.singleton x (S.singleton y) + , range = M.singleton y (S.singleton x) + } + +-- | The 'Relation' that results from the union of two relations: @r@ and @s@. +union :: (Ord a, Ord b) => Relation a b -> Relation a b -> Relation a b +union r s = Relation + { domain = M.unionWith S.union (domain r) (domain s) + , range = M.unionWith S.union (range r) (range s) + } + +intersection :: (Ord a, Ord b) => Relation a b -> Relation a b -> Relation a b +intersection r s + | size r > size s = intersection s r + | otherwise = filter (\(a, b) -> member a b s) r + +outerJoinDomMultimaps :: (Ord a, Ord b, Ord c) + => Relation a b + -> Relation a c + -> Map a (Set b, Set c) +outerJoinDomMultimaps b c = + Map.fromList + [ (a, (lookupDom a b, lookupDom a c)) | a <- S.toList (dom b <> dom c) ] + +outerJoinRanMultimaps :: (Ord a, Ord b, Ord c) + => Relation a c + -> Relation b c + -> Map c (Set a, Set b) +outerJoinRanMultimaps a b = outerJoinDomMultimaps (swap a) (swap b) + +innerJoinDomMultimaps :: (Ord a, Ord b, Ord c) + => Relation a b + -> Relation a c + -> Map a (Set b, Set c) +innerJoinDomMultimaps b c = + Map.fromList + [ (a, (lookupDom a b, lookupDom a c)) + | a <- S.toList $ dom b `S.intersection` dom c ] + +innerJoinRanMultimaps :: (Ord a, Ord b, Ord c) + => Relation a c + -> Relation b c + -> Map c (Set a, Set b) +innerJoinRanMultimaps a b = innerJoinDomMultimaps (swap a) (swap b) + +joinDom :: (Ord a, Ord b, Ord c) => Relation a b -> Relation a c -> Relation a (b,c) +joinDom b c = swap $ joinRan (swap b) (swap c) + +-- joinRan [(1, 'x'), (2, 'x'), (3, 'z')] [(true, 'x'), (true, 'y'), (false, 'z')] +-- == [((1,true), 'x'), ((2,true), 'x'), ((3,false), 'z')] +joinRan :: (Ord a, Ord b, Ord c) => Relation a c -> Relation b c -> Relation (a,b) c +joinRan a b = fromList + [ ((a,b), c) + | c <- S.toList $ ran a `S.intersection` ran b + , a <- S.toList $ lookupRan c a + , b <- S.toList $ lookupRan c b + ] + 
+--------------------------------------------------------------- +-- | +-- This fragment provided by: +-- +-- @ +-- \ Module : Data.Map +-- \ Copyright : (c) Daan Leijen 2002 +-- \ (c) Andriy Palamarchuk 2008 +-- \ License : BSD-style +-- \ Maintainer : libraries\@haskell.org +-- \ Stability : provisional +-- \ Portability : portable +-- @ +-- +-- +foldlStrict :: (a -> b -> a) -> a -> [b] -> a +foldlStrict f z xs = case xs of + [] -> z + (x : xx) -> let z' = f z x in seq z' (foldlStrict f z' xx) +--------------------------------------------------------------- + +-- | Union a list of relations using the 'empty' relation. +unions :: (Ord a, Ord b) => [Relation a b] -> Relation a b +unions = foldlStrict union empty + +-- | Insert a relation @ x @ and @ y @ in the relation @ r @ +insert :: (Ord a, Ord b) => a -> b -> Relation a b -> Relation a b +insert x y r = -- r { domain = domain', range = range' } + Relation domain' range' + where + domain' = M.insertWith S.union x (S.singleton y) (domain r) + range' = M.insertWith S.union y (S.singleton x) (range r) + + +-- $deletenotes +-- +-- The deletion is not difficult but is delicate: +-- +-- @ +-- r = { domain { (k1, {v1a, v3}) +-- , (k2, {v2a}) +-- , (k3, {v3b, v3}) +-- } +-- , range { (v1a, {k1} +-- , (v2a, {k2{ +-- , (v3 , {k1, k3} +-- , (v3b, {k3} +-- } +-- } +-- @ +-- +-- To delete (k,v) in the relation do: +-- 1. Working with the domain: +-- 1a. Delete v from the Set VS associated with k. +-- 1b. If VS is empty, delete k in the domain. +-- 2. Working in the range: +-- 2a. Delete k from the Set VS associated with v. +-- 2b. If VS is empty, delete v in the range. +-- +-- + +-- | Delete an association in the relation. 
+delete :: (Ord a, Ord b) => a -> b -> Relation a b -> Relation a b +delete x y r = r { domain = domain', range = range' } + where + domain' = M.update (erase y) x (domain r) + range' = M.update (erase x) y (range r) + erase e s = if S.singleton e == s then Nothing else Just $ S.delete e s + +-- | The Set of values associated with a value in the domain. +lookupDom' :: Ord a => a -> Relation a b -> Maybe (Set b) +lookupDom' x r = M.lookup x (domain r) + +-- | The Set of values associated with a value in the range. +lookupRan' :: Ord b => b -> Relation a b -> Maybe (Set a) +lookupRan' y r = M.lookup y (range r) + +-- | True if the element @ x @ exists in the domain of @ r @. +memberDom :: Ord a => a -> Relation a b -> Bool +memberDom x r = isJust $ lookupDom' x r + +-- | True if the element exists in the range. +memberRan :: Ord b => b -> Relation a b -> Bool +memberRan y r = isJust $ lookupRan' y r + +filterDom :: (Ord a, Ord b) => (a -> Bool) -> Relation a b -> Relation a b +filterDom f r = S.filter f (dom r) <| r + +filterRan :: (Ord a, Ord b) => (b -> Bool) -> Relation a b -> Relation a b +filterRan f r = r |> S.filter f (ran r) + +filter :: (Ord a, Ord b) => ((a, b) -> Bool) -> Relation a b -> Relation a b +filter f = fromList . List.filter f . toList + +-- | Restricts the relation to domain elements having multiple range elements +filterManyDom :: (Ord a, Ord b) => Relation a b -> Relation a b +filterManyDom r = filterDom (`manyDom` r) r + +-- | +-- True if the relation @r@ is the 'empty' relation. 
+null :: Relation a b -> Bool +null r = M.null $ domain r +-- Before 2010/11/09 null::Ord b => Relation a b -> Bool + + + +-- | True if the relation contains the association @x@ and @y@ +member :: (Ord a, Ord b) => a -> b -> Relation a b -> Bool +member x y r = case lookupDom' x r of + Just s -> S.member y s + Nothing -> False + + + +-- | True if the relation /does not/ contain the association @x@ and @y@ +notMember :: (Ord a, Ord b) => a -> b -> Relation a b -> Bool +notMember x y r = not $ member x y r + +-- | True if a value appears more than one time in the relation. +manyDom :: Ord a => a -> Relation a b -> Bool +manyDom a = (>1) . S.size . lookupDom a + +manyRan :: Ord b => b -> Relation a b -> Bool +manyRan b = (>1) . S.size . lookupRan b + +-- | Returns the domain in the relation, as a Set, in its entirety. +dom :: Relation a b -> Set a +dom r = M.keysSet (domain r) + + + +-- | Returns the range of the relation, as a Set, in its entirety. +ran :: Relation a b -> Set b +ran r = M.keysSet (range r) + + + +-- | +-- A compact set of sets the values of which can be @Just (Set x)@ or @Nothing@. +-- +-- The cases of 'Nothing' are purged. +-- +-- It is similar to 'concat'. +compactSet :: Ord a => Set (Maybe (Set a)) -> Set a +compactSet = S.fold (S.union . fromMaybe S.empty) S.empty + + + +-- $selectops +-- +-- Primitive implementation for the /right selection/ and /left selection/ operators. +-- +-- PICA provides both operators: +-- '|>' and '<|' +-- and '|$>' and '<$|' +-- +-- in this library, for working with Relations and OIS (Ordered, Inductive Sets?). +-- +-- PICA exposes the operators defined here, so as not to interfere with the abstraction +-- of the Relation type and because having access to Relation hidden components is a more +-- efficient implementation of the operation of restriction. 
+-- +-- @ +-- (a <$| b) r +-- +-- denotes: for every element @b@ from the Set @B@, +-- select an element @a@ from the Set @A@ , +-- if @a@ +-- is related to @b@ +-- in @r@ +-- @ +-- +-- @ +-- (a |$> b) r +-- +-- denotes: for every element @a@ from the Set @A@ , +-- select an element @b@ from the Set @B@, +-- if @a@ +-- is related to @b@ +-- in @r@ +-- @ +-- +-- With regard to domain restriction and range restriction operators +-- of the language, those are described differently and return the domain or the range. + +-- | +-- @(Case b <| r a)@ +-- +(<$|) :: (Ord a, Ord b) => Set a -> Set b -> Relation a b -> Set a +(as <$| bs) r = as `S.intersection` generarAS bs + where generarAS = compactSet . S.map (`lookupRan'` r) + + -- The subsets of the domain (a) associated with each @b@ + -- such that @b@ in @B@ and (b) are in the range of the relation. + -- The expression 'S.map' returns a set of @Either (Set a)@. + + +-- | +-- @( Case a |> r b )@ +(|$>) :: (Ord a, Ord b) => Set a -> Set b -> Relation a b -> Set b +(as |$> bs) r = bs `S.intersection` generarBS as + where generarBS = compactSet . S.map (`lookupDom'` r) + + + +-- | Domain restriction for a relation. Modeled on z. +(<|), restrictDom :: (Ord a, Ord b) => Set a -> Relation a b -> Relation a b +restrictDom = (<|) +s <| r = fromList + $ concatMap (\(x, y) -> zip (repeat x) (S.toList y)) (M.toList domain') + where + domain' = M.unions . List.map filtrar . S.toList $ s + filtrar x = M.filterWithKey (\k _ -> k == x) dr + dr = domain r -- just to memoize the value + +-- | Range restriction for a relation. Modeled on z. +(|>), restrictRan :: (Ord a, Ord b) => Relation a b -> Set b -> Relation a b +restrictRan = (|>) +r |> t = fromList + $ concatMap (\(x, y) -> zip (S.toList y) (repeat x)) (M.toList range') + where + range' = M.unions . List.map filtrar . 
S.toList $ t + filtrar x = M.filterWithKey (\k _ -> k == x) rr + rr = range r -- just to memoize the value + + +-- Restrict the range to not include these `b`s +(||>) :: (Ord a, Ord b) => Relation a b -> Set b -> Relation a b +r ||> t = fromList [ (a,b) | (a,b) <- toList r, not (b `S.member` t)] + +subtractRan :: (Ord a, Ord b) => Set b -> Relation a b -> Relation a b +subtractRan = flip (||>) + +-- Restrict the domain to not include these `a` +(<||), subtractDom :: (Ord a, Ord b) => Set a -> Relation a b -> Relation a b +s <|| r = fromList [ (a,b) | (a,b) <- toList r, not (a `S.member` s)] +subtractDom = (<||) + +-- Note: +-- +-- As you have seen this implementation is expensive in terms +-- of storage. Information is registered twice. +-- For the operators |> and <| we follow a pattern used in +-- the @fromList@ constructor and @toList@ flattener: +-- It is enough to know one half of the Relation (the domain or +-- the range) to create to other half. + +insertManyRan + :: (Foldable f, Ord a, Ord b) => a -> f b -> Relation a b -> Relation a b +insertManyRan a bs r = foldl' (flip $ insert a) r bs + +insertManyDom + :: (Foldable f, Ord a, Ord b) => f a -> b -> Relation a b -> Relation a b +insertManyDom as b r = foldl' (flip $ flip insert b) r as + +lookupRan :: Ord b => b -> Relation a b -> Set a +lookupRan b r = fromMaybe S.empty $ lookupRan' b r + +lookupDom :: Ord a => a -> Relation a b -> Set b +lookupDom a r = fromMaybe S.empty $ lookupDom' a r + +replaceDom :: (Ord a, Ord b) => a -> a -> Relation a b -> Relation a b +replaceDom a a' r = + foldl' (\r b -> insert a' b $ delete a b r) r (lookupDom a r) + +replaceRan :: (Ord a, Ord b) => b -> b -> Relation a b -> Relation a b +replaceRan b b' r = + foldl' (\r a -> insert a b' $ delete a b r) r (lookupRan b r) + +updateDom :: (Ord a, Ord b) => (a -> a) -> b -> Relation a b -> Relation a b +updateDom f b r = + foldl' (\r a -> insert (f a) b $ delete a b r) r (lookupRan b r) + +updateRan :: (Ord a, Ord b) => (b -> 
b) -> a -> Relation a b -> Relation a b +updateRan f a r = + foldl' (\r b -> insert a (f b) $ delete a b r) r (lookupDom a r) + +deleteRan :: (Ord a, Ord b) => b -> Relation a b -> Relation a b +deleteRan b r = foldl' (\r a -> delete a b r) r $ lookupRan b r + +deleteDom :: (Ord a, Ord b) => a -> Relation a b -> Relation a b +deleteDom a r = foldl' (flip $ delete a) r $ lookupDom a r + +deleteRanWhere :: (Ord a, Ord b) => (b -> Bool) -> a -> Relation a b -> Relation a b +deleteRanWhere f a r = + foldl' (\r b -> if f b then delete a b r else r) r (lookupDom a r) + +deleteDomWhere :: (Ord a, Ord b) => (a -> Bool) -> b -> Relation a b -> Relation a b +deleteDomWhere f b r = + foldl' (\r a -> if f a then delete a b r else r) r (lookupRan b r) + +map :: (Ord a, Ord b, Ord c, Ord d) + => ((a, b) -> (c, d)) -> Relation a b -> Relation c d +map f = fromList . fmap f . toList + +-- aka first +mapDom :: (Ord a, Ord a', Ord b) => (a -> a') -> Relation a b -> Relation a' b +mapDom f = fromList . fmap (first f) . toList + +-- aka second +mapRan :: (Ord a, Ord b, Ord b') => (b -> b') -> Relation a b -> Relation a b' +mapRan f = fromList . fmap (second f) . toList + +fromMap :: (Ord a, Ord b) => Map a b -> Relation a b +fromMap = fromList . Map.toList + +fromMultimap :: (Ord a, Ord b) => Map a (Set b) -> Relation a b +fromMultimap m = + foldl' (\r (a, bs) -> insertManyRan a bs r) empty $ Map.toList m + +toMultimap :: Relation a b -> Map a (Set b) +toMultimap = domain + +-- Returns Nothing if Relation isn't one-to-one. +toMap :: Ord a => Relation a b -> Maybe (Map a b) +toMap r = + let mm = toMultimap r in + if all (\s -> S.size s == 1) mm + then Just (S.findMin <$> mm) + else Nothing + +fromSet :: (Ord a, Ord b) => Set (a,b) -> Relation a b +fromSet = fromList . 
S.toList + +fromManyRan + :: (Foldable f, Ord a, Ord b) => a -> f b -> Relation a b +fromManyRan a bs = insertManyRan a bs mempty + +fromManyDom + :: (Foldable f, Ord a, Ord b) => f a -> b -> Relation a b +fromManyDom as b = insertManyDom as b mempty + +swap :: Relation a b -> Relation b a +swap (Relation a b) = Relation b a + +bimap :: (Ord a, Ord b, Ord c, Ord d) + => (a -> c) -> (b -> d) -> Relation a b -> Relation c d +bimap f g = fromList . fmap (\(a,b) -> (f a, g b)) . toList + +instance (Ord a, Ord b) => Monoid (Relation a b) where + mempty = empty + mappend = (<>) + +instance (Ord a, Ord b) => Semigroup (Relation a b) where + (<>) = union + +instance (H.Hashable a, H.Hashable b) => H.Hashable (Relation a b) where + tokens = H.tokens . toList + +toUnzippedMultimap :: + Ord a => Ord b => Ord c => Relation a (b,c) -> Map a (Set b, Set c) +toUnzippedMultimap r = (\s -> (S.map fst s, S.map snd s)) <$> toMultimap r + +collectRan :: Ord a => Ord c => + (b -> Maybe c) -> Relation a b -> Relation a c +collectRan f r = fromList [ (a, c) | (a, f -> Just c) <- toList r ] diff --git a/unison-core/src/Unison/Util/Relation3.hs b/unison-core/src/Unison/Util/Relation3.hs new file mode 100644 index 0000000000..69b6f61d84 --- /dev/null +++ b/unison-core/src/Unison/Util/Relation3.hs @@ -0,0 +1,120 @@ +{-# LANGUAGE RecordWildCards #-} + +module Unison.Util.Relation3 where + +import Unison.Prelude hiding (empty, toList) + +import Unison.Util.Relation (Relation) +import qualified Data.Map as Map +import qualified Unison.Hashable as H +import qualified Unison.Util.Relation as R +import Data.Semigroup (Sum(Sum, getSum)) +import Data.Tuple.Extra (uncurry3) + +data Relation3 a b c + = Relation3 + { d1 :: Map a (Relation b c) + , d2 :: Map b (Relation a c) + , d3 :: Map c (Relation a b) + } deriving (Eq,Ord) + +instance (Show a, Show b, Show c) => Show (Relation3 a b c) where + show = show . toList + +d1s :: Relation3 a b c -> Set a +d1s = Map.keysSet . 
d1 + +d2s :: Relation3 a b c -> Set b +d2s = Map.keysSet . d2 + +d3s :: Relation3 a b c -> Set c +d3s = Map.keysSet . d3 + +filter :: (Ord a, Ord b, Ord c) + => ((a,b,c) -> Bool) -> Relation3 a b c -> Relation3 a b c +filter f = fromList . Prelude.filter f . toList + +member :: (Ord a, Ord b, Ord c) => a -> b -> c -> Relation3 a b c -> Bool +member a b c = R.member b c . lookupD1 a + +lookupD1 :: (Ord a, Ord b, Ord c) => a -> Relation3 a b c -> Relation b c +lookupD1 a = fromMaybe mempty . Map.lookup a . d1 + +lookupD2 :: (Ord a, Ord b, Ord c) => b -> Relation3 a b c -> Relation a c +lookupD2 b = fromMaybe mempty . Map.lookup b . d2 + +lookupD3 :: (Ord a, Ord b, Ord c) => c -> Relation3 a b c -> Relation a b +lookupD3 c = fromMaybe mempty . Map.lookup c . d3 + +size :: (Ord a, Ord b, Ord c) => Relation3 a b c -> Int +size = getSum . foldMap (Sum . R.size) . d1 + +toList :: Relation3 a b c -> [(a,b,c)] +toList = fmap (\(a,(b,c)) -> (a,b,c)) . toNestedList + +toNestedList :: Relation3 a b c -> [(a,(b,c))] +toNestedList r3 = + [ (a,bc) | (a,r2) <- Map.toList $ d1 r3 + , bc <- R.toList r2 ] + +nestD12 :: (Ord a, Ord b, Ord c) => Relation3 a b c -> Relation (a,b) c +nestD12 r = R.fromList [ ((a,b),c) | (a,b,c) <- toList r ] + +fromNestedDom :: (Ord a, Ord b, Ord c) => Relation (a,b) c -> Relation3 a b c +fromNestedDom = fromList . fmap (\((a,b),c) -> (a,b,c)) . R.toList +fromNestedRan :: (Ord a, Ord b, Ord c) => Relation a (b,c) -> Relation3 a b c +fromNestedRan = fromList . fmap (\(a,(b,c)) -> (a,b,c)) . R.toList + +fromList :: (Ord a, Ord b, Ord c) => [(a,b,c)] -> Relation3 a b c +fromList xs = insertAll xs empty + +empty :: (Ord a, Ord b, Ord c) => Relation3 a b c +empty = mempty + +insert, delete + :: (Ord a, Ord b, Ord c) + => a -> b -> c -> Relation3 a b c -> Relation3 a b c +insert a b c Relation3{..} = + Relation3 + (Map.alter (ins b c) a d1) + (Map.alter (ins a c) b d2) + (Map.alter (ins a b) c d3) + where + ins x y = Just . R.insert x y . 
fromMaybe mempty + +insertAll, deleteAll :: Foldable f => Ord a => Ord b => Ord c + => f (a,b,c) -> Relation3 a b c -> Relation3 a b c +insertAll f r = foldl' (\r x -> uncurry3 insert x r) r f +deleteAll f r = foldl' (\r x -> uncurry3 delete x r) r f + + +difference :: (Ord a, Ord b, Ord c) + => Relation3 a b c + -> Relation3 a b c + -> Relation3 a b c +difference a b = deleteAll (Unison.Util.Relation3.toList b) a + +delete a b c Relation3{..} = + Relation3 + (Map.alter (del b c) a d1) + (Map.alter (del a c) b d2) + (Map.alter (del a b) c d3) + where + del _ _ Nothing = Nothing + del x y (Just r) = + let r' = R.delete x y r + in if r' == mempty then Nothing else Just r' + +instance (Ord a, Ord b, Ord c) => Semigroup (Relation3 a b c) where + (<>) = mappend + +instance (Ord a, Ord b, Ord c) => Monoid (Relation3 a b c) where + mempty = Relation3 mempty mempty mempty + s1 `mappend` s2 = Relation3 d1' d2' d3' where + d1' = Map.unionWith (<>) (d1 s1) (d1 s2) + d2' = Map.unionWith (<>) (d2 s1) (d2 s2) + d3' = Map.unionWith (<>) (d3 s1) (d3 s2) + +instance (H.Hashable d1, H.Hashable d2, H.Hashable d3) + => H.Hashable (Relation3 d1 d2 d3) where + tokens s = [ H.accumulateToken $ toNestedList s ] diff --git a/unison-core/src/Unison/Util/Relation4.hs b/unison-core/src/Unison/Util/Relation4.hs new file mode 100644 index 0000000000..f094777798 --- /dev/null +++ b/unison-core/src/Unison/Util/Relation4.hs @@ -0,0 +1,122 @@ +{-# LANGUAGE RecordWildCards #-} + +module Unison.Util.Relation4 where + +import Unison.Prelude hiding (toList, empty) +import Prelude +import qualified Data.Map as Map +--import qualified Data.Set as Set +import qualified Unison.Hashable as H +import qualified Unison.Util.Relation as R +import qualified Unison.Util.Relation3 as R3 +import Unison.Util.Relation (Relation) +import Unison.Util.Relation3 (Relation3) +import Data.List.Extra (nubOrd) +import Util (uncurry4) +import Data.Semigroup (Sum(Sum, getSum)) + +data Relation4 a b c d + = Relation4 + { d1 :: 
Map a (Relation3 b c d) + , d2 :: Map b (Relation3 a c d) + , d3 :: Map c (Relation3 a b d) + , d4 :: Map d (Relation3 a b c) + } deriving (Eq,Ord) + +instance (Show a, Show b, Show c, Show d) => Show (Relation4 a b c d) where + show = show . toList + +size :: (Ord a, Ord b, Ord c, Ord d) => Relation4 a b c d -> Int +size = getSum . foldMap (Sum . R3.size) . d1 + +toNestedList :: Relation4 a b c d -> [(a,(b,(c,d)))] +toNestedList r4 = + [ (a,bcd) + | (a,r3) <- Map.toList $ d1 r4 + , bcd <- R3.toNestedList r3 ] + +toList :: Relation4 a b c d -> [(a,b,c,d)] +toList = fmap (\(a,(b,(c,d))) -> (a,b,c,d)) . toNestedList + +empty :: (Ord a, Ord b, Ord c, Ord d) => Relation4 a b c d +empty = mempty + +fromList :: (Ord a, Ord b, Ord c, Ord d) => [(a,b,c,d)] -> Relation4 a b c d +fromList xs = insertAll xs empty + +filter :: (Ord a, Ord b, Ord c, Ord d) => ((a,b,c,d) -> Bool) -> Relation4 a b c d -> Relation4 a b c d +filter f = fromList . Prelude.filter f . toList + +selectD3 :: (Ord a, Ord b, Ord c, Ord d) + => c -> Relation4 a b c d -> Relation4 a b c d +selectD3 c r = + fromList [ (a,b,c,d) | (a,b,d) <- maybe [] R3.toList $ Map.lookup c (d3 r) ] + +selectD34 :: (Ord a, Ord b, Ord c, Ord d) + => c -> d -> Relation4 a b c d -> Relation4 a b c d +selectD34 c d r = + fromList [ (a,b,c,d) + | (a,b) <- maybe [] (maybe [] R.toList . Map.lookup d . R3.d3) + (Map.lookup c (d3 r)) + ] + +d1set :: Ord a => Relation4 a b c d -> Set a +d1set = Map.keysSet . d1 + +d12 :: (Ord a, Ord b) => Relation4 a b c d -> Relation a b +d12 = R.fromMultimap . fmap (Map.keysSet . R3.d1) . d1 + +d34 :: (Ord c, Ord d) => Relation4 a b c d -> Relation c d +d34 = R.fromMultimap . fmap (Map.keysSet . R3.d3) . d3 + +-- todo: make me faster +d12s :: (Ord a, Ord b) => Relation4 a b c d -> [(a,b)] +d12s = nubOrd . fmap (\(a, (b, _)) -> (a,b)) . toNestedList +--e.g. 
Map.toList (d1 r) >>= \(a, r3) -> (a,) <$> Map.keys (R3.d1 r3) + +insert, delete + :: (Ord a, Ord b, Ord c, Ord d) + => a -> b -> c -> d -> Relation4 a b c d -> Relation4 a b c d +insert a b c d Relation4{..} = + Relation4 + (Map.alter (ins b c d) a d1) + (Map.alter (ins a c d) b d2) + (Map.alter (ins a b d) c d3) + (Map.alter (ins a b c) d d4) + where + ins x y z = Just . R3.insert x y z . fromMaybe mempty + +delete a b c d Relation4{..} = + Relation4 + (Map.alter (del b c d) a d1) + (Map.alter (del a c d) b d2) + (Map.alter (del a b d) c d3) + (Map.alter (del a b c) d d4) + where + del _ _ _ Nothing = Nothing + del x y z (Just r) = + let r' = R3.delete x y z r + in if r' == mempty then Nothing else Just r' + +mapD2 :: (Ord a, Ord b, Ord b', Ord c, Ord d) + => (b -> b') -> Relation4 a b c d -> Relation4 a b' c d +mapD2 f = fromList . fmap (\(a,b,c,d) -> (a, f b, c, d)) . toList + +insertAll :: Foldable f => Ord a => Ord b => Ord c => Ord d + => f (a,b,c,d) -> Relation4 a b c d -> Relation4 a b c d +insertAll f r = foldl' (\r x -> uncurry4 insert x r) r f + +instance (Ord a, Ord b, Ord c, Ord d) => Semigroup (Relation4 a b c d) where + (<>) = mappend + +instance (Ord a, Ord b, Ord c, Ord d) => Monoid (Relation4 a b c d) where + mempty = Relation4 mempty mempty mempty mempty + s1 `mappend` s2 = Relation4 d1' d2' d3' d4' where + d1' = Map.unionWith (<>) (d1 s1) (d1 s2) + d2' = Map.unionWith (<>) (d2 s1) (d2 s2) + d3' = Map.unionWith (<>) (d3 s1) (d3 s2) + d4' = Map.unionWith (<>) (d4 s1) (d4 s2) + +instance (H.Hashable d1, H.Hashable d2, H.Hashable d3, H.Hashable d4) + => H.Hashable (Relation4 d1 d2 d3 d4) where + tokens s = [ H.accumulateToken $ toNestedList s ] diff --git a/unison-core/src/Unison/Util/Set.hs b/unison-core/src/Unison/Util/Set.hs new file mode 100644 index 0000000000..ea224e8259 --- /dev/null +++ b/unison-core/src/Unison/Util/Set.hs @@ -0,0 +1,10 @@ +{-# LANGUAGE ViewPatterns #-} +module Unison.Util.Set where + +import Data.Set + +symmetricDifference 
:: Ord a => Set a -> Set a -> Set a +symmetricDifference a b = (a `difference` b) `union` (b `difference` a) + +mapMaybe :: (Ord a, Ord b) => (a -> Maybe b) -> Set a -> Set b +mapMaybe f s = fromList [ r | (f -> Just r) <- toList s ] diff --git a/unison-core/src/Unison/Var.hs b/unison-core/src/Unison/Var.hs new file mode 100644 index 0000000000..22da72d1fe --- /dev/null +++ b/unison-core/src/Unison/Var.hs @@ -0,0 +1,168 @@ +{-# Language OverloadedStrings #-} +{-# Language ViewPatterns #-} +{-# Language PatternSynonyms #-} + +module Unison.Var where + +import Unison.Prelude + +import Data.Char (toLower, isLower) +import Data.Text (pack) +import qualified Data.Text as Text +import qualified Unison.ABT as ABT +import qualified Unison.NameSegment as Name + +import Unison.Util.Monoid (intercalateMap) +import Unison.Reference (Reference) +import qualified Unison.Reference as R + +-- | A class for variables. Variables may have auxiliary information which +-- may not form part of their identity according to `Eq` / `Ord`. Laws: +-- +-- * `typeOf (typed n) == n` +-- * `typeOf (ABT.freshIn vs v) == typeOf v`: +-- `ABT.freshIn` does not alter the name +class (Show v, ABT.Var v) => Var v where + typed :: Type -> v + typeOf :: v -> Type + freshId :: v -> Word64 + freshenId :: Word64 -> v -> v + +freshIn :: ABT.Var v => Set v -> v -> v +freshIn = ABT.freshIn + +named :: Var v => Text -> v +named n = typed (User n) + +-- | Variable whose name is derived from the given reference. 
+refNamed :: Var v => Reference -> v +refNamed ref = named ("ℍ" <> R.toText ref) + +rawName :: Type -> Text +rawName typ = case typ of + User n -> n + Inference Ability -> "𝕖" + Inference Input -> "𝕒" + Inference Output -> "𝕣" + Inference Other -> "𝕩" + Inference PatternPureE -> "𝕞" + Inference PatternPureV -> "𝕧" + Inference PatternBindE -> "𝕞" + Inference PatternBindV -> "𝕧" + Inference TypeConstructor -> "𝕗" + Inference TypeConstructorArg -> "𝕦" + MissingResult -> "_" + Blank -> "_" + Eta -> "_eta" + ANFBlank -> "_anf" + Float -> "_float" + Pattern -> "_pattern" + Irrelevant -> "_irrelevant" + UnnamedWatch k guid -> fromString k <> "." <> guid + +name :: Var v => v -> Text +name v = rawName (typeOf v) <> showid v + where + showid (freshId -> 0) = "" + showid (freshId -> n) = pack (show n) + +uncapitalize :: Var v => v -> v +uncapitalize v = nameds $ go (nameStr v) where + go (c:rest) = toLower c : rest + go n = n + +missingResult, blank, inferInput, inferOutput, inferAbility, + inferPatternPureE, inferPatternPureV, inferPatternBindE, inferPatternBindV, + inferTypeConstructor, inferTypeConstructorArg, + inferOther :: Var v => v +missingResult = typed MissingResult +blank = typed Blank +inferInput = typed (Inference Input) +inferOutput = typed (Inference Output) +inferAbility = typed (Inference Ability) +inferPatternPureE = typed (Inference PatternPureE) +inferPatternPureV = typed (Inference PatternPureV) +inferPatternBindE = typed (Inference PatternBindE) +inferPatternBindV = typed (Inference PatternBindV) +inferTypeConstructor = typed (Inference TypeConstructor) +inferTypeConstructorArg = typed (Inference TypeConstructorArg) +inferOther = typed (Inference Other) + +unnamedTest :: Var v => Text -> v +unnamedTest guid = typed (UnnamedWatch TestWatch guid) + +data Type + -- User provided variables, these should generally be left alone + = User Text + -- Variables created during type inference + | Inference InferenceType + -- Variables created to finish a block that 
doesn't end with an expression + | MissingResult + -- Variables invented for placeholder values inserted by user or by TDNR + | Blank + -- An unnamed watch expression of the given kind, for instance: + -- + -- test> Ok "oog" + -- has kind "test" + -- > 1 + 1 + -- has kind "" + | UnnamedWatch WatchKind Text -- guid + -- An unnamed variable for constructor eta expansion + | Eta + -- An unnamed variable introduced by ANF transformation + | ANFBlank + -- An unnamed variable for a floated lambda + | Float + -- An unnamed variable introduced from pattern compilation + | Pattern + -- A variable for situations where we need to make up one that + -- definitely won't be used. + | Irrelevant + deriving (Eq,Ord,Show) + +type WatchKind = String + +pattern RegularWatch = "" +pattern TestWatch = "test" + +data InferenceType = + Ability | Input | Output | + PatternPureE | PatternPureV | + PatternBindE | PatternBindV | + TypeConstructor | TypeConstructorArg | + Other + deriving (Eq,Ord,Show) + +reset :: Var v => v -> v +reset v = typed (typeOf v) + +unqualifiedName :: Var v => v -> Text +unqualifiedName = last . Name.segments' . name + +unqualified :: Var v => v -> v +unqualified v = case typeOf v of + User _ -> named . unqualifiedName $ v + _ -> v + +namespaced :: Var v => [v] -> v +namespaced vs = named $ intercalateMap "." name vs + +nameStr :: Var v => v -> String +nameStr = Text.unpack . name + +nameds :: Var v => String -> v +nameds s = named (Text.pack s) + +joinDot :: Var v => v -> v -> v +joinDot prefix v2 = + if name prefix == "." then named (name prefix `mappend` name v2) + else named (name prefix `mappend` "." `mappend` name v2) + +freshNamed :: Var v => Set v -> Text -> v +freshNamed used n = ABT.freshIn used (named n) + +universallyQuantifyIfFree :: forall v . Var v => v -> Bool +universallyQuantifyIfFree v = + ok (name $ reset v) && unqualified v == v + where + ok n = (all isLower . take 1 . 
Text.unpack) n diff --git a/unison-core/unison-core.cabal b/unison-core/unison-core.cabal new file mode 100644 index 0000000000..84daab4e41 --- /dev/null +++ b/unison-core/unison-core.cabal @@ -0,0 +1,117 @@ +cabal-version: 2.2 +name: unison-core +category: Compiler +version: 0.1 +license: MIT +license-file: LICENSE +author: Unison Computing, public benefit corp +maintainer: Paul Chiusano , Runar Bjarnason , Arya Irani +stability: provisional +homepage: http://unisonweb.org +bug-reports: https://github.com/unisonweb/unison/issues +copyright: Copyright (C) 2013-2018 Unison Computing, PBC and contributors +synopsis: Parser and typechecker for the Unison language +description: + +build-type: Simple +extra-source-files: +data-files: + +source-repository head + type: git + location: git://github.com/unisonweb/unison.git + +-- `cabal install -foptimized` enables optimizations +flag optimized + manual: True + default: False + +flag quiet + manual: True + default: False + +-- NOTE: Keep in sync throughout repo. 
+common unison-common + default-language: Haskell2010 + default-extensions: + ApplicativeDo, + BlockArguments, + DeriveFunctor, + DerivingStrategies, + DoAndIfThenElse, + FlexibleContexts, + FlexibleInstances, + LambdaCase, + MultiParamTypeClasses, + ScopedTypeVariables, + TupleSections, + TypeApplications + +library + import: unison-common + + hs-source-dirs: src + + exposed-modules: + Unison.ABT + Unison.ABT.Normalized + Unison.Blank + Unison.ConstructorType + Unison.DataDeclaration + Unison.Hash + Unison.HashQualified + Unison.HashQualified' + Unison.Hashable + Unison.Kind + Unison.LabeledDependency + Unison.Name + Unison.Names2 + Unison.Names3 + Unison.NameSegment + Unison.Paths + Unison.Pattern + Unison.PatternCompat + Unison.Prelude + Unison.Reference + Unison.Reference.Util + Unison.Referent + Unison.Settings + Unison.ShortHash + Unison.Symbol + Unison.Term + Unison.Type + Unison.Util.Components + Unison.Util.List + Unison.Util.Monoid + Unison.Util.Relation + Unison.Util.Relation3 + Unison.Util.Relation4 + Unison.Util.Set + Unison.Var + + build-depends: + base, + bytestring, + containers, + cryptonite, + either, + extra, + lens, + prelude-extras, + memory, + mtl, + rfc5051, + safe, + sandi, + text, + transformers, + util, + vector + + ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures + + if flag(optimized) + ghc-options: -funbox-strict-fields + + if flag(quiet) + ghc-options: -v0 diff --git a/yaks/easytest/LICENSE b/yaks/easytest/LICENSE new file mode 100644 index 0000000000..5575aa473b --- /dev/null +++ b/yaks/easytest/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013, Paul Chiusano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to 
permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/yaks/easytest/README.markdown b/yaks/easytest/README.markdown new file mode 100644 index 0000000000..561cb8016e --- /dev/null +++ b/yaks/easytest/README.markdown @@ -0,0 +1,264 @@ +EasyTest is a simple testing toolkit, meant to replace most uses of QuickCheck, SmallCheck, HUnit, and frameworks like Tasty, etc. Here's an example usage: + +```Haskell +module Main where + +import EasyTest +import Control.Applicative +import Control.Monad + +suite :: Test () +suite = tests + [ scope "addition.ex1" $ expect (1 + 1 == 2) + , scope "addition.ex2" $ expect (2 + 3 == 5) + , scope "list.reversal" . fork $ do + -- generate lists from size 0 to 10, of Ints in (0,43) + -- shorthand: listsOf [0..10] (int' 0 43) + ns <- [0..10] `forM` \n -> replicateM n (int' 0 43) + ns `forM_` \ns -> expect (reverse (reverse ns) == ns) + -- equivalent to `scope "addition.ex3"` + , scope "addition" . scope "ex3" $ expect (3 + 3 == 6) + , scope "always passes" $ do + note "I'm running this test, even though it always passes!" + ok -- like `pure ()`, but records a success result + , scope "failing test" $ crash "oh noes!!" 
] + +-- NB: `run suite` would run all tests, but we only run +-- tests whose scopes are prefixed by "addition" +main = runOnly "addition" suite +``` + +This generates the output: + +``` +Randomness seed for this run is 5104092164859451056 +Raw test output to follow ... +------------------------------------------------------------ +OK addition.ex1 +OK addition.ex2 +OK addition.ex3 +------------------------------------------------------------ +✅ 3 tests passed, no failures! 👍 🎉 +``` + +The idea here is to write tests with ordinary Haskell code, with control flow explicit and under programmer control. Tests are values of type `Test a`, and `Test` forms a monad with access to: + +* repeatable randomness (the `random` and `random'` functions for random and bounded random values, or handy specialized `int`, `int'`, `double`, `double'`, etc) +* I/O (via `liftIO` or `EasyTest.io`, which is an alias for `liftIO`) +* failure (via `crash`, which yields a stack trace, or `fail`, which does not) +* logging (via `note`, `noteScoped`, or `note'`) +* hierarchically-named subcomputations which can be switched on and off (in the above code, notice that only the tests scoped under `"addition"` are run, and we could do `run` instead of `runOnly` if we wanted to run the whole suite) +* parallelism (note the `fork` which runs that subtree of the test suite in a parallel thread). +* conjunction of tests via `MonadPlus` (the `<|>` operation runs both tests, even if the first test fails, and the `tests` function used above is just `msum`). + +Using any or all of these capabilities, you assemble `Test` values into a "test suite" (just another `Test` value) using ordinary Haskell code, not framework magic. Notice that to generate a list of random values, we just `replicateM` and `forM` as usual. If this gets tedious... we can factor this logic out into helper functions! 
For instance: + +```Haskell +listOf :: Int -> Test a -> Test [a] +listOf = replicateM + +listsOf :: [Int] -> Test a -> Test [[a]] +listsOf sizes gen = sizes `forM` \n -> listOf n gen + +ex :: Test () +ex = do + ns <- listsOf [0..100] int + ns `forM_` \ns -> expect (reverse (reverse ns) == ns) +``` + +This library is opinionated and might not be for everyone. If you're curious about any of the design decisions made, see [my rationale](#rationale) for writing it. + +### User guide + +The simplest tests are `ok`, `crash`, and `expect`: + +```Haskell +-- Record a success +ok :: Test () + +-- Record a failure +crash :: String -> Test a + +-- Record a success if `True`, otherwise record a failure +expect :: Bool -> Test () +``` + +NB: `fail` is equivalent to `crash`, but doesn't provide a stack trace on failure. + +We can lift I/O into `Test` using `io` (or `liftIO`, but I always forget where to import that from): + +``` +io :: IO a -> Test a +``` + +`Test` is also a `Monad`. Note that `return` and `pure` do not record a result. Use `ok`, `expect`, or `crash` for that purpose. + +We often want to label tests so we can see when they succeed or fail. For that we use `scope`: + +``` +-- | Label a test. Can be nested. A `'.'` is placed between nested +-- scopes, so `scope "foo" . scope "bar"` is equivalent to `scope "foo.bar"` +scope :: String -> Test a -> Test a +``` + +Here's an example usage, putting all these primitives together: + +```Haskell +module Main where + +import EasyTest (ok, scope, crash, expect, run) + +suite :: Test () +suite = do + ok + scope "test-crash" $ crash "oh noes!" + expect (1 + 1 == 2) + +main = run suite +``` + +This example is _sequencing_ the `ok`, `crash`, and `expect` monadically, so the test halts at the first failure. The output is: + +``` +Randomness seed for this run is 1830293182471192517 +Raw test output to follow ... +------------------------------------------------------------ +test-crash FAILURE oh noes! 
CallStack (from HasCallStack): + crash, called at /Users/pchiusano/code/easytest/tests/Suite.hs:10:24 in main:Main +OK +FAILED test-crash +------------------------------------------------------------ + + + 1 passed + 1 FAILED (failed scopes below) + "test-crash" + + To rerun with same random seed: + + EasyTest.rerun 1830293182471192517 + EasyTest.rerunOnly 1830293182471192517 "test-crash" + + +------------------------------------------------------------ +❌ +``` + +In the output (which is streamed to the console), we get a stack trace pointing to the line where `crash` was called (`..tests/Suite.hs:10:24`), information about failing tests, and instructions for rerunning the tests with an identical random seed (in this case, there's no randomness, so `rerun` would work fine, but if our test generated random data, we might want to rerun with the exact same random numbers). + +The last line of the output always indicates success or failure of the overall suite... and information about any failing tests is _immediately_ above that. You should NEVER have to scroll through a bunch of test output just to find out which tests actually failed! Also, the streaming output always has `OK` or `FAILED` as the _leftmost_ text for ease of scanning. + +If you try running a test suite that has no results recorded (like if you have a typo in a call to `runOnly`, or you forget to use `ok` or `expect` to record a test result), you'll see a warning like this: + +``` +😶 hmm ... no test results recorded +Tip: use `ok`, `expect`, or `crash` to record results +Tip: if running via `runOnly` or `rerunOnly`, check for typos +``` + +The various `run` functions (`run`, `runOnly`, `rerun`, and `rerunOnly`) all exit the process with a nonzero status in the event of a failure, so they can be used for continuous integration or test running tools that key off the process exit code to determine whether the suite succeeded or failed. 
For instance, here's the relevant portion of a typical cabal file: + +``` +-- Preferred way to run EasyTest-based test suite +executable runtests + main-is: NameOfYourTestSuite.hs + ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 + hs-source-dirs: tests + other-modules: + build-depends: + base, + easytest + +-- I really have no idea why you'd ever use this, unless you +-- really feel the need to run your tests via cabal's "test runner" +-- which "conveniently" hides all output unless you pass it some +-- random flag I never remember +test-suite tests + type: exitcode-stdio-1.0 + main-is: NameOfYourTestSuite.hs + ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 + hs-source-dirs: tests + other-modules: + build-depends: + base, + easytest +``` + +For tests that are logically separate, we usually combine them into a suite using `tests` (which is just `msum`), as in: + +```Haskell +suite = tests + [ scope "ex1" $ expect (1 + 1 == 2) + , scope "ex2" $ expect (2 + 2 == 4) ] + +-- equivalently +suite = + (scope "ex1" $ expect (1 + 1 == 2)) <|> + (scope "ex2" $ expect (2 + 2 == 4)) +``` + +Importantly, each branch of a `<|>` or `tests` gets its own copy of the randomness source, so even when branches of the test suite are switched on or off, the randomness received by a branch is the same. This is important for being able to quickly iterate on a test failure! + +Sometimes, tests take a while to run and we want to make use of parallelism. For that, use `EasyTest.fork` or `fork'`: + +```Haskell +-- | Run a test in a separate thread, not blocking for its result. +fork :: Test a -> Test () + +-- | Run a test in a separate thread, not blocking for its result, but +-- return a future which can be used to block on the result. +fork' :: Test a -> Test (Test a) +``` + +Note: There's no "framework global" parallelism configuration setting. 
+ +We often want to generate random data for testing purposes: + +```Haskell +reverseTest :: Test () +reverseTest = scope "list reversal" $ do + nums <- listsOf [0..100] (int' 0 99) + nums `forM_` \nums -> expect (reverse (reverse nums) == nums) +``` + +Tip: generate your test cases in order of increasing size. If you get a failure, your test case is closer to "minimal". + +The above code generates lists of sizes `0` through `100`, consisting of `Int` values in the range `0` through `99`. `int' :: Int -> Int -> Test Int`, and there are analogous functions for `Double`, `Word`, etc. The most general functions are: + +```Haskell +random :: Random a => Test a +random' :: Random a => a -> a -> Test a +``` + +The functions `int`, `char`, `bool`, `double`, etc are just specialized aliases for `random`, and `int'`, `char'`, etc are just aliases for `random'`. The aliases are sometimes useful in situations where use of the generic `random` or `random'` would require type annotations. + +If our list reversal test failed, we might use `runOnly "list reversal"` or `rerunOnly "list reversal"` to rerun just that subtree of the test suite, and we might add some additional diagnostics to see what was going on: + +```Haskell +reverseTest :: Test () +reverseTest = scope "list reversal" $ do + nums <- listsOf [0..100] (int' 0 99) + nums `forM_` \nums -> do + note $ "nums: " ++ show nums + let r = reverse (reverse nums) + note $ "reverse (reverse nums): " ++ show r + expect (r == nums) +``` + +The idea is that these sorts of detailed diagnostics are added lazily (and temporarily) to find and fix failing tests. You can also add diagnostics via `io (putStrLn "blah")`, but if you have tests running in parallel this can sometimes get confusing. + +That's it! Just use ordinary monadic code to generate any testing data and to run your tests. + +###
Why? + +Here's some of my thinking in the design of this library: + +* Testing should uncomplicated, minimal friction, and ideally: FUN. If I have to think too much or remember arbitrary framework magic, I get irritated. +* A lot of testing frameworks are weirdly optimized for adding lots of diagnostic information up front, as if whatever diagnostic information you happen to think to capture will be exactly what is needed to fix whatever bugs your tests reveal. In my experience this is almost never the case, so EasyTest takes the opposite approach: be EXTREMELY LAZY about adding diagnostics and labeling subexpressions, but make it trivial to reproduce failing tests without running your entire suite. If a test fails, you can easily rerun just that test, with the exact same random seed, and add whatever diagnostics or print statements you need to track down what's wrong. And EasyTest helpfully tells you how to do this rerunning whenever your tests fail, because otherwise I'd never remember. (Again: keep the friction LOW!) +* Another reason not to add diagnostics up front: you avoid needing to remember two different versions of every function or operator (the one you use in your regular code, and the one you use with your testing "framework" to supply diagnostics). HUnit has operators named `(@=?)`, `(~?=)`, and a bunch of others for asserting equality with diagnostics on failure. QuickCheck has `(.&&.)` and `(.||.)`. Just... no. +* HUnit, QuickCheck, SmallCheck, Tasty, and whatever else are frameworks that hide control flow from the programmer and make some forms of control flow difficult or impossible to specify (for instance, you can't do I/O in your regular QuickCheck tests... unless you use `Test.QuickCheck.Monadic`, which has yet another API you have to learn!). In contrast, EasyTest is just a single data type with a monadic API and a few helper functions. You assemble your tests using ordinary monadic code, and there is never any magic. 
Want to abstract over something? _Write a regular function._ Need to generate some testing data? Write regular functions. +* "How do I modify the number of generated test cases for QuickCheck for just one of my properties?" Or control the maximum size for these `Gen` and `Arbitrary` types? Some arbitrary "configuration setting" that you have to look up every time. No thanks! +* Seriously, global configuration settings are evil! I want fine-grained control over the amount of parallelism, test case sizes, and so on. And if I find I'm repeating myself a lot... I'll _introduce a regular Haskell variable or function!_. DOWN WITH FRAMEWORKS AND THEIR DAMN CONFIGURATION SETTINGS!! +* Most of the functionality of QuickCheck is overkill anyway! There's no need for `Arbitrary` instances (explicit generation is totally fine, and even preferred in most cases), `Coarbitrary` (cute, but not useful when the HOF you are testing is parametric), or shrinking (just generate your test cases in increasing sizes, and your first failure will be the smallest!). + +I hope that you enjoy writing your tests with this library! 
diff --git a/yaks/easytest/easytest.cabal b/yaks/easytest/easytest.cabal new file mode 100644 index 0000000000..a20f20e5f4 --- /dev/null +++ b/yaks/easytest/easytest.cabal @@ -0,0 +1,95 @@ +cabal-version: 2.2 +name: easytest +category: Compiler +version: 0.1 +license: MIT +license-file: LICENSE +author: Paul Chiusano +maintainer: Paul Chiusano +stability: provisional +homepage: http://unisonweb.org +bug-reports: https://github.com/unisonweb/unison/issues +copyright: Copyright (C) 2016 Paul Chiusano and contributors +synopsis: Simple, expressive testing library + +build-type: Simple +extra-source-files: +data-files: + +source-repository head + type: git + location: git://github.com/unisonweb/unison.git + +-- `cabal install -foptimized` enables optimizations +flag optimized + manual: True + default: False + +flag quiet + manual: True + default: False + +-- NOTE: Keep in sync throughout repo. +common unison-common + default-language: Haskell2010 + default-extensions: + ApplicativeDo, + BlockArguments, + DeriveFunctor, + DerivingStrategies, + DoAndIfThenElse, + FlexibleContexts, + FlexibleInstances, + LambdaCase, + MultiParamTypeClasses, + ScopedTypeVariables, + TupleSections, + TypeApplications + +library + import: unison-common + + hs-source-dirs: src + + exposed-modules: + EasyTest + + -- these bounds could probably be made looser + build-depends: + async >= 2.1.1, + base >= 4.3, + mtl >= 2.0.1, + containers >= 0.4.0, + stm >= 2.4, + random >= 1.1 + + ghc-options: -Wall -fno-warn-name-shadowing + + if flag(optimized) + ghc-options: -funbox-strict-fields -O2 + + if flag(quiet) + ghc-options: -v0 + +-- Preferred way to run EasyTest-based test suite +executable runtests + import: unison-common + main-is: Suite.hs + ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 + hs-source-dirs: tests + other-modules: + build-depends: + base, + easytest + +-- I really have no idea why you'd ever use this, just use an executable as above +test-suite tests + 
import: unison-common + type: exitcode-stdio-1.0 + main-is: Suite.hs + ghc-options: -w -threaded -rtsopts -with-rtsopts=-N -v0 + hs-source-dirs: tests + other-modules: + build-depends: + base, + easytest diff --git a/yaks/easytest/src/EasyTest.hs b/yaks/easytest/src/EasyTest.hs new file mode 100644 index 0000000000..3ec96f125b --- /dev/null +++ b/yaks/easytest/src/EasyTest.hs @@ -0,0 +1,458 @@ +{-# Language BangPatterns #-} +{-# Language FunctionalDependencies #-} +{-# Language GeneralizedNewtypeDeriving #-} + +module EasyTest where + +import Control.Applicative +import Control.Concurrent +import Control.Concurrent.STM +import Control.Exception +import Control.Monad +import Control.Monad.IO.Class +import Control.Monad.Reader +import Data.List +import Data.Map (Map) +import Data.Word +import GHC.Stack +import System.Random (Random) +import qualified Control.Concurrent.Async as A +import qualified Data.Map as Map +import qualified System.Random as Random + +data Status = Failed | Passed !Int | Skipped | Pending + +combineStatus :: Status -> Status -> Status +combineStatus Skipped s = s +combineStatus s Skipped = s +combineStatus _ Pending = Pending +combineStatus Pending _ = Pending +combineStatus Failed _ = Failed +combineStatus _ Failed = Failed +combineStatus (Passed n) (Passed m) = Passed (n + m) + +data Env = + Env { rng :: TVar Random.StdGen + , messages :: String + , results :: TBQueue (Maybe (TMVar (String, Status))) + , note_ :: String -> IO () + , allow :: String } + +newtype Test a = Test (ReaderT Env IO (Maybe a)) + +io :: IO a -> Test a +io = liftIO + +atomicLogger :: IO (String -> IO ()) +atomicLogger = do + lock <- newMVar () + pure $ \msg -> + -- force msg before acquiring lock + let dummy = foldl' (\_ ch -> ch == 'a') True msg + in dummy `seq` bracket (takeMVar lock) (\_ -> putMVar lock ()) (\_ -> putStrLn msg) + +expect' :: HasCallStack => Bool -> Test () +expect' False = crash "unexpected" +expect' True = pure () + +expect :: HasCallStack => Bool 
-> Test () +expect False = crash "unexpected" +expect True = ok + +expectEqual :: (Eq a, Show a) => a -> a -> Test () +expectEqual expected actual = if expected == actual then ok + else crash $ unlines ["", show actual, "** did not equal expected value **", show expected] + +expectNotEqual :: (Eq a, Show a) => a -> a -> Test () +expectNotEqual forbidden actual = + if forbidden /= actual then ok + else crash $ unlines ["", show actual, "** did equal the forbidden value **", show forbidden] + +expectJust :: HasCallStack => Maybe a -> Test a +expectJust Nothing = crash "expected Just, got Nothing" +expectJust (Just a) = ok >> pure a + +expectRight :: HasCallStack => Either e a -> Test a +expectRight (Left _) = crash "expected Right, got Left" +expectRight (Right a) = ok >> pure a + +expectLeft :: HasCallStack => Either e a -> Test e +expectLeft (Left e) = ok >> pure e +expectLeft (Right _) = crash "expected Left, got Right" + +tests :: [Test ()] -> Test () +tests = msum + +-- | Run all tests whose scope starts with the given prefix +runOnly :: String -> Test a -> IO () +runOnly prefix t = do + logger <- atomicLogger + seed <- abs <$> Random.randomIO :: IO Int + run' seed logger prefix t + +-- | Run all tests with the given seed and whose scope starts with the given prefix +rerunOnly :: Int -> String -> Test a -> IO () +rerunOnly seed prefix t = do + logger <- atomicLogger + run' seed logger prefix t + +run :: Test a -> IO () +run = runOnly "" + +rerun :: Int -> Test a -> IO () +rerun seed = rerunOnly seed [] + +run' :: Int -> (String -> IO ()) -> String -> Test a -> IO () +run' seed note allow (Test t) = do + let !rng = Random.mkStdGen seed + resultsQ <- atomically (newTBQueue 50) + rngVar <- newTVarIO rng + note $ "Randomness seed for this run is " ++ show seed ++ "" + results <- atomically $ newTVar Map.empty + rs <- A.async . 
forever $ do + -- note, totally fine if this bombs once queue is empty + Just result <- atomically $ readTBQueue resultsQ + (msgs, passed) <- atomically $ takeTMVar result + atomically $ modifyTVar results (Map.insertWith combineStatus msgs passed) + resultsMap <- readTVarIO results + case Map.findWithDefault Skipped msgs resultsMap of + Skipped -> pure () + Pending -> note $ "🚧 " ++ msgs + Passed n -> note $ "\129412 " ++ (if n <= 1 then msgs else "(" ++ show n ++ ") " ++ msgs) + Failed -> note $ "💥 " ++ msgs + let line = "------------------------------------------------------------" + note "Raw test output to follow ... " + note line + e <- try (runReaderT (void t) (Env rngVar [] resultsQ note allow)) :: IO (Either SomeException ()) + case e of + Left e -> note $ "Exception while running tests: " ++ show e + Right () -> pure () + atomically $ writeTBQueue resultsQ Nothing + _ <- A.waitCatch rs + resultsMap <- readTVarIO results + let + resultsList = Map.toList resultsMap + succeededList = [ n | (_, Passed n) <- resultsList ] + succeeded = length succeededList + -- totalTestCases = foldl' (+) 0 succeededList + failures = [ a | (a, Failed) <- resultsList ] + failed = length failures + pendings = [ a | (a, Pending) <- resultsList ] + pending = length pendings + pendingSuffix = if pending == 0 then "👍 🎉" else "" + testsPlural n = show n ++ " " ++ if n == 1 then "test" else "tests" + note line + note "\n" + when (pending > 0) $ do + note $ "🚧 " ++ testsPlural pending ++ " still pending (pending scopes below):" + note $ " " ++ intercalate "\n " (map (show . takeWhile (/= '\n')) pendings) + case failures of + [] -> + case succeeded of + 0 -> do + note "😶 hmm ... no test results recorded" + note "Tip: use `ok`, `expect`, or `crash` to record results" + note "Tip: if running via `runOnly` or `rerunOnly`, check for typos" + n -> note $ "✅ " ++ testsPlural n ++ " passed, no failures! 
" ++ pendingSuffix + (hd:_) -> do + note $ " " ++ show succeeded ++ (if failed == 0 then " PASSED" else " passed") + note $ " " ++ show (length failures) ++ (if failed == 0 then " failed" else " FAILED (failed scopes below)") + note $ " " ++ intercalate "\n " (map (show . takeWhile (/= '\n')) failures) + note "" + note " To rerun with same random seed:\n" + note $ " EasyTest.rerun " ++ show seed + note $ " EasyTest.rerunOnly " ++ show seed ++ " " ++ "\"" ++ hd ++ "\"" + note "\n" + note line + note "❌" + fail "test failures" + +-- | Label a test. Can be nested. A `'.'` is placed between nested +-- scopes, so `scope "foo" . scope "bar"` is equivalent to `scope "foo.bar"` +scope :: String -> Test a -> Test a +scope msg (Test t) = wrap . Test $ do + env <- ask + let messages' = case messages env of [] -> msg; ms -> ms ++ ('.':msg) + if null (allow env) || take (length (allow env)) msg `isPrefixOf` allow env + then liftIO $ + runReaderT t (env {messages = messages', allow = drop (length msg + 1) (allow env)}) + else putResult Skipped >> pure Nothing + +-- | Log a message +note :: String -> Test () +note msg = do + note_ <- asks note_ + liftIO $ note_ msg + pure () + +-- | Log a showable value +note' :: Show s => s -> Test () +note' = note . show + +-- | Generate a random value +random :: Random a => Test a +random = do + rng <- asks rng + liftIO . atomically $ do + rng0 <- readTVar rng + let (a, rng1) = Random.random rng0 + writeTVar rng rng1 + pure a + +-- | Generate a bounded random value. Inclusive on both sides. +random' :: Random a => a -> a -> Test a +random' lower upper = do + rng <- asks rng + liftIO . 
atomically $ do + rng0 <- readTVar rng + let (a, rng1) = Random.randomR (lower,upper) rng0 + writeTVar rng rng1 + pure a + +bool :: Test Bool +bool = random + +word8 :: Test Word8 +word8 = random + +-- | Generate a random `Char` +char :: Test Char +char = random + +-- | Generate a random `Int` +int :: Test Int +int = random + +-- | Generate a random `Double` +double :: Test Double +double = random + +-- | Generate a random `Word` +word :: Test Word +word = random + +-- | Generate a random `Int` in the given range +-- Note: `int' 0 5` includes both `0` and `5` +int' :: Int -> Int -> Test Int +int' = random' + +-- | Generate a random `Char` in the given range +-- Note: `char' 'a' 'z'` includes both `'a'` and `'z'`. +char' :: Char -> Char -> Test Char +char' = random' + +-- | Generate a random `Double` in the given range +-- Note: `double' 0 1` includes both `0` and `1`. +double' :: Double -> Double -> Test Double +double' = random' + +-- | Generate a random `Double` in the given range +-- Note: `word' 0 10` includes both `0` and `10`. +word' :: Word -> Word -> Test Word +word' = random' + +-- | Generate a random `Double` in the given range +-- Note: `word8' 0 10` includes both `0` and `10`. 
+word8' :: Word8 -> Word8 -> Test Word8 +word8' = random' + +-- | Sample uniformly from the given list of possibilities +pick :: [a] -> Test a +pick as = let n = length as; ind = picker n as in do + i <- int' 0 (n - 1) + Just a <- pure (ind i) + pure a + +picker :: Int -> [a] -> (Int -> Maybe a) +picker _ [] = const Nothing +picker _ [a] = \i -> if i == 0 then Just a else Nothing +picker size as = go where + lsize = size `div` 2 + rsize = size - lsize + (l,r) = splitAt lsize as + lpicker = picker lsize l + rpicker = picker rsize r + go i = if i < lsize then lpicker i else rpicker (i - lsize) + +-- | Alias for `replicateM` +listOf :: Int -> Test a -> Test [a] +listOf = replicateM + +-- | Generate a list of lists of the given sizes, +-- an alias for `sizes `forM` \n -> listOf n gen` +listsOf :: [Int] -> Test a -> Test [[a]] +listsOf sizes gen = sizes `forM` \n -> listOf n gen + +-- | Alias for `liftA2 (,)`. +pair :: Test a -> Test b -> Test (a,b) +pair = liftA2 (,) + +-- | Generate a `Data.Map k v` of the given size. +mapOf :: Ord k => Int -> Test k -> Test v -> Test (Map k v) +mapOf n k v = Map.fromList <$> listOf n (pair k v) + +-- | Generate a `[Data.Map k v]` of the given sizes. 
+mapsOf :: Ord k => [Int] -> Test k -> Test v -> Test [Map k v] +mapsOf sizes k v = sizes `forM` \n -> mapOf n k v + +-- | Catch all exceptions that could occur in the given `Test` +wrap :: Test a -> Test a +wrap (Test t) = Test $ do + env <- ask + lift $ runWrap env t + +runWrap :: Env -> ReaderT Env IO (Maybe a) -> IO (Maybe a) +runWrap env t = do + e <- try $ runReaderT t env + case e of + Left e -> do + note_ env (messages env ++ " EXCEPTION!!!: " ++ show (e :: SomeException)) + runReaderT (putResult Failed) env + pure Nothing + Right a -> pure a + +-- | A test with a setup and teardown +using :: IO r -> (r -> IO ()) -> (r -> Test a) -> Test a +using r cleanup use = Test $ do + r <- liftIO r + env <- ask + let Test t = use r + a <- liftIO (runWrap env t) + liftIO (cleanup r) + pure a + +-- | The current scope +currentScope :: Test String +currentScope = asks messages + +-- | Prepend the current scope to a logging message +noteScoped :: String -> Test () +noteScoped msg = do + s <- currentScope + note (s ++ (if null s then "" else " ") ++ msg) + +-- | Record a successful test at the current scope +ok :: Test () +ok = Test (Just <$> putResult (Passed 1)) + +-- | Skip any tests depending on the return value. 
+done :: Test a +done = Test (pure Nothing) + +-- | Explicitly skip this test +skip :: Test () +skip = Test (Nothing <$ putResult Skipped) + +-- | Record a failure at the current scope +crash :: HasCallStack => String -> Test a +crash msg = do + let trace = callStack + msg' = msg ++ " " ++ prettyCallStack trace + Test (Just <$> putResult Failed) >> noteScoped ("FAILURE " ++ msg') >> Test (pure Nothing) + +-- | Overwrites the env so that note_ (the logger) is a no op +nologging :: HasCallStack => Test a -> Test a +nologging (Test t) = Test $ do + env <- ask + liftIO $ runReaderT t (env {note_ = \_ -> pure ()}) + +-- | Run a test under a new scope, without logs and suppressing all output +attempt :: Test a -> Test (Maybe a) +attempt (Test t) = nologging $ do + env <- ask + let msg = "internal attempt" + let messages' = case messages env of [] -> msg; ms -> ms ++ ('.':msg) + liftIO $ runWrap env { messages = messages', allow = "not visible" } t + +-- | Placeholder wrapper for a failing test. The test being wrapped is expected/known to fail. +-- Will produce a failure if the test being wrapped suddenly becomes a success. +pending :: HasCallStack => Test a -> Test a +pending test = do + m <- attempt test + case m of + Just _ -> + crash "This pending test should not pass!" + Nothing -> + ok >> Test (pure Nothing) + +putResult :: Status -> ReaderT Env IO () +putResult passed = do + msgs <- asks messages + allow <- asks (null . allow) + r <- liftIO . atomically $ newTMVar (msgs, if allow then passed else Skipped) + q <- asks results + lift . atomically $ writeTBQueue q (Just r) + +instance MonadReader Env Test where + ask = Test $ do + allow <- asks (null . allow) + if allow then Just <$> ask else pure Nothing + local f (Test t) = Test (local f t) + reader f = Test (Just <$> reader f) + +instance Monad Test where + return a = Test $ do + allow <- asks (null . 
allow) + pure $ if allow then Just a else Nothing + Test a >>= f = Test $ do + a <- a + case a of + Nothing -> pure Nothing + Just a -> let Test t = f a in t + +instance MonadFail Test where + fail = crash + +instance Functor Test where + fmap = liftM + +instance Applicative Test where + pure = return + (<*>) = ap + +instance MonadIO Test where + liftIO io = do + s <- asks (null . allow) + if s then + wrap $ Test (Just <$> liftIO io) + else + Test (pure Nothing) + +instance Alternative Test where + empty = Test (pure Nothing) + Test t1 <|> Test t2 = Test $ do + env <- ask + (rng1, rng2) <- liftIO . atomically $ do + currentRng <- readTVar (rng env) + let (rng1, rng2) = Random.split currentRng + (,) <$> newTVar rng1 <*> newTVar rng2 + lift $ do + r1 <- runWrap (env { rng = rng1 }) t1 + (<|> r1) <$> runWrap (env { rng = rng2 }) t2 + +instance MonadPlus Test where + mzero = empty + mplus = (<|>) + +-- | Run a test in a separate thread, not blocking for its result. +fork :: Test a -> Test () +fork t = void (fork' t) + +-- | Run a test in a separate thread, return a future which can be used +-- to block on its result. +fork' :: Test a -> Test (Test a) +fork' (Test t) = do + env <- ask + tmvar <- liftIO newEmptyTMVarIO + liftIO . atomically $ writeTBQueue (results env) (Just tmvar) + r <- liftIO . A.async $ runWrap env t + waiter <- liftIO . A.async $ do + e <- A.waitCatch r + _ <- atomically $ tryPutTMVar tmvar (messages env, Skipped) + case e of + Left _ -> pure Nothing + Right a -> pure a + pure $ do + a <- liftIO (A.wait waiter) + case a of Nothing -> empty + Just a -> pure a diff --git a/yaks/easytest/tests/Suite.hs b/yaks/easytest/tests/Suite.hs new file mode 100644 index 0000000000..77aad62ae3 --- /dev/null +++ b/yaks/easytest/tests/Suite.hs @@ -0,0 +1,34 @@ +module Main where + +import EasyTest +import Control.Applicative +import Control.Monad + +suite1 :: Test () +suite1 = tests + [ scope "a" ok + , scope "b.c" ok + , scope "b" ok + , scope "b" . scope "c" . 
scope "d" $ ok + , scope "c" ok ] + +suite2 :: Test () +suite2 = tests + [ scope "pending.failure" (pending (expectEqual True False)) + --, scope "pending.success" (pending ok) + ] + +reverseTest :: Test () +reverseTest = scope "list reversal" $ do + nums <- listsOf [0..100] (int' 0 99) + nums `forM_` \nums -> expect (reverse (reverse nums) == nums) + +main :: IO () +main = do + run suite1 + runOnly "a" suite1 + runOnly "b" suite1 + runOnly "b" $ tests [suite1, scope "xyz" (crash "never run")] + runOnly "b.c" $ tests [suite1, scope "b" (crash "never run")] + run reverseTest + run suite2 From e4c26486e7cc0981373bf682902acd1a7f5a404b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 13 Oct 2020 17:38:13 -0400 Subject: [PATCH 016/225] fix qualified import --- codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index ae8e83ac00..ae17645f45 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -51,12 +51,12 @@ import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Kind as V2.Kind import qualified U.Codebase.Reference as V2.Reference import qualified U.Codebase.Referent as V2.Referent -import qualified U.Codebase.Referent as V2.Sqlite.Referent import Data.String.Here.Uninterpolated (here) import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Queries as Db import qualified U.Codebase.Sqlite.Reference as V2S.Reference import qualified U.Codebase.Sqlite.Reference as V2.Sqlite.Reference +import qualified U.Codebase.Sqlite.Referent as V2.Sqlite.Referent import qualified U.Codebase.Sqlite.Serialization as S.V2 import qualified U.Codebase.Sqlite.Symbol as V2.Symbol import qualified U.Codebase.Sqlite.Term.Format as V2.TermFormat From 
ec7e3c47f0a7443aa47b0467415548c614d61e1f Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 13 Oct 2020 17:58:01 -0400 Subject: [PATCH 017/225] createTypeSearchIndicesForReferent --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 44 +++++++------------ 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index ae17645f45..39230f72f4 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -28,8 +28,7 @@ import Data.Bifunctor (Bifunctor (first), second) import Data.Bytes.Get (MonadGet) import Data.Either (partitionEithers) import Data.Either.Extra (mapLeft) -import Data.Foldable (Foldable (toList), for_) -import Data.Foldable (Foldable (foldl')) +import Data.Foldable (Foldable (foldl', toList), for_, traverse_) import Data.Functor ((<&>)) import qualified Data.List as List import Data.List.Extra (nubOrd) @@ -676,31 +675,18 @@ createTypeSearchIndicesForReferent r typ = do typeReferenceForIndexing :: V2.Sqlite.Reference.ReferenceH <- saveReferenceAsReference2 (TypeUtil.toReference typeForIndexing) - -- Db.addToFindByTypeIndex r typeReferenceForIndexing - - -- -- add the term to the type mentions index - -- typeMentionsForIndexing :: [V2.Sqlite.Reference.ReferenceH] <- - -- traverse - -- saveReferenceAsReference2 - -- (toList $ Type.toReferenceMentions typeForIndexing) - - -- traverse_ (Db.addToFindByTypeMentionsIndex r) typeMentionsForIndexing - error "todo" - -- where - -- addTermToFindByTypeIndex :: DB m => (V2.ReferentId Db.ObjectId) -> Reference -> m () - -- addTermToFindByTypeIndex termRef typeRef = do - -- typeRef2 :: (V2.Reference Db.HashId) <- - -- saveReferenceAsReference2 typeRef - -- Db.addToFindByTypeIndex termRef typeRef2 - -- addTermToTypeMentionsIndex :: - -- (DB m, Foldable f) => (V2.ReferentId Db.ObjectId) -> f Reference -> m () - -- 
addTermToTypeMentionsIndex termRef typeRefs = do - -- typeRefs2 :: [V2.Reference Db.HashId] <- - -- traverse saveReferenceAsReference2 (toList typeRefs) - -- traverse_ (Db.addToFindByTypeMentionsIndex termRef) typeRefs2 - --- createDependencyIndexForTerm :: DB m => V2.ReferenceId Db.ObjectId -> Term2 Db.ObjectId-> m () --- createDependencyIndexForTerm tmRef@(V2.ReferenceId selfId _i) tm = error "todo" + Db.addToTypeIndex typeReferenceForIndexing r + + -- add the term to the type mentions index + typeMentionsForIndexing :: [V2.Sqlite.Reference.ReferenceH] <- + traverse + saveReferenceAsReference2 + (toList $ TypeUtil.toReferenceMentions typeForIndexing) + + traverse_ (flip Db.addToTypeMentionsIndex r) typeMentionsForIndexing + +createDependencyIndexForTerm :: DB m => V2.Sqlite.Reference.Id -> V2HashTerm -> m () +createDependencyIndexForTerm tmRef@(V2.Reference.Id selfId _i) tm = error "todo" -- -- let -- -- -- get the term dependencies @@ -1058,8 +1044,8 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do let rt = V2.Referent.RefId r saveTypeBlobForTerm r (buildTermType2S lookupText lookup2 type2) - -- createTypeSearchIndicesForReferent rt type2 - -- createDependencyIndexForTerm r term2 + createTypeSearchIndicesForReferent rt type2 + -- createDependencyIndexForTerm r term2 error "todo: save types and create type indices for component" convertDecl1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> V1 Hash -> [V1Decl] -> m () From 6497f8be112e597f22f953502f35ac16130cfe78 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 15 Oct 2020 11:57:21 -0400 Subject: [PATCH 018/225] create dependency index --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 62 ++++++++++++++----- .../lib/U/Codebase/Convert/TermUtil.hs | 54 ++++++++++++++++ .../unison-codebase-convert-1to2.cabal | 1 + .../U/Codebase/Sqlite/LocalIds.hs | 1 - .../U/Codebase/Sqlite/Queries.hs | 2 +- .../U/Codebase/Sqlite/Referent.hs | 4 +- .../U/Codebase/Sqlite/Term/Format.hs | 2 +- 
codebase2/codebase/U/Codebase/Reference.hs | 6 +- codebase2/codebase/U/Codebase/Referent.hs | 9 +++ codebase2/codebase/U/Codebase/Term.hs | 11 ---- codebase2/codebase/U/Codebase/Type.hs | 5 ++ codebase2/core/U/Core/ABT.hs | 19 ++++++ questions.md | 10 +++ 13 files changed, 155 insertions(+), 31 deletions(-) create mode 100644 codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs create mode 100644 questions.md diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index 39230f72f4..e0514775fb 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -45,6 +45,7 @@ import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField) +import qualified U.Codebase.Convert.TermUtil as TermUtil import qualified U.Codebase.Convert.TypeUtil as TypeUtil import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Kind as V2.Kind @@ -91,6 +92,8 @@ import qualified Unison.Codebase.V1.Type as V1.Type import qualified Unison.Codebase.V1.Type.Kind as V1.Kind import UnliftIO (MonadIO, liftIO) import UnliftIO.Directory (listDirectory) +import Data.Set (Set) +import Data.Bifunctor (Bifunctor(bimap)) newtype V1 a = V1 {runV1 :: a} deriving (Eq, Ord, Show) @@ -685,18 +688,50 @@ createTypeSearchIndicesForReferent r typ = do traverse_ (flip Db.addToTypeMentionsIndex r) typeMentionsForIndexing -createDependencyIndexForTerm :: DB m => V2.Sqlite.Reference.Id -> V2HashTerm -> m () -createDependencyIndexForTerm tmRef@(V2.Reference.Id selfId _i) tm = error "todo" - --- -- let --- -- -- get the term dependencies --- -- dependencies :: Set (Reference.ReferenceH Db.ObjectId) --- -- dependencies = Term.dependencies $ Term.hmap (fromMaybe selfId) tm --- -- -- and convert them to Reference2 --- -- dependencies2 
:: [V2.Reference Db.ObjectId] --- -- dependencies2 = over Db.referenceTraversal id <$> toList dependencies --- -- -- and then add all of these to the dependency index --- -- in traverse_ (Db.addDependencyToIndex tmRef) dependencies2 +-- todo: +createDependencyIndexForTerm :: DB m => V2.Sqlite.Reference.Id -> V2DiskTermComponent -> m () +createDependencyIndexForTerm tmRef@(V2.Reference.Id selfId i) (V2.TermFormat.LocallyIndexedComponent c) = +-- newtype LocallyIndexedComponent = +-- LocallyIndexedComponent (Vector (LocalIds, Term)) + let + -- | get the ith element from the term component + (localIds, localTerm) = c Vector.! fromIntegral i + + -- get the term dependencies as localids + termRefs :: [V2.TermFormat.TermRef] + typeRefs :: [V2.TermFormat.TypeRef] + termLinks :: [V2.Referent.Referent' V2.TermFormat.TermRef V2.TermFormat.TypeRef] + typeLinks :: [V2.TermFormat.TypeRef] + (termRefs, typeRefs, termLinks, typeLinks) = TermUtil.dependencies localTerm + + -- and convert them to Reference' TextId ObjectId + localToDbTextId :: V2.TermFormat.LocalTextId -> Db.TextId + localToDbTextId (V2.TermFormat.LocalTextId n) = + V2.LocalIds.textLookup localIds Vector.! fromIntegral n + localToDbDefnId :: V2.TermFormat.LocalDefnId -> Db.ObjectId + localToDbDefnId (V2.TermFormat.LocalDefnId n)= + V2.LocalIds.objectLookup localIds Vector.! 
fromIntegral n + localToDbTermRef :: V2.TermFormat.TermRef -> V2.Sqlite.Reference.Reference + localToDbTermRef = bimap localToDbTextId (maybe selfId localToDbDefnId) + localToDbTypeRef :: V2.TermFormat.TypeRef -> V2.Sqlite.Reference.Reference + localToDbTypeRef = bimap localToDbTextId localToDbDefnId + localFoo :: V2.Referent.Referent' V2.TermFormat.TermRef V2.TermFormat.TypeRef -> V2.Sqlite.Reference.Reference + localFoo = \case + V2.Referent.Ref tm -> localToDbTermRef tm + V2.Referent.Con tp _ -> localToDbTypeRef tp + dependencies :: [V2.Sqlite.Reference.Reference] + dependencies = map localToDbTermRef termRefs + <> map localToDbTypeRef typeRefs + <> map localFoo termLinks + <> map localToDbTypeRef typeLinks + -- and then add all of these to the dependency index + in traverse_ (flip Db.addToDependentsIndex tmRef) dependencies + +localDefnIdToObjectId :: V2.LocalIds.LocalIds -> V2.TermFormat.LocalDefnId -> Db.ObjectId +localDefnIdToObjectId (V2.LocalIds.LocalIds _t d) (V2.TermFormat.LocalDefnId id) = d Vector.! fromIntegral id + +localTextIdToObjectId :: V2.LocalIds.LocalIds -> V2.TermFormat.LocalTextId -> Db.TextId +localTextIdToObjectId (V2.LocalIds.LocalIds t _d) (V2.TermFormat.LocalTextId id) = t Vector.! 
fromIntegral id -- createDependencyIndexForDecl :: DB m => V2.ReferenceId Db.ObjectId -> Decl2S -> m () -- createDependencyIndexForDecl tmRef@(V2.ReferenceId selfId _i) decl = @@ -1045,8 +1080,7 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do saveTypeBlobForTerm r (buildTermType2S lookupText lookup2 type2) createTypeSearchIndicesForReferent rt type2 - -- createDependencyIndexForTerm r term2 - error "todo: save types and create type indices for component" + createDependencyIndexForTerm r v2diskComponent convertDecl1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> V1 Hash -> [V1Decl] -> m () convertDecl1 lookup1 lookup2 hash1 v1component = do diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs new file mode 100644 index 0000000000..60d31295bf --- /dev/null +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs @@ -0,0 +1,54 @@ +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE ViewPatterns #-} + +module U.Codebase.Convert.TermUtil where + +import qualified U.Core.ABT as ABT +import qualified U.Codebase.Term as Term +import U.Codebase.Term (Pattern(..), MatchCase(..), F'(..)) +import Control.Monad.Writer (tell, execWriter) +import Data.Foldable (traverse_, for_) + +text :: Ord v => ABT.Term (Term.F' text termRef typeRef termLink typeLink vt) v a -> [text] +text = execWriter . ABT.visit_ \case + Text t -> tell [t] + _ -> pure () + +dependencies :: Ord v => ABT.Term (Term.F' text termRef typeRef termLink typeLink vt) v a -> + ([termRef], [typeRef], [termLink], [typeLink]) +dependencies = execWriter . 
ABT.visit_ \case + Ref r -> termRef r + Constructor r _ -> typeRef r + Request r _ -> typeRef r + Match _ cases -> for_ cases \case + MatchCase pat _guard _body -> go pat where + go = \case + PConstructor r _i args -> typeRef r *> traverse_ go args + PAs pat -> go pat + PEffectPure pat -> go pat + PEffectBind r _i args k -> typeRef r *> traverse_ go args *> go k + PSequenceLiteral pats -> traverse_ go pats + PSequenceOp l _op r -> go l *> go r + _ -> pure () + TermLink r -> termLink r + TypeLink r -> typeLink r + _ -> pure () + where + termRef r = tell (pure r, mempty, mempty, mempty) + typeRef r = tell (mempty, pure r, mempty, mempty) + termLink r = tell (mempty, mempty, pure r, mempty) + typeLink r = tell (mempty, mempty, mempty, pure r) + + + +fold :: Monad m => + (text -> m ()) -> + (termRef -> m ()) -> + (typeRef -> m ()) -> + (termLink -> m ()) -> + (typeLink -> m ()) -> + ABT.Term (Term.F' text termRef typeRef termLink typeLink vt) v a -> + m () +fold = undefined diff --git a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal index b66e65820e..4fa77ee7a4 100644 --- a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal +++ b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal @@ -39,6 +39,7 @@ library hs-source-dirs: lib exposed-modules: U.Codebase.Convert.SyncV1V2 + U.Codebase.Convert.TermUtil U.Codebase.Convert.TypeUtil -- other-modules: -- other-extensions: diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index afa044aae7..d529e7b3f7 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -8,4 +8,3 @@ data LocalIds = LocalIds { textLookup :: Vector TextId, objectLookup :: Vector ObjectId } - diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 
3277b4c67b..e4536ff2a2 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -201,7 +201,7 @@ addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| ) VALUES (?, ?, ?, ?, ?, ?) |] -addToDependentsIndex :: DB m => Reference' TextId ObjectId -> Reference.Id -> m () +addToDependentsIndex :: DB m => Reference.Reference -> Reference.Id -> m () addToDependentsIndex dependency dependent = execute sql (dependency :. dependent) where sql = [here| INSERT OR IGNORE INTO dependents_index ( diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs index 32640600cf..459132c322 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -8,12 +8,12 @@ module U.Codebase.Sqlite.Referent where import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(..)) import U.Codebase.Referent (Id', Referent') -import U.Codebase.Sqlite.Reference (Reference) +import qualified U.Codebase.Sqlite.Reference as Sqlite import U.Codebase.Sqlite.DbId (ObjectId) import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Reference as Reference -type Referent = Referent' Reference Reference +type Referent = Referent' Sqlite.Reference Sqlite.Reference type Id = Id' ObjectId ObjectId instance ToRow Id where diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 6be42ed20b..357640bcaa 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -20,7 +20,7 @@ type TermRef = Reference' LocalTextId (Maybe LocalDefnId) type TypeRef = Reference' LocalTextId LocalDefnId -data LocallyIndexedComponent = +newtype LocallyIndexedComponent = LocallyIndexedComponent (Vector (LocalIds, Term)) type F = diff 
--git a/codebase2/codebase/U/Codebase/Reference.hs b/codebase2/codebase/U/Codebase/Reference.hs index 0782fc93da..8094c584ba 100644 --- a/codebase2/codebase/U/Codebase/Reference.hs +++ b/codebase2/codebase/U/Codebase/Reference.hs @@ -12,7 +12,7 @@ import qualified U.Util.Hash as Hash import U.Util.Hash (Hash) import U.Util.Hashable (Hashable (..)) import qualified U.Util.Hashable as Hashable -import Control.Lens (Traversal) +import Control.Lens (Bifunctor(..), Traversal) -- |This is the canonical representation of Reference type Reference = Reference' Text Hash @@ -42,6 +42,10 @@ h f = \case ReferenceBuiltin t -> pure (ReferenceBuiltin t) Derived h i -> Derived <$> f h <*> pure i +instance Bifunctor Reference' where + bimap fl _ (ReferenceBuiltin t) = ReferenceBuiltin (fl t) + bimap _ fr (ReferenceDerived id) = ReferenceDerived (fr <$> id) + instance Hashable Reference where tokens (ReferenceBuiltin txt) = [Hashable.Tag 0, Hashable.Text txt] diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs index 84381096c0..692f0b2e74 100644 --- a/codebase2/codebase/U/Codebase/Referent.hs +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -11,6 +11,7 @@ import U.Util.Hash (Hash) import U.Util.Hashable (Hashable (..)) import Data.Word (Word64) import qualified U.Util.Hashable as Hashable +import Data.Bifunctor (Bifunctor(..)) type Referent = Referent' Reference Reference type ReferentH = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) @@ -31,3 +32,11 @@ data Id' hTm hTp instance (Hashable rTm, Hashable rTp) => Hashable (Referent' rTm rTp) where tokens (Ref r) = Hashable.Tag 0 : Hashable.tokens r tokens (Con r i) = [Hashable.Tag 1] ++ Hashable.tokens r ++ [Hashable.Nat (fromIntegral i)] + +instance Bifunctor Referent' where + bimap f _ (Ref r) = Ref (f r) + bimap _ g (Con r i) = Con (g r) i + +instance Bifunctor Id' where + bimap f _ (RefId (Reference.Id h i)) = RefId (Reference.Id (f h) i) + bimap _ g (ConId 
(Reference.Id h i) j) = ConId (Reference.Id (g h) i) j diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index 75e4575d7f..ff7fdbd8e0 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -114,17 +114,6 @@ data SeqOp | PConcat deriving (Eq, Show) --- getHashesAndTextF :: - --- rmap :: --- (termRef -> termRef') -> --- (typeRef -> typeRef') -> --- (termLink -> termLink') -> --- TermR termRef typeRef termLink typeRef (TypeR typeRef vt at) blankRepr ap v a -> --- TermR termRef' typeRef' termLink' typeRef' (TypeR typeRef' vt at) blankRepr ap v a --- rmap fTermRef fTypeRef fTermLink t = --- extraMap fTermRef fTypeRef fTermLink fTypeRef (Type.rmap fTypeRef) undefined id t - extraMap :: forall text termRef typeRef termLink typeLink vt text' termRef' typeRef' termLink' typeLink' vt' v a . (Ord v, Ord vt') diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index bcd3777dd0..abef4faa19 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -52,6 +52,11 @@ rmap f = ABT.transform \case Ref r -> Ref (f r) x -> unsafeCoerce x +rtraverse :: (Monad g, Ord v) => (r -> g r') -> ABT.Term (F' r) v a -> g (ABT.Term (F' r') v a) +rtraverse g = ABT.transformM \case + Ref r -> Ref <$> g r + x -> pure $ unsafeCoerce x + instance Hashable r => Hashable1 (F' r) where hash1 hashCycle hash e = let diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index 7e0a88175d..94a265f94d 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -44,6 +44,13 @@ vmap f (Term _ a out) = case out of Cycle r -> cycle a (vmap f r) Abs v body -> abs a (f v) (vmap f body) +vtraverse :: (Traversable f, Applicative g, Ord v') => (v -> g v') -> Term f v a -> g (Term f v' a) +vtraverse g (Term _ a out) = case out of + Var v -> var a <$> g v + Cycle r -> cycle a <$> vtraverse g r + Abs v r -> abs a <$> g v <*> 
vtraverse g r + Tm fa -> tm a <$> traverse (vtraverse g) fa + transform :: (Ord v, Foldable g, Functor g) => (forall a. f a -> g a) -> Term f v a -> Term g v a transform f t = case out t of @@ -170,6 +177,18 @@ visit' f t = case out t of Abs x e -> abs (annotation t) x <$> visit' f e Tm body -> f body >>= (fmap (tm (annotation t)) . traverse (visit' f)) +-- | Apply an effectful function to an ABT tree top down, sequencing the results. +visit_ :: (Traversable f, Applicative g, Monad g, Ord v) + => (f (Term f v a) -> g ()) + -> Term f v a + -> g (Term f v a) +visit_ f t = case out t of + Var _ -> pure t + Cycle body -> cycle (annotation t) <$> visit_ f body + Abs x e -> abs (annotation t) x <$> visit_ f e + Tm body -> f body >> tm (annotation t) <$> traverse (visit_ f) body + + -- | `visit` specialized to the `Identity` effect. visitPure :: (Traversable f, Ord v) => (Term f v a -> Maybe (Term f v a)) -> Term f v a -> Term f v a diff --git a/questions.md b/questions.md new file mode 100644 index 0000000000..18459eae03 --- /dev/null +++ b/questions.md @@ -0,0 +1,10 @@ +Question: should the dependents / dependency index be a Relation Reference Reference (like now) a Relation Referent Reference? + +Example, if you have `type Foo = Blah | Blah2 Nat`, + + + +Advantages: + +* If patches can replace constructors (not just types or terms), then having the index keyed by `Referent` lets you efficiently target the definitions that use those constructors. +* Also lets you find things that depend on `Blah2` (rather than depending on `Foo`). 
From 22f07828d0acf8038d8dd02443ab3042ee59d06b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 19 Oct 2020 10:17:25 -0400 Subject: [PATCH 019/225] wip --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 104 +++++++--- codebase2/codebase-sqlite/sql/create.sql | 25 ++- codebase2/codebase/U/Codebase/Codebase.hs | 22 +-- codebase2/codebase/U/Codebase/Decl.hs | 51 ++++- codebase2/codebase/U/Codebase/Term.hs | 2 +- codebase2/codebase/U/Codebase/Type.hs | 2 +- codebase2/core/U/Core/ABT.hs | 18 +- hie.yaml | 177 ++++++++++++++++++ parser-typechecker/tests/Unison/Test/ANF.hs | 2 + .../unison-parser-typechecker.cabal | 2 +- stack.yaml | 8 +- unison-core/src/Unison/Reference.hs | 1 + unison-core/src/Unison/Referent.hs | 8 +- .../{unison-core.cabal => unison-core1.cabal} | 2 +- yaks/easytest/tests/Suite.hs | 3 +- 15 files changed, 355 insertions(+), 72 deletions(-) rename unison-core/{unison-core.cabal => unison-core1.cabal} (98%) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index e0514775fb..e1516d146d 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -13,7 +13,7 @@ {-# OPTIONS_GHC -Wno-incomplete-patterns #-} {-# OPTIONS_GHC -Wno-unused-imports #-} {-# OPTIONS_GHC -Wno-unused-do-bind #-} -{-# OPTIONS_GHC -Wno-unused-local-binds #-} +-- {-# OPTIONS_GHC -Wno-unused-local-binds #-} {-# OPTIONS_GHC -Wno-unused-matches #-} module U.Codebase.Convert.SyncV1V2 where @@ -791,6 +791,25 @@ convertABT ff fv fa = goTerm V1.ABT.Abs v t -> V2.ABT.Abs (fv v) (goTerm t) V1.ABT.Tm ft -> V2.ABT.Tm (ff ft) +convertABT0 :: Functor f => V1.ABT.Term f v a -> V2.ABT.Term f v a +convertABT0 (V1.ABT.Term vs a out) = V2.ABT.Term vs a (goABT out) where + goABT = \case + V1.ABT.Var v -> V2.ABT.Var v + V1.ABT.Cycle t -> V2.ABT.Cycle (convertABT0 t) + V1.ABT.Abs v t -> V2.ABT.Abs v (convertABT0 t) + V1.ABT.Tm ft -> V2.ABT.Tm (convertABT0 <$> 
ft) + +convertType1to2 :: (V1.Reference.Reference -> r) -> V1.Type.F a -> V2.Type.F' r a +convertType1to2 fr = \case + V1.Type.Ref r -> V2.Type.Ref (fr r) + V1.Type.Arrow i o -> V2.Type.Arrow i o + V1.Type.Ann a k -> V2.Type.Ann a (convertKind k) + V1.Type.App f x -> V2.Type.App f x + V1.Type.Effect e b -> V2.Type.Effect e b + V1.Type.Effects as -> V2.Type.Effects as + V1.Type.Forall a -> V2.Type.Forall a + V1.Type.IntroOuter a -> V2.Type.IntroOuter a + convertSymbol :: V1.Symbol.Symbol -> V2.Symbol.Symbol convertSymbol (V1.Symbol.Symbol id name) = V2.Symbol.Symbol id name @@ -855,31 +874,23 @@ mapVarToTerm fAbs fVar t@(V2.ABT.Term _ a abt) = case abt of convertTerm1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> (Text -> Db.TextId) -> V1 Hash -> [(V1Term, V1Type)] -> m () convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do -- construct v2 term component for hashing - let buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2HashTypeOfTerm - buildTermType2H lookup = goType + let + buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2HashTypeOfTerm + buildTermType2H lookup + = V2.ABT.transform (convertType1to2 goRef) + . V2.ABT.vmap convertSymbol + . convertABT0 where - goType :: V1Type -> V2HashTypeOfTerm - goType = convertABT goABT convertSymbol (const ()) - goABT :: V1.Type.F V1Type -> V2.Type.FT V2HashTypeOfTerm - goABT = \case - V1.Type.Ref r -> V2.Type.Ref case r of - V1.Reference.Builtin t -> - V2.Reference.ReferenceBuiltin t - V1.Reference.Derived h i _n -> - V2.Reference.ReferenceDerived - (V2.Reference.Id (runV2 . 
lookup $ V1 h) i) - V1.Type.Arrow i o -> V2.Type.Arrow (goType i) (goType o) - V1.Type.Ann a k -> V2.Type.Ann (goType a) (convertKind k) - V1.Type.App f x -> V2.Type.App (goType f) (goType x) - V1.Type.Effect e b -> V2.Type.Effect (goType e) (goType b) - V1.Type.Effects as -> V2.Type.Effects (goType <$> as) - V1.Type.Forall a -> V2.Type.Forall (goType a) - V1.Type.IntroOuter a -> V2.Type.IntroOuter (goType a) + goRef = \case + V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t + V1.Reference.Derived h i _n -> + V2.Reference.ReferenceDerived + (V2.Reference.Id (runV2 . lookup $ V1 h) i) buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> V1Term -> V2HashTerm buildTerm2H lookup self = goTerm where - goTerm = convertABT goABT convertSymbol (const ()) - goABT :: V1.Term.F V1.Symbol.Symbol () V1Term -> V2.Term.F V2.Symbol.Symbol V2HashTerm + goTerm = convertABT goTermF convertSymbol (const ()) + goTermF :: V1.Term.F V1.Symbol.Symbol () V1Term -> V2.Term.F V2.Symbol.Symbol V2HashTerm lookupTermLink = \case V1.Referent.Ref r -> V2.Referent.Ref (lookupTerm r) V1.Referent.Con r i _ct -> V2.Referent.Con (lookupType r) (fromIntegral i) @@ -895,7 +906,7 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do V1.Reference.Derived h i _n -> V2.Reference.ReferenceDerived (V2.Reference.Id (runV2 . lookup $ V1 h) i) - goABT = \case + goTermF = \case V1.Term.Int i -> V2.Term.Int i V1.Term.Nat n -> V2.Term.Nat n V1.Term.Float f -> V2.Term.Float f @@ -1043,24 +1054,39 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do rehashComponent lookup1 hash1 (unzip -> (v1terms, v1types)) = let fromLeft = either id (\x -> error $ "impossibly " ++ show x) in let indexVars = Left . V1 <$> [0 ..] 
+ -- create a [(v, V1Term)] namedTerms1 :: [(Either (V1 Int) V2.Symbol.Symbol, V1Term)] namedTerms1 = zip indexVars v1terms + -- convert [(v, V1Term)] to [(v, V2Term)] namedTerms2 :: [(Either (V1 Int) V2.Symbol.Symbol, V2HashTerm)] namedTerms2 = fmap (second (buildTerm2H lookup1 hash1)) namedTerms1 + -- convert the previous to a map namedTermMap :: Map (Either (V1 Int) V2.Symbol.Symbol) V2HashTerm namedTermMap = Map.fromList namedTerms2 + -- convert the Map v V2Term to (hash, [v, V2Term]) where the list + -- has a new canonical ordering hash2 :: V2 Hash v1Index :: [V1 Int] -- (h, ([2, 0, 1], [t2, t0, t1]) (hash2, unzip -> (fmap fromLeft -> v1Index, v2Terms)) = V2.ABT.hashComponent (refToVarTerm <$> namedTermMap) + + -- a mapping from the v1 canonical order to v2 canonical order + -- Q: Why do you need a map from V1 to V2 Ints? + -- A: the `v`s embed the component index of a self-reference, + -- indexMap :: Map (V1 Int) (V2 Int) indexMap = Map.fromList (zip v1Index (V2 <$> [0 :: Int ..])) + + -- convert the V1TypeOfTerm to V2TypeOfTerm, + -- and permute their order according to indexMap convertedTypes, permutedTypes :: [V2HashTypeOfTerm] convertedTypes = map (buildTermType2H lookup1) v1types -- the first element of v1Index is the V1 index of the first V2 element permutedTypes = map (((!!) convertedTypes) . 
runV1) v1Index + -- in (hash2, permutedTypes, varToRefTerm indexMap <$> v2Terms) + hash2 :: V2 Hash v2types :: [V2HashTypeOfTerm] v2hashComponent :: V2HashTermComponent @@ -1085,14 +1111,32 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do convertDecl1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> V1 Hash -> [V1Decl] -> m () convertDecl1 lookup1 lookup2 hash1 v1component = do let -- convert constructor type (similar to buildTermType2H) - v2ctorTypes :: [V2TypeOfConstructor] = error "todo" - -- rehash and reorder component - hash2 :: V2 Hash - v2hashComponent :: V2HashDeclComponent - (hash2, v2hashComponent) = error "todo: rehashComponent lookup1 hash1 v1component" + + -- v2ctorTypes :: [V2TypeOfConstructor] = error "todo" + + -- -- rehash and reorder component + -- hash2 :: V2 Hash + -- v2hashComponent :: V2HashDeclComponent + -- (hash2, v2hashComponent) = rehashComponent lookup1 hash1 v1component + -- where + -- -- take a look at existing DataDeclaration.hashDecls + + -- -- |1. for each decl in a component, convert it to the new abt/functor + -- -- and swap out all its V1 Hashes for V2 Hashes, using `Nothing` for + -- -- a self-reference hash. + -- -- 2. lift the vars so that self-references are Left i + -- -- and local vars are Right Symbol + -- -- 3. call ABT.hashComponent to get a new hash and a new canonical ordering + -- -- 4. 
unlift the vars back, rewrite them to reflect the new canonical ordering + -- rehashComponent :: (V1 Hash -> V2 Hash) -> V1 Hash -> [V1Decl] -> (V2 Hash, V2HashDeclComponent) + -- rehashComponent = error "todo" + -- convert decl component - v2diskComponent :: V2DiskDeclComponent = error "todo" - componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2diskComponent + -- v2diskComponent :: V2DiskDeclComponent = error "todo" + + -- serialize the v2 decl component + -- componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2diskComponent + error "todo: create type indices for each decl in the component" -- let v2componentI :: [Decl2I] = diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 00810ee628..0b7e5330f2 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -87,8 +87,23 @@ CREATE TABLE type_of_term ( PRIMARY KEY (object_id, component_index) ); --- --CREATE TABLE type_of_referent ( --- -- referent_derived_id INTEGER NOT NULL PRIMARY KEY REFERENCES referent_derived(id), --- -- type_object_id INTEGER NOT NULL REFERENCES object(id) --- --); --- --CREATE INDEX type_of_referent_object_id ON type_of_referent(type_object_id); +CREATE TABLE watch ( + object_id INTEGER NOT NULL REFERENCES object(id), + component_index INTEGER NOT NULL, + result BLOB NOT NULL, + PRIMARY KEY (object_id, component_index) +); + +CREATE TABLE watch_kind ( + object_id INTEGER NOT NULL REFERENCES object(id), + component_index INTEGER NOT NULL, + description_id INTEGER NOT NULL REFERENCES watch_kind_description(id), + PRIMARY KEY (object_id, component_index, watch_kind_id) +); +CREATE TABLE watch_kind_description ( + id PRIMARY KEY INTEGER UNIQUE NOT NULL, + description TEXT UNIQUE NOT NULL +); +INSERT INTO watch_kind_description (id, description) VALUES + (0, "Regular"), -- won't be synced + (1, "Test"); -- will be synced diff --git 
a/codebase2/codebase/U/Codebase/Codebase.hs b/codebase2/codebase/U/Codebase/Codebase.hs index f4d9fc000a..b3dd52d6d3 100644 --- a/codebase2/codebase/U/Codebase/Codebase.hs +++ b/codebase2/codebase/U/Codebase/Codebase.hs @@ -25,44 +25,44 @@ data Codebase m v = Codebase { getTerm :: Reference.Id -> m (Maybe (Term v)), getTypeOfTerm :: Reference.Id -> m (Maybe (TypeT v)), getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v)), - + putTerm :: Reference.Id -> Term v -> TypeT v -> m (), putTypeDeclaration :: Reference.Id -> Decl v -> m (), - + getBranch :: BranchHash -> m (Maybe (Branch m)), getRootBranch :: m (Either GetRootBranchError (Branch m)), putRootBranch :: Branch m -> m (), - + getBranchForCausal :: CausalHash -> m (Maybe (Branch m)), - + -- |Supports syncing from a current or older codebase format syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), -- |Only writes the latest codebase format syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), -- ^ maybe return type needs to reflect failure if remote codebase has an old version - + -- |Watch expressions are part of the codebase, the `Reference.Id` is -- the hash of the source of the watch expression, and the `Term v a` -- is the evaluated result of the expression, decompiled to a term. 
watches :: WatchKind -> m [Reference.Id], getWatch :: WatchKind -> Reference.Id -> m (Maybe (Term v)), putWatch :: WatchKind -> Reference.Id -> Term v -> m (), - + getReflog :: m [Reflog.Entry], appendReflog :: Text -> Branch m -> Branch m -> m (), - + -- |the nicely-named versions will utilize these, and add builtins to the result set termsHavingType :: Reference -> m (Set Referent.Id), termsMentioningType :: Reference -> m (Set Referent.Id), - - -- |number of base58 characters needed to distinguish any two hashes in the codebase; + + -- |number of base32 characters needed to distinguish any two hashes in the codebase; -- we don't have to compute it separately for different namespaces hashLength :: m Int, termReferencesByPrefix :: ShortHash -> m (Set Reference.Id), typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id), termReferentsByPrefix :: ShortHash -> m (Set Referent.Id), branchHashesByPrefix :: ShortBranchHash -> m (Set BranchHash), - + -- lca :: (forall he e. [Causal m CausalHash he e] -> m (Maybe BranchHash)), dependents :: Reference -> m (Maybe (Set Reference.Id)), @@ -73,7 +73,7 @@ data Codebase m v = Codebase { -- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), -- -- |the "new" terms and types mentioned in a patch -- patchDependencies :: EditHash -> m (Set Reference, Set Reference) -} +} data GetRootBranchError = NoRootBranch diff --git a/codebase2/codebase/U/Codebase/Decl.hs b/codebase2/codebase/U/Codebase/Decl.hs index 1c662c7b04..656f96061d 100644 --- a/codebase2/codebase/U/Codebase/Decl.hs +++ b/codebase2/codebase/U/Codebase/Decl.hs @@ -1,3 +1,5 @@ +{-# LANGUAGE DeriveFoldable #-} +{-# LANGUAGE DeriveFunctor #-} module U.Codebase.Decl where import Data.Word (Word64) @@ -5,10 +7,13 @@ import U.Codebase.Reference (Reference') import Data.Text (Text) import U.Util.Hash (Hash) import U.Codebase.Type (TypeR) +import qualified U.Util.Hashable as Hashable +import qualified U.Codebase.Type as Type +-- import qualified U.Core.ABT as ABT type 
ConstructorId = Word64 -data DeclType = Data | Effect +data DeclType = Data | Effect deriving (Eq, Ord, Show, Enum) type Decl v = DeclR (Reference' Text Hash) v @@ -20,8 +25,50 @@ data DeclR r v = DataDeclaration { declType :: DeclType, modifier :: Modifier, bound :: [v], - constructors' :: [TypeR r v] + constructors' :: [(v, TypeR r v)] } -- instance Hashable ConstructorType where -- tokens b = [Tag . fromIntegral $ fromEnum b] + +-- * Hashing stuff +constructors :: DeclR r v -> [v] +constructors = fmap fst . constructors' + +constructorTypes :: DeclR r v -> [TypeR r v] +constructorTypes = fmap snd . constructors' + +-- toABT :: Ord v => Decl v -> ABT.Term F v () +-- toABT dd = ABT.tm () $ Modified (modifier dd) dd' +-- where +-- dd' = ABT.absChain (bound dd) $ +-- ABT.absCycle +-- (constructors dd) +-- (ABT.tm () . Constructors $ ABT.transform Type <$> constructorTypes dd) + +data F a + = Type (Type.FD a) + | LetRec [a] a + | Constructors [a] + | Modified Modifier a + deriving (Functor, Foldable, Show) + +instance Hashable.Hashable1 F where + hash1 hashCycle hash e = + let (tag, hashed) = (Hashable.Tag, Hashable.Hashed) + -- Note: start each layer with leading `2` byte, to avoid collisions with + -- terms, which start each layer with leading `1`. 
See `Hashable1 Term.F` + in Hashable.accumulate $ tag 2 : case e of + Type t -> [tag 0, hashed $ Hashable.hash1 hashCycle hash t] + LetRec bindings body -> + let (hashes, hash') = hashCycle bindings + in [tag 1] ++ map hashed hashes ++ [hashed $ hash' body] + Constructors cs -> + let (hashes, _) = hashCycle cs + in tag 2 : map hashed hashes + Modified m t -> + [tag 3, Hashable.accumulateToken m, hashed $ hash t] + +instance Hashable.Hashable Modifier where + tokens Structural = [Hashable.Tag 0] + tokens (Unique txt) = [Hashable.Tag 1, Hashable.Text txt] diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index ff7fdbd8e0..84c87f395b 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -30,7 +30,7 @@ import qualified U.Util.Hashable as H import qualified U.Codebase.Type as Type import qualified U.Util.Hash as Hash import qualified Data.Foldable as Foldable - +  type ConstructorId = Word64 type Term v = ABT.Term (F v) v () diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index abef4faa19..86aa35752a 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -37,7 +37,7 @@ data F' r a | IntroOuter a -- binder like ∀, used to introduce variables that are -- bound by outer type signatures, to support scoped type -- variables - deriving (Foldable, Functor, Eq, Ord, Traversable) + deriving (Foldable, Functor, Eq, Ord, Show, Traversable) -- | Non-recursive type type TypeT v = ABT.Term FT v () diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index 94a265f94d..e9148d5375 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -76,6 +76,12 @@ var a v = Term (Set.singleton v) a (Var v) cycle :: a -> Term f v a -> Term f v a cycle a t = Term (freeVars t) a (Cycle t) +absChain' :: Ord v => [v] -> Term f v () -> Term f v () +absChain' vs t = foldr (\v t -> abs () v t) t 
vs + +absCycle' :: Ord v => [v] -> Term f v () -> Term f v () +absCycle' vs t = cycle () $ absChain' vs t + tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) @@ -97,11 +103,6 @@ hash = hash' [] where Cycle (unabs -> (vs, t)) -> hash' (Left vs : env) t Abs v t -> hash' (Right v : env) t Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t - unabs :: Term f v a -> ([v], Term f v a) - unabs = \case - Term _ _ (Abs hd body) -> - let (tl, body') = unabs body in (hd : tl, body') - t -> ([], t) hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = let @@ -127,16 +128,11 @@ hashComponent byName = let -- so that we can then hash them (closed terms can be hashed) -- so that we can sort them by hash. this is the "canonical, name-agnostic" -- hash that yields the canonical ordering of the component. - tms = [ (v, absCycle vs (tm () $ Component (snd <$> embeds) (var () v))) | v <- vs ] + tms = [ (v, absCycle' vs (tm () $ Component (snd <$> embeds) (var () v))) | v <- vs ] hashed = [ ((v,t), hash t) | (v,t) <- tms ] sortedHashed = List.sortOn snd hashed overallHash = Hashable.accumulate (Hashable.Hashed . snd <$> sortedHashed) in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) - where - absChain :: Ord v => [v] -> Term f v () -> Term f v () - absChain vs t = foldr (abs ()) t vs - absCycle :: Ord v => [v] -> Term f v () -> Term f v () - absCycle vs t = cycle () $ absChain vs t -- Implementation detail of hashComponent data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) diff --git a/hie.yaml b/hie.yaml index 039b9feec1..20b376182f 100644 --- a/hie.yaml +++ b/hie.yaml @@ -35,3 +35,180 @@ cradle: - path: "codebase2/util-serialization/." 
component: "unison-util-serialization:lib" + + - path: "parser-typechecker/src" + component: "unison-parser-typechecker:lib" + + - path: "parser-typechecker/unison/Main.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/unison/System.Path.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/unison/Version.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/prettyprintdemo/Main.hs" + component: "unison-parser-typechecker:exe:prettyprintdemo" + + - path: "parser-typechecker/tests/Suite.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.ABT.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.ANF.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Cache.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Codebase.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Codebase.Causal.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Codebase.FileCodebase.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Codebase.Path.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.ColorText.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Common.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.DataDeclaration.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.FileParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Git.hs" + component: 
"unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Lexer.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.IO.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.MCode.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Range.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Referent.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Term.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.TermParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.TermPrinter.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Type.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.TypePrinter.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Typechecker.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Typechecker.Components.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Typechecker.Context.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Typechecker.TypeError.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.UnisonSources.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.UriParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Util.Bytes.hs" + component: 
"unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Util.PinBoard.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Util.Pretty.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.Var.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Test.VersionParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison.Core.Test.Name.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/transcripts/Transcripts.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/build-depends:.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/base.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/directory.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/easytest.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/filepath.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/shellmet.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/process.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/text.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/unison-core.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/transcripts/unison-parser-typechecker.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/benchmarks/runtime/Main.hs" + component: 
"unison-parser-typechecker:bench:runtime" + + - path: "unison-core/src" + component: "unison-core1:lib" + + - path: "yaks/easytest/src" + component: "easytest:lib" + + - path: "yaks/easytest/tests/Suite.hs" + component: "easytest:exe:runtests" + + - path: "yaks/easytest/tests/build-depends:.hs" + component: "easytest:exe:runtests" + + - path: "yaks/easytest/tests/base.hs" + component: "easytest:exe:runtests" + + - path: "yaks/easytest/tests/easytest.hs" + component: "easytest:exe:runtests" + + - path: "yaks/easytest/tests" + component: "easytest:test:tests" diff --git a/parser-typechecker/tests/Unison/Test/ANF.hs b/parser-typechecker/tests/Unison/Test/ANF.hs index 3bca0b4812..9719dbf48c 100644 --- a/parser-typechecker/tests/Unison/Test/ANF.hs +++ b/parser-typechecker/tests/Unison/Test/ANF.hs @@ -145,6 +145,8 @@ denormalizeMatch b | otherwise = P.Int () $ fromIntegral i dpat r n t = P.Constructor () r (fromEnum t) (replicate n $ P.Var ()) +denormalizeBranch :: + (Num a, Var v) => Term ANormalBF v -> (a, ABT.Term (Term.F v () ()) v ()) denormalizeBranch (TAbs v br) = (n+1, ABT.abs v dbr) where (n, dbr) = denormalizeBranch br denormalizeBranch tm = (0, denormalize tm) diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index b793819511..42425aaee0 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -234,7 +234,7 @@ library text, time, transformers, - unison-core, + unison-core1, unliftio, util, vector, diff --git a/stack.yaml b/stack.yaml index b566938bd6..d8409d66b4 100644 --- a/stack.yaml +++ b/stack.yaml @@ -5,9 +5,9 @@ flags: {} allow-different-user: true packages: -# - yaks/easytest -# - parser-typechecker -# - unison-core +- yaks/easytest +- parser-typechecker +- unison-core - codebase-convert-1to2 - codebase1/codebase - codebase2/codebase @@ -41,4 +41,4 @@ extra-deps: ghc-options: # All packages - "$locals": -Wall 
-Wno-name-shadowing -Werror -Wno-type-defaults #-freverse-errors + "$locals": -Wall -Wno-name-shadowing -Werror -Wno-type-defaults -Wno-missing-pattern-synonym-signatures #-freverse-errors diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs index f007e2b764..d538a09bbf 100644 --- a/unison-core/src/Unison/Reference.hs +++ b/unison-core/src/Unison/Reference.hs @@ -46,6 +46,7 @@ data Reference -- Using an ugly name so no one tempted to use this | DerivedId Id deriving (Eq,Ord,Generic) +pattern Derived :: H.Hash -> Pos -> Size -> Reference pattern Derived h i n = DerivedId (Id h i n) -- A good idea, but causes a weird problem with view patterns in PatternP.hs in ghc 8.4.3 diff --git a/unison-core/src/Unison/Referent.hs b/unison-core/src/Unison/Referent.hs index 700e84ed02..d74b1d00a0 100644 --- a/unison-core/src/Unison/Referent.hs +++ b/unison-core/src/Unison/Referent.hs @@ -42,7 +42,7 @@ toShortHash :: Referent -> ShortHash toShortHash = \case Ref r -> R.toShortHash r Con r i _ -> patternShortHash r i - + toShortHashId :: Id -> ShortHash toShortHashId = toShortHash . 
fromId @@ -62,7 +62,9 @@ ctorTypeText :: CT.ConstructorType -> Text ctorTypeText CT.Effect = EffectCtor ctorTypeText CT.Data = DataCtor +pattern EffectCtor :: (Eq a, IsString a) => a pattern EffectCtor = "a" +pattern DataCtor :: (Eq a, IsString a) => a pattern DataCtor = "d" toString :: Referent -> String @@ -84,9 +86,9 @@ toReference' :: Referent' r -> r toReference' = \case Ref' r -> r Con' r _i _t -> r - + fromId :: Id -> Referent -fromId = fmap R.DerivedId +fromId = fmap R.DerivedId toTypeReference :: Referent -> Maybe Reference toTypeReference = \case diff --git a/unison-core/unison-core.cabal b/unison-core/unison-core1.cabal similarity index 98% rename from unison-core/unison-core.cabal rename to unison-core/unison-core1.cabal index 84daab4e41..865ab8f2cb 100644 --- a/unison-core/unison-core.cabal +++ b/unison-core/unison-core1.cabal @@ -1,5 +1,5 @@ cabal-version: 2.2 -name: unison-core +name: unison-core1 category: Compiler version: 0.1 license: MIT diff --git a/yaks/easytest/tests/Suite.hs b/yaks/easytest/tests/Suite.hs index 77aad62ae3..dd51f74e6b 100644 --- a/yaks/easytest/tests/Suite.hs +++ b/yaks/easytest/tests/Suite.hs @@ -1,7 +1,6 @@ module Main where import EasyTest -import Control.Applicative import Control.Monad suite1 :: Test () @@ -15,7 +14,7 @@ suite1 = tests suite2 :: Test () suite2 = tests [ scope "pending.failure" (pending (expectEqual True False)) - --, scope "pending.success" (pending ok) + --, scope "pending.success" (pending ok) ] reverseTest :: Test () From 8025f85d2de49230034cfca3114aaf6cacd1001d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 19 Oct 2020 10:34:23 -0400 Subject: [PATCH 020/225] restore unison-src --- unison-src/Base.u | 444 +++++++++++ unison-src/Cofree.u | 20 + unison-src/EasyTest.u | 263 +++++++ unison-src/Trie.u | 39 + unison-src/WeightedSearch.u | 69 ++ unison-src/base58.u | 60 ++ unison-src/basics.u | 72 ++ unison-src/demo/1.u | 6 + unison-src/demo/2.u | 46 ++ unison-src/demo/3.u | 115 +++ 
unison-src/errors/407.u | 8 + unison-src/errors/X-array.u | 6 + .../abort-ability-checks-against-pure.u | 9 + unison-src/errors/all-errors.u | 43 + unison-src/errors/check-for-regressions/and.u | 7 + .../check-for-regressions/app-polymorphic.u | 4 + unison-src/errors/check-for-regressions/app.u | 4 + .../applying-non-function.u | 4 + .../errors/check-for-regressions/casebody.u | 3 + .../errors/check-for-regressions/caseguard.u | 2 + .../check-for-regressions/casepattern.u | 3 + .../errors/check-for-regressions/ifcond.u | 1 + .../errors/check-for-regressions/ifelse.u | 1 + .../errors/check-for-regressions/lens.u | 9 + .../errors/check-for-regressions/not-and.u | 14 + .../errors/check-for-regressions/not-and0.u | 9 + .../check-for-regressions/not-caseguard.u | 4 + .../check-for-regressions/not-caseguard2.u | 2 + .../errors/check-for-regressions/not-or.u | 1 + .../errors/check-for-regressions/not-vector.u | 1 + unison-src/errors/check-for-regressions/or.u | 1 + .../errors/check-for-regressions/vector.u | 1 + unison-src/errors/compiler-bug.u | 5 + unison-src/errors/console.u | 19 + unison-src/errors/console2.u | 29 + unison-src/errors/cyclic-unguarded.u | 8 + unison-src/errors/effect-inference1.u | 12 + unison-src/errors/effect_unknown_type.uu | 20 + unison-src/errors/empty-block.u | 1 + unison-src/errors/ex1.u | 49 ++ unison-src/errors/fix745.u | 19 + unison-src/errors/handle-inference.u | 22 + .../errors/handler-coverage-checking.uu | 29 + unison-src/errors/id.u | 3 + unison-src/errors/io-effect.u | 9 + unison-src/errors/io-state1.u | 17 + unison-src/errors/map-reduce.u | 102 +++ unison-src/errors/map-traverse3.u | 26 + unison-src/errors/mismatched-braces.u | 4 + unison-src/errors/need-nominal-type.uu | 7 + ...ity-check-fail-by-calling-wrong-function.u | 27 + .../errors/poor-error-message/consoleh.u | 57 ++ .../doesnt-match-annotation.u | 5 + .../poor-error-message/function-calls.u | 11 + .../poor-error-message/function-calls1.u | 16 + 
.../poor-error-message/function-calls2.u | 19 + .../poor-error-message/function-calls3.u | 26 + unison-src/errors/poor-error-message/handle.u | 40 + .../errors/poor-error-message/handler-ex.u | 24 + .../mismatched-case-result-types.u | 20 + .../errors/poor-error-message/notaguard.u | 21 + .../overapplied-data-constructor-loc.u | 17 + .../pattern-case-location.u | 10 + .../poor-error-message/pattern-matching-1.u | 28 + .../errors/poor-error-message/tdnr-demo.u | 55 ++ .../poor-error-message/token-printing.u | 25 + unison-src/errors/rank2a.u | 8 + .../errors/seq-concat-constant-length.u | 3 + unison-src/errors/state4.u | 13 + unison-src/errors/tdnr.u | 3 + unison-src/errors/tdnr2.u | 1 + unison-src/errors/tdnr3.u | 10 + .../errors/term-functor-inspired/effect1.u | 9 + .../term-functor-inspired/if-body-mismatch.u | 3 + .../term-functor-inspired/if-cond-not-bool.u | 1 + .../mismatched-case-result-types.u | 5 + unison-src/errors/type-apply.u | 15 + .../errors/type-functor-inspired/app2.u | 4 + .../errors/type-functor-inspired/arrow1.u | 3 + .../errors/type-functor-inspired/effect2.u | 11 + .../type-functor-inspired/forall-arrow.u | 3 + .../type-functor-inspired/forall-arrow2.u | 4 + .../type-functor-inspired/forall-arrow3.u | 4 + .../need-nonstructural-types.uu | 12 + .../errors/type-functor-inspired/parens.u | 4 + .../errors/type-functor-inspired/subtuple.u | 5 + .../type-functor-inspired/synthesizeApp.u | 4 + .../errors/type-functor-inspired/tuple.u | 4 + .../errors/type-functor-inspired/tuple2.u | 3 + .../errors/type-functor-inspired/unit.u | 3 + unison-src/errors/unexpected-loop.u | 11 + unison-src/errors/unresolved-symbol-1.u | 6 + unison-src/errors/unsound-cont.u | 12 + unison-src/example-errors.u | 181 +++++ unison-src/new-runtime-transcripts/README | 1 + unison-src/new-runtime-transcripts/fix1709.md | 15 + .../new-runtime-transcripts/fix1709.output.md | 49 ++ unison-src/new-runtime-transcripts/hashing.md | 264 +++++++ 
.../new-runtime-transcripts/hashing.output.md | 418 ++++++++++ unison-src/parser-tests/GenerateErrors.hs | 48 ++ .../parser-tests/empty-match-list.message.txt | 3 + unison-src/parser-tests/empty-match-list.u | 3 + .../if-without-condition.message.txt | 3 + .../parser-tests/if-without-condition.u | 1 + unison-src/remote-api.u | 95 +++ unison-src/remote.u | 67 ++ unison-src/sheepshead.u | 39 + unison-src/tests/324.u | 7 + unison-src/tests/344.uu | 5 + unison-src/tests/514.u | 13 + unison-src/tests/595.u | 13 + unison-src/tests/868.u | 8 + unison-src/tests/868.ur | 1 + unison-src/tests/a-tale-of-two-optionals.u | 13 + unison-src/tests/ability-inference-fail.uu | 35 + unison-src/tests/ability-keyword.u | 7 + unison-src/tests/abort.u | 13 + unison-src/tests/ask-inferred.u | 23 + unison-src/tests/boolean-ops-in-sequence.u | 1 + unison-src/tests/builtin-arity-0-evaluation.u | 3 + .../tests/builtin-arity-0-evaluation.ur | 1 + unison-src/tests/caseguard.u | 15 + unison-src/tests/cce.u | 116 +++ unison-src/tests/cce.ur | 1 + unison-src/tests/compose-inference.u | 4 + unison-src/tests/console.u | 52 ++ unison-src/tests/console1.u | 41 + unison-src/tests/data-references-builtins.u | 4 + unison-src/tests/delay.u | 37 + unison-src/tests/delay_parse.u | 20 + unison-src/tests/effect-instantiation.u | 10 + unison-src/tests/effect-instantiation2.u | 8 + unison-src/tests/effect1.u | 8 + unison-src/tests/empty-above-the-fold.u | 6 + unison-src/tests/fib4.ur | 1 + unison-src/tests/fix1640.u | 25 + unison-src/tests/fix528.u | 12 + unison-src/tests/fix528.ur | 1 + unison-src/tests/fix739.u | 4 + unison-src/tests/force.u | 9 + unison-src/tests/guard-boolean-operators.u | 11 + unison-src/tests/handler-stacking.u | 34 + unison-src/tests/hang.u | 88 +++ unison-src/tests/id.u | 5 + unison-src/tests/if.u | 2 + unison-src/tests/imports.u | 22 + unison-src/tests/imports2.u | 12 + unison-src/tests/inner-lambda1.u | 15 + unison-src/tests/inner-lambda2.u | 16 + unison-src/tests/io-state2.u | 23 + 
unison-src/tests/io-state3.u | 10 + unison-src/tests/keyword-parse.u | 4 + .../tests/lambda-closing-over-effectful-fn.u | 10 + .../tests/lambda-closing-over-effectful-fn.ur | 1 + unison-src/tests/links.u | 13 + unison-src/tests/links.ur | 1 + unison-src/tests/map-traverse.u | 30 + unison-src/tests/map-traverse2.u | 32 + unison-src/tests/mergesort.u | 26 + unison-src/tests/methodical/abilities.u | 48 ++ unison-src/tests/methodical/abilities.ur | 1 + .../tests/methodical/apply-constructor.u | 29 + .../tests/methodical/apply-constructor.ur | 1 + unison-src/tests/methodical/apply.u | 43 + unison-src/tests/methodical/apply.ur | 1 + .../tests/methodical/builtin-nat-to-float.u | 1 + .../tests/methodical/builtin-nat-to-float.ur | 1 + unison-src/tests/methodical/builtins.u | 14 + unison-src/tests/methodical/cycle-minimize.u | 11 + unison-src/tests/methodical/dots.u | 28 + unison-src/tests/methodical/dots.ur | 1 + unison-src/tests/methodical/empty.u | 0 unison-src/tests/methodical/empty2.u | 1 + unison-src/tests/methodical/empty3.u | 3 + unison-src/tests/methodical/exponential.u | 5 + unison-src/tests/methodical/exponential.ur | 3 + unison-src/tests/methodical/float.u | 15 + unison-src/tests/methodical/float.ur | 7 + unison-src/tests/methodical/hyperbolic.u | 8 + unison-src/tests/methodical/hyperbolic.ur | 6 + unison-src/tests/methodical/int.u | 24 + unison-src/tests/methodical/int.ur | 17 + unison-src/tests/methodical/let.u | 12 + unison-src/tests/methodical/let.ur | 1 + unison-src/tests/methodical/literals.u | 12 + unison-src/tests/methodical/literals.ur | 1 + unison-src/tests/methodical/loop.u | 8 + unison-src/tests/methodical/nat.u | 31 + unison-src/tests/methodical/nat.ur | 23 + .../tests/methodical/overapply-ability.u | 47 ++ .../tests/methodical/overapply-ability.ur | 1 + unison-src/tests/methodical/parens.u | 27 + .../tests/methodical/pattern-matching.u | 28 + .../tests/methodical/pattern-matching.ur | 7 + unison-src/tests/methodical/power.u | 4 + 
unison-src/tests/methodical/power.ur | 2 + unison-src/tests/methodical/rank2.u | 9 + unison-src/tests/methodical/rounding.u | 8 + unison-src/tests/methodical/rounding.ur | 6 + unison-src/tests/methodical/scopedtypevars.u | 29 + unison-src/tests/methodical/semis.u | 13 + unison-src/tests/methodical/semis.ur | 1 + unison-src/tests/methodical/trig.u | 9 + unison-src/tests/methodical/trig.ur | 7 + unison-src/tests/methodical/universals.u | 20 + unison-src/tests/methodical/universals.ur | 17 + unison-src/tests/methodical/wildcardimports.u | 6 + unison-src/tests/multiple-effects.u | 16 + unison-src/tests/one-liners.uu | 2 + unison-src/tests/parenthesized-blocks.u | 5 + unison-src/tests/parenthesized-blocks.ur | 1 + unison-src/tests/pattern-match-seq.u | 86 ++ unison-src/tests/pattern-match-seq.ur | 20 + unison-src/tests/pattern-matching.u | 36 + unison-src/tests/pattern-matching2.u | 21 + unison-src/tests/pattern-typing-bug.u | 9 + unison-src/tests/pattern-typing-bug.ur | 1 + unison-src/tests/pattern-weirdness.u | 16 + unison-src/tests/pattern-weirdness.ur | 1 + unison-src/tests/quote-parse-bug.uu | 6 + unison-src/tests/r0.u | 5 + unison-src/tests/r1.u | 6 + unison-src/tests/r10.u | 5 + unison-src/tests/r11.u | 7 + unison-src/tests/r12.u | 4 + unison-src/tests/r13.u | 5 + unison-src/tests/r14.u | 4 + unison-src/tests/r2.u | 6 + unison-src/tests/r3.u | 6 + unison-src/tests/r4negate.u | 5 + unison-src/tests/r4x.u | 3 + unison-src/tests/r5.u | 6 + unison-src/tests/r6.u | 4 + unison-src/tests/r7.0.u | 6 + unison-src/tests/r7.1.u | 5 + unison-src/tests/r7.2.u | 4 + unison-src/tests/r8.u | 5 + unison-src/tests/r9.u | 11 + unison-src/tests/rainbow.u | 32 + unison-src/tests/records.u | 12 + unison-src/tests/runtime-crash.uu | 13 + unison-src/tests/sequence-at-0.u | 2 + .../tests/sequence-literal-argument-parsing.u | 5 + unison-src/tests/sequence-literal.u | 19 + unison-src/tests/soe.u | 124 +++ unison-src/tests/soe2.u | 47 ++ .../tests/spurious-ability-fail-underapply.u | 8 + 
unison-src/tests/spurious-ability-fail.u | 16 + unison-src/tests/state1.u | 15 + unison-src/tests/state1a.u | 11 + unison-src/tests/state2.u | 11 + unison-src/tests/state2a-min.u | 17 + unison-src/tests/state2a-min.ur | 1 + unison-src/tests/state2a.u | 50 ++ unison-src/tests/state2a.uu | 30 + unison-src/tests/state2b-min.u | 15 + unison-src/tests/state2b-min.ur | 1 + unison-src/tests/state2b.u | 39 + unison-src/tests/state3.u | 30 + unison-src/tests/state4.u | 26 + unison-src/tests/state4.ur | 1 + unison-src/tests/state4a.u | 26 + unison-src/tests/state4a.ur | 1 + unison-src/tests/stream.u | 70 ++ unison-src/tests/stream2.uu | 81 ++ unison-src/tests/stream3.uu | 71 ++ unison-src/tests/suffix-resolve.u | 23 + unison-src/tests/tdnr.u | 4 + unison-src/tests/tdnr2.u | 13 + unison-src/tests/tdnr3.u | 6 + unison-src/tests/tdnr4.u | 4 + unison-src/tests/text-escaping.u | 10 + unison-src/tests/text-escaping.ur | 1 + unison-src/tests/text-pattern.u | 6 + unison-src/tests/text-pattern.ur | 1 + unison-src/tests/tictactoe.u | 34 + unison-src/tests/tictactoe0-array-oob1.u | 12 + unison-src/tests/tictactoe0-npe.u | 17 + unison-src/tests/tictactoe0.u | 33 + unison-src/tests/tictactoe2.u | 54 ++ unison-src/tests/tuple.u | 4 + unison-src/tests/tuple.ur | 1 + unison-src/tests/type-application.u | 11 + unison-src/tests/underscore-parsing.u | 7 + unison-src/tests/ungeneralize-bug.uu | 22 + unison-src/tests/unique.u | 28 + unison-src/tests/void.u | 3 + unison-src/transcripts/addupdatemessages.md | 63 ++ .../transcripts/addupdatemessages.output.md | 159 ++++ unison-src/transcripts/alias-many.md | 130 ++++ unison-src/transcripts/alias-many.output.md | 424 ++++++++++ unison-src/transcripts/ambiguous-metadata.md | 17 + .../transcripts/ambiguous-metadata.output.md | 42 + unison-src/transcripts/blocks.md | 177 +++++ unison-src/transcripts/blocks.output.md | 339 ++++++++ unison-src/transcripts/builtins-merge.md | 6 + .../transcripts/builtins-merge.output.md | 51 ++ 
unison-src/transcripts/bytesFromList.md | 11 + .../transcripts/bytesFromList.output.md | 21 + unison-src/transcripts/cd-back.md | 46 ++ unison-src/transcripts/cd-back.output.md | 40 + unison-src/transcripts/check763.md | 17 + unison-src/transcripts/check763.output.md | 34 + unison-src/transcripts/check873.md | 17 + unison-src/transcripts/check873.output.md | 40 + unison-src/transcripts/copy-patch.md | 39 + unison-src/transcripts/copy-patch.output.md | 91 +++ unison-src/transcripts/create-author.md | 17 + .../transcripts/create-author.output.md | 44 ++ unison-src/transcripts/delete.md | 100 +++ unison-src/transcripts/delete.output.md | 237 ++++++ unison-src/transcripts/deleteReplacements.md | 46 ++ .../transcripts/deleteReplacements.output.md | 132 ++++ .../dependents-dependencies-debugfile.md | 38 + ...ependents-dependencies-debugfile.output.md | 93 +++ unison-src/transcripts/destructuring-binds.md | 85 ++ .../transcripts/destructuring-binds.output.md | 174 +++++ unison-src/transcripts/diff.md | 201 +++++ unison-src/transcripts/diff.output.md | 733 ++++++++++++++++++ unison-src/transcripts/doc-formatting.md | 254 ++++++ .../transcripts/doc-formatting.output.md | 512 ++++++++++++ unison-src/transcripts/docs.md | 95 +++ unison-src/transcripts/docs.output.md | 211 +++++ unison-src/transcripts/emptyCodebase.md | 27 + .../transcripts/emptyCodebase.output.md | 41 + .../transcripts/errors/ucm-hide-all-error.md | 12 + .../errors/ucm-hide-all-error.output.md | 17 + unison-src/transcripts/errors/ucm-hide-all.md | 12 + .../transcripts/errors/ucm-hide-all.output.md | 17 + .../transcripts/errors/ucm-hide-error.md | 12 + .../errors/ucm-hide-error.output.md | 17 + unison-src/transcripts/errors/ucm-hide.md | 12 + .../transcripts/errors/ucm-hide.output.md | 17 + .../errors/unison-hide-all-error.md | 10 + .../errors/unison-hide-all-error.output.md | 16 + .../transcripts/errors/unison-hide-all.md | 10 + .../errors/unison-hide-all.output.md | 16 + 
.../transcripts/errors/unison-hide-error.md | 10 + .../errors/unison-hide-error.output.md | 16 + unison-src/transcripts/errors/unison-hide.md | 10 + .../transcripts/errors/unison-hide.output.md | 16 + unison-src/transcripts/escape-sequences.md | 5 + .../transcripts/escape-sequences.output.md | 28 + unison-src/transcripts/find-patch.md | 28 + unison-src/transcripts/find-patch.output.md | 81 ++ .../transcripts/fix-1381-excess-propagate.md | 28 + .../fix-1381-excess-propagate.output.md | 55 ++ unison-src/transcripts/fix-big-list-crash.md | 13 + .../transcripts/fix-big-list-crash.output.md | 22 + unison-src/transcripts/fix1063.md | 17 + unison-src/transcripts/fix1063.output.md | 35 + unison-src/transcripts/fix1334.md | 36 + unison-src/transcripts/fix1334.output.md | 101 +++ unison-src/transcripts/fix1356.md | 41 + unison-src/transcripts/fix1356.output.md | 94 +++ unison-src/transcripts/fix689.md | 13 + unison-src/transcripts/fix689.output.md | 21 + unison-src/transcripts/fix849.md | 12 + unison-src/transcripts/fix849.output.md | 27 + unison-src/transcripts/fix942.md | 37 + unison-src/transcripts/fix942.output.md | 114 +++ unison-src/transcripts/fix987.md | 37 + unison-src/transcripts/fix987.output.md | 65 ++ unison-src/transcripts/hello.md | 69 ++ unison-src/transcripts/hello.output.md | 92 +++ unison-src/transcripts/link.md | 70 ++ unison-src/transcripts/link.output.md | 202 +++++ unison-src/transcripts/merge.md | 101 +++ unison-src/transcripts/merge.output.md | 227 ++++++ unison-src/transcripts/mergeloop.md | 51 ++ unison-src/transcripts/mergeloop.output.md | 143 ++++ unison-src/transcripts/merges.md | 119 +++ unison-src/transcripts/merges.output.md | 436 +++++++++++ unison-src/transcripts/names.md | 20 + unison-src/transcripts/names.output.md | 32 + unison-src/transcripts/numbered-args.md | 56 ++ .../transcripts/numbered-args.output.md | 162 ++++ unison-src/transcripts/propagate.md | 134 ++++ unison-src/transcripts/propagate.output.md | 280 +++++++ 
unison-src/transcripts/redundant.output.md | 45 ++ unison-src/transcripts/reflog.md | 31 + unison-src/transcripts/reflog.output.md | 90 +++ unison-src/transcripts/resolve.md | 115 +++ unison-src/transcripts/resolve.output.md | 259 +++++++ unison-src/transcripts/squash.md | 132 ++++ unison-src/transcripts/squash.output.md | 472 +++++++++++ unison-src/transcripts/suffixes.md | 40 + unison-src/transcripts/suffixes.output.md | 59 ++ unison-src/transcripts/todo-bug-builtins.md | 27 + .../transcripts/todo-bug-builtins.output.md | 89 +++ .../transcripts/transcript-parser-commands.md | 41 + .../transcript-parser-commands.output.md | 72 ++ unison-src/transcripts/unitnamespace.md | 9 + .../transcripts/unitnamespace.output.md | 35 + 394 files changed, 15902 insertions(+) create mode 100644 unison-src/Base.u create mode 100644 unison-src/Cofree.u create mode 100644 unison-src/EasyTest.u create mode 100644 unison-src/Trie.u create mode 100644 unison-src/WeightedSearch.u create mode 100644 unison-src/base58.u create mode 100644 unison-src/basics.u create mode 100644 unison-src/demo/1.u create mode 100644 unison-src/demo/2.u create mode 100644 unison-src/demo/3.u create mode 100644 unison-src/errors/407.u create mode 100644 unison-src/errors/X-array.u create mode 100644 unison-src/errors/abort-ability-checks-against-pure.u create mode 100644 unison-src/errors/all-errors.u create mode 100644 unison-src/errors/check-for-regressions/and.u create mode 100644 unison-src/errors/check-for-regressions/app-polymorphic.u create mode 100644 unison-src/errors/check-for-regressions/app.u create mode 100644 unison-src/errors/check-for-regressions/applying-non-function.u create mode 100644 unison-src/errors/check-for-regressions/casebody.u create mode 100644 unison-src/errors/check-for-regressions/caseguard.u create mode 100644 unison-src/errors/check-for-regressions/casepattern.u create mode 100644 unison-src/errors/check-for-regressions/ifcond.u create mode 100644 
unison-src/errors/check-for-regressions/ifelse.u create mode 100644 unison-src/errors/check-for-regressions/lens.u create mode 100644 unison-src/errors/check-for-regressions/not-and.u create mode 100644 unison-src/errors/check-for-regressions/not-and0.u create mode 100644 unison-src/errors/check-for-regressions/not-caseguard.u create mode 100644 unison-src/errors/check-for-regressions/not-caseguard2.u create mode 100644 unison-src/errors/check-for-regressions/not-or.u create mode 100644 unison-src/errors/check-for-regressions/not-vector.u create mode 100644 unison-src/errors/check-for-regressions/or.u create mode 100644 unison-src/errors/check-for-regressions/vector.u create mode 100644 unison-src/errors/compiler-bug.u create mode 100644 unison-src/errors/console.u create mode 100644 unison-src/errors/console2.u create mode 100644 unison-src/errors/cyclic-unguarded.u create mode 100644 unison-src/errors/effect-inference1.u create mode 100755 unison-src/errors/effect_unknown_type.uu create mode 100644 unison-src/errors/empty-block.u create mode 100644 unison-src/errors/ex1.u create mode 100644 unison-src/errors/fix745.u create mode 100644 unison-src/errors/handle-inference.u create mode 100644 unison-src/errors/handler-coverage-checking.uu create mode 100644 unison-src/errors/id.u create mode 100644 unison-src/errors/io-effect.u create mode 100644 unison-src/errors/io-state1.u create mode 100644 unison-src/errors/map-reduce.u create mode 100644 unison-src/errors/map-traverse3.u create mode 100644 unison-src/errors/mismatched-braces.u create mode 100644 unison-src/errors/need-nominal-type.uu create mode 100644 unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u create mode 100644 unison-src/errors/poor-error-message/consoleh.u create mode 100644 unison-src/errors/poor-error-message/doesnt-match-annotation.u create mode 100644 unison-src/errors/poor-error-message/function-calls.u create mode 100644 
unison-src/errors/poor-error-message/function-calls1.u create mode 100644 unison-src/errors/poor-error-message/function-calls2.u create mode 100644 unison-src/errors/poor-error-message/function-calls3.u create mode 100644 unison-src/errors/poor-error-message/handle.u create mode 100644 unison-src/errors/poor-error-message/handler-ex.u create mode 100644 unison-src/errors/poor-error-message/mismatched-case-result-types.u create mode 100644 unison-src/errors/poor-error-message/notaguard.u create mode 100644 unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u create mode 100644 unison-src/errors/poor-error-message/pattern-case-location.u create mode 100644 unison-src/errors/poor-error-message/pattern-matching-1.u create mode 100644 unison-src/errors/poor-error-message/tdnr-demo.u create mode 100644 unison-src/errors/poor-error-message/token-printing.u create mode 100644 unison-src/errors/rank2a.u create mode 100644 unison-src/errors/seq-concat-constant-length.u create mode 100644 unison-src/errors/state4.u create mode 100644 unison-src/errors/tdnr.u create mode 100644 unison-src/errors/tdnr2.u create mode 100644 unison-src/errors/tdnr3.u create mode 100644 unison-src/errors/term-functor-inspired/effect1.u create mode 100644 unison-src/errors/term-functor-inspired/if-body-mismatch.u create mode 100644 unison-src/errors/term-functor-inspired/if-cond-not-bool.u create mode 100644 unison-src/errors/term-functor-inspired/mismatched-case-result-types.u create mode 100644 unison-src/errors/type-apply.u create mode 100644 unison-src/errors/type-functor-inspired/app2.u create mode 100644 unison-src/errors/type-functor-inspired/arrow1.u create mode 100644 unison-src/errors/type-functor-inspired/effect2.u create mode 100644 unison-src/errors/type-functor-inspired/forall-arrow.u create mode 100644 unison-src/errors/type-functor-inspired/forall-arrow2.u create mode 100644 unison-src/errors/type-functor-inspired/forall-arrow3.u create mode 100644 
unison-src/errors/type-functor-inspired/need-nonstructural-types.uu create mode 100644 unison-src/errors/type-functor-inspired/parens.u create mode 100644 unison-src/errors/type-functor-inspired/subtuple.u create mode 100644 unison-src/errors/type-functor-inspired/synthesizeApp.u create mode 100644 unison-src/errors/type-functor-inspired/tuple.u create mode 100644 unison-src/errors/type-functor-inspired/tuple2.u create mode 100644 unison-src/errors/type-functor-inspired/unit.u create mode 100644 unison-src/errors/unexpected-loop.u create mode 100644 unison-src/errors/unresolved-symbol-1.u create mode 100644 unison-src/errors/unsound-cont.u create mode 100644 unison-src/example-errors.u create mode 100644 unison-src/new-runtime-transcripts/README create mode 100644 unison-src/new-runtime-transcripts/fix1709.md create mode 100644 unison-src/new-runtime-transcripts/fix1709.output.md create mode 100644 unison-src/new-runtime-transcripts/hashing.md create mode 100644 unison-src/new-runtime-transcripts/hashing.output.md create mode 100644 unison-src/parser-tests/GenerateErrors.hs create mode 100644 unison-src/parser-tests/empty-match-list.message.txt create mode 100644 unison-src/parser-tests/empty-match-list.u create mode 100644 unison-src/parser-tests/if-without-condition.message.txt create mode 100644 unison-src/parser-tests/if-without-condition.u create mode 100644 unison-src/remote-api.u create mode 100644 unison-src/remote.u create mode 100644 unison-src/sheepshead.u create mode 100644 unison-src/tests/324.u create mode 100644 unison-src/tests/344.uu create mode 100644 unison-src/tests/514.u create mode 100644 unison-src/tests/595.u create mode 100644 unison-src/tests/868.u create mode 100644 unison-src/tests/868.ur create mode 100644 unison-src/tests/a-tale-of-two-optionals.u create mode 100644 unison-src/tests/ability-inference-fail.uu create mode 100644 unison-src/tests/ability-keyword.u create mode 100644 unison-src/tests/abort.u create mode 100644 
unison-src/tests/ask-inferred.u create mode 100644 unison-src/tests/boolean-ops-in-sequence.u create mode 100644 unison-src/tests/builtin-arity-0-evaluation.u create mode 100644 unison-src/tests/builtin-arity-0-evaluation.ur create mode 100644 unison-src/tests/caseguard.u create mode 100644 unison-src/tests/cce.u create mode 100644 unison-src/tests/cce.ur create mode 100644 unison-src/tests/compose-inference.u create mode 100644 unison-src/tests/console.u create mode 100644 unison-src/tests/console1.u create mode 100644 unison-src/tests/data-references-builtins.u create mode 100644 unison-src/tests/delay.u create mode 100644 unison-src/tests/delay_parse.u create mode 100644 unison-src/tests/effect-instantiation.u create mode 100644 unison-src/tests/effect-instantiation2.u create mode 100644 unison-src/tests/effect1.u create mode 100644 unison-src/tests/empty-above-the-fold.u create mode 100644 unison-src/tests/fib4.ur create mode 100644 unison-src/tests/fix1640.u create mode 100644 unison-src/tests/fix528.u create mode 100644 unison-src/tests/fix528.ur create mode 100644 unison-src/tests/fix739.u create mode 100644 unison-src/tests/force.u create mode 100644 unison-src/tests/guard-boolean-operators.u create mode 100644 unison-src/tests/handler-stacking.u create mode 100644 unison-src/tests/hang.u create mode 100644 unison-src/tests/id.u create mode 100644 unison-src/tests/if.u create mode 100644 unison-src/tests/imports.u create mode 100644 unison-src/tests/imports2.u create mode 100644 unison-src/tests/inner-lambda1.u create mode 100644 unison-src/tests/inner-lambda2.u create mode 100644 unison-src/tests/io-state2.u create mode 100644 unison-src/tests/io-state3.u create mode 100644 unison-src/tests/keyword-parse.u create mode 100644 unison-src/tests/lambda-closing-over-effectful-fn.u create mode 100644 unison-src/tests/lambda-closing-over-effectful-fn.ur create mode 100644 unison-src/tests/links.u create mode 100644 unison-src/tests/links.ur create mode 100644 
unison-src/tests/map-traverse.u create mode 100644 unison-src/tests/map-traverse2.u create mode 100644 unison-src/tests/mergesort.u create mode 100644 unison-src/tests/methodical/abilities.u create mode 100644 unison-src/tests/methodical/abilities.ur create mode 100644 unison-src/tests/methodical/apply-constructor.u create mode 100644 unison-src/tests/methodical/apply-constructor.ur create mode 100644 unison-src/tests/methodical/apply.u create mode 100644 unison-src/tests/methodical/apply.ur create mode 100644 unison-src/tests/methodical/builtin-nat-to-float.u create mode 100644 unison-src/tests/methodical/builtin-nat-to-float.ur create mode 100644 unison-src/tests/methodical/builtins.u create mode 100644 unison-src/tests/methodical/cycle-minimize.u create mode 100644 unison-src/tests/methodical/dots.u create mode 100644 unison-src/tests/methodical/dots.ur create mode 100644 unison-src/tests/methodical/empty.u create mode 100644 unison-src/tests/methodical/empty2.u create mode 100644 unison-src/tests/methodical/empty3.u create mode 100644 unison-src/tests/methodical/exponential.u create mode 100644 unison-src/tests/methodical/exponential.ur create mode 100644 unison-src/tests/methodical/float.u create mode 100644 unison-src/tests/methodical/float.ur create mode 100644 unison-src/tests/methodical/hyperbolic.u create mode 100644 unison-src/tests/methodical/hyperbolic.ur create mode 100644 unison-src/tests/methodical/int.u create mode 100644 unison-src/tests/methodical/int.ur create mode 100644 unison-src/tests/methodical/let.u create mode 100644 unison-src/tests/methodical/let.ur create mode 100644 unison-src/tests/methodical/literals.u create mode 100644 unison-src/tests/methodical/literals.ur create mode 100644 unison-src/tests/methodical/loop.u create mode 100644 unison-src/tests/methodical/nat.u create mode 100644 unison-src/tests/methodical/nat.ur create mode 100644 unison-src/tests/methodical/overapply-ability.u create mode 100644 
unison-src/tests/methodical/overapply-ability.ur create mode 100644 unison-src/tests/methodical/parens.u create mode 100644 unison-src/tests/methodical/pattern-matching.u create mode 100644 unison-src/tests/methodical/pattern-matching.ur create mode 100644 unison-src/tests/methodical/power.u create mode 100644 unison-src/tests/methodical/power.ur create mode 100644 unison-src/tests/methodical/rank2.u create mode 100644 unison-src/tests/methodical/rounding.u create mode 100644 unison-src/tests/methodical/rounding.ur create mode 100644 unison-src/tests/methodical/scopedtypevars.u create mode 100644 unison-src/tests/methodical/semis.u create mode 100644 unison-src/tests/methodical/semis.ur create mode 100644 unison-src/tests/methodical/trig.u create mode 100644 unison-src/tests/methodical/trig.ur create mode 100644 unison-src/tests/methodical/universals.u create mode 100644 unison-src/tests/methodical/universals.ur create mode 100644 unison-src/tests/methodical/wildcardimports.u create mode 100644 unison-src/tests/multiple-effects.u create mode 100644 unison-src/tests/one-liners.uu create mode 100644 unison-src/tests/parenthesized-blocks.u create mode 100644 unison-src/tests/parenthesized-blocks.ur create mode 100644 unison-src/tests/pattern-match-seq.u create mode 100644 unison-src/tests/pattern-match-seq.ur create mode 100644 unison-src/tests/pattern-matching.u create mode 100644 unison-src/tests/pattern-matching2.u create mode 100644 unison-src/tests/pattern-typing-bug.u create mode 100644 unison-src/tests/pattern-typing-bug.ur create mode 100644 unison-src/tests/pattern-weirdness.u create mode 100644 unison-src/tests/pattern-weirdness.ur create mode 100644 unison-src/tests/quote-parse-bug.uu create mode 100644 unison-src/tests/r0.u create mode 100644 unison-src/tests/r1.u create mode 100644 unison-src/tests/r10.u create mode 100644 unison-src/tests/r11.u create mode 100644 unison-src/tests/r12.u create mode 100644 unison-src/tests/r13.u create mode 100644 
unison-src/tests/r14.u create mode 100644 unison-src/tests/r2.u create mode 100644 unison-src/tests/r3.u create mode 100644 unison-src/tests/r4negate.u create mode 100644 unison-src/tests/r4x.u create mode 100644 unison-src/tests/r5.u create mode 100644 unison-src/tests/r6.u create mode 100644 unison-src/tests/r7.0.u create mode 100644 unison-src/tests/r7.1.u create mode 100644 unison-src/tests/r7.2.u create mode 100644 unison-src/tests/r8.u create mode 100644 unison-src/tests/r9.u create mode 100644 unison-src/tests/rainbow.u create mode 100644 unison-src/tests/records.u create mode 100644 unison-src/tests/runtime-crash.uu create mode 100644 unison-src/tests/sequence-at-0.u create mode 100644 unison-src/tests/sequence-literal-argument-parsing.u create mode 100644 unison-src/tests/sequence-literal.u create mode 100644 unison-src/tests/soe.u create mode 100644 unison-src/tests/soe2.u create mode 100644 unison-src/tests/spurious-ability-fail-underapply.u create mode 100644 unison-src/tests/spurious-ability-fail.u create mode 100644 unison-src/tests/state1.u create mode 100644 unison-src/tests/state1a.u create mode 100644 unison-src/tests/state2.u create mode 100644 unison-src/tests/state2a-min.u create mode 100644 unison-src/tests/state2a-min.ur create mode 100644 unison-src/tests/state2a.u create mode 100644 unison-src/tests/state2a.uu create mode 100644 unison-src/tests/state2b-min.u create mode 100644 unison-src/tests/state2b-min.ur create mode 100644 unison-src/tests/state2b.u create mode 100644 unison-src/tests/state3.u create mode 100644 unison-src/tests/state4.u create mode 100644 unison-src/tests/state4.ur create mode 100644 unison-src/tests/state4a.u create mode 100644 unison-src/tests/state4a.ur create mode 100644 unison-src/tests/stream.u create mode 100644 unison-src/tests/stream2.uu create mode 100644 unison-src/tests/stream3.uu create mode 100644 unison-src/tests/suffix-resolve.u create mode 100644 unison-src/tests/tdnr.u create mode 100644 
unison-src/tests/tdnr2.u create mode 100644 unison-src/tests/tdnr3.u create mode 100644 unison-src/tests/tdnr4.u create mode 100644 unison-src/tests/text-escaping.u create mode 100644 unison-src/tests/text-escaping.ur create mode 100644 unison-src/tests/text-pattern.u create mode 100644 unison-src/tests/text-pattern.ur create mode 100644 unison-src/tests/tictactoe.u create mode 100644 unison-src/tests/tictactoe0-array-oob1.u create mode 100644 unison-src/tests/tictactoe0-npe.u create mode 100644 unison-src/tests/tictactoe0.u create mode 100644 unison-src/tests/tictactoe2.u create mode 100644 unison-src/tests/tuple.u create mode 100644 unison-src/tests/tuple.ur create mode 100644 unison-src/tests/type-application.u create mode 100644 unison-src/tests/underscore-parsing.u create mode 100644 unison-src/tests/ungeneralize-bug.uu create mode 100644 unison-src/tests/unique.u create mode 100644 unison-src/tests/void.u create mode 100644 unison-src/transcripts/addupdatemessages.md create mode 100644 unison-src/transcripts/addupdatemessages.output.md create mode 100644 unison-src/transcripts/alias-many.md create mode 100644 unison-src/transcripts/alias-many.output.md create mode 100644 unison-src/transcripts/ambiguous-metadata.md create mode 100644 unison-src/transcripts/ambiguous-metadata.output.md create mode 100644 unison-src/transcripts/blocks.md create mode 100644 unison-src/transcripts/blocks.output.md create mode 100644 unison-src/transcripts/builtins-merge.md create mode 100644 unison-src/transcripts/builtins-merge.output.md create mode 100644 unison-src/transcripts/bytesFromList.md create mode 100644 unison-src/transcripts/bytesFromList.output.md create mode 100644 unison-src/transcripts/cd-back.md create mode 100644 unison-src/transcripts/cd-back.output.md create mode 100644 unison-src/transcripts/check763.md create mode 100644 unison-src/transcripts/check763.output.md create mode 100644 unison-src/transcripts/check873.md create mode 100644 
unison-src/transcripts/check873.output.md create mode 100644 unison-src/transcripts/copy-patch.md create mode 100644 unison-src/transcripts/copy-patch.output.md create mode 100644 unison-src/transcripts/create-author.md create mode 100644 unison-src/transcripts/create-author.output.md create mode 100644 unison-src/transcripts/delete.md create mode 100644 unison-src/transcripts/delete.output.md create mode 100644 unison-src/transcripts/deleteReplacements.md create mode 100644 unison-src/transcripts/deleteReplacements.output.md create mode 100644 unison-src/transcripts/dependents-dependencies-debugfile.md create mode 100644 unison-src/transcripts/dependents-dependencies-debugfile.output.md create mode 100644 unison-src/transcripts/destructuring-binds.md create mode 100644 unison-src/transcripts/destructuring-binds.output.md create mode 100644 unison-src/transcripts/diff.md create mode 100644 unison-src/transcripts/diff.output.md create mode 100644 unison-src/transcripts/doc-formatting.md create mode 100644 unison-src/transcripts/doc-formatting.output.md create mode 100644 unison-src/transcripts/docs.md create mode 100644 unison-src/transcripts/docs.output.md create mode 100644 unison-src/transcripts/emptyCodebase.md create mode 100644 unison-src/transcripts/emptyCodebase.output.md create mode 100644 unison-src/transcripts/errors/ucm-hide-all-error.md create mode 100644 unison-src/transcripts/errors/ucm-hide-all-error.output.md create mode 100644 unison-src/transcripts/errors/ucm-hide-all.md create mode 100644 unison-src/transcripts/errors/ucm-hide-all.output.md create mode 100644 unison-src/transcripts/errors/ucm-hide-error.md create mode 100644 unison-src/transcripts/errors/ucm-hide-error.output.md create mode 100644 unison-src/transcripts/errors/ucm-hide.md create mode 100644 unison-src/transcripts/errors/ucm-hide.output.md create mode 100644 unison-src/transcripts/errors/unison-hide-all-error.md create mode 100644 
unison-src/transcripts/errors/unison-hide-all-error.output.md create mode 100644 unison-src/transcripts/errors/unison-hide-all.md create mode 100644 unison-src/transcripts/errors/unison-hide-all.output.md create mode 100644 unison-src/transcripts/errors/unison-hide-error.md create mode 100644 unison-src/transcripts/errors/unison-hide-error.output.md create mode 100644 unison-src/transcripts/errors/unison-hide.md create mode 100644 unison-src/transcripts/errors/unison-hide.output.md create mode 100644 unison-src/transcripts/escape-sequences.md create mode 100644 unison-src/transcripts/escape-sequences.output.md create mode 100644 unison-src/transcripts/find-patch.md create mode 100644 unison-src/transcripts/find-patch.output.md create mode 100644 unison-src/transcripts/fix-1381-excess-propagate.md create mode 100644 unison-src/transcripts/fix-1381-excess-propagate.output.md create mode 100644 unison-src/transcripts/fix-big-list-crash.md create mode 100644 unison-src/transcripts/fix-big-list-crash.output.md create mode 100644 unison-src/transcripts/fix1063.md create mode 100644 unison-src/transcripts/fix1063.output.md create mode 100644 unison-src/transcripts/fix1334.md create mode 100644 unison-src/transcripts/fix1334.output.md create mode 100644 unison-src/transcripts/fix1356.md create mode 100644 unison-src/transcripts/fix1356.output.md create mode 100644 unison-src/transcripts/fix689.md create mode 100644 unison-src/transcripts/fix689.output.md create mode 100644 unison-src/transcripts/fix849.md create mode 100644 unison-src/transcripts/fix849.output.md create mode 100644 unison-src/transcripts/fix942.md create mode 100644 unison-src/transcripts/fix942.output.md create mode 100644 unison-src/transcripts/fix987.md create mode 100644 unison-src/transcripts/fix987.output.md create mode 100644 unison-src/transcripts/hello.md create mode 100644 unison-src/transcripts/hello.output.md create mode 100644 unison-src/transcripts/link.md create mode 100644 
unison-src/transcripts/link.output.md create mode 100644 unison-src/transcripts/merge.md create mode 100644 unison-src/transcripts/merge.output.md create mode 100644 unison-src/transcripts/mergeloop.md create mode 100644 unison-src/transcripts/mergeloop.output.md create mode 100644 unison-src/transcripts/merges.md create mode 100644 unison-src/transcripts/merges.output.md create mode 100644 unison-src/transcripts/names.md create mode 100644 unison-src/transcripts/names.output.md create mode 100644 unison-src/transcripts/numbered-args.md create mode 100644 unison-src/transcripts/numbered-args.output.md create mode 100644 unison-src/transcripts/propagate.md create mode 100644 unison-src/transcripts/propagate.output.md create mode 100644 unison-src/transcripts/redundant.output.md create mode 100644 unison-src/transcripts/reflog.md create mode 100644 unison-src/transcripts/reflog.output.md create mode 100644 unison-src/transcripts/resolve.md create mode 100644 unison-src/transcripts/resolve.output.md create mode 100644 unison-src/transcripts/squash.md create mode 100644 unison-src/transcripts/squash.output.md create mode 100644 unison-src/transcripts/suffixes.md create mode 100644 unison-src/transcripts/suffixes.output.md create mode 100644 unison-src/transcripts/todo-bug-builtins.md create mode 100644 unison-src/transcripts/todo-bug-builtins.output.md create mode 100644 unison-src/transcripts/transcript-parser-commands.md create mode 100644 unison-src/transcripts/transcript-parser-commands.output.md create mode 100644 unison-src/transcripts/unitnamespace.md create mode 100644 unison-src/transcripts/unitnamespace.output.md diff --git a/unison-src/Base.u b/unison-src/Base.u new file mode 100644 index 0000000000..2236899e64 --- /dev/null +++ b/unison-src/Base.u @@ -0,0 +1,444 @@ +namespace Nat where + maxNat = 18446744073709551615 + + (-) : Nat -> Nat -> Int + (-) = Nat.sub + +namespace Int where + maxInt = +9223372036854775807 + minInt = -9223372036854775808 + +use 
Universal == < > >= +use Optional None Some + +-- Function composition +dot : (b -> c) -> (a -> b) -> a -> c +dot f g x = f (g x) + +-- Function composition +andThen : (a -> b) -> (b -> c) -> a -> c +andThen f g x = g (f x) + +const : a -> b -> a +const a _ = a + +use Tuple Cons + +namespace Tuple where + at1 : Tuple a b -> a + at1 = cases Cons a _ -> a + + at2 : Tuple a (Tuple b c) -> b + at2 = cases Cons _ (Cons b _) -> b + + at3 : Tuple a (Tuple b (Tuple c d)) -> c + at3 = cases Cons _ (Cons _ (Cons c _)) -> c + + at4 : Tuple a (Tuple b (Tuple c (Tuple d e))) -> d + at4 = cases Cons _ (Cons _ (Cons _ (Cons d _))) -> d + +namespace List where + + map : (a -> b) -> [a] -> [b] + map f a = + go i as acc = match List.at i as with + None -> acc + Some a -> go (i + 1) as (acc `snoc` f a) + go 0 a [] + + zip : [a] -> [b] -> [(a,b)] + zip as bs = + go acc i = match (at i as, at i bs) with + (None,_) -> acc + (_,None) -> acc + (Some a, Some b) -> go (acc `snoc` (a,b)) (i + 1) + go [] 0 + + insert : Nat -> a -> [a] -> [a] + insert i a as = take i as ++ [a] ++ drop i as + + replace : Nat -> a -> [a] -> [a] + replace i a as = take i as ++ [a] ++ drop (i + 1) as + + slice : Nat -> Nat -> [a] -> [a] + slice start stopExclusive s = + take (stopExclusive `Nat.drop` start) (drop start s) + + unsafeAt : Nat -> [a] -> a + unsafeAt n as = match at n as with + Some a -> a + None -> Debug.watch "oh noes" (unsafeAt n as) -- Debug.crash "oh noes!" 
+ + foldl : (b -> a -> b) -> b -> [a] -> b + foldl f b as = + go b i = match List.at i as with + None -> b + Some a -> go (f b a) (i + 1) + go b 0 + + foldb : (a -> b) -> (b -> b -> b) -> b -> [a] -> b + foldb f op z as = + if List.size as == 0 then z + else if List.size as == 1 then f (unsafeAt 0 as) + else match halve as with (left, right) -> + foldb f op z left `op` foldb f op z right + + reverse : [a] -> [a] + reverse as = foldl (acc a -> List.cons a acc) [] as + + indexed : [a] -> [(a, Nat)] + indexed as = as `zip` range 0 (size as) + + sortBy : (a -> b) -> [a] -> [a] + sortBy f as = + tweak p = match p with (p1,p2) -> (f p1, p2, p1) + Heap.sort (map tweak (indexed as)) |> map Tuple.at3 + + halve : [a] -> ([a], [a]) + halve s = + n = size s / 2 + (take n s, drop n s) + + unfold : s -> (s -> Optional (a, s)) -> [a] + unfold s0 f = + go f s acc = match f s with + None -> acc + Some (a, s) -> go f s (acc `snoc` a) + go f s0 [] + + uncons : [a] -> Optional (a, [a]) + uncons as = match at 0 as with + None -> None + Some a -> Some (a, drop 1 as) + + unsnoc : [a] -> Optional ([a], a) + unsnoc as = + i = size (drop 1 as) + match at i as with + None -> None + Some a -> Some (take i as, a) + + join : [[a]] -> [a] + join = foldl (++) [] + + flatMap : (a -> [b]) -> [a] -> [b] + flatMap f as = join (map f as) + + range : Nat -> Nat -> [Nat] + range start stopExclusive = + f i = if i < stopExclusive then Some (i, i + 1) else None + unfold start f + + distinct : [a] -> [a] + distinct as = + go i seen acc = match List.at i as with + None -> acc + Some a -> if Set.contains a seen then go (i + 1) seen acc + else go (i + 1) (Set.insert a seen) (acc `snoc` a) + go 0 Set.empty [] + + -- Joins a list of lists in a "fair diagonal" fashion. + -- Adapted from the Haskell version written by Luke Palmer. 
+ diagonal : [[a]] -> [a] + diagonal = + let + x = 23 + stripe = cases + [] -> [] + [] +: xxs -> stripe xxs + (x +: xs) +: xxs -> cons [x] (zipCons xs (stripe xxs)) + zipCons xs ys = match (xs, ys) with + ([], ys) -> ys + (xs, []) -> map (x -> [x]) xs + (x +: xs, y +: ys) -> cons (cons x y) (zipCons xs ys) + List.join `dot` stripe + +-- > List.foldb "" (t t2 -> "(" ++ t ++ " " ++ t2 ++ ")") (x -> x) ["Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Gerald", "Henry"] + +-- Sorted maps, represented as a pair of sequences +-- Use binary search to do lookups and find insertion points +-- This relies on the underlying sequence having efficient +-- slicing and concatenation +type Map k v = Map [k] [v] + +use Map Map + +namespace Search where + + indexOf : a -> [a] -> Optional Nat + indexOf a s = + ao = Some a + Search.exact (i -> ao `compare` List.at i s) 0 (size s) + + lubIndexOf' : a -> Nat -> [a] -> Nat + lubIndexOf' a start s = + ao = Some a + Search.lub (i -> ao `compare` List.at i s) start (size s) + + lubIndexOf : a -> [a] -> Nat + lubIndexOf a s = lubIndexOf' a 0 s + + lub : (Nat -> Int) -> Nat -> Nat -> Nat + lub hit bot top = + if bot >= top then top + else + mid = (bot + top) / 2 + match hit mid with + +0 -> mid + -1 -> lub hit bot mid + +1 -> lub hit (mid + 1) top + + exact : (Nat -> Int) -> Nat -> Nat -> Optional Nat + exact hit bot top = + if bot >= top then None + else + mid = (bot + top) / 2 + match hit mid with + +0 -> Some mid + -1 -> exact hit bot mid + +1 -> exact hit (mid + 1) top + +-- > ex = [0,2,4,6,77,192,3838,12000] +-- > List.map (e -> indexOf e ex) ex +-- > lubIndexOf 193 ex + + +(|>) : a -> (a -> b) -> b +a |> f = f a + +(<|) : (a -> b) -> a -> b +f <| a = f a + +id : a -> a +id a = a + +namespace Map where + + empty : Map k v + empty = Map [] [] + + singleton : k -> v -> Map k v + singleton k v = Map [k] [v] + + fromList : [(k,v)] -> Map k v + fromList kvs = + go acc i = match List.at i kvs with + None -> acc + Some (k,v) -> go (insert k v 
acc) (i + 1) + go empty 0 + + toList : Map k v -> [(k,v)] + toList m = List.zip (keys m) (values m) + + size : Map k v -> Nat + size s = List.size (keys s) + + lookup : k -> Map k v -> Optional v + lookup k = cases + Map ks vs -> match Search.indexOf k ks with + None -> None + Some i -> at i vs + + contains : k -> Map k v -> Boolean + contains k cases Map ks _ -> match Search.indexOf k ks with + None -> false + _ -> true + + insert : k -> v -> Map k v -> Map k v + insert k v = cases Map ks vs -> + use Search lubIndexOf + i = lubIndexOf k ks + match at i ks with + Some k' -> + if k == k' then Map ks (List.replace i v vs) + else Map (List.insert i k ks) (List.insert i v vs) + None -> Map (ks `snoc` k) (vs `snoc` v) + + map : (v -> v2) -> Map k v -> Map k v2 + map f m = Map (keys m) (List.map f (values m)) + + mapKeys : (k -> k2) -> Map k v -> Map k2 v + mapKeys f m = Map (List.map f (keys m)) (values m) + + union : Map k v -> Map k v -> Map k v + union = unionWith (_ v -> v) + + unionWith : (v -> v -> v) -> Map k v -> Map k v -> Map k v + unionWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> + go i j ko vo = match (at i k1, at j k2) with + (None, _) -> Map (ko ++ drop j k2) (vo ++ drop j v2) + (_, None) -> Map (ko ++ drop i k1) (vo ++ drop i v1) + (Some kx, Some ky) -> + use List slice unsafeAt + use Search lubIndexOf' + if kx == ky then + go (i + 1) (j + 1) + (ko `snoc` kx) + (vo `snoc` f (unsafeAt i v1) (unsafeAt j v2)) + else if kx < ky then + i' = lubIndexOf' ky i k1 + go i' j (ko ++ slice i i' k1) (vo ++ slice i i' v1) + else + j' = lubIndexOf' kx j k2 + go i j' (ko ++ slice j j' k2) (vo ++ slice j j' v2) + go 0 0 [] [] + + intersect : Map k v -> Map k v -> Map k v + intersect = intersectWith (_ v -> v) + + intersectWith : (v -> v -> v2) -> Map k v -> Map k v -> Map k v2 + intersectWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> + go i j ko vo = match (at i k1, at j k2) with + (None, _) -> Map ko vo + (_, None) -> Map ko vo + (Some kx, 
Some ky) -> + if kx == ky then + go (i + 1) (j + 1) + (ko `snoc` kx) + (vo `snoc` f (List.unsafeAt i v1) (List.unsafeAt j v2)) + else if kx < ky then + i' = Search.lubIndexOf' ky i k1 + go i' j ko vo + else + j' = Search.lubIndexOf' kx j k2 + go i j' ko vo + go 0 0 [] [] + + keys : Map k v -> [k] + keys = cases Map ks _ -> ks + + values : Map k v -> [v] + values = cases Map _ vs -> vs + +namespace Multimap where + + insert : k -> v -> Map k [v] -> Map k [v] + insert k v m = match Map.lookup k m with + None -> Map.insert k [v] m + Some vs -> Map.insert k (vs `snoc` v) m + + lookup : k -> Map k [v] -> [v] + lookup k m = Optional.orDefault [] (Map.lookup k m) + +type Set a = Set (Map a ()) +use Set Set + +namespace Set where + + empty : Set k + empty = Set Map.empty + + underlying : Set k -> Map k () + underlying = cases Set s -> s + + toMap : (k -> v) -> Set k -> Map k v + toMap f = cases Set (Map ks vs) -> Map ks (List.map f ks) + + fromList : [k] -> Set k + fromList ks = Set (Map.fromList (List.map (k -> (k,())) ks)) + + toList : Set k -> [k] + toList = cases Set (Map ks _) -> ks + + contains : k -> Set k -> Boolean + contains k = cases Set m -> Map.contains k m + + insert : k -> Set k -> Set k + insert k = cases Set s -> Set (Map.insert k () s) + + union : Set k -> Set k -> Set k + union s1 s2 = Set (Map.union (underlying s1) (underlying s2)) + + size : Set k -> Nat + size s = Map.size (underlying s) + + intersect : Set k -> Set k -> Set k + intersect s1 s2 = Set (Map.intersect (underlying s1) (underlying s2)) + +type Heap k v = Heap Nat k v [Heap k v] +use Heap Heap + +namespace Heap where + + singleton : k -> v -> Heap k v + singleton k v = Heap 1 k v [] + + size : Heap k v -> Nat + size = cases Heap n _ _ _ -> n + + union : Heap k v -> Heap k v -> Heap k v + union h1 h2 = match (h1, h2) with + (Heap n k1 v1 hs1, Heap m k2 v2 hs2) -> + if k1 >= k2 then Heap (n + m) k1 v1 (cons h2 hs1) + else Heap (n + m) k2 v2 (cons h1 hs2) + + pop : Heap k v -> Optional (Heap k 
v) + pop h = + go h subs = + use List drop size unsafeAt + if size subs == 0 then h + else if size subs == 1 then h `union` unsafeAt 0 subs + else union h (unsafeAt 0 subs) `union` go (unsafeAt 1 subs) (drop 2 subs) + match List.uncons (children h) with + None -> None + Some (s0, subs) -> Some (go s0 subs) + + children : Heap k v -> [Heap k v] + children = cases Heap _ _ _ cs -> cs + + max : Heap k v -> (k, v) + max = cases Heap _ k v _ -> (k, v) + + maxKey : Heap k v -> k + maxKey = cases Heap _ k _ _ -> k + + fromList : [(k,v)] -> Optional (Heap k v) + fromList kvs = + op a b = match a with + None -> b + Some a -> match b with + None -> Some a + Some b -> Some (union a b) + single = cases + (k, v) -> Some (singleton k v) + List.foldb single op None kvs + + fromKeys : [a] -> Optional (Heap a a) + fromKeys as = fromList (List.map (a -> (a,a)) as) + + sortDescending : [a] -> [a] + sortDescending as = + step = cases + None -> None + Some h -> Some (max h, pop h) + List.unfold (fromKeys as) step |> List.map Tuple.at1 + + sort : [a] -> [a] + sort as = sortDescending as |> List.reverse + +-- > sort [11,9,8,4,5,6,7,3,2,10,1] + +namespace Optional where + + map : (a -> b) -> Optional a -> Optional b + map f = cases + None -> None + Some a -> Some (f a) + + orDefault : a -> Optional a -> a + orDefault a = cases + None -> a + Some a -> a + + orElse : Optional a -> Optional a -> Optional a + orElse a b = match a with + None -> b + Some _ -> a + + flatMap : (a -> Optional b) -> Optional a -> Optional b + flatMap f = cases + None -> None + Some a -> f a + + map2 : (a -> b -> c) -> Optional a -> Optional b -> Optional c + map2 f oa ob = flatMap (a -> map (f a) ob) oa diff --git a/unison-src/Cofree.u b/unison-src/Cofree.u new file mode 100644 index 0000000000..c697feb1cb --- /dev/null +++ b/unison-src/Cofree.u @@ -0,0 +1,20 @@ +type Cofree f a = Cofree a (f (Cofree f a)) + +type Functor f = Functor (forall a b. 
(a ->{} b) -> f a ->{} f b) + +use Functor Functor +fmap : Functor f -> (a -> b) -> f a -> f b +fmap fn f = match fn with + Functor map -> map f + +use Cofree Cofree + +namespace Cofree where + + extract : Cofree f a -> a + extract = cases + Cofree a _ -> a + + duplicate : Functor f -> Cofree f a -> Cofree f (Cofree f a) + duplicate f c = match c with + Cofree a p -> Cofree c (fmap f (duplicate f) p) diff --git a/unison-src/EasyTest.u b/unison-src/EasyTest.u new file mode 100644 index 0000000000..4ebef149b5 --- /dev/null +++ b/unison-src/EasyTest.u @@ -0,0 +1,263 @@ +use Test Success Status Report Test Scope +use Test.Status Failed Expected Unexpected Pending +use Test.Success Passed Proved +use Test.Report Report +use Test.Test Test +use Test passed proved failed expected unexpected pending finished label +use Test.Scope Scope +use List flatMap + +type Test.Success = Passed Nat | Proved + +type Test.Status = Failed + | Expected Test.Success + | Unexpected Test.Success + | Pending + +-- Current scope together with accumulated test report. +type Test.Report = Report (Trie Text Test.Status) + +type Test.Test = Test (Test.Scope -> Test.Report) + +unique type Test.Scope = Scope [Text] + +foldSuccess : (Nat -> r) -> r -> Success -> r +foldSuccess passed proved = cases + Passed n -> passed n + Proved -> proved + +foldStatus : r -> (Success -> r) -> (Success -> r) -> r -> Status -> r +foldStatus failed expected unexpected pending = cases + Failed -> failed + Expected s -> expected s + Unexpected s -> unexpected s + Pending -> pending + +foldReport : (Trie Text Test.Status -> r) -> Report -> r +foldReport k r = case r of Report t -> k t + +foldScope : ([Text] -> r) -> Scope -> r +foldScope k = cases Scope ss -> k ss + +Scope.cons : Text -> Scope -> Scope +Scope.cons n = foldScope (Scope . List.cons n) + +-- Basic building blocks of tests +Test.finished : Status -> Test +Test.finished st = + Test (Report . 
foldScope (sc -> Trie.singleton sc st)) + +Test.failed : Test +Test.failed = finished Failed + +Test.proved : Test +Test.proved = finished <| Expected Proved + +Test.passed : Test +Test.passed = finished . Expected <| Passed 1 + +Test.passedUnexpectedly : Test +Test.passedUnexpectedly = finished . Unexpected <| Passed 1 + +Test.provedUnexpectedly : Test +Test.provedUnexpectedly = finished <| Unexpected Proved + +-- Basic test combinators + +Test.modifyStatus : (Status -> Status) -> Test -> Test +Test.modifyStatus f = + cases Test k -> Test (foldReport (Report . map f) . k) + +Test.label : Text -> Test -> Test +Test.label n = cases + Test.Test.Test k -> Test (scope -> k <| Scope.cons n scope) + +use Test.Report combine + +(Test.&&) : Test -> Test -> Test +(Test.&&) a b = match (a,b) with + (Test k1, Test k2) -> + Test ( + scope -> + let r1 = k1 scope + r2 = k2 scope + combine r1 r2) + +Test.passedWith : Text -> Test +Test.passedWith m = label m passed + +Test.provedWith : Text -> Test +Test.provedWith m = label m proved + +Test.failedWith : Text -> Test +Test.failedWith m = Test.label m Test.failed + +-- Report combinators + +Test.Report.combine : Report -> Report -> Report +Test.Report.combine r1 r2 = match (r1, r2) with + (Test.Report.Report t1, Test.Report.Report t2) -> + Report <| Trie.unionWith Status.combine t1 t2 + +Test.Report.empty : Report +Test.Report.empty = Report empty + +Test.Report.toCLIResult : Report -> [Test.Result] +Test.Report.toCLIResult r = + descend scope = cases (k, t) -> + go ((if scope != "" then (scope ++ ".") else "") ++ k) t + convert : Text -> Test.Status -> Test.Result + convert scope = cases + Test.Status.Failed -> Test.Result.Fail scope + Test.Status.Expected (Test.Success.Passed n) -> + Test.Result.Ok (scope ++ " : Passed " ++ Nat.toText n ++ " tests.") + Test.Status.Expected (Test.Success.Proved) -> + Test.Result.Ok (scope ++ " : Proved.") + go : Text -> Trie Text Test.Status -> [Test.Result] + go scope t = + rest = flatMap 
(descend scope) (Map.toList (tail t)) + match head t with + Optional.Some status -> + cons (convert scope status) rest + Optional.None -> rest + match r with Test.Report.Report t -> go "" t + +Test.report : Test -> Report +Test.report = cases Test k -> k (Scope []) + +-- Running tests + +Test.run : Test -> [Test.Result] +Test.run = Test.Report.toCLIResult . Test.report + +Test.runAll : [Test] -> [Test.Result] +Test.runAll = flatMap Test.run + +-- Status combinators + +Status.combine : Test.Status -> Test.Status -> Test.Status +Status.combine s1 s2 = match (s1, s2) with + (_, Pending) -> Pending + (Pending, _) -> Pending + (Failed, _) -> Failed + (_, Failed) -> Failed + (Unexpected a, Unexpected b) -> Unexpected (Success.combine a b) + (Unexpected a, _) -> Unexpected a + (_, Unexpected b) -> Unexpected b + (Expected a, Expected b) -> Expected (Success.combine a b) + + +Status.pending : Test.Status -> Test.Status +Status.pending = cases + Failed -> Pending + Expected s -> Unexpected s + Unexpected s -> Pending + Pending -> Pending + +-- Make a test pending +Test.pending : Test -> Test +Test.pending = modifyStatus Status.pending + +Test.modifyScope : (Scope -> Scope) -> Test -> Test +Test.modifyScope f = cases Test k -> Test (k . f) + +Success.combine s1 s2 = match (s1, s2) with + (Passed n, Passed m) -> Passed (n + m) + (Passed n, Proved) -> Passed n + (Proved, Passed n) -> Passed n + (Proved, Proved) -> Proved + +-- Test case generation + +-- A domain is either small, in which case we can exhaustively list all the +-- values in the domain, or it's large, in which case we can ask for a value +-- of a particular size. +type Domain a = Small [a] | Large (Weighted a) + +-- The domain of natural numbers is large. 
+Domain.nats : Domain Nat +Domain.nats = Large Weighted.nats + +-- The domain of all integers +Domain.ints : Domain Int +Domain.ints = let + go n = yield n <|> weight 1 + '(go (if n > +0 then negate n else increment (negate n))) + Large (List.foldl (a n -> a <|> yield n) + Weighted.Fail + [+0, +1, -1, maxInt, minInt] <|> go +2) + +use Universal == < > + +namespace Domain where + + -- The threshold of "small" domains. + smallSize = 10000 + + -- The Boolean domain is small + boolean : Domain Boolean + boolean = Small [false, true] + + -- The domain of lists of arbitrary data is large + listsOf : Domain a -> Domain [a] + listsOf d = + Large (Weighted.lists match d with + Domain.Small as -> Weighted.fromList as + Domain.Large w -> w) + + lists : Domain [()] + lists = Domain.listsOf (Small [()]) + + sample : Nat -> Domain a -> [a] + sample n = cases + Domain.Large w -> Weighted.sample n w + Domain.Small xs -> take n xs + + map : (a -> b) -> Domain a -> Domain b + map f = cases + Domain.Large w -> Domain.Large (Weighted.map f w) + Domain.Small as -> Domain.Small (List.map f as) + + pairs : Domain a -> Domain (a,a) + pairs d = lift2 (a b -> (a,b)) d d + + tuples : Domain a -> Domain b -> Domain (Pair a b) + tuples = lift2 (a b -> Pair a b) + + lift2 : (a -> b -> c) -> Domain a -> Domain b -> Domain c + lift2 f da db = let + wa = weighted da + wb = weighted db + wc = mergeWith (a1 a2 -> f a1 a2) wa wb + match (da, db) with + (Domain.Small as, Domain.Small bs) | size as + size bs < smallSize -> + Small (Weighted.sample smallSize wc) + _ -> Large wc + + weighted : Domain a -> Weighted a + weighted = cases + Domain.Small as -> Weighted.fromList as + Domain.Large w -> w + +-- Test a property for a given domain up to a maximum size +Test.forAll' : Nat -> Domain a -> (a -> Boolean) -> Test +Test.forAll' maxSize domain property = + check xs s = + List.map ( + cases (c, i) -> + if property c then finished (Expected s) + else label ("test case " ++ Nat.toText i) (finished Failed) + 
) (indexed xs) + List.foldb id (Test.&&) proved <| + match domain with + Domain.Small xs -> check (take maxSize xs) Proved + Domain.Large _ -> check (sample maxSize domain) (Passed 1) + +Test.check' : Boolean -> Test +Test.check' b = if b then Test.proved else Test.failed + +Test.forAll : Nat -> Domain a -> (a -> Boolean) -> [Test.Result] +Test.forAll n d p = Test.run (Test.forAll' n d p) + +Test.check : Boolean -> [Test.Result] +Test.check = Test.run . Test.check' diff --git a/unison-src/Trie.u b/unison-src/Trie.u new file mode 100644 index 0000000000..9a54522e18 --- /dev/null +++ b/unison-src/Trie.u @@ -0,0 +1,39 @@ +type Trie k v = { head : Optional v, tail : Map k (Trie k v) } + +namespace Trie where + empty : Trie k v + empty = Trie None Map.empty + + lookup : [k] -> Trie k v -> Optional v + lookup path t = match path with + [] -> Trie.head t + p +: ps -> flatMap (lookup ps) (Map.lookup p (Trie.tail t)) + + unionWith : (v -> v -> v) -> Trie k v -> Trie k v -> Trie k v + unionWith f t1 t2 = + h1 = Trie.head t1 + h2 = Trie.head t2 + Trie (map2 f h1 h2 `orElse` h1 `orElse` h2) + (Map.unionWith (unionWith f) (Trie.tail t1) (Trie.tail t2)) + +Trie.union : Trie k v -> Trie k v -> Trie k v +Trie.union = Trie.unionWith const + +Trie.insert : [k] -> v -> Trie k v -> Trie k v +Trie.insert path v t = + Trie.unionWith const (Trie.singleton path v) t + +Trie.singleton : [k] -> v -> Trie k v +Trie.singleton path v = + match path with + [] -> Trie (Some v) empty + k +: ks -> Trie None (Map.fromList [(k, Trie.singleton ks v)]) + +use Trie tail head + +Trie.map : (v1 -> v2) -> Trie k v1 -> Trie k v2 +Trie.map f t = Trie (map f (head t)) (map (Trie.map f) (tail t)) + +Trie.mapKeys : (k1 -> k2) -> Trie k1 v -> Trie k2 v +Trie.mapKeys f t = + Trie (head t) (Map.mapKeys f (Map.map (Trie.mapKeys f) (tail t))) diff --git a/unison-src/WeightedSearch.u b/unison-src/WeightedSearch.u new file mode 100644 index 0000000000..789425191f --- /dev/null +++ b/unison-src/WeightedSearch.u @@ 
-0,0 +1,69 @@ +-- A data structure that allows giving computations weight such that the +-- lowest-cost computation will be returned first. Allows searching +-- infinite spaces productively. +-- +-- Adapted from http://hackage.haskell.org/package/weighted-search-0.1.0.1 +use Universal == < > + +type Weighted a + = Fail + | Yield a (Weighted a) + | Weight Nat (() -> Weighted a) + +namespace Weighted where + + weight : Nat ->{e} (() ->{e} Weighted a) ->{e} Weighted a + weight w ws = Weight w ws + + map : (a ->{e} b) -> Weighted a ->{e} Weighted b + map f = cases + Weighted.Fail -> Weighted.Fail + Weighted.Yield x w -> Yield (f x) (map f w) + Weighted.Weight a w -> weight a '(map f !w) + + yield : a -> Weighted a + yield a = Yield a Fail + + flatMap : (a -> Weighted b) -> Weighted a -> Weighted b + flatMap f = cases + Weighted.Fail -> Weighted.Fail + Weighted.Yield x m -> f x <|> flatMap f m + Weighted.Weight w m -> Weight w '(flatMap f !m) + + mergeWith : (a -> b -> c) -> Weighted a -> Weighted b -> Weighted c + mergeWith f as bs = + flatMap (a -> map (b -> f a b) bs) as + + (<|>): Weighted a -> Weighted a -> Weighted a + (<|>) m n = match (m, n) with + (Weighted.Fail, n) -> n + (Weighted.Yield x m, n) -> Yield x (m <|> n) + (Weighted.Weight w m, Weighted.Fail) -> Weight w m + (Weighted.Weight w m, Weighted.Yield x n) -> + Yield x (Weight w m <|> n) + (Weighted.Weight w m, Weighted.Weight w' n) -> + if w < w' then Weight w '(!m <|> Weight (w' `drop` w) n) + else if w == w' then Weight w '(!m <|> !n) + else Weight w '(Weight (w `drop` w') m <|> !n) + + sample : Nat -> Weighted a -> [a] + sample n wsa = + if n > 0 then + match wsa with + Weighted.Fail -> [] + Weighted.Yield a ms -> cons a (sample (n `drop` 1) ms) + Weighted.Weight _ w -> sample n !w + else [] + + nats : Weighted Nat + nats = let + go n = yield n <|> weight 1 '(go (n + 1)) + go 0 + + lists : Weighted a -> Weighted [a] + lists w = yield [] <|> weight 1 '(mergeWith cons w (lists w)) + + fromList : [a] -> 
Weighted a + fromList = cases + [] -> Weighted.Fail + a +: as -> yield a <|> weight 1 '(fromList as) diff --git a/unison-src/base58.u b/unison-src/base58.u new file mode 100644 index 0000000000..a14d6eccce --- /dev/null +++ b/unison-src/base58.u @@ -0,0 +1,60 @@ +-- TODO: Characters +-- TODO: Bytes + +type Optional a = Some a | None + +type Words = Words (List Nat) +type Integer = Integer + +Integer.zero : Integer +Integer.zero = _ + +shiftLeft : Nat -> Integer -> Integer +shiftLeft x y = _ + +(+) : Integer -> Integer -> Integer +(+) x y = _ + +unfoldRight : ∀ a b . (a -> Optional (a, b)) -> a -> List b +unfoldRight f z = _ + +foldLeft : ∀ a b . a -> (a -> b -> a) -> List b -> a +foldLeft z f s = _ + +toInteger : Nat -> Integer +toInteger x = _ + +bigEndian : Words -> Integer +bigEndian = cases + Words.Words s -> + foldLeft Integer.zero (acc w -> shiftLeft 8 acc + toInteger w) s + +-- TODO: Need some conversions between integers and machine integers +divmod : Integer -> Nat -> (Integer, Nat) +divmod x y = _ + +(|>) : ∀ a b c . 
(a -> b) -> (b -> c) -> a -> c +(|>) g f x = f (g x) + +(==) : Integer -> Nat -> Boolean +(==) a b = _ + +charAt : Nat -> Text -> Text +charAt n = Text.drop n |> Text.take 1 + +codeString : Text +codeString = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" + +base58Encode : Words -> Text +base58Encode ws = + x = bigEndian ws + base58 : Integer -> Optional (Integer, Text) + base58 a = + if a == 0 + then Optional.None + else match divmod a 58 with + (d, m) -> Optional.Some (d, charAt m codeString) + foldLeft "" Text.concatenate (unfoldRight base58 x) + +base58Decode : Text -> Words +base58Decode txt = _ diff --git a/unison-src/basics.u b/unison-src/basics.u new file mode 100644 index 0000000000..073da21f27 --- /dev/null +++ b/unison-src/basics.u @@ -0,0 +1,72 @@ + +-- Unison is a statically typed functional language + +increment : Nat -> Nat -- signature is optional +increment n = n + 1 + +-- Lines starting with `>` are evaluated and printed on every file save. +> increment 99 + +replicate : Nat -> a -> [a] +replicate n a = toSequence (take n (constant a)) + +-- this is nice for quick testing! + +> replicate 3 "bye" + +-- can ask Unison for the type of any expression just by adding `?` to the end of it + +-- > (replicate 4)? 
+ +-- here's a more interesting example, mergesort - + +-- First we define the merge function, it merges two sorted lists +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + use Sequence ++ + use Optional None Some + go acc a b = match at 0 a with + None -> acc ++ b + Some hd1 -> match at 0 b with + None -> acc ++ a + Some hd2 -> + if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b + else go (acc `snoc` hd2) a (drop 1 b) + go [] a b + +-- let's make sure it works +> merge (<) [1,3,4,99,504,799] [0,19,22,23] + +-- looks good, now let's write mergesort + +sort : (a -> a -> Boolean) -> [a] -> [a] +sort lte a = + if Sequence.size a < 2 then a + else + l = sort lte (take (size a / 2) a) + r = sort lte (drop (size a / 2) a) + merge lte l r + +-- let's make sure it works + +> sort (<) [3,2,1,1,2,3,9182,1,2,34,1,80] + +> sort (<) ["Dave", "Carol", "Eve", "Alice", "Bob", "Francis", "Hal", "Illy", "Joanna", "Greg", "Karen"] + +-- SHIP IT!! 🚢 + +-- If you make a mistake, we try to have nice friendly error messages, not: +-- 🤖 ERROR DETECTED ⚡️ BEEP BOOP ⚡️ PLS RESUBMIT PROGRAM TO MAINFRAME + +-- a few examples of failing programs - + +--err1 = +-- a = "3" +-- sort (<) [1,2,a] + +-- err1a = sort (<) "not a list" + +--err2 : x -> y -> x +--err2 thing1 thing2 = +-- if true then thing1 +-- else thing2 diff --git a/unison-src/demo/1.u b/unison-src/demo/1.u new file mode 100644 index 0000000000..02ccb456de --- /dev/null +++ b/unison-src/demo/1.u @@ -0,0 +1,6 @@ +increment : Nat -> Nat +increment n = n + 1 + +> x = 1 + 40 +> increment x + diff --git a/unison-src/demo/2.u b/unison-src/demo/2.u new file mode 100644 index 0000000000..530ea9ade4 --- /dev/null +++ b/unison-src/demo/2.u @@ -0,0 +1,46 @@ +use Optional None Some +use Universal < + +uncons : [a] -> Optional (a, [a]) +uncons as = match at 0 as with + None -> None + Some hd -> Some (hd, drop 1 as) + +halve : [a] -> ([a], [a]) +halve s = splitAt (size s / 2) s + +splitAt : Nat -> [a] -> ([a], [a]) +splitAt 
n as = (take n as, drop n as) + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + use List ++ + go out a b = match (uncons a, uncons b) with + (None,_) -> out ++ b + (_,None) -> out ++ a + (Some (hA, tA), Some (hB, tB)) -> + if hA `lte` hB then go (out `snoc` hA) tA b + else go (out `snoc` hB) a tB + go [] a b + +sort : (a -> a -> Boolean) -> [a] -> [a] +sort lte as = + if size as < 2 then as + else match halve as with (left, right) -> + l = sort lte left + r = sort lte right + merge lte l r + +-- let's make sure it works + +> uncons [1, 2, 3] + +> sort (<) [3,2,1,1,2,3,9182,1,2,34,1,23] + +> sort (<) ["Dave", "Carol", "Eve", "Alice", "Bob", "Francis", "Hal", "Illy", "Joanna", "Greg", "Karen"] + +-- these programs have some type errors + +-- > sort (<) [3,2,1,1,2,3,9182,1,2,34,1,"oops"] + +-- > merge (<) [1,4,5,90,102] ["a", "b"] diff --git a/unison-src/demo/3.u b/unison-src/demo/3.u new file mode 100644 index 0000000000..b630facd7d --- /dev/null +++ b/unison-src/demo/3.u @@ -0,0 +1,115 @@ + +type Future a = Future ('{Remote} a) + +-- A simple distributed computation ability +ability Remote where + + -- Spawn a new node + spawn : {Remote} Node + + -- Start evaluating a computation on another node + at : Node -> '{Remote} a ->{Remote} Future a + +type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair + +force : Future a ->{Remote} a +force = cases Future.Future r -> !r + +-- Let's test out this beast! do we need to deploy our code to some EC2 instances?? +-- Gak, no not yet, we just want to test locally, let's write a handler +-- for the `Remote` ability that simulates everything locally! 
+ +use Future Future +use Optional None Some +use Monoid Monoid +use List ++ at +use Universal < + +List.map : (a ->{e} b) -> [a] ->{e} [b] +List.map f as = + go f acc as i = match at i as with + None -> acc + Some a -> go f (acc `snoc` f a) as (i + 1) + go f [] as 0 + +type Monoid a = Monoid (a -> a -> a) a + +Monoid.zero = cases Monoid.Monoid op z -> z +Monoid.op = cases Monoid.Monoid op z -> op + +Monoid.orElse m = cases + None -> Monoid.zero m + Some a -> a + +uncons : [a] -> Optional (a, [a]) +uncons as = match at 0 as with + None -> None + Some hd -> Some (hd, drop 1 as) + +dreduce : Monoid a -> [a] ->{Remote} a +dreduce m a = + if size a < 2 then Monoid.orElse m (List.at 0 a) + else + l = Remote.at Remote.spawn '(dreduce m (take (size a / 2) a)) + r = Remote.at Remote.spawn '(dreduce m (drop (size a / 2) a)) + Monoid.op m (force l) (force r) + +dmapReduce : (a ->{Remote} b) -> Monoid b -> [a] ->{Remote} b +dmapReduce f m as = dreduce m (List.map f as) + +dsort2 : (a -> a -> Boolean) -> [a] ->{Remote} [a] +dsort2 lte as = + dreduce (Monoid (merge lte) []) + (List.map (a -> [a]) as) + +halve : [a] -> ([a], [a]) +halve s = splitAt (size s / 2) s + +splitAt : Nat -> [a] -> ([a], [a]) +splitAt n as = (take n as, drop n as) + +Node.increment : Node -> Node +Node.increment n = + use Node.Node -- the constructor + match n with Node n -> Node (n + 1) + +Remote.runLocal : '{Remote} a -> a +Remote.runLocal r = + step nid = cases + {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) + {Remote.at _ t -> k} -> handle k (Future t) with step nid + {a} -> a -- the pure case + handle !r with step (Node.Node 0) + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + go out a b = match (uncons a, uncons b) with + (None,_) -> out ++ b + (_,None) -> out ++ a + (Some (hA, tA), Some (hB, tB)) -> + if hA `lte` hB then go (out `snoc` hA) tA b + else go (out `snoc` hB) a tB + go [] a b + +> merge (<) [1,3,4,99,504,799] [0,19,22,23] + +sort : (a -> a -> 
Boolean) -> [a] -> [a] +sort lte as = + if size as < 2 then as + else match halve as with (left, right) -> + l = sort lte left + r = sort lte right + merge lte l r + +dsort : (a -> a -> Boolean) -> [a] ->{Remote} [a] +dsort lte as = + use Remote at spawn + if size as < 2 then as + else match halve as with (left, right) -> + r1 = at spawn '(dsort lte left) + r2 = at spawn '(dsort lte right) + merge lte (force r1) (force r2) + +> sort (<) [1,2,3,234,6,2,4,66,2,33,4,2,57] + +> Remote.runLocal '(dsort (<) [1,2,3,234,6,2,4,66,2,33,4,2,57]) diff --git a/unison-src/errors/407.u b/unison-src/errors/407.u new file mode 100644 index 0000000000..4a4705a63a --- /dev/null +++ b/unison-src/errors/407.u @@ -0,0 +1,8 @@ +use Universal < + +foo : x -> x -> Nat +foo x y = 42 + + +> foo "hi" 2 + diff --git a/unison-src/errors/X-array.u b/unison-src/errors/X-array.u new file mode 100644 index 0000000000..6323617195 --- /dev/null +++ b/unison-src/errors/X-array.u @@ -0,0 +1,6 @@ +type X = S Text | I Nat + +foo : a -> b -> c -> X +foo x y z = X.S "" + +[foo +1 1 1.0, 1] diff --git a/unison-src/errors/abort-ability-checks-against-pure.u b/unison-src/errors/abort-ability-checks-against-pure.u new file mode 100644 index 0000000000..1d41bf7a1e --- /dev/null +++ b/unison-src/errors/abort-ability-checks-against-pure.u @@ -0,0 +1,9 @@ +--Abort +ability Abort where + Abort : forall a . () -> {Abort} a + +bork = u -> 1 + (Abort.Abort ()) + +(bork : () -> {} Nat) + +-- failing to fail in commit 2819c206acf80f926c6d970a4ffd47c961fa4502 diff --git a/unison-src/errors/all-errors.u b/unison-src/errors/all-errors.u new file mode 100644 index 0000000000..91d44a3e79 --- /dev/null +++ b/unison-src/errors/all-errors.u @@ -0,0 +1,43 @@ +type Optional a = Some a | None + +ability Abort where + Abort : forall a . () -> {Abort} a + +ability Abort2 where + Abort2 : forall a . () -> {Abort2} a + Abort2' : forall a . 
() -> {Abort2} a + +app : Optional Int +app = Optional.Some 3 + +app' : Optional Int +app' = 3 + +arrow : Int -> Int -> Int +arrow a = 3 + +ability' : Nat -> { Abort } Int +ability' n = Abort2.Abort2 () + +id : forall a . a -> a +id x = 3 + +f2 : forall a . a -> a -> a +f2 x = x + +const : forall a b . a -> b -> a +const a b = 3 + +y : (Optional Int) +y = 3 + +z' : (Optional Int, Optional Text, Optional Float) +z' = (None, 3) + +z : (Optional Int, Optional Text, Optional Float) +z = 3 + +x : () +x = 3 + +() diff --git a/unison-src/errors/check-for-regressions/and.u b/unison-src/errors/check-for-regressions/and.u new file mode 100644 index 0000000000..07af2a22fc --- /dev/null +++ b/unison-src/errors/check-for-regressions/and.u @@ -0,0 +1,7 @@ +and true 3 + +-- InSubtype t1=Nat, t2=Boolean +-- InCheck e=3, t=Boolean +-- InSynthesizeApp t=Boolean -> Boolean, e=3, n=2 +-- InAndApp +-- InSynthesize e=and true 3 diff --git a/unison-src/errors/check-for-regressions/app-polymorphic.u b/unison-src/errors/check-for-regressions/app-polymorphic.u new file mode 100644 index 0000000000..fa91f0d73a --- /dev/null +++ b/unison-src/errors/check-for-regressions/app-polymorphic.u @@ -0,0 +1,4 @@ +foo : a -> a -> a -> a -> a -> a +foo a b c d e = e + +foo 1 2 3 "ha" 5 diff --git a/unison-src/errors/check-for-regressions/app.u b/unison-src/errors/check-for-regressions/app.u new file mode 100644 index 0000000000..6db180418f --- /dev/null +++ b/unison-src/errors/check-for-regressions/app.u @@ -0,0 +1,4 @@ +foo : Nat -> Nat -> Nat -> Nat -> Nat -> Nat +foo a b c d e = a + b + c + d + e + +foo 1 2 3 "ha" 5 diff --git a/unison-src/errors/check-for-regressions/applying-non-function.u b/unison-src/errors/check-for-regressions/applying-non-function.u new file mode 100644 index 0000000000..8aad6fb95a --- /dev/null +++ b/unison-src/errors/check-for-regressions/applying-non-function.u @@ -0,0 +1,4 @@ +-- "Hello" "world" + +id a = a +id 1 1 diff --git 
a/unison-src/errors/check-for-regressions/casebody.u b/unison-src/errors/check-for-regressions/casebody.u new file mode 100644 index 0000000000..48f06838e4 --- /dev/null +++ b/unison-src/errors/check-for-regressions/casebody.u @@ -0,0 +1,3 @@ +match 3 with + 3 -> 4 + 4 -> "Surprise!" diff --git a/unison-src/errors/check-for-regressions/caseguard.u b/unison-src/errors/check-for-regressions/caseguard.u new file mode 100644 index 0000000000..751aa609ae --- /dev/null +++ b/unison-src/errors/check-for-regressions/caseguard.u @@ -0,0 +1,2 @@ +match 3 with + 3 | 3 -> 4 diff --git a/unison-src/errors/check-for-regressions/casepattern.u b/unison-src/errors/check-for-regressions/casepattern.u new file mode 100644 index 0000000000..d9f2230872 --- /dev/null +++ b/unison-src/errors/check-for-regressions/casepattern.u @@ -0,0 +1,3 @@ +match 3 with + 3 -> "Great!" + "Great" -> "Terrible." diff --git a/unison-src/errors/check-for-regressions/ifcond.u b/unison-src/errors/check-for-regressions/ifcond.u new file mode 100644 index 0000000000..4db94c8e04 --- /dev/null +++ b/unison-src/errors/check-for-regressions/ifcond.u @@ -0,0 +1 @@ +if 3 then 4 else 5 diff --git a/unison-src/errors/check-for-regressions/ifelse.u b/unison-src/errors/check-for-regressions/ifelse.u new file mode 100644 index 0000000000..c9907d3b47 --- /dev/null +++ b/unison-src/errors/check-for-regressions/ifelse.u @@ -0,0 +1 @@ +if true then 4 else "Surprise!" 
diff --git a/unison-src/errors/check-for-regressions/lens.u b/unison-src/errors/check-for-regressions/lens.u new file mode 100644 index 0000000000..9a4e4b1cd0 --- /dev/null +++ b/unison-src/errors/check-for-regressions/lens.u @@ -0,0 +1,9 @@ +type Foo a b = Foo a b +use Foo Foo +use Optional Some +setA : Foo a b -> Optional a -> Foo a b +setA foo a = match (foo, a) with + (Foo _ b, Some a) -> Foo a b + _ -> foo + +setA (Foo "hello" 3) (Some 7) diff --git a/unison-src/errors/check-for-regressions/not-and.u b/unison-src/errors/check-for-regressions/not-and.u new file mode 100644 index 0000000000..0563d88621 --- /dev/null +++ b/unison-src/errors/check-for-regressions/not-and.u @@ -0,0 +1,14 @@ +notid : Int -> Boolean +notid a = true +and (notid 3) true + +-- InSubtype t1=Nat, t2=Int +-- InCheck e=3, t=Int +-- InSynthesizeApp t=Int -> Boolean, e=3, n=1 +-- InSynthesizeApps f=notid1 ft=Int -> Boolean, es=[3] +-- InSynthesize e=notid1 3 +-- InCheck e=notid1 3, t=Boolean +-- InSynthesizeApp t=Boolean -> Boolean -> Boolean, e=notid1 3, n=1 +-- InAndApp +-- InSynthesize e=and notid1 3 true +-- InSynthesize e=Cycle (notid. (let r... 
diff --git a/unison-src/errors/check-for-regressions/not-and0.u b/unison-src/errors/check-for-regressions/not-and0.u new file mode 100644 index 0000000000..e57717510d --- /dev/null +++ b/unison-src/errors/check-for-regressions/not-and0.u @@ -0,0 +1,9 @@ +and (3 : Boolean) true + +-- InSubtype t1=Nat, t2=Boolean +-- InCheck e=3, t=Boolean +-- InSynthesize e=3:Boolean +-- InCheck e=3:Boolean, t=Boolean +-- InSynthesizeApp t=Boolean -> Boolean -> Boolean, e=3:Boolean, n=1 +-- InAndApp +-- InSynthesize e=and 3:Boolean true diff --git a/unison-src/errors/check-for-regressions/not-caseguard.u b/unison-src/errors/check-for-regressions/not-caseguard.u new file mode 100644 index 0000000000..45447642d3 --- /dev/null +++ b/unison-src/errors/check-for-regressions/not-caseguard.u @@ -0,0 +1,4 @@ +notid : Int -> Boolean +notid a = true +match 3 with + 3 | notid 3 -> 4 diff --git a/unison-src/errors/check-for-regressions/not-caseguard2.u b/unison-src/errors/check-for-regressions/not-caseguard2.u new file mode 100644 index 0000000000..899ca1d150 --- /dev/null +++ b/unison-src/errors/check-for-regressions/not-caseguard2.u @@ -0,0 +1,2 @@ +match 3 with + 3 | (3 : Boolean) -> 4 diff --git a/unison-src/errors/check-for-regressions/not-or.u b/unison-src/errors/check-for-regressions/not-or.u new file mode 100644 index 0000000000..d7af8da9e9 --- /dev/null +++ b/unison-src/errors/check-for-regressions/not-or.u @@ -0,0 +1 @@ +or (3 : Boolean) true diff --git a/unison-src/errors/check-for-regressions/not-vector.u b/unison-src/errors/check-for-regressions/not-vector.u new file mode 100644 index 0000000000..07721922df --- /dev/null +++ b/unison-src/errors/check-for-regressions/not-vector.u @@ -0,0 +1 @@ +[1, +1 : Nat] diff --git a/unison-src/errors/check-for-regressions/or.u b/unison-src/errors/check-for-regressions/or.u new file mode 100644 index 0000000000..842c3d3662 --- /dev/null +++ b/unison-src/errors/check-for-regressions/or.u @@ -0,0 +1 @@ +or true 3 diff --git 
a/unison-src/errors/check-for-regressions/vector.u b/unison-src/errors/check-for-regressions/vector.u new file mode 100644 index 0000000000..dc4fc92d9a --- /dev/null +++ b/unison-src/errors/check-for-regressions/vector.u @@ -0,0 +1 @@ +[1, +1] diff --git a/unison-src/errors/compiler-bug.u b/unison-src/errors/compiler-bug.u new file mode 100644 index 0000000000..9349ad0532 --- /dev/null +++ b/unison-src/errors/compiler-bug.u @@ -0,0 +1,5 @@ +x = 1 +Foo.y = 4.0 +a = + x + y + () diff --git a/unison-src/errors/console.u b/unison-src/errors/console.u new file mode 100644 index 0000000000..761be8aa84 --- /dev/null +++ b/unison-src/errors/console.u @@ -0,0 +1,19 @@ +ability State s where + get : Nat -> {State s} s + set : s -> {State s} () + +ability Console where + read : () -> {Console} (Optional Text) + write : Text -> {Console} () + +fst = cases Tuple.Cons a _ -> a + +snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b + +namespace Console where + + simulate : Request Console a -> {State ([Text], [Text])} a + simulate = cases + {Console.read _ -> k} -> k Optional.None + +Console.simulate diff --git a/unison-src/errors/console2.u b/unison-src/errors/console2.u new file mode 100644 index 0000000000..c57b382e3a --- /dev/null +++ b/unison-src/errors/console2.u @@ -0,0 +1,29 @@ +ability State s where + get : {State s} s + set : s -> {State s} () + +ability Console where + read : {Console} (Optional Text) + write : Text -> {Console} () + +fst = cases Tuple.Cons a _ -> a + +snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b + +namespace Console where + + simulate : Request Console a -> {State ([Text], [Text])} a + simulate = cases + {Console.read -> k} -> + io = State.get + ins = fst io + outs = snd io + State.set (drop 1 ins, outs) + k (at 0 ins) -- this is missing recursive call to handle + {Console.write t -> k} -> + io = State.get + ins = fst io + outs = snd io + k (State.set (ins, cons t outs)) -- this is missing recursive call + +() diff --git 
a/unison-src/errors/cyclic-unguarded.u b/unison-src/errors/cyclic-unguarded.u new file mode 100644 index 0000000000..96d6c671b2 --- /dev/null +++ b/unison-src/errors/cyclic-unguarded.u @@ -0,0 +1,8 @@ + +x = y + 1 +y = x + 1 + +> x + + + diff --git a/unison-src/errors/effect-inference1.u b/unison-src/errors/effect-inference1.u new file mode 100644 index 0000000000..d65321e992 --- /dev/null +++ b/unison-src/errors/effect-inference1.u @@ -0,0 +1,12 @@ +ability Abort where + Abort : forall a . () -> {Abort} a + +foo n = if n >= 1000 then n else !Abort.Abort + +bar : (Nat -> {} Nat) -> Nat -> Nat +bar f i = f i + +bar foo 3 + +-- as of 3935b366383fe8184f96cfe714c31ca04234cf27, this typechecks (unexpected) +-- and then bombs in the runtime because the Abort ability isn't handled. diff --git a/unison-src/errors/effect_unknown_type.uu b/unison-src/errors/effect_unknown_type.uu new file mode 100755 index 0000000000..37fb492e4b --- /dev/null +++ b/unison-src/errors/effect_unknown_type.uu @@ -0,0 +1,20 @@ +ability T where + a : Unknown -> {T} () + +--b : Unknown +--b = () + +---- + +unison: can't hashComponents if bindings have free variables: + ["Unknown"] + ["T"] +CallStack (from HasCallStack): + error, called at src/Unison/ABT.hs:504:11 in unison-parser-typechecker-0.1-I7C95FdIglBGnISbV534LW:Unison.ABT + +-- Typechecker emits a helpful error about b's use of an unknown type, but not a's. +-- +-- Error for b: +-- typechecker.tests/ability_unknown_type.u FAILURE I don't know about the type Unknown. 
Make sure it's imported and spelled correctly: +-- +-- 22 | b : Unknown diff --git a/unison-src/errors/empty-block.u b/unison-src/errors/empty-block.u new file mode 100644 index 0000000000..312149d407 --- /dev/null +++ b/unison-src/errors/empty-block.u @@ -0,0 +1 @@ +foo = diff --git a/unison-src/errors/ex1.u b/unison-src/errors/ex1.u new file mode 100644 index 0000000000..0a20aec50e --- /dev/null +++ b/unison-src/errors/ex1.u @@ -0,0 +1,49 @@ +use Optional None Some + +foo : Optional a -> [a] +foo = cases + None -> [] + Some a -> [a] + +"hello" `Sequence.cons` foo (Some 3) + +-- Sequence.cons has type `a -> [a] -> [a]` +-- `a` was determined to be `Text` because "hello" had type `Text`. +-- Therefore `foo (Some 3)` was checked against `[Text]` +-- but it actually had type `[Nat]`. Use `> why err1` for more detail. +-- type Extractor v loc a = Note v loc -> Maybe a +-- do +-- e <- errorTerm +-- b <- isFunctionCall +-- if b then do + -- + + +-- in reply to `> why err1`: +-- `foo` has type `Optional a -> [a]` +-- `a` was determined to be `Nat` because +-- `Some 3` has type `Optional Nat`. Use `> why err2` for more detail + +-- in reply to `> why err2`: +-- `Some` has type `a -> Optional a` +-- `a` was determinewd to be `Nat` because `3` has type `Nat` + +x = 3 + +and x 4 +------------- generic synthesizeApp possibility +-- `and` has type `Boolean -> Boolean -> Boolean` +-- .. (no typevars to explain, so skip) +-- Therefore `3` was checked against `Boolean`, +-- but it actually had type `Nat`. + +------------- specialized "and" possibility +-- The arguments to `and` must be of type `Boolean`, +-- but `x` has type `Nat`. Use `> why err1` for more detail. + +and 3 4 +-- but the literal `3` has type `Nat`. 
+ +match 3 with + 3 -> "text" + 4 -> 4.0 diff --git a/unison-src/errors/fix745.u b/unison-src/errors/fix745.u new file mode 100644 index 0000000000..98af39ab79 --- /dev/null +++ b/unison-src/errors/fix745.u @@ -0,0 +1,19 @@ + +unique ability A where a : Nat +unique ability B where b : Nat + +noGood : Nat ->{A} '{B} () +noGood n unit = + -- The A.a should be an ability check failure, since we are in the body + -- of an arrow which only has the {B} ability set. + A.a + B.b + () + +ok : Nat ->{A} '{B} () +ok n = + -- This is okay, because the A.a is being evaluated in the body of an + -- arrow with {A}. The result of the body is another lambda which + -- is allowed {B} requests by type signature of `ok`. + A.a + 'let B.b; () diff --git a/unison-src/errors/handle-inference.u b/unison-src/errors/handle-inference.u new file mode 100644 index 0000000000..8d5dc87c7a --- /dev/null +++ b/unison-src/errors/handle-inference.u @@ -0,0 +1,22 @@ +--handle inference +ability State s where + get : ∀ s . () -> {State s} s + set : ∀ s . s -> {State s} () +state : ∀ a s . s -> Request (State s) a -> a +state s = cases + {a} -> a + {State.get _ -> k} -> handle k s with state s + {State.set s -> k} -> handle k () with state s +-- modify : ∀ s . (s -> s) -> {State s} () +-- modify f = State.set (f (State.get())) +ex : () -> {State Nat} Nat +ex blah = + State.get() Nat.+ 42 +-- note this currently succeeds, the handle block +-- gets an inferred type of ∀ a . a, it appears that +-- the existential `a` which gets instantiated for the +-- state call never gets refined, most likely due to +-- missing a subtype check in handle +y : Text +y = handle ex () with state 5 +() diff --git a/unison-src/errors/handler-coverage-checking.uu b/unison-src/errors/handler-coverage-checking.uu new file mode 100644 index 0000000000..134519ef01 --- /dev/null +++ b/unison-src/errors/handler-coverage-checking.uu @@ -0,0 +1,29 @@ +--State3 ability +ability State se2 where + put : ∀ se . 
se -> {State se} () + get : ∀ se . () -> {State se} se + +state : ∀ s a . s -> Request (State s) a -> (s, a) +state woot = cases + { State.get () -> k } -> handle k woot with state woot + { State.put snew -> k } -> handle k () with state snew + +ex1 : (Nat, Nat) +ex1 = handle State.get () with state 42 + +ex1a : (Nat, Nat) +ex1a = handle 49 with state 42 + +ex1b = handle 0 with x -> 10 + +ex1c : Nat +ex1c = handle 0 with x -> 10 + +ex1d = handle 49 with state 42 + +ex2 = handle State.get () with state 42 + +ex3 : (Nat, Nat) +ex3 = ex2 + +() diff --git a/unison-src/errors/id.u b/unison-src/errors/id.u new file mode 100644 index 0000000000..e9e8fb0a95 --- /dev/null +++ b/unison-src/errors/id.u @@ -0,0 +1,3 @@ +id a = a + +(id 42 : Text) diff --git a/unison-src/errors/io-effect.u b/unison-src/errors/io-effect.u new file mode 100644 index 0000000000..7373163531 --- /dev/null +++ b/unison-src/errors/io-effect.u @@ -0,0 +1,9 @@ +--IO ability +ability IO where + launchMissiles : () -> {IO} () +-- binding is not guarded by a lambda, it only can access +-- ambient abilities (which will be empty) +ex1 : {IO} () +ex1 = IO.launchMissiles() +() + diff --git a/unison-src/errors/io-state1.u b/unison-src/errors/io-state1.u new file mode 100644 index 0000000000..a9d1c11c6a --- /dev/null +++ b/unison-src/errors/io-state1.u @@ -0,0 +1,17 @@ +--IO/State1 ability +ability IO where + launchMissiles : {IO} () +ability State se2 where + put : ∀ se . se -> {State se} () + get : ∀ se . 
() -> {State se} se +foo : () -> {IO} () +foo unit = +-- inner binding can't access outer abilities unless it declares +-- them explicitly + incBy : Int -> {State Int} () + incBy i = + launchMissiles -- not allowed + y = State.get() + State.put (y Int.+ i) + () +() diff --git a/unison-src/errors/map-reduce.u b/unison-src/errors/map-reduce.u new file mode 100644 index 0000000000..d29cc69089 --- /dev/null +++ b/unison-src/errors/map-reduce.u @@ -0,0 +1,102 @@ + +-- A simple distributed computation ability +ability Remote n where + + -- Spawn a new node, of type `n` + spawn : {Remote n} n + + -- Sequentially evaluate the given thunk on another node + -- then return to the current node when it completes + at : n -> '{Remote n} a -> {Remote n} a + + -- Start a computation running, returning an `r` that can be forced to + -- await the result of the computation + fork : '{Remote n} a -> {Remote n} ('{Remote n} a) + +type Monoid a = Monoid (a -> a -> a) a + +use Nat + - * / == < +use Sequence map take drop size foldLeft halve +use Optional None Some +use Monoid.Monoid -- import the constructor +use Remote fork spawn at + +namespace Monoid where + + zero : Monoid a -> a + zero = cases Monoid _ z -> z + + op : Monoid a -> a -> a -> a + op = cases Monoid op _ -> op + + foldMap : (a -> {e} b) -> Monoid b -> [a] -> {e} b + foldMap f m as = + op = Monoid.op m + -- this line has a type error, `op` is (b -> b -> b) + -- and `zero m` is of type `b`, but `as` is of type `[a]` + -- 👇 + if size as < 2 then Sequence.foldLeft op (zero m) as + else match Sequence.halve as with (l, r) -> foldMap f m l `op` foldMap f m r + + par : Monoid a -> Monoid ('{Remote n} a) + par m = + o = op m + z = zero m + -- note - does not typecheck if flip the order of the constructor! 
+ -- the 'z has type 'a, which fails to match the later remote thunk + Monoid (a1 a2 -> parApply o a1 a2) 'z + +force : '{e} a -> {e} a +force a = !a + +mapReduce : (a -> {Remote n} b) -> Monoid b -> [a] -> {Remote n} b +mapReduce f m a = + force <| Monoid.foldMap (a -> fork '(f a)) (Monoid.par m) a + +namespace Sequence where + + foldLeft : (b -> a -> b) -> b -> [a] -> b + foldLeft f z as = _todo2 + + halve : [a] -> ([a], [a]) + halve as = (take (size as / 2) as, drop (size as / 2) as) + +ex : '{Remote n} Nat +ex = 'let + alice = spawn + bob = spawn + f1 = fork '(1 + 1) + f2 = fork '(2 + 2) + !f1 + !f2 + +parApply : (a -> b -> c) -> '{Remote n} a -> '{Remote n} b -> '{Remote n} c +parApply f a b = 'let + x = fork a + y = fork b + f !x !y + +-- this currently crashes the compiler +Remote.runLocal : '{Remote Nat} a -> a +Remote.runLocal r = + step : Nat -> Request (Remote Nat) a -> a + step nid = cases + {a} -> a + {Remote.fork t -> k} -> handle k t with step nid + {Remote.spawn -> k} -> handle k nid with step (nid + 1) + {Remote.at _ t -> k} -> handle k !t with step (nid + 1) + + handle !r with step 0 + +uno : '{e} a -> '{e} a -> {e} a +uno a a2 = !a + +dos : (a -> a -> a) -> '{e} a -> '{e} a -> {e} a +dos f a a2 = f !a !a2 + +(<|) : (i -> o) -> i -> o +f <| i = f i +i |> f = f i + +Stream.fromNat 1 + |> Stream.take 15 + |> Stream.toSequence diff --git a/unison-src/errors/map-traverse3.u b/unison-src/errors/map-traverse3.u new file mode 100644 index 0000000000..724a5bdeee --- /dev/null +++ b/unison-src/errors/map-traverse3.u @@ -0,0 +1,26 @@ +--map/traverse +ability Noop where + noop : a -> {Noop} a + +type List a = Nil | Cons a (List a) + +map : (a ->{} b) -> List a -> List b +map f = cases + List.Nil -> List.Nil + List.Cons h t -> List.Cons (f h) (map f t) + +c = List.Cons + +z : ∀ a . 
List a +z = List.Nil + +ex = c 1 (c 2 (c 3 z)) + +pureMap : List Text +pureMap = map (a -> "hello") ex + +-- this should not typecheck because map is annotated to take a pure function +zappy : '{Noop} (List Nat) +zappy = 'let map (zap -> Noop.noop (zap Nat.+ 1)) ex + +pureMap diff --git a/unison-src/errors/mismatched-braces.u b/unison-src/errors/mismatched-braces.u new file mode 100644 index 0000000000..e638736768 --- /dev/null +++ b/unison-src/errors/mismatched-braces.u @@ -0,0 +1,4 @@ +} + +x = 3 + diff --git a/unison-src/errors/need-nominal-type.uu b/unison-src/errors/need-nominal-type.uu new file mode 100644 index 0000000000..14b48ed3cc --- /dev/null +++ b/unison-src/errors/need-nominal-type.uu @@ -0,0 +1,7 @@ +type Foo = Foo +type Bar = Bar + +x : Foo +x = Bar.Bar + +x diff --git a/unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u b/unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u new file mode 100644 index 0000000000..f1fcdf7f80 --- /dev/null +++ b/unison-src/errors/poor-error-message/ability-check-fail-by-calling-wrong-function.u @@ -0,0 +1,27 @@ +reduce2 : a -> (a -> a -> a) -> Sequence a -> a +reduce2 a0 f s = match at 0 s with + Optional.None -> a0 + Optional.Some a1 -> reduce (f a0 a1) f (drop 1 s) + +() + +-- as of commit a48fa3b, we get the following error + + --The expression at Line 18, columns 40-41 (in red below) is requesting + -- {𝛆3} abilitys, but this location only has access to + -- {} + -- + -- 18 | Optional.Some a1 -> reduce (f a0 a1) f (drop 1 s) + -- ^ + -- simple cause: + -- AbilityCheckFailure: ambient={} requested={𝛆3} + +-- The problem is that I've accidentally called `reduce` instead of `reduce2`, +-- which TDNRs to `Stream.reduce`, which doesn't allow abilitys, and `f` isn't +-- restricted to be pure. 
+ +-- I'd like to know: +-- a) reduce is the built-in +-- Stream.reduce : a -> (a ->{} a ->{} a) -> Stream a -> a +-- b) maybe those suggestions, like did you mean reduce2 instead of reduce, +-- which would typecheck. I understand that would not be a quick fix. diff --git a/unison-src/errors/poor-error-message/consoleh.u b/unison-src/errors/poor-error-message/consoleh.u new file mode 100644 index 0000000000..12b92f50db --- /dev/null +++ b/unison-src/errors/poor-error-message/consoleh.u @@ -0,0 +1,57 @@ +-- Token {payload = Semi, start = Pos 51 1, end = Pos 51 1} :| [] +-- bootstrap: unison-src/tests/console.uu:51:1: +-- unexpected Semi +-- expecting : or the rest of infixApp +-- 51 | () + +ability State s where + get : {State s} s + set : s -> {State s} () + +ability Console where + read : {Console} (Optional Text) + write : Text -> {Console} () + +fst = cases Tuple.Cons a _ -> a + +snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b + +namespace Console where + + state : s -> Request (State s) a -> a + state s = cases + {State.get -> k} -> handle k s with state s + {State.set s' -> k} -> handle k () with state s' + {a} -> a + + simulate : Request Console d -> {State ([Text], [Text])} d + simulate = cases + {Console.read -> k} -> + io = State.get + ins = fst io + outs = snd io + State.set (drop 1 ins, outs) + -- this really should typecheck but doesn't for some reason + -- error is that `simulate` doesn't check against `Request Console c -> r`, + -- but seems like that `r` should get instantiated as `{State (..)} c`. + handle k (at 0 ins) with simulate + {Console.write t -> k} -> + io = State.get + ins = fst io + outs = snd io + -- same deal here + handle k (State.set (ins, cons t outs)) with simulate + {a} -> a + +(++) = concatenate + +handle + handle + use Console read write + use Optional Some None + write "What's your name?" + match read with + Some name -> write ("Hello" ++ name) + None -> write "Fine, be that way." 
+ with Console.simulate +() diff --git a/unison-src/errors/poor-error-message/doesnt-match-annotation.u b/unison-src/errors/poor-error-message/doesnt-match-annotation.u new file mode 100644 index 0000000000..34d5d146ab --- /dev/null +++ b/unison-src/errors/poor-error-message/doesnt-match-annotation.u @@ -0,0 +1,5 @@ +crazyTuple : a -> b -> (a,b)-- -> (a,b) +crazyTuple a b c = c + +() + diff --git a/unison-src/errors/poor-error-message/function-calls.u b/unison-src/errors/poor-error-message/function-calls.u new file mode 100644 index 0000000000..d3109a5cc0 --- /dev/null +++ b/unison-src/errors/poor-error-message/function-calls.u @@ -0,0 +1,11 @@ +f1 : Int -> Int +f1 n = n + +1 + +f1 (3 Nat.+ 3) + +-- idea: + -- I was expecting Int + -- vs + -- f1 was expecting Int + +-- "In the call below" vs "In the call to `foo`" diff --git a/unison-src/errors/poor-error-message/function-calls1.u b/unison-src/errors/poor-error-message/function-calls1.u new file mode 100644 index 0000000000..8a2a41c0f8 --- /dev/null +++ b/unison-src/errors/poor-error-message/function-calls1.u @@ -0,0 +1,16 @@ +f1 : Int -> Int +f1 n = n + +1 + +f1 (3 Nat.+ 3) + +-- issues: +-- - the highlight term is '3' but should be (3 + 3) +-- - + + +-- Paul Thought: + -- Whenever we synthesize a type, we can set the location + -- of the synthesized type to be the location of the + -- synthesized expression. 
+-- Arya & Runar: isn't that what we already do +-- Paul: No, we only something something when we refine an existential diff --git a/unison-src/errors/poor-error-message/function-calls2.u b/unison-src/errors/poor-error-message/function-calls2.u new file mode 100644 index 0000000000..10a3812c8d --- /dev/null +++ b/unison-src/errors/poor-error-message/function-calls2.u @@ -0,0 +1,19 @@ +id : a -> a +id a = a + +f1 : Int -> Int +f1 n = n + +1 + +f1 (id 3) + +-- issues: +-- - the highlight term is '3' but should be (3 + 3) +-- - + + +-- Paul Thought: + -- Whenever we synthesize a type, we can set the location + -- of the synthesized type to be the location of the + -- synthesized expression. +-- Arya & Runar: isn't that what we already do +-- Paul: No, we only something something when we refine an existential diff --git a/unison-src/errors/poor-error-message/function-calls3.u b/unison-src/errors/poor-error-message/function-calls3.u new file mode 100644 index 0000000000..44665aff00 --- /dev/null +++ b/unison-src/errors/poor-error-message/function-calls3.u @@ -0,0 +1,26 @@ +-- first : a -> a -> a +-- first a b = a + +id5 : a -> a -> a -> a -> a -> (a,a,a,a,a) +id5 a b c d e = (a, b, c, d, e) + +-- second : a -> a -> a +-- second a b = b + +id5 1 +2 3 4 5 + +-- (match true with +-- true -> first +-- false -> second) 1 +2 + +-- issues: +-- - the highlight term is '3' but should be (3 + 3) +-- - + + +-- Paul Thought: + -- Whenever we synthesize a type, we can set the location + -- of the synthesized type to be the location of the + -- synthesized expression. +-- Arya & Runar: isn't that what we already do +-- Paul: No, we only something something when we refine an existential diff --git a/unison-src/errors/poor-error-message/handle.u b/unison-src/errors/poor-error-message/handle.u new file mode 100644 index 0000000000..6f476f6890 --- /dev/null +++ b/unison-src/errors/poor-error-message/handle.u @@ -0,0 +1,40 @@ +--Parsing/typechecking... 
+--Token {payload = Close, start = Pos 27 5, end = Pos 27 5} :| [] +--bootstrap: /Users/pchiusano/work/unison/unison-src/tests/state2.u:27:5: +--unexpected Close +-- 27 | let +-- + +type Optional a = None | Some a + +ability State s where + put : s -> {State s} () + get : {State s} s + +state : s -> Request (State s) a -> (s, a) +state woot = cases + { State.get -> k } -> handle k woot with state woot + { State.put snew -> k } -> handle k () with state snew + { a } -> (woot, a) + +modify : (s -> s) -> {State s} () +modify f = State.put (f State.get) + +increment : '{State Nat} () +increment = '(modify ((+) 1)) + +first : (a, b) -> a +first = cases (a,_) -> a + +ex : Nat +ex = + result = handle (state 0) + let + x = State.get + !increment + !increment + () + + first result + +() diff --git a/unison-src/errors/poor-error-message/handler-ex.u b/unison-src/errors/poor-error-message/handler-ex.u new file mode 100644 index 0000000000..9e07c1262c --- /dev/null +++ b/unison-src/errors/poor-error-message/handler-ex.u @@ -0,0 +1,24 @@ +--Line 13, columns 44-46 has a type mismatch (in red below): +-- +-- 13 | {Ask.ask _ -> k} -> handle k () with supply t +-- +--The two types involved are: +-- +-- () (an intrinsic, in blue) +-- Text (line 8, columns 30-34, in green) +-- +-- 8 | supply : Text -> Request (Ask Text) a -> a +-- +-- Verbiage could be improved, but also the `()` location should +-- point to line 22, the `k ()` call. 
+ability Ask foo where + ask : () -> {Ask a} a + +supply : Text -> Request (Ask Text) a -> a +supply t = cases + {a} -> a + -- `k` should be of type `Text -> Request Ask a`, + -- so calling it with `()` here should be a type error + {Ask.ask _ -> k} -> handle k () with supply t + +supply diff --git a/unison-src/errors/poor-error-message/mismatched-case-result-types.u b/unison-src/errors/poor-error-message/mismatched-case-result-types.u new file mode 100644 index 0000000000..e1dd520475 --- /dev/null +++ b/unison-src/errors/poor-error-message/mismatched-case-result-types.u @@ -0,0 +1,20 @@ +--mismatched case result types +type Optional a = None | Some a +match Optional.Some 3 with + x -> 1 + y -> "boo" + +-- as of 5ae98f7, produces this message: + + --Each case of a match/with expression need to have the same type. + -- Here, one is Nat, and another is Text: + -- + -- 4 | x -> 1 -- x is highlighted + -- 5 | y -> "boo" -- "boo" is highlighted + -- + -- from right here: + -- + -- 4 | x -> 1 -- 1 is highlighted + +-- IMO, 1 should be highlighted instead of x on line 12; +-- then lines 14-17 would be omitted. diff --git a/unison-src/errors/poor-error-message/notaguard.u b/unison-src/errors/poor-error-message/notaguard.u new file mode 100644 index 0000000000..54c3f0e373 --- /dev/null +++ b/unison-src/errors/poor-error-message/notaguard.u @@ -0,0 +1,21 @@ +-- Getting the error +--The guard expression for a case has to be Boolean, but this one is a7: +-- +-- 13 | {Ask.ask -> k} -> handle k () with supply t +-- +-- from right here: +-- +-- 8 | supply : Text -> Request (Ask Text) a -> a +-- +-- +-- even though this program doesn't use guards! 
+ +ability Ask a where + ask : {Ask a} a + +supply : Text -> Request (Ask Text) a -> a +supply t = cases + {a} -> "foo" -- a + {Ask.ask -> k} -> handle k () with supply t + +() diff --git a/unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u b/unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u new file mode 100644 index 0000000000..4f9b25c325 --- /dev/null +++ b/unison-src/errors/poor-error-message/overapplied-data-constructor-loc.u @@ -0,0 +1,17 @@ +-- board piece +type P = X | O | E + +type Board = Board P P + +use Board.Board +use P O X E + +match Board X O X + with Board a b c -> a + + +-- gives this error: + -- This looks like a function call, but with a Board where the function should be. Are you missing an operator? + -- ^^^^^ + -- 13 | match Board X O X + -- ^^^^^ diff --git a/unison-src/errors/poor-error-message/pattern-case-location.u b/unison-src/errors/poor-error-message/pattern-case-location.u new file mode 100644 index 0000000000..3cba3a76b2 --- /dev/null +++ b/unison-src/errors/poor-error-message/pattern-case-location.u @@ -0,0 +1,10 @@ +-- The location of the error is imprecise. It should point to +-- the pattern `Bar.Bar`. 
+ +unique type Foo = Foo +unique type Bar = Bar + +foo : Foo -> Foo +foo = cases + Foo.Foo -> Foo + Bar.Bar -> Foo diff --git a/unison-src/errors/poor-error-message/pattern-matching-1.u b/unison-src/errors/poor-error-message/pattern-matching-1.u new file mode 100644 index 0000000000..2e53532d39 --- /dev/null +++ b/unison-src/errors/poor-error-message/pattern-matching-1.u @@ -0,0 +1,28 @@ +type Foo0 = Foo0 +type Foo1 a = Foo1 a +type Foo2 a b = Foo2 a b +type Foo3 a b c = Foo3 a b c + +use Foo0 Foo0 +use Foo1 Foo1 +use Foo2 Foo2 + +x = match Foo0 with + Foo0 -> 1 + +y = match Foo1 1 with + Foo1 1 -> 0 + Foo1 _ -> 10 + +z = match Foo2 1 "hi" with + Foo2 x _ -> x + Foo2 1 _ -> 1 + +w = match Foo3.Foo3 1 2 "bye" with + Foo3.Foo3 1 2 x -> Text.concatenate x "bye" + -- where the heck are these locations coming from? + -- I feel, since concatenate isn't polymorphic, that `Text` + -- should come from there, not from `x`. + _ -> () + +() diff --git a/unison-src/errors/poor-error-message/tdnr-demo.u b/unison-src/errors/poor-error-message/tdnr-demo.u new file mode 100644 index 0000000000..d244e35e5f --- /dev/null +++ b/unison-src/errors/poor-error-message/tdnr-demo.u @@ -0,0 +1,55 @@ +left = take 3 (fromNat 5) +right = take 10 (fromNat 100) +sum = reduce 0 (+) + +iterate : a -> Nat -> (a -> a) -> Sequence a +iterate a n f = + iterate0 a n f acc = + if n > 0 then + a' = f a + iterate0 a' (n `drop` 1) f (snoc acc a') + else acc + iterate0 a n f [] + +use Optional Some None +reduce : a -> (a -> a -> a) -> Sequence a -> a +reduce a0 f s = match at 0 s with + Optional.None -> a0 + Optional.Some a1 -> reduce (f a0 a1) f (drop 1 s) + +pseudo-Stream : Sequence Nat +pseudo-Stream = iterate 0 200 increment +left2 = take 3 (drop 3 pseudo-Stream) +right2 = take 10 (drop 99 pseudo-Stream) +sum2 = reduce 0 (+) + +(sum (append left right)) == (sum2 (left2 ++ right2)) + +-- local definition of `reduce` for Sequence understandably breaks TDNR +-- of Stream.reduce on line 3, which makes `sum` 
on line 3 expect +-- `Sequence Nat`, which constrains `append` on line 26 to return +-- `Sequence Nat`, so it no longer matches `Stream Nat -> Stream Nat -> 'a`, +-- which breaks TDNR of Stream.append, resulting in the error message: + + --Sorry, you hit an error we didn't make a nice message for yet. + -- + --Here is a summary of the Note: + -- simple cause: + -- SolvedBlank: Resolve "append" Line 26, columns 7-13 v=_170 t=Stream Nat -> Stream Nat -> [Nat] + -- path: + -- InSynthesize e=(let reduce1 0 (UInt... + -- InSynthesize e=(let (Sequence.take:... + -- InSynthesize e=(let (Sequence.take:... + -- InSynthesize e=(let (iterate:(𝛆. (a... + -- InSynthesize e=(let (λ (a. (λ (n. (... + -- InSynthesize e=(let reduce1 0 (UInt... + -- InSynthesize e=Cycle (left. (right.... + -- + -- + --I'm not sure what append means at Line 26, columns 7-13 + -- + -- 26 | (sum (append left right)) == (sum2 (left2 ++ right2)) + -- + --Whatever it is, it has a type that conforms to Stream Nat -> Stream Nat -> [Nat] + +-- Commenting out the local definition of `reduce` mysteriously fixes TDNR of `append` for the above reasons. diff --git a/unison-src/errors/poor-error-message/token-printing.u b/unison-src/errors/poor-error-message/token-printing.u new file mode 100644 index 0000000000..98d4ad1f9d --- /dev/null +++ b/unison-src/errors/poor-error-message/token-printing.u @@ -0,0 +1,25 @@ +-- board piece + +type Board = Board Nat Nat Nat + +use Board.Board + +-- uncommenting these gives errors from NPE to array index out of bounds -1, -2 +-- x = 1 +-- y = 2 + +ex = match Board 77 88 99 + with Board a b c -> c + +-- yields: + + +-- master> +-- I was expecting an indented block following the`of` keyword +-- but instead found an outdent: +-- +-- 12 | with Board a b c -> c +-- SourcePos {sourceName = "/Users/pchiusano/work/unison/unison-src/tests/tictactoe0-array-oob1.u", sourceLine = Pos 12, sourceColumn = Pos 3} + +-- What's with the `SourcePos` default show instance here? 
+-- Expecting it to just color the token or something diff --git a/unison-src/errors/rank2a.u b/unison-src/errors/rank2a.u new file mode 100644 index 0000000000..4f01835c12 --- /dev/null +++ b/unison-src/errors/rank2a.u @@ -0,0 +1,8 @@ + +-- We expect this to not typecheck since a `Nat -> Nat` cannot +-- be passed where a `∀ a . a -> a` is expected. +rank2a : (Nat -> Nat) -> Nat +rank2a = + inner : (∀ a . a -> a) -> Nat + inner f = 42 + inner diff --git a/unison-src/errors/seq-concat-constant-length.u b/unison-src/errors/seq-concat-constant-length.u new file mode 100644 index 0000000000..5f5ca9fc16 --- /dev/null +++ b/unison-src/errors/seq-concat-constant-length.u @@ -0,0 +1,3 @@ +test : [a] -> ([a], [a]) +test l = match l with + x ++ y -> (x, y) diff --git a/unison-src/errors/state4.u b/unison-src/errors/state4.u new file mode 100644 index 0000000000..b4890f65e7 --- /dev/null +++ b/unison-src/errors/state4.u @@ -0,0 +1,13 @@ +--State4 ability +ability State se2 where + put : ∀ se . se -> {State se} () + get : ∀ se . 
() -> {State se} se +-- binding is not guarded by a lambda, it only can access +-- ambient abilities (which will be empty) +ex1 : {State Int} () +ex1 = + y = State.get + State.put (y Int.+ +1) + () +() + diff --git a/unison-src/errors/tdnr.u b/unison-src/errors/tdnr.u new file mode 100644 index 0000000000..c1a98cfc0b --- /dev/null +++ b/unison-src/errors/tdnr.u @@ -0,0 +1,3 @@ +foo a b = a + b + +foo diff --git a/unison-src/errors/tdnr2.u b/unison-src/errors/tdnr2.u new file mode 100644 index 0000000000..a9924c0703 --- /dev/null +++ b/unison-src/errors/tdnr2.u @@ -0,0 +1 @@ +2.0 + 4 diff --git a/unison-src/errors/tdnr3.u b/unison-src/errors/tdnr3.u new file mode 100644 index 0000000000..275847929c --- /dev/null +++ b/unison-src/errors/tdnr3.u @@ -0,0 +1,10 @@ +-- + Should get resolved to Nat.+, making this fail + +x : Nat +x = 42 + +Foo.z : Float +Foo.z = 4.0 + +a = x + z + diff --git a/unison-src/errors/term-functor-inspired/effect1.u b/unison-src/errors/term-functor-inspired/effect1.u new file mode 100644 index 0000000000..1c3f007c35 --- /dev/null +++ b/unison-src/errors/term-functor-inspired/effect1.u @@ -0,0 +1,9 @@ +ability State s where + get : () -> {State s} s + set : s -> {State s} () + +x : {State Nat} Nat +x = + State.get () + +() diff --git a/unison-src/errors/term-functor-inspired/if-body-mismatch.u b/unison-src/errors/term-functor-inspired/if-body-mismatch.u new file mode 100644 index 0000000000..b0371175d8 --- /dev/null +++ b/unison-src/errors/term-functor-inspired/if-body-mismatch.u @@ -0,0 +1,3 @@ +if true +then 1 +else 1.0 diff --git a/unison-src/errors/term-functor-inspired/if-cond-not-bool.u b/unison-src/errors/term-functor-inspired/if-cond-not-bool.u new file mode 100644 index 0000000000..d132b2abd3 --- /dev/null +++ b/unison-src/errors/term-functor-inspired/if-cond-not-bool.u @@ -0,0 +1 @@ +if "true" then 1 else 1 diff --git a/unison-src/errors/term-functor-inspired/mismatched-case-result-types.u 
b/unison-src/errors/term-functor-inspired/mismatched-case-result-types.u new file mode 100644 index 0000000000..3aed71fd9f --- /dev/null +++ b/unison-src/errors/term-functor-inspired/mismatched-case-result-types.u @@ -0,0 +1,5 @@ +--mismatched case result types +type Optional a = None | Some a +match Optional.Some 3 with + x -> 1 + y -> "boo" diff --git a/unison-src/errors/type-apply.u b/unison-src/errors/type-apply.u new file mode 100644 index 0000000000..c44b882242 --- /dev/null +++ b/unison-src/errors/type-apply.u @@ -0,0 +1,15 @@ +--Type.apply +type List a = Nil | Cons a (List a) +map : ∀ a b . (a -> b) -> List a -> List b +map f = cases + List.Nil -> List.Nil + List.Cons h t -> List.Cons h (map f t) -- should not typecheck, missing (f h) +-- definitely should not typecheck! +map2 : ∀ a . a +map2 = map +c = List.Cons +z = List.Nil +ex = c 1 (c 2 (c 3 z)) +pureMap : List Int -- should fail, output is a `List Text` +pureMap = map (a -> "hi") ex +() diff --git a/unison-src/errors/type-functor-inspired/app2.u b/unison-src/errors/type-functor-inspired/app2.u new file mode 100644 index 0000000000..b9b422b846 --- /dev/null +++ b/unison-src/errors/type-functor-inspired/app2.u @@ -0,0 +1,4 @@ +type Optional a = Some a | None +app' : Optional Int +app' = 3 +() diff --git a/unison-src/errors/type-functor-inspired/arrow1.u b/unison-src/errors/type-functor-inspired/arrow1.u new file mode 100644 index 0000000000..630e157c2e --- /dev/null +++ b/unison-src/errors/type-functor-inspired/arrow1.u @@ -0,0 +1,3 @@ +arrow : Int -> Int -> Int +arrow a = 3 +() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/effect2.u b/unison-src/errors/type-functor-inspired/effect2.u new file mode 100644 index 0000000000..90615b8ea8 --- /dev/null +++ b/unison-src/errors/type-functor-inspired/effect2.u @@ -0,0 +1,11 @@ +ability Abort where + Abort : forall a . () -> {Abort} a + +ability Abort2 where + Abort2 : forall a . () -> {Abort2} a + Abort2' : forall a . 
() -> {Abort2} a + +ability' : Nat -> { Abort } Int +ability' n = Abort2.Abort2 () + +() diff --git a/unison-src/errors/type-functor-inspired/forall-arrow.u b/unison-src/errors/type-functor-inspired/forall-arrow.u new file mode 100644 index 0000000000..47368c44bb --- /dev/null +++ b/unison-src/errors/type-functor-inspired/forall-arrow.u @@ -0,0 +1,3 @@ +id : forall a . a -> a +id x = 3 +() diff --git a/unison-src/errors/type-functor-inspired/forall-arrow2.u b/unison-src/errors/type-functor-inspired/forall-arrow2.u new file mode 100644 index 0000000000..5512aeb417 --- /dev/null +++ b/unison-src/errors/type-functor-inspired/forall-arrow2.u @@ -0,0 +1,4 @@ +f2 : forall a . a -> a -> a +f2 x = x + +() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/forall-arrow3.u b/unison-src/errors/type-functor-inspired/forall-arrow3.u new file mode 100644 index 0000000000..c9a4dae153 --- /dev/null +++ b/unison-src/errors/type-functor-inspired/forall-arrow3.u @@ -0,0 +1,4 @@ +const : forall a b . a -> b -> a +const a b = 3 + +() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/need-nonstructural-types.uu b/unison-src/errors/type-functor-inspired/need-nonstructural-types.uu new file mode 100644 index 0000000000..dc731e635f --- /dev/null +++ b/unison-src/errors/type-functor-inspired/need-nonstructural-types.uu @@ -0,0 +1,12 @@ +ability Abort where + Abort : forall a . () -> {Abort} a + +ability Abort2 where + Abort2 : forall a . 
() -> {Abort2} a + +ability' : Nat -> { Abort } Int +ability' n = Abort2.Abort2 () + +() + +-- oops, Abort and Abort2 end up being synonyms diff --git a/unison-src/errors/type-functor-inspired/parens.u b/unison-src/errors/type-functor-inspired/parens.u new file mode 100644 index 0000000000..22d02da2db --- /dev/null +++ b/unison-src/errors/type-functor-inspired/parens.u @@ -0,0 +1,4 @@ +type Optional a = Some a | None +y : (Optional Int) +y = 3 +() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/subtuple.u b/unison-src/errors/type-functor-inspired/subtuple.u new file mode 100644 index 0000000000..f1aab6f7fd --- /dev/null +++ b/unison-src/errors/type-functor-inspired/subtuple.u @@ -0,0 +1,5 @@ +type Optional a = Some a | None +z' : (Optional Int, Optional Text, Optional Float) +z' = (None, 3) + +() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/synthesizeApp.u b/unison-src/errors/type-functor-inspired/synthesizeApp.u new file mode 100644 index 0000000000..833909d07f --- /dev/null +++ b/unison-src/errors/type-functor-inspired/synthesizeApp.u @@ -0,0 +1,4 @@ +foo : a -> a -> Nat +foo x z = 42 + +foo +1 "hi" diff --git a/unison-src/errors/type-functor-inspired/tuple.u b/unison-src/errors/type-functor-inspired/tuple.u new file mode 100644 index 0000000000..e7f0019f78 --- /dev/null +++ b/unison-src/errors/type-functor-inspired/tuple.u @@ -0,0 +1,4 @@ +type Optional a = Some a | None +z : (Optional Int, Optional Text, Optional Float) +z = 3 +() \ No newline at end of file diff --git a/unison-src/errors/type-functor-inspired/tuple2.u b/unison-src/errors/type-functor-inspired/tuple2.u new file mode 100644 index 0000000000..08351f1a9a --- /dev/null +++ b/unison-src/errors/type-functor-inspired/tuple2.u @@ -0,0 +1,3 @@ +y : (Nat, Optional Int, Text) +y = (42, 3, "") +y diff --git a/unison-src/errors/type-functor-inspired/unit.u b/unison-src/errors/type-functor-inspired/unit.u new file mode 100644 index 
0000000000..29b6bdf363 --- /dev/null +++ b/unison-src/errors/type-functor-inspired/unit.u @@ -0,0 +1,3 @@ +x : () +x = 3 +() diff --git a/unison-src/errors/unexpected-loop.u b/unison-src/errors/unexpected-loop.u new file mode 100644 index 0000000000..16cada0892 --- /dev/null +++ b/unison-src/errors/unexpected-loop.u @@ -0,0 +1,11 @@ +--Abort +ability Abort where + Abort : forall a . () -> {Abort} a + +use Nat + + +bork = u -> 1 + (Abort.Abort ()) + +(bork : Nat) + +-- fails with loop instead of with type mismatch in commit 2819c206acf80f926c6d970a4ffd47c961fa4502 diff --git a/unison-src/errors/unresolved-symbol-1.u b/unison-src/errors/unresolved-symbol-1.u new file mode 100644 index 0000000000..fd0eef6db0 --- /dev/null +++ b/unison-src/errors/unresolved-symbol-1.u @@ -0,0 +1,6 @@ +let + (|>) : a -> (a -> WHat) -> b -- unresolved symbol + a |> f = f a + Stream.fromInt -3 + |> Stream.take 10 + |> Stream.foldLeft +0 (Int.+) diff --git a/unison-src/errors/unsound-cont.u b/unison-src/errors/unsound-cont.u new file mode 100644 index 0000000000..f05745d9fa --- /dev/null +++ b/unison-src/errors/unsound-cont.u @@ -0,0 +1,12 @@ + +ability Ask a where + ask : {Ask a} a + +supply : Text -> Request (Ask Text) a -> a +supply t = cases + {a} -> a + -- `k` should be of type `Text -> Request Ask a`, + -- so calling it with `()` here should be a type error + {Ask.ask -> k} -> handle k () with supply t + +() diff --git a/unison-src/example-errors.u b/unison-src/example-errors.u new file mode 100644 index 0000000000..e0f248552c --- /dev/null +++ b/unison-src/example-errors.u @@ -0,0 +1,181 @@ +-- Each of the programs in this file generates an error of some sort. +-- We want error messages to be awesome so for each wrong program we +-- give "the ideal error message" and a uniform, simple algorithm for producing +-- that message. 
+ +ex1 : Int +ex1 = "hello" + +{- Ideal error: + + Type mismatch in example-errors.u, line 6: `Text` vs `Int` + + | + 6 | ex1 = "hello" + | ^^^^^^^ + + `Text` comes from a literal: + | + 6 | ex1 = "hello" + | ^^^^^^^ + + `Int` comes from type signature: + | + 5 | ex1 : Int + | ^^^^^ + + Thoughts: + + * The first line marker is the _site of the error_ + * The next two line markers are the _provenance of the mismatched types_ + * In this case, the provenance of `Text` is the same location as the error, + but this won't always be the case. Optimized message just omits the + site of the error if it matches the provenance location of either of + the mismatched types. + * The backticks might be in color, formatted as bold, whatever, be + thoughtful about using visual indicators to draw attention to most important + aspects of the message. + * For implementation - when `check e t` hits `Term.Text'`, it does: + `subtype Type.text t`, but `Type.text` requires a `loc`, and we'll provide + `ABT.annotation e`. This logic can go in synthesize. + * Q: should we just ALWAYS set the location of a synthesized type + to be the location of the term that type was synthesized from? + * A: No, + + foo : Text + foo x = + y = x + 1 + x + + + In this example, x will synthesize to the type `Int`, but the location + of that type shouldn't just be + * When you synthesize a type for a lambda, `x -> x` + the location of the synthesized type `∀ a . a -> a` + is just the location of `x -> x`. + The location of the `a` also needs to be this same location. + Might want to have a special kind of location which indicates + that the location came from an inferred type. 
+-} + +ex2 : Int -- `Int` comes from +ex2 = + y = "hello" -- `Text` comes from "hello" + y + +{- Ideal error: + +example-errors.u contained errors: + + The highlighted expression on line 42 + | +42 | y + | ^ + was inferred to have type `Text` but was expected to have type `Int` + + `Int` comes from type signature: + | +39 | ex2 : Int + | ^^^^^ + + `Text` was inferred from a literal: + | +41 | y = "hello" + | ^^^^^^^ + + + Thoughts: + * `y` is the body of the block, and the body of the block is expected to + have type `Int` + * Maybe use bold or some visual indicator rather than the ^^^ nonsense + * Do we include parent scopes? +-} + +ex3 = + x = 1 + 1 + if x then 42 else "hkjh" + +{- + +example-errors.u contained 1 error: + +-- 1 ------------------------------------------------------- + + The highlighted expression on line 73 + | +73 | if x then 42 else -1 + | ^ + has type `Nat` but was expected to have type `Boolean` + + `Boolean` comes from `if`, whose first argument must be of type `Boolean` + + `Nat` comes from line 72: + | +72 | x = 1 + 1 + | ^ + x = id 42 + ^^^^^ + ^^ + + * "The function <...> expects its th argument to be of type <...>, but + on line it appears to have type <...>." + * `synthesizeApp` can take an argument for what numberth argument it's + testing + * "An if-expression expects its condition to be of type Boolean, but + * In a `synthesizeApp`, report the function input type first, as the + "expected" type. + * `if` and other builtin syntax should have some special treatment. + * Might want signature of function whose application was involved in the + mismatched types. (But don't necessarily want to dump this on users up + front, like GHC tells you the type of every binding in the vicinity) + * Issue maybe not that you didn't know the function's type, but that + you were wrong about the types of the args you passed; also, you might + accidentally omit an argument, supply an extra argument, or supply + arguments in the wrong order. 
+ * We don't bother reporting the other type errors in the same expression, + but we potentially could have an algorithm that could do more fine-grained + recovery. + * Not totally sure on the location of the `Nat`, but the thought + is that if `+` is monomorophic in its return type (always `Nat`), + then it makes sense to attribute the `Nat` of `x` to the `+` call site. + (why not attibute that to the definition site of `+`?) + * Is this inconsistent with treatment of `Boolean` location?? + * Since `if` is monomorphic in its first arg, we could use the same logic to + say that the location of that Boolean argument type is the call site + rather than its definition site. + * When encounter an error for a variable x, can add that to an erroneous + variables set - if you ever encounter a subexpression that references + those variables, skip over it? +-} + +ex4 f x = + if f x then f 42 else 50 + +{- + Type mismatch on line : `Nat` vs `Boolean`. + + `Nat` comes from the literal: + | +42 | if f x then f 42 else 50 + | ^^ + ∀ a -> [(a, Tree (Text a)) -> (a -> Text -> Text) -> Tree Text + + ∀ a . Boolean -> a -> a + + Not sure what to report for the origin of the `Boolean`: + + `Boolean` comes from `f 42`?? + `Boolean` comes from `f : Nat -> Boolean`?? + But then why does `f` have that type? + Because `if` takes a `Boolean` as first arg.. + `Boolean` comes from `if` + + `f 42` has type `Boolean` because `f` had to have type `x -> Boolean` + because `f x` was passed as the first argument of `if`: + + if f x then f 42 else 50 + ^^^ + Arya thought - when there's a type mismatch between A and B, and A and B + are both inferred types, might make sense to provide more info about provenance. 
+-} + diff --git a/unison-src/new-runtime-transcripts/README b/unison-src/new-runtime-transcripts/README new file mode 100644 index 0000000000..f61a2fe305 --- /dev/null +++ b/unison-src/new-runtime-transcripts/README @@ -0,0 +1 @@ +Transcripts in this directory are only run using the new runtime. diff --git a/unison-src/new-runtime-transcripts/fix1709.md b/unison-src/new-runtime-transcripts/fix1709.md new file mode 100644 index 0000000000..bc254f3b24 --- /dev/null +++ b/unison-src/new-runtime-transcripts/fix1709.md @@ -0,0 +1,15 @@ +```unison +id x = x + +id2 x = + z = 384849 + id x +``` + +```ucm +.scratch> add +``` + +```unison +> id2 "hi" +``` diff --git a/unison-src/new-runtime-transcripts/fix1709.output.md b/unison-src/new-runtime-transcripts/fix1709.output.md new file mode 100644 index 0000000000..532194c862 --- /dev/null +++ b/unison-src/new-runtime-transcripts/fix1709.output.md @@ -0,0 +1,49 @@ +```unison +id x = x + +id2 x = + z = 384849 + id x +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + id : x -> x + id2 : x -> x + +``` +```ucm + ☝️ The namespace .scratch is empty. + +.scratch> add + + ⍟ I've added these definitions: + + id : x -> x + id2 : x -> x + +``` +```unison +> id2 "hi" +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
+ + 1 | > id2 "hi" + ⧩ + "hi" + +``` diff --git a/unison-src/new-runtime-transcripts/hashing.md b/unison-src/new-runtime-transcripts/hashing.md new file mode 100644 index 0000000000..c986f25c46 --- /dev/null +++ b/unison-src/new-runtime-transcripts/hashing.md @@ -0,0 +1,264 @@ +# Hashing and HMAC builtins + +```ucm:hide +.> builtins.merge +.> cd builtin +``` + +Unison has cryptographic builtins for hashing and computing [HMACs](https://en.wikipedia.org/wiki/HMAC) (hash-based message authentication codes). This transcript shows their usage and has some test cases. + +## Setup + +You can skip this section, which is just needed to make the transcript self-contained. In order to print out and test these hashes we will be using some builtins for base16 (aka hexidecimal) encoding and decoding. + +```ucm +.builtin> ls Bytes +``` + +Notice the `fromBase16` and `toBase16` functions. Here's some (somewhat inefficient) convenience functions for converting `Bytes` to and from base-16 `Text`. These could be replaced by use of `Text.toUtf8` and `Text.tryFromUtf8` once those builtins exist: + +```unison:hide +a |> f = f a + +List.map f as = + go acc = cases + [] -> acc + (h +: t) -> go (acc :+ f h) t + go [] as + +-- not very efficient, but okay for testing +hex : Bytes -> Text +hex b = + Bytes.toBase16 b + |> Bytes.toList + |> List.map Char.fromNat + |> Text.fromCharList + +ascii : Text -> Bytes +ascii t = Text.toCharList t |> List.map Char.toNat |> Bytes.fromList + +fromHex : Text -> Bytes +fromHex txt = + match Text.toCharList txt + |> List.map Char.toNat + |> Bytes.fromList + |> Bytes.fromBase16 + with + Left e -> bug e + Right bs -> bs + +check : Boolean -> [Result] +check b = if b then [Result.Ok "Passed."] + else [Result.Fail "Failed."] + +test> hex.tests.ex1 = check let + s = "3984af9b" + hex (fromHex s) == s +``` + +```ucm:hide +.scratch> add +``` + +The test shows that `hex (fromHex str) == str` as expected. 
+ +```ucm +.scratch> test +``` + +## API overview + +Here's a few usage examples: + +```unison +ex1 = fromHex "2947db" + |> crypto.hashBytes Sha3_512 + |> hex + +ex2 = fromHex "02f3ab" + |> crypto.hashBytes Blake2b_256 + |> hex + +mysecret : Bytes +mysecret = fromHex "237be2" + +ex3 = fromHex "50d3ab" + |> crypto.hmacBytes Sha2_256 mysecret + |> hex + +> ex1 +> ex2 +> ex3 +``` + +And here's the full API: + +```ucm +.builtin.crypto> find +``` + +Note that the universal versions of `hash` and `hmac` are currently unimplemented and will bomb at runtime: + +``` +> crypto.hash Sha3_256 (fromHex "3849238492") +``` + +## Hashing tests + +Here are some test vectors (taken from [here](https://www.di-mgt.com.au/sha_testvectors.html) and [here](https://en.wikipedia.org/wiki/BLAKE_(hash_function))) for the various hashing algorithms: + +```unison:hide +ex alg input expected = check let + hashBytes alg (ascii input) == + fromHex expected + +test> sha3_512.tests.ex1 = + ex Sha3_512 + "abc" + "b751850b1a57168a5693cd924b6b096e08f621827444f70d884f5d0240d2712e10e116e9192af3c91a7ec57647e3934057340b4cf408d5a56592f8274eec53f0" + +test> sha3_512.tests.ex2 = + ex Sha3_512 + "" + "a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26" + +test> sha3_512.tests.ex3 = + ex Sha3_512 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + "04a371e84ecfb5b8b77cb48610fca8182dd457ce6f326a0fd3d7ec2f1e91636dee691fbe0c985302ba1b0d8dc78c086346b533b49c030d99a27daf1139d6e75e" + +test> sha3_512.tests.ex4 = + ex Sha3_512 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + "afebb2ef542e6579c50cad06d2e578f9f8dd6881d7dc824d26360feebf18a4fa73e3261122948efcfd492e74e82e2189ed0fb440d187f382270cb455f21dd185" + +test> sha3_256.tests.ex1 = + ex Sha3_256 + "abc" + "3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532" + +test> sha3_256.tests.ex2 = + ex 
Sha3_256 + "" + "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a" + +test> sha3_256.tests.ex3 = + ex Sha3_256 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + "41c0dba2a9d6240849100376a8235e2c82e1b9998a999e21db32dd97496d3376" + +test> sha3_256.tests.ex4 = + ex Sha3_256 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + "916f6061fe879741ca6469b43971dfdb28b1a32dc36cb3254e812be27aad1d18" + +test> sha2_512.tests.ex1 = + ex Sha2_512 + "abc" + "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f" + +test> sha2_512.tests.ex2 = + ex Sha2_512 + "" + "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e" + +test> sha2_512.tests.ex3 = + ex Sha2_512 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + "204a8fc6dda82f0a0ced7beb8e08a41657c16ef468b228a8279be331a703c33596fd15c13b1b07f9aa1d3bea57789ca031ad85c7a71dd70354ec631238ca3445" + +test> sha2_512.tests.ex4 = + ex Sha2_512 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909" + +test> sha2_256.tests.ex1 = + ex Sha2_256 + "abc" + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad" + +test> sha2_256.tests.ex2 = + ex Sha2_256 + "" + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + +test> sha2_256.tests.ex3 = + ex Sha2_256 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1" + +test> sha2_256.tests.ex4 = + ex Sha2_256 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + 
"cf5b16a778af8380036ce59e7b0492370b249b11e8f07a51afac45037afee9d1" + +test> blake2s_256.tests.ex1 = + ex Blake2s_256 + "" + "69217a3079908094e11121d042354a7c1f55b6482ca1a51e1b250dfd1ed0eef9" + +test> blake2b_512.tests.ex1 = + ex Blake2b_512 + "" + "786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce" + +test> blake2b_512.tests.ex2 = + ex Blake2b_512 + "The quick brown fox jumps over the lazy dog" + "a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918" + +test> blake2b_512.tests.ex3 = + ex Blake2b_512 + "The quick brown fox jumps over the lazy dof" + "ab6b007747d8068c02e25a6008db8a77c218d94f3b40d2291a7dc8a62090a744c082ea27af01521a102e42f480a31e9844053f456b4b41e8aa78bbe5c12957bb" +``` + +```ucm:hide +.scratch> add +``` + +```ucm +.scratch> test +``` + +## HMAC tests + +These test vectors are taken from [RFC 4231](https://tools.ietf.org/html/rfc4231#section-4.3). + +```unison +ex' alg secret msg expected = check let + hmacBytes alg (fromHex secret) (ascii msg) == + fromHex expected + +test> hmac_sha2_256.tests.ex1 = + ex' Sha2_256 + "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b" + "Hi There" + "b0344c61d8db38535ca8afceaf0bf12b881dc200c9833da726e9376c2e32cff7" + +test> hmac_sha2_512.tests.ex1 = + ex' Sha2_512 + "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b" + "Hi There" + "87aa7cdea5ef619d4ff0b4241a1d6cb02379f4e2ce4ec2787ad0b30545e17cdedaa833b7d6b8a702038b274eaea3f4e4be9d914eeb61f1702e696c203a126854" + +test> hmac_sha2_256.tests.ex2 = + ex' Sha2_256 + "4a656665" + "what do ya want for nothing?" + "5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843" + +test> hmac_sha2_512.tests.ex2 = + ex' Sha2_512 + "4a656665" + "what do ya want for nothing?" 
+ "164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737" +``` + +```ucm:hide +.scratch> add +``` + +```ucm +.scratch> test +``` diff --git a/unison-src/new-runtime-transcripts/hashing.output.md b/unison-src/new-runtime-transcripts/hashing.output.md new file mode 100644 index 0000000000..14c35df8b7 --- /dev/null +++ b/unison-src/new-runtime-transcripts/hashing.output.md @@ -0,0 +1,418 @@ +# Hashing and HMAC builtins + +Unison has cryptographic builtins for hashing and computing [HMACs](https://en.wikipedia.org/wiki/HMAC) (hash-based message authentication codes). This transcript shows their usage and has some test cases. + +## Setup + +You can skip this section, which is just needed to make the transcript self-contained. In order to print out and test these hashes we will be using some builtins for base16 (aka hexidecimal) encoding and decoding. + +```ucm +.builtin> ls Bytes + + 1. ++ (Bytes -> Bytes -> Bytes) + 2. at (Nat -> Bytes -> Optional Nat) + 3. drop (Nat -> Bytes -> Bytes) + 4. empty (Bytes) + 5. flatten (Bytes -> Bytes) + 6. fromBase16 (Bytes -> Either Text Bytes) + 7. fromBase32 (Bytes -> Either Text Bytes) + 8. fromBase64 (Bytes -> Either Text Bytes) + 9. fromBase64UrlUnpadded (Bytes -> Either Text Bytes) + 10. fromList ([Nat] -> Bytes) + 11. size (Bytes -> Nat) + 12. take (Nat -> Bytes -> Bytes) + 13. toBase16 (Bytes -> Bytes) + 14. toBase32 (Bytes -> Bytes) + 15. toBase64 (Bytes -> Bytes) + 16. toBase64UrlUnpadded (Bytes -> Bytes) + 17. toList (Bytes -> [Nat]) + +``` +Notice the `fromBase16` and `toBase16` functions. Here's some (somewhat inefficient) convenience functions for converting `Bytes` to and from base-16 `Text`. 
These could be replaced by use of `Text.toUtf8` and `Text.tryFromUtf8` once those builtins exist: + +```unison +a |> f = f a + +List.map f as = + go acc = cases + [] -> acc + (h +: t) -> go (acc :+ f h) t + go [] as + +-- not very efficient, but okay for testing +hex : Bytes -> Text +hex b = + Bytes.toBase16 b + |> Bytes.toList + |> List.map Char.fromNat + |> Text.fromCharList + +ascii : Text -> Bytes +ascii t = Text.toCharList t |> List.map Char.toNat |> Bytes.fromList + +fromHex : Text -> Bytes +fromHex txt = + match Text.toCharList txt + |> List.map Char.toNat + |> Bytes.fromList + |> Bytes.fromBase16 + with + Left e -> bug e + Right bs -> bs + +check : Boolean -> [Result] +check b = if b then [Result.Ok "Passed."] + else [Result.Fail "Failed."] + +test> hex.tests.ex1 = check let + s = "3984af9b" + hex (fromHex s) == s +``` + +The test shows that `hex (fromHex str) == str` as expected. + +```ucm +.scratch> test + + Cached test results (`help testcache` to learn more) + + ◉ hex.tests.ex1 Passed. + + ✅ 1 test(s) passing + + Tip: Use view hex.tests.ex1 to view the source of a test. + +``` +## API overview + +Here's a few usage examples: + +```unison +ex1 = fromHex "2947db" + |> crypto.hashBytes Sha3_512 + |> hex + +ex2 = fromHex "02f3ab" + |> crypto.hashBytes Blake2b_256 + |> hex + +mysecret : Bytes +mysecret = fromHex "237be2" + +ex3 = fromHex "50d3ab" + |> crypto.hmacBytes Sha2_256 mysecret + |> hex + +> ex1 +> ex2 +> ex3 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex1 : Text + ex2 : Text + ex3 : Text + mysecret : Bytes + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
+ + 16 | > ex1 + ⧩ + "f3c342040674c50ab45cb1874b6dbc81447af5958201ed4127e03b56725664d7cc44b88b9afadb371898fcaf5d0adeff60837ef93b514f99da43539d79820c99" + + 17 | > ex2 + ⧩ + "84bb437497f26fc33c51e57e64c37958c3918d50dfe75b91c661a85c2f8f8304" + + 18 | > ex3 + ⧩ + "c692fc54df921f7fa51aad9178327c5a097784b02212d571fb40facdfff881fd" + +``` +And here's the full API: + +```ucm +.builtin.crypto> find + + 1. builtin type HashAlgorithm + 2. HashAlgorithm.Blake2b_256 : HashAlgorithm + 3. HashAlgorithm.Blake2b_512 : HashAlgorithm + 4. HashAlgorithm.Blake2s_256 : HashAlgorithm + 5. HashAlgorithm.Sha2_256 : HashAlgorithm + 6. HashAlgorithm.Sha2_512 : HashAlgorithm + 7. HashAlgorithm.Sha3_256 : HashAlgorithm + 8. HashAlgorithm.Sha3_512 : HashAlgorithm + 9. hash : HashAlgorithm -> a -> Bytes + 10. hashBytes : HashAlgorithm -> Bytes -> Bytes + 11. hmac : HashAlgorithm -> Bytes -> a -> Bytes + 12. hmacBytes : HashAlgorithm -> Bytes -> Bytes -> Bytes + + +``` +Note that the universal versions of `hash` and `hmac` are currently unimplemented and will bomb at runtime: + +``` +> crypto.hash Sha3_256 (fromHex "3849238492") + +``` + +## Hashing tests + +Here are some test vectors (taken from [here](https://www.di-mgt.com.au/sha_testvectors.html) and [here](https://en.wikipedia.org/wiki/BLAKE_(hash_function))) for the various hashing algorithms: + +```unison +ex alg input expected = check let + hashBytes alg (ascii input) == + fromHex expected + +test> sha3_512.tests.ex1 = + ex Sha3_512 + "abc" + "b751850b1a57168a5693cd924b6b096e08f621827444f70d884f5d0240d2712e10e116e9192af3c91a7ec57647e3934057340b4cf408d5a56592f8274eec53f0" + +test> sha3_512.tests.ex2 = + ex Sha3_512 + "" + "a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26" + +test> sha3_512.tests.ex3 = + ex Sha3_512 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + 
"04a371e84ecfb5b8b77cb48610fca8182dd457ce6f326a0fd3d7ec2f1e91636dee691fbe0c985302ba1b0d8dc78c086346b533b49c030d99a27daf1139d6e75e" + +test> sha3_512.tests.ex4 = + ex Sha3_512 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + "afebb2ef542e6579c50cad06d2e578f9f8dd6881d7dc824d26360feebf18a4fa73e3261122948efcfd492e74e82e2189ed0fb440d187f382270cb455f21dd185" + +test> sha3_256.tests.ex1 = + ex Sha3_256 + "abc" + "3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532" + +test> sha3_256.tests.ex2 = + ex Sha3_256 + "" + "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a" + +test> sha3_256.tests.ex3 = + ex Sha3_256 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + "41c0dba2a9d6240849100376a8235e2c82e1b9998a999e21db32dd97496d3376" + +test> sha3_256.tests.ex4 = + ex Sha3_256 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + "916f6061fe879741ca6469b43971dfdb28b1a32dc36cb3254e812be27aad1d18" + +test> sha2_512.tests.ex1 = + ex Sha2_512 + "abc" + "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f" + +test> sha2_512.tests.ex2 = + ex Sha2_512 + "" + "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e" + +test> sha2_512.tests.ex3 = + ex Sha2_512 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + "204a8fc6dda82f0a0ced7beb8e08a41657c16ef468b228a8279be331a703c33596fd15c13b1b07f9aa1d3bea57789ca031ad85c7a71dd70354ec631238ca3445" + +test> sha2_512.tests.ex4 = + ex Sha2_512 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909" + +test> sha2_256.tests.ex1 = + 
ex Sha2_256 + "abc" + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad" + +test> sha2_256.tests.ex2 = + ex Sha2_256 + "" + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + +test> sha2_256.tests.ex3 = + ex Sha2_256 + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1" + +test> sha2_256.tests.ex4 = + ex Sha2_256 + "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" + "cf5b16a778af8380036ce59e7b0492370b249b11e8f07a51afac45037afee9d1" + +test> blake2s_256.tests.ex1 = + ex Blake2s_256 + "" + "69217a3079908094e11121d042354a7c1f55b6482ca1a51e1b250dfd1ed0eef9" + +test> blake2b_512.tests.ex1 = + ex Blake2b_512 + "" + "786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce" + +test> blake2b_512.tests.ex2 = + ex Blake2b_512 + "The quick brown fox jumps over the lazy dog" + "a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918" + +test> blake2b_512.tests.ex3 = + ex Blake2b_512 + "The quick brown fox jumps over the lazy dof" + "ab6b007747d8068c02e25a6008db8a77c218d94f3b40d2291a7dc8a62090a744c082ea27af01521a102e42f480a31e9844053f456b4b41e8aa78bbe5c12957bb" +``` + +```ucm +.scratch> test + + Cached test results (`help testcache` to learn more) + + ◉ blake2s_256.tests.ex1 Passed. + ◉ sha2_256.tests.ex2 Passed. + ◉ hex.tests.ex1 Passed. + ◉ sha2_256.tests.ex3 Passed. + ◉ sha3_512.tests.ex2 Passed. + ◉ blake2b_512.tests.ex1 Passed. + ◉ sha3_512.tests.ex1 Passed. + ◉ sha3_256.tests.ex2 Passed. + ◉ sha3_512.tests.ex3 Passed. + ◉ sha2_512.tests.ex1 Passed. + ◉ sha2_256.tests.ex4 Passed. + ◉ blake2b_512.tests.ex3 Passed. + ◉ sha3_256.tests.ex1 Passed. + ◉ sha2_512.tests.ex4 Passed. + ◉ sha3_256.tests.ex4 Passed. + ◉ sha3_256.tests.ex3 Passed. 
+ ◉ sha3_512.tests.ex4 Passed. + ◉ sha2_256.tests.ex1 Passed. + ◉ sha2_512.tests.ex3 Passed. + ◉ blake2b_512.tests.ex2 Passed. + ◉ sha2_512.tests.ex2 Passed. + + ✅ 21 test(s) passing + + Tip: Use view blake2s_256.tests.ex1 to view the source of a + test. + +``` +## HMAC tests + +These test vectors are taken from [RFC 4231](https://tools.ietf.org/html/rfc4231#section-4.3). + +```unison +ex' alg secret msg expected = check let + hmacBytes alg (fromHex secret) (ascii msg) == + fromHex expected + +test> hmac_sha2_256.tests.ex1 = + ex' Sha2_256 + "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b" + "Hi There" + "b0344c61d8db38535ca8afceaf0bf12b881dc200c9833da726e9376c2e32cff7" + +test> hmac_sha2_512.tests.ex1 = + ex' Sha2_512 + "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b" + "Hi There" + "87aa7cdea5ef619d4ff0b4241a1d6cb02379f4e2ce4ec2787ad0b30545e17cdedaa833b7d6b8a702038b274eaea3f4e4be9d914eeb61f1702e696c203a126854" + +test> hmac_sha2_256.tests.ex2 = + ex' Sha2_256 + "4a656665" + "what do ya want for nothing?" + "5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843" + +test> hmac_sha2_512.tests.ex2 = + ex' Sha2_512 + "4a656665" + "what do ya want for nothing?" + "164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex' : HashAlgorithm + -> Text + -> Text + -> Text + -> [Result] + hmac_sha2_256.tests.ex1 : [Result] + hmac_sha2_256.tests.ex2 : [Result] + hmac_sha2_512.tests.ex1 : [Result] + hmac_sha2_512.tests.ex2 : [Result] + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 6 | ex' Sha2_256 + + ✅ Passed Passed. + + 12 | ex' Sha2_512 + + ✅ Passed Passed. + + 18 | ex' Sha2_256 + + ✅ Passed Passed. + + 24 | ex' Sha2_512 + + ✅ Passed Passed. 
+ +``` +```ucm +.scratch> test + + Cached test results (`help testcache` to learn more) + + ◉ blake2s_256.tests.ex1 Passed. + ◉ sha2_256.tests.ex2 Passed. + ◉ hex.tests.ex1 Passed. + ◉ hmac_sha2_256.tests.ex1 Passed. + ◉ sha2_256.tests.ex3 Passed. + ◉ sha3_512.tests.ex2 Passed. + ◉ blake2b_512.tests.ex1 Passed. + ◉ sha3_512.tests.ex1 Passed. + ◉ sha3_256.tests.ex2 Passed. + ◉ sha3_512.tests.ex3 Passed. + ◉ sha2_512.tests.ex1 Passed. + ◉ sha2_256.tests.ex4 Passed. + ◉ blake2b_512.tests.ex3 Passed. + ◉ sha3_256.tests.ex1 Passed. + ◉ hmac_sha2_512.tests.ex1 Passed. + ◉ sha2_512.tests.ex4 Passed. + ◉ hmac_sha2_256.tests.ex2 Passed. + ◉ sha3_256.tests.ex4 Passed. + ◉ sha3_256.tests.ex3 Passed. + ◉ sha3_512.tests.ex4 Passed. + ◉ hmac_sha2_512.tests.ex2 Passed. + ◉ sha2_256.tests.ex1 Passed. + ◉ sha2_512.tests.ex3 Passed. + ◉ blake2b_512.tests.ex2 Passed. + ◉ sha2_512.tests.ex2 Passed. + + ✅ 25 test(s) passing + + Tip: Use view blake2s_256.tests.ex1 to view the source of a + test. + +``` diff --git a/unison-src/parser-tests/GenerateErrors.hs b/unison-src/parser-tests/GenerateErrors.hs new file mode 100644 index 0000000000..795f17e693 --- /dev/null +++ b/unison-src/parser-tests/GenerateErrors.hs @@ -0,0 +1,48 @@ +{- For every file foo.u in the current directory write the parse error to foo.message.txt -} +module GenerateErrors where +import qualified Data.Text as Text +import Data.Text.IO ( readFile ) +import Prelude hiding ( readFile ) +import System.Directory ( listDirectory, getCurrentDirectory ) +import System.FilePath ( takeExtension, dropExtension ) +import System.IO ( putStrLn ) +import qualified Unison.Builtin as B +import Unison.Parser ( Err ) +import qualified Unison.Parsers as P +import Unison.PrintError ( prettyParseError ) +import Unison.Symbol ( Symbol ) +import qualified Unison.Util.ColorText as Color +import Unison.Var ( Var ) + + +unisonFilesInDir :: FilePath -> IO [String] +unisonFilesInDir p = do + files <- listDirectory p + pure $ filter ((==) ".u" . 
takeExtension) files + +unisonFilesInCurrDir :: IO [String] +unisonFilesInCurrDir = getCurrentDirectory >>= unisonFilesInDir + +errorFileName :: String -> String +errorFileName n = dropExtension n ++ ".message.txt" + +emitAsPlainTextTo :: Var v => String -> Err v -> FilePath -> IO () +emitAsPlainTextTo src e f = writeFile f plainErr + where plainErr = Color.toPlain $ prettyParseError src e + +printError :: Var v => String -> Err v -> IO () +printError src e = putStrLn $ B.showParseError src e + +processFile :: FilePath -> IO () +processFile f = do + content <- Text.unpack <$> readFile f + let res = P.parseFile f content B.names + case res of + Left err -> do + emitAsPlainTextTo content (err :: Err Symbol) (errorFileName f) + printError content err + Right _ -> putStrLn $ + "Error: " ++ f ++ " parses successfully but none of the files in this directory should parse" + +main :: IO () +main = unisonFilesInCurrDir >>= mapM_ processFile diff --git a/unison-src/parser-tests/empty-match-list.message.txt b/unison-src/parser-tests/empty-match-list.message.txt new file mode 100644 index 0000000000..d1395d8fed --- /dev/null +++ b/unison-src/parser-tests/empty-match-list.message.txt @@ -0,0 +1,3 @@ +empty-match-list.u:3:5: +unexpected = + 3 | bar = 3 diff --git a/unison-src/parser-tests/empty-match-list.u b/unison-src/parser-tests/empty-match-list.u new file mode 100644 index 0000000000..bb5224c2fb --- /dev/null +++ b/unison-src/parser-tests/empty-match-list.u @@ -0,0 +1,3 @@ +foo n = match n with + +bar = 3 diff --git a/unison-src/parser-tests/if-without-condition.message.txt b/unison-src/parser-tests/if-without-condition.message.txt new file mode 100644 index 0000000000..6b3ef1a0ef --- /dev/null +++ b/unison-src/parser-tests/if-without-condition.message.txt @@ -0,0 +1,3 @@ +if-without-condition.u:1:10: +unexpected then + 1 | foo = if then 4 else 8 diff --git a/unison-src/parser-tests/if-without-condition.u b/unison-src/parser-tests/if-without-condition.u new file mode 100644 
index 0000000000..15ca6f8d26 --- /dev/null +++ b/unison-src/parser-tests/if-without-condition.u @@ -0,0 +1 @@ +foo = if then 4 else 8 diff --git a/unison-src/remote-api.u b/unison-src/remote-api.u new file mode 100644 index 0000000000..ac29fba16c --- /dev/null +++ b/unison-src/remote-api.u @@ -0,0 +1,95 @@ +type Either a b = Left a | Right b +type Status = Running | Finished | Canceled | Error Error +type Duration = Seconds Nat +-- type Abilities e = Abilities {e} + +ability Remote loc where + fork : loc {e} + -> '{e} a + -> {Remote loc} Future loc a + +forkRegistered : (Future loc a -> {e2} ()) -> loc {e} -> '{e} a + -> {Remote loc, e2} Future loc a +forkRegistered register loc t = + future = Remote.fork loc t + register future + Future.begin future + future + + +ability Error e where error : e ->{Error e} () + +type Future loc a = Future + ('{Remote loc} () -- begin + ,'{Remote loc} () -- cancel + ,'{Remote loc} Status -- status + ,'{Remote loc, Error Future.Error} a -- join + ) +type Future.Error = UnknownFuture | UnreachableLocation | UnresponsiveLocation | Terminated | AbilityCheckFailure + +-- Ability.check : Abilities {a} -> Request {b} x -> Boolean +-- Ability.check = _ + +-- Remote.server : (loc {e} -> {e} a) -> {e} a +-- Remote.server computation = + +Future.join : Future loc a ->{Remote loc, Error Future.Error} a +Future.join = cases Future.Future (b, c, s, j) -> !j + +Future.cancel : Future loc a ->{Remote loc} () +Future.cancel = cases Future.Future (b, c, s, j) -> !c + +Future.status : Future loc a ->{Remote loc} Status +Future.status = cases Future.Future (b, c, s, j) -> !s + +Future.begin : Future loc a ->{Remote loc} () +Future.begin = cases Future.Future (b, c, s, j) -> !b + + +type UnitLoc e = UnitLoc + +-- Remote.runSequential : '{Remote UnitLoc, Error e} a -> Either e a +-- Remote.runSequential r = +-- step : Request {Remote UnitLoc} a -> a +-- step = cases +-- {a} -> a +-- {Remote.fork loc t -> k} -> +-- join = Right !t +-- cancel = () +-- 
status = Finished +-- keepalive d = () +-- handle k (Future ('join, 'cancel, 'status, keepalive)) with step +-- err : Request {Error e} a -> Either e a +-- err = cases +-- {a} -> Right a +-- {Error.error t -> k} ->handle k (Left t) with err +-- handle handle !r with step with err + +-- > Remote.runSequential + +-- use Optional Some None +-- use Either Left Right +-- Either.join : Either a (Either a b) -> Either a b +-- Either.join = cases +-- Left a -> Left a +-- Right e -> e +-- +-- parMergeSort : (a -> a -> Boolean) -> [a] ->{Remote UnitLoc, Error} [a] +-- parMergeSort (<) as = +-- -- merge : [a] -> [a] -> [a] -> [a] +-- merge z l r = +-- l0 = at 0 l +-- r0 = at 0 r +-- match (l0, r0) with +-- (None, _) -> z ++ r +-- (_, None) -> z ++ l +-- (Some l0, Some r0) -> +-- if l0 < r0 +-- then merge (z `snoc` l0) (drop 1 l) r +-- else merge (z `snoc` r0) l (drop 1 r) +-- split = size as / 2 +-- if split == 0 then as +-- else +-- fl = Remote.fork UnitLoc '(parMergeSort (<) (take split as)) +-- fr = Remote.fork UnitLoc '(parMergeSort (<) (drop split as)) +-- merge [] (Future.join fl) (Future.join fr) diff --git a/unison-src/remote.u b/unison-src/remote.u new file mode 100644 index 0000000000..08bfe7d1dc --- /dev/null +++ b/unison-src/remote.u @@ -0,0 +1,67 @@ + +-- A simple distributed computation ability +ability Remote where + + -- Spawn a new node + spawn : {Remote} Node + + -- Sequentially evaluate the given thunk on another node + -- then return to the current node when it completes + at : n -> '{Remote} a -> {Remote} a + + -- Start a computation running, returning an `r` that can be forced to + -- await the result of the computation + fork : '{Remote} a -> {Remote} ('{Remote} a) + +type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair + +replicate : Nat -> a -> [a] +replicate n a = toSequence (take n (constant a)) + +-- here's a simple usage of it - this ships the program `replicate n a` +-- to another node and evaluates it there before 
returning to the current node + +ex1 : Nat -> a -> {Remote} [a] +ex1 n a = + node = Remote.spawn -- conjures up a new node! + Remote.at node '(replicate n a) -- and transports a computation to it! + +-- Let's test out this beast! do we need to deploy our code to some EC2 instances?? +-- Gak, no not yet, we just want to test locally, let's write a handler +-- for the `Remote` ability that simulates everything locally! + +Remote.runLocal : '{Remote} a -> a +Remote.runLocal r = + step nid = cases + {a} -> a + {Remote.fork t -> k} -> handle k t with step nid + {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) + {Remote.at _ t -> k} -> handle k !t with step nid + handle !r with step (Node.Node 0) + +-- Q: where do these nodes come from? that depends on the handler - +-- you might have a handler like this, or a handler backed by an autoscaling EC2 pool... + +-- easy peasy, let's give it a go... + +> Remote.runLocal '(ex1 10 "hi") + +-- let's do some stuff in parallel on multiple nodes + +ex2 n = + -- spin up two remote computations on fresh nodes, in parallel, then combine their results + r1 = Remote.forkAt Remote.spawn '(replicate n "hi") -- returns a 'future' + r2 = Remote.forkAt Remote.spawn '(replicate n "there") + !r1 ++ !r2 + +> Remote.runLocal '(ex2 5) + +-- little helper functions used above + +Remote.forkAt : Node -> '{Remote} a -> {Remote} ('{Remote} a) +Remote.forkAt node r = Remote.fork '(Remote.at node r) + +Node.increment : Node -> Node +Node.increment n = + use Node.Node -- the constructor + match n with Node n -> Node (n + 1) diff --git a/unison-src/sheepshead.u b/unison-src/sheepshead.u new file mode 100644 index 0000000000..d0f0f8d90e --- /dev/null +++ b/unison-src/sheepshead.u @@ -0,0 +1,39 @@ +type Suit = Club | Spade | Heart | Diamond +type Card = Card Rank Suit +type Rank = A | K | Q | J | _10 | _9 | _8 | _7 +type NonEmpty a = NonEmpty a [a] + +use Rank A K Q J _10 _9 _8 _7 +use Suit Club Spade Heart Diamond +use NonEmpty NonEmpty 
+use Optional Some None + +namespace Suit where + all = [Club, Spade, Heart, Diamond] + +namespace Rank where + all = [A, _10, K, Q, J, _9, _8, _7] + points = cases + A -> 11 + _10 -> 10 + K -> 4 + Q -> 3 + J -> 2 + _ -> 0 + toText = cases + A -> "A" + K -> "K" + Q -> "Q" + J -> "J" + _10 -> "10" + _9 -> "9" + _8 -> "8" + _7 -> "7" + +namespace NonEmpty where + toList = cases + NonEmpty h t -> Sequence.cons h t + fromList : [a] -> Optional (NonEmpty a) + fromList l = match Sequence.at 0 l with + None -> None + Some a -> Some (NonEmpty a (Sequence.drop 1 l)) diff --git a/unison-src/tests/324.u b/unison-src/tests/324.u new file mode 100644 index 0000000000..e7e5c45417 --- /dev/null +++ b/unison-src/tests/324.u @@ -0,0 +1,7 @@ +foo a b = + if a Text.== "" then + match Text.size b with + 1 -> false + _ -> true + else + true diff --git a/unison-src/tests/344.uu b/unison-src/tests/344.uu new file mode 100644 index 0000000000..6749329c28 --- /dev/null +++ b/unison-src/tests/344.uu @@ -0,0 +1,5 @@ +ability Either a b where + left : a -> {Either a b} () + right : b -> {Either a b} () + +type Either a b = Left a | Right b diff --git a/unison-src/tests/514.u b/unison-src/tests/514.u new file mode 100644 index 0000000000..4177359481 --- /dev/null +++ b/unison-src/tests/514.u @@ -0,0 +1,13 @@ + +-- all these can be added + +idNat : Nat -> Nat +idNat x = x + +idInt : Int -> Int +idInt x = x + +idPoly x = x + +idPoly2 : x -> x +idPoly2 y = y diff --git a/unison-src/tests/595.u b/unison-src/tests/595.u new file mode 100644 index 0000000000..b6383b6b58 --- /dev/null +++ b/unison-src/tests/595.u @@ -0,0 +1,13 @@ + +type Any = Any (∀ r . (∀ a . 
a -> r) -> r) + +-- also typechecks as expected +any : a -> Any +any a = Any.Any (k -> k a) + +--- +This typechecks fine, as expected, but try to `add` to codebase, get: + +unison: unknown var in environment: "r" environment = [Right User "a"] +CallStack (from HasCallStack): + error, called at src/Unison/ABT.hs:632:19 in unison-parser-typechecker-0.1-JxZSVhIPWTr4SazQ0mw03q:Unison.ABT diff --git a/unison-src/tests/868.u b/unison-src/tests/868.u new file mode 100644 index 0000000000..21cef2773a --- /dev/null +++ b/unison-src/tests/868.u @@ -0,0 +1,8 @@ +type Choice = First | Second +type Wrapper = Wrapper Choice + +broken = match Wrapper.Wrapper Choice.Second with + Wrapper.Wrapper Choice.First -> true + _ -> false + +> broken diff --git a/unison-src/tests/868.ur b/unison-src/tests/868.ur new file mode 100644 index 0000000000..c508d5366f --- /dev/null +++ b/unison-src/tests/868.ur @@ -0,0 +1 @@ +false diff --git a/unison-src/tests/a-tale-of-two-optionals.u b/unison-src/tests/a-tale-of-two-optionals.u new file mode 100644 index 0000000000..d91fafa6e6 --- /dev/null +++ b/unison-src/tests/a-tale-of-two-optionals.u @@ -0,0 +1,13 @@ +type Optional a = None | Some a + +Optional.isEmpty : Optional a -> Boolean +Optional.isEmpty = cases + Optional.None -> true + Optional.Some _ -> false + +increment x = x + 1 + +(|>) : forall a b . 
a -> (a -> b) -> b +a |> f = f a + +> Optional.Some 4 diff --git a/unison-src/tests/ability-inference-fail.uu b/unison-src/tests/ability-inference-fail.uu new file mode 100644 index 0000000000..e0dfbf2d7f --- /dev/null +++ b/unison-src/tests/ability-inference-fail.uu @@ -0,0 +1,35 @@ +ability Emit a where + emit : a ->{Emit a} () + +type Stream a = Stream ('{Emit a} ()) + +use Stream Stream +use Optional None Some + +namespace Stream where + + unfold : s -> (s -> Optional (a, s)) -> Stream a + unfold s f = Stream 'let + -- step : (s -> Optional (a,s)) -> s ->{Emit a} () + step f s = match f s with + None -> () + Some (a, s) -> emit a + step f s + step f s + +--- + +I found a value of type a where I expected to find one of type a: + + 11 | unfold : s -> (s -> Optional (a, s)) -> Stream a + 12 | unfold s f = Stream 'let + 13 | -- step : (s -> Optional (a,s)) -> s ->{Emit a} () + 14 | step f s = match f s with + 15 | None -> () + 16 | Some (a, s) -> emit a + 17 | step f s + 18 | step f s + + from right here: + + 4 | type Stream a = Stream ('{Emit a} ()) diff --git a/unison-src/tests/ability-keyword.u b/unison-src/tests/ability-keyword.u new file mode 100644 index 0000000000..afe11e7a94 --- /dev/null +++ b/unison-src/tests/ability-keyword.u @@ -0,0 +1,7 @@ + +ability Foo where + foo : {Foo} Text + +x = 'let + y = Foo.foo + () diff --git a/unison-src/tests/abort.u b/unison-src/tests/abort.u new file mode 100644 index 0000000000..f5649ac457 --- /dev/null +++ b/unison-src/tests/abort.u @@ -0,0 +1,13 @@ +--Abort +ability Abort where + Abort : forall a . () -> {Abort} a +eff : forall a b . 
(a -> b) -> b -> Request Abort a -> b +eff f z = cases + { Abort.Abort _ -> k } -> z + { a } -> f a +-- heff : Nat +heff = handle Abort.Abort () with eff (x -> x Nat.+ 2) 1 +hudy : Nat +hudy = handle 42 with eff (x -> x Nat.+ 2) 1 +bork : () -> {Abort} Nat +bork = u -> 1 Nat.+ (Abort.Abort ()) diff --git a/unison-src/tests/ask-inferred.u b/unison-src/tests/ask-inferred.u new file mode 100644 index 0000000000..266eb12e2c --- /dev/null +++ b/unison-src/tests/ask-inferred.u @@ -0,0 +1,23 @@ +--Ask inferred + +ability Ask a where + ask : {Ask a} a + +ability AskU where + ask : {AskU} Nat + +use Nat + + +ability AskT where + ask : {AskT} Text + +x = '(Ask.ask + 1) +x2 = '(Ask.ask + AskU.ask) + +x3 = '(Ask.ask + AskU.ask + size AskT.ask) + +y : '{Ask Nat} Nat +y = '(!x) + +y2 : '{Ask Nat, AskU} Nat +y2 = x2 diff --git a/unison-src/tests/boolean-ops-in-sequence.u b/unison-src/tests/boolean-ops-in-sequence.u new file mode 100644 index 0000000000..afc21f3477 --- /dev/null +++ b/unison-src/tests/boolean-ops-in-sequence.u @@ -0,0 +1 @@ +test = true || false && true diff --git a/unison-src/tests/builtin-arity-0-evaluation.u b/unison-src/tests/builtin-arity-0-evaluation.u new file mode 100644 index 0000000000..c857b3c259 --- /dev/null +++ b/unison-src/tests/builtin-arity-0-evaluation.u @@ -0,0 +1,3 @@ +use Universal == + +> Text.empty == "" diff --git a/unison-src/tests/builtin-arity-0-evaluation.ur b/unison-src/tests/builtin-arity-0-evaluation.ur new file mode 100644 index 0000000000..27ba77ddaf --- /dev/null +++ b/unison-src/tests/builtin-arity-0-evaluation.ur @@ -0,0 +1 @@ +true diff --git a/unison-src/tests/caseguard.u b/unison-src/tests/caseguard.u new file mode 100644 index 0000000000..16e949af5e --- /dev/null +++ b/unison-src/tests/caseguard.u @@ -0,0 +1,15 @@ +-- Used to fail on the guard +-- +-- typechecker.tests/caseguard.u FAILURE I'm not sure what x means at line 3, columns 9-10 +-- +-- 3 | x | x == "woot" -> false +-- +-- Whatever it is, it has a type that conforms 
to Text. + +use Universal == + +f = cases + x | x == "woot" -> false + y | y == "foo" -> true + +-- > f "woot" diff --git a/unison-src/tests/cce.u b/unison-src/tests/cce.u new file mode 100644 index 0000000000..bd67157035 --- /dev/null +++ b/unison-src/tests/cce.u @@ -0,0 +1,116 @@ +use Universal < + +type Future a = Future ('{Remote} a) + +-- A simple distributed computation ability +ability Remote where + + -- Spawn a new node + spawn : {Remote} Node + + -- Sequentially evaluate the given thunk on another node + -- then return to the current node when it completes + at : n -> '{Remote} a -> {Remote} a + + -- Start a computation running, returning an `r` that can be forced to + -- await the result of the computation + fork : '{Remote} a ->{Remote} Future a + +type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair + +force : Future a ->{Remote} a +force = cases Future.Future r -> !r + +Future.fromThunk : '{Remote} a -> Future a +Future.fromThunk = Future.Future + +-- Let's test out this beast! do we need to deploy our code to some EC2 instances?? +-- Gak, no not yet, we just want to test locally, let's write a handler +-- for the `Remote` ability that simulates everything locally! 
+ +Remote.runLocal : '{Remote} a -> a +Remote.runLocal r = + step nid = cases + {a} -> a + {Remote.fork t -> k} -> handle k (Future.fromThunk t) with step nid + {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) + {Remote.at _ t -> k} -> handle k !t with step nid + handle !r with step (Node.Node 0) + +Remote.forkAt : Node -> '{Remote} a ->{Remote} (Future a) +Remote.forkAt node r = Remote.fork '(Remote.at node r) + +use Optional None Some +use Monoid Monoid +use List ++ + +List.map : (a ->{e} b) -> [a] ->{e} [b] +List.map f as = + go f acc as i = match List.at i as with + None -> acc + Some a -> go f (acc `snoc` f a) as (i + 1) + go f [] as 0 + +type Monoid a = Monoid (a -> a -> a) a + +Monoid.zero = cases Monoid.Monoid op z -> z +Monoid.op = cases Monoid.Monoid op z -> op + +Monoid.orElse m = cases + None -> Monoid.zero m + Some a -> a + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + go acc a b = match List.at 0 a with + None -> acc ++ b + Some hd1 -> match at 0 b with + None -> acc ++ a + Some hd2 -> + if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b + else go (acc `snoc` hd2) a (drop 1 b) + go [] a b + +dmap : (a ->{Remote} b) -> [a] ->{Remote} [b] +dmap f as = + bs = List.map (a -> Remote.forkAt Remote.spawn '(f a)) as + List.map force bs + +dreduce : Monoid a -> [a] ->{Remote} a +dreduce m a = + if size a < 2 then Monoid.orElse m (List.at 0 a) + else + l = Remote.forkAt Remote.spawn '(dreduce m (List.take (size a / 2) a)) + r = Remote.forkAt Remote.spawn '(dreduce m (List.drop (size a / 2) a)) + Monoid.op m (force l) (force r) + +dmapReduce : (a ->{Remote} b) -> Monoid b -> [a] ->{Remote} b +dmapReduce f m as = dreduce m (dmap f as) + +dsort : (a -> a -> Boolean) -> [a] ->{Remote} [a] +dsort lte a = + dmapReduce (a -> [a]) (Monoid (merge lte) []) a + +sort : (a -> a -> Boolean) -> [a] -> [a] +sort lte a = + if List.size a < 2 then a + else + l = sort lte (take (size a / 2) a) + r = sort lte (drop (size a / 2) a) + 
merge lte l r + +Node.increment : Node -> Node +Node.increment n = + use Node Node -- the constructor + match n with Node n -> Node (n + 1) + +> Remote.runLocal '(dsort (<) [3,2,1,1,2,3,9182,1,2,34,1,23]) + +---- + +java.lang.ClassCastException: org.unisonweb.UnboxedType$Nat$ cannot be cast to org.unisonweb.Value$Lambda + at org.unisonweb.compilation.package$.org$unisonweb$compilation$package$$$anonfun$dynamicCall$1(compilation.scala:747) + at org.unisonweb.compilation.package$$anonfun$dynamicCall$2.apply(compilation.scala:714) + at org.unisonweb.compilation.package$.org$unisonweb$compilation$package$$$anonfun$compileMatchCase$3(compilation.scala:332) + at org.unisonweb.compilation.package$$anonfun$compileMatchCase$6.apply(compilation.scala:324) + at org.unisonweb.compilation.package$.org$unisonweb$compilation$package$$$anonfun$compile$12(compilation.scala:1070) diff --git a/unison-src/tests/cce.ur b/unison-src/tests/cce.ur new file mode 100644 index 0000000000..f168e469e2 --- /dev/null +++ b/unison-src/tests/cce.ur @@ -0,0 +1 @@ +[1, 1, 1, 1, 2, 2, 2, 3, 3, 23, 34, 9182] diff --git a/unison-src/tests/compose-inference.u b/unison-src/tests/compose-inference.u new file mode 100644 index 0000000000..984abc6467 --- /dev/null +++ b/unison-src/tests/compose-inference.u @@ -0,0 +1,4 @@ + +f `compose` g = x -> f (g x) + +> compose diff --git a/unison-src/tests/console.u b/unison-src/tests/console.u new file mode 100644 index 0000000000..881c2ed157 --- /dev/null +++ b/unison-src/tests/console.u @@ -0,0 +1,52 @@ +ability State s where + get : {State s} s + set : s -> {State s} () + +ability Console where + read : {Console} (Optional Text) + write : Text -> {Console} () + +fst = cases Tuple.Cons a _ -> a + +--TODO type is wrongly being inferred (or at least displayed) as `Tuple a (Tuple a b) ->{} a` +snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b + +state : s -> Request (State s) a -> a +state s = cases + {State.get -> k} -> handle k s with state s + {State.set s' -> k} -> 
handle k () with state s' + {a} -> a + +simulate : Request Console d -> {State ([Text], [Text])} d +simulate = cases + {Console.read -> k} -> + io = State.get + ins = fst io + outs = snd io + State.set (drop 1 ins, outs) + -- this really should typecheck but doesn't for some reason + -- error is that `simulate` doesn't check against `Request Console c -> r`, + -- but seems like that `r` should get instantiated as `{State (..)} c`. + handle k (at 0 ins) with simulate + {Console.write t -> k} -> + io = State.get + ins = fst io + outs = snd io + -- same deal here + handle k (State.set (ins, cons t outs)) with simulate + {a} -> a + +(++) = (Text.++) + +x = handle + handle + use Console read write + use Optional Some None + write "What's your name?" + match read with + Some name -> write ("Hello" ++ name) + None -> write "Fine, be that way." + with simulate + with state ([],[]) + +> x diff --git a/unison-src/tests/console1.u b/unison-src/tests/console1.u new file mode 100644 index 0000000000..c29d7b7ebf --- /dev/null +++ b/unison-src/tests/console1.u @@ -0,0 +1,41 @@ +-- This confusingly gives an error that +-- it doesn't know what `Console.simulate` is. + +ability State s where + get : {State s} s + set : s -> {State s} () + +ability Console where + read : {Console} (Optional Text) + write : Text -> {Console} () + +fst = cases Tuple.Cons a _ -> a + +snd = cases Tuple.Cons _ (Tuple.Cons b _) -> b + +simulate : Request Console a -> {State ([Text], [Text])} a +simulate = cases + {Console.read -> k} -> handle + io = State.get + ins = fst io + outs = snd io + State.set (drop 1 ins, outs) + k (at 0 ins) + with simulate + + {Console.write t -> k} -> handle + io = State.get + ins = fst io + outs = snd io + State.set (ins, outs ++ [t]) + k () + with simulate + +e = 'let handle + use Console read write + use Optional Some None + write "What's your name?" + match read with + Some name -> write ("Hello" ++ name) + None -> write "Fine, be that way." 
+ with simulate diff --git a/unison-src/tests/data-references-builtins.u b/unison-src/tests/data-references-builtins.u new file mode 100644 index 0000000000..099ef4e284 --- /dev/null +++ b/unison-src/tests/data-references-builtins.u @@ -0,0 +1,4 @@ +--data references builtins +type StringOrInt = S Text | I Nat +> [StringOrInt.S "YO", StringOrInt.I 1] + diff --git a/unison-src/tests/delay.u b/unison-src/tests/delay.u new file mode 100644 index 0000000000..0935bbabb3 --- /dev/null +++ b/unison-src/tests/delay.u @@ -0,0 +1,37 @@ + +type Foo a = Foo a + +(+) = (Nat.+) + +-- The type 'a is sugar for `() -> a`. +-- The term 'a is sugar for `() -> a`. +-- !a forces a delayed expression (equivalent to `a()`) + +woot : 'Nat +woot = '42 + +-- A 'a can also be created by prefixing `let` with a ' +woot2 : 'Nat +woot2 = 'let + x = 1 + y = 2 + x + y + +-- ' has higher precedence than -> in type signatures +-- and a lower precedence than type application +woot3 : 'Nat -> Nat +woot3 x = !x + 1 + +woot4 : ∀ a . 
'Foo a -> Foo a +woot4 foo = !foo + +woot4Usage = woot4 '(Foo.Foo 19) + +woot4Usage2 = + foo = 'let + x : Nat + x = 99 + Foo.Foo (x + x) + woot4 foo + +> woot4Usage2 diff --git a/unison-src/tests/delay_parse.u b/unison-src/tests/delay_parse.u new file mode 100644 index 0000000000..525f62eaa4 --- /dev/null +++ b/unison-src/tests/delay_parse.u @@ -0,0 +1,20 @@ +ability T where + foo : {T} () + +-- parses fine +a : () -> {T} () +a x = () + +-- parses fine +b : () -> '() +b = x -> (y -> ()) + +-- parse error +c : () -> {T} '() +c = x -> (y -> ()) + +-- parses fine with extra parentheses +d : () -> {T} ('()) +d = x -> (y -> ()) + + diff --git a/unison-src/tests/effect-instantiation.u b/unison-src/tests/effect-instantiation.u new file mode 100644 index 0000000000..5ec6e1679b --- /dev/null +++ b/unison-src/tests/effect-instantiation.u @@ -0,0 +1,10 @@ + +blah : a -> a -> a +blah a a2 = a2 + +ability Foo where + foo : {Foo} Text + +-- previously this didn't work as first argument was pure +-- and second argument was impure +> blah '("hello!") 'Foo.foo diff --git a/unison-src/tests/effect-instantiation2.u b/unison-src/tests/effect-instantiation2.u new file mode 100644 index 0000000000..6a12abb9ab --- /dev/null +++ b/unison-src/tests/effect-instantiation2.u @@ -0,0 +1,8 @@ + +woot : a -> a -> a +woot a a2 = a + +ability Hi where + hi : Float ->{Hi} Int + +> woot Float.floor Hi.hi diff --git a/unison-src/tests/effect1.u b/unison-src/tests/effect1.u new file mode 100644 index 0000000000..81c772401b --- /dev/null +++ b/unison-src/tests/effect1.u @@ -0,0 +1,8 @@ + +eff : forall a b . (a -> b) -> b -> Request Abort a -> b +eff f z = cases + { Abort.Abort _ -> k } -> z + { a } -> f a + +ability Abort where + Abort : forall a . 
() -> {Abort} a diff --git a/unison-src/tests/empty-above-the-fold.u b/unison-src/tests/empty-above-the-fold.u new file mode 100644 index 0000000000..edeba5919a --- /dev/null +++ b/unison-src/tests/empty-above-the-fold.u @@ -0,0 +1,6 @@ +-- Empty files and all-comment files parse fine, so this one should too. +---- Anything below this line is ignored by Unison. + +-- /Users/arya/unison/unison-src/tests/empty-above-the-fold.u:1:1: +-- unexpected end of input +-- expecting ability, ability, or use diff --git a/unison-src/tests/fib4.ur b/unison-src/tests/fib4.ur new file mode 100644 index 0000000000..42c52724b0 --- /dev/null +++ b/unison-src/tests/fib4.ur @@ -0,0 +1 @@ +2249999 diff --git a/unison-src/tests/fix1640.u b/unison-src/tests/fix1640.u new file mode 100644 index 0000000000..1e339c8387 --- /dev/null +++ b/unison-src/tests/fix1640.u @@ -0,0 +1,25 @@ + +unique type Color = Red | Black +unique type RBTree a = Leaf | Tree Color (RBTree a) a (RBTree a) + +-- interesting, this typechecks fine +isRed = cases + Color.Red -> true + Color.Black -> false + +-- as does this +RBTree.isRed1 = cases + RBTree.Tree _ _ _ _ -> true + _ -> false + +-- but this did not (before this fix) +RBTree.isRed = cases + RBTree.Tree Color.Red _ _ _ -> true + _ -> false + +-- In fixing this bug, I noticed that the parser would previously reject +-- this perfectly cromulent pattern match, so I fixed that too. 
+thisIsTotallyLegit = cases + [RBTree.Tree _ _ _ _] -> true + _ -> false + diff --git a/unison-src/tests/fix528.u b/unison-src/tests/fix528.u new file mode 100644 index 0000000000..c0dff14ec0 --- /dev/null +++ b/unison-src/tests/fix528.u @@ -0,0 +1,12 @@ + +(|>) : a -> (a -> b) -> b +a |> f = f a + +ex1 = "bob" |> (Text.++) "hi, " + +type Woot = Woot Text Int Nat + +ex2 = match 0 |> Woot "Zonk" +10 with + Woot.Woot _ i _ -> i + +> (ex1, ex2) diff --git a/unison-src/tests/fix528.ur b/unison-src/tests/fix528.ur new file mode 100644 index 0000000000..9131151dcf --- /dev/null +++ b/unison-src/tests/fix528.ur @@ -0,0 +1 @@ +("hi, bob", +10) diff --git a/unison-src/tests/fix739.u b/unison-src/tests/fix739.u new file mode 100644 index 0000000000..28d36405c4 --- /dev/null +++ b/unison-src/tests/fix739.u @@ -0,0 +1,4 @@ +type MonoidRec a = { + combine : a -> a -> a, + empty : a +} diff --git a/unison-src/tests/force.u b/unison-src/tests/force.u new file mode 100644 index 0000000000..b4e1d2bdf8 --- /dev/null +++ b/unison-src/tests/force.u @@ -0,0 +1,9 @@ +ability Woot where woot : {Woot} Text + +force : '{e} a ->{e} a +force a = !a + +ex : '{Woot} Text +ex = '(force 'Woot.woot) + +> ex diff --git a/unison-src/tests/guard-boolean-operators.u b/unison-src/tests/guard-boolean-operators.u new file mode 100644 index 0000000000..a5da96a178 --- /dev/null +++ b/unison-src/tests/guard-boolean-operators.u @@ -0,0 +1,11 @@ +type Foo = Foo Boolean Boolean + +f : Foo -> Boolean +f = cases + Foo.Foo a b | a || b -> true + _ -> false + +g : Foo -> Boolean +g = cases + Foo.Foo a b | a && b -> true + _ -> false diff --git a/unison-src/tests/handler-stacking.u b/unison-src/tests/handler-stacking.u new file mode 100644 index 0000000000..bfc1e3c129 --- /dev/null +++ b/unison-src/tests/handler-stacking.u @@ -0,0 +1,34 @@ +use State get put +use Writer tell + +> handle + handle replicate 5 main + with writerHandler [] + with stateHandler "hello" + + +main = '(tell get) + +replicate : Nat -> '{e} 
() -> {e} () +replicate n x = + if n Nat.== 0 then () else + !x + replicate (n `drop` 1) x + +ability State a where + get : {State a} a + put : a -> {State a} () + +ability Writer w where + tell : w -> {Writer w} () + +stateHandler : s -> Request {State s} a -> (s, a) +stateHandler s = cases + { State.get -> k } -> handle k s with stateHandler s + { State.put s -> k } -> handle k () with stateHandler s + { a } -> (s, a) + +writerHandler : [w] -> Request {Writer w} a -> ([w], a) +writerHandler ww = cases + { Writer.tell w -> k } -> handle k () with writerHandler (ww `snoc` w) + { a } -> (ww, a) diff --git a/unison-src/tests/hang.u b/unison-src/tests/hang.u new file mode 100644 index 0000000000..2313c4f017 --- /dev/null +++ b/unison-src/tests/hang.u @@ -0,0 +1,88 @@ + +use Universal == < + +type Future a = Future ('{Remote} a) + +-- A simple distributed computation ability +ability Remote where + + -- Spawn a new node + spawn : {Remote} Node + + -- Sequentially evaluate the given thunk on another node + -- then return to the current node when it completes + at : n -> '{Remote} a -> {Remote} a + + -- Start a computation running, returning an `r` that can be forced to + -- await the result of the computation + fork : '{Remote} a ->{Remote} Future a + +type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair + +force : Future a ->{Remote} a +force = cases Future.Future r -> !r + +-- Let's test out this beast! do we need to deploy our code to some EC2 instances?? +-- Gak, no not yet, we just want to test locally, let's write a handler +-- for the `Remote` ability that simulates everything locally! 
+ +Remote.runLocal : '{Remote} a -> a +Remote.runLocal r = + use Future Future + step nid = cases + {a} -> a + {Remote.fork t -> k} -> handle k (Future t) with step nid + {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) + {Remote.at _ t -> k} -> handle k !t with step nid + handle !r with step (Node.Node 0) + +Remote.forkAt : Node -> '{Remote} a ->{Remote} (Future a) +Remote.forkAt node r = Remote.fork '(Remote.at node r) + +use Optional None Some +use Monoid Monoid +use List ++ + +List.map : (a ->{e} b) -> [a] ->{e} [b] +List.map f as = + go f acc as i = match List.at i as with + None -> acc + Some a -> go f (acc `snoc` f a) as (i + 1) + go f [] as 0 + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + go acc a b = match at 0 a with + None -> acc ++ b + Some hd1 -> match at 0 b with + None -> acc ++ a + Some hd2 -> + if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b + else go (acc `snoc` hd2) a (drop 1 b) + go [] a b + +dsort2 : (a -> a -> Boolean) -> [a] ->{Remote} [a] +dsort2 lte as = + if size as < 2 then as + else match halve as with + None -> as + Some (left, right) -> + use Remote forkAt spawn + l = forkAt spawn '(dsort2 lte left) + r = forkAt spawn '(dsort2 lte right) + merge lte (force l) (force r) + +isEmpty : [a] -> Boolean +isEmpty a = size a == 0 + +halve : [a] -> Optional ([a], [a]) +halve as = + if isEmpty as then None + else Some (take (size as / 2) as, drop (size as / 2) as) + +Node.increment : Node -> Node +Node.increment n = + use Node Node -- the constructor + match n with Node n -> Node (n + 1) + +> Remote.runLocal '(dsort2 (<) [3,2,1,1,2,3,9182,1,2,34,1,23]) diff --git a/unison-src/tests/id.u b/unison-src/tests/id.u new file mode 100644 index 0000000000..7d0bd3d4d2 --- /dev/null +++ b/unison-src/tests/id.u @@ -0,0 +1,5 @@ +id : a -> a +id x = x + +> id + diff --git a/unison-src/tests/if.u b/unison-src/tests/if.u new file mode 100644 index 0000000000..e3af85295c --- /dev/null +++ b/unison-src/tests/if.u 
@@ -0,0 +1,2 @@ +foo = if true then true else false +> foo diff --git a/unison-src/tests/imports.u b/unison-src/tests/imports.u new file mode 100644 index 0000000000..0748ae2845 --- /dev/null +++ b/unison-src/tests/imports.u @@ -0,0 +1,22 @@ +use . Int -- imports `.Int` from root path and gives it the local name `Int` + +-- This brings `None` into scope unqualified +use Optional None + +-- '.' is optional, this brings `None` and `Some` into +-- scope unqualified +use Optional None Some + +-- Can import operators this way also +-- no need to put them in parens +use Nat + + +-- Later imports shadow earlier ones +use Nat - * / +use Nat drop * +use Nat drop +-- use Int + -- this would cause type error below! + +> match Some (100 + 200 / 3 * 2) with + Optional.None -> 19 + Some 200 -> 20 diff --git a/unison-src/tests/imports2.u b/unison-src/tests/imports2.u new file mode 100644 index 0000000000..73c38949a7 --- /dev/null +++ b/unison-src/tests/imports2.u @@ -0,0 +1,12 @@ +-- This gives the error: +-- I'm not sure what Optional.orElse means at Line 12 +-- which is weird because it means Optional.orElse, which is defined here. 
+ +use Optional Some None orElse + +Optional.orElse a b = + match a with + None -> b + a -> a + +> orElse diff --git a/unison-src/tests/inner-lambda1.u b/unison-src/tests/inner-lambda1.u new file mode 100644 index 0000000000..4213c556b3 --- /dev/null +++ b/unison-src/tests/inner-lambda1.u @@ -0,0 +1,15 @@ +use Nat drop >= +use Optional None Some + +search : (Nat -> Int) -> Nat -> Nat -> Optional Nat +search hit bot top = + -- go : Nat -> Nat -> Optional Nat + go bot top = + if bot >= top then None + else + mid = (bot + top) / 2 + match hit mid with + +0 -> Some mid + -1 -> go bot (mid `drop` 1) + +1 -> go (mid + 1) top + go bot top diff --git a/unison-src/tests/inner-lambda2.u b/unison-src/tests/inner-lambda2.u new file mode 100644 index 0000000000..329a296e6c --- /dev/null +++ b/unison-src/tests/inner-lambda2.u @@ -0,0 +1,16 @@ + +use Nat drop >= +use Optional None Some + +search : (Nat -> Int) -> Nat -> Nat -> Optional Nat +search hit bot top = + go : Nat -> Nat -> Optional Nat + go bot top = + if bot >= top then None + else + mid = (bot + top) / 2 + match hit mid with + +0 -> Some mid + -1 -> go bot (mid `drop` 1) + +1 -> go (mid + 1) top + go bot top diff --git a/unison-src/tests/io-state2.u b/unison-src/tests/io-state2.u new file mode 100644 index 0000000000..e5ac00d21c --- /dev/null +++ b/unison-src/tests/io-state2.u @@ -0,0 +1,23 @@ +--IO/State2 ability +ability IO where + launchMissiles : {IO} () + +foo : Int -> {IO} Int +foo unit = + incBy : Int -> {IO, State Int} Int + incBy i = + IO.launchMissiles -- OK, since declared by `incBy` signature + y = State.get + State.put (y Int.+ i) + +42 + +43 + +type Optional a = + Some a | None + +ability State se2 where + put : ∀ se . se -> {State se} () + get : ∀ se . 
{State se} se + + + diff --git a/unison-src/tests/io-state3.u b/unison-src/tests/io-state3.u new file mode 100644 index 0000000000..ca05a59cd0 --- /dev/null +++ b/unison-src/tests/io-state3.u @@ -0,0 +1,10 @@ +--IO3 ability +ability IO where + launchMissiles : () -> {IO} () +-- binding IS guarded, so its body can access whatever abilities +-- are declared by the type of the binding +-- ambient abilities (which will be empty) +ex1 : () -> {IO} () +ex1 unit = IO.launchMissiles() + + diff --git a/unison-src/tests/keyword-parse.u b/unison-src/tests/keyword-parse.u new file mode 100644 index 0000000000..e54ac9592d --- /dev/null +++ b/unison-src/tests/keyword-parse.u @@ -0,0 +1,4 @@ +f x = x + +> (f false) && false +-- and false false diff --git a/unison-src/tests/lambda-closing-over-effectful-fn.u b/unison-src/tests/lambda-closing-over-effectful-fn.u new file mode 100644 index 0000000000..867fc85c2f --- /dev/null +++ b/unison-src/tests/lambda-closing-over-effectful-fn.u @@ -0,0 +1,10 @@ +use Optional None Some + +unfold : s -> (s ->{z} Optional (a, s)) ->{z} [a] +unfold s f = + go s acc = match f s with + None -> acc + Some (hd, s) -> go s (acc `List.snoc` hd) + go s [] + +> unfold 0 (n -> if n Nat.< 5 then Some (n, n + 1) else None) diff --git a/unison-src/tests/lambda-closing-over-effectful-fn.ur b/unison-src/tests/lambda-closing-over-effectful-fn.ur new file mode 100644 index 0000000000..2fdebcae86 --- /dev/null +++ b/unison-src/tests/lambda-closing-over-effectful-fn.ur @@ -0,0 +1 @@ +[0,1,2,3,4] diff --git a/unison-src/tests/links.u b/unison-src/tests/links.u new file mode 100644 index 0000000000..67a6f68b81 --- /dev/null +++ b/unison-src/tests/links.u @@ -0,0 +1,13 @@ + + +natLink : Link.Type +natLink = typeLink Nat + +takeLink : Link.Term +takeLink = termLink List.take + +dropLink : Link.Term +dropLink = termLink List.drop + +> (takeLink == dropLink, natLink == typeLink Nat) + diff --git a/unison-src/tests/links.ur b/unison-src/tests/links.ur new file mode 100644 
index 0000000000..e868f25d0e --- /dev/null +++ b/unison-src/tests/links.ur @@ -0,0 +1 @@ +(false, true) diff --git a/unison-src/tests/map-traverse.u b/unison-src/tests/map-traverse.u new file mode 100644 index 0000000000..980927ca77 --- /dev/null +++ b/unison-src/tests/map-traverse.u @@ -0,0 +1,30 @@ +--map/traverse +ability Noop where + noop : ∀ a . a -> {Noop} a + +ability Noop2 where + noop2 : ∀ a . a -> a -> {Noop2} a + +type List a = Nil | Cons a (List a) + +map : ∀ a b e . (a -> {e} b) -> List a -> {e} (List b) +map f = cases + List.Nil -> List.Nil + List.Cons h t -> List.Cons (f h) (map f t) + +c = List.Cons +z : ∀ a . List a +z = List.Nil + +ex = (c 1 (c 2 (c 3 z))) + +pureMap : List Text +pureMap = map (a -> "hello") ex + +-- `map` is ability polymorphic +zappy : () -> {Noop} (List Nat) +zappy u = map (zap -> (Noop.noop (zap Nat.+ 1))) ex + +-- mixing multiple abilitys in a call to `map` works fine +zappy2 : () -> {Noop, Noop2} (List Nat) +zappy2 u = map (zap -> Noop.noop (zap Nat.+ Noop2.noop2 2 7)) ex diff --git a/unison-src/tests/map-traverse2.u b/unison-src/tests/map-traverse2.u new file mode 100644 index 0000000000..61ee14c168 --- /dev/null +++ b/unison-src/tests/map-traverse2.u @@ -0,0 +1,32 @@ +--map/traverse +ability Noop where + noop : a -> {Noop} a + +ability Noop2 where + noop2 : a -> a -> {Noop2} a + +type List a = Nil | Cons a (List a) + +map : (a -> b) -> List a -> List b +map f = cases + List.Nil -> List.Nil + List.Cons h t -> List.Cons (f h) (map f t) + +c = List.Cons + +z : ∀ a . 
List a +z = List.Nil + +ex = (c 1 (c 2 (c 3 z))) + +pureMap : List Text +pureMap = map (a -> "hello") ex + +-- `map` is ability polymorphic +zappy : '{Noop} (List Nat) +zappy = 'let map (zap -> Noop.noop (zap Nat.+ 1)) ex + +-- mixing multiple abilitys in a call to `map` works fine +zappy2 : '{Noop, Noop2} (List Nat) +zappy2 = 'let + map (zap -> Noop.noop (zap Nat.+ Noop2.noop2 2 7)) ex diff --git a/unison-src/tests/mergesort.u b/unison-src/tests/mergesort.u new file mode 100644 index 0000000000..1f46d7ba26 --- /dev/null +++ b/unison-src/tests/mergesort.u @@ -0,0 +1,26 @@ +use Universal < + +> sort (<) [9234,23,1,3,6,2,3,51,24,1,3,55,2,1] + +halveWith : ([a] -> [a] -> b) -> [a] -> b +halveWith k a = k (take (size a / 2) a) (drop (size a / 2) a) + +sort : (a -> a -> Boolean) -> [a] -> [a] +sort lte a = + if size a < 2 then a + else halveWith (l r -> merge lte (sort lte l) (sort lte r)) a + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + use List ++ + use Optional None Some + go acc a b = match at 0 a with + None -> acc ++ b + Some hd1 -> match at 0 b with + None -> acc ++ a + Some hd2 -> + if hd1 `lte` hd2 then + go (acc `snoc` hd1) (drop 1 a) b + else + go (acc `snoc` hd2) a (drop 1 b) + go [] a b diff --git a/unison-src/tests/methodical/abilities.u b/unison-src/tests/methodical/abilities.u new file mode 100644 index 0000000000..e29c5a794d --- /dev/null +++ b/unison-src/tests/methodical/abilities.u @@ -0,0 +1,48 @@ + +-- ABILITIES + +ability A where + woot : {A} Nat + +unA = cases + {a} -> a + {A.woot -> k} -> handle k 10 with unA + +-- This verifies that the continuation captures local variables +a1 = handle + x = 42 + y = A.woot + x + with unA + +ability B where + zing : {B} Int + +abh = cases + {r} -> r + {A.woot -> k} -> handle k 10 with abh + {B.zing -> k} -> handle k (+11) with abh + +ab1 = handle + x = 32 + y = A.woot + z = B.zing + x + y + truncate0 z + with abh + +nh = cases + {r} -> r + +ab2 = + handle + handle + x = 22 + y = A.woot + 
z = B.zing + x + y + truncate0 z + with nh + with abh + + +> (a1, ab1, ab2) -- should be (42, 53, 43) + diff --git a/unison-src/tests/methodical/abilities.ur b/unison-src/tests/methodical/abilities.ur new file mode 100644 index 0000000000..8b2924f1cb --- /dev/null +++ b/unison-src/tests/methodical/abilities.ur @@ -0,0 +1 @@ +(42, 53, 43) diff --git a/unison-src/tests/methodical/apply-constructor.u b/unison-src/tests/methodical/apply-constructor.u new file mode 100644 index 0000000000..a652f0cba4 --- /dev/null +++ b/unison-src/tests/methodical/apply-constructor.u @@ -0,0 +1,29 @@ + +-- Now check exact and underapply cases for constructors +-- (overapply of a constructor is always a type error) + +type Woot = Woot Nat Nat Nat Nat + +toSeq : Woot -> [Nat] +toSeq = cases + Woot a b c d -> [a,b,c,d] + +use Woot Woot + +exactt = Woot 1 2 3 4 + +underapply0t = + p1 = 1 + f = Woot p1 + f 2 3 4 + +underapply1t = + p2 = 2 + f = Woot 1 p2 + f 3 4 + +underapply2t = + f = Woot 1 2 3 + f 4 + +> (toSeq exactt, toSeq underapply0t, toSeq underapply1t, toSeq underapply2t) diff --git a/unison-src/tests/methodical/apply-constructor.ur b/unison-src/tests/methodical/apply-constructor.ur new file mode 100644 index 0000000000..9aec08492d --- /dev/null +++ b/unison-src/tests/methodical/apply-constructor.ur @@ -0,0 +1 @@ +([1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4]) diff --git a/unison-src/tests/methodical/apply.u b/unison-src/tests/methodical/apply.u new file mode 100644 index 0000000000..67134523ed --- /dev/null +++ b/unison-src/tests/methodical/apply.u @@ -0,0 +1,43 @@ +-- FUNCTION APPLICATION. 
There are several cases to check: +-- * Exact application, underapplication, overapplication +-- * Closure formation (used by data types and builtins) vs specialization +-- * Overapplication yielding a request + +fn p1 p2 p3 p4 = [p1, p2, p3, p4] + +exact = + p1 = 1 + p2 = 2 + fn p1 p2 3 4 + +underapply0 = + f = fn 1 + f 2 3 4 + +underapply1 = + f = fn 1 2 + f 3 4 + +underapply2 = + f = fn 1 2 3 + f 4 + +fn2 p1 p2 = + f p3 p4 = [p1, p2] ++ [p3, p4] + f + +exact1 = + f = fn2 1 2 + f 3 4 + +overapply1 = fn2 1 2 3 4 + +overapply2 = + f = fn2 1 2 3 + f 4 + +overapplyN = + id x = x + id id id id id 99 + +> (exact, underapply0, underapply1, underapply2, exact1, overapply1, overapply2, overapplyN) diff --git a/unison-src/tests/methodical/apply.ur b/unison-src/tests/methodical/apply.ur new file mode 100644 index 0000000000..fc9cf62f12 --- /dev/null +++ b/unison-src/tests/methodical/apply.ur @@ -0,0 +1 @@ +([1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], [1,2,3,4], 99) diff --git a/unison-src/tests/methodical/builtin-nat-to-float.u b/unison-src/tests/methodical/builtin-nat-to-float.u new file mode 100644 index 0000000000..4b8e15e48f --- /dev/null +++ b/unison-src/tests/methodical/builtin-nat-to-float.u @@ -0,0 +1 @@ +> .builtin.Nat.toFloat 4 diff --git a/unison-src/tests/methodical/builtin-nat-to-float.ur b/unison-src/tests/methodical/builtin-nat-to-float.ur new file mode 100644 index 0000000000..5186d07068 --- /dev/null +++ b/unison-src/tests/methodical/builtin-nat-to-float.ur @@ -0,0 +1 @@ +4.0 diff --git a/unison-src/tests/methodical/builtins.u b/unison-src/tests/methodical/builtins.u new file mode 100644 index 0000000000..597f2d2272 --- /dev/null +++ b/unison-src/tests/methodical/builtins.u @@ -0,0 +1,14 @@ +use Optional Some None + +> natTextRoundTrip = + Nat.fromText (Nat.toText 123) Universal.== Some 123 + +> intTextRoundTripPos = + Int.fromText (Int.toText +123) Universal.== Some +123 + +> intTextRoundTripNeg = + Int.fromText (Int.toText -123) 
Universal.== Some -123 + +> intFloatRoundTrip = + Float.round (Int.toFloat +123) Universal.== +123 + diff --git a/unison-src/tests/methodical/cycle-minimize.u b/unison-src/tests/methodical/cycle-minimize.u new file mode 100644 index 0000000000..fc6356e719 --- /dev/null +++ b/unison-src/tests/methodical/cycle-minimize.u @@ -0,0 +1,11 @@ + +ability SpaceAttack where + launchMissiles : Text -> () + +-- should typecheck fine, as the `launchMissiles "saturn"` +-- gets moved out of the `ping` / `pong` cycle +ex x = + ping x = pong (x + 1) + launchMissiles "saturn" + pong x = ping (x `Nat.drop` 1) + launchMissiles "neptune" diff --git a/unison-src/tests/methodical/dots.u b/unison-src/tests/methodical/dots.u new file mode 100644 index 0000000000..dcd584a560 --- /dev/null +++ b/unison-src/tests/methodical/dots.u @@ -0,0 +1,28 @@ + +-- You can define an operator called dot +(.) f g x = f (g x) + +id : ∀ a. a -> a -- dot still fine in type parser +id x = x + +id2 = id . id + +-- You need a space or delimiter char after the dot, +-- otherwise Unison assumes it's a rooted name - this will look for +-- a term called `zonk` in the root: +-- +-- foo = id .zonk + +-- You can define qualified functions +(base.function..) f g x = f (g x) + +-- looks weird, but consistent syntax with any other infix binding +object oop.syntax.. method = method object + +ex = + use base.function . + (id . id) 42 + +ex2 = use oop.syntax .; 42 . id . 
id + +> (ex, ex2) diff --git a/unison-src/tests/methodical/dots.ur b/unison-src/tests/methodical/dots.ur new file mode 100644 index 0000000000..7b97e73294 --- /dev/null +++ b/unison-src/tests/methodical/dots.ur @@ -0,0 +1 @@ +(42,42) diff --git a/unison-src/tests/methodical/empty.u b/unison-src/tests/methodical/empty.u new file mode 100644 index 0000000000..e69de29bb2 diff --git a/unison-src/tests/methodical/empty2.u b/unison-src/tests/methodical/empty2.u new file mode 100644 index 0000000000..9cfa2b17cc --- /dev/null +++ b/unison-src/tests/methodical/empty2.u @@ -0,0 +1 @@ +-- This file is empty but has a comment diff --git a/unison-src/tests/methodical/empty3.u b/unison-src/tests/methodical/empty3.u new file mode 100644 index 0000000000..3ee1a03056 --- /dev/null +++ b/unison-src/tests/methodical/empty3.u @@ -0,0 +1,3 @@ +--- + +This file is empty but has some stuff below the fold diff --git a/unison-src/tests/methodical/exponential.u b/unison-src/tests/methodical/exponential.u new file mode 100644 index 0000000000..d7597653a9 --- /dev/null +++ b/unison-src/tests/methodical/exponential.u @@ -0,0 +1,5 @@ +use Float exp log logBase + +> (exp 0.0, + log (exp 1.0), + logBase 10.0 100.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/exponential.ur b/unison-src/tests/methodical/exponential.ur new file mode 100644 index 0000000000..78041679ec --- /dev/null +++ b/unison-src/tests/methodical/exponential.ur @@ -0,0 +1,3 @@ +(1.0, + 1.0, + 2.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/float.u b/unison-src/tests/methodical/float.u new file mode 100644 index 0000000000..5fde45c0b4 --- /dev/null +++ b/unison-src/tests/methodical/float.u @@ -0,0 +1,15 @@ +use Float abs max min toText fromText +use Optional Some None + +withDefault : Optional a -> a -> a +withDefault opt d = match opt with + Some x -> x + None -> d + +> (abs 1.1, + abs -1.1, + max 1.1 1.5, + min 1.1 1.5, + toText 1.1, + withDefault (fromText "1.5") -1.0, + 
withDefault (fromText "Hello world!") -1.0) diff --git a/unison-src/tests/methodical/float.ur b/unison-src/tests/methodical/float.ur new file mode 100644 index 0000000000..1bbbd63b9b --- /dev/null +++ b/unison-src/tests/methodical/float.ur @@ -0,0 +1,7 @@ +(1.1, + 1.1, + 1.5, + 1.1, + "1.1", + 1.5, + -1.0) diff --git a/unison-src/tests/methodical/hyperbolic.u b/unison-src/tests/methodical/hyperbolic.u new file mode 100644 index 0000000000..4d5850ad20 --- /dev/null +++ b/unison-src/tests/methodical/hyperbolic.u @@ -0,0 +1,8 @@ +use Float acosh asinh atanh cosh sinh tanh + +> (acosh 1.0, + asinh 0.0, + atanh 0.0, + cosh 0.0, + sinh 0.0, + tanh 0.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/hyperbolic.ur b/unison-src/tests/methodical/hyperbolic.ur new file mode 100644 index 0000000000..4556da9ce8 --- /dev/null +++ b/unison-src/tests/methodical/hyperbolic.ur @@ -0,0 +1,6 @@ +(0.0, + 0.0, + 0.0, + 1.0, + 0.0, + 0.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/int.u b/unison-src/tests/methodical/int.u new file mode 100644 index 0000000000..8fe2cb90c5 --- /dev/null +++ b/unison-src/tests/methodical/int.u @@ -0,0 +1,24 @@ +use Int increment isEven isOdd signum negate mod pow shiftLeft shiftRight truncate0 toText fromText toFloat leadingZeros trailingZeros and or xor complement + +withDefault : Optional a -> a -> a +withDefault opt d = match opt with + Some x -> x + None -> d + +> (increment +3, + isEven +3, + isOdd +3, + signum +3, + negate +3, + mod +10 +3, + pow +10 3, + shiftLeft +7 2, + shiftRight +7 2, + truncate0 +3, + truncate0 -3, + withDefault (fromText "3x") -1, + withDefault (fromText "+3") -1, + leadingZeros +0, + leadingZeros +1, + leadingZeros +8, + toFloat +3) \ No newline at end of file diff --git a/unison-src/tests/methodical/int.ur b/unison-src/tests/methodical/int.ur new file mode 100644 index 0000000000..0e2ef9c33d --- /dev/null +++ b/unison-src/tests/methodical/int.ur @@ -0,0 +1,17 @@ +(+4, + false, + 
true, + +1, + -3, + +1, + +1000, + +28, + +1, + 3, + 0, + -1, + +3, + 64, + 63, + 60, + 3.0) diff --git a/unison-src/tests/methodical/let.u b/unison-src/tests/methodical/let.u new file mode 100644 index 0000000000..97a9690226 --- /dev/null +++ b/unison-src/tests/methodical/let.u @@ -0,0 +1,12 @@ + +-- LET +c0 = + a = 1000 + b = 100 + c = 10 + d = 1 + [a + b + c + d, b + c + d, c + d, d] + +-- Make sure we can push values onto the stack and reference them as expected +> c0 + diff --git a/unison-src/tests/methodical/let.ur b/unison-src/tests/methodical/let.ur new file mode 100644 index 0000000000..ad1a2189f6 --- /dev/null +++ b/unison-src/tests/methodical/let.ur @@ -0,0 +1 @@ +[1111,111,11,1] diff --git a/unison-src/tests/methodical/literals.u b/unison-src/tests/methodical/literals.u new file mode 100644 index 0000000000..f8cb3a2945 --- /dev/null +++ b/unison-src/tests/methodical/literals.u @@ -0,0 +1,12 @@ + +-- LITERALS +ln = 10 +li = +10 +lf = 10.0 +lt = "text" +lc = ?ひ +lb = true +sn = [1,2,3,4,5,6,ln] + +-- Make sure we can compile all literals +> (ln, li, lf, lt, lc, lb, sn) diff --git a/unison-src/tests/methodical/literals.ur b/unison-src/tests/methodical/literals.ur new file mode 100644 index 0000000000..af9c7335a3 --- /dev/null +++ b/unison-src/tests/methodical/literals.ur @@ -0,0 +1 @@ +(10, +10, 10.0, "text", ?ひ, true, [1,2,3,4,5,6,10]) diff --git a/unison-src/tests/methodical/loop.u b/unison-src/tests/methodical/loop.u new file mode 100644 index 0000000000..7d5a2484f4 --- /dev/null +++ b/unison-src/tests/methodical/loop.u @@ -0,0 +1,8 @@ + +use Universal == + +loop acc n = + if n == 0 then acc + else loop (acc + n) (n `drop` 1) + +> loop 0 10000 diff --git a/unison-src/tests/methodical/nat.u b/unison-src/tests/methodical/nat.u new file mode 100644 index 0000000000..baa4a3c95f --- /dev/null +++ b/unison-src/tests/methodical/nat.u @@ -0,0 +1,31 @@ +use Nat drop fromText increment isEven isOdd mod pow shiftLeft shiftRight sub toFloat toInt toText 
trailingZeros leadingZeros and or xor complement + +withDefault : Optional a -> a -> a +withDefault opt d = match opt with + Some x -> x + None -> d + +> (withDefault (fromText "3") 0, + drop 3 2, + increment 3, + isEven 3, + isOdd 3, + mod 10 3, + pow 10 3, + shiftLeft 7 2, + shiftRight 7 2, + trailingZeros 0, + leadingZeros 1, + leadingZeros 8, + trailingZeros 0, + trailingZeros 1, + trailingZeros 8, + 7 `and` 10, + 7 `or` 10, + 7 `xor` 14, + complement 0, + sub 3 2, + toFloat 3, + toInt 3, + toText 3) + \ No newline at end of file diff --git a/unison-src/tests/methodical/nat.ur b/unison-src/tests/methodical/nat.ur new file mode 100644 index 0000000000..77e2ca1f9c --- /dev/null +++ b/unison-src/tests/methodical/nat.ur @@ -0,0 +1,23 @@ +(3, + 1, + 4, + false, + true, + 1, + 1000, + 28, + 1, + 64, + 63, + 60, + 64, + 0, + 3, + 2, + 15, + 9, + 18446744073709551615, + +1, + 3.0, + +3, + "3") diff --git a/unison-src/tests/methodical/overapply-ability.u b/unison-src/tests/methodical/overapply-ability.u new file mode 100644 index 0000000000..539871c4f4 --- /dev/null +++ b/unison-src/tests/methodical/overapply-ability.u @@ -0,0 +1,47 @@ + +-- A corner case in the runtime is when a function is being overapplied and +-- the exactly applied function requests an ability (and returns a new function) + +ability Zing where + zing : Nat -> {Zing} (Nat -> Nat) + zing2 : Nat -> Nat ->{Zing} (Nat -> Nat -> [Nat]) + +unzing = cases + {a} -> a + {Zing.zing n -> k} -> handle k (x -> x `drop` n) with unzing + {Zing.zing2 n1 n2 -> k} -> handle k (n3 n4 -> [n1, n2, n3, n4]) with unzing + +exacth = handle + f = Zing.zing 3 + f 20 + 1 + with unzing + +overapplyh = handle + Zing.zing 3 20 + 1 + with unzing + +-- SEQUENCES with abilities + +sequence1 = handle [Zing.zing 1 4] with unzing +sequence2 = handle [Zing.zing 1 4, Zing.zing 1 4] with unzing +sequence3 = handle [Zing.zing 1 4, Zing.zing 2 4, Zing.zing 3 4, Zing.zing 4 4] with unzing + +-- Overapply of requests + +overapplyh2 = handle 
Zing.zing2 1 2 3 4 with unzing + +overapplyh3a = handle Zing.zing2 1 2 3 4 ++ [5] with unzing + +overapplyh3b = handle Zing.zing2 1 2 3 4 ++ [5, Zing.zing 2 8] with unzing + +overapplyh3c = handle Zing.zing2 1 2 3 4 ++ [5, Zing.zing 2 7 + 1] with unzing + +> (exacth, + overapplyh, + sequence1, + sequence2, + sequence3, + overapplyh2, + overapplyh3a, + overapplyh3b, + overapplyh3c) diff --git a/unison-src/tests/methodical/overapply-ability.ur b/unison-src/tests/methodical/overapply-ability.ur new file mode 100644 index 0000000000..879c57ffb3 --- /dev/null +++ b/unison-src/tests/methodical/overapply-ability.ur @@ -0,0 +1 @@ +(18, 18, [3], [3, 3], [3, 2, 1, 0], [1,2,3,4], [1,2,3,4,5], [1,2,3,4,5,6], [1,2,3,4,5,6]) diff --git a/unison-src/tests/methodical/parens.u b/unison-src/tests/methodical/parens.u new file mode 100644 index 0000000000..d99181ac07 --- /dev/null +++ b/unison-src/tests/methodical/parens.u @@ -0,0 +1,27 @@ +ex1 = '(let + use List ++ + [1] ++ [3]) + +ex2 = '(let + use List ++ + [1] ++ [3] +) + +ex3 = '(let + use List ++ + [1] ++ [3] + ) + +ex4 = '( + let + use List ++ + [1] ++ [3]; [4] +) + +ex5 = '( + let + use List ++ + [1] ++ [3] + ) + +> (ex1, ex2, ex3, ex4, ex5) diff --git a/unison-src/tests/methodical/pattern-matching.u b/unison-src/tests/methodical/pattern-matching.u new file mode 100644 index 0000000000..4feedfacf2 --- /dev/null +++ b/unison-src/tests/methodical/pattern-matching.u @@ -0,0 +1,28 @@ + +use Universal == + +-- PATTERN MATCHING + +pat1 x y p = match p with x0 -> (x0, x, y, p) + +pat2 x y p = match p with _ -> (x, y, p) + +pat3 x y = cases (x, y) -> (y, x) + +pat4 x y = cases (p1, _) -> (x, y, p1) + +pat5 x y = cases (_, p2) -> (x, y, p2) + +pat6 x y = cases (p1, _) -> (x + y : Nat, p1) + +pat7 x y = cases + (p1, _) | p1 == 9 -> (x + y : Nat, p1) + (p1, _) | true -> (0, p1) + +> (pat1 0 1 (2, 3), + pat2 0 1 "hi", + pat3 0 1 (2, 3), + pat4 0 1 (2, 3), + pat5 0 1 (3, 2), + pat6 1 2 (3, 4), + pat7 1 2 (20, 10)) diff --git 
a/unison-src/tests/methodical/pattern-matching.ur b/unison-src/tests/methodical/pattern-matching.ur new file mode 100644 index 0000000000..6782995807 --- /dev/null +++ b/unison-src/tests/methodical/pattern-matching.ur @@ -0,0 +1,7 @@ +(((2,3), 0, 1, (2,3)), + (0, 1, "hi"), + (3, 2), + (0, 1, 2), + (0, 1, 2), + (3, 3), + (0, 20)) diff --git a/unison-src/tests/methodical/power.u b/unison-src/tests/methodical/power.u new file mode 100644 index 0000000000..055298e622 --- /dev/null +++ b/unison-src/tests/methodical/power.u @@ -0,0 +1,4 @@ +use Float pow sqrt + +> (pow 10.0 2.0, + sqrt 4.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/power.ur b/unison-src/tests/methodical/power.ur new file mode 100644 index 0000000000..b25571e9c5 --- /dev/null +++ b/unison-src/tests/methodical/power.ur @@ -0,0 +1,2 @@ +(100.0, + 2.0 \ No newline at end of file diff --git a/unison-src/tests/methodical/rank2.u b/unison-src/tests/methodical/rank2.u new file mode 100644 index 0000000000..6afe1e6ed8 --- /dev/null +++ b/unison-src/tests/methodical/rank2.u @@ -0,0 +1,9 @@ + +-- This should typecheck, since `∀ a . a -> a` can be passed in place of +-- a `Nat -> Nat`. Verifies that subtyping of `->` is contravariant in the +-- input types. +rank2b : (∀ a . 
a -> a) -> Nat +rank2b = + inner : (Nat -> Nat) -> Nat + inner f = 42 + inner diff --git a/unison-src/tests/methodical/rounding.u b/unison-src/tests/methodical/rounding.u new file mode 100644 index 0000000000..ac0023f875 --- /dev/null +++ b/unison-src/tests/methodical/rounding.u @@ -0,0 +1,8 @@ +use Float ceiling floor round truncate + +> (ceiling 1.1, + floor 1.7, + round 1.1, + round 1.7, + truncate 1.1, + truncate -1.1) \ No newline at end of file diff --git a/unison-src/tests/methodical/rounding.ur b/unison-src/tests/methodical/rounding.ur new file mode 100644 index 0000000000..14a1c53a4d --- /dev/null +++ b/unison-src/tests/methodical/rounding.ur @@ -0,0 +1,6 @@ +(+2, + +1, + +1, + +2, + +1, + -1) diff --git a/unison-src/tests/methodical/scopedtypevars.u b/unison-src/tests/methodical/scopedtypevars.u new file mode 100644 index 0000000000..aaf8904bde --- /dev/null +++ b/unison-src/tests/methodical/scopedtypevars.u @@ -0,0 +1,29 @@ + +ex1 : x -> y -> x +ex1 a b = + temp : x -- refers to the variable in the outer scope + temp = a + a + +ex2 : x -> y -> x +ex2 a b = + id : ∀ x . 
x -> x -- doesn't refer the variable in outer scope + id x = x + id 42 + id a + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + use List ++ drop at snoc + use Optional None Some + go : [a] -> [a] -> [a] -> [a] -- refers to the outer `a` type + go acc a b = match at 0 a with + None -> acc ++ b + Some hd1 -> match at 0 b with + None -> acc ++ a + Some hd2 -> + if hd1 `lte` hd2 then + go (acc `snoc` hd1) (drop 1 a) b + else + go (acc `snoc` hd2) a (drop 1 b) + go [] a b diff --git a/unison-src/tests/methodical/semis.u b/unison-src/tests/methodical/semis.u new file mode 100644 index 0000000000..5853665f8f --- /dev/null +++ b/unison-src/tests/methodical/semis.u @@ -0,0 +1,13 @@ + +-- explicit semicolons allowed inside the block parser +-- no need for a space either before or after +x = 0; 1 +y = 0;1 +z = x;y +(**) = x ; y +a = (**);(**);z +p = 1;-- comments are okay after a semi + 1 -- and you can put a semi between lines, + -- even if a virtual semi would have been emitted + +> (x,y,z,(**),a,p) diff --git a/unison-src/tests/methodical/semis.ur b/unison-src/tests/methodical/semis.ur new file mode 100644 index 0000000000..4573fd6ebc --- /dev/null +++ b/unison-src/tests/methodical/semis.ur @@ -0,0 +1 @@ +(1,1,1,1,1,1) diff --git a/unison-src/tests/methodical/trig.u b/unison-src/tests/methodical/trig.u new file mode 100644 index 0000000000..a7461ab36c --- /dev/null +++ b/unison-src/tests/methodical/trig.u @@ -0,0 +1,9 @@ +use Float acos asin atan atan2 cos sin tan + +> (cos 0.0, + sin 0.0, + tan 0.0, + acos (cos 0.0), + asin (sin 0.0), + atan (tan 0.0), + atan2 0.0 0.0) diff --git a/unison-src/tests/methodical/trig.ur b/unison-src/tests/methodical/trig.ur new file mode 100644 index 0000000000..10b3c403d4 --- /dev/null +++ b/unison-src/tests/methodical/trig.ur @@ -0,0 +1,7 @@ +(1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0) \ No newline at end of file diff --git a/unison-src/tests/methodical/universals.u b/unison-src/tests/methodical/universals.u new 
file mode 100644 index 0000000000..29d5cb117e --- /dev/null +++ b/unison-src/tests/methodical/universals.u @@ -0,0 +1,20 @@ +use Universal == < > <= >= compare + +> ([1,2,3] `compare` [1,2,3], + [1,2,3] `compare` [1], + [1] `compare` [1,2,3], + ?a `compare` ?b, + ("hi", "there") == ("hi", "there"), + 1 < 1, + 1 < 2, + 2 < 1, + 1 <= 2, + 1 <= 1, + 2 <= 1, + 0 > 1, + 1 > 0, + 0 > 0, + 0 >= 0, + 1 >= 0, + 0 >= 1) + diff --git a/unison-src/tests/methodical/universals.ur b/unison-src/tests/methodical/universals.ur new file mode 100644 index 0000000000..6b7d581ce3 --- /dev/null +++ b/unison-src/tests/methodical/universals.ur @@ -0,0 +1,17 @@ +(+0, + +1, + -1, + -1, + true, + false, + true, + false, + true, + true, + false, + false, + true, + false, + true, + true, + false) diff --git a/unison-src/tests/methodical/wildcardimports.u b/unison-src/tests/methodical/wildcardimports.u new file mode 100644 index 0000000000..4882ed8e79 --- /dev/null +++ b/unison-src/tests/methodical/wildcardimports.u @@ -0,0 +1,6 @@ + +use Text + +-- note: this `drop` call would be ambiguous normally (`Bytes.drop`, `Nat.drop`...) 
+-- but the wildcard import of `Text` brings it into scope +foo x = drop 10 x diff --git a/unison-src/tests/multiple-effects.u b/unison-src/tests/multiple-effects.u new file mode 100644 index 0000000000..e01edb87b6 --- /dev/null +++ b/unison-src/tests/multiple-effects.u @@ -0,0 +1,16 @@ +ability State s where + get : {State s} s + set : s -> {State s} () + +ability Console where + read : {Console} (Optional Text) + write : Text -> {Console} () + +Console.state : s -> Request (State s) a -> a +Console.state s = cases + {State.get -> k} -> handle k s with Console.state s + {State.set s' -> k} -> handle k () with Console.state s' + {a} -> a + +multiHandler : s -> [w] -> Nat -> Request {State s, Console} a -> () +multiHandler _ _ _ _ = () diff --git a/unison-src/tests/one-liners.uu b/unison-src/tests/one-liners.uu new file mode 100644 index 0000000000..e8ee998e18 --- /dev/null +++ b/unison-src/tests/one-liners.uu @@ -0,0 +1,2 @@ +(if true then 1 else 2) : Nat +(if true then (x -> x) else (x -> x) : forall a . 
a -> a) diff --git a/unison-src/tests/parenthesized-blocks.u b/unison-src/tests/parenthesized-blocks.u new file mode 100644 index 0000000000..5824dbbec0 --- /dev/null +++ b/unison-src/tests/parenthesized-blocks.u @@ -0,0 +1,5 @@ + +x = (if true then 1 else 0) + 1 +y = (match 1 with 1 -> 1) + 1 + +> (x, y) diff --git a/unison-src/tests/parenthesized-blocks.ur b/unison-src/tests/parenthesized-blocks.ur new file mode 100644 index 0000000000..529bf66567 --- /dev/null +++ b/unison-src/tests/parenthesized-blocks.ur @@ -0,0 +1 @@ +(2, 2) diff --git a/unison-src/tests/pattern-match-seq.u b/unison-src/tests/pattern-match-seq.u new file mode 100644 index 0000000000..58c0a04840 --- /dev/null +++ b/unison-src/tests/pattern-match-seq.u @@ -0,0 +1,86 @@ +use Optional None Some + +optionToList : Optional a -> [a] +optionToList = cases + Some a -> [a] + None -> [] + +lenLit : [a] -> Nat +lenLit = cases + [] -> 0 + [_] -> 1 + [_, _] -> 2 + [_, _, _] -> 3 + +lenCons : [a] -> Nat +lenCons = cases + [] -> 0 + _ +: t -> 1 + lenCons t + _ +: (_ +: t) -> 2 + lenCons t + +lenSnoc : [a] -> Nat +lenSnoc = cases + [] -> 0 + t :+ _ -> 1 + lenSnoc t + +lenConcat1 : [a] -> Nat +lenConcat1 = cases + [] -> 0 + [_] ++ tail -> 1 + lenConcat1 tail + +lenConcat2 : [a] -> Nat +lenConcat2 = cases + [] -> 0 + prefix ++ [_] -> 1 + lenConcat2 prefix + +head : [a] -> Optional a +head = cases + h +: _ -> Some h + _ -> None + +firstTwo : [a] -> Optional (a, a) +firstTwo = cases + x +: (y +: _) -> Some (x, y) + _ -> None + +lastTwo : [a] -> Optional (a, a) +lastTwo = cases + _ :+ x :+ y -> Some (x, y) + _ -> None + +middle : [a] -> Optional [a] +middle = cases + [_] ++ m ++ [_] -> Some m + _ -> None + +middleNel : [a] -> Optional (a, [a]) +middleNel = cases + [_] ++ (h +: t) ++ [_] -> Some (h, t) + _ -> None + +splitAtFour : [a] -> ([a], [a]) +splitAtFour l = match l with + [a] ++ x@(b +: (c +: y@([] :+ d))) ++ tail -> ([a, b, c, d], tail) + _ -> (l, []) + +> ( + lenLit [1, 2, 3], + lenCons [1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, 11, 12, 13, 14, 15], + lenSnoc [1, 2, 3, 4, 5, 6, 7, 8], + lenConcat1 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + lenConcat2 [1, 2, 3, 4, 5], + optionToList (head []), + optionToList (head [1, 2, 3]), + optionToList (firstTwo []), + optionToList (firstTwo [1]), + optionToList (firstTwo [1, 2, 3]), + optionToList (lastTwo []), + optionToList (lastTwo [1]), + optionToList (lastTwo [1, 2, 3]), + optionToList (middle []), + optionToList (middle [1, 2]), + optionToList (middle [1, 2, 3, 4, 5, 6]), + optionToList (middleNel []), + optionToList (middleNel [1, 2]), + optionToList (middleNel [1, 2, 3, 4, 5, 6]), + splitAtFour [1, 2, 3, 4, 5, 6, 7]) diff --git a/unison-src/tests/pattern-match-seq.ur b/unison-src/tests/pattern-match-seq.ur new file mode 100644 index 0000000000..323ca8182f --- /dev/null +++ b/unison-src/tests/pattern-match-seq.ur @@ -0,0 +1,20 @@ +( 3, + 15, + 8, + 11, + 5, + [], + [1], + [], + [], + [(1, 2)], + [], + [], + [(2, 3)], + [], + [[]], + [[2, 3, 4, 5]], + [], + [], + [(2, [3, 4, 5])], + ([1, 2, 3, 4], [5, 6, 7]) ) diff --git a/unison-src/tests/pattern-matching.u b/unison-src/tests/pattern-matching.u new file mode 100644 index 0000000000..a1403ac474 --- /dev/null +++ b/unison-src/tests/pattern-matching.u @@ -0,0 +1,36 @@ +type Foo0 = Foo0 +type Foo1 a = Foo1 a +type Foo2 a b = Foo2 a b +type Foo3 a b c = Foo3 a b c +type List a = Nil | Cons a (List a) + +use Foo0 Foo0 +use Foo1 Foo1 +use Foo2 Foo2 + +x = match Foo0 with + Foo0 -> 1 + +y = match Foo1 1 with + Foo1 1 -> 0 + Foo1 _ -> 10 + +z = match Foo2 1 "hi" with + Foo2 x _ -> x + Foo2 1 _ -> 1 + +w = match Foo3.Foo3 1 2 "bye" with + Foo3.Foo3 1 2 x -> x Text.++ "bye" + _ -> "" + +w2 = cases + Foo3.Foo3 1 4 x -> x Text.++ "bye" + Foo3.Foo3 x y z -> z Text.++ z + _ -> "hi" + +len : List a -> Nat +len = cases + List.Nil -> 0 + List.Cons _ t -> len t + 1 + +> (w, w2, len) diff --git a/unison-src/tests/pattern-matching2.u b/unison-src/tests/pattern-matching2.u new file mode 100644 index 
0000000000..7bd1bf069b --- /dev/null +++ b/unison-src/tests/pattern-matching2.u @@ -0,0 +1,21 @@ +type Foo0 = Foo0 +type Foo1 a = Foo1 a +type Foo2 a b = Foo2 a b +type Foo3 a b c = Foo3 a b c + +use Foo0 Foo0 +use Foo1 Foo1 +use Foo2 Foo2 + +x = match Foo0 with + Foo0 -> 1 + +y = match Foo1 1 with + Foo1 1 -> 0 + Foo1 _ -> 10 + +z = match Foo2 1 "hi" with + Foo2 x "bye" -> x + Foo2 1 "hi" -> 1 + +> z diff --git a/unison-src/tests/pattern-typing-bug.u b/unison-src/tests/pattern-typing-bug.u new file mode 100644 index 0000000000..5ac1d44814 --- /dev/null +++ b/unison-src/tests/pattern-typing-bug.u @@ -0,0 +1,9 @@ +type Value = String Text + | Bool Boolean + +f : Value -> Nat +f = cases + Value.Bool true -> 3 + _ -> 4 + +> f (Value.String "foo") diff --git a/unison-src/tests/pattern-typing-bug.ur b/unison-src/tests/pattern-typing-bug.ur new file mode 100644 index 0000000000..b8626c4cff --- /dev/null +++ b/unison-src/tests/pattern-typing-bug.ur @@ -0,0 +1 @@ +4 diff --git a/unison-src/tests/pattern-weirdness.u b/unison-src/tests/pattern-weirdness.u new file mode 100644 index 0000000000..e139078013 --- /dev/null +++ b/unison-src/tests/pattern-weirdness.u @@ -0,0 +1,16 @@ +go = + a = 1 + match "" with + pos | false -> a + _ -> a + +> go + +-- 7 | > go +-- ⧩ +-- "" + +-- should be 1, not "" + +-- seems to have something to do with the wildcard + guard, as changing or +-- or deleting that line makes the problem go away diff --git a/unison-src/tests/pattern-weirdness.ur b/unison-src/tests/pattern-weirdness.ur new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/unison-src/tests/pattern-weirdness.ur @@ -0,0 +1 @@ +1 diff --git a/unison-src/tests/quote-parse-bug.uu b/unison-src/tests/quote-parse-bug.uu new file mode 100644 index 0000000000..5b7935fa38 --- /dev/null +++ b/unison-src/tests/quote-parse-bug.uu @@ -0,0 +1,6 @@ + +blah : a -> a -> a +blah a a2 = a2 + +> blah '"hi" '"arya" + diff --git a/unison-src/tests/r0.u b/unison-src/tests/r0.u new file mode 
100644 index 0000000000..c878aa490a --- /dev/null +++ b/unison-src/tests/r0.u @@ -0,0 +1,5 @@ +r0 : Nat +r0 = match Optional.Some 3 with + x -> 1 + +> r0 diff --git a/unison-src/tests/r1.u b/unison-src/tests/r1.u new file mode 100644 index 0000000000..855e2d2bf1 --- /dev/null +++ b/unison-src/tests/r1.u @@ -0,0 +1,6 @@ +--r1 +type Optional a = None | Some a +r1 : Nat +r1 = match Optional.Some 3 with + x -> 1 + diff --git a/unison-src/tests/r10.u b/unison-src/tests/r10.u new file mode 100644 index 0000000000..6cc2d5a6ce --- /dev/null +++ b/unison-src/tests/r10.u @@ -0,0 +1,5 @@ +r10 : Nat +r10 = match 1 with + 1 | true -> 3 + _ -> 4 + diff --git a/unison-src/tests/r11.u b/unison-src/tests/r11.u new file mode 100644 index 0000000000..0e94ea072c --- /dev/null +++ b/unison-src/tests/r11.u @@ -0,0 +1,7 @@ +use Universal == + +r11 : Nat +r11 = match 1 with + 1 | 2 == 3 -> 4 + _ -> 5 + diff --git a/unison-src/tests/r12.u b/unison-src/tests/r12.u new file mode 100644 index 0000000000..6c3a0de852 --- /dev/null +++ b/unison-src/tests/r12.u @@ -0,0 +1,4 @@ +r12 : Nat +r12 = (x -> x) 64 + + diff --git a/unison-src/tests/r13.u b/unison-src/tests/r13.u new file mode 100644 index 0000000000..f44048ea00 --- /dev/null +++ b/unison-src/tests/r13.u @@ -0,0 +1,5 @@ +r13 : (Nat, Text) +r13 = + id = ((x -> x): forall a. a -> a) + (id 10, id "foo") + diff --git a/unison-src/tests/r14.u b/unison-src/tests/r14.u new file mode 100644 index 0000000000..3922c6613a --- /dev/null +++ b/unison-src/tests/r14.u @@ -0,0 +1,4 @@ +r14 : (forall a . 
a -> a) -> (Nat, Text) +r14 id = (id 10, id "foo") + + diff --git a/unison-src/tests/r2.u b/unison-src/tests/r2.u new file mode 100644 index 0000000000..a3b925bc1e --- /dev/null +++ b/unison-src/tests/r2.u @@ -0,0 +1,6 @@ +type Optional a = None | Some a +r2 : Nat +r2 = match Optional.Some true with + Optional.Some true -> 1 + Optional.Some false -> 0 + diff --git a/unison-src/tests/r3.u b/unison-src/tests/r3.u new file mode 100644 index 0000000000..74b76105f8 --- /dev/null +++ b/unison-src/tests/r3.u @@ -0,0 +1,6 @@ +r3 : Nat +r3 = match Optional.Some true with + Optional.Some true -> 1 + Optional.Some false -> 0 + + diff --git a/unison-src/tests/r4negate.u b/unison-src/tests/r4negate.u new file mode 100644 index 0000000000..ea19a3b4bb --- /dev/null +++ b/unison-src/tests/r4negate.u @@ -0,0 +1,5 @@ +r4 : Int -> Int +r4 x = match x with + +1 -> -1 + _ -> Int.negate x + diff --git a/unison-src/tests/r4x.u b/unison-src/tests/r4x.u new file mode 100644 index 0000000000..1e7123f6ec --- /dev/null +++ b/unison-src/tests/r4x.u @@ -0,0 +1,3 @@ +r4 : Int -> Int +r4 = cases + +1 -> +1 diff --git a/unison-src/tests/r5.u b/unison-src/tests/r5.u new file mode 100644 index 0000000000..249bf9e034 --- /dev/null +++ b/unison-src/tests/r5.u @@ -0,0 +1,6 @@ +r5 : Float +r5 = match 2.2 with + 2.2 -> 3.0 + _ -> 1.0 + + diff --git a/unison-src/tests/r6.u b/unison-src/tests/r6.u new file mode 100644 index 0000000000..34a3ab4224 --- /dev/null +++ b/unison-src/tests/r6.u @@ -0,0 +1,4 @@ +r6 : () +r6 = match () with + () -> () + diff --git a/unison-src/tests/r7.0.u b/unison-src/tests/r7.0.u new file mode 100644 index 0000000000..fe8fbbdb34 --- /dev/null +++ b/unison-src/tests/r7.0.u @@ -0,0 +1,6 @@ +r7 : Nat +r7 = match () with + () -> 1 + +> r7 + diff --git a/unison-src/tests/r7.1.u b/unison-src/tests/r7.1.u new file mode 100644 index 0000000000..5f1eab0958 --- /dev/null +++ b/unison-src/tests/r7.1.u @@ -0,0 +1,5 @@ +r7 : Nat +r7 = match () with + x@() -> 1 + + diff --git 
a/unison-src/tests/r7.2.u b/unison-src/tests/r7.2.u new file mode 100644 index 0000000000..5ca723ffc1 --- /dev/null +++ b/unison-src/tests/r7.2.u @@ -0,0 +1,4 @@ +r7 : () +r7 = match () with + x@() -> x + diff --git a/unison-src/tests/r8.u b/unison-src/tests/r8.u new file mode 100644 index 0000000000..f745983558 --- /dev/null +++ b/unison-src/tests/r8.u @@ -0,0 +1,5 @@ +r8 = match (1,(2,(3,(4,(5,(6,(7,8))))))) with + (x,(y,(_,_))) -> 0 + +> r8 + diff --git a/unison-src/tests/r9.u b/unison-src/tests/r9.u new file mode 100644 index 0000000000..e274d714d1 --- /dev/null +++ b/unison-src/tests/r9.u @@ -0,0 +1,11 @@ +r9 : Nat +r9 = match 1 with + 9 -> 9 + 8 -> 8 + 7 -> 7 + 6 -> 6 + 5 -> 5 + _ -> 1 + +> r9 + diff --git a/unison-src/tests/rainbow.u b/unison-src/tests/rainbow.u new file mode 100644 index 0000000000..378118d1d5 --- /dev/null +++ b/unison-src/tests/rainbow.u @@ -0,0 +1,32 @@ +-- Hits all the syntactic elements listed in SyntaxHighlights.hs. +-- Use the 'view' command to see this in colour. 
+ +rainbow : Int ->{Ask Int} Int +rainbow x = + use Int isEven + number = 3 + text = "hello" + float = 3.14 + bool = false + lam z = + use Nat * + + z + 1 * 2 + seq = [1, 2, 3] + delay : '(Int -> Boolean) + delay _ = isEven + force = !delay +2 + a = if isEven x then Either.Left 0 else Either.Right 0 + b = if isEven x then 1 else 0 + c = match x with _ -> 3 + d = (Ask.ask : Int) + +42 + +ability Ask a where + ask : {Ask a} a + +type Either a b = Left a | Right b + +unique ability Zang where + zang : {Zang} Nat + +> () diff --git a/unison-src/tests/records.u b/unison-src/tests/records.u new file mode 100644 index 0000000000..2528896a65 --- /dev/null +++ b/unison-src/tests/records.u @@ -0,0 +1,12 @@ + +type Point x y = { x : x, y : y } + +type Point2 = { point2 : Nat, f : Nat } + +type Monoid a = { zero : a, plus : a -> a -> a } + +> Point.x.set 10 (Point 0 0) +> Point.x (Point 10 0) +> Point.y (Point 0 10) +> Point.x.modify ((+) 1) (Point 0 0) +> Point.y.modify ((+) 1) (Point 0 0) diff --git a/unison-src/tests/runtime-crash.uu b/unison-src/tests/runtime-crash.uu new file mode 100644 index 0000000000..dc5ba83a32 --- /dev/null +++ b/unison-src/tests/runtime-crash.uu @@ -0,0 +1,13 @@ + +drop1 = Text.drop 1 + +> drop1 "heyo" + +--- + +gives a runtime error - + +unison: user error (type error, expecting N, got "heyo") + +which indicates it is going to the runtime stack to get that `1` value, +rather than pulling it from the arguments diff --git a/unison-src/tests/sequence-at-0.u b/unison-src/tests/sequence-at-0.u new file mode 100644 index 0000000000..37f6429351 --- /dev/null +++ b/unison-src/tests/sequence-at-0.u @@ -0,0 +1,2 @@ +> match at 0 [100] with + Optional.Some _ -> "Hooray!" 
diff --git a/unison-src/tests/sequence-literal-argument-parsing.u b/unison-src/tests/sequence-literal-argument-parsing.u new file mode 100644 index 0000000000..8005a67566 --- /dev/null +++ b/unison-src/tests/sequence-literal-argument-parsing.u @@ -0,0 +1,5 @@ +type X a = X [a] + +f : X a -> a +f = cases + X.X [b] -> b diff --git a/unison-src/tests/sequence-literal.u b/unison-src/tests/sequence-literal.u new file mode 100644 index 0000000000..f2d43795cd --- /dev/null +++ b/unison-src/tests/sequence-literal.u @@ -0,0 +1,19 @@ +a = [1,2,3] +b = [1 ,2 ,3 + ] +c = [ 1 , 2 , 3 ] +d = [ 1 + , 2 + , 3 ] +e = [ 1 + , 2 + , 3 + ] +f = + [ 1 + , 2 + , 3 + ] +g = [ 1 + , 2, + 3 ] diff --git a/unison-src/tests/soe.u b/unison-src/tests/soe.u new file mode 100644 index 0000000000..0acf142239 --- /dev/null +++ b/unison-src/tests/soe.u @@ -0,0 +1,124 @@ + +use Universal == < + +type Future a = Future ('{Remote} a) + +-- A simple distributed computation ability +ability Remote where + + -- Spawn a new node + spawn : {Remote} Node + + -- Sequentially evaluate the given thunk on another node + -- then return to the current node when it completes + at : n -> '{Remote} a -> {Remote} a + + -- Start a computation running, returning an `r` that can be forced to + -- await the result of the computation + fork : '{Remote} a ->{Remote} Future a + +type Node = Node Nat -- more realistic would be perhaps a (Hostname, PublicKey) pair + +force : Future a ->{Remote} a +force = cases Future.Future r -> !r + +-- Let's test out this beast! do we need to deploy our code to some EC2 instances?? +-- Gak, no not yet, we just want to test locally, let's write a handler +-- for the `Remote` ability that simulates everything locally! 
+ +Remote.runLocal : '{Remote} a -> a +Remote.runLocal r = + use Future Future + step nid = cases + {a} -> a + {Remote.fork t -> k} -> handle k (Future t) with step nid + {Remote.spawn -> k} -> handle k nid with step (Node.increment nid) + {Remote.at _ t -> k} -> handle k !t with step nid + handle !r with step (Node.Node 0) + +Remote.forkAt : Node -> '{Remote} a ->{Remote} (Future a) +Remote.forkAt node r = Remote.fork '(Remote.at node r) + +use Optional None Some +use Monoid Monoid +use List ++ + +List.map : (a ->{e} b) -> [a] ->{e} [b] +List.map f as = + go f acc as i = match List.at i as with + None -> acc + Some a -> go f (acc `snoc` f a) as (i + 1) + go f [] as 0 + +type Monoid a = Monoid (a -> a -> a) a + +Monoid.zero = cases Monoid.Monoid op z -> z +Monoid.op = cases Monoid.Monoid op z -> op + +Monoid.orElse m = cases + None -> Monoid.zero m + Some a -> a + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + go acc a b = match List.at 0 a with + None -> acc ++ b + Some hd1 -> match at 0 b with + None -> acc ++ a + Some hd2 -> + if hd1 `lte` hd2 then go (acc `snoc` hd1) (drop 1 a) b + else go (acc `snoc` hd2) a (drop 1 b) + go [] a b + +dmap : (a ->{Remote} b) -> [a] ->{Remote} [b] +dmap f as = + bs = List.map (a -> Remote.forkAt Remote.spawn '(f a)) as + List.map force bs + +dreduce : Monoid a -> [a] ->{Remote} a +dreduce m a = + if size a < 2 then Monoid.orElse m (List.at 0 a) + else + l = Remote.forkAt Remote.spawn '(dreduce m (take (size a / 2) a)) + r = Remote.forkAt Remote.spawn '(dreduce m (drop (size a / 2) a)) + Monoid.op m (force l) (force r) + +dmapReduce : (a ->{Remote} b) -> Monoid b -> [a] ->{Remote} b +dmapReduce f m as = dreduce m (List.map f as) + +dsort : (a -> a -> Boolean) -> [a] ->{Remote} [a] +dsort lte a = + dmapReduce (a -> [a]) (Monoid (merge lte) []) a + +sort : (a -> a -> Boolean) -> [a] -> [a] +sort lte a = + if List.size a < 2 then a + else + l = sort lte (take (size a / 2) a) + r = sort lte (drop (size a / 2) a) 
+ merge lte l r + +Node.increment : Node -> Node +Node.increment n = + use Node Node -- the constructor + match n with Node n -> Node (n + 1) + +> Remote.runLocal '(dsort (<) [3,2,1,1,2,3,9182,1,2,34,1,23]) + +halve : [a] -> Optional ([a], [a]) +halve a = + if size a == 0 then None + else Some (take (size a / 2) a, drop (size a / 2) a) + +foldMap : (a -> b) -> Monoid b -> [a] -> b +foldMap f m a = + base a = match List.at 0 a with + None -> zero m + Some a -> f a + if size a < 2 then base a + else match halve a with + None -> zero m + Some (l, r) -> op m (foldMap f m l) (foldMap f m r) + +> foldMap (x -> x) (Monoid (+) 0) [1] +> Remote.runLocal '(dmap (x -> x + 1) [1,2,3,4]) diff --git a/unison-src/tests/soe2.u b/unison-src/tests/soe2.u new file mode 100644 index 0000000000..86001e78f0 --- /dev/null +++ b/unison-src/tests/soe2.u @@ -0,0 +1,47 @@ +use Universal == < +use Optional None Some + +uncons : [a] -> Optional (a, [a]) +uncons a = match at 0 a with + None -> None + Some hd -> Some (hd, drop 1 a) + +merge : (a -> a -> Boolean) -> [a] -> [a] -> [a] +merge lte a b = + go acc a b = match (uncons a, uncons b) with + (None, _) -> acc ++ b + (_, None) -> acc ++ a + (Some (h1,t1), Some (h2,t2)) -> + if h1 `lte` h2 then go (acc `snoc` h1) (drop 1 a) b + else go (acc `snoc` h2) a (drop 1 b) + go [] a b + +-- let's make sure it works +> merge (<) [1,3,4,99,504,799] [0,19,22,23] + +isEmpty : [a] -> Boolean +isEmpty a = size a == 0 + +halve : [a] -> Optional ([a], [a]) +halve as = + if isEmpty as then None + else Some (take (size as / 2) as, drop (size as / 2) as) + +sort : (a -> a -> Boolean) -> [a] -> [a] +sort lte as = if size as < 2 then as else match halve as with + None -> as + Some (left, right) -> + l = sort lte left + r = sort lte right + merge lte l r + +-- let's make sure it works + +> sort (<) [3,2,1,1,2,3,9182,1,2,34,1,23] + +-- > sort (<) ["Dave", "Carol", "Eve", "Alice", "Bob", "Francis", "Hal", "Illy", "Joanna", "Greg", "Karen"] + +-- > sort (<) 
[3,2,1,1,2,3,9182,1,2,34,1,"oops"] + +-- > merge (<) [1,4,5,90,102] ["a", "b"] + diff --git a/unison-src/tests/spurious-ability-fail-underapply.u b/unison-src/tests/spurious-ability-fail-underapply.u new file mode 100644 index 0000000000..6d3c1fe79f --- /dev/null +++ b/unison-src/tests/spurious-ability-fail-underapply.u @@ -0,0 +1,8 @@ +ability Woot where + woot : {Woot} Nat + +wha : ((a ->{Woot} a) -> a ->{Woot} a) -> Nat +wha f = + blah a = f' a + f' = f blah + 42 diff --git a/unison-src/tests/spurious-ability-fail.u b/unison-src/tests/spurious-ability-fail.u new file mode 100644 index 0000000000..4bee905a42 --- /dev/null +++ b/unison-src/tests/spurious-ability-fail.u @@ -0,0 +1,16 @@ +--The expression in red needs the {𝛆} ability, but this location only has access to the {𝛆} ability, +-- +-- 8 | odd n = if n == 1 then true else even2 (n `drop` 1) + +use Universal == + +even : Nat -> Boolean +even n = if n == 0 then true else odd (n `drop` 1) + +odd : Nat -> Boolean +odd n = if n == 1 then true else even2 (n `drop` 1) + +even2 = even + +increment : Nat -> Nat -- signature is optional +increment n = n + 1 diff --git a/unison-src/tests/state1.u b/unison-src/tests/state1.u new file mode 100644 index 0000000000..61b0e2cb98 --- /dev/null +++ b/unison-src/tests/state1.u @@ -0,0 +1,15 @@ +--State1 ability +ability State se2 where + put : ∀ se . se -> {State se} () + get : ∀ se . () -> {State se} se + +-- state : ∀ s a . s -> Request (State s) a -> (s, a) +state woot = cases + { State.put snew -> k } -> handle k () with state snew + { State.get () -> k } -> handle k woot with state woot + { a } -> (woot, a) + +blah : ∀ s a . s -> Request (State s) a -> (s, a) +blah = state + +> () diff --git a/unison-src/tests/state1a.u b/unison-src/tests/state1a.u new file mode 100644 index 0000000000..471170b869 --- /dev/null +++ b/unison-src/tests/state1a.u @@ -0,0 +1,11 @@ +--State1a ability +ability State se2 where + put : ∀ se . se -> {State se} () + get : ∀ se . 
{State se} se +id : Int -> Int +id i = i +foo : () -> {State Int} Int +foo unit = id (State.get Int.+ State.get) + +> () + diff --git a/unison-src/tests/state2.u b/unison-src/tests/state2.u new file mode 100644 index 0000000000..62337b1074 --- /dev/null +++ b/unison-src/tests/state2.u @@ -0,0 +1,11 @@ +--State2 ability +ability State se2 where + put : ∀ se . se -> {State se} () + get : ∀ se . () -> {State se} se +state : ∀ s a . s -> Request (State s) a -> (s, a) +state woot = cases + { State.get () -> k } -> handle k woot with state woot + { State.put snew -> k } -> handle k () with state snew + { a } -> (woot, a) + +> () diff --git a/unison-src/tests/state2a-min.u b/unison-src/tests/state2a-min.u new file mode 100644 index 0000000000..63a632a703 --- /dev/null +++ b/unison-src/tests/state2a-min.u @@ -0,0 +1,17 @@ +--State2 ability +ability State s where + put : s -> {State s} () + +state : s -> Request (State s) a -> a +state s = cases + { State.put snew -> k } -> handle k () with state snew + { a } -> a + +ex : Text +ex = handle + State.put (11 + 1) + State.put (5 + 5) + "hello" + with state 10 + +> ex diff --git a/unison-src/tests/state2a-min.ur b/unison-src/tests/state2a-min.ur new file mode 100644 index 0000000000..3580093b9d --- /dev/null +++ b/unison-src/tests/state2a-min.ur @@ -0,0 +1 @@ +"hello" diff --git a/unison-src/tests/state2a.u b/unison-src/tests/state2a.u new file mode 100644 index 0000000000..c2dcc58a00 --- /dev/null +++ b/unison-src/tests/state2a.u @@ -0,0 +1,50 @@ +--State2 ability + +type Optional a = None | Some a + +ability State s where + put : s -> {State s} () + get : {State s} s + +state : s -> Request (State s) a -> (s, a) +state s = cases + { State.get -> k } -> handle k s with state s + { State.put snew -> k } -> handle k () with state snew + { a } -> (s, a) + +modify : (s ->{} s) ->{State s} () +modify f = + s = State.get + s2 = f s + State.put s2 + +increment : '{State Nat} () +increment = '(modify ((+) 1)) + +second : (a, b) -> b 
+second = cases (_,b) -> b + +first : (a, b) -> a +first = cases (a,_) -> a + +ex : Text +ex = + result : (Nat, Text) + result = handle + State.put (11 + 1) + x = State.get + State.put (5 + 5) + "hello" + with state 10 + + second result + +> ex + +modify2 : (s -> s) ->{State s} () +modify2 f = + s = State.get + s2 = f s + State.put s2 + +--- diff --git a/unison-src/tests/state2a.uu b/unison-src/tests/state2a.uu new file mode 100644 index 0000000000..82a2306eb9 --- /dev/null +++ b/unison-src/tests/state2a.uu @@ -0,0 +1,30 @@ +--State2 ability + +type Optional a = None | Some a + +ability State s where + put : s -> {State s} () + get : {State s} s + +state : s -> Request (State s) a -> (s, a) +state s = cases + { State.get -> k } -> handle k s with state s + { State.put snew -> k } -> handle k () with state snew + { a } -> (s, a) + +modify3 : (s -> s) -> () +modify3 f = + s = State.get + s2 = f s + State.put s2 + +--- + +limitation here is that inferred ability vars can't refer to universal vars in +the same type signature + +the inferred abilities are existentials, which are allocated up front, so +they can't bind to the universals nor does that really make sense + +would need some nondeterminism or multiple phases in the typechecking process to +do better diff --git a/unison-src/tests/state2b-min.u b/unison-src/tests/state2b-min.u new file mode 100644 index 0000000000..257ca9e3e3 --- /dev/null +++ b/unison-src/tests/state2b-min.u @@ -0,0 +1,15 @@ +--State2 ability +ability State s where + put : s -> {State s} () + +state : s -> Request (State s) a -> s +state s = cases + { State.put snew -> k } -> handle k () with state snew + { a } -> s + +> handle + State.put (11 + 1) + State.put (5 + 15) + () + with state 10 + -- should be 20 diff --git a/unison-src/tests/state2b-min.ur b/unison-src/tests/state2b-min.ur new file mode 100644 index 0000000000..209e3ef4b6 --- /dev/null +++ b/unison-src/tests/state2b-min.ur @@ -0,0 +1 @@ +20 diff --git a/unison-src/tests/state2b.u 
b/unison-src/tests/state2b.u new file mode 100644 index 0000000000..b036ed0283 --- /dev/null +++ b/unison-src/tests/state2b.u @@ -0,0 +1,39 @@ +--State2 ability + +type Optional a = None | Some a + +ability State s where + put : s -> {State s} () + get : {State s} s + +state : s -> Request (State s) a -> (s, a) +state s = cases + { State.get -> k } -> handle k s with state s + { State.put snew -> k } -> handle k () with state snew + { a } -> (s, a) + +modify : (s ->{} s) -> {State s} () +modify f = State.put (f State.get) + +increment : '{State Nat} () +increment = '(modify ((+) 1)) + +second : (a, b) -> b +second = cases (_,b) -> b + +first : (a, b) -> a +first = cases (a,_) -> a + +ex : Nat +ex = + result = handle + State.put (11 + 1) + State.put (5 + 15) + () + with state 10 + + first result + +-- should return `20`, but actually returns `12` +-- seems like only one `put` is actually being run +> ex diff --git a/unison-src/tests/state3.u b/unison-src/tests/state3.u new file mode 100644 index 0000000000..cc15016819 --- /dev/null +++ b/unison-src/tests/state3.u @@ -0,0 +1,30 @@ +--State3 ability +ability State se2 where + put : ∀ se . se -> {State se} () + get : ∀ se . () -> {State se} se + +state : ∀ s a . 
s -> Request (State s) a -> (s, a) +state woot = cases + { State.get () -> k } -> handle k woot with state woot + { State.put snew -> k } -> handle k () with state snew + { a } -> (woot, a) + +ex1 : (Nat, Nat) +ex1 = handle State.get () with state 42 + +ex1a : (Nat, Nat) +ex1a = handle 49 with state 42 + +ex1b = handle 0 with x -> 10 + +ex1c : Nat +ex1c = handle 0 with x -> 10 + +ex1d = handle 49 with state 42 + +ex2 = handle State.get () with state 42 + +ex3 : (Nat, Nat) +ex3 = ex2 + +> ex3 diff --git a/unison-src/tests/state4.u b/unison-src/tests/state4.u new file mode 100644 index 0000000000..3db4bd9c40 --- /dev/null +++ b/unison-src/tests/state4.u @@ -0,0 +1,26 @@ +ability State s where + put : s -> {State s} () + get : {State s} s + +state : s -> Request (State s) a -> s +state s = cases + { State.get -> k } -> handle k s with state s + { State.put snew -> k } -> handle k () with state snew + { a } -> s + +modify : (s ->{} s) -> {State s} () +modify f = State.put (f State.get) + +increment : '{State Nat} () +increment = '(modify ((+) 1)) + +ex : Nat +ex = handle + State.put (11 + 1) + !increment + !increment + !increment + State.get -- should be 15, amirite?? 
+ with state 10 + +> ex diff --git a/unison-src/tests/state4.ur b/unison-src/tests/state4.ur new file mode 100644 index 0000000000..60d3b2f4a4 --- /dev/null +++ b/unison-src/tests/state4.ur @@ -0,0 +1 @@ +15 diff --git a/unison-src/tests/state4a.u b/unison-src/tests/state4a.u new file mode 100644 index 0000000000..04544e9451 --- /dev/null +++ b/unison-src/tests/state4a.u @@ -0,0 +1,26 @@ +ability State s where + put : s -> {State s} () + get : {State s} s + +state : s -> Request (State s) a -> s +state s = cases + { State.get -> k } -> handle k s with state s + { State.put snew -> k } -> handle k () with state snew + { a } -> s + +modify : (s ->{} s) -> {State s} () +modify f = State.put (f State.get) + +increment : '{State Nat} () +increment = '(modify ((+) 1)) + +ex : Nat +ex = handle + State.put (11 + 1) + -- !increment + -- !increment + -- !increment + State.get -- should be 15, amirite?? + with state 10 + +> ex diff --git a/unison-src/tests/state4a.ur b/unison-src/tests/state4a.ur new file mode 100644 index 0000000000..48082f72f0 --- /dev/null +++ b/unison-src/tests/state4a.ur @@ -0,0 +1 @@ +12 diff --git a/unison-src/tests/stream.u b/unison-src/tests/stream.u new file mode 100644 index 0000000000..d9b093266c --- /dev/null +++ b/unison-src/tests/stream.u @@ -0,0 +1,70 @@ +ability Emit a where + emit : a ->{Emit a} () + +type Stream e a r = Stream ('{e, Emit a} r) + +use Stream Stream +use Optional None Some +use Universal == + +-- unfold : s -> (s ->{} Optional (a, s)) -> Stream e a () +unfold s f = + step s = match f s with + None -> () + Some (a, s) -> emit a + step s + Stream '(step s) + +run : Stream e a r ->{e, Emit a} r +run = cases Stream c -> !c + +run' = cases Stream s -> s + +(++) : Stream {e} a r -> Stream {e} a r -> Stream {e} a r +s1 ++ s2 = Stream '(forceBoth (run' s1) (run' s2)) + +from : Nat -> Stream e Nat () +from n = unfold n (n -> Some (n, n + 1)) + +-- take : Nat -> Stream {} a () -> Stream {} a () +take n s = + step n = cases + {Emit.emit 
a -> k} -> + if n Nat.== 0 then () + else + Emit.emit a + handle k () with step (n `drop` 1) + {r} -> () + Stream ' handle run s with step n + +-- map : (a -> b) -> Stream {e} a r -> Stream {e} b r +map f s = + step = cases + {r} -> r + {Emit.emit a -> k} -> + Emit.emit (f a) + handle k () with step + Stream ' handle run s with step + +-- toSeq : Stream {e} a r ->{e} [a] +toSeq s = + step acc = cases + {Emit.emit a -> k} -> handle k () with step (acc `snoc` a) + {_} -> acc + handle run s with step [] + +fromSeq : [a] -> Stream e a () +fromSeq a = + step a = match List.at 0 a with + None -> None + Some h -> Some (h, drop 1 a) + unfold a step + +> toSeq (take 7 (map (x -> x + 10) (from 0))) +-- > toSeq (Stream.fromSeq [1,2,3] ++ Stream.fromSeq [4,5,6]) +-- > toSeq (Stream.take 20 (from 0)) + +-- run two thunks in sequence +forceBoth a b = + !a + !b diff --git a/unison-src/tests/stream2.uu b/unison-src/tests/stream2.uu new file mode 100644 index 0000000000..fd2862d479 --- /dev/null +++ b/unison-src/tests/stream2.uu @@ -0,0 +1,81 @@ +ability Emit a where + emit : a ->{Emit a} () + +type Stream e a r = Stream ('{e, Emit a} r) + +use Stream Stream +use Optional None Some +use Universal == + +namespace Stream where + + step : + (a ->{e} b) -> + Request {Emit a} r ->{e, Emit b} r + step f = cases + {r} -> r + {Emit.emit a -> k} -> + Emit.emit (f a) + handle k () with step f + + -- map : (a -> b) -> Stream {e} a r -> Stream {e} b r + map : (a ->{e} b) + -> Stream {e} a r + -> Stream {e} b r + map f s = Stream ' handle run s with step f + + run : Stream e a r ->{e, Emit a} r + run = cases Stream c -> !c + + --- + -- run' = cases Stream s -> s + -- unfold : s -> (s ->{} Optional (a, s)) -> Stream e a () + unfold s f = + step = cases + None -> () + Some (a, s) -> emit a + step s + Stream '(step s) + + + (++) : Stream {e} a r -> Stream {e} a r -> Stream {e} a r + s1 ++ s2 = Stream '(run' s1 !! 
run' s2) + + from : Nat -> Stream e Nat () + from n = unfold n (n -> Some (n, n + 1)) + + -- take : Nat -> Stream {} a () -> Stream {} a () + take n s = + step n = cases + {Emit.emit a -> k} -> + if n Nat.== 0 then () + else + Emit.emit a + handle k () with step (n `drop` 1) + {r} -> () + Stream ' handle run s with step n + + +--- + -- toSeq : Stream {e} a r ->{e} [a] + toSeq s = + step acc = cases + {Emit.emit a -> k} -> handle k () with step (acc `snoc` a) + {_} -> acc + handle run s with step [] + + fromSeq : [a] -> Stream e a () + fromSeq a = + step a = match List.at 0 a with + None -> None + Some h -> Some (h, drop 1 a) + unfold a step + +> toSeq (Stream.take 7 (Stream.map (x -> x + 10) (from 0))) +-- > toSeq (Stream.fromSeq [1,2,3] ++ Stream.fromSeq [4,5,6]) +-- > toSeq (Stream.take 20 (from 0)) + +-- run two thunks in sequence +a !! b = + !a + !b diff --git a/unison-src/tests/stream3.uu b/unison-src/tests/stream3.uu new file mode 100644 index 0000000000..3e6a2d5e8d --- /dev/null +++ b/unison-src/tests/stream3.uu @@ -0,0 +1,71 @@ +ability Emit a where + emit : a ->{Emit a} () + +type Stream e a r = Stream ('{e, Emit a} r) + +use Stream Stream +use Optional None Some +use Universal == + +namespace Stream where + + step : + (a ->{e} b) -> + Request {Emit a} r ->{e, Emit b} r + step f = cases + {r} -> r + {Emit.emit a -> k} -> + Emit.emit (f a) + handle k () with step f + + + -- map : (a -> b) -> Stream {e} a r -> Stream {e} b r + -- map : (a ->{e} b) + -- -> Stream {e} a r + -- -> Stream {e} b r + -- 0. this gets a weird type + map f s = Stream ' handle run s with step f + + -- 1. inferred type of `map` required an `o -> o` for some reason + map1 f s = + step f = cases + {r} -> r + {Emit.emit a -> k} -> + Emit.emit (f a) + handle k () with step f + Stream ' handle run s with step f + + -- 2. 
gets the same weird type + map2 f s = + step : + (a ->{e} b) -> + Request {Emit a} r ->{e, Emit b} r + step f = cases + {r} -> r + {Emit.emit a -> k} -> + Emit.emit (f a) + handle k () with step f + Stream ' handle run s with step f + + run : Stream e a r ->{e, Emit a} r + run = cases Stream c -> !c + +ability Abort where + abort : {Abort} a + +--- +-- x : Stream {Abort} Nat () +x = Stream 'let + Emit.emit 1 + Abort.abort + Emit.emit 2 + +--- +I found a value of type Var User "a"-94 where I expected to find one of type b96: + + 24 | -> Stream {e} b r + 25 | map f s = Stream ' handle run s with step f + + from right here: + + 22 | map : (a ->{e} b) diff --git a/unison-src/tests/suffix-resolve.u b/unison-src/tests/suffix-resolve.u new file mode 100644 index 0000000000..1e3fc5563c --- /dev/null +++ b/unison-src/tests/suffix-resolve.u @@ -0,0 +1,23 @@ + +-- This file shows that any unique suffix can be used to refer +-- to a definition. + +-- no imports needed here, even though FQN is builtin.Int +foo : Int +foo = +1 + +-- no imports needed here, even though FQNs are builtin.Optional.{None,Some} +ex1 = cases + None -> 0 + Some a -> a + 1 + +-- you can still use the +ex2 = cases + Optional.None -> 99 + Optional.Some _ -> 0 + +ex3 = builtin.Optional.None + +-- TDNR would have handled this one before, but TDNR can't do +-- type resolution or pattern resolution +zoink = Some 42 diff --git a/unison-src/tests/tdnr.u b/unison-src/tests/tdnr.u new file mode 100644 index 0000000000..cf29ddf4ae --- /dev/null +++ b/unison-src/tests/tdnr.u @@ -0,0 +1,4 @@ +-- Should resolve + with Type-directeded name resolution + +x : Nat +x = 4 + 2 diff --git a/unison-src/tests/tdnr2.u b/unison-src/tests/tdnr2.u new file mode 100644 index 0000000000..ae1d855278 --- /dev/null +++ b/unison-src/tests/tdnr2.u @@ -0,0 +1,13 @@ +x : Nat +x = 42 + 2 + +y : Int +y = +42 + -2 + +z : Float +z = 42.0 - 2.0 + +foo a b = (a + b) + 3 + +bar a b = 3 + b + a + diff --git a/unison-src/tests/tdnr3.u 
b/unison-src/tests/tdnr3.u new file mode 100644 index 0000000000..b57cb6aad1 --- /dev/null +++ b/unison-src/tests/tdnr3.u @@ -0,0 +1,6 @@ +-- Local definitions should be resolved by type + +Foo.bar x = x + 1 + +z = bar 99 + diff --git a/unison-src/tests/tdnr4.u b/unison-src/tests/tdnr4.u new file mode 100644 index 0000000000..e07bdfdd43 --- /dev/null +++ b/unison-src/tests/tdnr4.u @@ -0,0 +1,4 @@ +x = None + +y = Some 10 + diff --git a/unison-src/tests/text-escaping.u b/unison-src/tests/text-escaping.u new file mode 100644 index 0000000000..d1dfa01b1b --- /dev/null +++ b/unison-src/tests/text-escaping.u @@ -0,0 +1,10 @@ +id x = x + +x = id ("\n") + +find : Text -> Text +find s = match (Text.take 1 s) with + "\n" -> "found" + _ -> "not found" + +> (x, find "\nbar") diff --git a/unison-src/tests/text-escaping.ur b/unison-src/tests/text-escaping.ur new file mode 100644 index 0000000000..7b7648190e --- /dev/null +++ b/unison-src/tests/text-escaping.ur @@ -0,0 +1 @@ +("\n", "found") diff --git a/unison-src/tests/text-pattern.u b/unison-src/tests/text-pattern.u new file mode 100644 index 0000000000..c906396156 --- /dev/null +++ b/unison-src/tests/text-pattern.u @@ -0,0 +1,6 @@ +foo = cases + "xyz" -> false + "abc" -> true + _ -> false + +> (foo "abc", foo "xyz", foo "hello, world") diff --git a/unison-src/tests/text-pattern.ur b/unison-src/tests/text-pattern.ur new file mode 100644 index 0000000000..8733091c2a --- /dev/null +++ b/unison-src/tests/text-pattern.ur @@ -0,0 +1 @@ +(true, false, false) diff --git a/unison-src/tests/tictactoe.u b/unison-src/tests/tictactoe.u new file mode 100644 index 0000000000..e3dde4d4ba --- /dev/null +++ b/unison-src/tests/tictactoe.u @@ -0,0 +1,34 @@ +-- board piece +type P = X | O | E + +type Board = Board P P P P P P P P P + +use Board Board +use P O X E +use Optional Some None + +orElse a b = + match a with + None -> b + a -> a + +isWin : Board -> Optional P +isWin board = + same : P -> P -> P -> Optional P + same a b c = if (a == b) 
&& (a == c) && (not (a == E)) + then Some a + else None + match board with + -- vertical top/center/bottom + -- horizontal left/center/right + -- diagonal rising/falling + Board a b c + d e f + g h i -> + (same a b c `orElse` same d e f `orElse` same g h i `orElse` + same a d g `orElse` same b e h `orElse` same c f i `orElse` + same a e i `orElse` same g e c) + +> isWin (Board X O X + O X X + O E X) diff --git a/unison-src/tests/tictactoe0-array-oob1.u b/unison-src/tests/tictactoe0-array-oob1.u new file mode 100644 index 0000000000..22989cd6e6 --- /dev/null +++ b/unison-src/tests/tictactoe0-array-oob1.u @@ -0,0 +1,12 @@ +-- board piece + +type Board = Board Nat Nat Nat + +use Board Board + +-- uncommenting these gives errors from NPE to array index out of bounds -1, -2 +-- x = 1 +-- y = 2 + +ex = match Board 77 88 99 with + Board a b c -> c diff --git a/unison-src/tests/tictactoe0-npe.u b/unison-src/tests/tictactoe0-npe.u new file mode 100644 index 0000000000..d1845df897 --- /dev/null +++ b/unison-src/tests/tictactoe0-npe.u @@ -0,0 +1,17 @@ +-- board piece +type P = X | O | E + +type Board = Board P P P P P P P P P + +use Board Board +use P O X E + +whatevs a b c = a + +b = Board X O X O X X O E X +x = 1 +y = 2 +z = 3 + +ex = match b with + Board a b c d e f g h i -> a diff --git a/unison-src/tests/tictactoe0.u b/unison-src/tests/tictactoe0.u new file mode 100644 index 0000000000..a6e0ff7a52 --- /dev/null +++ b/unison-src/tests/tictactoe0.u @@ -0,0 +1,33 @@ +-- board piece +type P = X | O | E + +type Board = Board P P P P P P P P P + +use Board Board +use P O X E +use Optional Some None + +orElse a b = + match a with + None -> b + a -> a + +b = (Board X O X + O X X + O E X) + +isWin board = + same : P -> P -> P -> Optional P + same a b c = if (a == b) && (a == c) && not (a == E) + then Some a + else None + match board with + -- vertical top/center/bottom + -- horizontal left/center/right + -- diagonal rising/falling + Board a b c + d e f + g h i -> (same a b c) + 
+> isWin b +-- Some 3 diff --git a/unison-src/tests/tictactoe2.u b/unison-src/tests/tictactoe2.u new file mode 100644 index 0000000000..cf02bcc44c --- /dev/null +++ b/unison-src/tests/tictactoe2.u @@ -0,0 +1,54 @@ +-- board piece +type P = X | O | E + +type Board = Board P P P P P P P P P + +use Board Board +use P O X E +use Optional Some None + +isWin : Board -> Optional P +isWin board = + same : P -> P -> P -> Optional P + same a b c = if ((a == b) && (a == c)) && (not (a == E)) + then Some a + else None + match board with + -- vertical top/center/bottom + -- horizontal left/center/right + -- diagonal rising/falling + Board a b c + _ _ _ + _ _ _ -> same a b c + + Board _ _ _ + a b c + _ _ _ -> same a b c + + Board _ _ _ + _ _ _ + a b c -> same a b c + + Board a _ _ + b _ _ + c _ _ -> same a b c + + Board _ a _ + _ b _ + _ c _ -> same a b c + + Board _ _ a + _ _ b + _ _ c -> same a b c + + Board a _ _ + _ b _ + _ _ c -> same a b c + + Board _ _ a + _ b _ + c _ _ -> same a b c + +x = isWin (Board X O X + O X X + O E X) diff --git a/unison-src/tests/tuple.u b/unison-src/tests/tuple.u new file mode 100644 index 0000000000..c568307866 --- /dev/null +++ b/unison-src/tests/tuple.u @@ -0,0 +1,4 @@ +(+) = (Nat.+) + +> match (1,2,3,4) with + (a,b,c,d) -> (a + b, c + d) diff --git a/unison-src/tests/tuple.ur b/unison-src/tests/tuple.ur new file mode 100644 index 0000000000..cdaaab5be0 --- /dev/null +++ b/unison-src/tests/tuple.ur @@ -0,0 +1 @@ +(3,7) diff --git a/unison-src/tests/type-application.u b/unison-src/tests/type-application.u new file mode 100644 index 0000000000..ae54823ad7 --- /dev/null +++ b/unison-src/tests/type-application.u @@ -0,0 +1,11 @@ + +ability Foo where + foo : {Foo} Nat + +type Wrap a = Wrap Nat + +blah : Wrap {Foo} -> Nat +blah = cases + Wrap.Wrap n -> n + 1 + +> blah (Wrap 99) diff --git a/unison-src/tests/underscore-parsing.u b/unison-src/tests/underscore-parsing.u new file mode 100644 index 0000000000..928cebb1a2 --- /dev/null +++ 
b/unison-src/tests/underscore-parsing.u @@ -0,0 +1,7 @@ +_prefix = 1 +prefix_ _x = _x +_prefix_ _ = 2 + +_x `_infix` y_ = (_x, y_) +x_ `infix_` _y = (x_, _y) +_ `_infix_` _ = () diff --git a/unison-src/tests/ungeneralize-bug.uu b/unison-src/tests/ungeneralize-bug.uu new file mode 100644 index 0000000000..5a5448ed17 --- /dev/null +++ b/unison-src/tests/ungeneralize-bug.uu @@ -0,0 +1,22 @@ + +use Foo Foo +use Optional Some None + +type Foo a b = Foo a (Optional b) + +foo : Foo a b -> (b -> c) -> Foo a c +foo x f = match x with + Foo a None -> Foo a None + +-- +-- 🌻 /Users/pchiusano/work/unison/unison-src/tests/typechecker-bug.u has changed, reloading... +-- I found a value of type b where I expected to find one of type c: +-- +-- 7 | foo : Foo a b -> (b -> c) -> Foo a c +-- 8 | foo x f = match x with +-- 9 | Foo a None -> Foo a None +-- +-- from right here: +-- +-- 7 | foo : Foo a b -> (b -> c) -> Foo a c +-- shouldn't be a type error diff --git a/unison-src/tests/unique.u b/unison-src/tests/unique.u new file mode 100644 index 0000000000..29b9745d66 --- /dev/null +++ b/unison-src/tests/unique.u @@ -0,0 +1,28 @@ + +unique ability Zing where zing : {Zang} Nat + +unique[asdlfkjasdflkj] ability Zang where + zang : {Zing} Nat + +unique + ability Blarg where + oog : {Blarg} Text + +unique type Bool = T | F + +unique[sdalfkjsdf] type BetterBool = Ya | Nah + +unique[asdflkajsdf] type Day + = Sun | Mon | Tue | Wed | Thu | Fri | Sat + +id x = x + +unique type Day2 + = Sun + | Mon + | Tue + | Wed + | Thu + | Fri + | Sat + diff --git a/unison-src/tests/void.u b/unison-src/tests/void.u new file mode 100644 index 0000000000..a4e646ad32 --- /dev/null +++ b/unison-src/tests/void.u @@ -0,0 +1,3 @@ +type Void = + +> 3 diff --git a/unison-src/transcripts/addupdatemessages.md b/unison-src/transcripts/addupdatemessages.md new file mode 100644 index 0000000000..68f7be5a52 --- /dev/null +++ b/unison-src/transcripts/addupdatemessages.md @@ -0,0 +1,63 @@ +# Adds and updates + +Let's set up 
some definitions to start: + +```ucm:hide +.> builtins.merge +``` + +```unison +x = 1 +y = 2 + +type X = One Nat +type Y = Two Nat Nat +``` + +Expected: `x` and `y`, `X`, and `Y` exist as above. UCM tells you this. + +```ucm +.scratch> add +``` + +Let's add an alias for `1` and `One`: + +```unison +z = 1 + +type Z = One Nat +``` + +Expected: `z` is now `1`. UCM tells you that this definition is also called `x`. +Also, `Z` is an alias for `X`. + +```ucm +.scratch> add +``` + +Let's update something that has an alias (to a value that doesn't have a name already): + +```unison +x = 3 +type X = Three Nat Nat Nat +``` + +Expected: `x` is now `3` and `X` has constructor `Three`. UCM tells you the old definitions were also called `z` and `Z` and these names have also been updated. + +```ucm +.scratch> update +``` + +Update it to something that already exists with a different name: + +```unison +x = 2 +type X = Two Nat Nat +``` + +Expected: `x` is now `2` and `X` is `Two`. UCM says the old definition was also named `z/Z`, and was also updated. And it says the new definition is also named `y/Y`. + +```ucm +.scratch> update +``` + diff --git a/unison-src/transcripts/addupdatemessages.output.md b/unison-src/transcripts/addupdatemessages.output.md new file mode 100644 index 0000000000..424c9b08f2 --- /dev/null +++ b/unison-src/transcripts/addupdatemessages.output.md @@ -0,0 +1,159 @@ +# Adds and updates + +Let's set up some definitions to start: + +```unison +x = 1 +y = 2 + +type X = One Nat +type Y = Two Nat Nat +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + type X + type Y + x : Nat + y : Nat + +``` +Expected: `x` and `y`, `X`, and `Y` exist as above. UCM tells you this. + +```ucm + ☝️ The namespace .scratch is empty. 
+ +.scratch> add + + ⍟ I've added these definitions: + + type X + type Y + x : Nat + y : Nat + +``` +Let's add an alias for `1` and `One`: + +```unison +z = 1 + +type Z = One Nat +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + type Z + (also named X) + z : Nat + (also named x) + +``` +Expected: `z` is now `1`. UCM tells you that this definition is also called `x`. +Also, `Z` is an alias for `X`. + +```ucm +.scratch> add + + ⍟ I've added these definitions: + + type Z + (also named X) + z : Nat + (also named x) + +``` +Let's update something that has an alias (to a value that doesn't have a name already): + +```unison +x = 3 +type X = Three Nat Nat Nat +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + type X + (The old definition is also named Z. I'll update this + name too.) + x : Nat + (The old definition is also named z. I'll update this + name too.) + +``` +Expected: `x` is now `3` and `X` has constructor `Three`. UCM tells you the old definitions were also called `z` and `Z` and these names have also been updated. + +```ucm +.scratch> update + + ⍟ I've updated these names to your new definition: + + type X + (The old definition was also named Z. I updated this name + too.) + x : Nat + (The old definition was also named z. I updated this name + too.) + +``` +Update it to something that already exists with a different name: + +```unison +x = 2 +type X = Two Nat Nat +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. 
You can `update` them to your + new definition: + + type X + (The old definition is also named Z. I'll update this + name too.) + (The new definition is already named Y as well.) + x : Nat + (The old definition is also named z. I'll update this + name too.) + (The new definition is already named y as well.) + +``` +Expected: `x` is now `2` and `X` is `Two`. UCM says the old definition was also named `z/Z`, and was also updated. And it says the new definition is also named `y/Y`. + +```ucm +.scratch> update + + ⍟ I've updated these names to your new definition: + + type X + (The old definition was also named Z. I updated this name + too.) + (The new definition is already named Y as well.) + x : Nat + (The old definition was also named z. I updated this name + too.) + (The new definition is already named y as well.) + +``` diff --git a/unison-src/transcripts/alias-many.md b/unison-src/transcripts/alias-many.md new file mode 100644 index 0000000000..bd92d3bf31 --- /dev/null +++ b/unison-src/transcripts/alias-many.md @@ -0,0 +1,130 @@ +```ucm:hide +.> builtins.merge +``` +```unison:hide:all +List.adjacentPairs : [a] -> [(a, a)] +List.adjacentPairs as = + go xs acc = + match xs with + [x, y] ++ t -> go t (acc :+ (x, y)) + _ -> acc + go as [] + +List.all : (a -> Boolean) -> [a] -> Boolean +List.all p xs = + match xs with + [] -> true + x +: xs -> (p x) && (List.all p xs) + +List.any : (a -> Boolean) -> [a] -> Boolean +List.any p xs = + match xs with + [] -> false + x +: xs -> (p x) || (List.any p xs) + +List.chunk : Nat -> [a] -> [[a]] +List.chunk n as = + go acc rest = + match splitAt n rest with + (c, []) -> acc :+ c + (c, cs) -> go (acc :+ c) cs + go [] as + +List.chunksOf : Nat -> [a] -> [[a]] +List.chunksOf n text = + go acc text = + p = splitAt n text + match p with + ([], _) -> acc + (a, b) -> go (acc :+ a) b + go [] text + +List.dropWhile : (a -> Boolean) -> [a] -> [a] +List.dropWhile p xs = + match xs with + i +: l -> if p i then List.dropWhile p l else xs + _ 
-> [] + +List.first : [a] -> Optional a +List.first a = List.at 0 a + +List.init : [a] -> Optional [a] +List.init as = + match as with + [] -> None + as :+ _ -> Some as + +List.intersperse : a -> [a] -> [a] +List.intersperse a as = + go acc as = + match as with + [] -> acc + [x] -> acc :+ x + x +: xs -> go (acc :+ x :+ a) xs + go [] as + +List.isEmpty : [a] -> Boolean +List.isEmpty as = List.size as == 0 + +List.last : [a] -> Optional a +List.last as = + match as with + [] -> None + _ :+ a -> Some a + +List.replicate : Nat -> a -> [a] +List.replicate n a = + go n acc = if n == 0 then acc else go (Nat.drop n 1) (a +: acc) + go n [] + +List.splitAt : Nat -> [a] -> ([a], [a]) +List.splitAt n as = (List.take n as, List.drop n as) + +List.tail : [a] -> Optional [a] +List.tail as = + match as with + [] -> None + _ +: as -> Some as + +List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] +List.takeWhile p xs = + go xs acc = + match xs with + x +: xs -> if p x then go xs (acc :+ x) else acc + _ -> acc + go xs [] +``` +```ucm:hide +.runar> add +``` + +The `alias.many` command can be used to copy definitions from the current namespace into your curated one. +The names that will be used in the target namespace are the names you specify, relative to the current namespace: + +``` +.> help alias.many + + alias.many (or copy) + `alias.many [relative2...] ` creates aliases `relative1`, `relative2`, ... + in the namespace `namespace`. + `alias.many foo.foo bar.bar .quux` creates aliases `.quux.foo.foo` and `.quux.bar.bar`. +``` + +Let's try it! + +```ucm +.> cd .builtin +.builtin> find +.builtin> alias.many 94-104 .mylib +``` + +I want to incorporate a few more from another namespace: +```ucm +.builtin> cd .runar +.runar> find +.runar> alias.many 1-15 .mylib +.runar> cd .mylib +.mylib> find +``` + +Thanks, `alias.many`! 
diff --git a/unison-src/transcripts/alias-many.output.md b/unison-src/transcripts/alias-many.output.md new file mode 100644 index 0000000000..6ce351cd5d --- /dev/null +++ b/unison-src/transcripts/alias-many.output.md @@ -0,0 +1,424 @@ +The `alias.many` command can be used to copy definitions from the current namespace into your curated one. +The names that will be used in the target namespace are the names you specify, relative to the current namespace: + +``` +.> help alias.many + + alias.many (or copy) + `alias.many [relative2...] ` creates aliases `relative1`, `relative2`, ... + in the namespace `namespace`. + `alias.many foo.foo bar.bar .quux` creates aliases `.quux.foo.foo` and `.quux.bar.bar`. + +``` + +Let's try it! + +```ucm +.> cd .builtin + +.builtin> find + + 1. builtin type Boolean + 2. Boolean.not : Boolean -> Boolean + 3. builtin type Bytes + 4. Bytes.++ : Bytes -> Bytes -> Bytes + 5. Bytes.at : Nat -> Bytes -> Optional Nat + 6. Bytes.drop : Nat -> Bytes -> Bytes + 7. Bytes.empty : Bytes + 8. Bytes.flatten : Bytes -> Bytes + 9. Bytes.fromBase16 : Bytes -> Either Text Bytes + 10. Bytes.fromBase32 : Bytes -> Either Text Bytes + 11. Bytes.fromBase64 : Bytes -> Either Text Bytes + 12. Bytes.fromBase64UrlUnpadded : Bytes -> Either Text Bytes + 13. Bytes.fromList : [Nat] -> Bytes + 14. Bytes.size : Bytes -> Nat + 15. Bytes.take : Nat -> Bytes -> Bytes + 16. Bytes.toBase16 : Bytes -> Bytes + 17. Bytes.toBase32 : Bytes -> Bytes + 18. Bytes.toBase64 : Bytes -> Bytes + 19. Bytes.toBase64UrlUnpadded : Bytes -> Bytes + 20. Bytes.toList : Bytes -> [Nat] + 21. builtin type Char + 22. Char.fromNat : Nat -> Char + 23. Char.toNat : Char -> Nat + 24. Debug.watch : Text -> a -> a + 25. unique type Doc + 26. Doc.Blob : Text -> Doc + 27. Doc.Evaluate : Term -> Doc + 28. Doc.Join : [Doc] -> Doc + 29. Doc.Link : Link -> Doc + 30. Doc.Signature : Term -> Doc + 31. Doc.Source : Link -> Doc + 32. type Either a b + 33. Either.Left : a -> Either a b + 34. 
Either.Right : b -> Either a b + 35. builtin type Float + 36. Float.* : Float -> Float -> Float + 37. Float.+ : Float -> Float -> Float + 38. Float.- : Float -> Float -> Float + 39. Float./ : Float -> Float -> Float + 40. Float.abs : Float -> Float + 41. Float.acos : Float -> Float + 42. Float.acosh : Float -> Float + 43. Float.asin : Float -> Float + 44. Float.asinh : Float -> Float + 45. Float.atan : Float -> Float + 46. Float.atan2 : Float -> Float -> Float + 47. Float.atanh : Float -> Float + 48. Float.ceiling : Float -> Int + 49. Float.cos : Float -> Float + 50. Float.cosh : Float -> Float + 51. Float.eq : Float -> Float -> Boolean + 52. Float.exp : Float -> Float + 53. Float.floor : Float -> Int + 54. Float.fromText : Text -> Optional Float + 55. Float.gt : Float -> Float -> Boolean + 56. Float.gteq : Float -> Float -> Boolean + 57. Float.log : Float -> Float + 58. Float.logBase : Float -> Float -> Float + 59. Float.lt : Float -> Float -> Boolean + 60. Float.lteq : Float -> Float -> Boolean + 61. Float.max : Float -> Float -> Float + 62. Float.min : Float -> Float -> Float + 63. Float.pow : Float -> Float -> Float + 64. Float.round : Float -> Int + 65. Float.sin : Float -> Float + 66. Float.sinh : Float -> Float + 67. Float.sqrt : Float -> Float + 68. Float.tan : Float -> Float + 69. Float.tanh : Float -> Float + 70. Float.toText : Float -> Text + 71. Float.truncate : Float -> Int + 72. builtin type Int + 73. Int.* : Int -> Int -> Int + 74. Int.+ : Int -> Int -> Int + 75. Int.- : Int -> Int -> Int + 76. Int./ : Int -> Int -> Int + 77. Int.and : Int -> Int -> Int + 78. Int.complement : Int -> Int + 79. Int.eq : Int -> Int -> Boolean + 80. Int.fromText : Text -> Optional Int + 81. Int.gt : Int -> Int -> Boolean + 82. Int.gteq : Int -> Int -> Boolean + 83. Int.increment : Int -> Int + 84. Int.isEven : Int -> Boolean + 85. Int.isOdd : Int -> Boolean + 86. Int.leadingZeros : Int -> Nat + 87. Int.lt : Int -> Int -> Boolean + 88. 
Int.lteq : Int -> Int -> Boolean + 89. Int.mod : Int -> Int -> Int + 90. Int.negate : Int -> Int + 91. Int.or : Int -> Int -> Int + 92. Int.pow : Int -> Nat -> Int + 93. Int.shiftLeft : Int -> Nat -> Int + 94. Int.shiftRight : Int -> Nat -> Int + 95. Int.signum : Int -> Int + 96. Int.toFloat : Int -> Float + 97. Int.toText : Int -> Text + 98. Int.trailingZeros : Int -> Nat + 99. Int.truncate0 : Int -> Nat + 100. Int.xor : Int -> Int -> Int + 101. unique type Link + 102. builtin type Link.Term + 103. Link.Term : Term -> Link + 104. builtin type Link.Type + 105. Link.Type : Type -> Link + 106. builtin type List + 107. List.++ : [a] -> [a] -> [a] + 108. List.+: : a -> [a] -> [a] + 109. List.:+ : [a] -> a -> [a] + 110. List.at : Nat -> [a] -> Optional a + 111. List.cons : a -> [a] -> [a] + 112. List.drop : Nat -> [a] -> [a] + 113. List.empty : [a] + 114. List.size : [a] -> Nat + 115. List.snoc : [a] -> a -> [a] + 116. List.take : Nat -> [a] -> [a] + 117. builtin type Nat + 118. Nat.* : Nat -> Nat -> Nat + 119. Nat.+ : Nat -> Nat -> Nat + 120. Nat./ : Nat -> Nat -> Nat + 121. Nat.and : Nat -> Nat -> Nat + 122. Nat.complement : Nat -> Nat + 123. Nat.drop : Nat -> Nat -> Nat + 124. Nat.eq : Nat -> Nat -> Boolean + 125. Nat.fromText : Text -> Optional Nat + 126. Nat.gt : Nat -> Nat -> Boolean + 127. Nat.gteq : Nat -> Nat -> Boolean + 128. Nat.increment : Nat -> Nat + 129. Nat.isEven : Nat -> Boolean + 130. Nat.isOdd : Nat -> Boolean + 131. Nat.leadingZeros : Nat -> Nat + 132. Nat.lt : Nat -> Nat -> Boolean + 133. Nat.lteq : Nat -> Nat -> Boolean + 134. Nat.mod : Nat -> Nat -> Nat + 135. Nat.or : Nat -> Nat -> Nat + 136. Nat.pow : Nat -> Nat -> Nat + 137. Nat.shiftLeft : Nat -> Nat -> Nat + 138. Nat.shiftRight : Nat -> Nat -> Nat + 139. Nat.sub : Nat -> Nat -> Int + 140. Nat.toFloat : Nat -> Float + 141. Nat.toInt : Nat -> Int + 142. Nat.toText : Nat -> Text + 143. Nat.trailingZeros : Nat -> Nat + 144. Nat.xor : Nat -> Nat -> Nat + 145. type Optional a + 146. 
Optional.None : Optional a + 147. Optional.Some : a -> Optional a + 148. builtin type Request + 149. type SeqView a b + 150. SeqView.VElem : a -> b -> SeqView a b + 151. SeqView.VEmpty : SeqView a b + 152. unique type Test.Result + 153. Test.Result.Fail : Text -> Result + 154. Test.Result.Ok : Text -> Result + 155. builtin type Text + 156. Text.!= : Text -> Text -> Boolean + 157. Text.++ : Text -> Text -> Text + 158. Text.drop : Nat -> Text -> Text + 159. Text.empty : Text + 160. Text.eq : Text -> Text -> Boolean + 161. Text.fromCharList : [Char] -> Text + 162. Text.gt : Text -> Text -> Boolean + 163. Text.gteq : Text -> Text -> Boolean + 164. Text.lt : Text -> Text -> Boolean + 165. Text.lteq : Text -> Text -> Boolean + 166. Text.size : Text -> Nat + 167. Text.take : Nat -> Text -> Text + 168. Text.toCharList : Text -> [Char] + 169. Text.uncons : Text -> Optional (Char, Text) + 170. Text.unsnoc : Text -> Optional (Text, Char) + 171. type Tuple a b + 172. Tuple.Cons : a -> b -> Tuple a b + 173. type Unit + 174. Unit.Unit : () + 175. Universal.< : a -> a -> Boolean + 176. Universal.<= : a -> a -> Boolean + 177. Universal.== : a -> a -> Boolean + 178. Universal.> : a -> a -> Boolean + 179. Universal.>= : a -> a -> Boolean + 180. Universal.compare : a -> a -> Int + 181. bug : a -> b + 182. builtin type crypto.HashAlgorithm + 183. crypto.HashAlgorithm.Blake2b_256 : HashAlgorithm + 184. crypto.HashAlgorithm.Blake2b_512 : HashAlgorithm + 185. crypto.HashAlgorithm.Blake2s_256 : HashAlgorithm + 186. crypto.HashAlgorithm.Sha2_256 : HashAlgorithm + 187. crypto.HashAlgorithm.Sha2_512 : HashAlgorithm + 188. crypto.HashAlgorithm.Sha3_256 : HashAlgorithm + 189. crypto.HashAlgorithm.Sha3_512 : HashAlgorithm + 190. crypto.hash : HashAlgorithm -> a -> Bytes + 191. crypto.hashBytes : HashAlgorithm -> Bytes -> Bytes + 192. crypto.hmac : HashAlgorithm -> Bytes -> a -> Bytes + 193. crypto.hmacBytes : HashAlgorithm + -> Bytes + -> Bytes + -> Bytes + 194. 
unique type io2.BufferMode + 195. io2.BufferMode.BlockBuffering : BufferMode + 196. io2.BufferMode.LineBuffering : BufferMode + 197. io2.BufferMode.NoBuffering : BufferMode + 198. io2.BufferMode.SizedBlockBuffering : Nat -> BufferMode + 199. unique type io2.FileMode + 200. io2.FileMode.Append : FileMode + 201. io2.FileMode.Read : FileMode + 202. io2.FileMode.ReadWrite : FileMode + 203. io2.FileMode.Write : FileMode + 204. builtin type io2.Handle + 205. builtin type io2.IO + 206. io2.IO.clientSocket : Text + -> Text + ->{IO} Either IOError Socket + 207. io2.IO.closeFile : Handle ->{IO} Either IOError () + 208. io2.IO.closeSocket : Socket ->{IO} Either IOError () + 209. io2.IO.createDirectory : Text ->{IO} Either IOError () + 210. io2.IO.delay : Nat ->{IO} Either IOError () + 211. io2.IO.fileExists : Text ->{IO} Either IOError Boolean + 212. io2.IO.forkComp : '{IO} Either IOError a ->{IO} ThreadId + 213. io2.IO.getBuffering : Handle + ->{IO} Either IOError BufferMode + 214. io2.IO.getCurrentDirectory : '{IO} Either IOError Text + 215. io2.IO.getFileSize : Text ->{IO} Either IOError Nat + 216. io2.IO.getFileTimestamp : Text ->{IO} Either IOError Nat + 217. io2.IO.getLine : Handle ->{IO} Either IOError Text + 218. io2.IO.getTempDirectory : '{IO} Either IOError Text + 219. io2.IO.getText : Handle ->{IO} Either IOError Text + 220. io2.IO.handlePosition : Handle ->{IO} Either IOError Int + 221. io2.IO.isDirectory : Text ->{IO} Either IOError Boolean + 222. io2.IO.isFileEOF : Handle ->{IO} Either IOError Boolean + 223. io2.IO.isFileOpen : Handle ->{IO} Either IOError Boolean + 224. io2.IO.isSeekable : Handle ->{IO} Either IOError Boolean + 225. io2.IO.kill : ThreadId ->{IO} Either IOError () + 226. io2.IO.listen : Socket ->{IO} Either IOError () + 227. io2.IO.openFile : Text + -> FileMode + ->{IO} Either IOError Handle + 228. io2.IO.putText : Handle -> Text ->{IO} Either IOError () + 229. io2.IO.removeDirectory : Text ->{IO} Either IOError () + 230. 
io2.IO.removeFile : Text ->{IO} Either IOError () + 231. io2.IO.renameDirectory : Text + -> Text + ->{IO} Either IOError () + 232. io2.IO.renameFile : Text -> Text ->{IO} Either IOError () + 233. io2.IO.seekHandle : Handle + -> SeekMode + -> Int + ->{IO} Either IOError () + 234. io2.IO.serverSocket : Text + -> Text + ->{IO} Either IOError Socket + 235. io2.IO.setBuffering : Handle + -> BufferMode + ->{IO} Either IOError () + 236. io2.IO.setCurrentDirectory : Text + ->{IO} Either IOError () + 237. io2.IO.socketAccept : Socket ->{IO} Either IOError Socket + 238. io2.IO.socketReceive : Socket + -> Nat + ->{IO} Either IOError Bytes + 239. io2.IO.socketSend : Socket + -> Bytes + ->{IO} Either IOError () + 240. io2.IO.stdHandle : StdHandle -> Handle + 241. io2.IO.systemTime : '{IO} Either IOError Nat + 242. unique type io2.IOError + 243. io2.IOError.AlreadyExists : IOError + 244. io2.IOError.EOF : IOError + 245. io2.IOError.IllegalOperation : IOError + 246. io2.IOError.NoSuchThing : IOError + 247. io2.IOError.PermissionDenied : IOError + 248. io2.IOError.ResourceBusy : IOError + 249. io2.IOError.ResourceExhausted : IOError + 250. io2.IOError.UserError : IOError + 251. builtin type io2.MVar + 252. io2.MVar.isEmpty : MVar a ->{IO} Boolean + 253. io2.MVar.new : a ->{IO} MVar a + 254. io2.MVar.newEmpty : {IO} (MVar a) + 255. io2.MVar.put : MVar a -> a ->{IO} Either IOError () + 256. io2.MVar.read : MVar a ->{IO} Either IOError a + 257. io2.MVar.swap : MVar a -> a ->{IO} Either IOError a + 258. io2.MVar.take : MVar a ->{IO} Either IOError a + 259. io2.MVar.tryPut : MVar a -> a ->{IO} Boolean + 260. io2.MVar.tryRead : MVar a ->{IO} Optional a + 261. io2.MVar.tryTake : MVar a ->{IO} Optional a + 262. unique type io2.SeekMode + 263. io2.SeekMode.AbsoluteSeek : SeekMode + 264. io2.SeekMode.RelativeSeek : SeekMode + 265. io2.SeekMode.SeekFromEnd : SeekMode + 266. builtin type io2.Socket + 267. unique type io2.StdHandle + 268. io2.StdHandle.StdErr : StdHandle + 269. 
io2.StdHandle.StdIn : StdHandle + 270. io2.StdHandle.StdOut : StdHandle + 271. builtin type io2.ThreadId + 272. todo : a -> b + + +.builtin> alias.many 94-104 .mylib + + Here's what changed in .mylib : + + Added definitions: + + 1. unique type Link + 2. builtin type Link.Term + 3. builtin type Link.Type + 4. Link.Term : Term -> Link + 5. Int.shiftRight : Int -> Nat -> Int + 6. Int.signum : Int -> Int + 7. Int.toFloat : Int -> Float + 8. Int.toText : Int -> Text + 9. Int.trailingZeros : Int -> Nat + 10. Int.truncate0 : Int -> Nat + 11. Int.xor : Int -> Int -> Int + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +I want to incorporate a few more from another namespace: +```ucm +.builtin> cd .runar + +.runar> find + + 1. List.adjacentPairs : [a] -> [(a, a)] + 2. List.all : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean + 3. List.any : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean + 4. List.chunk : Nat -> [a] -> [[a]] + 5. List.chunksOf : Nat -> [a] -> [[a]] + 6. List.dropWhile : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} [a] + 7. List.first : [a] -> Optional a + 8. List.init : [a] -> Optional [a] + 9. List.intersperse : a -> [a] -> [a] + 10. List.isEmpty : [a] -> Boolean + 11. List.last : [a] -> Optional a + 12. List.replicate : Nat -> a -> [a] + 13. List.splitAt : Nat -> [a] -> ([a], [a]) + 14. List.tail : [a] -> Optional [a] + 15. List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] + + +.runar> alias.many 1-15 .mylib + + Here's what changed in .mylib : + + Added definitions: + + 1. List.adjacentPairs : [a] -> [(a, a)] + 2. List.all : (a ->{𝕖} Boolean) + ->{𝕖} [a] + ->{𝕖} Boolean + 3. List.any : (a ->{𝕖} Boolean) + ->{𝕖} [a] + ->{𝕖} Boolean + 4. List.chunk : Nat -> [a] -> [[a]] + 5. List.chunksOf : Nat -> [a] -> [[a]] + 6. List.dropWhile : (a ->{𝕖} Boolean) + ->{𝕖} [a] + ->{𝕖} [a] + 7. List.first : [a] -> Optional a + 8. List.init : [a] -> Optional [a] + 9. List.intersperse : a -> [a] -> [a] + 10. List.isEmpty : [a] -> Boolean + 11. 
List.last : [a] -> Optional a + 12. List.replicate : Nat -> a -> [a] + 13. List.splitAt : Nat -> [a] -> ([a], [a]) + 14. List.tail : [a] -> Optional [a] + 15. List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] + + Tip: You can use `undo` or `reflog` to undo this change. + +.runar> cd .mylib + +.mylib> find + + 1. Int.shiftRight : Int -> Nat -> Int + 2. Int.signum : Int -> Int + 3. Int.toFloat : Int -> Float + 4. Int.toText : Int -> Text + 5. Int.trailingZeros : Int -> Nat + 6. Int.truncate0 : Int -> Nat + 7. Int.xor : Int -> Int -> Int + 8. unique type Link + 9. builtin type Link.Term + 10. Link.Term : Term -> Link + 11. builtin type Link.Type + 12. List.adjacentPairs : [a] -> [(a, a)] + 13. List.all : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean + 14. List.any : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} Boolean + 15. List.chunk : Nat -> [a] -> [[a]] + 16. List.chunksOf : Nat -> [a] -> [[a]] + 17. List.dropWhile : (a ->{𝕖} Boolean) ->{𝕖} [a] ->{𝕖} [a] + 18. List.first : [a] -> Optional a + 19. List.init : [a] -> Optional [a] + 20. List.intersperse : a -> [a] -> [a] + 21. List.isEmpty : [a] -> Boolean + 22. List.last : [a] -> Optional a + 23. List.replicate : Nat -> a -> [a] + 24. List.splitAt : Nat -> [a] -> ([a], [a]) + 25. List.tail : [a] -> Optional [a] + 26. List.takeWhile : (a ->{𝕖} Boolean) -> [a] ->{𝕖} [a] + + +``` +Thanks, `alias.many! diff --git a/unison-src/transcripts/ambiguous-metadata.md b/unison-src/transcripts/ambiguous-metadata.md new file mode 100644 index 0000000000..09d5dfa8b3 --- /dev/null +++ b/unison-src/transcripts/ambiguous-metadata.md @@ -0,0 +1,17 @@ + +## An example scenario that surfaces an 'ambiguous metadata' error. 
+ +```unison:hide +foo.doc = [: a :] +boo.doc = [: b :] +x = 1 +``` + +```ucm:hide:all +.> add +``` + +```ucm:error +.> merge foo boo +.> link boo.doc x +``` \ No newline at end of file diff --git a/unison-src/transcripts/ambiguous-metadata.output.md b/unison-src/transcripts/ambiguous-metadata.output.md new file mode 100644 index 0000000000..c0d958fc6d --- /dev/null +++ b/unison-src/transcripts/ambiguous-metadata.output.md @@ -0,0 +1,42 @@ + +## An example scenario that surfaces an 'ambiguous metadata' error. + +```unison +foo.doc = [: a :] +boo.doc = [: b :] +x = 1 +``` + +```ucm +.> merge foo boo + + Here's what's changed in boo after the merge: + + New name conflicts: + + 1. doc#tj3gfqdnje : #v00j3buk6m + ↓ + 2. ┌ doc#d4ormokpf9 : #v00j3buk6m + 3. └ doc#tj3gfqdnje : #v00j3buk6m + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> link boo.doc x + + ⚠️ + + I'm not sure which metadata value you're referring to since + there are multiple matches: + + foo.doc + boo.doc#tj3gfqdnje + + Tip: Try again and supply one of the above definitions + explicitly. + + I didn't make any changes. + +``` diff --git a/unison-src/transcripts/blocks.md b/unison-src/transcripts/blocks.md new file mode 100644 index 0000000000..c7f4277090 --- /dev/null +++ b/unison-src/transcripts/blocks.md @@ -0,0 +1,177 @@ +## Blocks and scoping + +```ucm:hide +.> builtins.merge +``` + +### Names introduced by a block shadow names introduced in outer scopes + +For example: + +```unison +ex thing = + thing y = y + -- refers to `thing` in this block + -- not the argument to `ex` + bar x = thing x + 1 + bar 42 + +> ex "hello" +``` + +### Whether a block shadows outer names doesn't depend on the order of bindings in the block + +The `thing` reference in `bar` refers to the one declared locally in the block that `bar` is part of. 
This is true even if the declaration which shadows the outer name appears later in the block, for instance: + +```unison +ex thing = + bar x = thing x + 1 + thing y = y + bar 42 + +> ex "hello" +``` + +### Blocks use lexical scoping and can only reference definitions in parent scopes or in the same block + +This is just the normal lexical scoping behavior. For example: + +```unison +ex thing = + bar x = thing x + 1 -- references outer `thing` + baz z = + thing y = y -- shadows the outer `thing` + thing z -- references the inner `thing` + bar 42 + +> ex (x -> x * 100) +``` + +Here's another example, showing that bindings cannot reference bindings declared in blocks nested in the _body_ (the final expression) of a block: + +```unison +ex thing = + bar x = thing x + 1 -- refers to outer thing + let + thing y = y + bar 42 + +> ex (x -> x * 100) +``` + +### Blocks can define one or more functions which are recursive or mutually recursive + +We call these groups of definitions that reference each other in a block _cycles_. For instance: + +```unison +sumTo n = + -- A recursive function, defined inside a block + go acc n = + if n == 0 then acc + else go (acc + n) (n `drop` 1) + go 0 n + +ex n = + -- Two mutually recursive functions, defined in a block + ping x = pong (x + 1) + pong x = ping (x + 2) + ping 42 +``` + +The `go` function is a one-element cycle (it reference itself), and `ping` and `pong` form a two-element cycle. + +### Cyclic references or forward reference must be guarded + +For instance, this works: + +```unison +ex n = + ping x = pong + 1 + x + pong = 42 + ping 0 +``` + +Since the forward reference to `pong` appears inside `ping`. + +This, however, will not compile: + +```unison:error +ex n = + pong = ping + 1 + ping = 42 + pong +``` + +This also won't compile; it's a cyclic reference that isn't guarded: + +```unison:error +ex n = + loop = loop + loop +``` + +This, however, will compile. 
It can also be viewed as a special case of the restriction that elements of a cycle must all be guarded.
in this block + -- not the argument to `ex` + bar x = thing x + 1 + bar 42 + +> ex "hello" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex : thing -> Nat + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 8 | > ex "hello" + ⧩ + 43 + +``` +### Whether a block shadows outer names doesn't depend on the order of bindings in the block + +The `thing` reference in `bar` refers to the one declared locally in the block that `bar` is part of. This is true even if the declaration which shadows the outer name appears later in the block, for instance: + +```unison +ex thing = + bar x = thing x + 1 + thing y = y + bar 42 + +> ex "hello" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex : thing -> Nat + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 6 | > ex "hello" + ⧩ + 43 + +``` +### Blocks use lexical scoping and can only reference definitions in parent scopes or in the same block + +This is just the normal lexical scoping behavior. For example: + +```unison +ex thing = + bar x = thing x + 1 -- references outer `thing` + baz z = + thing y = y -- shadows the outer `thing` + thing z -- references the inner `thing` + bar 42 + +> ex (x -> x * 100) +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex : (Nat ->{𝕖} Nat) ->{𝕖} Nat + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
The `go` function is a one-element cycle (it references itself), and `ping` and `pong` form a two-element cycle.
It can also be viewed as a special case of the restriction that elements of a cycle must all be guarded.
This is actually parsed as if you moved `zap` after the cycle it finds itself a part of:
+ +```ucm +.tmp> builtins.merge +.tmp> ls builtin +``` diff --git a/unison-src/transcripts/builtins-merge.output.md b/unison-src/transcripts/builtins-merge.output.md new file mode 100644 index 0000000000..0bdf00e1e5 --- /dev/null +++ b/unison-src/transcripts/builtins-merge.output.md @@ -0,0 +1,51 @@ +The `builtins.merge` command adds the known builtins to a `builtin` subnamespace within the current namespace. + +```ucm + ☝️ The namespace .tmp is empty. + +.tmp> builtins.merge + + Done. + +.tmp> ls builtin + + 1. Boolean (builtin type) + 2. Boolean/ (1 definition) + 3. Bytes (builtin type) + 4. Bytes/ (17 definitions) + 5. Char (builtin type) + 6. Char/ (2 definitions) + 7. Debug/ (1 definition) + 8. Doc (type) + 9. Doc/ (6 definitions) + 10. Either (type) + 11. Either/ (2 definitions) + 12. Float (builtin type) + 13. Float/ (36 definitions) + 14. Int (builtin type) + 15. Int/ (28 definitions) + 16. Link (type) + 17. Link/ (4 definitions) + 18. List (builtin type) + 19. List/ (10 definitions) + 20. Nat (builtin type) + 21. Nat/ (27 definitions) + 22. Optional (type) + 23. Optional/ (2 definitions) + 24. Request (builtin type) + 25. SeqView (type) + 26. SeqView/ (2 definitions) + 27. Test/ (3 definitions) + 28. Text (builtin type) + 29. Text/ (15 definitions) + 30. Tuple (type) + 31. Tuple/ (1 definition) + 32. Unit (type) + 33. Unit/ (1 definition) + 34. Universal/ (6 definitions) + 35. bug (a -> b) + 36. crypto/ (12 definitions) + 37. io2/ (78 definitions) + 38. 
todo (a -> b) + +``` diff --git a/unison-src/transcripts/bytesFromList.md b/unison-src/transcripts/bytesFromList.md new file mode 100644 index 0000000000..9da15329f3 --- /dev/null +++ b/unison-src/transcripts/bytesFromList.md @@ -0,0 +1,11 @@ + +```ucm:hide +.> builtins.merge +``` + +This should render as `Bytes.fromList [1,2,3,4]`, not `##Bytes.fromSequence [1,2,3,4]`: + +```unison +> Bytes.fromList [1,2,3,4] +``` + diff --git a/unison-src/transcripts/bytesFromList.output.md b/unison-src/transcripts/bytesFromList.output.md new file mode 100644 index 0000000000..9ba8af1b32 --- /dev/null +++ b/unison-src/transcripts/bytesFromList.output.md @@ -0,0 +1,21 @@ + +This should render as `Bytes.fromList [1,2,3,4]`, not `##Bytes.fromSequence [1,2,3,4]`: + +```unison +> Bytes.fromList [1,2,3,4] +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | > Bytes.fromList [1,2,3,4] + ⧩ + fromList [1, 2, 3, 4] + +``` diff --git a/unison-src/transcripts/cd-back.md b/unison-src/transcripts/cd-back.md new file mode 100644 index 0000000000..7a7df60eb5 --- /dev/null +++ b/unison-src/transcripts/cd-back.md @@ -0,0 +1,46 @@ +## Switching between namespaces / projects / branches / modules + +Unison uses the same organizational element to represent directories, projects, sub-projects, forks, modules, etc.; currently called a "namespace". + +Namespaces are trees that contain definitions of "types" and "terms", "patches", and other child namespaces. 
+ +We're still working out what a nice codebase layout might be (feel free to write up a blog post if you find one that works well for you), but in this example, we have these, along with their children (not shown): + +> .libs.base +> .libs.megaparser.master +> .libs.megaparser.v1 +> .libs.megaparser.v2 +> .arya.base +> .arya.myproject +> .pullrequests.runarorama.base_3.base +> .pullrequests.runarorama.base_3.head +> .pullrequests.runarorama.base_3.merged +> .temp + +```ucm:hide +.> builtins.merge +.> move.namespace builtin .arya.base +``` + +```ucm +.> cd arya.base +.arya.base> find Boolean +``` +```ucm:hide +.arya.base> cd .arya.myproject +``` + +blah blah blah more stuff about project management and patches and the value of working from the appropriate namespace, and what that is in any given case + +We can pop back to the previous namespace with the `back` command. + +```ucm:hide +.arya.myproject> back +``` +```ucm:hide +.arya.base> back +``` +```ucm:error +.> back +``` +😬 Right, ok. diff --git a/unison-src/transcripts/cd-back.output.md b/unison-src/transcripts/cd-back.output.md new file mode 100644 index 0000000000..9b89c23353 --- /dev/null +++ b/unison-src/transcripts/cd-back.output.md @@ -0,0 +1,40 @@ +## Switching between namespaces / projects / branches / modules + +Unison uses the same organizational element to represent directories, projects, sub-projects, forks, modules, etc.; currently called a "namespace". + +Namespaces are trees that contain definitions of "types" and "terms", "patches", and other child namespaces. 
+ +We're still working out what a nice codebase layout might be (feel free to write up a blog post if you find one that works well for you), but in this example, we have these, along with their children (not shown): + +> .libs.base +> .libs.megaparser.master +> .libs.megaparser.v1 +> .libs.megaparser.v2 +> .arya.base +> .arya.myproject +> .pullrequests.runarorama.base_3.base +> .pullrequests.runarorama.base_3.head +> .pullrequests.runarorama.base_3.merged +> .temp + +```ucm +.> cd arya.base + +.arya.base> find Boolean + + 1. builtin type Boolean + 2. Boolean.not : Boolean -> Boolean + + +``` +blah blah blah more stuff about project management and patches and the value of working from the appropriate namespace, and what that is in any given case + +We can pop back to the previous namespace with the `back` command. + +```ucm +.> back + + You're already at the very beginning! 🙂 + +``` +😬 Right, ok. diff --git a/unison-src/transcripts/check763.md b/unison-src/transcripts/check763.md new file mode 100644 index 0000000000..3bb162b344 --- /dev/null +++ b/unison-src/transcripts/check763.md @@ -0,0 +1,17 @@ +Regression test for https://github.com/unisonweb/unison/issues/763 + +```ucm:hide +.> builtins.merge +``` + +```unison +(+-+) : Nat -> Nat -> Nat +(+-+) x y = x * y +``` + +```ucm +.> add +.> move.term +-+ boppitybeep +.> move.term boppitybeep +-+ +``` + diff --git a/unison-src/transcripts/check763.output.md b/unison-src/transcripts/check763.output.md new file mode 100644 index 0000000000..568f1d330b --- /dev/null +++ b/unison-src/transcripts/check763.output.md @@ -0,0 +1,34 @@ +Regression test for https://github.com/unisonweb/unison/issues/763 + +```unison +(+-+) : Nat -> Nat -> Nat +(+-+) x y = x * y +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + +-+ : Nat -> Nat -> Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + +-+ : Nat -> Nat -> Nat + +.> move.term +-+ boppitybeep + + Done. + +.> move.term boppitybeep +-+ + + Done. + +``` diff --git a/unison-src/transcripts/check873.md b/unison-src/transcripts/check873.md new file mode 100644 index 0000000000..7145186286 --- /dev/null +++ b/unison-src/transcripts/check873.md @@ -0,0 +1,17 @@ +See [this ticket](https://github.com/unisonweb/unison/issues/873); the point being, this shouldn't crash the runtime. :) + +```ucm:hide +.> builtins.merge +``` + +```unison +(-) = builtin.Nat.sub +``` + +```ucm +.> add +``` + +```unison +baz x = x - 1 +``` diff --git a/unison-src/transcripts/check873.output.md b/unison-src/transcripts/check873.output.md new file mode 100644 index 0000000000..8935c2336f --- /dev/null +++ b/unison-src/transcripts/check873.output.md @@ -0,0 +1,40 @@ +See [this ticket](https://github.com/unisonweb/unison/issues/873); the point being, this shouldn't crash the runtime. :) + +```unison +(-) = builtin.Nat.sub +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + - : Nat -> Nat -> Int + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + - : Nat -> Nat -> Int + +``` +```unison +baz x = x - 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + baz : Nat -> Int + +``` diff --git a/unison-src/transcripts/copy-patch.md b/unison-src/transcripts/copy-patch.md new file mode 100644 index 0000000000..11f54e99fc --- /dev/null +++ b/unison-src/transcripts/copy-patch.md @@ -0,0 +1,39 @@ +# Test that copying a patch works as expected + +```unison +x = 1 +``` + +```ucm +.> add +``` + +Change the definition of `x` so something goes in our patch: + +```unison +x = 2 +``` + +```ucm +.> update foo.patch +``` + +Copy the patch and make sure it's still there. + +```ucm +.> copy.patch foo.patch bar.patch +.> view.patch foo.patch +.> view.patch bar.patch +``` + +Now move the patch. + +```ucm +.> move.patch foo.patch qux.patch +``` + +The moved patch should be gone. + +```ucm +.> view.patch foo.patch +``` diff --git a/unison-src/transcripts/copy-patch.output.md b/unison-src/transcripts/copy-patch.output.md new file mode 100644 index 0000000000..2fc3645c30 --- /dev/null +++ b/unison-src/transcripts/copy-patch.output.md @@ -0,0 +1,91 @@ +# Test that copying a patch works as expected + +```unison +x = 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : ##Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + x : ##Nat + +``` +Change the definition of `x` so something goes in our patch: + +```unison +x = 2 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + x : ##Nat + +``` +```ucm +.> update foo.patch + + ⍟ I've updated these names to your new definition: + + x : ##Nat + +``` +Copy the patch and make sure it's still there. 
+ +```ucm +.> copy.patch foo.patch bar.patch + + Done. + +.> view.patch foo.patch + + Edited Terms: #jk19sm5bf8 -> x + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +.> view.patch bar.patch + + Edited Terms: #jk19sm5bf8 -> x + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +``` +Now move the patch. + +```ucm +.> move.patch foo.patch qux.patch + + Done. + +``` +The moved patch should be gone. + +```ucm +.> view.patch foo.patch + + This patch is empty. + +``` diff --git a/unison-src/transcripts/create-author.md b/unison-src/transcripts/create-author.md new file mode 100644 index 0000000000..69b45b19cd --- /dev/null +++ b/unison-src/transcripts/create-author.md @@ -0,0 +1,17 @@ +```ucm:hide +.> builtins.mergeio +``` + +Demonstrating `create.author`: + +```unison:hide +def1 = 1 +def2 = 2 +``` + +```ucm +.foo> add +.foo> create.author alicecoder "Alice McGee" +.foo> view 3 +.foo> link metadata.authors.alicecoder def1 def2 +``` diff --git a/unison-src/transcripts/create-author.output.md b/unison-src/transcripts/create-author.output.md new file mode 100644 index 0000000000..69d0df22cd --- /dev/null +++ b/unison-src/transcripts/create-author.output.md @@ -0,0 +1,44 @@ +Demonstrating `create.author`: + +```unison +def1 = 1 +def2 = 2 +``` + +```ucm + ☝️ The namespace .foo is empty. + +.foo> add + + ⍟ I've added these definitions: + + def1 : Nat + def2 : Nat + +.foo> create.author alicecoder "Alice McGee" + + Added definitions: + + 1. metadata.authors.alicecoder : Author + 2. metadata.authors.alicecoder.guid : GUID + 3. metadata.copyrightHolders.alicecoder : CopyrightHolder + + Tip: Add License values for alicecoder under metadata. 
+ +.foo> view 3 + + metadata.copyrightHolders.alicecoder : CopyrightHolder + metadata.copyrightHolders.alicecoder = + CopyrightHolder alicecoder.guid "Alice McGee" + +.foo> link metadata.authors.alicecoder def1 def2 + + Updates: + + 1. foo.def1 : Nat + + 2. authors.alicecoder : Author + + 3. foo.def2 : Nat + + 4. authors.alicecoder : Author + +``` diff --git a/unison-src/transcripts/delete.md b/unison-src/transcripts/delete.md new file mode 100644 index 0000000000..4fcf029d5f --- /dev/null +++ b/unison-src/transcripts/delete.md @@ -0,0 +1,100 @@ +# Delete + +```ucm:hide +.> builtins.merge +``` + +The delete command can delete both terms and types. + +First, let's make sure it complains when we try to delete a name that doesn't +exist. + +```ucm:error +.> delete foo +``` + +Now for some easy cases. Deleting an unambiguous term, then deleting an +unambiguous type. + +```unison:hide +foo = 1 +type Foo = Foo Nat +``` + +```ucm +.> add +.> delete foo +.> delete Foo +.> delete Foo.Foo +``` + +How about an ambiguous term? + +```unison:hide +foo = 1 +``` + +```ucm +.a> add +``` + +```unison:hide +foo = 2 +``` + +```ucm +.b> add +.a> merge .b +``` + +A delete should remove both versions of the term. + +```ucm +.a> delete foo +``` + +```ucm:error +.a> ls +``` + +Let's repeat all that on a type, for completeness. + +```unison:hide +type Foo = Foo Nat +``` + +```ucm +.a> add +``` + +```unison:hide +type Foo = Foo Boolean +``` + +```ucm +.b> add +.a> merge .b +``` + +```ucm +.a> delete Foo +``` + +```ucm +.a> delete Foo.Foo +``` + +Finally, let's try to delete a term and a type with the same name. 
+ +```unison:hide +foo = 1 +type foo = Foo Nat +``` + +```ucm +.> add +``` + +```ucm +.> delete foo +``` diff --git a/unison-src/transcripts/delete.output.md b/unison-src/transcripts/delete.output.md new file mode 100644 index 0000000000..61af13bc5f --- /dev/null +++ b/unison-src/transcripts/delete.output.md @@ -0,0 +1,237 @@ +# Delete + +The delete command can delete both terms and types. + +First, let's make sure it complains when we try to delete a name that doesn't +exist. + +```ucm +.> delete foo + + ⚠️ + + I don't know about that name. + +``` +Now for some easy cases. Deleting an unambiguous term, then deleting an +unambiguous type. + +```unison +foo = 1 +type Foo = Foo Nat +``` + +```ucm +.> add + + ⍟ I've added these definitions: + + type Foo + foo : Nat + +.> delete foo + + Removed definitions: + + 1. foo : Nat + + Tip: You can use `undo` or `reflog` to undo this change. + +.> delete Foo + + Removed definitions: + + 1. type Foo + + Tip: You can use `undo` or `reflog` to undo this change. + +.> delete Foo.Foo + + Removed definitions: + + 1. Foo.Foo : Nat -> #d97e0jhkmd + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +How about an ambiguous term? + +```unison +foo = 1 +``` + +```ucm + ☝️ The namespace .a is empty. + +.a> add + + ⍟ I've added these definitions: + + foo : Nat + +``` +```unison +foo = 2 +``` + +```ucm + ☝️ The namespace .b is empty. + +.b> add + + ⍟ I've added these definitions: + + foo : Nat + +.a> merge .b + + Here's what's changed in the current namespace after the + merge: + + New name conflicts: + + 1. foo#jk19sm5bf8 : Nat + ↓ + 2. ┌ foo#0ja1qfpej6 : Nat + 3. └ foo#jk19sm5bf8 : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +A delete should remove both versions of the term. + +```ucm +.a> delete foo + + Removed definitions: + + 1. 
a.foo#jk19sm5bf8 : Nat + + Name changes: + + Original Changes + 2. a.foo#0ja1qfpej6 ┐ 3. a.foo#0ja1qfpej6 (removed) + 4. b.foo ┘ + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +```ucm +.a> ls + + nothing to show + +``` +Let's repeat all that on a type, for completeness. + +```unison +type Foo = Foo Nat +``` + +```ucm +.a> add + + ⍟ I've added these definitions: + + type Foo + +``` +```unison +type Foo = Foo Boolean +``` + +```ucm +.b> add + + ⍟ I've added these definitions: + + type Foo + +.a> merge .b + + Here's what's changed in the current namespace after the + merge: + + New name conflicts: + + 1. type Foo#d97e0jhkmd + + ↓ + 2. ┌ type Foo#d97e0jhkmd + + 3. └ type Foo#gq9inhvg9h + + + 4. Foo.Foo#d97e0jhkmd#0 : Nat -> Foo#d97e0jhkmd + ↓ + 5. ┌ Foo.Foo#d97e0jhkmd#0 : Nat -> Foo#d97e0jhkmd + 6. └ Foo.Foo#gq9inhvg9h#0 : Boolean -> Foo#gq9inhvg9h + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +```ucm +.a> delete Foo + + Removed definitions: + + 1. type a.Foo#d97e0jhkmd + + Name changes: + + Original Changes + 2. a.Foo#gq9inhvg9h ┐ 3. a.Foo#gq9inhvg9h (removed) + 4. b.Foo ┘ + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +```ucm +.a> delete Foo.Foo + + Removed definitions: + + 1. a.Foo.Foo#d97e0jhkmd#0 : Nat -> #d97e0jhkmd + + Name changes: + + Original Changes + 2. a.Foo.Foo#gq9inhvg9h#0 ┐ 3. a.Foo.Foo#gq9inhvg9h#0 (removed) + 4. b.Foo.Foo ┘ + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +Finally, let's try to delete a term and a type with the same name. + +```unison +foo = 1 +type foo = Foo Nat +``` + +```ucm +.> add + + ⍟ I've added these definitions: + + type foo + foo : Nat + +``` +```ucm +.> delete foo + + Removed definitions: + + 1. type foo + 2. foo : Nat + + Tip: You can use `undo` or `reflog` to undo this change. 
+ +``` diff --git a/unison-src/transcripts/deleteReplacements.md b/unison-src/transcripts/deleteReplacements.md new file mode 100644 index 0000000000..f08f81fca2 --- /dev/null +++ b/unison-src/transcripts/deleteReplacements.md @@ -0,0 +1,46 @@ +# Deleting term and type replacements from patches + +```unison +x = 1 +``` + +```ucm +.> add +``` + +```unison +x = 2 +``` + +```ucm +.> update +.> view.patch +``` + +```ucm +.> delete.term-replacement #jk19 +.> view.patch +``` + +```unison +type Foo = Foo +``` + +```ucm +.> add +``` + +```unison +type Foo = Foo | Bar +``` + +```ucm +.> update +.> view.patch +``` + +```ucm +.> delete.type-replacement #568rsi7o3g +.> view.patch +``` + diff --git a/unison-src/transcripts/deleteReplacements.output.md b/unison-src/transcripts/deleteReplacements.output.md new file mode 100644 index 0000000000..4def203f84 --- /dev/null +++ b/unison-src/transcripts/deleteReplacements.output.md @@ -0,0 +1,132 @@ +# Deleting term and type replacements from patches + +```unison +x = 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : ##Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + x : ##Nat + +``` +```unison +x = 2 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + x : ##Nat + +``` +```ucm +.> update + + ⍟ I've updated these names to your new definition: + + x : ##Nat + +.> view.patch + + Edited Terms: x#jk19sm5bf8 -> x + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +``` +```ucm +.> delete.term-replacement #jk19 + + Done. + +.> view.patch + + This patch is empty. 
+ +``` +```unison +type Foo = Foo +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + type Foo + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + type Foo + +``` +```unison +type Foo = Foo | Bar +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + type Foo + +``` +```ucm +.> update + + ⍟ I've updated these names to your new definition: + + type Foo + +.> view.patch + + Edited Types: Foo#568rsi7o3g -> Foo + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +``` +```ucm +.> delete.type-replacement #568rsi7o3g + + Done. + +.> view.patch + + This patch is empty. + +``` diff --git a/unison-src/transcripts/dependents-dependencies-debugfile.md b/unison-src/transcripts/dependents-dependencies-debugfile.md new file mode 100644 index 0000000000..0bebc6f1cb --- /dev/null +++ b/unison-src/transcripts/dependents-dependencies-debugfile.md @@ -0,0 +1,38 @@ +```ucm:hide +.> builtins.merge +``` + +### `debug.file` +I can use `debug.file` to see the hashes of the last typechecked file. + +Given this .u file: +```unison:hide +type outside.A = A Nat outside.B +type outside.B = B Int +outside.c = 3 +outside.d = c < (p + 1) + +type inside.M = M outside.A +inside.p = c +inside.q x = x + p * p +inside.r = d +``` +```ucm +.> debug.file +``` + +This will help me make progress in some situations when UCM is being deficient or broken. + +### `dependents` / `dependencies` +But wait, there's more. 
I can check the dependencies and dependents of a definition: +```ucm +.> add +.> dependents q +.> dependencies q +.> dependencies B +.> dependencies d +.> dependents d +.> +``` + +We don't have an index for dependents of constructors, but iirc if you ask for that, it will show you dependents of the type that provided the constructor. diff --git a/unison-src/transcripts/dependents-dependencies-debugfile.output.md b/unison-src/transcripts/dependents-dependencies-debugfile.output.md new file mode 100644 index 0000000000..b86c321eb4 --- /dev/null +++ b/unison-src/transcripts/dependents-dependencies-debugfile.output.md @@ -0,0 +1,93 @@ +### `debug.file` +I can use `debug.file` to see the hashes of the last typechecked file. + +Given this .u file: +```unison +type outside.A = A Nat outside.B +type outside.B = B Int +outside.c = 3 +outside.d = c < (p + 1) + +type inside.M = M outside.A +inside.p = c +inside.q x = x + p * p +inside.r = d +``` + +```ucm +.> debug.file + + type inside.M#4idrjau939 + type outside.A#0n4pbd0q9u + type outside.B#muulibntaq + inside.p#fiupm7pl7o + inside.q#l5pndeifuh + inside.r#im2kiu2hmn + outside.c#msp7bv40rv + outside.d#6cdi7g1oi2 + +``` +This will help me make progress in some situations when UCM is being deficient or broken. + +### `dependents` / `dependencies` +But wait, there's more. I can check the dependencies and dependents of a definition: +```ucm +.> add + + ⍟ I've added these definitions: + + type inside.M + type outside.A + type outside.B + inside.p : Nat + inside.q : Nat -> Nat + inside.r : Boolean + outside.c : Nat + outside.d : Boolean + +.> dependents q + + #l5pndeifuh doesn't have any dependents. + +.> dependencies q + + Dependencies of #l5pndeifuh: + + Reference Name + 1. ##Nat.* builtin.Nat.* + 2. ##Nat.+ builtin.Nat.+ + 3. #fiupm7pl7o inside.p + +.> dependencies B + + Dependencies of #muulibntaq: + + Reference Name + 1. ##Int builtin.Int + + Dependencies of #muulibntaq#0: + + Reference Name + 1. ##Int builtin.Int + 2. 
#muulibntaq outside.B + +.> dependencies d + + Dependencies of #6cdi7g1oi2: + + Reference Name + 1. ##Nat builtin.Nat + 2. ##Nat.+ builtin.Nat.+ + 3. ##Universal.< builtin.Universal.< + 4. #fiupm7pl7o inside.p + 5. #msp7bv40rv outside.c + +.> dependents d + + Dependents of #6cdi7g1oi2: + + Reference Name + 1. #im2kiu2hmn inside.r + +``` +We don't have an index for dependents of constructors, but iirc if you ask for that, it will show you dependents of the type that provided the constructor. diff --git a/unison-src/transcripts/destructuring-binds.md b/unison-src/transcripts/destructuring-binds.md new file mode 100644 index 0000000000..0170860d76 --- /dev/null +++ b/unison-src/transcripts/destructuring-binds.md @@ -0,0 +1,85 @@ +# Destructuring binds + +```ucm:hide +.> builtins.merge +``` + +Here's a couple examples: + +```unison +ex0 : Nat -> Nat +ex0 n = + (a, _, (c,d)) = ("uno", "dos", (n, 7)) + c + d + +ex1 : (a,b,(Nat,Nat)) -> Nat +ex1 tup = + (a, b, (c,d)) = tup + c + d +``` + +```ucm +.> add +.> view ex0 ex1 +``` + +Notice that `ex0` is printed using the `cases` syntax (but `ex1` is not). The pretty-printer currently prefers the `cases` syntax if definition can be printed using either destructuring bind or `cases`. + +A destructuring bind is just syntax for a single branch pattern match. Notice that Unison detects this function as an alias of `ex1`: + +```unison +ex2 : (a,b,(Nat,Nat)) -> Nat +ex2 tup = match tup with + (a, b, (c,d)) -> c + d +``` + +Syntactically, the left-hand side of the bind can be any pattern and can even include guards, for instance, see below. Because a destructuring bind desugars to a regular pattern match, pattern match coverage will eventually cause this to not typecheck: + +```unison:hide +ex3 = + Some x | x > 10 = Some 19 + x + 1 +``` + +## Corner cases + +Destructuring binds can't be recursive: the left-hand side bound variables aren't available on the right hand side. 
For instance, this doesn't typecheck: + +```unison:error +ex4 = + (a,b) = (a Nat.+ b, 19) + "Doesn't typecheck" +``` + +Even though the parser accepts any pattern on the LHS of a bind, it looks pretty weird to see things like `12 = x`, so we avoid showing a destructuring bind when the LHS is a "literal" pattern (like `42` or "hi"). Again these examples wouldn't compile with coverage checking. + +```unison +ex5 : 'Text +ex5 _ = match 99 + 1 with + 12 -> "Hi" + +ex5a : 'Text +ex5a _ = match (99 + 1, "hi") with + (x, "hi") -> "Not printed as a destructuring bind." +``` + +```ucm +.> add +.> view ex5 ex5a +``` + +Notice how it prints both an ordinary match. + +Also, for clarity, the pretty-printer shows a single-branch match if the match shadows free variables of the scrutinee, for example: + +```unison:hide +ex6 x = match x with + (x, y) -> x Nat.+ y +``` + +For clarity, the pretty-printer leaves this alone, even though in theory it could be written `(x,y) = x; x + y`: + +```ucm +.> add +.> view ex6 +``` diff --git a/unison-src/transcripts/destructuring-binds.output.md b/unison-src/transcripts/destructuring-binds.output.md new file mode 100644 index 0000000000..38cee10ac3 --- /dev/null +++ b/unison-src/transcripts/destructuring-binds.output.md @@ -0,0 +1,174 @@ +# Destructuring binds + +Here's a couple examples: + +```unison +ex0 : Nat -> Nat +ex0 n = + (a, _, (c,d)) = ("uno", "dos", (n, 7)) + c + d + +ex1 : (a,b,(Nat,Nat)) -> Nat +ex1 tup = + (a, b, (c,d)) = tup + c + d +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex0 : Nat -> Nat + ex1 : (a, b, (Nat, Nat)) -> Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + ex0 : Nat -> Nat + ex1 : (a, b, (Nat, Nat)) -> Nat + +.> view ex0 ex1 + + ex0 : Nat -> Nat + ex0 n = + use Nat + + (a, _, (c, d)) = ("uno", "dos", (n, 7)) + c + d + + ex1 : (a, b, (Nat, Nat)) -> Nat + ex1 = cases + (a, b, (c, d)) -> + use Nat + + c + d + +``` +Notice that `ex0` is printed using the `cases` syntax (but `ex1` is not). The pretty-printer currently prefers the `cases` syntax if definition can be printed using either destructuring bind or `cases`. + +A destructuring bind is just syntax for a single branch pattern match. Notice that Unison detects this function as an alias of `ex1`: + +```unison +ex2 : (a,b,(Nat,Nat)) -> Nat +ex2 tup = match tup with + (a, b, (c,d)) -> c + d +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex2 : (a, b, (Nat, Nat)) -> Nat + (also named ex1) + +``` +Syntactically, the left-hand side of the bind can be any pattern and can even include guards, for instance, see below. Because a destructuring bind desugars to a regular pattern match, pattern match coverage will eventually cause this to not typecheck: + +```unison +ex3 = + Some x | x > 10 = Some 19 + x + 1 +``` + +## Corner cases + +Destructuring binds can't be recursive: the left-hand side bound variables aren't available on the right hand side. For instance, this doesn't typecheck: + +```unison +ex4 = + (a,b) = (a Nat.+ b, 19) + "Doesn't typecheck" +``` + +```ucm + + I'm not sure what a means at line 2, columns 12-13 + + 2 | (a,b) = (a Nat.+ b, 19) + + Whatever it is, it has a type that conforms to builtin.Nat. 
+ + +``` +Even though the parser accepts any pattern on the LHS of a bind, it looks pretty weird to see things like `12 = x`, so we avoid showing a destructuring bind when the LHS is a "literal" pattern (like `42` or "hi"). Again these examples wouldn't compile with coverage checking. + +```unison +ex5 : 'Text +ex5 _ = match 99 + 1 with + 12 -> "Hi" + +ex5a : 'Text +ex5a _ = match (99 + 1, "hi") with + (x, "hi") -> "Not printed as a destructuring bind." +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ex5 : 'Text + ex5a : 'Text + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + ex5 : 'Text + ex5a : 'Text + +.> view ex5 ex5a + + ex5 : 'Text + ex5 _ = + use Nat + + match 99 + 1 with 12 -> "Hi" + + ex5a : 'Text + ex5a _ = + use Nat + + match (99 + 1, "hi") with + (x, "hi") -> "Not printed as a destructuring bind." + +``` +Notice how it prints both an ordinary match. 
+ +Also, for clarity, the pretty-printer shows a single-branch match if the match shadows free variables of the scrutinee, for example: + +```unison +ex6 x = match x with + (x, y) -> x Nat.+ y +``` + +For clarity, the pretty-printer leaves this alone, even though in theory it could be written `(x,y) = x; x + y`: + +```ucm +.> add + + ⍟ I've added these definitions: + + ex6 : (Nat, Nat) -> Nat + +.> view ex6 + + ex6 : (Nat, Nat) -> Nat + ex6 = cases + (x, y) -> + use Nat + + x + y + +``` diff --git a/unison-src/transcripts/diff.md b/unison-src/transcripts/diff.md new file mode 100644 index 0000000000..4d08c841f3 --- /dev/null +++ b/unison-src/transcripts/diff.md @@ -0,0 +1,201 @@ +```ucm:hide +.> builtins.mergeio +``` + +```unison:hide +x = 23 +``` + +```ucm +.b1> add +.b1> alias.term x fslkdjflskdjflksjdf +.> fork b1 b2 +.b2> alias.term x abc +``` +```unison:hide +fslkdjflskdjflksjdf = 663 +``` +```ucm +.b0> add +.> merge b0 b1 +.> diff.namespace b1 b2 +``` +Things we want to test: + +* Diffing identical namespaces +* Adds, removes, updates (with and without metadata updates) + * Adds with multiple names + * Adds with multiple names and different metadata on each +* Moved and copied definitions + * Moves that have more that 1 initial or final name +* ... terms and types +* New patches, modified patches, deleted patches, moved patches +* With and without propagated updates + +```unison:hide +fromJust = 1 +b = 2 +bdependent = b +c = 3 +helloWorld = '(printLine "Hello, world!") + +type A a = A Nat +ability X a1 a2 where x : Nat +``` + +```ucm +.ns1> add +.ns1> alias.term fromJust fromJust' +.ns1> alias.term helloWorld helloWorld2 +.ns1> link b fromJust +.ns1> fork .ns1 .ns2 +.ns1> cd . 
+``` +Here's what we've done so far: +```ucm +.> diff.namespace nothing ns1 +.> diff.namespace ns1 ns2 +``` + +```unison:hide +fromJust = "asldkfjasldkfj" +``` + +```ucm +.ns1b> add +.> merge ns1b ns1 +``` + +```unison:hide +fromJust = 99 +b = "oog" +d = 4 +e = 5 +f = 6 +unique type Y a b = Y a b +``` + +```ucm +.ns2> update +.ns2> links fromJust +.> diff.namespace ns1 ns2 +.> alias.term ns2.d ns2.d' +.> alias.type ns2.A ns2.A' +.> alias.type ns2.X ns2.X' +.> diff.namespace ns1 ns2 +.> link ns1.c ns2.f +.> link ns2.c ns2.c +.> diff.namespace ns1 ns2 +.> unlink ns2.b ns2.fromJust +.> diff.namespace ns1 ns2 +.> alias.type ns1.X ns1.X2 +.> alias.type ns2.A' ns2.A'' +.> view.patch ns2.patch +.> fork ns2 ns3 +.> alias.term ns2.fromJust' ns2.yoohoo +.> delete.term ns2.fromJust' +.> diff.namespace ns3 ns2 +``` +```unison:hide +bdependent = "banana" +``` +```ucm +.ns3> update +.> diff.namespace ns2 ns3 +``` + + +## Two different auto-propagated changes creating a name conflict +Currently, the auto-propagated name-conflicted definitions are not explicitly +shown, only their also-conflicted dependency is shown. +```unison:hide +a = 333 +b = a + 1 +``` +```ucm +.nsx> add +.> fork nsx nsy +.> fork nsx nsz +``` +```unison:hide +a = 444 +``` +```ucm +.nsy> update +``` +```unison:hide +a = 555 +``` +```ucm +.nsz> update +.> merge nsy nsw +``` +```ucm:error +.> merge nsz nsw +``` +```ucm +.> diff.namespace nsx nsw +.nsw> view a b +``` +```unison +a = 777 +``` + +```ucm:error +.nsw> update +.nsw> view a b +``` + +## + +Updates: -- 1 to 1 + +New name conflicts: -- updates where RHS has multiple hashes (excluding when RHS=LHS) + + 1. foo#jk19sm5bf8 : Nat - do we want to force a hashqualified? Arya thinks so + ↓ + 2. ┌ foo#0ja1qfpej6 : Nat + 3. └ foo#jk19sm5bf8 : Nat + +Resolved name conflicts: -- updates where LHS had multiple hashes and RHS has one + + 4. ┌ bar#0ja1qfpej6 : Nat + 5. └ bar#jk19sm5bf8 : Nat + ↓ + 6. 
bar#jk19sm5bf8 : Nat + +## Display issues to fixup + +- [d] Do we want to surface new edit conflicts in patches? +- [t] two different auto-propagated changes creating a name conflict should show + up somewhere besides the auto-propagate count +- [t] Things look screwy when the type signature doesn't fit and has to get broken + up into multiple lines. Maybe just disallow that? +- [d] Delete blank line in between copies / renames entries if all entries are 1 to 1 + see todo in the code +- [x] incorrectly calculated bracket alignment on hashqualified "Name changes" (delete.output.md) +- [x] just handle deletion of isPropagated in propagate function, leave HandleInput alone (assuming this does the trick) +- [x] might want unqualified names to be qualified sometimes: +- [x] if a name is updated to a not-yet-named reference, it's shown as both an update and an add +- [x] similarly, if a conflicted name is resolved by deleting the last name to + a reference, I (arya) suspect it will show up as a Remove +- [d] Maybe group and/or add headings to the types, constructors, terms +- [x] check whether creating a name conflict + adding metadata puts the update + in both categories; if it does, then filter out metadataUpdates from the + other categories +- [x] add tagging of propagated updates to test propagated updates output +- [x] missing old names in deletion ppe (delete.output.md) (superseded by \#1143) +- [x] delete.term has some bonkers output +- [x] Make a decision about how we want to show constructors in the diff +- [x] When you delete a name with metadata, it also shows up in updates section + with the deleted metadata. 
+- [x] An add with new metadata is getting characterized as an update +- [x] can there be a metadata-only update where it's not a singleton old and new reference +- [x] 12.patch patch needs a space +- [x] This looks like garbage +- [x] Extra 2 blank lines at the end of the add section +- [x] Fix alignment issues with buildTable, convert to column3M (to be written) +- [x] adding an alias is showing up as an Add and a Copy; should just show as Copy +- [x] removing one of multiple aliases appears in removes + moves + copies section +- [x] some overlapping cases between Moves and Copies^ +- [x] Maybe don't list the type signature twice for aliases? diff --git a/unison-src/transcripts/diff.output.md b/unison-src/transcripts/diff.output.md new file mode 100644 index 0000000000..7420b0c7b2 --- /dev/null +++ b/unison-src/transcripts/diff.output.md @@ -0,0 +1,733 @@ +```unison +x = 23 +``` + +```ucm + ☝️ The namespace .b1 is empty. + +.b1> add + + ⍟ I've added these definitions: + + x : Nat + +.b1> alias.term x fslkdjflskdjflksjdf + + Done. + +.> fork b1 b2 + + Done. + +.b2> alias.term x abc + + Done. + +``` +```unison +fslkdjflskdjflksjdf = 663 +``` + +```ucm + ☝️ The namespace .b0 is empty. + +.b0> add + + ⍟ I've added these definitions: + + fslkdjflskdjflksjdf : Nat + +.> merge b0 b1 + + Here's what's changed in b1 after the merge: + + New name conflicts: + + 1. fslkdjflskdjflksjdf#4kipsv2tm6 : Nat + ↓ + 2. ┌ fslkdjflskdjflksjdf#4kipsv2tm6 : Nat + 3. └ fslkdjflskdjflksjdf#s5tu4n7rlb : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> diff.namespace b1 b2 + + Resolved name conflicts: + + 1. ┌ fslkdjflskdjflksjdf#4kipsv2tm6 : Nat + 2. └ fslkdjflskdjflksjdf#s5tu4n7rlb : Nat + ↓ + 3. fslkdjflskdjflksjdf#4kipsv2tm6 : Nat + + Name changes: + + Original Changes + 4. fslkdjflskdjflksjdf#4kipsv2tm6 ┐ 5. abc (added) + 6. x ┘ 7. 
fslkdjflskdjflksjdf (added) + 8. fslkdjflskdjflksjdf#4kipsv2tm6 (removed) + +``` +Things we want to test: + +* Diffing identical namespaces +* Adds, removes, updates (with and without metadata updates) + * Adds with multiple names + * Adds with multiple names and different metadata on each +* Moved and copied definitions + * Moves that have more that 1 initial or final name +* ... terms and types +* New patches, modified patches, deleted patches, moved patches +* With and without propagated updates + +```unison +fromJust = 1 +b = 2 +bdependent = b +c = 3 +helloWorld = '(printLine "Hello, world!") + +type A a = A Nat +ability X a1 a2 where x : Nat +``` + +```ucm + ☝️ The namespace .ns1 is empty. + +.ns1> add + + ⍟ I've added these definitions: + + type A a + ability X a1 a2 + b : Nat + bdependent : Nat + c : Nat + fromJust : Nat + helloWorld : '{io.IO} () + +.ns1> alias.term fromJust fromJust' + + Done. + +.ns1> alias.term helloWorld helloWorld2 + + Done. + +.ns1> link b fromJust + + Updates: + + 1. ns1.fromJust : Nat + + 2. b : Nat + + 3. ns1.fromJust' : Nat + + 4. b : Nat + +.ns1> fork .ns1 .ns2 + + Done. + +.ns1> cd . + +``` +Here's what we've done so far: +```ucm +.> diff.namespace nothing ns1 + + Added definitions: + + 1. type A a + 2. ability X a1 a2 + 3. A.A : Nat -> A a + 4. X.x : {X a1 a2} Nat + 5. b : Nat + 6. bdependent : Nat + 7. c : Nat + 8. ┌ fromJust : Nat (+1 metadata) + 9. └ fromJust' : Nat (+1 metadata) + 10. ┌ helloWorld : '{io.IO} () + 11. └ helloWorld2 : '{io.IO} () + +.> diff.namespace ns1 ns2 + + The namespaces are identical. + +``` +```unison +fromJust = "asldkfjasldkfj" +``` + +```ucm + ☝️ The namespace .ns1b is empty. + +.ns1b> add + + ⍟ I've added these definitions: + + fromJust : Text + +.> merge ns1b ns1 + + Here's what's changed in ns1 after the merge: + + New name conflicts: + + 1. fromJust#jk19sm5bf8 : Nat + ↓ + 2. ┌ fromJust#hs2i9lcgkd : Text + 3. 
└ fromJust#jk19sm5bf8 : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +```unison +fromJust = 99 +b = "oog" +d = 4 +e = 5 +f = 6 +unique type Y a b = Y a b +``` + +```ucm +.ns2> update + + ⍟ I've added these definitions: + + unique type Y a b + d : Nat + e : Nat + f : Nat + + ⍟ I've updated these names to your new definition: + + b : Text + fromJust : Nat + (The old definition was also named fromJust'. I updated + this name too.) + +.ns2> links fromJust + + 1. b : Text + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. + +.> diff.namespace ns1 ns2 + + Resolved name conflicts: + + 1. ┌ fromJust#hs2i9lcgkd : Text + 2. └ fromJust#jk19sm5bf8 : Nat + ↓ + 3. fromJust#1o1iq26cq7 : Nat + - 4. ns1.b : Nat + + 5. ns2.b : Text + + Updates: + + 6. b : Nat + ↓ + 7. b : Text + + 8. fromJust' : Nat + ↓ + 9. fromJust' : Nat + - 10. ns1.b : Nat + + 11. ns2.b : Text + + There were 1 auto-propagated updates. + + Added definitions: + + 12. unique type Y a b + 13. Y.Y : a -> b -> Y a b + 14. d : Nat + 15. e : Nat + 16. f : Nat + + 17. patch patch (added 2 updates) + +.> alias.term ns2.d ns2.d' + + Done. + +.> alias.type ns2.A ns2.A' + + Done. + +.> alias.type ns2.X ns2.X' + + Done. + +.> diff.namespace ns1 ns2 + + Resolved name conflicts: + + 1. ┌ fromJust#hs2i9lcgkd : Text + 2. └ fromJust#jk19sm5bf8 : Nat + ↓ + 3. fromJust#1o1iq26cq7 : Nat + - 4. ns1.b : Nat + + 5. ns2.b : Text + + Updates: + + 6. b : Nat + ↓ + 7. b : Text + + 8. fromJust' : Nat + ↓ + 9. fromJust' : Nat + - 10. ns1.b : Nat + + 11. ns2.b : Text + + There were 1 auto-propagated updates. + + Added definitions: + + 12. unique type Y a b + 13. Y.Y : a -> b -> Y a b + 14. ┌ d : Nat + 15. └ d' : Nat + 16. e : Nat + 17. f : Nat + + 18. patch patch (added 2 updates) + + Name changes: + + Original Changes + 19. A 20. A' (added) + + 21. 
X 22. X' (added) + +.> link ns1.c ns2.f + + Updates: + + 1. ns2.f : Nat + + 2. c : Nat + +.> link ns2.c ns2.c + + Updates: + + 1. ns2.c : Nat + + 2. c : Nat + +.> diff.namespace ns1 ns2 + + Resolved name conflicts: + + 1. ┌ fromJust#hs2i9lcgkd : Text + 2. └ fromJust#jk19sm5bf8 : Nat + ↓ + 3. fromJust#1o1iq26cq7 : Nat + - 4. ns1.b : Nat + + 5. ns2.b : Text + + Updates: + + 6. b : Nat + ↓ + 7. b : Text + + 8. c : Nat + + 9. c : Nat + + 10. fromJust' : Nat + ↓ + 11. fromJust' : Nat + - 12. ns1.b : Nat + + 13. ns2.b : Text + + There were 1 auto-propagated updates. + + Added definitions: + + 14. unique type Y a b + 15. Y.Y : a -> b -> Y a b + 16. ┌ d : Nat + 17. └ d' : Nat + 18. e : Nat + 19. f : Nat (+1 metadata) + + 20. patch patch (added 2 updates) + + Name changes: + + Original Changes + 21. A 22. A' (added) + + 23. X 24. X' (added) + +.> unlink ns2.b ns2.fromJust + + I didn't make any changes. + +.> diff.namespace ns1 ns2 + + Resolved name conflicts: + + 1. ┌ fromJust#hs2i9lcgkd : Text + 2. └ fromJust#jk19sm5bf8 : Nat + ↓ + 3. fromJust#1o1iq26cq7 : Nat + - 4. ns1.b : Nat + + 5. ns2.b : Text + + Updates: + + 6. b : Nat + ↓ + 7. b : Text + + 8. c : Nat + + 9. c : Nat + + 10. fromJust' : Nat + ↓ + 11. fromJust' : Nat + - 12. ns1.b : Nat + + 13. ns2.b : Text + + There were 1 auto-propagated updates. + + Added definitions: + + 14. unique type Y a b + 15. Y.Y : a -> b -> Y a b + 16. ┌ d : Nat + 17. └ d' : Nat + 18. e : Nat + 19. f : Nat (+1 metadata) + + 20. patch patch (added 2 updates) + + Name changes: + + Original Changes + 21. A 22. A' (added) + + 23. X 24. X' (added) + +.> alias.type ns1.X ns1.X2 + + Done. + +.> alias.type ns2.A' ns2.A'' + + Done. + +.> view.patch ns2.patch + + Edited Terms: + ns1.b -> ns2.b + ns1.fromJust' -> ns2.fromJust + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +.> fork ns2 ns3 + + Done. + +.> alias.term ns2.fromJust' ns2.yoohoo + + Done. 
+ +.> delete.term ns2.fromJust' + + Name changes: + + Original Changes + 1. ns2.fromJust ┐ 2. ns2.fromJust' (removed) + 3. ns2.fromJust' │ + 4. ns2.yoohoo │ + 5. ns3.fromJust │ + 6. ns3.fromJust' ┘ + + Tip: You can use `undo` or `reflog` to undo this change. + +.> diff.namespace ns3 ns2 + + Name changes: + + Original Changes + 1. fromJust ┐ 2. yoohoo (added) + 3. fromJust' ┘ 4. fromJust' (removed) + +``` +```unison +bdependent = "banana" +``` + +```ucm +.ns3> update + + ⍟ I've updated these names to your new definition: + + bdependent : Text + +.> diff.namespace ns2 ns3 + + Updates: + + 1. bdependent : Text + ↓ + 2. bdependent : Text + + 3. patch patch (added 1 updates) + + Name changes: + + Original Changes + 4. fromJust ┐ 5. fromJust' (added) + 6. yoohoo ┘ 7. yoohoo (removed) + +``` +## Two different auto-propagated changes creating a name conflict +Currently, the auto-propagated name-conflicted definitions are not explicitly +shown, only their also-conflicted dependency is shown. +```unison +a = 333 +b = a + 1 +``` + +```ucm + ☝️ The namespace .nsx is empty. + +.nsx> add + + ⍟ I've added these definitions: + + a : Nat + b : Nat + +.> fork nsx nsy + + Done. + +.> fork nsx nsz + + Done. + +``` +```unison +a = 444 +``` + +```ucm +.nsy> update + + ⍟ I've updated these names to your new definition: + + a : Nat + +``` +```unison +a = 555 +``` + +```ucm +.nsz> update + + ⍟ I've updated these names to your new definition: + + a : Nat + +.> merge nsy nsw + + Here's what's changed in nsw after the merge: + + Added definitions: + + 1. a : Nat + 2. b : Nat (+1 metadata) + + 3. patch patch (added 1 updates) + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +```ucm +.> merge nsz nsw + + Here's what's changed in nsw after the merge: + + New name conflicts: + + 1. a#ekguc9h648 : Nat + ↓ + 2. ┌ a#5f8uodgrtf : Nat + 3. 
└ a#ekguc9h648 : Nat + + Updates: + + 4. b#be9a2abbbg : Nat + + There were 1 auto-propagated updates. + + 5. patch patch (added 1 updates) + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + + I tried to auto-apply the patch, but couldn't because it + contained contradictory entries. + +``` +```ucm +.> diff.namespace nsx nsw + + New name conflicts: + + 1. a#8ss2r9gqe7 : Nat + ↓ + 2. ┌ a#5f8uodgrtf : Nat + 3. └ a#ekguc9h648 : Nat + + Updates: + + There were 2 auto-propagated updates. + + Added definitions: + + 4. patch patch (added 2 updates) + +.nsw> view a b + + a#5f8uodgrtf : Nat + a#5f8uodgrtf = 555 + + a#ekguc9h648 : Nat + a#ekguc9h648 = 444 + + b#be9a2abbbg : Nat + b#be9a2abbbg = + use Nat + + a#ekguc9h648 + 1 + + b#kut4vstim7 : Nat + b#kut4vstim7 = + use Nat + + a#5f8uodgrtf + 1 + +``` +```unison +a = 777 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + x These definitions would fail on `add` or `update`: + + Reason + conflicted a : Nat + + Tip: Use `help filestatus` to learn more. + +``` +```ucm +.nsw> update + + x These definitions failed: + + Reason + conflicted a : Nat + + Tip: Use `help filestatus` to learn more. + + I tried to auto-apply the patch, but couldn't because it + contained contradictory entries. + +.nsw> view a b + + a#5f8uodgrtf : Nat + a#5f8uodgrtf = 555 + + a#ekguc9h648 : Nat + a#ekguc9h648 = 444 + + b#be9a2abbbg : Nat + b#be9a2abbbg = + use Nat + + a#ekguc9h648 + 1 + + b#kut4vstim7 : Nat + b#kut4vstim7 = + use Nat + + a#5f8uodgrtf + 1 + +``` +## + +Updates: -- 1 to 1 + +New name conflicts: -- updates where RHS has multiple hashes (excluding when RHS=LHS) + + 1. foo#jk19sm5bf8 : Nat - do we want to force a hashqualified? Arya thinks so + ↓ + 2. ┌ foo#0ja1qfpej6 : Nat + 3. 
└ foo#jk19sm5bf8 : Nat + +Resolved name conflicts: -- updates where LHS had multiple hashes and RHS has one + + 4. ┌ bar#0ja1qfpej6 : Nat + 5. └ bar#jk19sm5bf8 : Nat + ↓ + 6. bar#jk19sm5bf8 : Nat + +## Display issues to fixup + +- [d] Do we want to surface new edit conflicts in patches? +- [t] two different auto-propagated changes creating a name conflict should show + up somewhere besides the auto-propagate count +- [t] Things look screwy when the type signature doesn't fit and has to get broken + up into multiple lines. Maybe just disallow that? +- [d] Delete blank line in between copies / renames entries if all entries are 1 to 1 + see todo in the code +- [x] incorrectly calculated bracket alignment on hashqualified "Name changes" (delete.output.md) +- [x] just handle deletion of isPropagated in propagate function, leave HandleInput alone (assuming this does the trick) +- [x] might want unqualified names to be qualified sometimes: +- [x] if a name is updated to a not-yet-named reference, it's shown as both an update and an add +- [x] similarly, if a conflicted name is resolved by deleting the last name to + a reference, I (arya) suspect it will show up as a Remove +- [d] Maybe group and/or add headings to the types, constructors, terms +- [x] check whether creating a name conflict + adding metadata puts the update + in both categories; if it does, then filter out metadataUpdates from the + other categories +- [x] add tagging of propagated updates to test propagated updates output +- [x] missing old names in deletion ppe (delete.output.md) (superseded by \#1143) +- [x] delete.term has some bonkers output +- [x] Make a decision about how we want to show constructors in the diff +- [x] When you delete a name with metadata, it also shows up in updates section + with the deleted metadata. 
+- [x] An add with new metadata is getting characterized as an update +- [x] can there be a metadata-only update where it's not a singleton old and new reference +- [x] 12.patch patch needs a space +- [x] This looks like garbage +- [x] Extra 2 blank lines at the end of the add section +- [x] Fix alignment issues with buildTable, convert to column3M (to be written) +- [x] adding an alias is showing up as an Add and a Copy; should just show as Copy +- [x] removing one of multiple aliases appears in removes + moves + copies section +- [x] some overlapping cases between Moves and Copies^ +- [x] Maybe don't list the type signature twice for aliases? diff --git a/unison-src/transcripts/doc-formatting.md b/unison-src/transcripts/doc-formatting.md new file mode 100644 index 0000000000..f5b816f1d5 --- /dev/null +++ b/unison-src/transcripts/doc-formatting.md @@ -0,0 +1,254 @@ +This transcript explains a few minor details about doc parsing and pretty-printing, both from a user point of view and with some implementation notes. The later stuff is meant more as unit testing than for human consumption. (The ucm `add` commands and their output are hidden for brevity.) + +Docs can be used as inline code comments. + +```ucm:hide +.> builtins.merge +``` + +```unison +foo : Nat -> Nat +foo n = + [: do the thing :] + n + 1 +``` + +```ucm:hide +.> add +``` +```ucm +.> view foo +``` + +Note that `@` and `:]` must be escaped within docs. + +```unison +escaping = [: Docs look [: like \@this \:] :] +``` + +```ucm:hide +.> add +``` +```ucm +.> view escaping +``` + +(Alas you can't have `\@` or `\:]` in your doc, as there's currently no way to 'unescape' them.) + +```unison +-- Note that -- comments are preserved within doc literals. +commented = [: + example: + + -- a comment + f x = x + 1 +:] +``` + +```ucm:hide +.> add +``` +```ucm +.> view commented +``` + +### Indenting, and paragraph reflow + +Handling of indenting in docs between the parser and pretty-printer is a bit fiddly. 
+ +```unison +-- The leading and trailing spaces are stripped from the stored Doc by the +-- lexer, and one leading and trailing space is inserted again on view/edit +-- by the pretty-printer. +doc1 = [: hi :] +``` + +```ucm:hide +.> add +``` +```ucm +.> view doc1 +``` + +```unison +-- Lines (apart from the first line, i.e. the bit between the [: and the +-- first newline) are unindented until at least one of +-- them hits the left margin (by a post-processing step in the parser). +-- You may not notice this because the pretty-printer indents them again on +-- view/edit. +doc2 = [: hello + - foo + - bar + and the rest. :] +``` + +```ucm:hide +.> add +``` +```ucm +.> view doc2 +``` + +```unison +doc3 = [: When Unison identifies a paragraph, it removes any newlines from it before storing it, and then reflows the paragraph text to fit the display window on display/view/edit. + +For these purposes, a paragraph is any sequence of non-empty lines that have zero indent (after the unindenting mentioned above.) + + - So this is not a paragraph, even + though you might want it to be. + + And this text | as a paragraph + is not treated | either. + +Note that because of the special treatment of the first line mentioned above, where its leading space is removed, it is always treated as a paragraph. + :] +``` + +```ucm:hide +.> add +``` +```ucm +.> view doc3 +``` + +```unison +doc4 = [: Here's another example of some paragraphs. + + All these lines have zero indent. + + - Apart from this one. :] +``` + +```ucm:hide +.> add +``` +```ucm +.> view doc4 +``` + +```unison +-- The special treatment of the first line does mean that the following +-- is pretty-printed not so prettily. To fix that we'd need to get the +-- lexer to help out with interpreting doc literal indentation (because +-- it knows what columns the `[:` was in.) +doc5 = [: - foo + - bar + and the rest. 
:] +``` + +```ucm:hide +.> add +``` +```ucm +.> view doc5 +``` + +```unison +-- You can do the following to avoid that problem. +doc6 = [: + - foo + - bar + and the rest. + :] +``` + +```ucm:hide +.> add +``` +```ucm +.> view doc6 +``` + +### More testing + +```unison +-- Check empty doc works. +empty = [::] + +expr = foo 1 +``` +```ucm:hide +.> add +``` +```ucm +.> view empty +``` + +```unison +test1 = [: +The internal logic starts to get hairy when you use the \@ features, for example referencing a name like @List.take. Internally, the text between each such usage is its own blob (blob ends here --> @List.take), so paragraph reflow has to be aware of multiple blobs to do paragraph reflow (or, more accurately, to do the normalization step where newlines with a paragraph are removed.) + +Para to reflow: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor ending in ref @List.take + +@List.take starting para lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. + +Middle of para: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. + + - non-para line (@List.take) with ref @List.take + Another non-para line + @List.take starting non-para line + + - non-para line with ref @List.take +before a para-line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. + + - non-para line followed by a para line starting with ref +@List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. 
+ +a para-line ending with ref lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take + - non-para line + +para line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor + @List.take followed by non-para line starting with ref. + +@[signature] List.take + +@[source] foo + +@[evaluate] expr + +@[include] doc1 + +-- note the leading space below + @[signature] List.take + +:] +``` +```ucm:hide +.> add +``` +```ucm +.> view test1 +``` + +```unison +-- Regression test for #1363 - preservation of spaces after @ directives in first line when unindenting +reg1363 = [: `@List.take foo` bar + baz :] +``` +```ucm:hide +.> add +``` +```ucm +.> view reg1363 +``` + +```unison +-- Demonstrate doc display when whitespace follows a @[source] or @[evaluate] +-- whose output spans multiple lines. + +test2 = [: + Take a look at this: + @[source] foo ▶ bar +:] +``` +```ucm:hide +.> add +``` +View is fine. +```ucm +.> view test2 +``` +But note it's not obvious how display should best be handling this. At the moment it just does the simplest thing: +```ucm +.> display test2 +``` diff --git a/unison-src/transcripts/doc-formatting.output.md b/unison-src/transcripts/doc-formatting.output.md new file mode 100644 index 0000000000..81149a2c5a --- /dev/null +++ b/unison-src/transcripts/doc-formatting.output.md @@ -0,0 +1,512 @@ +This transcript explains a few minor details about doc parsing and pretty-printing, both from a user point of view and with some implementation notes. The later stuff is meant more as unit testing than for human consumption. (The ucm `add` commands and their output are hidden for brevity.) + +Docs can be used as inline code comments. + +```unison +foo : Nat -> Nat +foo n = + [: do the thing :] + n + 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + foo : Nat -> Nat + +``` +```ucm +.> view foo + + foo : Nat -> Nat + foo n = + use Nat + + [: do the thing :] + n + 1 + +``` +Note that `@` and `:]` must be escaped within docs. + +```unison +escaping = [: Docs look [: like \@this \:] :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + escaping : Doc + +``` +```ucm +.> view escaping + + escaping : Doc + escaping = [: Docs look [: like \@this \:] :] + +``` +(Alas you can't have `\@` or `\:]` in your doc, as there's currently no way to 'unescape' them.) + +```unison +-- Note that -- comments are preserved within doc literals. +commented = [: + example: + + -- a comment + f x = x + 1 +:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + commented : Doc + +``` +```ucm +.> view commented + + commented : Doc + commented = + [: + example: + + -- a comment + f x = x + 1 + :] + +``` +### Indenting, and paragraph reflow + +Handling of indenting in docs between the parser and pretty-printer is a bit fiddly. + +```unison +-- The leading and trailing spaces are stripped from the stored Doc by the +-- lexer, and one leading and trailing space is inserted again on view/edit +-- by the pretty-printer. +doc1 = [: hi :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + doc1 : Doc + +``` +```ucm +.> view doc1 + + doc1 : Doc + doc1 = [: hi :] + +``` +```unison +-- Lines (apart from the first line, i.e. 
the bit between the [: and the +-- first newline) are unindented until at least one of +-- them hits the left margin (by a post-processing step in the parser). +-- You may not notice this because the pretty-printer indents them again on +-- view/edit. +doc2 = [: hello + - foo + - bar + and the rest. :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + doc2 : Doc + +``` +```ucm +.> view doc2 + + doc2 : Doc + doc2 = + [: hello + - foo + - bar + and the rest. :] + +``` +```unison +doc3 = [: When Unison identifies a paragraph, it removes any newlines from it before storing it, and then reflows the paragraph text to fit the display window on display/view/edit. + +For these purposes, a paragraph is any sequence of non-empty lines that have zero indent (after the unindenting mentioned above.) + + - So this is not a paragraph, even + though you might want it to be. + + And this text | as a paragraph + is not treated | either. + +Note that because of the special treatment of the first line mentioned above, where its leading space is removed, it is always treated as a paragraph. + :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + doc3 : Doc + +``` +```ucm +.> view doc3 + + doc3 : Doc + doc3 = + [: When Unison identifies a paragraph, it removes any newlines + from it before storing it, and then reflows the paragraph text + to fit the display window on display/view/edit. + + For these purposes, a paragraph is any sequence of non-empty + lines that have zero indent (after the unindenting mentioned + above.) + + - So this is not a paragraph, even + though you might want it to be. + + And this text | as a paragraph + is not treated | either. 
+ + Note that because of the special treatment of the first line + mentioned above, where its leading space is removed, it is always + treated as a paragraph. + :] + +``` +```unison +doc4 = [: Here's another example of some paragraphs. + + All these lines have zero indent. + + - Apart from this one. :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + doc4 : Doc + +``` +```ucm +.> view doc4 + + doc4 : Doc + doc4 = + [: Here's another example of some paragraphs. + + All these lines have zero indent. + + - Apart from this one. :] + +``` +```unison +-- The special treatment of the first line does mean that the following +-- is pretty-printed not so prettily. To fix that we'd need to get the +-- lexer to help out with interpreting doc literal indentation (because +-- it knows what columns the `[:` was in.) +doc5 = [: - foo + - bar + and the rest. :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + doc5 : Doc + +``` +```ucm +.> view doc5 + + doc5 : Doc + doc5 = + [: - foo + - bar + and the rest. :] + +``` +```unison +-- You can do the following to avoid that problem. +doc6 = [: + - foo + - bar + and the rest. + :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + doc6 : Doc + +``` +```ucm +.> view doc6 + + doc6 : Doc + doc6 = + [: + - foo + - bar + and the rest. + :] + +``` +### More testing + +```unison +-- Check empty doc works. +empty = [::] + +expr = foo 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + empty : Doc + expr : Nat + +``` +```ucm +.> view empty + + empty : Doc + empty = [: :] + +``` +```unison +test1 = [: +The internal logic starts to get hairy when you use the \@ features, for example referencing a name like @List.take. Internally, the text between each such usage is its own blob (blob ends here --> @List.take), so paragraph reflow has to be aware of multiple blobs to do paragraph reflow (or, more accurately, to do the normalization step where newlines with a paragraph are removed.) + +Para to reflow: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor ending in ref @List.take + +@List.take starting para lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. + +Middle of para: lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. + + - non-para line (@List.take) with ref @List.take + Another non-para line + @List.take starting non-para line + + - non-para line with ref @List.take +before a para-line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. + + - non-para line followed by a para line starting with ref +@List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor. 
+ +a para-line ending with ref lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor @List.take + - non-para line + +para line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor + @List.take followed by non-para line starting with ref. + +@[signature] List.take + +@[source] foo + +@[evaluate] expr + +@[include] doc1 + +-- note the leading space below + @[signature] List.take + +:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + test1 : Doc + +``` +```ucm +.> view test1 + + test1 : Doc + test1 = + [: + The internal logic starts to get hairy when you use the \@ features, + for example referencing a name like @List.take. Internally, + the text between each such usage is its own blob (blob ends here + --> @List.take), so paragraph reflow has to be aware of multiple + blobs to do paragraph reflow (or, more accurately, to do the + normalization step where newlines with a paragraph are removed.) + + Para to reflow: lorem ipsum dolor lorem ipsum dolor lorem ipsum + dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem + ipsum dolor ending in ref @List.take + + @List.take starting para lorem ipsum dolor lorem ipsum dolor + lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum + dolor lorem ipsum dolor. + + Middle of para: lorem ipsum dolor lorem ipsum dolor lorem ipsum + dolor @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum + dolor lorem ipsum dolor. 
+ + - non-para line (@List.take) with ref @List.take + Another non-para line + @List.take starting non-para line + + - non-para line with ref @List.take + before a para-line lorem ipsum dolor lorem ipsum dolor lorem + ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor + lorem ipsum dolor lorem ipsum dolor. + + - non-para line followed by a para line starting with ref + @List.take lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor + lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum + dolor lorem ipsum dolor. + + a para-line ending with ref lorem ipsum dolor lorem ipsum dolor + lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum + dolor lorem ipsum dolor lorem ipsum dolor @List.take + - non-para line + + para line lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor + lorem ipsum dolor lorem ipsum dolor lorem ipsum dolor lorem ipsum + dolor lorem ipsum dolor + @List.take followed by non-para line starting with ref. + + @[signature] List.take + + @[source] foo + + @[evaluate] expr + + @[include] doc1 + + -- note the leading space below + @[signature] List.take + + :] + +``` +```unison +-- Regression test for #1363 - preservation of spaces after @ directives in first line when unindenting +reg1363 = [: `@List.take foo` bar + baz :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + reg1363 : Doc + +``` +```ucm +.> view reg1363 + + reg1363 : Doc + reg1363 = [: `@List.take foo` bar baz :] + +``` +```unison +-- Demonstrate doc display when whitespace follows a @[source] or @[evaluate] +-- whose output spans multiple lines. + +test2 = [: + Take a look at this: + @[source] foo ▶ bar +:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + test2 : Doc + +``` +View is fine. +```ucm +.> view test2 + + test2 : Doc + test2 = + [: + Take a look at this: + @[source] foo ▶ bar + :] + +``` +But note it's not obvious how display should best be handling this. At the moment it just does the simplest thing: +```ucm +.> display test2 + + + Take a look at this: + foo n = + use Nat + + [: do the thing :] + n + 1 ▶ bar + + +``` diff --git a/unison-src/transcripts/docs.md b/unison-src/transcripts/docs.md new file mode 100644 index 0000000000..0ce76d7bab --- /dev/null +++ b/unison-src/transcripts/docs.md @@ -0,0 +1,95 @@ +# Documenting Unison code + +```ucm:hide +.> builtins.merge +``` + +Unison documentation is written in Unison. Documentation is a value of the following type: + +```ucm +.> view builtin.Doc +``` + +You can create these `Doc` values with ordinary code, or you can use the special syntax. A value of type `Doc` can be created via syntax like: + +```unison +use .builtin + +doc1 = [: This is some documentation. + +It can span multiple lines. + +Can link to definitions like @List.drop or @List + +:] +``` + +Syntax: + +`[:` starts a documentation block; `:]` finishes it. Within the block: + +* Links to definitions are done with `@List`. `\@` (and `\:]`) if you want to escape. +* `@[signature] List.take` expands to the type signature of `List.take` +* `@[source] List.map` expands to the full source of `List.map` +* `@[include] someOtherDoc`, inserts a value `someOtherDoc : Doc` here. +* `@[evaluate] someDefinition` expands to the result of evaluating `someDefinition`, which must be a pre-existing definition in the codebase (can't be an arbitrary expression). + +### An example + +We are going to document `List.take` using some verbiage and a few examples. 
First we have to add the examples to the codebase: + +```unison +List.take.ex1 = take 0 [1,2,3,4,5] +List.take.ex2 = take 2 [1,2,3,4,5] +``` + +```ucm +.> add +``` + +And now let's write our docs and reference these examples: + +```unison +use .builtin + +docs.List.take = [: +`@List.take n xs` returns the first `n` elements of `xs`. (No need to add line breaks manually. The display command will do wrapping of text for you. Indent any lines where you don't want it to do this.) + +## Examples: + + @[source] List.take.ex1 + 🔽 + @List.take.ex1 = @[evaluate] List.take.ex1 + + + @[source] List.take.ex2 + 🔽 + @List.take.ex2 = @[evaluate] List.take.ex2 +:] +``` + +Let's add it to the codebase, and link it to the definition: + +```ucm +.> add +.> link docs.List.take builtin.List.take +``` + +Now that documentation is linked to the definition. We can view it if we like: + +```ucm +.> links builtin.List.take builtin.Doc +.> display 1 +``` + +Or there's also a convenient function, `docs`, which shows the `Doc` values that are linked to a definition. It's implemented in terms of `links` and `display`: + +```ucm +.> docs builtin.List.take +``` + +Note that if we view the source of the documentation, the various references are *not* expanded. + +```ucm +.> view docs.List.take +``` diff --git a/unison-src/transcripts/docs.output.md b/unison-src/transcripts/docs.output.md new file mode 100644 index 0000000000..cdc1b91284 --- /dev/null +++ b/unison-src/transcripts/docs.output.md @@ -0,0 +1,211 @@ +# Documenting Unison code + +Unison documentation is written in Unison. Documentation is a value of the following type: + +```ucm +.> view builtin.Doc + + unique type builtin.Doc + = Link Link + | Source Link + | Blob Text + | Join [builtin.Doc] + | Signature Term + | Evaluate Term + +``` +You can create these `Doc` values with ordinary code, or you can use the special syntax. 
A value of type `Doc` can be created via syntax like: + +```unison +use .builtin + +doc1 = [: This is some documentation. + +It can span multiple lines. + +Can link to definitions like @List.drop or @List + +:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + doc1 : Doc + +``` +Syntax: + +`[:` starts a documentation block; `:]` finishes it. Within the block: + +* Links to definitions are done with `@List`. `\@` (and `\:]`) if you want to escape. +* `@[signature] List.take` expands to the type signature of `List.take` +* `@[source] List.map` expands to the full source of `List.map` +* `@[include] someOtherDoc`, inserts a value `someOtherDoc : Doc` here. +* `@[evaluate] someDefinition` expands to the result of evaluating `someDefinition`, which must be a pre-existing definition in the codebase (can't be an arbitrary expression). + +### An example + +We are going to document `List.take` using some verbiage and a few examples. First we have to add the examples to the codebase: + +```unison +List.take.ex1 = take 0 [1,2,3,4,5] +List.take.ex2 = take 2 [1,2,3,4,5] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + List.take.ex1 : [Nat] + List.take.ex2 : [Nat] + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + List.take.ex1 : [Nat] + List.take.ex2 : [Nat] + +``` +And now let's write our docs and reference these examples: + +```unison +use .builtin + +docs.List.take = [: +`@List.take n xs` returns the first `n` elements of `xs`. (No need to add line breaks manually. The display command will do wrapping of text for you. Indent any lines where you don't want it to do this.) 
+ +## Examples: + + @[source] List.take.ex1 + 🔽 + @List.take.ex1 = @[evaluate] List.take.ex1 + + + @[source] List.take.ex2 + 🔽 + @List.take.ex2 = @[evaluate] List.take.ex2 +:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + docs.List.take : Doc + +``` +Let's add it to the codebase, and link it to the definition: + +```ucm +.> add + + ⍟ I've added these definitions: + + docs.List.take : Doc + +.> link docs.List.take builtin.List.take + + Updates: + + 1. builtin.List.take : Nat -> [a] -> [a] + + 2. docs.List.take : Doc + +``` +Now that documentation is linked to the definition. We can view it if we like: + +```ucm +.> links builtin.List.take builtin.Doc + + 1. docs.List.take : Doc + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. + +.> display 1 + + + `builtin.List.take n xs` returns the first `n` elements of `xs`. + (No need to add line breaks manually. The display command will + do wrapping of text for you. Indent any lines where you don't + want it to do this.) + + ## Examples: + + List.take.ex1 = builtin.List.take 0 [1, 2, 3, 4, 5] + 🔽 + ex1 = [] + + + List.take.ex2 = builtin.List.take 2 [1, 2, 3, 4, 5] + 🔽 + ex2 = [1, 2] + + +``` +Or there's also a convenient function, `docs`, which shows the `Doc` values that are linked to a definition. It's implemented in terms of `links` and `display`: + +```ucm +.> docs builtin.List.take + + + `builtin.List.take n xs` returns the first `n` elements of `xs`. + (No need to add line breaks manually. The display command will + do wrapping of text for you. Indent any lines where you don't + want it to do this.) 
+ + ## Examples: + + List.take.ex1 = builtin.List.take 0 [1, 2, 3, 4, 5] + 🔽 + ex1 = [] + + + List.take.ex2 = builtin.List.take 2 [1, 2, 3, 4, 5] + 🔽 + ex2 = [1, 2] + + +``` +Note that if we view the source of the documentation, the various references are *not* expanded. + +```ucm +.> view docs.List.take + + docs.List.take : Doc + docs.List.take = + [: + `@builtin.List.take n xs` returns the first `n` elements of `xs`. + (No need to add line breaks manually. The display command will + do wrapping of text for you. Indent any lines where you don't + want it to do this.) + + ## Examples: + + @[source] ex1 + 🔽 + @ex1 = @[evaluate] ex1 + + + @[source] ex2 + 🔽 + @ex2 = @[evaluate] ex2 + :] + +``` diff --git a/unison-src/transcripts/emptyCodebase.md b/unison-src/transcripts/emptyCodebase.md new file mode 100644 index 0000000000..a9ea55b850 --- /dev/null +++ b/unison-src/transcripts/emptyCodebase.md @@ -0,0 +1,27 @@ +# The empty codebase + +The Unison codebase, when first initialized, contains no definitions in its namespace. + +Not even `Nat` or `+`! + +BEHOLD!!! + +```ucm:error +.> ls +``` + +Technically, the definitions all exist, but they have no names. `builtins.merge` brings them into existence, under the current namespace: + +```ucm +.foo> builtins.merge +.foo> ls +``` + +And for a limited time, you can get even more builtin goodies: + +```ucm +.foo> builtins.mergeio +.foo> ls +``` + +More typically, you'd start out by pulling `base`. diff --git a/unison-src/transcripts/emptyCodebase.output.md b/unison-src/transcripts/emptyCodebase.output.md new file mode 100644 index 0000000000..8da1952194 --- /dev/null +++ b/unison-src/transcripts/emptyCodebase.output.md @@ -0,0 +1,41 @@ +# The empty codebase + +The Unison codebase, when first initialized, contains no definitions in its namespace. + +Not even `Nat` or `+`! + +BEHOLD!!! + +```ucm +.> ls + + nothing to show + +``` +Technically, the definitions all exist, but they have no names. 
`builtins.merge` brings them into existence, under the current namespace: + +```ucm + ☝️ The namespace .foo is empty. + +.foo> builtins.merge + + Done. + +.foo> ls + + 1. builtin/ (272 definitions) + +``` +And for a limited time, you can get even more builtin goodies: + +```ucm +.foo> builtins.mergeio + + Done. + +.foo> ls + + 1. builtin/ (435 definitions) + +``` +More typically, you'd start out by pulling `base`. diff --git a/unison-src/transcripts/errors/ucm-hide-all-error.md b/unison-src/transcripts/errors/ucm-hide-all-error.md new file mode 100644 index 0000000000..dcf94d8d32 --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide-all-error.md @@ -0,0 +1,12 @@ + +### Transcript parser hidden errors + +Dangerous scary words! + +When an expected error is not encountered in a `ucm:hide:all` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm:hide:all:error +.> history +``` diff --git a/unison-src/transcripts/errors/ucm-hide-all-error.output.md b/unison-src/transcripts/errors/ucm-hide-all-error.output.md new file mode 100644 index 0000000000..e3a9558abd --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide-all-error.output.md @@ -0,0 +1,17 @@ + +### Transcript parser hidden errors + +Dangerous scary words! + +When an expected error is not encountered in a `ucm:hide:all` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm +.> history +``` + + +🛑 + +The transcript was expecting an error in the stanza above, but did not encounter one. diff --git a/unison-src/transcripts/errors/ucm-hide-all.md b/unison-src/transcripts/errors/ucm-hide-all.md new file mode 100644 index 0000000000..22950a9334 --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide-all.md @@ -0,0 +1,12 @@ + +### Transcript parser hidden errors + +Dangerous scary words!
+ +When an error is encountered in a `ucm:hide:all` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm:hide:all +.> move.namespace foo bar +``` diff --git a/unison-src/transcripts/errors/ucm-hide-all.output.md b/unison-src/transcripts/errors/ucm-hide-all.output.md new file mode 100644 index 0000000000..e626779a3b --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide-all.output.md @@ -0,0 +1,17 @@ + +### Transcript parser hidden errors + +Dangerous scary words! + +When an error is encountered in a `ucm:hide:all` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm +.> move.namespace foo bar +``` + + +🛑 + +The transcript failed due to an error encountered in the stanza above. diff --git a/unison-src/transcripts/errors/ucm-hide-error.md b/unison-src/transcripts/errors/ucm-hide-error.md new file mode 100644 index 0000000000..68da57efc2 --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide-error.md @@ -0,0 +1,12 @@ + +### Transcript parser hidden errors + +Dangerous scary words! + +When an expected error is not encountered in a `ucm:hide` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm:hide:error +.> history +``` diff --git a/unison-src/transcripts/errors/ucm-hide-error.output.md b/unison-src/transcripts/errors/ucm-hide-error.output.md new file mode 100644 index 0000000000..0056a35888 --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide-error.output.md @@ -0,0 +1,17 @@ + +### Transcript parser hidden errors + +Dangerous scary words! + +When an expected error is not encountered in a `ucm:hide` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm +.> history +``` + + +🛑 + +The transcript was expecting an error in the stanza above, but did not encounter one. 
diff --git a/unison-src/transcripts/errors/ucm-hide.md b/unison-src/transcripts/errors/ucm-hide.md new file mode 100644 index 0000000000..aa725ada4c --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide.md @@ -0,0 +1,12 @@ + +### Transcript parser hidden errors + +Dangerous scary words! + +When an error is encountered in a `ucm:hide` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm:hide +.> move.namespace foo bar +``` diff --git a/unison-src/transcripts/errors/ucm-hide.output.md b/unison-src/transcripts/errors/ucm-hide.output.md new file mode 100644 index 0000000000..e012f7457f --- /dev/null +++ b/unison-src/transcripts/errors/ucm-hide.output.md @@ -0,0 +1,17 @@ + +### Transcript parser hidden errors + +Dangerous scary words! + +When an error is encountered in a `ucm:hide` block +then the transcript parser should print the stanza +and surface a helpful message. + +```ucm +.> move.namespace foo bar +``` + + +🛑 + +The transcript failed due to an error encountered in the stanza above. diff --git a/unison-src/transcripts/errors/unison-hide-all-error.md b/unison-src/transcripts/errors/unison-hide-all-error.md new file mode 100644 index 0000000000..0364b35fdf --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide-all-error.md @@ -0,0 +1,10 @@ + +### Transcript parser hidden errors + +When an expected error is not encountered in a `unison:hide:all:error` block +then the transcript parser should print the stanza +and surface a helpful message. 
+ +```unison:hide:all:error +myVal = 3 +``` \ No newline at end of file diff --git a/unison-src/transcripts/errors/unison-hide-all-error.output.md b/unison-src/transcripts/errors/unison-hide-all-error.output.md new file mode 100644 index 0000000000..3c3e6f3e5f --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide-all-error.output.md @@ -0,0 +1,16 @@ + +### Transcript parser hidden errors + +When an expected error is not encountered in a `unison:hide:all:error` block +then the transcript parser should print the stanza +and surface a helpful message. + +```unison +myVal = 3 +``` + + + +🛑 + +The transcript was expecting an error in the stanza above, but did not encounter one. diff --git a/unison-src/transcripts/errors/unison-hide-all.md b/unison-src/transcripts/errors/unison-hide-all.md new file mode 100644 index 0000000000..b722caad70 --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide-all.md @@ -0,0 +1,10 @@ + +### Transcript parser hidden errors + +When an error is encountered in a `unison:hide:all` block +then the transcript parser should print the stanza +and surface a helpful message. + +```unison:hide:all +g 3 +``` diff --git a/unison-src/transcripts/errors/unison-hide-all.output.md b/unison-src/transcripts/errors/unison-hide-all.output.md new file mode 100644 index 0000000000..7c4d462c08 --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide-all.output.md @@ -0,0 +1,16 @@ + +### Transcript parser hidden errors + +When an error is encountered in a `unison:hide:all` block +then the transcript parser should print the stanza +and surface a helpful message. + +```unison +g 3 +``` + + + +🛑 + +The transcript failed due to an error encountered in the stanza above. 
diff --git a/unison-src/transcripts/errors/unison-hide-error.md b/unison-src/transcripts/errors/unison-hide-error.md new file mode 100644 index 0000000000..1ab6e675d3 --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide-error.md @@ -0,0 +1,10 @@ + +### Transcript parser hidden errors + +When an expected error is not encountered in a `unison:hide:error` block +then the transcript parser should print the stanza +and surface a helpful message. + +```unison:hide:error +myVal = 3 +``` \ No newline at end of file diff --git a/unison-src/transcripts/errors/unison-hide-error.output.md b/unison-src/transcripts/errors/unison-hide-error.output.md new file mode 100644 index 0000000000..30ab85dc58 --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide-error.output.md @@ -0,0 +1,16 @@ + +### Transcript parser hidden errors + +When an expected error is not encountered in a `unison:hide:error` block +then the transcript parser should print the stanza +and surface a helpful message. + +```unison +myVal = 3 +``` + + + +🛑 + +The transcript was expecting an error in the stanza above, but did not encounter one. diff --git a/unison-src/transcripts/errors/unison-hide.md b/unison-src/transcripts/errors/unison-hide.md new file mode 100644 index 0000000000..52b5ef4000 --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide.md @@ -0,0 +1,10 @@ + +### Transcript parser hidden errors + +When an error is encountered in a `unison:hide` block +then the transcript parser should print the stanza +and surface a helpful message. 
+ +```unison:hide +g 3 +``` diff --git a/unison-src/transcripts/errors/unison-hide.output.md b/unison-src/transcripts/errors/unison-hide.output.md new file mode 100644 index 0000000000..0b369a71ac --- /dev/null +++ b/unison-src/transcripts/errors/unison-hide.output.md @@ -0,0 +1,16 @@ + +### Transcript parser hidden errors + +When an error is encountered in a `unison:hide` block +then the transcript parser should print the stanza +and surface a helpful message. + +```unison +g 3 +``` + + + +🛑 + +The transcript failed due to an error encountered in the stanza above. diff --git a/unison-src/transcripts/escape-sequences.md b/unison-src/transcripts/escape-sequences.md new file mode 100644 index 0000000000..fc7955ff3d --- /dev/null +++ b/unison-src/transcripts/escape-sequences.md @@ -0,0 +1,5 @@ +```unison +> "Rúnar" +> "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" +> "古池や蛙飛びこむ水の音" +``` diff --git a/unison-src/transcripts/escape-sequences.output.md b/unison-src/transcripts/escape-sequences.output.md new file mode 100644 index 0000000000..f0f0947cfa --- /dev/null +++ b/unison-src/transcripts/escape-sequences.output.md @@ -0,0 +1,28 @@ +```unison +> "Rúnar" +> "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" +> "古池や蛙飛びこむ水の音" +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
+ + 1 | > "Rúnar" + ⧩ + "Rúnar" + + 2 | > "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" + ⧩ + "῎Ανδρα μοι ἔννεπε, Μοῦσα, πολύτροπον" + + 3 | > "古池や蛙飛びこむ水の音" + ⧩ + "古池や蛙飛びこむ水の音" + +``` diff --git a/unison-src/transcripts/find-patch.md b/unison-src/transcripts/find-patch.md new file mode 100644 index 0000000000..f990704bc2 --- /dev/null +++ b/unison-src/transcripts/find-patch.md @@ -0,0 +1,28 @@ +# find.patch Test + +```ucm:hide +.> builtins.merge +``` + +```unison test.u +hey = "yello" +``` + +```ucm +.> add +``` + +Update + +```unison test.u +hey = "hello" +``` + +Update + +```ucm +.> update +.> find.patch +.> view.patch patch +.> view.patch 1 +``` diff --git a/unison-src/transcripts/find-patch.output.md b/unison-src/transcripts/find-patch.output.md new file mode 100644 index 0000000000..d021f0cc6d --- /dev/null +++ b/unison-src/transcripts/find-patch.output.md @@ -0,0 +1,81 @@ +# find.patch Test + +```unison +--- +title: test.u +--- +hey = "yello" + +``` + + +```ucm + + I found and typechecked these definitions in test.u. If you do + an `add` or `update`, here's how your codebase would change: + + ⍟ These new definitions are ok to `add`: + + hey : Text + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + hey : Text + +``` +Update + +```unison +--- +title: test.u +--- +hey = "hello" + +``` + + +```ucm + + I found and typechecked these definitions in test.u. If you do + an `add` or `update`, here's how your codebase would change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + hey : Text + +``` +Update + +```ucm +.> update + + ⍟ I've updated these names to your new definition: + + hey : Text + +.> find.patch + + 1. patch + +.> view.patch patch + + Edited Terms: hey#8e79ctircj -> hey + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. 
+ +.> view.patch 1 + + Edited Terms: hey#8e79ctircj -> hey + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +``` diff --git a/unison-src/transcripts/fix-1381-excess-propagate.md b/unison-src/transcripts/fix-1381-excess-propagate.md new file mode 100644 index 0000000000..84da98c5bc --- /dev/null +++ b/unison-src/transcripts/fix-1381-excess-propagate.md @@ -0,0 +1,28 @@ +We were seeing an issue where (it seemed) that every namespace that was visited during a propagate would get a new history node, even when it didn't contain any dependents. + +Example: +```unison:hide +a = "a term" +X.foo = "a namespace" +``` + +```ucm +.> add +``` + +Here is an update which should not affect `X`: +```unison:hide +a = "an update" +``` +```ucm +.> update +``` + +As of the time of this writing, the history for `X` should be a single node, `#4eeuo5bsfr`; +```ucm +.> history X +``` +however, as of release/M1i, we saw an extraneous node appear. If your `ucm` is fixed, you won't see it below: +```ucm:error +.> history #7nl6ppokhg +``` diff --git a/unison-src/transcripts/fix-1381-excess-propagate.output.md b/unison-src/transcripts/fix-1381-excess-propagate.output.md new file mode 100644 index 0000000000..7b6db698c5 --- /dev/null +++ b/unison-src/transcripts/fix-1381-excess-propagate.output.md @@ -0,0 +1,55 @@ +We were seeing an issue where (it seemed) that every namespace that was visited during a propagate would get a new history node, even when it didn't contain any dependents. 
+ +Example: +```unison +a = "a term" +X.foo = "a namespace" +``` + +```ucm +.> add + + ⍟ I've added these definitions: + + X.foo : ##Text + a : ##Text + +``` +Here is an update which should not affect `X`: +```unison +a = "an update" +``` + +```ucm +.> update + + ⍟ I've updated these names to your new definition: + + a : ##Text + +``` +As of the time of this writing, the history for `X` should be a single node, `#4eeuo5bsfr`; +```ucm +.> history X + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #4eeuo5bsfr + + + Adds / updates: + + foo + + □ #7asfbtqmoj (start of history) + +``` +however, as of release/M1i, we saw an extraneous node appear. If your `ucm` is fixed, you won't see it below: +```ucm +.> history #7nl6ppokhg + + 😶 + + I don't know of a namespace with that hash. + +``` diff --git a/unison-src/transcripts/fix-big-list-crash.md b/unison-src/transcripts/fix-big-list-crash.md new file mode 100644 index 0000000000..22be8f0cb1 --- /dev/null +++ b/unison-src/transcripts/fix-big-list-crash.md @@ -0,0 +1,13 @@ +#### Big list crash + +```ucm:hide +.> builtins.merge +``` + +Big lists have been observed to crash, while in the garbage collection step. 
+ +```unison +unique type Direction = U | D | L | R + +x = [(R,1005),(U,563),(R,417),(U,509),(L,237),(U,555),(R,397),(U,414),(L,490),(U,336),(L,697),(D,682),(L,180),(U,951),(L,189),(D,547),(R,697),(U,583),(L,172),(D,859),(L,370),(D,114),(L,519),(U,829),(R,389),(U,608),(R,66),(D,634),(L,320),(D,49),(L,931),(U,137),(L,349),(D,689),(L,351),(D,829),(R,819),(D,138),(L,118),(D,849),(R,230),(U,858),(L,509),(D,311),(R,815),(U,217),(R,359),(U,840),(R,77),(U,230),(R,361),(U,322),(R,300),(D,646),(R,348),(U,815),(R,793),(D,752),(R,967),(U,128),(R,948),(D,499),(R,359),(U,572),(L,566),(U,815),(R,630),(D,290),(L,829),(D,736),(R,358),(U,778),(R,891),(U,941),(R,544),(U,889),(L,920),(U,913),(L,447),(D,604),(R,538),(U,818),(L,215),(D,437),(R,447),(U,576),(R,452),(D,794),(R,864),(U,269),(L,325),(D,35),(L,268),(D,639),(L,101),(U,777),(L,776),(U,958),(R,105),(U,517),(R,667),(D,423),(R,603),(U,469),(L,125),(D,919),(R,879),(U,994),(R,665),(D,377),(R,456),(D,570),(L,685),(U,291),(R,261),(U,846),(R,840),(U,418),(L,974),(D,270),(L,312),(D,426),(R,621),(D,334),(L,855),(D,378),(R,694),(U,845),(R,481),(U,895),(L,362),(D,840),(L,712),(U,57),(R,276),(D,643),(R,566),(U,348),(R,361),(D,144),(L,287),(D,864),(L,556),(U,610),(L,927),(U,322),(R,271),(D,90),(L,741),(U,446),(R,181),(D,527),(R,56),(U,805),(L,907),(D,406),(L,286),(U,873),(L,79),(D,280),(L,153),(D,377),(R,253),(D,61),(R,475),(D,804),(R,788),(U,393),(L,660),(U,314),(R,489),(D,491),(L,234),(D,712),(L,253),(U,651),(L,777),(D,726),(R,146),(U,47),(R,630),(U,517),(R,226),(U,624),(L,834),(D,153),(L,513),(U,799),(R,287),(D,868),(R,982),(U,390),(L,296),(D,373),(R,9),(U,994),(R,105),(D,673),(L,657),(D,868),(R,738),(D,277),(R,374),(U,828),(R,860),(U,247),(R,484),(U,986),(L,723),(D,847),(L,578),(U,487),(L,51),(D,865),(L,328),(D,199),(R,812),(D,726),(R,355),(D,463),(R,761),(U,69),(R,508),(D,753),(L,81),(D,50),(L,345),(D,66),(L,764),(D,466),(L,975),(U,619),(R,59),(D,788),(L,737),(D,360),(R,14),(D,253),(L,512),(D,417),(R,828),(D,188),(L,394),(U,212),(R,658
),(U,369),(R,920),(U,927),(L,339),(U,552),(R,856),(D,458),(R,407),(U,41),(L,930),(D,460),(R,809),(U,467),(L,410),(D,800),(L,135),(D,596),(R,678),(D,4),(L,771),(D,637),(L,876),(U,192),(L,406),(D,136),(R,666),(U,730),(R,711),(D,291),(L,586),(U,845),(R,606),(U,2),(L,228),(D,759),(R,244),(U,946),(R,948),(U,160),(R,397),(U,134),(R,188),(U,850),(R,623),(D,315),(L,219),(D,450),(R,489),(U,374),(R,299),(D,474),(L,767),(D,679),(L,160),(D,403),(L,708)] +``` diff --git a/unison-src/transcripts/fix-big-list-crash.output.md b/unison-src/transcripts/fix-big-list-crash.output.md new file mode 100644 index 0000000000..cba6fa6be1 --- /dev/null +++ b/unison-src/transcripts/fix-big-list-crash.output.md @@ -0,0 +1,22 @@ +#### Big list crash + +Big lists have been observed to crash, while in the garbage collection step. + +```unison +unique type Direction = U | D | L | R + +x = [(R,1005),(U,563),(R,417),(U,509),(L,237),(U,555),(R,397),(U,414),(L,490),(U,336),(L,697),(D,682),(L,180),(U,951),(L,189),(D,547),(R,697),(U,583),(L,172),(D,859),(L,370),(D,114),(L,519),(U,829),(R,389),(U,608),(R,66),(D,634),(L,320),(D,49),(L,931),(U,137),(L,349),(D,689),(L,351),(D,829),(R,819),(D,138),(L,118),(D,849),(R,230),(U,858),(L,509),(D,311),(R,815),(U,217),(R,359),(U,840),(R,77),(U,230),(R,361),(U,322),(R,300),(D,646),(R,348),(U,815),(R,793),(D,752),(R,967),(U,128),(R,948),(D,499),(R,359),(U,572),(L,566),(U,815),(R,630),(D,290),(L,829),(D,736),(R,358),(U,778),(R,891),(U,941),(R,544),(U,889),(L,920),(U,913),(L,447),(D,604),(R,538),(U,818),(L,215),(D,437),(R,447),(U,576),(R,452),(D,794),(R,864),(U,269),(L,325),(D,35),(L,268),(D,639),(L,101),(U,777),(L,776),(U,958),(R,105),(U,517),(R,667),(D,423),(R,603),(U,469),(L,125),(D,919),(R,879),(U,994),(R,665),(D,377),(R,456),(D,570),(L,685),(U,291),(R,261),(U,846),(R,840),(U,418),(L,974),(D,270),(L,312),(D,426),(R,621),(D,334),(L,855),(D,378),(R,694),(U,845),(R,481),(U,895),(L,362),(D,840),(L,712),(U,57),(R,276),(D,643),(R,566),(U,348),(R,361),(D,144),(L,287),(D,864
),(L,556),(U,610),(L,927),(U,322),(R,271),(D,90),(L,741),(U,446),(R,181),(D,527),(R,56),(U,805),(L,907),(D,406),(L,286),(U,873),(L,79),(D,280),(L,153),(D,377),(R,253),(D,61),(R,475),(D,804),(R,788),(U,393),(L,660),(U,314),(R,489),(D,491),(L,234),(D,712),(L,253),(U,651),(L,777),(D,726),(R,146),(U,47),(R,630),(U,517),(R,226),(U,624),(L,834),(D,153),(L,513),(U,799),(R,287),(D,868),(R,982),(U,390),(L,296),(D,373),(R,9),(U,994),(R,105),(D,673),(L,657),(D,868),(R,738),(D,277),(R,374),(U,828),(R,860),(U,247),(R,484),(U,986),(L,723),(D,847),(L,578),(U,487),(L,51),(D,865),(L,328),(D,199),(R,812),(D,726),(R,355),(D,463),(R,761),(U,69),(R,508),(D,753),(L,81),(D,50),(L,345),(D,66),(L,764),(D,466),(L,975),(U,619),(R,59),(D,788),(L,737),(D,360),(R,14),(D,253),(L,512),(D,417),(R,828),(D,188),(L,394),(U,212),(R,658),(U,369),(R,920),(U,927),(L,339),(U,552),(R,856),(D,458),(R,407),(U,41),(L,930),(D,460),(R,809),(U,467),(L,410),(D,800),(L,135),(D,596),(R,678),(D,4),(L,771),(D,637),(L,876),(U,192),(L,406),(D,136),(R,666),(U,730),(R,711),(D,291),(L,586),(U,845),(R,606),(U,2),(L,228),(D,759),(R,244),(U,946),(R,948),(U,160),(R,397),(U,134),(R,188),(U,850),(R,623),(D,315),(L,219),(D,450),(R,489),(U,374),(R,299),(D,474),(L,767),(D,679),(L,160),(D,403),(L,708)] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + unique type Direction + x : [(Direction, Nat)] + +``` diff --git a/unison-src/transcripts/fix1063.md b/unison-src/transcripts/fix1063.md new file mode 100644 index 0000000000..1806b0e30a --- /dev/null +++ b/unison-src/transcripts/fix1063.md @@ -0,0 +1,17 @@ +Tests that functions named `.` are rendered correctly. + +```ucm:hide +.> builtins.merge +``` + +``` unison +(.) f g x = f (g x) + +noop = not . 
not +``` + +``` ucm +.> add +.> view noop +``` + diff --git a/unison-src/transcripts/fix1063.output.md b/unison-src/transcripts/fix1063.output.md new file mode 100644 index 0000000000..2020d47682 --- /dev/null +++ b/unison-src/transcripts/fix1063.output.md @@ -0,0 +1,35 @@ +Tests that functions named `.` are rendered correctly. + +```unison +(.) f g x = f (g x) + +noop = not . not +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + . : ∀ o 𝕖 i1 i. + (i1 ->{𝕖} o) -> (i ->{𝕖} i1) -> i ->{𝕖} o + noop : Boolean -> Boolean + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + . : ∀ o 𝕖 i1 i. (i1 ->{𝕖} o) -> (i ->{𝕖} i1) -> i ->{𝕖} o + noop : Boolean -> Boolean + +.> view noop + + noop : Boolean -> Boolean + noop = not . not + +``` diff --git a/unison-src/transcripts/fix1334.md b/unison-src/transcripts/fix1334.md new file mode 100644 index 0000000000..4f01d6adc8 --- /dev/null +++ b/unison-src/transcripts/fix1334.md @@ -0,0 +1,36 @@ +Previously, the `alias.term` and `alias.type` would fail if the source argument was hash-only, and there was no way to create an alias for a definition that didn't already have a name. Also, the `replace.term` and `replace.type` _only_ worked on hashes, and they had to be _full_ hashes. + +With this PR, the source of an alias can be a short hash (even of a definition that doesn't currently have a name in the namespace) along with a name or hash-qualified name from the current namespace as usual, and the arguments to `replace.term` and `replace.type` can be a short hash, a name, or a hash-qualified name. + +Let's make some hash-only aliases, now that we can. 
:mad-with-power-emoji: + +```ucm +.> alias.type ##Nat Cat +.> alias.term ##Nat.+ please_fix_763.+ +``` + +And some functions that use them: +```unison +f = 3 +g = 4 +h = f + 1 + +> h +``` + +```ucm +.> add +``` + +We used to have to know the full hash for a definition to be able to use the `replace.*` commands, but now we don't: +```ucm +.> names g +.> replace.term f g +.> names g +.> view.patch +``` + +The value of `h` should have been updated too: +```unison +> h +``` diff --git a/unison-src/transcripts/fix1334.output.md b/unison-src/transcripts/fix1334.output.md new file mode 100644 index 0000000000..f846f2acbf --- /dev/null +++ b/unison-src/transcripts/fix1334.output.md @@ -0,0 +1,101 @@ +Previously, the `alias.term` and `alias.type` would fail if the source argument was hash-only, and there was no way to create an alias for a definition that didn't already have a name. Also, the `replace.term` and `replace.type` _only_ worked on hashes, and they had to be _full_ hashes. + +With this PR, the source of an alias can be a short hash (even of a definition that doesn't currently have a name in the namespace) along with a name or hash-qualified name from the current namespace as usual, and the arguments to `replace.term` and `replace.type` can be a short hash, a name, or a hash-qualified name. + +Let's make some hash-only aliases, now that we can. :mad-with-power-emoji: + +```ucm +.> alias.type ##Nat Cat + + Done. + +.> alias.term ##Nat.+ please_fix_763.+ + + Done. + +``` +And some functions that use them: +```unison +f = 3 +g = 4 +h = f + 1 + +> h +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + f : Cat + g : Cat + h : Cat + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
+ + 5 | > h + ⧩ + 4 + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + f : Cat + g : Cat + h : Cat + +``` +We used to have to know the full hash for a definition to be able to use the `replace.*` commands, but now we don't: +```ucm +.> names g + + Term + Hash: #52addbrohu + Names: g + +.> replace.term f g + + Done. + +.> names g + + Term + Hash: #52addbrohu + Names: f g + +.> view.patch + + Edited Terms: f#msp7bv40rv -> f + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +``` +The value of `h` should have been updated too: +```unison +> h +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | > h + ⧩ + 5 + +``` diff --git a/unison-src/transcripts/fix1356.md b/unison-src/transcripts/fix1356.md new file mode 100644 index 0000000000..f932e8b4f2 --- /dev/null +++ b/unison-src/transcripts/fix1356.md @@ -0,0 +1,41 @@ +##### This transcript reproduces the failure to unlink documentation + +```ucm:hide +.> builtins.merge +``` + +Step 1: code a term and documentation for it +```unison +x = 1 +x.doc = [: I am the documentation for x:] +``` + +Step 2: add term and documentation, link, and check the documentation +```ucm +.> add +.> link x.doc x +.> docs x +``` + +Step 3: Oops I don't like the doc, so I will re-code it! +```unison +x.doc = [: I am the documentation for x, and I now look better:] +``` + +Step 4: I add it and expect to see it +```ucm +.> update +.> docs x +``` + +That works great. Let's relink the old doc too. 
+ +```ucm +.> link #v8f1hhvs57 x +``` + +Let's check that we see both docs: + +```ucm +.> docs x +``` diff --git a/unison-src/transcripts/fix1356.output.md b/unison-src/transcripts/fix1356.output.md new file mode 100644 index 0000000000..cc40c7b5e1 --- /dev/null +++ b/unison-src/transcripts/fix1356.output.md @@ -0,0 +1,94 @@ +##### This transcript reproduces the failure to unlink documentation + +Step 1: code a term and documentation for it +```unison +x = 1 +x.doc = [: I am the documentation for x:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + x.doc : Doc + +``` +Step 2: add term and documentation, link, and check the documentation +```ucm +.> add + + ⍟ I've added these definitions: + + x : Nat + x.doc : Doc + +.> link x.doc x + + Updates: + + 1. x : Nat + + 2. doc : Doc + +.> docs x + + I am the documentation for x + +``` +Step 3: Oops I don't like the doc, so I will re-code it! +```unison +x.doc = [: I am the documentation for x, and I now look better:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + x.doc : Doc + +``` +Step 4: I add it and expect to see it +```ucm +.> update + + ⍟ I've updated these names to your new definition: + + x.doc : Doc + +.> docs x + + I am the documentation for x, and I now look better + +``` +That works great. Let's relink the old doc too. + +```ucm +.> link #v8f1hhvs57 x + + Updates: + + 1. x : Nat + + 2. #v8f1hhvs57 : Doc + +``` +Let's check that we see both docs: + +```ucm +.> docs x + + 1. x.doc : Doc + 2. #v8f1hhvs57 : Doc + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. 
+ +``` diff --git a/unison-src/transcripts/fix689.md b/unison-src/transcripts/fix689.md new file mode 100644 index 0000000000..a156daa6aa --- /dev/null +++ b/unison-src/transcripts/fix689.md @@ -0,0 +1,13 @@ +Tests the fix for https://github.com/unisonweb/unison/issues/689 + +```ucm:hide +.> builtins.merge +``` + +``` unison +ability SystemTime where + systemTime : ##Nat + +tomorrow = '(SystemTime.systemTime + 24 * 60 * 60) +``` + diff --git a/unison-src/transcripts/fix689.output.md b/unison-src/transcripts/fix689.output.md new file mode 100644 index 0000000000..e4d39e5bcc --- /dev/null +++ b/unison-src/transcripts/fix689.output.md @@ -0,0 +1,21 @@ +Tests the fix for https://github.com/unisonweb/unison/issues/689 + +```unison +ability SystemTime where + systemTime : ##Nat + +tomorrow = '(SystemTime.systemTime + 24 * 60 * 60) +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ability SystemTime + tomorrow : '{SystemTime} Nat + +``` diff --git a/unison-src/transcripts/fix849.md b/unison-src/transcripts/fix849.md new file mode 100644 index 0000000000..4d111f9cc1 --- /dev/null +++ b/unison-src/transcripts/fix849.md @@ -0,0 +1,12 @@ + +```ucm:hide +.> builtins.merge +``` + +See [this ticket](https://github.com/unisonweb/unison/issues/849). + +```unison +x = 42 + +> x +``` diff --git a/unison-src/transcripts/fix849.output.md b/unison-src/transcripts/fix849.output.md new file mode 100644 index 0000000000..e9ec2183d7 --- /dev/null +++ b/unison-src/transcripts/fix849.output.md @@ -0,0 +1,27 @@ + +See [this ticket](https://github.com/unisonweb/unison/issues/849). + +```unison +x = 42 + +> x +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 3 | > x + ⧩ + 42 + +``` diff --git a/unison-src/transcripts/fix942.md b/unison-src/transcripts/fix942.md new file mode 100644 index 0000000000..5c12cb8c06 --- /dev/null +++ b/unison-src/transcripts/fix942.md @@ -0,0 +1,37 @@ +```ucm:hide +.> builtins.merge +``` + +First we add some code: + +```unison +x = 0 +y = x + 1 +z = y + 2 +``` + +```ucm +.> add +``` + +Now we edit `x` to be `7`, which should make `z` equal `10`: + +```unison +x = 7 +``` + +```ucm +.> update +.> view x y z +``` + +Uh oh! `z` is still referencing the old version. Just to confirm: + +```unison +test> t1 = if z == 3 then [Fail "nooo!!!"] else [Ok "great"] +``` + +```ucm +.> add +.> test +``` diff --git a/unison-src/transcripts/fix942.output.md b/unison-src/transcripts/fix942.output.md new file mode 100644 index 0000000000..c33368e139 --- /dev/null +++ b/unison-src/transcripts/fix942.output.md @@ -0,0 +1,114 @@ +First we add some code: + +```unison +x = 0 +y = x + 1 +z = y + 2 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + y : Nat + z : Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + x : Nat + y : Nat + z : Nat + +``` +Now we edit `x` to be `7`, which should make `z` equal `10`: + +```unison +x = 7 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. 
You can `update` them to your + new definition: + + x : Nat + +``` +```ucm +.> update + + ⍟ I've updated these names to your new definition: + + x : Nat + +.> view x y z + + x : Nat + x = 7 + + y : Nat + y = + use Nat + + x + 1 + + z : Nat + z = + use Nat + + y + 2 + +``` +Uh oh! `z` is still referencing the old version. Just to confirm: + +```unison +test> t1 = if z == 3 then [Fail "nooo!!!"] else [Ok "great"] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + t1 : [Result] + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | test> t1 = if z == 3 then [Fail "nooo!!!"] else [Ok "great"] + + ✅ Passed great + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + t1 : [Result] + +.> test + + Cached test results (`help testcache` to learn more) + + ◉ t1 great + + ✅ 1 test(s) passing + + Tip: Use view t1 to view the source of a test. + +``` diff --git a/unison-src/transcripts/fix987.md b/unison-src/transcripts/fix987.md new file mode 100644 index 0000000000..28e39518de --- /dev/null +++ b/unison-src/transcripts/fix987.md @@ -0,0 +1,37 @@ + +```ucm:hide +.> builtins.merge +``` + +First we'll add a definition: + +```unison +ability DeathStar where + attack : Text -> () + +spaceAttack1 x = + y = attack "saturn" + z = attack "neptune" + "All done" +``` + +Add it to the codebase: + +```ucm +.> add +``` + +Now we'll try to add a different definition that runs the actions in a different order. This should work fine: + +```unison +spaceAttack2 x = + z = attack "neptune" + y = attack "saturn" + "All done" +``` + +```ucm +.> add +``` + +Previously, this would fail because the hashing algorithm was being given one big let rec block whose binding order was normalized. 
diff --git a/unison-src/transcripts/fix987.output.md b/unison-src/transcripts/fix987.output.md new file mode 100644 index 0000000000..ecf3169535 --- /dev/null +++ b/unison-src/transcripts/fix987.output.md @@ -0,0 +1,65 @@ + +First we'll add a definition: + +```unison +ability DeathStar where + attack : Text -> () + +spaceAttack1 x = + y = attack "saturn" + z = attack "neptune" + "All done" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + ability DeathStar + spaceAttack1 : x ->{DeathStar} Text + +``` +Add it to the codebase: + +```ucm +.> add + + ⍟ I've added these definitions: + + ability DeathStar + spaceAttack1 : x ->{DeathStar} Text + +``` +Now we'll try to add a different definition that runs the actions in a different order. This should work fine: + +```unison +spaceAttack2 x = + z = attack "neptune" + y = attack "saturn" + "All done" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + spaceAttack2 : x ->{DeathStar} Text + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + spaceAttack2 : x ->{DeathStar} Text + +``` +Previously, this would fail because the hashing algorithm was being given one big let rec block whose binding order was normalized. diff --git a/unison-src/transcripts/hello.md b/unison-src/transcripts/hello.md new file mode 100644 index 0000000000..322036b93b --- /dev/null +++ b/unison-src/transcripts/hello.md @@ -0,0 +1,69 @@ + +# Hello! + +```ucm:hide +.> builtins.merge +``` + +This markdown file is also a Unison transcript file. Transcript files are an easy way to create self-documenting Unison programs, libraries, and tutorials. 
+ +The format is just a regular markdown file with some fenced code blocks that are typechecked and elaborated by `ucm`. For example, you can call this transcript via: + +``` +$ ucm transcript hello.md +``` + +This runs it on a freshly generated empty codebase. Alternately `ucm transcript.fork -codebase /path/to/code hello.md` runs the transcript on a freshly generated copy of the provided codebase. Do `ucm help` to learn more about usage. + +Fenced code blocks of type `unison` and `ucm` are treated specially: + +* `ucm` blocks are executed, and the output is interleaved into the output markdown file after each command, replacing the original `ucm` block. +* `unison` blocks are typechecked, and a `ucm` block with the output of typechecking and execution of the file is inserted immediately afterwards. + +Take a look at [the elaborated output](hello.output.md) to see what this file looks like after passing through the transcript runner. + +## Let's try it out!! + +In the `unison` fenced block, you can give an (optional) file name (defaults to `scratch.u`), like so: + +```unison myfile.u +x = 42 +``` + +Let's go ahead and add that to the codebase, then make sure it's there: + +```ucm +.> add +.> view x +``` + +If `view` returned no results, the transcript would fail at this point. + +## Hiding output + +You may not always want to view the output of typechecking and evaluation every time, in which case, you can add `:hide` to the block. For instance: + +```unison:hide +y = 99 +``` + +This works for `ucm` blocks as well. + +```ucm:hide +.> rename.term x answerToUltimateQuestionOfLife +``` + +Doing `unison:hide:all` hides the block altogether, both input and output - this is useful for doing behind-the-scenes control of `ucm`'s state. + +```unison:hide:all +> [: you won't see me :] +``` + +## Expecting failures + +Sometimes, you have a block which you are _expecting_ to fail, perhaps because you're illustrating how something would be a type error. 
Adding `:error` to the block will check for this. For instance, this program has a type error: + +```unison:error +hmm : .builtin.Nat +hmm = "Not, in fact, a number" +``` diff --git a/unison-src/transcripts/hello.output.md b/unison-src/transcripts/hello.output.md new file mode 100644 index 0000000000..fd94c280a6 --- /dev/null +++ b/unison-src/transcripts/hello.output.md @@ -0,0 +1,92 @@ + +# Hello! + +This markdown file is also a Unison transcript file. Transcript files are an easy way to create self-documenting Unison programs, libraries, and tutorials. + +The format is just a regular markdown file with some fenced code blocks that are typechecked and elaborated by `ucm`. For example, you can call this transcript via: + +``` +$ ucm transcript hello.md + +``` + +This runs it on a freshly generated empty codebase. Alternately `ucm transcript.fork -codebase /path/to/code hello.md` runs the transcript on a freshly generated copy of the provided codebase. Do `ucm help` to learn more about usage. + +Fenced code blocks of type `unison` and `ucm` are treated specially: + +* `ucm` blocks are executed, and the output is interleaved into the output markdown file after each command, replacing the original `ucm` block. +* `unison` blocks are typechecked, and a `ucm` block with the output of typechecking and execution of the file is inserted immediately afterwards. + +Take a look at [the elaborated output](hello.output.md) to see what this file looks like after passing through the transcript runner. + +## Let's try it out!! + +In the `unison` fenced block, you can give an (optional) file name (defaults to `scratch.u`), like so: + +```unison +--- +title: myfile.u +--- +x = 42 + +``` + + +```ucm + + I found and typechecked these definitions in myfile.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + +``` +Let's go ahead and add that to the codebase, then make sure it's there: + +```ucm +.> add + + ⍟ I've added these definitions: + + x : Nat + +.> view x + + x : Nat + x = 42 + +``` +If `view` returned no results, the transcript would fail at this point. + +## Hiding output + +You may not always want to view the output of typechecking and evaluation every time, in which case, you can add `:hide` to the block. For instance: + +```unison +y = 99 +``` + +This works for `ucm` blocks as well. + +Doing `unison:hide:all` hides the block altogether, both input and output - this is useful for doing behind-the-scenes control of `ucm`'s state. + +## Expecting failures + +Sometimes, you have a block which you are _expecting_ to fail, perhaps because you're illustrating how something would be a type error. Adding `:error` to the block will check for this. For instance, this program has a type error: + +```unison +hmm : .builtin.Nat +hmm = "Not, in fact, a number" +``` + +```ucm + + I found a value of type builtin.Text where I expected to find one of type builtin.Nat: + + 1 | hmm : .builtin.Nat + 2 | hmm = "Not, in fact, a number" + + +``` diff --git a/unison-src/transcripts/link.md b/unison-src/transcripts/link.md new file mode 100644 index 0000000000..46720e385e --- /dev/null +++ b/unison-src/transcripts/link.md @@ -0,0 +1,70 @@ +# Linking definitions to metadata + +```ucm:hide +.> builtins.mergeio +``` + +The `link` and `unlink` commands can be used to manage metadata linked to definitions. For example, you can link documentation to a definition: + +```unison +use .builtin + +coolFunction x = x * 2 + +coolFunction.doc = [: This is a cool function. 
:] +``` + +```ucm +.> add +.> link coolFunction.doc coolFunction +``` + +You can use arbitrary Unison values and link them as metadata to definitions: + +```unison +toCopyrightHolder author = match author with + Author guid name -> CopyrightHolder guid name + +alice = Author (GUID Bytes.empty) "Alice Coder" + +coolFunction.license = License [toCopyrightHolder alice] [Year 2020] licenses.mit + +licenses.mit = LicenseType [: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+:] +``` + +```ucm +.> add +.> link coolFunction.license coolFunction +.> link alice coolFunction +``` + +We can look at the links we have: + +```ucm +.> links coolFunction +``` + +We can link the same metadata simultaneously to multiple definitions: + +```unison +myLibrary.f x = x + 1 +myLibrary.g x = x + 2 +myLibrary.h x = x + 3 +``` + +```ucm +.> add +.> cd myLibrary +.myLibrary> find +.myLibrary> link .alice 1-3 +.myLibrary> links f +.myLibrary> links g +.myLibrary> links h +.myLibrary> history +``` diff --git a/unison-src/transcripts/link.output.md b/unison-src/transcripts/link.output.md new file mode 100644 index 0000000000..b85aa09ca8 --- /dev/null +++ b/unison-src/transcripts/link.output.md @@ -0,0 +1,202 @@ +# Linking definitions to metadata + +The `link` and `unlink` commands can be used to manage metadata linked to definitions. For example, you can link documentation to a definition: + +```unison +use .builtin + +coolFunction x = x * 2 + +coolFunction.doc = [: This is a cool function. :] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + coolFunction : Nat -> Nat + coolFunction.doc : Doc + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + coolFunction : Nat -> Nat + coolFunction.doc : Doc + +.> link coolFunction.doc coolFunction + + Updates: + + 1. coolFunction : Nat -> Nat + + 2. 
doc : Doc + +``` +You can use arbitrary Unison values and link them as metadata to definitions: + +```unison +toCopyrightHolder author = match author with + Author guid name -> CopyrightHolder guid name + +alice = Author (GUID Bytes.empty) "Alice Coder" + +coolFunction.license = License [toCopyrightHolder alice] [Year 2020] licenses.mit + +licenses.mit = LicenseType [: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +:] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + alice : Author + coolFunction.license : License + licenses.mit : LicenseType + toCopyrightHolder : Author -> CopyrightHolder + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + alice : Author + coolFunction.license : License + licenses.mit : LicenseType + toCopyrightHolder : Author -> CopyrightHolder + +.> link coolFunction.license coolFunction + + Updates: + + 1. 
coolFunction : Nat -> Nat + + 2. license : License + +.> link alice coolFunction + + Updates: + + 1. coolFunction : Nat -> Nat + + 2. alice : Author + +``` +We can look at the links we have: + +```ucm +.> links coolFunction + + 1. alice : Author + 2. coolFunction.license : License + 3. coolFunction.doc : Doc + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. + +``` +We can link the same metadata simultaneously to multiple definitions: + +```unison +myLibrary.f x = x + 1 +myLibrary.g x = x + 2 +myLibrary.h x = x + 3 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + myLibrary.f : Nat -> Nat + myLibrary.g : Nat -> Nat + myLibrary.h : Nat -> Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + myLibrary.f : Nat -> Nat + myLibrary.g : Nat -> Nat + myLibrary.h : Nat -> Nat + +.> cd myLibrary + +.myLibrary> find + + 1. f : Nat -> Nat + 2. g : Nat -> Nat + 3. h : Nat -> Nat + + +.myLibrary> link .alice 1-3 + + Updates: + + 1. myLibrary.f : Nat -> Nat + + 2. alice : Author + + 3. myLibrary.g : Nat -> Nat + + 4. alice : Author + + 5. myLibrary.h : Nat -> Nat + + 6. alice : Author + +.myLibrary> links f + + 1. .alice : Author + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. + +.myLibrary> links g + + 1. .alice : Author + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. + +.myLibrary> links h + + 1. .alice : Author + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. + +.myLibrary> history + + Note: The most recent namespace hash is immediately below this + message. 
+ + ⊙ #mquil07fad + + + + ⊙ #4nfhqq566a + + + Adds / updates: + + f g h + + □ #7asfbtqmoj (start of history) + +``` diff --git a/unison-src/transcripts/merge.md b/unison-src/transcripts/merge.md new file mode 100644 index 0000000000..51633493b5 --- /dev/null +++ b/unison-src/transcripts/merge.md @@ -0,0 +1,101 @@ + +```ucm:hide +.> builtins.merge +``` + +# How merging works + +Suppose we have two branches, `P1` and `P2`, and a subnamespace, `foo`, which we'll refer to with `P1.foo` , `P2.foo`. This doc explains how `merge(P1,P2)` is computed, including the `merge(P1,P2).foo` subnamespace. + +`LCA(P1,P2)` is the lowest common ancestor of `P1` and `P2`. To compute `merge(P1,P2)`, we: + +1. Compute `LCA(P1,P2)` and do a three way merge of that level of the tree, using the algorithm below. What about the children of `P1` and `P2`? Let's just consider a child namespace `foo`. There are a few cases: + 1. `P1` and `P2` both have foo as a child namespace. Then `merge(P1,P2).foo == merge(P1.foo, P2.foo)` + 2. `P1` has `foo` as a child namespace, but `P2` does not (or vice versa). Then we have two subcases: + 1. `LCA(P1,P2)` has no `foo`. This means that `foo` child namespace was added by `P1`. The merged result for the `foo` subnamespace is just `P1.foo`. + 2. `LCA(P1,P2)` does have `foo`. This means that `P2` _deleted_ the `foo` subnamespace. The merged result for the `foo` subnamespace is then `merge(P1.foo, cons empty LCA(P1,P2).foo)`. This does a history-preserving delete of all the definitions that existed at the `LCA` point in history. + 1. Example is like if `P1` added a new definition `foo.bar = 23` after the `LCA`, then `foo.bar` will exist in the merged result, but all the definitions that existed in `foo` at the time of the `LCA` will be deleted in the result. 
+ +### Diff-based 3-way merge algorithm + +Standard 3 way merge algorithm to merge `a` and `b`: + +* Let `lca = LCA(a,b)` +* merged result is: `apply(diff(lca,a) <> diff(lca,b), lca)` + +Relies on some diff combining operation `<>`. + +```unison:hide +foo.w = 2 +foo.x = 1 +baz.x = 3 +quux.x = 4 +``` + +```ucm +.P0> add +``` + +Now P0 has 3 sub-namespaces. +* foo will be modified definition-wise in each branch +* baz will be deleted in the P2 branch and left alone in P1 +* quux will be deleted in the P2 branch and added to in P1 +* P1 will add a bar sub-namespace + +```ucm +.P0> fork .P0 .P1 +.P0> fork .P0 .P2 +``` + +```unison:hide +foo.y = 2483908 +bar.y = 383 +quux.y = 333 +``` + +```ucm +.P1> add +.P1> delete.term foo.w +``` + +We added to `foo`, `bar` and `quux`, and deleted `foo.w`, which should stay deleted in the merge. + +```unison:hide +foo.z = +28348 +``` + +```ucm +.P2> add +.P2> delete.namespace baz +.P2> delete.namespace quux +.P2> find +``` + +We added `foo.z`, deleted whole namespaces `baz` and `quux` which should stay +deleted in the merge. + +Now we'll try merging `P1` and `P2` back into `P0`. We should see the union of all their definitions in the merged version of `P0`. + +This should succeed and the resulting P0 namespace should have `foo`, `bar` +and `quux` namespaces. + +```ucm +.P0> merge .P1 +.P0> merge .P2 +.P0> find +.P0> view foo.x foo.y foo.z bar.y quux.y +``` + +These test that things we expect to be deleted are still deleted. + +```ucm:error +.> view P0.foo.w +``` + +```ucm:error +.> view P0.baz.x +``` + +```ucm:error +.> view P0.quux.x +``` diff --git a/unison-src/transcripts/merge.output.md b/unison-src/transcripts/merge.output.md new file mode 100644 index 0000000000..ff64e189bb --- /dev/null +++ b/unison-src/transcripts/merge.output.md @@ -0,0 +1,227 @@ + +# How merging works + +Suppose we have two branches, `P1` and `P2`, and a subnamespace, `foo`, which we'll refer to with `P1.foo` , `P2.foo`.
This doc explains how `merge(P1,P2)` is computed, including the `merge(P1,P2).foo` subnamespace. + +`LCA(P1,P2)` is the lowest common ancestor of `P1` and `P2`. To compute `merge(P1,P2)`, we: + +1. Compute `LCA(P1,P2)` and do a three way merge of that level of the tree, using the algorithm below. What about the children of `P1` and `P2`? Let's just consider a child namespace `foo`. There are a few cases: + 1. `P1` and `P2` both have foo as a child namespace. Then `merge(P1,P2).foo == merge(P1.foo, P2.foo)` + 2. `P1` has `foo` as a child namespace, but `P2` does not (or vice versa). Then we have two subcases: + 1. `LCA(P1,P2)` has no `foo`. This means that `foo` child namespace was added by `P1`. The merged result for the `foo` subnamespace is just `P1.foo`. + 2. `LCA(P1,P2)` does have `foo`. This means that `P2` _deleted_ the `foo` subnamespace. The merged result for the `foo` subnamespace is then `merge(P1.foo, cons empty LCA(P1,P2).foo)`. This does a history-preserving delete of all the definitions that existed at the `LCA` point in history. + 1. Example is like if `P1` added a new definition `foo.bar = 23` after the `LCA`, then `foo.bar` will exist in the merged result, but all the definitions that existed in `foo` at the time of the `LCA` will be deleted in the result. + +### Diff-based 3-way merge algorithm + +Standard 3 way merge algorithm to merge `a` and `b`: + +* Let `lca = LCA(a,b)` +* merged result is: `apply(diff(lca,a) <> diff(lca,b), lca)` + +Relies on some diff combining operation `<>`. + +```unison +foo.w = 2 +foo.x = 1 +baz.x = 3 +quux.x = 4 +``` + +```ucm + ☝️ The namespace .P0 is empty. + +.P0> add + + ⍟ I've added these definitions: + + baz.x : Nat + foo.w : Nat + foo.x : Nat + quux.x : Nat + +``` +Now P0 has 3 sub-namespaces. 
+ +* foo will be modified definition-wise in each branch +* baz will be deleted in the P2 branch and left alone in P1 +* quux will be deleted in the P2 branch and added to in P1 +* P1 will add a bar sub-namespace + +```ucm +.P0> fork .P0 .P1 + + Done. + +.P0> fork .P0 .P2 + + Done. + +``` +```unison +foo.y = 2483908 +bar.y = 383 +quux.y = 333 +``` + +```ucm +.P1> add + + ⍟ I've added these definitions: + + bar.y : Nat + foo.y : Nat + quux.y : Nat + +.P1> delete.term foo.w + + Name changes: + + Original Changes + 1. P0.foo.w ┐ 2. P1.foo.w (removed) + 3. P1.foo.w │ + 4. P2.foo.w ┘ + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +We added to `foo`, `bar` and `quux`, and deleted `foo.w`, which should stay deleted in the merge. + +```unison +foo.z = +28348 +``` + +```ucm +.P2> add + + ⍟ I've added these definitions: + + foo.z : Int + +.P2> delete.namespace baz + + Removed definitions: + + 1. x : Nat + + Tip: You can use `undo` or `reflog` to undo this change. + +.P2> delete.namespace quux + + Removed definitions: + + 1. x : Nat + + Tip: You can use `undo` or `reflog` to undo this change. + +.P2> find + + 1. foo.w : Nat + 2. foo.x : Nat + 3. foo.z : Int + + +``` +We added `foo.z`, deleted whole namespaces `baz` and `quux` which should stay +deleted in the merge. + +Now we'll try merging `P1` and `P2` back into `P0`. We should see the union of all their definitions in the merged version of `P0`. + +This should succeed and the resulting P0 namespace should have `foo`, `bar` +and `quux` namespaces. + +```ucm +.P0> merge .P1 + + Here's what's changed in the current namespace after the + merge: + + Added definitions: + + 1. bar.y : Nat + 2. foo.y : Nat + 3. quux.y : Nat + + Removed definitions: + + 4. foo.w : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge.
+ +.P0> merge .P2 + + Here's what's changed in the current namespace after the + merge: + + Added definitions: + + 1. foo.z : Int + + Removed definitions: + + 2. baz.x : Nat + 3. quux.x : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.P0> find + + 1. bar.y : Nat + 2. foo.x : Nat + 3. foo.y : Nat + 4. foo.z : Int + 5. quux.y : Nat + + +.P0> view foo.x foo.y foo.z bar.y quux.y + + bar.y : Nat + bar.y = 383 + + foo.x : Nat + foo.x = 1 + + foo.y : Nat + foo.y = 2483908 + + foo.z : Int + foo.z = +28348 + + quux.y : Nat + quux.y = 333 + +``` +These test that things we expect to be deleted are still deleted. + +```ucm +.> view P0.foo.w + + ⚠️ + + The following names were not found in the codebase. Check your spelling. + P0.foo.w + +``` +```ucm +.> view P0.baz.x + + ⚠️ + + The following names were not found in the codebase. Check your spelling. + P0.baz.x + +``` +```ucm +.> view P0.quux.x + + ⚠️ + + The following names were not found in the codebase. Check your spelling. + P0.quux.x + +``` diff --git a/unison-src/transcripts/mergeloop.md b/unison-src/transcripts/mergeloop.md new file mode 100644 index 0000000000..bb03d5d258 --- /dev/null +++ b/unison-src/transcripts/mergeloop.md @@ -0,0 +1,51 @@ +# Merge loop test + +This tests for regressions of https://github.com/unisonweb/unison/issues/1276 where trivial merges cause loops in the history. 
+ +Let's make three identical namespaces with different histories: + +```unison +a = 1 +``` + +```ucm +.x> add +``` + +```unison +b = 2 +``` + +```ucm +.x> add +``` + +```unison +b = 2 +``` + +```ucm +.y> add +``` + +```unison +a = 1 +``` + +```ucm +.y> add +``` + +```unison +a = 1 +b = 2 +``` + +```ucm +.z> add +.> merge x y +.> merge y z +.> history z +``` + + diff --git a/unison-src/transcripts/mergeloop.output.md b/unison-src/transcripts/mergeloop.output.md new file mode 100644 index 0000000000..3f319c351b --- /dev/null +++ b/unison-src/transcripts/mergeloop.output.md @@ -0,0 +1,143 @@ +# Merge loop test + +This tests for regressions of https://github.com/unisonweb/unison/issues/1276 where trivial merges cause loops in the history. + +Let's make three identical namespaces with different histories: + +```unison +a = 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + a : ##Nat + +``` +```ucm + ☝️ The namespace .x is empty. + +.x> add + + ⍟ I've added these definitions: + + a : ##Nat + +``` +```unison +b = 2 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + b : ##Nat + +``` +```ucm +.x> add + + ⍟ I've added these definitions: + + b : ##Nat + +``` +```unison +b = 2 +``` + +```ucm + + I found and typechecked the definitions in scratch.u. This + file has been previously added to the codebase. + +``` +```ucm + ☝️ The namespace .y is empty. + +.y> add + + ⍟ I've added these definitions: + + b : ##Nat + +``` +```unison +a = 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + a : ##Nat + +``` +```ucm +.y> add + + ⍟ I've added these definitions: + + a : ##Nat + +``` +```unison +a = 1 +b = 2 +``` + +```ucm + + I found and typechecked the definitions in scratch.u. This + file has been previously added to the codebase. + +``` +```ucm + ☝️ The namespace .z is empty. + +.z> add + + ⍟ I've added these definitions: + + a : ##Nat + b : ##Nat + +.> merge x y + + Nothing changed as a result of the merge. + +.> merge y z + + Nothing changed as a result of the merge. + +.> history z + + Note: The most recent namespace hash is immediately below this + message. + + + + This segment of history starts with a merge. Use + `history #som3n4m3space` to view history starting from a given + namespace hash. + + ⊙ #0ucrusr0bl + ⑃ + #0lf1cvdccp + #ofcsecdak0 + +``` diff --git a/unison-src/transcripts/merges.md b/unison-src/transcripts/merges.md new file mode 100644 index 0000000000..f026b7dfac --- /dev/null +++ b/unison-src/transcripts/merges.md @@ -0,0 +1,119 @@ +# Forking and merging namespaces in `ucm` + +```ucm:hide +.> builtins.merge +``` + +The Unison namespace is a versioned tree of names that map to Unison definitions. You can change this namespace and fork and merge subtrees of it. Let's start by introducing a few definitions into a new namespace, `foo`: + +```unison +x = 42 +``` + +```ucm +.> add +``` + +Let's move `x` into a new namespace, `master`: + +```ucm +.> rename.term x master.x +``` + +If you want to do some experimental work in a namespace without disturbing anyone else, you can `fork` it (which is a shorthand for `copy.namespace`). This creates a copy of it, preserving its history. + +> __Note:__ these copies are very efficient to create as they just have pointers into the same underlying definitions. Create as many as you like. 
+ +Let's go ahead and do this: + +``` +.> fork master feature1 +.> view master.x +.> view feature1.x +``` + +Great! We can now do some further work in the `feature1` branch, then merge it back into `master` when we're ready. + +```unison +y = "hello" +``` + +```ucm +.feature1> add +.master> merge .feature1 +.master> view y +``` + +> Note: `merge src`, with one argument, merges `src` into the current namespace. You can also do `merge src dest` to merge into any destination namespace. + +Notice that `master` now has the definition of `y` we wrote. + +We can also delete the fork if we're done with it. (Don't worry, it's still in the `history` and can be resurrected at any time.) + +```ucm +.> delete.namespace .feature1 +.> history +``` + +To resurrect an old version of a namespace, you can learn its hash via the `history` command, then use `fork #namespacehash .newname`. + +## Concurrent edits and merges + +In the above scenario the destination namespace (`master`) was strictly behind the source namespace, so the merge didn't have anything interesting to do (Git would call this a "fast forward" merge). In other cases, the source and destination namespaces will each have changes the other doesn't know about, and the merge needs to do something more interesting. That's okay too, and Unison will merge those results, using a 3-way merge algorithm. + +> __Note:__ When merging nested namespaces, Unison actually uses a recursive 3-way merge, so it finds a different (and possibly closer) common ancestor at each level of the tree. + +Let's see how this works. We are going to create a copy of `master`, add and delete some definitions in `master` and in the fork, then merge.
+ +```ucm +.> fork master feature2 +``` + +Here's one fork, we add `z` and delete `x`: + +```unison +z = 99 +``` + +```ucm +.feature2> add +.feature2> delete.term x +``` + +And here's the other fork, where we update `y` and add a new definition, `frobnicate`: + +```unison +master.y = "updated y" +master.frobnicate n = n + 1 +``` + +```ucm +.> update +.> view master.y +.> view master.frobnicate +``` + +At this point, `master` and `feature2` both have some changes the other doesn't know about. Let's merge them. + +```ucm +.> merge feature2 master +``` + +Notice that `x` is deleted in the merged branch (it was deleted in `feature2` and untouched by `master`): + +```ucm:error +.> view master.x +``` + +And notice that `y` has the most recent value, and that `z` and `frobnicate` both exist as well: + +```ucm +.> view master.y +.> view master.z +.> view master.frobnicate +``` + +## FAQ + +* What happens if namespace1 deletes a name that namespace2 has updated? A: ??? +* ... diff --git a/unison-src/transcripts/merges.output.md b/unison-src/transcripts/merges.output.md new file mode 100644 index 0000000000..84adbbfad3 --- /dev/null +++ b/unison-src/transcripts/merges.output.md @@ -0,0 +1,436 @@ +# Forking and merging namespaces in `ucm` + +The Unison namespace is a versioned tree of names that map to Unison definitions. You can change this namespace and fork and merge subtrees of it. Let's start by introducing a few definitions into a new namespace, `foo`: + +```unison +x = 42 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + x : Nat + +``` +Let's move `x` into a new namespace, `master`: + +```ucm +.> rename.term x master.x + + Done. 
+ +``` +If you want to do some experimental work in a namespace without disturbing anyone else, you can `fork` it (which is a shorthand for `copy.namespace`). This creates a copy of it, preserving its history. + +> __Note:__ these copies are very efficient to create as they just have pointers into the same underlying definitions. Create as many as you like. + +Let's go ahead and do this: + +``` +.> fork master feature1 +.> view master.x +.> view feature1.x + +``` + +Great! We can now do some further work in the `feature1` branch, then merge it back into `master` when we're ready. + +```unison +y = "hello" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + y : Text + +``` +```ucm + ☝️ The namespace .feature1 is empty. + +.feature1> add + + ⍟ I've added these definitions: + + y : Text + +.master> merge .feature1 + + Here's what's changed in the current namespace after the + merge: + + Added definitions: + + 1. y : Text + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.master> view y + + y : Text + y = "hello" + +``` +> Note: `merge src`, with one argument, merges `src` into the current namespace. You can also do `merge src dest` to merge into any destination namespace. + +Notice that `master` now has the definition of `y` we wrote. + +We can also delete the fork if we're done with it. (Don't worry, it's still in the `history` and can be resurrected at any time.) + +```ucm +.> delete.namespace .feature1 + + Removed definitions: + + 1. y : Text + + Tip: You can use `undo` or `reflog` to undo this change. + +.> history + + Note: The most recent namespace hash is immediately below this + message. 
+ + ⊙ #70soik1og2 + + - Deletes: + + feature1.y + + ⊙ #hm2opil978 + + + Adds / updates: + + master.y + + = Copies: + + Original name New name(s) + feature1.y master.y + + ⊙ #v66fup9qub + + + Adds / updates: + + feature1.y + + ⊙ #gspgkkujs4 + + > Moves: + + Original name New name + x master.x + + ⊙ #bmg3t60dhp + + + Adds / updates: + + x + + ⊙ #mj9vgnc1ro + + + Adds / updates: + + builtin.Boolean builtin.Boolean.not builtin.Bytes + builtin.Bytes.++ builtin.Bytes.at builtin.Bytes.drop + builtin.Bytes.empty builtin.Bytes.flatten + builtin.Bytes.fromBase16 builtin.Bytes.fromBase32 + builtin.Bytes.fromBase64 + builtin.Bytes.fromBase64UrlUnpadded builtin.Bytes.fromList + builtin.Bytes.size builtin.Bytes.take + builtin.Bytes.toBase16 builtin.Bytes.toBase32 + builtin.Bytes.toBase64 builtin.Bytes.toBase64UrlUnpadded + builtin.Bytes.toList builtin.Char builtin.Char.fromNat + builtin.Char.toNat builtin.Debug.watch builtin.Doc + builtin.Doc.Blob builtin.Doc.Evaluate builtin.Doc.Join + builtin.Doc.Link builtin.Doc.Signature builtin.Doc.Source + builtin.Either builtin.Either.Left builtin.Either.Right + builtin.Float builtin.Float.* builtin.Float.+ + builtin.Float.- builtin.Float./ builtin.Float.abs + builtin.Float.acos builtin.Float.acosh builtin.Float.asin + builtin.Float.asinh builtin.Float.atan builtin.Float.atan2 + builtin.Float.atanh builtin.Float.ceiling + builtin.Float.cos builtin.Float.cosh builtin.Float.eq + builtin.Float.exp builtin.Float.floor + builtin.Float.fromText builtin.Float.gt builtin.Float.gteq + builtin.Float.log builtin.Float.logBase builtin.Float.lt + builtin.Float.lteq builtin.Float.max builtin.Float.min + builtin.Float.pow builtin.Float.round builtin.Float.sin + builtin.Float.sinh builtin.Float.sqrt builtin.Float.tan + builtin.Float.tanh builtin.Float.toText + builtin.Float.truncate builtin.Int builtin.Int.* + builtin.Int.+ builtin.Int.- builtin.Int./ builtin.Int.and + builtin.Int.complement builtin.Int.eq builtin.Int.fromText + builtin.Int.gt 
builtin.Int.gteq builtin.Int.increment + builtin.Int.isEven builtin.Int.isOdd + builtin.Int.leadingZeros builtin.Int.lt builtin.Int.lteq + builtin.Int.mod builtin.Int.negate builtin.Int.or + builtin.Int.pow builtin.Int.shiftLeft + builtin.Int.shiftRight builtin.Int.signum + builtin.Int.toFloat builtin.Int.toText + builtin.Int.trailingZeros builtin.Int.truncate0 + builtin.Int.xor builtin.Link builtin.Link.Term##Link.Term + builtin.Link.Term#quh#0 builtin.Link.Type##Link.Type + builtin.Link.Type#quh#1 builtin.List builtin.List.++ + builtin.List.+: builtin.List.:+ builtin.List.at + builtin.List.cons builtin.List.drop builtin.List.empty + builtin.List.size builtin.List.snoc builtin.List.take + builtin.Nat builtin.Nat.* builtin.Nat.+ builtin.Nat./ + builtin.Nat.and builtin.Nat.complement builtin.Nat.drop + builtin.Nat.eq builtin.Nat.fromText builtin.Nat.gt + builtin.Nat.gteq builtin.Nat.increment builtin.Nat.isEven + builtin.Nat.isOdd builtin.Nat.leadingZeros builtin.Nat.lt + builtin.Nat.lteq builtin.Nat.mod builtin.Nat.or + builtin.Nat.pow builtin.Nat.shiftLeft + builtin.Nat.shiftRight builtin.Nat.sub builtin.Nat.toFloat + builtin.Nat.toInt builtin.Nat.toText + builtin.Nat.trailingZeros builtin.Nat.xor builtin.Optional + builtin.Optional.None builtin.Optional.Some + builtin.Request builtin.SeqView builtin.SeqView.VElem + builtin.SeqView.VEmpty builtin.Test.Result + builtin.Test.Result.Fail builtin.Test.Result.Ok + builtin.Text builtin.Text.!= builtin.Text.++ + builtin.Text.drop builtin.Text.empty builtin.Text.eq + builtin.Text.fromCharList builtin.Text.gt + builtin.Text.gteq builtin.Text.lt builtin.Text.lteq + builtin.Text.size builtin.Text.take + builtin.Text.toCharList builtin.Text.uncons + builtin.Text.unsnoc builtin.Tuple builtin.Tuple.Cons + builtin.Unit builtin.Unit.Unit builtin.Universal.< + builtin.Universal.<= builtin.Universal.== + builtin.Universal.> builtin.Universal.>= + builtin.Universal.compare builtin.bug + builtin.crypto.HashAlgorithm + 
builtin.crypto.HashAlgorithm.Blake2b_256 + builtin.crypto.HashAlgorithm.Blake2b_512 + builtin.crypto.HashAlgorithm.Blake2s_256 + builtin.crypto.HashAlgorithm.Sha2_256 + builtin.crypto.HashAlgorithm.Sha2_512 + builtin.crypto.HashAlgorithm.Sha3_256 + builtin.crypto.HashAlgorithm.Sha3_512 builtin.crypto.hash + builtin.crypto.hashBytes builtin.crypto.hmac + builtin.crypto.hmacBytes builtin.io2.BufferMode + builtin.io2.BufferMode.BlockBuffering + builtin.io2.BufferMode.LineBuffering + builtin.io2.BufferMode.NoBuffering + builtin.io2.BufferMode.SizedBlockBuffering + builtin.io2.FileMode builtin.io2.FileMode.Append + builtin.io2.FileMode.Read builtin.io2.FileMode.ReadWrite + builtin.io2.FileMode.Write builtin.io2.Handle + builtin.io2.IO builtin.io2.IO.clientSocket + builtin.io2.IO.closeFile builtin.io2.IO.closeSocket + builtin.io2.IO.createDirectory builtin.io2.IO.delay + builtin.io2.IO.fileExists builtin.io2.IO.forkComp + builtin.io2.IO.getBuffering + builtin.io2.IO.getCurrentDirectory + builtin.io2.IO.getFileSize builtin.io2.IO.getFileTimestamp + builtin.io2.IO.getLine builtin.io2.IO.getTempDirectory + builtin.io2.IO.getText builtin.io2.IO.handlePosition + builtin.io2.IO.isDirectory builtin.io2.IO.isFileEOF + builtin.io2.IO.isFileOpen builtin.io2.IO.isSeekable + builtin.io2.IO.kill builtin.io2.IO.listen + builtin.io2.IO.openFile builtin.io2.IO.putText + builtin.io2.IO.removeDirectory builtin.io2.IO.removeFile + builtin.io2.IO.renameDirectory builtin.io2.IO.renameFile + builtin.io2.IO.seekHandle builtin.io2.IO.serverSocket + builtin.io2.IO.setBuffering + builtin.io2.IO.setCurrentDirectory + builtin.io2.IO.socketAccept builtin.io2.IO.socketReceive + builtin.io2.IO.socketSend builtin.io2.IO.stdHandle + builtin.io2.IO.systemTime builtin.io2.IOError + builtin.io2.IOError.AlreadyExists builtin.io2.IOError.EOF + builtin.io2.IOError.IllegalOperation + builtin.io2.IOError.NoSuchThing + builtin.io2.IOError.PermissionDenied + builtin.io2.IOError.ResourceBusy + 
builtin.io2.IOError.ResourceExhausted + builtin.io2.IOError.UserError builtin.io2.MVar + builtin.io2.MVar.isEmpty builtin.io2.MVar.new + builtin.io2.MVar.newEmpty builtin.io2.MVar.put + builtin.io2.MVar.read builtin.io2.MVar.swap + builtin.io2.MVar.take builtin.io2.MVar.tryPut + builtin.io2.MVar.tryRead builtin.io2.MVar.tryTake + builtin.io2.SeekMode builtin.io2.SeekMode.AbsoluteSeek + builtin.io2.SeekMode.RelativeSeek + builtin.io2.SeekMode.SeekFromEnd builtin.io2.Socket + builtin.io2.StdHandle builtin.io2.StdHandle.StdErr + builtin.io2.StdHandle.StdIn builtin.io2.StdHandle.StdOut + builtin.io2.ThreadId builtin.todo + + □ #7asfbtqmoj (start of history) + +``` +To resurrect an old version of a namespace, you can learn its hash via the `history` command, then use `fork #namespacehash .newname`. + +## Concurrent edits and merges + +In the above scenario the destination namespace (`master`) was strictly behind the source namespace, so the merge didn't have anything interesting to do (Git would call this a "fast forward" merge). In other cases, the source and destination namespaces will each have changes the other doesn't know about, and the merge needs to do something more interesting. That's okay too, and Unison will merge those results, using a 3-way merge algorithm. + +> __Note:__ When merging nested namespaces, Unison actually uses a recursive 3-way merge, so it finds a different (and possibly closer) common ancestor at each level of the tree. + +Let's see how this works. We are going to create a copy of `master`, add and delete some definitions in `master` and in the fork, then merge. + +```ucm +.> fork master feature2 + + Done. + +``` +Here's one fork, we add `z` and delete `x`: + +```unison +z = 99 +``` + +```ucm + + I found and typechecked these definitions in scratch.u.
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + z : Nat + +``` +```ucm +.feature2> add + + ⍟ I've added these definitions: + + z : Nat + +.feature2> delete.term x + + Name changes: + + Original Changes + 1. feature2.x ┐ 2. feature2.x (removed) + 3. master.x ┘ + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +And here's the other fork, where we update `y` and add a new definition, `frobnicate`: + +```unison +master.y = "updated y" +master.frobnicate n = n + 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + master.frobnicate : Nat -> Nat + master.y : Text + +``` +```ucm +.> update + + ⍟ I've added these definitions: + + master.frobnicate : Nat -> Nat + + ⍟ I've updated these names to your new definition: + + master.y : Text + (The old definition was also named feature2.y. I updated + this name too.) + +.> view master.y + + feature2.y : Text + feature2.y = "updated y" + +.> view master.frobnicate + + master.frobnicate : Nat -> Nat + master.frobnicate n = + use Nat + + n + 1 + +``` +At this point, `master` and `feature2` both have some changes the other doesn't know about. Let's merge them. + +```ucm +.> merge feature2 master + + Here's what's changed in master after the merge: + + Added definitions: + + 1. z : Nat + + Removed definitions: + + 2. x : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +Notice that `x` is deleted in the merged branch (it was deleted in `feature2` and untouched by `master`): + +```ucm +.> view master.x + + ⚠️ + + The following names were not found in the codebase. Check your spelling. 
+ master.x + +``` +And notice that `y` has the most recent value, and that `z` and `frobnicate` both exist as well: + +```ucm +.> view master.y + + feature2.y : Text + feature2.y = "updated y" + +.> view master.z + + feature2.z : Nat + feature2.z = 99 + +.> view master.frobnicate + + master.frobnicate : Nat -> Nat + master.frobnicate n = + use Nat + + n + 1 + +``` +## FAQ + +* What happens if namespace1 deletes a name that namespace2 has updated? A: ??? +* ... diff --git a/unison-src/transcripts/names.md b/unison-src/transcripts/names.md new file mode 100644 index 0000000000..88f405dbf7 --- /dev/null +++ b/unison-src/transcripts/names.md @@ -0,0 +1,20 @@ + Example uses of the `names` command and output +```ucm:hide +.> alias.type ##Int .builtins.Int +``` + +```unison:hide +type IntTriple = IntTriple (Int, Int, Int) +intTriple = IntTriple(+1, +1, +1) +``` + +```ucm:hide +.> add +``` + +```ucm +.> alias.type IntTriple namespc.another.TripleInt +.> alias.term intTriple namespc.another.tripleInt +.> names IntTriple +.> names intTriple +``` \ No newline at end of file diff --git a/unison-src/transcripts/names.output.md b/unison-src/transcripts/names.output.md new file mode 100644 index 0000000000..9f9cb40a2f --- /dev/null +++ b/unison-src/transcripts/names.output.md @@ -0,0 +1,32 @@ + Example uses of the `names` command and output +```unison +type IntTriple = IntTriple (Int, Int, Int) +intTriple = IntTriple(+1, +1, +1) +``` + +```ucm +.> alias.type IntTriple namespc.another.TripleInt + + Done. + +.> alias.term intTriple namespc.another.tripleInt + + Done. 
+ +.> names IntTriple + + Type + Hash: #170h4ackk7 + Names: IntTriple namespc.another.TripleInt + + Term + Hash: #170h4ackk7#0 + Names: IntTriple.IntTriple + +.> names intTriple + + Term + Hash: #uif14vd2oj + Names: intTriple namespc.another.tripleInt + +``` diff --git a/unison-src/transcripts/numbered-args.md b/unison-src/transcripts/numbered-args.md new file mode 100644 index 0000000000..86bf9a2147 --- /dev/null +++ b/unison-src/transcripts/numbered-args.md @@ -0,0 +1,56 @@ +# Using numbered arguments in UCM + +```ucm:hide +.> builtins.merge +``` + +First lets add some contents to our codebase. + +```unison +foo = "foo" +bar = "bar" +baz = "baz" +qux = "qux" +quux = "quux" +corge = "corge" +``` + +```ucm +.temp> add +``` + +We can get the list of things in the namespace, and UCM will give us a numbered +list: + +```ucm +.temp> find +``` + +We can ask to `view` the second element of this list: + +```ucm +.temp> find +.temp> view 2 +``` + +And we can `view` multiple elements by separating with spaces: + +```ucm +.temp> find +.temp> view 2 3 5 +``` + +We can also ask for a range: + +```ucm +.temp> find +.temp> view 2-4 +``` + +And we can ask for multiple ranges and use mix of ranges and numbers: + +```ucm +.temp> find +.temp> view 1-3 4 5-6 +``` + diff --git a/unison-src/transcripts/numbered-args.output.md b/unison-src/transcripts/numbered-args.output.md new file mode 100644 index 0000000000..50ba5ba06e --- /dev/null +++ b/unison-src/transcripts/numbered-args.output.md @@ -0,0 +1,162 @@ +# Using numbered arguments in UCM + +First lets add some contents to our codebase. + +```unison +foo = "foo" +bar = "bar" +baz = "baz" +qux = "qux" +quux = "quux" +corge = "corge" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + bar : Text + baz : Text + corge : Text + foo : Text + quux : Text + qux : Text + +``` +```ucm + ☝️ The namespace .temp is empty. + +.temp> add + + ⍟ I've added these definitions: + + bar : Text + baz : Text + corge : Text + foo : Text + quux : Text + qux : Text + +``` +We can get the list of things in the namespace, and UCM will give us a numbered +list: + +```ucm +.temp> find + + 1. bar : Text + 2. baz : Text + 3. corge : Text + 4. foo : Text + 5. quux : Text + 6. qux : Text + + +``` +We can ask to `view` the second element of this list: + +```ucm +.temp> find + + 1. bar : Text + 2. baz : Text + 3. corge : Text + 4. foo : Text + 5. quux : Text + 6. qux : Text + + +.temp> view 2 + + baz : Text + baz = "baz" + +``` +And we can `view` multiple elements by separating with spaces: + +```ucm +.temp> find + + 1. bar : Text + 2. baz : Text + 3. corge : Text + 4. foo : Text + 5. quux : Text + 6. qux : Text + + +.temp> view 2 3 5 + + baz : Text + baz = "baz" + + corge : Text + corge = "corge" + + quux : Text + quux = "quux" + +``` +We can also ask for a range: + +```ucm +.temp> find + + 1. bar : Text + 2. baz : Text + 3. corge : Text + 4. foo : Text + 5. quux : Text + 6. qux : Text + + +.temp> view 2-4 + + baz : Text + baz = "baz" + + corge : Text + corge = "corge" + + foo : Text + foo = "foo" + +``` +And we can ask for multiple ranges and use mix of ranges and numbers: + +```ucm +.temp> find + + 1. bar : Text + 2. baz : Text + 3. corge : Text + 4. foo : Text + 5. quux : Text + 6. 
qux : Text + + +.temp> view 1-3 4 5-6 + + bar : Text + bar = "bar" + + baz : Text + baz = "baz" + + corge : Text + corge = "corge" + + foo : Text + foo = "foo" + + quux : Text + quux = "quux" + + qux : Text + qux = "qux" + +``` diff --git a/unison-src/transcripts/propagate.md b/unison-src/transcripts/propagate.md new file mode 100644 index 0000000000..cb2a7cc314 --- /dev/null +++ b/unison-src/transcripts/propagate.md @@ -0,0 +1,134 @@ +# Propagating type edits + +```ucm:hide +.> builtins.merge +``` + +We introduce a type `Foo` with a function dependent `fooToInt`. + +```unison +use .builtin + +unique type Foo = Foo + +fooToInt : Foo -> Int +fooToInt _ = +42 +``` + +And then we add it. + +```ucm +.subpath> add +.subpath> find.verbose +.subpath> view fooToInt +``` + +Then if we change the type `Foo`... + +```unison +unique type Foo = Foo | Bar +``` + +and update the codebase to use the new type `Foo`... + +```ucm +.subpath> update +``` + +... it should automatically propagate the type to `fooToInt`. + +```ucm +.subpath> view fooToInt +``` + +### Preserving user type variables + +We make a term that has a dependency on another term and also a non-redundant +user-provided type signature. + +```unison +use .builtin + +someTerm : Optional foo -> Optional foo +someTerm x = x + +otherTerm : Optional baz -> Optional baz +otherTerm y = someTerm y +``` + +Add that to the codebase: + +```ucm +.subpath.preserve> add +``` + +Let's now edit the dependency: + +```unison +use .builtin + +someTerm : Optional x -> Optional x +someTerm _ = None +``` + +Update... + +```ucm +.subpath.preserve> update +``` + +Now the type of `someTerm` should be `Optional x -> Optional x` and the +type of `otherTerm` should remain the same. + +```ucm +.subpath.preserve> view someTerm +.subpath.preserve> view otherTerm +``` + +### Propagation only applies to the local branch + +Cleaning up a bit... + +```ucm +.> delete.namespace subpath +``` + +Now, we make two terms, where one depends on the other. 
+ +```unison +use .builtin + +someTerm : Optional foo -> Optional foo +someTerm x = x + +otherTerm : Optional baz -> Optional baz +otherTerm y = someTerm y +``` + +We'll make two copies of this namespace. + +```ucm +.subpath.one> add +.subpath> fork one two +``` + +Now let's edit one of the terms... + +```unison +use .builtin + +someTerm : Optional x -> Optional x +someTerm _ = None +``` + +... in one of the namespaces... + +```ucm +.subpath.one> update +``` + +The other namespace should be left alone. + +```ucm +.subpath.two> view someTerm +``` diff --git a/unison-src/transcripts/propagate.output.md b/unison-src/transcripts/propagate.output.md new file mode 100644 index 0000000000..70b7c12ce0 --- /dev/null +++ b/unison-src/transcripts/propagate.output.md @@ -0,0 +1,280 @@ +# Propagating type edits + +We introduce a type `Foo` with a function dependent `fooToInt`. + +```unison +use .builtin + +unique type Foo = Foo + +fooToInt : Foo -> Int +fooToInt _ = +42 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + unique type Foo + fooToInt : Foo -> Int + +``` +And then we add it. + +```ucm + ☝️ The namespace .subpath is empty. + +.subpath> add + + ⍟ I've added these definitions: + + unique type Foo + fooToInt : Foo -> Int + +.subpath> find.verbose + + 1. -- #qae64o6am81hoadf7eabd909gojboi5iu3g9deip79ro18f11bbhir2vg51grg4m72kr5ikdovi6aupttet0nsqil7f0df9nqr10hqg + unique type Foo + + 2. -- #qae64o6am81hoadf7eabd909gojboi5iu3g9deip79ro18f11bbhir2vg51grg4m72kr5ikdovi6aupttet0nsqil7f0df9nqr10hqg#0 + Foo.Foo : Foo + + 3. -- #hvtmbg1bd8of81n2os4ginnnen13njh47294uandlohooq0ej971u6tl5cdsfq237lec1tc007oajc4dee1fmnflqi6ogom3ecemu5g + fooToInt : Foo -> Int + + + +.subpath> view fooToInt + + fooToInt : Foo -> Int + fooToInt _ = +42 + +``` +Then if we change the type `Foo`... 
+ +```unison +unique type Foo = Foo | Bar +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + unique type Foo + +``` +and update the codebase to use the new type `Foo`... + +```ucm +.subpath> update + + ⍟ I've updated these names to your new definition: + + unique type Foo + +``` +... it should automatically propagate the type to `fooToInt`. + +```ucm +.subpath> view fooToInt + + fooToInt : Foo -> Int + fooToInt _ = +42 + +``` +### Preserving user type variables + +We make a term that has a dependency on another term and also a non-redundant +user-provided type signature. + +```unison +use .builtin + +someTerm : Optional foo -> Optional foo +someTerm x = x + +otherTerm : Optional baz -> Optional baz +otherTerm y = someTerm y +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + otherTerm : Optional baz -> Optional baz + someTerm : Optional foo -> Optional foo + +``` +Add that to the codebase: + +```ucm + ☝️ The namespace .subpath.preserve is empty. + +.subpath.preserve> add + + ⍟ I've added these definitions: + + otherTerm : Optional baz -> Optional baz + someTerm : Optional foo -> Optional foo + +``` +Let's now edit the dependency: + +```unison +use .builtin + +someTerm : Optional x -> Optional x +someTerm _ = None +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + someTerm : Optional x -> Optional x + +``` +Update... 
+ +```ucm +.subpath.preserve> update + + ⍟ I've updated these names to your new definition: + + someTerm : Optional x -> Optional x + +``` +Now the type of `someTerm` should be `Optional x -> Optional x` and the +type of `otherTerm` should remain the same. + +```ucm +.subpath.preserve> view someTerm + + someTerm : Optional x -> Optional x + someTerm _ = None + +.subpath.preserve> view otherTerm + + otherTerm : Optional baz -> Optional baz + otherTerm y = someTerm y + +``` +### Propagation only applies to the local branch + +Cleaning up a bit... + +```ucm +.> delete.namespace subpath + + Removed definitions: + + 1. unique type Foo + 2. Foo.Bar : #16d2id848g + 3. Foo.Foo : #16d2id848g + 4. fooToInt : #16d2id848g -> Int + 5. preserve.otherTerm : Optional baz -> Optional baz + 6. preserve.someTerm : Optional x -> Optional x + 7. patch patch + 8. patch preserve.patch + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +Now, we make two terms, where one depends on the other. + +```unison +use .builtin + +someTerm : Optional foo -> Optional foo +someTerm x = x + +otherTerm : Optional baz -> Optional baz +otherTerm y = someTerm y +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + otherTerm : Optional baz -> Optional baz + someTerm : Optional foo -> Optional foo + +``` +We'll make two copies of this namespace. + +```ucm + ☝️ The namespace .subpath.one is empty. + +.subpath.one> add + + ⍟ I've added these definitions: + + otherTerm : Optional baz -> Optional baz + someTerm : Optional foo -> Optional foo + +.subpath> fork one two + + Done. + +``` +Now let's edit one of the terms... + +```unison +use .builtin + +someTerm : Optional x -> Optional x +someTerm _ = None +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + someTerm : Optional x -> Optional x + +``` +... in one of the namespaces... + +```ucm +.subpath.one> update + + ⍟ I've updated these names to your new definition: + + someTerm : Optional x -> Optional x + +``` +The other namespace should be left alone. + +```ucm +.subpath.two> view someTerm + + someTerm : Optional foo -> Optional foo + someTerm x = x + +``` diff --git a/unison-src/transcripts/redundant.output.md b/unison-src/transcripts/redundant.output.md new file mode 100644 index 0000000000..b778734cd7 --- /dev/null +++ b/unison-src/transcripts/redundant.output.md @@ -0,0 +1,45 @@ +The same kind of thing happens with `map`. Are we saying this is incorrect behaviour? + +```unison +map : (a -> b) -> [a] -> [b] +map f = cases + x +: xs -> f x +: map f xs + [] -> [] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + map : (a ->{𝕖} b) ->{𝕖} [a] ->{𝕖} [b] + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + map : (a ->{𝕖} b) ->{𝕖} [a] ->{𝕖} [b] + +.> view map + + map : (a -> b) -> [a] -> [b] + map f = cases + x +: xs -> + use builtin.List +: + f x +: map f xs + [] -> [] + +.> find map + + 1. 
map : (a ->{𝕖} b) ->{𝕖} [a] ->{𝕖} [b] + + +``` diff --git a/unison-src/transcripts/reflog.md b/unison-src/transcripts/reflog.md new file mode 100644 index 0000000000..202dc50820 --- /dev/null +++ b/unison-src/transcripts/reflog.md @@ -0,0 +1,31 @@ +```ucm:hide +.> builtins.merge +``` + +First we make two changes to the codebase, so that there's more than one line +for the `reflog` command to display: + +```unison +x = 1 +``` +```ucm +.> add +``` +```unison +y = 2 +``` +```ucm +.> add +.> view y +``` +```ucm +.> reflog +``` + +If we `reset-root` to its previous value, `y` disappears. +```ucm +.> reset-root 2 +``` +```ucm:error +.> view y +``` diff --git a/unison-src/transcripts/reflog.output.md b/unison-src/transcripts/reflog.output.md new file mode 100644 index 0000000000..c3412a1bcd --- /dev/null +++ b/unison-src/transcripts/reflog.output.md @@ -0,0 +1,90 @@ +First we make two changes to the codebase, so that there's more than one line +for the `reflog` command to display: + +```unison +x = 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + x : Nat + +``` +```unison +y = 2 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + y : Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + y : Nat + +.> view y + + y : Nat + y = 2 + +``` +```ucm +.> reflog + + Here is a log of the root namespace hashes, starting with the + most recent, along with the command that got us there. Try: + + `fork 2 .old` + `fork #aeg2cssfqi .old` to make an old namespace + accessible again, + + `reset-root #aeg2cssfqi` to reset the root namespace and + its history to that of the + specified namespace. + + 1. 
#l2s90e9ce6 : add + 2. #aeg2cssfqi : add + 3. #mj9vgnc1ro : builtins.merge + 4. #7asfbtqmoj : (initial reflogged namespace) + +``` +If we `reset-root` to its previous value, `y` disappears. +```ucm +.> reset-root 2 + + Done. + +``` +```ucm +.> view y + + ⚠️ + + The following names were not found in the codebase. Check your spelling. + y + +``` diff --git a/unison-src/transcripts/resolve.md b/unison-src/transcripts/resolve.md new file mode 100644 index 0000000000..2d03b9dfd3 --- /dev/null +++ b/unison-src/transcripts/resolve.md @@ -0,0 +1,115 @@ +# Resolving edit conflicts in `ucm` + +```ucm:hide +.> builtins.merge +``` + +The `ucm` tool tracks edits to hashes in an object called a _patch_. When patches get merged, sometimes those patches will have conflicting edits. The `replace.term` command helps resolve such conflicts. + +First, let's make a new namespace, `example.resolve`: + +```ucm +.> cd example.resolve +``` + +Now let's add a term named `a.foo`: + +```unison +a.foo = 42 +``` + +```ucm +.example.resolve> add +``` + +We'll fork the namespace `a` into a new namespace `b`, so we can edit the two concurrently. + +```ucm +.example.resolve> fork a b +``` + +We'll also make a second fork `c` which we'll use as the target for our patch later. + +```ucm +.example.resolve> fork a c +``` + +Now let's make a change to `foo` in the `a` namespace: + +```ucm +.example.resolve> cd a +``` + +```unison +foo = 43 +``` + +```ucm +.example.resolve.a> update +``` + +And make a different change in the `b` namespace: + +```ucm +.example.resolve> cd .example.resolve.b +``` + +```unison +foo = 44 +``` + +```ucm +.example.resolve.b> update +``` + +The `a` and `b` namespaces now each contain a patch named `patch`. 
We can view these: + +```ucm +.example.resolve.b> cd .example.resolve +.example.resolve> view.patch a.patch +.example.resolve> view.patch b.patch +``` + +Let's now merge these namespaces into `c`: + +```ucm +.example.resolve> merge a c +``` +```ucm:error +.example.resolve> merge b c +``` + +The namespace `c` now has an edit conflict, since the term `foo` was edited in two different ways. + +```ucm +.example.resolve> cd c +.example.resolve.c> todo +``` + +We see that `#44954ulpdf` (the original hash of `a.foo`) got replaced with _both_ the `#8e68dvpr0a` and `#jdqoenu794`. + +We can resolve this conflict by picking one of the terms as the "winner": + +```ucm +.example.resolve.c> replace.term #44954ulpdf #8e68dvpr0a +``` + +This changes the merged `c.patch` so that only the edit from #44954ulpdf to #8e68dvpr0a remains: + +```ucm +.example.resolve.c> view.patch +``` + +We still have a remaining _name conflict_ since it just so happened that both of the definitions in the edits were named `foo`. + +```ucm +.example.resolve.c> todo +``` + +We can resolve the name conflict by deleting one of the names. + +```ucm +.example.resolve.c> delete.term foo#jdqoenu794 +``` + +And that's how you resolve edit conflicts with UCM. diff --git a/unison-src/transcripts/resolve.output.md b/unison-src/transcripts/resolve.output.md new file mode 100644 index 0000000000..bd319c1ed0 --- /dev/null +++ b/unison-src/transcripts/resolve.output.md @@ -0,0 +1,259 @@ +# Resolving edit conflicts in `ucm` + +The `ucm` tool tracks edits to hashes in an object called a _patch_. When patches get merged, sometimes those patches will have conflicting edits. The `replace.term` command helps resolve such conflicts. + +First, let's make a new namespace, `example.resolve`: + +```ucm +.> cd example.resolve + + ☝️ The namespace .example.resolve is empty. + +``` +Now let's add a term named `a.foo`: + +```unison +a.foo = 42 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + a.foo : Nat + +``` +```ucm +.example.resolve> add + + ⍟ I've added these definitions: + + a.foo : Nat + +``` +We'll fork the namespace `a` into a new namespace `b`, so we can edit the two concurrently. + +```ucm +.example.resolve> fork a b + + Done. + +``` +We'll also make a second fork `c` which we'll use as the target for our patch later. + +```ucm +.example.resolve> fork a c + + Done. + +``` +Now let's make a change to `foo` in the `a` namespace: + +```ucm +.example.resolve> cd a + +``` +```unison +foo = 43 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + foo : Nat + +``` +```ucm +.example.resolve.a> update + + ⍟ I've updated these names to your new definition: + + foo : Nat + +``` +And make a different change in the `b` namespace: + +```ucm +.example.resolve> cd .example.resolve.b + +``` +```unison +foo = 44 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + foo : Nat + +``` +```ucm +.example.resolve.b> update + + ⍟ I've updated these names to your new definition: + + foo : Nat + +``` +The `a` and `b` namespaces now each contain a patch named `patch`. We can view these: + +```ucm +.example.resolve.b> cd .example.resolve + +.example.resolve> view.patch a.patch + + Edited Terms: c.foo -> a.foo + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. 
+ +.example.resolve> view.patch b.patch + + Edited Terms: c.foo -> b.foo + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +``` +Let's now merge these namespaces into `c`: + +```ucm +.example.resolve> merge a c + + Here's what's changed in c after the merge: + + Updates: + + 1. foo : Nat + ↓ + 2. foo : Nat + + Added definitions: + + 3. patch patch (added 1 updates) + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +```ucm +.example.resolve> merge b c + + Here's what's changed in c after the merge: + + New name conflicts: + + 1. foo#jdqoenu794 : Nat + ↓ + 2. ┌ foo#8e68dvpr0a : Nat + 3. └ foo#jdqoenu794 : Nat + + Updates: + + 4. patch patch (added 1 updates) + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + + I tried to auto-apply the patch, but couldn't because it + contained contradictory entries. + +``` +The namespace `c` now has an edit conflict, since the term `foo` was edited in two different ways. + +```ucm +.example.resolve> cd c + +.example.resolve.c> todo + + ❓ + + These definitions were edited differently in namespaces that + have been merged into this one. You'll have to tell me what to + use as the new definition: + + The term #44954ulpdf was replaced with foo#8e68dvpr0a and + foo#jdqoenu794 + +``` +We see that `#44954ulpdf` (the original hash of `a.foo`) got replaced with _both_ the `#8e68dvpr0a` and `#jdqoenu794`. + +We can resolve this conflict by picking one of the terms as the "winner": + +```ucm +.example.resolve.c> replace.term #44954ulpdf #8e68dvpr0a + + Done. 
+ +``` +This changes the merged `c.patch` so that only the edit from #44954ulpdf to #8e68dvpr0a remains: + +```ucm +.example.resolve.c> view.patch + + Edited Terms: #44954ulpdf -> foo#8e68dvpr0a + + Tip: To remove entries from a patch, use + delete.term-replacement or delete.type-replacement, as + appropriate. + +``` +We still have a remaining _name conflict_ since it just so happened that both of the definitions in the edits were named `foo`. + +```ucm +.example.resolve.c> todo + + ❓ + + These terms have conflicting definitions: foo + + Tip: This occurs when merging branches that both independently + introduce the same name. Use `view foo` to see the + conflicting defintions, then use `move.term` to resolve + the conflicts. + +``` +We can resolve the name conflict by deleting one of the names. + +```ucm +.example.resolve.c> delete.term foo#jdqoenu794 + + Resolved name conflicts: + + 1. ┌ example.resolve.c.foo#8e68dvpr0a : Nat + 2. └ example.resolve.c.foo#jdqoenu794 : Nat + ↓ + 3. example.resolve.c.foo#8e68dvpr0a : Nat + + Name changes: + + Original Changes + 4. example.resolve.a.foo ┐ 5. example.resolve.c.foo#jdqoenu794 (removed) + 6. example.resolve.c.foo#jdqoenu794 ┘ + + Tip: You can use `undo` or `reflog` to undo this change. + +``` +And that's how you resolve edit conflicts with UCM. diff --git a/unison-src/transcripts/squash.md b/unison-src/transcripts/squash.md new file mode 100644 index 0000000000..a5c00185a6 --- /dev/null +++ b/unison-src/transcripts/squash.md @@ -0,0 +1,132 @@ + +```ucm:hide +.> builtins.merge +``` + +# Squash merges + +`squash src dest` merges can be used to merge from `src` to `dest`, discarding the history of `src`. It's useful when the source namespace history is irrelevant or has a bunch of churn you wish to discard. Often when merging small pull requests, you'll use a squash merge. + +Let's look at some examples. We'll start with a namespace with just the builtins. 
Let's take a look at the hash of this namespace: + +```ucm +.> history builtin +.> fork builtin builtin2 +``` + +(We make a copy of `builtin` for use later in this transcript.) + +Now suppose we `fork` a copy of builtin, then rename `Nat.+` to `frobnicate`, then rename it back. Notice this produces multiple entries in the history: + +```ucm +.> fork builtin mybuiltin +.mybuiltin> rename.term Nat.+ Nat.frobnicate +.mybuiltin> rename.term Nat.frobnicate Nat.+ +.mybuiltin> history +``` + +If we merge that back into `builtin`, we get that same chain of history: + +```ucm +.> merge mybuiltin builtin +.> history builtin +``` + +Let's try again, but using a `merge.squash` (or just `squash`) instead. The history will be unchanged: + +```ucm +.> merge.squash mybuiltin builtin2 +.> history builtin2 +``` + +The churn that happened in `mybuiltin` namespace ended up back in the same spot, so the squash merge of that namespace with our original namespace had no effect. + +## Another example + +Let's look at a more interesting example, where the two namespaces have diverged a bit. Here's our starting namespace: + +```unison:hide +x = 1 +``` + +```ucm +.trunk> add +.> fork trunk alice +.> fork trunk bob +``` + +Alice now does some hacking: + +```unison:hide +radNumber = 348 +bodaciousNumero = 2394 +neatoFun x = x +``` + +```ucm +.alice> add +.alice> rename.term radNumber superRadNumber +.alice> rename.term neatoFun productionReadyId +``` + +Meanwhile, Bob does his own hacking: + +```unison:hide +whatIsLove = "?" +babyDon'tHurtMe = ".. Don't hurt me..." +no more = no more +``` + +```ucm +.bob> add +``` + +At this point, Alice and Bob both have some history beyond what's in trunk: + +```ucm +.> history trunk +.> history alice +.> history bob +``` + +Alice then squash merges into `trunk`, as does Bob. It's as if Alice and Bob both made their changes in one single commit. 
+ +```ucm +.> merge.squash alice trunk +.> history trunk +.> merge.squash bob trunk +.> history trunk +``` + +Since squash merges don't produce any merge nodes, we can `undo` a couple times to get back to our starting state: + +```ucm +.> undo +.> undo +.> history trunk +``` + +This time, we'll first squash Alice and Bob's changes together before squashing their combined changes into `trunk`. The resulting `trunk` will have just a single entry in it, combining both Alice and Bob's changes: + +```ucm +.> squash alice bob +.> squash bob trunk +.> history trunk +``` + +So, there you have it. With squashing, you can control the granularity of your history. + +## Throwing out all history + +Another thing we can do is `squash` into an empty namespace. This effectively makes a copy of the namespace, but without any of its history: + +```ucm +.> squash alice nohistoryalice +.> history nohistoryalice +``` + +There's nothing really special here, `squash src dest` discards `src` history that comes after the LCA of `src` and `dest`, it's just that in the case of an empty namespace, that LCA is the beginning of time (the empty namespace), so all the history of `src` is discarded. + +## Caveats + +If you `squash mystuff trunk`, you're discarding any history of `mystuff` and just cons'ing onto the history of `trunk`. Thus, don't expect to be able to `merge trunk mystuff` later and get great results. Squashing should only be used when you don't care about the history (and you know others haven't pulled and built on your line of history being discarded, so they don't care about the history either). diff --git a/unison-src/transcripts/squash.output.md b/unison-src/transcripts/squash.output.md new file mode 100644 index 0000000000..f0a2f3c0c8 --- /dev/null +++ b/unison-src/transcripts/squash.output.md @@ -0,0 +1,472 @@ + +# Squash merges + +`squash src dest` merges can be used to merge from `src` to `dest`, discarding the history of `src`. 
It's useful when the source namespace history is irrelevant or has a bunch of churn you wish to discard. Often when merging small pull requests, you'll use a squash merge. + +Let's look at some examples. We'll start with a namespace with just the builtins. Let's take a look at the hash of this namespace: + +```ucm +.> history builtin + + Note: The most recent namespace hash is immediately below this + message. + + + + □ #ttlhvqf3s4 (start of history) + +.> fork builtin builtin2 + + Done. + +``` +(We make a copy of `builtin` for use later in this transcript.) + +Now suppose we `fork` a copy of builtin, then rename `Nat.+` to `frobnicate`, then rename it back. Notice this produces multiple entries in the history: + +```ucm +.> fork builtin mybuiltin + + Done. + +.mybuiltin> rename.term Nat.+ Nat.frobnicate + + Done. + +.mybuiltin> rename.term Nat.frobnicate Nat.+ + + Done. + +.mybuiltin> history + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #v024k3597g + + > Moves: + + Original name New name + Nat.frobnicate Nat.+ + + ⊙ #ci3d83ckgf + + > Moves: + + Original name New name + Nat.+ Nat.frobnicate + + □ #ttlhvqf3s4 (start of history) + +``` +If we merge that back into `builtin`, we get that same chain of history: + +```ucm +.> merge mybuiltin builtin + + Nothing changed as a result of the merge. + +.> history builtin + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #v024k3597g + + > Moves: + + Original name New name + Nat.frobnicate Nat.+ + + ⊙ #ci3d83ckgf + + > Moves: + + Original name New name + Nat.+ Nat.frobnicate + + □ #ttlhvqf3s4 (start of history) + +``` +Let's try again, but using a `merge.squash` (or just `squash`) instead. The history will be unchanged: + +```ucm +.> merge.squash mybuiltin builtin2 + + Nothing changed as a result of the merge. + + 😶 + + builtin2 was already up-to-date with mybuiltin. 
+ +.> history builtin2 + + Note: The most recent namespace hash is immediately below this + message. + + + + □ #ttlhvqf3s4 (start of history) + +``` +The churn that happened in `mybuiltin` namespace ended up back in the same spot, so the squash merge of that namespace with our original namespace had no effect. + +## Another example + +Let's look at a more interesting example, where the two namespaces have diverged a bit. Here's our starting namespace: + +```unison +x = 1 +``` + +```ucm + ☝️ The namespace .trunk is empty. + +.trunk> add + + ⍟ I've added these definitions: + + x : Nat + +.> fork trunk alice + + Done. + +.> fork trunk bob + + Done. + +``` +Alice now does some hacking: + +```unison +radNumber = 348 +bodaciousNumero = 2394 +neatoFun x = x +``` + +```ucm +.alice> add + + ⍟ I've added these definitions: + + bodaciousNumero : Nat + neatoFun : x -> x + radNumber : Nat + +.alice> rename.term radNumber superRadNumber + + Done. + +.alice> rename.term neatoFun productionReadyId + + Done. + +``` +Meanwhile, Bob does his own hacking: + +```unison +whatIsLove = "?" +babyDon'tHurtMe = ".. Don't hurt me..." +no more = no more +``` + +```ucm +.bob> add + + ⍟ I've added these definitions: + + babyDon'tHurtMe : Text + no : more -> 𝕣 + whatIsLove : Text + +``` +At this point, Alice and Bob both have some history beyond what's in trunk: + +```ucm +.> history trunk + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #3p3anl2oil + + + Adds / updates: + + x + + □ #7asfbtqmoj (start of history) + +.> history alice + + Note: The most recent namespace hash is immediately below this + message. 
+ + ⊙ #t85a26latn + + > Moves: + + Original name New name + neatoFun productionReadyId + + ⊙ #01scl44n4i + + > Moves: + + Original name New name + radNumber superRadNumber + + ⊙ #094h7rbo3m + + + Adds / updates: + + bodaciousNumero neatoFun radNumber + + ⊙ #3p3anl2oil + + + Adds / updates: + + x + + □ #7asfbtqmoj (start of history) + +.> history bob + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #g0mn0tn7ap + + + Adds / updates: + + babyDon'tHurtMe no whatIsLove + + ⊙ #3p3anl2oil + + + Adds / updates: + + x + + □ #7asfbtqmoj (start of history) + +``` +Alice then squash merges into `trunk`, as does Bob. It's as if Alice and Bob both made their changes in one single commit. + +```ucm +.> merge.squash alice trunk + + Here's what's changed in trunk after the merge: + + Added definitions: + + 1. bodaciousNumero : Nat + 2. productionReadyId : x -> x + 3. superRadNumber : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> history trunk + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #tcbafrhd81 + + + Adds / updates: + + bodaciousNumero productionReadyId superRadNumber + + ⊙ #3p3anl2oil + + + Adds / updates: + + x + + □ #7asfbtqmoj (start of history) + +.> merge.squash bob trunk + + Here's what's changed in trunk after the merge: + + Added definitions: + + 1. babyDon'tHurtMe : Text + 2. no : more -> 𝕣 + 3. whatIsLove : Text + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> history trunk + + Note: The most recent namespace hash is immediately below this + message. 
+ + ⊙ #5grq7ao0b4 + + + Adds / updates: + + babyDon'tHurtMe no whatIsLove + + ⊙ #tcbafrhd81 + + + Adds / updates: + + bodaciousNumero productionReadyId superRadNumber + + ⊙ #3p3anl2oil + + + Adds / updates: + + x + + □ #7asfbtqmoj (start of history) + +``` +Since squash merges don't produce any merge nodes, we can `undo` a couple times to get back to our starting state: + +```ucm +.> undo + + Here's the changes I undid + + Name changes: + + Original Changes + 1. bob.babyDon'tHurtMe 2. trunk.babyDon'tHurtMe (added) + + 3. bob.no 4. trunk.no (added) + + 5. bob.whatIsLove 6. trunk.whatIsLove (added) + +.> undo + + Here's the changes I undid + + Name changes: + + Original Changes + 1. alice.bodaciousNumero 2. trunk.bodaciousNumero (added) + + 3. alice.productionReadyId 4. trunk.productionReadyId (added) + + 5. alice.superRadNumber 6. trunk.superRadNumber (added) + +.> history trunk + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #3p3anl2oil + + + Adds / updates: + + x + + □ #7asfbtqmoj (start of history) + +``` +This time, we'll first squash Alice and Bob's changes together before squashing their combined changes into `trunk`. The resulting `trunk` will have just a single entry in it, combining both Alice and Bob's changes: + +```ucm +.> squash alice bob + + Here's what's changed in bob after the merge: + + Added definitions: + + 1. bodaciousNumero : Nat + 2. productionReadyId : x -> x + 3. superRadNumber : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> squash bob trunk + + Here's what's changed in trunk after the merge: + + Added definitions: + + 1. babyDon'tHurtMe : Text + 2. bodaciousNumero : Nat + 3. no : more -> 𝕣 + 4. productionReadyId : x -> x + 5. superRadNumber : Nat + 6. 
whatIsLove : Text + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> history trunk + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #8t5skhmd1g + + + Adds / updates: + + babyDon'tHurtMe bodaciousNumero no productionReadyId + superRadNumber whatIsLove + + ⊙ #3p3anl2oil + + + Adds / updates: + + x + + □ #7asfbtqmoj (start of history) + +``` +So, there you have it. With squashing, you can control the granularity of your history. + +## Throwing out all history + +Another thing we can do is `squash` into an empty namespace. This effectively makes a copy of the namespace, but without any of its history: + +```ucm +.> squash alice nohistoryalice + + Here's what's changed in nohistoryalice after the merge: + + Added definitions: + + 1. bodaciousNumero : Nat + 2. productionReadyId : x -> x + 3. superRadNumber : Nat + 4. x : Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> history nohistoryalice + + Note: The most recent namespace hash is immediately below this + message. + + ⊙ #fs1a0n3q3r + + + Adds / updates: + + bodaciousNumero productionReadyId superRadNumber x + + □ #7asfbtqmoj (start of history) + +``` +There's nothing really special here, `squash src dest` discards `src` history that comes after the LCA of `src` and `dest`, it's just that in the case of an empty namespace, that LCA is the beginning of time (the empty namespace), so all the history of `src` is discarded. + +## Caveats + +If you `squash mystuff trunk`, you're discarding any history of `mystuff` and just cons'ing onto the history of `trunk`. Thus, don't expect to be able to `merge trunk mystuff` later and get great results.
Squashing should only be used when you don't care about the history (and you know others haven't pulled and built on your line of history being discarded, so they don't care about the history either). diff --git a/unison-src/transcripts/suffixes.md b/unison-src/transcripts/suffixes.md new file mode 100644 index 0000000000..359e40b7ed --- /dev/null +++ b/unison-src/transcripts/suffixes.md @@ -0,0 +1,40 @@ +# Suffix-based resolution of names + +```ucm:hide +.> builtins.merge +``` + +Any unique name suffix can be used to refer to a definition. For instance: + +```unison:hide +-- No imports needed even though FQN is `builtin.{Int,Nat}` +foo.bar.a : Int +foo.bar.a = +99 + +-- No imports needed even though FQN is `builtin.Optional.{None,Some}` +optional.isNone = cases + None -> true + Some _ -> false +``` + +This also affects commands like find. Notice lack of qualified names in output: + +```ucm +.> add +.> find take +``` + +The `view` and `display` commands also benefit from this: + +```ucm +.> view List.drop +.> display bar.a +``` + +In the signature, we don't see `base.Nat`, just `Nat`. The full declaration name is still shown for each search result though. + +Type-based search also benefits from this, we can just say `Nat` rather than `.base.Nat`: + +```ucm +.> find : Nat -> [a] -> [a] +``` diff --git a/unison-src/transcripts/suffixes.output.md b/unison-src/transcripts/suffixes.output.md new file mode 100644 index 0000000000..1ca4f8d458 --- /dev/null +++ b/unison-src/transcripts/suffixes.output.md @@ -0,0 +1,59 @@ +# Suffix-based resolution of names + +Any unique name suffix can be used to refer to a definition. For instance: + +```unison +-- No imports needed even though FQN is `builtin.{Int,Nat}` +foo.bar.a : Int +foo.bar.a = +99 + +-- No imports needed even though FQN is `builtin.Optional.{None,Some}` +optional.isNone = cases + None -> true + Some _ -> false +``` + +This also affects commands like find. 
Notice lack of qualified names in output: + +```ucm +.> add + + ⍟ I've added these definitions: + + foo.bar.a : Int + optional.isNone : Optional a -> Boolean + +.> find take + + 1. builtin.Bytes.take : Nat -> Bytes -> Bytes + 2. builtin.List.take : Nat -> [a] -> [a] + 3. builtin.Text.take : Nat -> Text -> Text + 4. builtin.io2.MVar.take : MVar a ->{IO} Either IOError a + 5. builtin.io2.MVar.tryTake : MVar a ->{IO} Optional a + + +``` +The `view` and `display` commands also benefit from this: + +```ucm +.> view List.drop + + -- builtin.List.drop is built-in. + +.> display bar.a + + +99 + +``` +In the signature, we don't see `base.Nat`, just `Nat`. The full declaration name is still shown for each search result though. + +Type-based search also benefits from this, we can just say `Nat` rather than `.base.Nat`: + +```ucm +.> find : Nat -> [a] -> [a] + + 1. builtin.List.drop : Nat -> [a] -> [a] + 2. builtin.List.take : Nat -> [a] -> [a] + + +``` diff --git a/unison-src/transcripts/todo-bug-builtins.md b/unison-src/transcripts/todo-bug-builtins.md new file mode 100644 index 0000000000..c7d88fb784 --- /dev/null +++ b/unison-src/transcripts/todo-bug-builtins.md @@ -0,0 +1,27 @@ +# The `todo` and `bug` builtin + +```ucm:hide +.> builtins.merge +``` + +`todo` and `bug` have type `a -> b`. They take a message or a value of type `a` and crash during runtime displaying `a` in ucm. +```unison:error +> todo "implement me later" +``` +```unison:error +> bug "there's a bug in my code" +``` + +## Todo +`todo` is useful if you want to come back to a piece of code later but you want your project to compile. +```unison +complicatedMathStuff x = todo "Come back and to something with x here" +``` + +## Bug +`bug` is used to indicate that a particular branch is not expected to execute. 
+```unison +test = match true with + true -> "Yay" + false -> bug "Wow, that's unexpected" +``` diff --git a/unison-src/transcripts/todo-bug-builtins.output.md b/unison-src/transcripts/todo-bug-builtins.output.md new file mode 100644 index 0000000000..3a62517f8c --- /dev/null +++ b/unison-src/transcripts/todo-bug-builtins.output.md @@ -0,0 +1,89 @@ +# The `todo` and `bug` builtin + +`todo` and `bug` have type `a -> b`. They take a message or a value of type `a` and crash during runtime displaying `a` in ucm. +```unison +> todo "implement me later" +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 💔💥 + + I've encountered a call to builtin.todo with the following + value: + + "implement me later" + + I'm sorry this message doesn't have more detail about the + location of the failure. My makers plan to fix this in a + future release. 😢 + +``` +```unison +> bug "there's a bug in my code" +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 💔💥 + + I've encountered a call to builtin.bug with the following + value: + + "there's a bug in my code" + + I'm sorry this message doesn't have more detail about the + location of the failure. My makers plan to fix this in a + future release. 😢 + +``` +## Todo +`todo` is useful if you want to come back to a piece of code later but you want your project to compile. +```unison +complicatedMathStuff x = todo "Come back and to something with x here" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + complicatedMathStuff : x -> 𝕣 + +``` +## Bug +`bug` is used to indicate that a particular branch is not expected to execute. 
+```unison +test = match true with + true -> "Yay" + false -> bug "Wow, that's unexpected" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + test : Text + +``` diff --git a/unison-src/transcripts/transcript-parser-commands.md b/unison-src/transcripts/transcript-parser-commands.md new file mode 100644 index 0000000000..e39fd10885 --- /dev/null +++ b/unison-src/transcripts/transcript-parser-commands.md @@ -0,0 +1,41 @@ +### Transcript parser operations + +```ucm:hide +.> builtins.merge +``` + +The transcript parser is meant to parse `ucm` and `unison` blocks. + +```unison +x = 1 +``` + +```ucm +.> add +``` + +```unison:hide:error:scratch.u +z +``` + +```ucm:error +.> delete foo +``` + +```ucm :error +.> delete lineToken.call +``` + +However handling of blocks of other languages should be supported. + +```python +some python code +``` + +```c_cpp +some C++ code +``` + +```c9search +some cloud9 code +``` diff --git a/unison-src/transcripts/transcript-parser-commands.output.md b/unison-src/transcripts/transcript-parser-commands.output.md new file mode 100644 index 0000000000..1a1cdbc916 --- /dev/null +++ b/unison-src/transcripts/transcript-parser-commands.output.md @@ -0,0 +1,72 @@ +### Transcript parser operations + +The transcript parser is meant to parse `ucm` and `unison` blocks. + +```unison +x = 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + x : Nat + +``` +```unison +--- +title: :scratch.u +--- +z + +``` + + +```ucm +.> delete foo + + ⚠️ + + I don't know about that name. + +``` +```ucm +.> delete lineToken.call + + ⚠️ + + I don't know about that name. 
+ +``` +However handling of blocks of other languages should be supported. + +```python + +some python code + +``` + +```c_cpp + +some C++ code + +``` + +```c9search + +some cloud9 code + +``` + diff --git a/unison-src/transcripts/unitnamespace.md b/unison-src/transcripts/unitnamespace.md new file mode 100644 index 0000000000..3fcee464f8 --- /dev/null +++ b/unison-src/transcripts/unitnamespace.md @@ -0,0 +1,9 @@ +```unison +foo = "bar" +``` + +```ucm +.> cd () +.()> add +.> delete.namespace () +``` diff --git a/unison-src/transcripts/unitnamespace.output.md b/unison-src/transcripts/unitnamespace.output.md new file mode 100644 index 0000000000..1e3a726877 --- /dev/null +++ b/unison-src/transcripts/unitnamespace.output.md @@ -0,0 +1,35 @@ +```unison +foo = "bar" +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + foo : ##Text + +``` +```ucm +.> cd () + + ☝️ The namespace .() is empty. + +.()> add + + ⍟ I've added these definitions: + + foo : ##Text + +.> delete.namespace () + + Removed definitions: + + 1. foo : ##Text + + Tip: You can use `undo` or `reflog` to undo this change. 
+ +``` From 29359b18a8dceec1068e826591ef4fc29f63896e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 20 Oct 2020 13:27:43 -0400 Subject: [PATCH 021/225] change hashing for refs, and add loadTermByHash --- .../U/Codebase/Sqlite/Operations.hs | 63 +++++++++++ .../U/Codebase/Sqlite/Queries.hs | 24 +++- .../U/Codebase/Sqlite/Term/Format.hs | 5 +- .../unison-codebase-sqlite.cabal | 2 + codebase2/codebase/U/Codebase/Term.hs | 14 ++- hie.yaml | 2 +- .../src/Unison/Codebase/SqliteCodebase.hs | 105 ++++++++++++++++++ parser-typechecker/src/Unison/Runtime/ANF.hs | 2 +- .../unison-parser-typechecker.cabal | 8 +- stack.yaml | 3 +- unison-core/src/Unison/Reference.hs | 2 +- 11 files changed, 215 insertions(+), 15 deletions(-) create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs create mode 100644 parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs new file mode 100644 index 0000000000..2d5b7694da --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -0,0 +1,63 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module U.Codebase.Sqlite.Operations where + +import Control.Monad ((<=<)) +import Control.Monad.Trans.Maybe (MaybeT (MaybeT, runMaybeT)) +import Data.Bifunctor (Bifunctor (bimap)) +import Data.Functor ((<&>)) +import qualified Data.Vector as Vector +import qualified U.Codebase.Reference as C.Reference +import qualified U.Codebase.Sqlite.LocalIds as LocalIds +import U.Codebase.Sqlite.Queries (DB) +import qualified U.Codebase.Sqlite.Queries as Q +import qualified U.Codebase.Sqlite.Serialization as S +import U.Codebase.Sqlite.Symbol (Symbol) +import qualified U.Codebase.Sqlite.Term.Format as S +import qualified U.Codebase.Term as C +import qualified U.Codebase.Term as C.Term +import 
U.Util.Base32Hex (Base32Hex) +import qualified U.Util.Hash as H +import U.Util.Serialization (getFromBytes) + +loadTermComponentByHash :: DB m => Base32Hex -> m (Maybe [C.Term Symbol]) +loadTermComponentByHash = error "todo" + +m :: (a -> f (Maybe b)) -> a -> MaybeT f b +m = fmap MaybeT + +m' :: (Functor f, Show a) => String -> (a -> f (Maybe b)) -> a -> MaybeT f b +m' msg f a = MaybeT do + f a <&> \case + Nothing -> error $ "nothing: " ++ msg ++ " " ++ show a + Just b -> Just b + +loadTermByHash :: DB m => C.Reference.Id -> m (Maybe (C.Term Symbol)) +loadTermByHash (C.Reference.Id h i) = runMaybeT do + -- retrieve the blob + (localIds, term) <- + m' + ("getFromBytes $ S.lookupTermElement " ++ show i) + (fmap pure $ getFromBytes $ S.lookupTermElement i) + <=< m' "Q.loadObjectById" Q.loadObjectById + <=< m' "Q.objectIdByAnyHash" Q.objectIdByAnyHash + $ H.toBase32Hex h + + -- look up the text and hashes that are used by the term + texts <- traverse (m' "Q.loadTextById" Q.loadTextById) $ LocalIds.textLookup localIds + hashes <- traverse (m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId) $ LocalIds.objectLookup localIds + + -- substitute the text and hashes back into the term + let substText (S.LocalTextId w) = texts Vector.! fromIntegral w + substHash (S.LocalDefnId w) = H.fromBase32Hex $ hashes Vector.! 
fromIntegral w + substTermRef = bimap substText (fmap substHash) + substTypeRef = bimap substText substHash + substTermLink = bimap substTermRef substTypeRef + substTypeLink = substTypeRef + pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) + + +-- loadLocallyIndexedComponentByHash diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index e4536ff2a2..2e82547de2 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -85,7 +85,7 @@ saveHashObject hId oId version = execute sql (hId, oId, version) where saveObject :: DB m => HashId -> ObjectType -> ByteString -> m ObjectId saveObject h t blob = - execute sql (h, t, blob) >> queryOne (objectByPrimaryHashId h) + execute sql (h, t, blob) >> queryOne (objectIdByPrimaryHashId h) where sql = [here| INSERT OR IGNORE INTO object (primary_hash_id, type_id, bytes) @@ -97,14 +97,30 @@ loadObjectById oId = queryOnly sql (Only oId) where sql = [here| SELECT bytes FROM object WHERE id = ? |] -objectByPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) -objectByPrimaryHashId h = queryOnly sql (Only h) where sql = [here| +objectIdByPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) +objectIdByPrimaryHashId h = queryOnly sql (Only h) where sql = [here| SELECT id FROM object WHERE primary_hash_id = ? |] +objectIdByAnyHash :: DB m => Base32Hex -> m (Maybe ObjectId) +objectIdByAnyHash h = queryOnly sql (Only h) where sql = [here| + SELECT object.id + FROM hash + INNER JOIN hash_object ON hash_object.hash_id = hash.id + INNER JOIN object ON hash_object.object_id = object.id + WHERE hash.base32 = ? 
+|] + +loadPrimaryHashByObjectId :: DB m => ObjectId -> m (Maybe Base32Hex) +loadPrimaryHashByObjectId oId = queryOnly sql (Only oId) where sql = [here| + SELECT hash.base32 + FROM hash INNER JOIN hash_object ON hash_object.hash_id = hash.id + WHERE hash_object.object_id = ? +|] + objectAndPrimaryHashByAnyHash :: DB m => Base32Hex -> m (Maybe (Base32Hex, ObjectId)) objectAndPrimaryHashByAnyHash h = queryMaybe sql (Only h) where sql = [here| - SELECT object.id + SELECT object.primary_hash_id, object.id FROM hash INNER JOIN hash_object ON hash_object.hash_id = hash.id INNER JOIN object ON hash_object.objectId = object.id diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 357640bcaa..ca50e64e0e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -20,11 +20,14 @@ type TermRef = Reference' LocalTextId (Maybe LocalDefnId) type TypeRef = Reference' LocalTextId LocalDefnId +type TermLink = Referent' TermRef TypeRef +type TypeLink = TypeRef + newtype LocallyIndexedComponent = LocallyIndexedComponent (Vector (LocalIds, Term)) type F = - Term.F' LocalTextId TermRef TypeRef (Referent' TermRef TypeRef) TypeRef Symbol + Term.F' LocalTextId TermRef TypeRef TermLink TypeLink Symbol type FT = Type.F' TypeRef diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index adcac26082..d60f982361 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -24,6 +24,7 @@ library U.Codebase.Sqlite.DbId U.Codebase.Sqlite.LocalIds U.Codebase.Sqlite.ObjectType + U.Codebase.Sqlite.Operations U.Codebase.Sqlite.Patch.Format U.Codebase.Sqlite.Patch.Full U.Codebase.Sqlite.Patch.Diff @@ -49,6 +50,7 @@ library mtl, sqlite-simple, text, + transformers, unliftio, vector, unison-codebase, 
diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index 84c87f395b..f98621a480 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -34,15 +34,19 @@ import qualified Data.Foldable as Foldable type ConstructorId = Word64 type Term v = ABT.Term (F v) v () +type TermRef = Reference' Text (Maybe Hash) +type TypeRef = Reference +type TermLink = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) +type TypeLink = Reference -- | Base functor for terms in the Unison codebase type F vt = F' - Text -- text - (Reference' Text (Maybe Hash)) -- termRef - Reference -- typeRef - (Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash)) -- termLink - Reference -- typeLink + Text + TermRef + TypeRef + TermLink + TypeLink vt -- | Generalized version. We could generalize further to allow sharing within diff --git a/hie.yaml b/hie.yaml index 428be419ec..76e1e6740a 100644 --- a/hie.yaml +++ b/hie.yaml @@ -60,5 +60,5 @@ cradle: - path: "yaks/easytest/src/." component: "easytest:lib" - - path: "yaks/easytest/tests/." + - path: "yaks/easytest/tests/." 
component: "easytest:test:tests" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs new file mode 100644 index 0000000000..9ba8e93d71 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -0,0 +1,105 @@ +module Unison.Codebase.SqliteCodebase where + +-- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) + +import qualified Unison.Codebase as Codebase1 +import qualified Unison.Reference as Reference +import Unison.Term (Term) +import Unison.Symbol (Symbol) +import Unison.Parser (Ann) +import Unison.Type (Type) +import Unison.DataDeclaration (Decl) +import Unison.Codebase.Branch (Branch) +import qualified Unison.Codebase.Branch as Branch +import Data.Set (Set) +import Unison.Reference (Reference) +import Unison.Codebase.SyncMode (SyncMode) +import qualified Unison.UnisonFile as UF +import qualified Unison.Codebase.Reflog as Reflog +import Data.Text (Text) +import qualified Unison.Referent as Referent +import Unison.ShortHash (ShortHash) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +-- import qualified Database.SQLite.Simple as Sqlite + +sqliteCodebase :: FilePath -> IO (Codebase1.Codebase IO Symbol Ann) +sqliteCodebase _root = do + -- c :: Sqlite.Connection <- error "todo" + let + getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) + getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) + getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) + putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () + putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () + getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) + putRootBranch :: Branch IO -> IO () + rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) + getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) + dependentsImpl :: Reference -> IO (Set Reference.Id) + syncFromDirectory :: Codebase1.CodebasePath -> 
SyncMode -> Branch IO -> IO () + syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () + watches :: UF.WatchKind -> IO [Reference.Id] + getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) + putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () + getReflog :: IO [Reflog.Entry] + appendReflog :: Text -> Branch IO -> Branch IO -> IO () + termsOfTypeImpl :: Reference -> IO (Set Referent.Id) + termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) + hashLength :: IO Int + termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + typeReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + termReferentsByPrefix :: ShortHash -> IO (Set Referent.Id) + branchHashLength :: IO Int + branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) + + getTerm (Reference.Id _r _i _n) = error "todo" + getTypeOfTermImpl = error "todo" + getTypeDeclaration = error "todo" + putTerm = error "todo" + putTypeDeclaration = error "todo" + getRootBranch = error "todo" + putRootBranch = error "todo" + rootBranchUpdates = error "todo" + getBranchForHash = error "todo" + dependentsImpl = error "todo" + syncFromDirectory = error "todo" + syncToDirectory = error "todo" + watches = error "todo" + getWatch = error "todo" + putWatch = error "todo" + getReflog = error "todo" + appendReflog = error "todo" + termsOfTypeImpl = error "todo" + termsMentioningTypeImpl = error "todo" + hashLength = error "todo" + termReferencesByPrefix = error "todo" + typeReferencesByPrefix = error "todo" + termReferentsByPrefix = error "todo" + branchHashLength = error "todo" + branchHashesByPrefix = error "todo" + pure $ Codebase1.Codebase + getTerm + getTypeOfTermImpl + getTypeDeclaration + putTerm + putTypeDeclaration + getRootBranch + putRootBranch + rootBranchUpdates + getBranchForHash + dependentsImpl + syncFromDirectory + syncToDirectory + watches + getWatch + putWatch + getReflog + appendReflog + termsOfTypeImpl + termsMentioningTypeImpl + 
hashLength + termReferencesByPrefix + typeReferencesByPrefix + termReferentsByPrefix + branchHashLength + branchHashesByPrefix diff --git a/parser-typechecker/src/Unison/Runtime/ANF.hs b/parser-typechecker/src/Unison/Runtime/ANF.hs index 578719fbcb..ab02e0f98b 100644 --- a/parser-typechecker/src/Unison/Runtime/ANF.hs +++ b/parser-typechecker/src/Unison/Runtime/ANF.hs @@ -184,7 +184,7 @@ enclose keep rec (Let1NamedTop' top v b@(LamsNamed' vs bd) e) = Just . let1' top [(v, lamb)] . rec (Set.insert v keep) $ ABT.subst v av e where - (_, av) = expandSimple keep (v, b) + (_, av) = expandSimple keep (v, b) keep' = Set.difference keep $ Set.fromList vs fvs = ABT.freeVars b evs = Set.toList $ Set.difference fvs keep diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 08e30587eb..a40e374452 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -96,6 +96,7 @@ library Unison.Codebase.Serialization.PutT Unison.Codebase.Serialization.V1 Unison.Codebase.ShortBranchHash + Unison.Codebase.SqliteCodebase Unison.Codebase.SyncMode Unison.Codebase.TermEdit Unison.Codebase.TranscriptParser @@ -231,6 +232,7 @@ library split, stm, strings, + sqlite-simple, tagged, terminal-size, text, @@ -240,7 +242,11 @@ library unliftio, util, vector, - unicode-show + unicode-show, + -- v2 + unison-core, + unison-codebase, + unison-codebase-sqlite ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures diff --git a/stack.yaml b/stack.yaml index d8409d66b4..a1605871ee 100644 --- a/stack.yaml +++ b/stack.yaml @@ -8,6 +8,7 @@ packages: - yaks/easytest - parser-typechecker - unison-core + - codebase-convert-1to2 - codebase1/codebase - codebase2/codebase @@ -41,4 +42,4 @@ extra-deps: ghc-options: # All packages - "$locals": -Wall -Wno-name-shadowing -Werror -Wno-type-defaults -Wno-missing-pattern-synonym-signatures #-freverse-errors 
+ "$locals": -haddock -Wall -Wno-name-shadowing -Werror -Wno-type-defaults -Wno-missing-pattern-synonym-signatures #-freverse-errors diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs index f007e2b764..9515b6248d 100644 --- a/unison-core/src/Unison/Reference.hs +++ b/unison-core/src/Unison/Reference.hs @@ -176,4 +176,4 @@ instance Show Reference where show = SH.toString . SH.take 5 . toShortHash instance Hashable.Hashable Reference where tokens (Builtin txt) = [Hashable.Tag 0, Hashable.Text txt] - tokens (DerivedId (Id h i n)) = [Hashable.Tag 1, Hashable.Bytes (H.toBytes h), Hashable.Nat i, Hashable.Nat n] + tokens (DerivedId (Id h i _n)) = [Hashable.Tag 1, Hashable.Bytes (H.toBytes h), Hashable.Nat i] From 6030572bcd8c80b50bbc4f1c79e44f2efce4c0c1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 20 Oct 2020 16:11:16 -0400 Subject: [PATCH 022/225] implement `loadTypeOfTermByTermHash` --- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 4 +-- .../U/Codebase/Sqlite/Operations.hs | 35 ++++++++++++++++--- .../U/Codebase/Sqlite/Queries.hs | 6 ++-- .../U/Codebase/Sqlite/Term/Format.hs | 6 ++++ codebase2/codebase/U/Codebase/Reference.hs | 24 ++++++++++--- codebase2/codebase/U/Codebase/Referent.hs | 28 +++++++++++---- codebase2/codebase/U/Codebase/Term.hs | 1 + 7 files changed, 82 insertions(+), 22 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index cc13d8f148..885d46ec54 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -15,10 +15,8 @@ import Data.Bits (Bits) newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 newtype TextId = TextId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 -newtype HashId = HashId Word64 
deriving (Eq, Ord) deriving (Hashable, FromField, ToField) via Word64 +newtype HashId = HashId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via Word64 -newtype TermId = TermId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId -newtype DeclId = DeclId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via ObjectId newtype PatchId = PatchId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId newtype BranchId = BranchId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 2d5b7694da..aa427b94af 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -5,9 +5,10 @@ module U.Codebase.Sqlite.Operations where -import Control.Monad ((<=<)) +import Control.Monad ((<=<), (>=>)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT, runMaybeT)) import Data.Bifunctor (Bifunctor (bimap)) +import Data.Bitraversable (Bitraversable (bitraverse)) import Data.Functor ((<&>)) import qualified Data.Vector as Vector import qualified U.Codebase.Reference as C.Reference @@ -16,9 +17,10 @@ import U.Codebase.Sqlite.Queries (DB) import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sqlite.Symbol (Symbol) -import qualified U.Codebase.Sqlite.Term.Format as S +import qualified U.Codebase.Sqlite.Term.Format as S.Term import qualified U.Codebase.Term as C import qualified U.Codebase.Term as C.Term +import qualified U.Codebase.Type as C.Type import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import U.Util.Serialization (getFromBytes) @@ -40,7 +42,7 @@ loadTermByHash (C.Reference.Id h i) = 
runMaybeT do -- retrieve the blob (localIds, term) <- m' - ("getFromBytes $ S.lookupTermElement " ++ show i) + ("getTermElement: " ++ show i ++ ") fromBytes:") (fmap pure $ getFromBytes $ S.lookupTermElement i) <=< m' "Q.loadObjectById" Q.loadObjectById <=< m' "Q.objectIdByAnyHash" Q.objectIdByAnyHash @@ -51,13 +53,36 @@ loadTermByHash (C.Reference.Id h i) = runMaybeT do hashes <- traverse (m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId) $ LocalIds.objectLookup localIds -- substitute the text and hashes back into the term - let substText (S.LocalTextId w) = texts Vector.! fromIntegral w - substHash (S.LocalDefnId w) = H.fromBase32Hex $ hashes Vector.! fromIntegral w + let substText (S.Term.LocalTextId w) = texts Vector.! fromIntegral w + substHash (S.Term.LocalDefnId w) = H.fromBase32Hex $ hashes Vector.! fromIntegral w substTermRef = bimap substText (fmap substHash) substTypeRef = bimap substText substHash substTermLink = bimap substTermRef substTypeRef substTypeLink = substTypeRef pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) +loadTypeOfTermByTermHash :: DB m => C.Reference.Id -> m (Maybe (C.Term.Type Symbol)) +loadTypeOfTermByTermHash = + runMaybeT + . ( -- convert query reference by looking up db ids + C.Reference.idH + ( -- look up hash ids + m' "Q.loadHashId" Q.loadHashId . H.toBase32Hex + -- look up object ids + >=> m' "Q.objectIdByPrimaryHashId" Q.objectIdByPrimaryHashId + ) + -- load "type of term" blob for the reference + >=> m' "Q.loadTypeOfTerm" Q.loadTypeOfTerm + -- deserialize the blob into the type + >=> m' + "getTypeFromBytes" + (fmap pure $ getFromBytes $ S.getType S.getReference) + -- convert the result type by looking up db ids + >=> C.Type.rtraverse + ( bitraverse + (m Q.loadTextById) + (fmap H.fromBase32Hex . 
m Q.loadPrimaryHashByObjectId) + ) + ) -- loadLocallyIndexedComponentByHash diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 2e82547de2..ebf63291e0 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -50,14 +50,14 @@ type TypeHashReference = Reference' TextId HashId -- * main squeeze saveHash :: DB m => Base32Hex -> m HashId -saveHash base32 = execute sql (Only base32) >> queryOne (loadHash base32) +saveHash base32 = execute sql (Only base32) >> queryOne (loadHashId base32) where sql = [here| INSERT OR IGNORE INTO hash (base32) VALUES (?) |] saveHashHash :: DB m => Hash -> m HashId saveHashHash = saveHash . Hash.toBase32Hex -loadHash :: DB m => Base32Hex -> m (Maybe HashId) -loadHash base32 = queryOnly sql (Only base32) +loadHashId :: DB m => Base32Hex -> m (Maybe HashId) +loadHashId base32 = queryOnly sql (Only base32) where sql = [here| SELECT id FROM hash WHERE base32 = ? 
|] loadHashById :: DB m => HashId -> m (Maybe Base32Hex) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index ca50e64e0e..63d290ed90 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -12,6 +12,7 @@ import U.Codebase.Sqlite.Symbol import qualified U.Codebase.Term as Term import qualified U.Core.ABT as ABT import qualified U.Codebase.Type as Type +import qualified U.Codebase.Sqlite.Reference as Sqlite newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 @@ -34,5 +35,10 @@ type FT = Type.F' TypeRef type Term = ABT.Term F Symbol () type Type = ABT.Term FT Symbol () +-- * Type of Term +-- Maybe these should have a LocalIds index too; or share one with the term? +type FTT = Type.F' Sqlite.Reference +type TypeOfTerm = ABT.Term FTT Symbol () + data TermFormat = Term LocallyIndexedComponent diff --git a/codebase2/codebase/U/Codebase/Reference.hs b/codebase2/codebase/U/Codebase/Reference.hs index 8094c584ba..88848a9f6b 100644 --- a/codebase2/codebase/U/Codebase/Reference.hs +++ b/codebase2/codebase/U/Codebase/Reference.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} @@ -12,7 +13,9 @@ import qualified U.Util.Hash as Hash import U.Util.Hash (Hash) import U.Util.Hashable (Hashable (..)) import qualified U.Util.Hashable as Hashable -import Control.Lens (Bifunctor(..), Traversal) +import Control.Lens (lens, Lens, Bifunctor(..), Traversal) +import Data.Bitraversable (Bitraversable(..)) +import Data.Bifoldable (Bifoldable(..)) -- |This is the canonical representation of Reference type Reference = Reference' Text Hash @@ -21,7 +24,7 @@ type Id = Id' Hash data 
Reference' t h = ReferenceBuiltin t | ReferenceDerived (Id' h) - deriving (Eq, Ord, Show, Functor) + deriving (Eq, Ord, Show) pattern Derived :: h -> ComponentIndex -> Reference' t h pattern Derived h i = ReferenceDerived (Id h i) @@ -30,7 +33,7 @@ pattern Derived h i = ReferenceDerived (Id h i) type ComponentIndex = Word64 data Id' h = Id h ComponentIndex - deriving (Eq, Ord, Show, Functor) + deriving (Eq, Ord, Show, Functor, Foldable, Traversable) t :: Traversal (Reference' t h) (Reference' t' h) t t' t f = \case @@ -42,9 +45,20 @@ h f = \case ReferenceBuiltin t -> pure (ReferenceBuiltin t) Derived h i -> Derived <$> f h <*> pure i +idH :: Lens (Id' h) (Id' h') h h' +idH = lens (\(Id h _w) -> h) (\(Id _h w) h -> Id h w) + instance Bifunctor Reference' where - bimap fl _ (ReferenceBuiltin t) = ReferenceBuiltin (fl t) - bimap _ fr (ReferenceDerived id) = ReferenceDerived (fr <$> id) + bimap f _ (ReferenceBuiltin t) = ReferenceBuiltin (f t) + bimap _ g (ReferenceDerived id) = ReferenceDerived (g <$> id) + +instance Bifoldable Reference' where + bifoldMap f _ (ReferenceBuiltin t) = f t + bifoldMap _ g (ReferenceDerived id) = foldMap g id + +instance Bitraversable Reference' where + bitraverse f _ (ReferenceBuiltin t) = ReferenceBuiltin <$> f t + bitraverse _ g (ReferenceDerived id) = ReferenceDerived <$> traverse g id instance Hashable Reference where tokens (ReferenceBuiltin txt) = diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs index 692f0b2e74..48da3793fa 100644 --- a/codebase2/codebase/U/Codebase/Referent.hs +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -1,3 +1,5 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeSynonymInstances #-} @@ -12,6 +14,8 @@ import U.Util.Hashable (Hashable (..)) import Data.Word (Word64) import qualified U.Util.Hashable as Hashable import Data.Bifunctor (Bifunctor(..)) +import 
Data.Bifoldable (Bifoldable(..)) +import Data.Bitraversable (Bitraversable(..)) type Referent = Referent' Reference Reference type ReferentH = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) @@ -21,22 +25,34 @@ type ConstructorIndex = Word64 data Referent' rTm rTp = Ref rTm | Con rTp ConstructorIndex - deriving (Eq, Ord, Show) + deriving (Eq, Ord, Show, Bitraversable) type Id = Id' Hash Hash data Id' hTm hTp = RefId (Reference.Id' hTm) | ConId (Reference.Id' hTp) ConstructorIndex - deriving (Eq, Ord, Show, Functor) + deriving (Eq, Ord, Show, Bitraversable) instance (Hashable rTm, Hashable rTp) => Hashable (Referent' rTm rTp) where tokens (Ref r) = Hashable.Tag 0 : Hashable.tokens r tokens (Con r i) = [Hashable.Tag 1] ++ Hashable.tokens r ++ [Hashable.Nat (fromIntegral i)] instance Bifunctor Referent' where - bimap f _ (Ref r) = Ref (f r) - bimap _ g (Con r i) = Con (g r) i + bimap f g = \case + Ref r -> Ref (f r) + Con r i -> Con (g r) i + +instance Bifoldable Referent' where + bifoldMap f g = \case + Ref r -> f r + Con r _ -> g r instance Bifunctor Id' where - bimap f _ (RefId (Reference.Id h i)) = RefId (Reference.Id (f h) i) - bimap _ g (ConId (Reference.Id h i) j) = ConId (Reference.Id (g h) i) j + bimap f g = \case + RefId r -> RefId (fmap f r) + ConId r j -> ConId (fmap g r) j + +instance Bifoldable Id' where + bifoldMap f g = \case + RefId r -> foldMap f r + ConId r _ -> foldMap g r diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index f98621a480..b086f6dd70 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -34,6 +34,7 @@ import qualified Data.Foldable as Foldable type ConstructorId = Word64 type Term v = ABT.Term (F v) v () +type Type v = TypeR TypeRef v type TermRef = Reference' Text (Maybe Hash) type TypeRef = Reference type TermLink = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) From b3df95e7c6741803dd1396eb9fd3c077082189d9 Mon 
Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 20 Oct 2020 16:36:09 -0400 Subject: [PATCH 023/225] favor temp vars over Kleisli arrows --- .../U/Codebase/Sqlite/Operations.hs | 38 ++++++++----------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index aa427b94af..b37dcf39ba 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -62,27 +62,21 @@ loadTermByHash (C.Reference.Id h i) = runMaybeT do pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) loadTypeOfTermByTermHash :: DB m => C.Reference.Id -> m (Maybe (C.Term.Type Symbol)) -loadTypeOfTermByTermHash = - runMaybeT - . ( -- convert query reference by looking up db ids - C.Reference.idH - ( -- look up hash ids - m' "Q.loadHashId" Q.loadHashId . H.toBase32Hex - -- look up object ids - >=> m' "Q.objectIdByPrimaryHashId" Q.objectIdByPrimaryHashId - ) - -- load "type of term" blob for the reference - >=> m' "Q.loadTypeOfTerm" Q.loadTypeOfTerm - -- deserialize the blob into the type - >=> m' - "getTypeFromBytes" - (fmap pure $ getFromBytes $ S.getType S.getReference) - -- convert the result type by looking up db ids - >=> C.Type.rtraverse - ( bitraverse - (m Q.loadTextById) - (fmap H.fromBase32Hex . m Q.loadPrimaryHashByObjectId) - ) - ) +loadTypeOfTermByTermHash r = runMaybeT do + -- convert query reference by looking up db ids + let externalToDb = + m' "Q.loadHashId" Q.loadHashId . 
H.toBase32Hex + >=> m' "Q.objectIdByPrimaryHashId" Q.objectIdByPrimaryHashId + r' <- C.Reference.idH externalToDb r + -- load "type of term" blob for the reference + bytes <- m' "Q.loadTypeOfTerm" Q.loadTypeOfTerm r' + -- deserialize the blob into the type + typ <- m' "getTypeFromBytes" (fmap pure $ getFromBytes $ S.getType S.getReference) bytes + -- convert the result type by looking up db ids + let dbToExternal = + bitraverse + (m Q.loadTextById) + (fmap H.fromBase32Hex . m Q.loadPrimaryHashByObjectId) + C.Type.rtraverse dbToExternal typ -- loadLocallyIndexedComponentByHash From 8eb982cf185ec0de25dfa496119b928d04fa331e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 20 Oct 2020 22:49:53 -0400 Subject: [PATCH 024/225] drop decl ctor names and implement loadDeclByHash --- .../U/Codebase/Sqlite/Decl/Format.hs | 11 ++----- .../U/Codebase/Sqlite/Operations.hs | 22 ++++++++++++- .../U/Codebase/Sqlite/Serialization.hs | 6 ++-- codebase2/codebase/U/Codebase/Decl.hs | 32 ++++++++++--------- 4 files changed, 44 insertions(+), 27 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs index b9022fe681..5a95a089c7 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs @@ -1,7 +1,7 @@ {-# LANGUAGE DerivingVia #-} module U.Codebase.Sqlite.Decl.Format where -import U.Codebase.Decl (DeclType, Modifier) +import U.Codebase.Decl (DeclR) import U.Codebase.Reference (Reference') import U.Codebase.Sqlite.Symbol import qualified U.Codebase.Type as Type @@ -16,15 +16,10 @@ data DeclFormat = Decl LocallyIndexedComponent -- | V1: Decls included `Hash`es inline -- V2: Instead of `Hash`, we use a smaller index. 
-data LocallyIndexedComponent = +data LocallyIndexedComponent = LocallyIndexedComponent (Vector (LocalIds, Decl Symbol)) -data Decl v = DataDeclaration - { declType :: DeclType, - modifier :: Modifier, - bound :: [v], - constructors :: [Type v] - } +type Decl v = DeclR TypeRef v type Type v = ABT.Term (Type.F' TypeRef) v () type TypeRef = Reference' LocalTextId (Maybe LocalTypeId) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index b37dcf39ba..c492982d10 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -11,7 +11,10 @@ import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) import Data.Functor ((<&>)) import qualified Data.Vector as Vector +import qualified U.Codebase.Decl as C +import qualified U.Codebase.Decl as C.Decl import qualified U.Codebase.Reference as C.Reference +import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import qualified U.Codebase.Sqlite.LocalIds as LocalIds import U.Codebase.Sqlite.Queries (DB) import qualified U.Codebase.Sqlite.Queries as Q @@ -79,4 +82,21 @@ loadTypeOfTermByTermHash r = runMaybeT do (fmap H.fromBase32Hex . m Q.loadPrimaryHashByObjectId) C.Type.rtraverse dbToExternal typ --- loadLocallyIndexedComponentByHash +loadDeclByHash :: DB m => C.Reference.Id -> m (Maybe (C.Decl Symbol)) +loadDeclByHash (C.Reference.Id h i) = runMaybeT do + -- retrieve the blob + (localIds, C.Decl.DataDeclaration dt m b ct) <- do + oId <- m' "Q.objectIdByAnyHash" (Q.objectIdByAnyHash . H.toBase32Hex) h + bytes <- m' "Q.loadObjectById" Q.loadObjectById oId + m' ("getDeclElement: " ++ show i ++ ") fromBytes:") (pure . 
getFromBytes (S.lookupDeclElement i)) bytes + + -- look up the text and hashes that are used by the term + texts <- traverse (m' "Q.loadTextById" Q.loadTextById) $ LocalIds.textLookup localIds + hashes <- traverse (m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId) $ LocalIds.objectLookup localIds + + -- substitute the text and hashes back into the term + let substText (S.Decl.LocalTextId w) = texts Vector.! fromIntegral w + substHash (S.Decl.LocalTypeId w) = H.fromBase32Hex $ hashes Vector.! fromIntegral w + substTypeRef :: S.Decl.TypeRef -> C.Decl.TypeRef + substTypeRef = bimap substText (fmap substHash) + pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index c33b7c9fd8..93d0035269 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -347,11 +347,11 @@ putDeclFormat = \case putDeclComponent (DeclFormat.LocallyIndexedComponent v) = putFramedArray (putPair putLocalIds putDeclElement) v where - putDeclElement DeclFormat.DataDeclaration {..} = do + putDeclElement Decl.DataDeclaration {..} = do putDeclType declType putModifier modifier putFoldable putSymbol bound - putFoldable (putType putRecursiveReference putSymbol) constructors + putFoldable (putType putRecursiveReference putSymbol) constructorTypes putDeclType Decl.Data = putWord8 0 putDeclType Decl.Effect = putWord8 1 putModifier Decl.Structural = putWord8 0 @@ -369,7 +369,7 @@ getDeclFormat = getWord8 >>= \case getDeclElement :: MonadGet m => m (DeclFormat.Decl Symbol) getDeclElement = - DeclFormat.DataDeclaration + Decl.DataDeclaration <$> getDeclType <*> getModifier <*> getList getSymbol diff --git a/codebase2/codebase/U/Codebase/Decl.hs b/codebase2/codebase/U/Codebase/Decl.hs index 656f96061d..ece4bfc805 100644 --- 
a/codebase2/codebase/U/Codebase/Decl.hs +++ b/codebase2/codebase/U/Codebase/Decl.hs @@ -16,41 +16,39 @@ type ConstructorId = Word64 data DeclType = Data | Effect deriving (Eq, Ord, Show, Enum) -type Decl v = DeclR (Reference' Text Hash) v +type Decl v = DeclR TypeRef v +type TypeRef = Reference' Text (Maybe Hash) -data Modifier = Structural | Unique Text -- | Opaque (Set Reference) +data Modifier = Structural | Unique Text deriving (Eq, Ord, Show) data DeclR r v = DataDeclaration { declType :: DeclType, modifier :: Modifier, bound :: [v], - constructors' :: [(v, TypeR r v)] + constructorTypes :: [TypeR r v] } -- instance Hashable ConstructorType where -- tokens b = [Tag . fromIntegral $ fromEnum b] -- * Hashing stuff -constructors :: DeclR r v -> [v] -constructors = fmap fst . constructors' +data V v = Bound v | Ctor Int -constructorTypes :: DeclR r v -> [TypeR r v] -constructorTypes = fmap snd . constructors' - --- toABT :: Ord v => Decl v -> ABT.Term F v () --- toABT dd = ABT.tm () $ Modified (modifier dd) dd' +-- toABT :: Ord v => Decl v -> ABT.Term F (V v) () +-- toABT (DataDeclaration dt m bound constructors) = +-- ABT.tm () $ Modified dt m dd' -- where --- dd' = ABT.absChain (bound dd) $ +-- dd' = ABT.absChain bound $ -- ABT.absCycle --- (constructors dd) +-- constructors dd -- (ABT.tm () . 
Constructors $ ABT.transform Type <$> constructorTypes dd) data F a = Type (Type.FD a) | LetRec [a] a | Constructors [a] - | Modified Modifier a + | Modified DeclType Modifier a deriving (Functor, Foldable, Show) instance Hashable.Hashable1 F where @@ -66,8 +64,12 @@ instance Hashable.Hashable1 F where Constructors cs -> let (hashes, _) = hashCycle cs in tag 2 : map hashed hashes - Modified m t -> - [tag 3, Hashable.accumulateToken m, hashed $ hash t] + Modified dt m t -> + [tag 3, Hashable.accumulateToken dt, Hashable.accumulateToken m, hashed $ hash t] + +instance Hashable.Hashable DeclType where + tokens Data = [Hashable.Tag 0] + tokens Effect = [Hashable.Tag 1] instance Hashable.Hashable Modifier where tokens Structural = [Hashable.Tag 0] From 6a67cc74d68ed6b16dd02dbe55780ae7cbb54216 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 20 Oct 2020 22:58:48 -0400 Subject: [PATCH 025/225] reformatting --- .../U/Codebase/Sqlite/Decl/Format.hs | 17 ++++--- .../U/Codebase/Sqlite/Operations.hs | 13 +++-- codebase2/codebase/U/Codebase/Decl.hs | 50 +++++++++++-------- 3 files changed, 44 insertions(+), 36 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs index 5a95a089c7..37f7cea30d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs @@ -1,28 +1,31 @@ {-# LANGUAGE DerivingVia #-} + module U.Codebase.Sqlite.Decl.Format where +import Data.Bits (Bits) +import Data.Vector (Vector) +import Data.Word (Word64) import U.Codebase.Decl (DeclR) import U.Codebase.Reference (Reference') -import U.Codebase.Sqlite.Symbol +import U.Codebase.Sqlite.LocalIds (LocalIds) +import U.Codebase.Sqlite.Symbol (Symbol) import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT -import U.Codebase.Sqlite.LocalIds -import Data.Word (Word64) -import Data.Bits (Bits) -import Data.Vector (Vector) -- | Add 
new formats here data DeclFormat = Decl LocallyIndexedComponent -- | V1: Decls included `Hash`es inline -- V2: Instead of `Hash`, we use a smaller index. -data LocallyIndexedComponent = - LocallyIndexedComponent (Vector (LocalIds, Decl Symbol)) +data LocallyIndexedComponent + = LocallyIndexedComponent (Vector (LocalIds, Decl Symbol)) type Decl v = DeclR TypeRef v type Type v = ABT.Term (Type.F' TypeRef) v () + type TypeRef = Reference' LocalTextId (Maybe LocalTypeId) newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + newtype LocalTypeId = LocalTypeId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index c492982d10..928ba66115 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -5,7 +5,7 @@ module U.Codebase.Sqlite.Operations where -import Control.Monad ((<=<), (>=>)) +import Control.Monad ((>=>)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT, runMaybeT)) import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) @@ -42,14 +42,13 @@ m' msg f a = MaybeT do loadTermByHash :: DB m => C.Reference.Id -> m (Maybe (C.Term Symbol)) loadTermByHash (C.Reference.Id h i) = runMaybeT do - -- retrieve the blob - (localIds, term) <- + -- retrieve and deserialize the blob + (localIds, term) <- do + oId <- m' "Q.objectIdByAnyHash" (Q.objectIdByAnyHash . 
H.toBase32Hex) h + bytes <- m' "Q.loadObjectById" Q.loadObjectById oId m' ("getTermElement: " ++ show i ++ ") fromBytes:") - (fmap pure $ getFromBytes $ S.lookupTermElement i) - <=< m' "Q.loadObjectById" Q.loadObjectById - <=< m' "Q.objectIdByAnyHash" Q.objectIdByAnyHash - $ H.toBase32Hex h + (fmap pure $ getFromBytes $ S.lookupTermElement i) bytes -- look up the text and hashes that are used by the term texts <- traverse (m' "Q.loadTextById" Q.loadTextById) $ LocalIds.textLookup localIds diff --git a/codebase2/codebase/U/Codebase/Decl.hs b/codebase2/codebase/U/Codebase/Decl.hs index ece4bfc805..349269b915 100644 --- a/codebase2/codebase/U/Codebase/Decl.hs +++ b/codebase2/codebase/U/Codebase/Decl.hs @@ -1,14 +1,16 @@ {-# LANGUAGE DeriveFoldable #-} {-# LANGUAGE DeriveFunctor #-} + module U.Codebase.Decl where +import Data.Text (Text) import Data.Word (Word64) import U.Codebase.Reference (Reference') -import Data.Text (Text) -import U.Util.Hash (Hash) import U.Codebase.Type (TypeR) -import qualified U.Util.Hashable as Hashable import qualified U.Codebase.Type as Type +import U.Util.Hash (Hash) +import qualified U.Util.Hashable as Hashable + -- import qualified U.Core.ABT as ABT type ConstructorId = Word64 @@ -17,22 +19,24 @@ data DeclType = Data | Effect deriving (Eq, Ord, Show, Enum) type Decl v = DeclR TypeRef v + type TypeRef = Reference' Text (Maybe Hash) data Modifier = Structural | Unique Text deriving (Eq, Ord, Show) -data DeclR r v = DataDeclaration { - declType :: DeclType, - modifier :: Modifier, - bound :: [v], - constructorTypes :: [TypeR r v] -} +data DeclR r v = DataDeclaration + { declType :: DeclType, + modifier :: Modifier, + bound :: [v], + constructorTypes :: [TypeR r v] + } -- instance Hashable ConstructorType where -- tokens b = [Tag . 
fromIntegral $ fromEnum b] -- * Hashing stuff + data V v = Bound v | Ctor Int -- toABT :: Ord v => Decl v -> ABT.Term F (V v) () @@ -53,19 +57,21 @@ data F a instance Hashable.Hashable1 F where hash1 hashCycle hash e = - let (tag, hashed) = (Hashable.Tag, Hashable.Hashed) - -- Note: start each layer with leading `2` byte, to avoid collisions with - -- terms, which start each layer with leading `1`. See `Hashable1 Term.F` - in Hashable.accumulate $ tag 2 : case e of - Type t -> [tag 0, hashed $ Hashable.hash1 hashCycle hash t] - LetRec bindings body -> - let (hashes, hash') = hashCycle bindings - in [tag 1] ++ map hashed hashes ++ [hashed $ hash' body] - Constructors cs -> - let (hashes, _) = hashCycle cs - in tag 2 : map hashed hashes - Modified dt m t -> - [tag 3, Hashable.accumulateToken dt, Hashable.accumulateToken m, hashed $ hash t] + -- Note: start each layer with leading `2` byte, to avoid collisions with + -- terms, which start each layer with leading `1`. See `Hashable1 Term.F` + Hashable.accumulate $ + tag 2 : case e of + Type t -> [tag 0, hashed $ Hashable.hash1 hashCycle hash t] + LetRec bindings body -> + let (hashes, hash') = hashCycle bindings + in [tag 1] ++ map hashed hashes ++ [hashed $ hash' body] + Constructors cs -> + let (hashes, _) = hashCycle cs + in tag 2 : map hashed hashes + Modified dt m t -> + [tag 3, Hashable.accumulateToken dt, Hashable.accumulateToken m, hashed $ hash t] + where + (tag, hashed) = (Hashable.Tag, Hashable.Hashed) instance Hashable.Hashable DeclType where tokens Data = [Hashable.Tag 0] From ba6e08b31f63547e8d1d8b44290e398997e0652d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 20 Oct 2020 23:33:18 -0400 Subject: [PATCH 026/225] stub out a bunch more codebase operations --- .../U/Codebase/Sqlite/Operations.hs | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 
928ba66115..9ccbe5a11d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -27,6 +27,11 @@ import qualified U.Codebase.Type as C.Type import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import U.Util.Serialization (getFromBytes) +import U.Codebase.WatchKind (WatchKind) +import qualified U.Codebase.Reference as C +import qualified U.Codebase.Referent as C.Referent +import Data.Set (Set) +import U.Codebase.ShortHash (ShortBranchHash, ShortHash) loadTermComponentByHash :: DB m => Base32Hex -> m (Maybe [C.Term Symbol]) loadTermComponentByHash = error "todo" @@ -99,3 +104,57 @@ loadDeclByHash (C.Reference.Id h i) = runMaybeT do substTypeRef :: S.Decl.TypeRef -> C.Decl.TypeRef substTypeRef = bimap substText (fmap substHash) pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here + +saveTerm :: DB m => C.Reference.Id -> C.Term Symbol -> C.Term.Type Symbol -> m () +saveTerm = error "todo" + +saveDecl :: DB m => C.Reference.Id -> C.Decl Symbol -> m () +saveDecl = error "todo" + +listWatches :: DB m => WatchKind -> m [C.Reference.Id] +listWatches = error "todo" + +loadWatch :: DB m => WatchKind -> C.Reference.Id -> m (Maybe (C.Term Symbol)) +loadWatch = error "todo" + +saveWatch :: DB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () +saveWatch = error "todo" + +termsHavingType :: DB m => C.Reference -> m (Set C.Referent.Id) +termsHavingType = error "todo" + +termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) +termsMentioningType = error "todo" + +termReferencesByPrefix :: DB m => ShortHash -> m (Set C.Reference.Id) +termReferencesByPrefix = error "todo" + +typeReferencesByPrefix :: DB m => ShortHash -> m (Set C.Reference.Id) +typeReferencesByPrefix = error "todo" + +termReferentsByPrefix :: DB m => ShortHash -> m (Set C.Referent.Id) +termReferentsByPrefix = error "todo" + +branchHashesByPrefix :: DB m => 
ShortBranchHash -> m (Set C.Reference.Id) +branchHashesByPrefix = error "todo" + +dependents :: DB m => C.Reference -> m (Maybe (Set C.Reference.Id)) +dependents = error "todo" + +termDependencies :: DB m => C.Reference.Id -> m (Maybe (Set C.Reference.Id)) +termDependencies = error "todo" + +declDependencies :: DB m => C.Reference.Id -> m (Maybe (Set C.Reference.Id)) +declDependencies = error "todo" + + +-- getBranchByAnyHash :: +-- getBranchByBranchHash :: DB m => BranchHash -> m (Maybe (Branch m)) +-- getBranchByCausalHash :: DB m => CausalHash -> m (Maybe (Branch m)) + +-- lca :: (forall he e. [Causal m CausalHash he e] -> m (Maybe BranchHash)), + +-- branchDependencies :: +-- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), +-- -- |the "new" terms and types mentioned in a patch +-- patchDependencies :: EditHash -> m (Set Reference, Set Reference) From 8eb21adda063d9f700b105c70e6bb88ba0e7b2de Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 21 Oct 2020 21:34:43 -0400 Subject: [PATCH 027/225] wip --- .../U/Codebase/Sqlite/Operations.hs | 138 ++++--- .../U/Codebase/Sqlite/Queries.hs | 8 + .../U/Codebase/Sqlite/Reference.hs | 4 + .../src/Unison/Codebase/SqliteCodebase.hs | 348 +++++++++++++----- .../unison-parser-typechecker.cabal | 3 +- stack.yaml | 2 +- unison-core/src/Unison/ABT.hs | 8 + unison-core/src/Unison/Hash.hs | 3 +- unison-core/src/Unison/Reference.hs | 1 + 9 files changed, 372 insertions(+), 143 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 9ccbe5a11d..7311239e84 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -6,36 +6,46 @@ module U.Codebase.Sqlite.Operations where import Control.Monad ((>=>)) -import Control.Monad.Trans.Maybe (MaybeT (MaybeT, runMaybeT)) +import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) import Data.Bifunctor (Bifunctor (bimap)) 
import Data.Bitraversable (Bitraversable (bitraverse)) +import Data.ByteString (ByteString) import Data.Functor ((<&>)) +import Data.Set (Set) +import qualified Data.Set as Set +import Data.Text (Text) import qualified Data.Vector as Vector +import Data.Word (Word64) import qualified U.Codebase.Decl as C import qualified U.Codebase.Decl as C.Decl +import qualified U.Codebase.Reference as C import qualified U.Codebase.Reference as C.Reference +import qualified U.Codebase.Referent as C.Referent +import U.Codebase.ShortHash (ShortBranchHash, ShortHash) +import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl +import U.Codebase.Sqlite.LocalIds (LocalIds) import qualified U.Codebase.Sqlite.LocalIds as LocalIds import U.Codebase.Sqlite.Queries (DB) import qualified U.Codebase.Sqlite.Queries as Q +import qualified U.Codebase.Sqlite.Reference as S +import qualified U.Codebase.Sqlite.Reference as S.Reference import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sqlite.Symbol (Symbol) import qualified U.Codebase.Sqlite.Term.Format as S.Term import qualified U.Codebase.Term as C import qualified U.Codebase.Term as C.Term import qualified U.Codebase.Type as C.Type +import U.Codebase.WatchKind (WatchKind) import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import U.Util.Serialization (getFromBytes) -import U.Codebase.WatchKind (WatchKind) -import qualified U.Codebase.Reference as C -import qualified U.Codebase.Referent as C.Referent -import Data.Set (Set) -import U.Codebase.ShortHash (ShortBranchHash, ShortHash) loadTermComponentByHash :: DB m => Base32Hex -> m (Maybe [C.Term Symbol]) loadTermComponentByHash = error "todo" +-- * helpers + m :: (a -> f (Maybe b)) -> a -> MaybeT f b m = fmap MaybeT @@ -45,62 +55,94 @@ m' msg f a = MaybeT do Nothing -> error $ "nothing: " ++ msg ++ " " ++ show a Just b -> Just b -loadTermByHash :: DB m => C.Reference.Id -> m (Maybe (C.Term Symbol)) -loadTermByHash 
(C.Reference.Id h i) = runMaybeT do +c2sReference :: DB m => C.Reference -> MaybeT m S.Reference +c2sReference = bitraverse lookupTextId hashToObjectId + +s2cReference :: DB m => S.Reference -> MaybeT m C.Reference +s2cReference = bitraverse loadTextById loadHashByObjectId + +c2sReferenceId :: DB m => C.Reference.Id -> MaybeT m S.Reference.Id +c2sReferenceId = C.Reference.idH hashToObjectId + +s2cReferenceId :: DB m => S.Reference.Id -> MaybeT m C.Reference.Id +s2cReferenceId = C.Reference.idH loadHashByObjectId + +lookupTextId :: DB m => Text -> MaybeT m Db.TextId +lookupTextId = m' "Q.loadText" Q.loadText + +loadTextById :: DB m => Db.TextId -> MaybeT m Text +loadTextById = m' "Q.loadTextById" Q.loadTextById + +hashToObjectId :: DB m => H.Hash -> MaybeT m Db.ObjectId +hashToObjectId = + m' "Q.loadHashId" Q.loadHashId . H.toBase32Hex + >=> m' "Q.objectIdByPrimaryHashId" Q.objectIdByPrimaryHashId + +loadObjectById :: DB m => Db.ObjectId -> MaybeT m ByteString +loadObjectById = m' "Q.loadObjectById" Q.loadObjectById + +loadHashByObjectId :: DB m => Db.ObjectId -> MaybeT m H.Hash +loadHashByObjectId = + fmap H.fromBase32Hex + . m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId + +decodeTermElement :: Applicative f => Word64 -> ByteString -> MaybeT f (LocalIds, S.Term.Term) +decodeTermElement i = + m' + ("getTermElement: " ++ show i ++ ") fromBytes:") + (fmap pure $ getFromBytes $ S.lookupTermElement i) + +decodeDeclElement :: Applicative f => Word64 -> ByteString -> MaybeT f (LocalIds, S.Decl.Decl Symbol) +decodeDeclElement i = + m' + ("getDeclElement: " ++ show i ++ ") fromBytes:") + (pure . getFromBytes (S.lookupDeclElement i)) + +-- * meat and veggies + +loadTermByReference :: DB m => C.Reference.Id -> MaybeT m (C.Term Symbol) +loadTermByReference (C.Reference.Id h i) = do -- retrieve and deserialize the blob - (localIds, term) <- do - oId <- m' "Q.objectIdByAnyHash" (Q.objectIdByAnyHash . 
H.toBase32Hex) h - bytes <- m' "Q.loadObjectById" Q.loadObjectById oId - m' - ("getTermElement: " ++ show i ++ ") fromBytes:") - (fmap pure $ getFromBytes $ S.lookupTermElement i) bytes + (localIds, term) <- + hashToObjectId >=> loadObjectById >=> decodeTermElement i $ h -- look up the text and hashes that are used by the term - texts <- traverse (m' "Q.loadTextById" Q.loadTextById) $ LocalIds.textLookup localIds - hashes <- traverse (m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId) $ LocalIds.objectLookup localIds + texts <- traverse loadTextById $ LocalIds.textLookup localIds + hashes <- traverse loadHashByObjectId $ LocalIds.objectLookup localIds -- substitute the text and hashes back into the term let substText (S.Term.LocalTextId w) = texts Vector.! fromIntegral w - substHash (S.Term.LocalDefnId w) = H.fromBase32Hex $ hashes Vector.! fromIntegral w + substHash (S.Term.LocalDefnId w) = hashes Vector.! fromIntegral w substTermRef = bimap substText (fmap substHash) substTypeRef = bimap substText substHash substTermLink = bimap substTermRef substTypeRef substTypeLink = substTypeRef pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) -loadTypeOfTermByTermHash :: DB m => C.Reference.Id -> m (Maybe (C.Term.Type Symbol)) -loadTypeOfTermByTermHash r = runMaybeT do +loadTypeOfTermByTermHash :: DB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) +loadTypeOfTermByTermHash r = do -- convert query reference by looking up db ids - let externalToDb = - m' "Q.loadHashId" Q.loadHashId . 
H.toBase32Hex - >=> m' "Q.objectIdByPrimaryHashId" Q.objectIdByPrimaryHashId - r' <- C.Reference.idH externalToDb r + r' <- C.Reference.idH hashToObjectId r -- load "type of term" blob for the reference bytes <- m' "Q.loadTypeOfTerm" Q.loadTypeOfTerm r' -- deserialize the blob into the type typ <- m' "getTypeFromBytes" (fmap pure $ getFromBytes $ S.getType S.getReference) bytes -- convert the result type by looking up db ids - let dbToExternal = - bitraverse - (m Q.loadTextById) - (fmap H.fromBase32Hex . m Q.loadPrimaryHashByObjectId) - C.Type.rtraverse dbToExternal typ - -loadDeclByHash :: DB m => C.Reference.Id -> m (Maybe (C.Decl Symbol)) -loadDeclByHash (C.Reference.Id h i) = runMaybeT do + C.Type.rtraverse s2cReference typ + +loadDeclByHash :: DB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) +loadDeclByHash (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- do - oId <- m' "Q.objectIdByAnyHash" (Q.objectIdByAnyHash . H.toBase32Hex) h - bytes <- m' "Q.loadObjectById" Q.loadObjectById oId - m' ("getDeclElement: " ++ show i ++ ") fromBytes:") (pure . getFromBytes (S.lookupDeclElement i)) bytes + hashToObjectId >=> loadObjectById >=> decodeDeclElement i $ h -- look up the text and hashes that are used by the term - texts <- traverse (m' "Q.loadTextById" Q.loadTextById) $ LocalIds.textLookup localIds - hashes <- traverse (m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId) $ LocalIds.objectLookup localIds + texts <- traverse loadTextById $ LocalIds.textLookup localIds + hashes <- traverse loadHashByObjectId $ LocalIds.objectLookup localIds -- substitute the text and hashes back into the term let substText (S.Decl.LocalTextId w) = texts Vector.! fromIntegral w - substHash (S.Decl.LocalTypeId w) = H.fromBase32Hex $ hashes Vector.! fromIntegral w + substHash (S.Decl.LocalTypeId w) = hashes Vector.! 
fromIntegral w substTypeRef :: S.Decl.TypeRef -> C.Decl.TypeRef substTypeRef = bimap substText (fmap substHash) pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here @@ -138,8 +180,17 @@ termReferentsByPrefix = error "todo" branchHashesByPrefix :: DB m => ShortBranchHash -> m (Set C.Reference.Id) branchHashesByPrefix = error "todo" -dependents :: DB m => C.Reference -> m (Maybe (Set C.Reference.Id)) -dependents = error "todo" +-- | returns a list of known definitions referencing `r` +dependents :: DB m => C.Reference -> MaybeT m (Set C.Reference.Id) +dependents r = do + r' <- c2sReference r + sIds :: [S.Reference.Id] <- Q.getDependentsForDependency r' + -- how will you convert this back to Unison.Reference if you + -- don't know the cycle size? + cIds <- traverse s2cReferenceId sIds + pure $ Set.fromList cIds + +-- * Sync-related dependency queries termDependencies :: DB m => C.Reference.Id -> m (Maybe (Set C.Reference.Id)) termDependencies = error "todo" @@ -147,14 +198,13 @@ termDependencies = error "todo" declDependencies :: DB m => C.Reference.Id -> m (Maybe (Set C.Reference.Id)) declDependencies = error "todo" +-- branchDependencies :: +-- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), +-- -- |the "new" terms and types mentioned in a patch +-- patchDependencies :: EditHash -> m (Set Reference, Set Reference) -- getBranchByAnyHash :: -- getBranchByBranchHash :: DB m => BranchHash -> m (Maybe (Branch m)) -- getBranchByCausalHash :: DB m => CausalHash -> m (Maybe (Branch m)) -- lca :: (forall he e. 
[Causal m CausalHash he e] -> m (Maybe BranchHash)), - --- branchDependencies :: --- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), --- -- |the "new" terms and types mentioned in a patch --- patchDependencies :: EditHash -> m (Set Reference, Set Reference) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index ebf63291e0..b7726acdeb 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -229,6 +229,14 @@ addToDependentsIndex dependency dependent = execute sql (dependency :. dependent ) VALUES (?, ?, ?, ?, ?) |] +getDependentsForDependency :: DB m => Reference.Reference -> m [Reference.Id] +getDependentsForDependency dependency = query sql dependency where sql = [here| + SELECT FROM dependents_index ( + dependent_object_id, dependent_component_index + ) WHERE dependency_builtin = ? + AND dependency_object_id = ? + AND dependency_component_index = ? 
+|] -- * helper functions queryList :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f [b] diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index 894794599b..a7971d739a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -7,6 +7,7 @@ module U.Codebase.Sqlite.Reference where import U.Codebase.Sqlite.DbId import U.Codebase.Reference (Reference'(ReferenceBuiltin, ReferenceDerived), Id'(Id)) import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(toRow)) +import Database.SQLite.Simple.FromRow (FromRow(fromRow), field) type Reference = Reference' TextId ObjectId type Id = Id' ObjectId @@ -30,3 +31,6 @@ instance ToRow Id where -- | builtinId, hashId, componentIndex toRow = \case Id h i -> toRow (Only h) ++ toRow (Only i) + +instance FromRow Id where + fromRow = Id <$> field <*> field diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 9ba8e93d71..d68cf743bb 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,105 +1,263 @@ +{-# LANGUAGE ScopedTypeVariables #-} + module Unison.Codebase.SqliteCodebase where -- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) +import Control.Monad.Reader (ReaderT (runReaderT)) +import qualified Data.ByteString.Short as SBS +import Data.Set (Set) +import Data.Text (Text) +import Database.SQLite.Simple (Connection) +import qualified Database.SQLite.Simple as Sqlite +import System.FilePath (()) +import qualified U.Codebase.Decl as V2.Decl +import qualified U.Codebase.Reference as V2 +import qualified U.Codebase.Reference as V2.Reference +import qualified U.Codebase.Referent as V2 +import qualified U.Codebase.Sqlite.Symbol as V2 +import qualified U.Codebase.Term as V2.Term +import 
qualified U.Core.ABT as V2.ABT +import qualified U.Util.Hash as V2 +import qualified U.Util.Hash as V2.Hash +import qualified Unison.ABT as V1.ABT +import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 -import qualified Unison.Reference as Reference -import Unison.Term (Term) -import Unison.Symbol (Symbol) -import Unison.Parser (Ann) -import Unison.Type (Type) -import Unison.DataDeclaration (Decl) import Unison.Codebase.Branch (Branch) import qualified Unison.Codebase.Branch as Branch -import Data.Set (Set) -import Unison.Reference (Reference) -import Unison.Codebase.SyncMode (SyncMode) -import qualified Unison.UnisonFile as UF import qualified Unison.Codebase.Reflog as Reflog -import Data.Text (Text) +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import Unison.Codebase.SyncMode (SyncMode) +import qualified Unison.ConstructorType as CT +import Unison.DataDeclaration (Decl) +import Unison.Hash (Hash) +import qualified Unison.Hash as V1 +import Unison.Parser (Ann) +import qualified Unison.Parser as Ann +import qualified Unison.Pattern as P +import Unison.Prelude (MaybeT (runMaybeT)) +import Unison.Reference (Reference) +import qualified Unison.Reference as Reference +import qualified Unison.Reference as V1 +import qualified Unison.Reference as V1.Reference import qualified Unison.Referent as Referent +import qualified Unison.Referent as V1 import Unison.ShortHash (ShortHash) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) --- import qualified Database.SQLite.Simple as Sqlite - -sqliteCodebase :: FilePath -> IO (Codebase1.Codebase IO Symbol Ann) -sqliteCodebase _root = do - -- c :: Sqlite.Connection <- error "todo" - let - getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) - getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) - getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) - putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTypeDeclaration :: 
Reference.Id -> Decl Symbol Ann -> IO () - getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) - putRootBranch :: Branch IO -> IO () - rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) - getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) - dependentsImpl :: Reference -> IO (Set Reference.Id) - syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () - syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () - watches :: UF.WatchKind -> IO [Reference.Id] - getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) - putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () - getReflog :: IO [Reflog.Entry] - appendReflog :: Text -> Branch IO -> Branch IO -> IO () - termsOfTypeImpl :: Reference -> IO (Set Referent.Id) - termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) - hashLength :: IO Int - termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) - typeReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) - termReferentsByPrefix :: ShortHash -> IO (Set Referent.Id) - branchHashLength :: IO Int - branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) - - getTerm (Reference.Id _r _i _n) = error "todo" - getTypeOfTermImpl = error "todo" - getTypeDeclaration = error "todo" - putTerm = error "todo" - putTypeDeclaration = error "todo" - getRootBranch = error "todo" - putRootBranch = error "todo" - rootBranchUpdates = error "todo" - getBranchForHash = error "todo" - dependentsImpl = error "todo" - syncFromDirectory = error "todo" - syncToDirectory = error "todo" - watches = error "todo" - getWatch = error "todo" - putWatch = error "todo" - getReflog = error "todo" - appendReflog = error "todo" - termsOfTypeImpl = error "todo" - termsMentioningTypeImpl = error "todo" - hashLength = error "todo" - termReferencesByPrefix = error "todo" - typeReferencesByPrefix = error "todo" - termReferentsByPrefix = error "todo" - branchHashLength = error "todo" - 
branchHashesByPrefix = error "todo" - pure $ Codebase1.Codebase - getTerm - getTypeOfTermImpl - getTypeDeclaration - putTerm - putTypeDeclaration - getRootBranch - putRootBranch - rootBranchUpdates - getBranchForHash - dependentsImpl - syncFromDirectory - syncToDirectory - watches - getWatch - putWatch - getReflog - appendReflog - termsOfTypeImpl - termsMentioningTypeImpl - hashLength - termReferencesByPrefix - typeReferencesByPrefix - termReferentsByPrefix - branchHashLength - branchHashesByPrefix +import Unison.Symbol (Symbol) +import qualified Unison.Symbol as V1 +import Unison.Term (Term) +import qualified Unison.Term as V1.Term +import Unison.Type (Type) +import qualified Unison.Type as V1.Type +import qualified Unison.UnisonFile as UF +import qualified Unison.Var as Var + +sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) +sqliteCodebase root = do + conn :: Sqlite.Connection <- Sqlite.open $ root "v2" "unison.sqlite3" + let getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) + getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) + putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () + putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () + getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) + putRootBranch :: Branch IO -> IO () + rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) + getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) + dependentsImpl :: Reference -> IO (Set Reference.Id) + syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () + syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () + watches :: UF.WatchKind -> IO [Reference.Id] + getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) + putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () + getReflog :: IO [Reflog.Entry] + appendReflog :: Text -> Branch IO -> Branch IO -> IO () + termsOfTypeImpl :: Reference 
-> IO (Set Referent.Id) + termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) + hashLength :: IO Int + termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + typeReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + termReferentsByPrefix :: ShortHash -> IO (Set Referent.Id) + branchHashLength :: IO Int + branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) + + getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) + getTerm (Reference.Id _h _i _n) = error "todo" + -- runDB . fmap (term2to1 h) $ Ops.loadTermByReference (C.Reference.Id h i) + getTypeOfTermImpl = error "todo" + getTypeDeclaration = error "todo" + putTerm = error "todo" + putTypeDeclaration = error "todo" + getRootBranch = error "todo" + putRootBranch = error "todo" + rootBranchUpdates = error "todo" + getBranchForHash = error "todo" + dependentsImpl = error "todo" + syncFromDirectory = error "todo" + syncToDirectory = error "todo" + watches = error "todo" + getWatch = error "todo" + putWatch = error "todo" + getReflog = error "todo" + appendReflog = error "todo" + termsOfTypeImpl = error "todo" + termsMentioningTypeImpl = error "todo" + hashLength = error "todo" + termReferencesByPrefix = error "todo" + typeReferencesByPrefix = error "todo" + termReferentsByPrefix = error "todo" + branchHashLength = error "todo" + branchHashesByPrefix = error "todo" + let finalizer = Sqlite.close conn + pure $ + ( finalizer, + Codebase1.Codebase + getTerm + getTypeOfTermImpl + getTypeDeclaration + putTerm + putTypeDeclaration + getRootBranch + putRootBranch + rootBranchUpdates + getBranchForHash + dependentsImpl + syncFromDirectory + syncToDirectory + watches + getWatch + putWatch + getReflog + appendReflog + termsOfTypeImpl + termsMentioningTypeImpl + hashLength + termReferencesByPrefix + typeReferencesByPrefix + termReferentsByPrefix + branchHashLength + branchHashesByPrefix + ) + +-- x :: DB m => MaybeT m (Term Symbol) -> MaybeT m (Term Symbol Ann) +-- x = error "not implemented" + 
+term2to1 :: forall m. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.Term V2.Symbol -> m (V1.Term.Term V1.Symbol Ann) +term2to1 h lookupSize lookupCT tm = + V1.ABT.transformM (termF2to1 h lookupSize lookupCT) + . V1.ABT.vmap symbol2to1 + . V1.ABT.amap (const Ann.External) + $ abt2to1 tm + +symbol2to1 :: V2.Symbol -> V1.Symbol +symbol2to1 (V2.Symbol w t) = V1.Symbol w (Var.User t) + +abt2to1 :: Functor f => V2.ABT.Term f v a -> V1.ABT.Term f v a +abt2to1 (V2.ABT.Term fv a out) = V1.ABT.Term fv a (go out) + where + go = \case + V2.ABT.Cycle body -> V1.ABT.Cycle (abt2to1 body) + V2.ABT.Abs v body -> V1.ABT.Abs v (abt2to1 body) + V2.ABT.Var v -> V1.ABT.Var v + V2.ABT.Tm tm -> V1.ABT.Tm (abt2to1 <$> tm) + +abt1to2 :: Functor f => V1.ABT.Term f v a -> V2.ABT.Term f v a +abt1to2 (V1.ABT.Term fv a out) = V2.ABT.Term fv a (go out) + where + go = \case + V1.ABT.Cycle body -> V2.ABT.Cycle (abt1to2 body) + V1.ABT.Abs v body -> V2.ABT.Abs v (abt1to2 body) + V1.ABT.Var v -> V2.ABT.Var v + V1.ABT.Tm tm -> V2.ABT.Tm (abt1to2 <$> tm) + +rreference2to1 :: Applicative m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Reference' Text (Maybe V2.Hash) -> m V1.Reference +rreference2to1 h lookupSize = \case + V2.ReferenceBuiltin t -> pure $ V1.Reference.Builtin t + V2.ReferenceDerived i -> V1.Reference.DerivedId <$> rreferenceid2to1 h lookupSize i + +rreferenceid2to1 :: Functor m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Reference.Id' (Maybe V2.Hash) -> m V1.Reference.Id +rreferenceid2to1 h lookupSize (V2.Reference.Id oh i) = + V1.Reference.Id h' i <$> lookupSize h' + where + h' = maybe h hash2to1 oh + +reference2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> V2.Reference -> m V1.Reference +reference2to1 lookupSize = \case + V2.ReferenceBuiltin t -> pure $ V1.Reference.Builtin t + V2.ReferenceDerived i -> V1.Reference.DerivedId <$> referenceid2to1 lookupSize i + +referenceid2to1 :: Functor m => (Hash -> m 
V1.Reference.Size) -> V2.Reference.Id -> m V1.Reference.Id +referenceid2to1 lookupSize (V2.Reference.Id h i) = + V1.Reference.Id sh i <$> lookupSize sh + where + sh = hash2to1 h + +rreferent2to1 :: Applicative m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.ReferentH -> m V1.Referent +rreferent2to1 h lookupSize lookupCT = \case + V2.Ref r -> V1.Ref <$> rreference2to1 h lookupSize r + V2.Con r i -> V1.Con <$> reference2to1 lookupSize r <*> pure (fromIntegral i) <*> lookupCT r + +hash2to1 :: V2.Hash.Hash -> Hash +hash2to1 (V2.Hash.Hash sbs) = V1.Hash (SBS.fromShort sbs) + +ttype2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> V2.Term.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) +ttype2to1 = undefined + +dtype2to1 :: Monad m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Decl.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) +dtype2to1 = undefined + +termF2to1 :: forall m a. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) +termF2to1 h lookupSize lookupCT = go + where + go :: V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) + go = \case + V2.Term.Int i -> pure $ V1.Term.Int i + V2.Term.Nat n -> pure $ V1.Term.Nat n + V2.Term.Float d -> pure $ V1.Term.Float d + V2.Term.Boolean b -> pure $ V1.Term.Boolean b + V2.Term.Text t -> pure $ V1.Term.Text t + V2.Term.Char c -> pure $ V1.Term.Char c + V2.Term.Ref r -> V1.Term.Ref <$> rreference2to1 h lookupSize r + V2.Term.Constructor r i -> + V1.Term.Constructor <$> reference2to1 lookupSize r <*> pure (fromIntegral i) + V2.Term.Request r i -> + V1.Term.Request <$> reference2to1 lookupSize r <*> pure (fromIntegral i) + V2.Term.Handle a a4 -> pure $ V1.Term.Handle a a4 + V2.Term.App a a4 -> pure $ V1.Term.App a a4 + V2.Term.Ann a t2 -> V1.Term.Ann a <$> ttype2to1 lookupSize t2 + V2.Term.Sequence sa -> pure $ V1.Term.Sequence sa + V2.Term.If a a4 a5 -> pure $ V1.Term.If a 
a4 a5 + V2.Term.And a a4 -> pure $ V1.Term.And a a4 + V2.Term.Or a a4 -> pure $ V1.Term.Or a a4 + V2.Term.Lam a -> pure $ V1.Term.Lam a + V2.Term.LetRec as a -> pure $ V1.Term.LetRec False as a + V2.Term.Let a a4 -> pure $ V1.Term.Let False a a4 + V2.Term.Match a cases -> V1.Term.Match a <$> traverse goCase cases + V2.Term.TermLink rr -> V1.Term.TermLink <$> rreferent2to1 h lookupSize lookupCT rr + V2.Term.TypeLink r -> V1.Term.TypeLink <$> reference2to1 lookupSize r + goCase = \case + V2.Term.MatchCase pat cond body -> + V1.Term.MatchCase <$> (goPat pat) <*> pure cond <*> pure body + goPat = \case + V2.Term.PUnbound -> pure $ P.Unbound a + V2.Term.PVar -> pure $ P.Var a + V2.Term.PBoolean b -> pure $ P.Boolean a b + V2.Term.PInt i -> pure $ P.Int a i + V2.Term.PNat n -> pure $ P.Nat a n + V2.Term.PFloat d -> pure $ P.Float a d + V2.Term.PText t -> pure $ P.Text a t + V2.Term.PChar c -> pure $ P.Char a c + V2.Term.PConstructor r i ps -> + P.Constructor a <$> reference2to1 lookupSize r <*> pure i <*> (traverse goPat ps) + V2.Term.PAs p -> P.As a <$> goPat p + V2.Term.PEffectPure p -> P.EffectPure a <$> goPat p + V2.Term.PEffectBind r i ps p -> P.EffectBind a <$> reference2to1 lookupSize r <*> pure i <*> traverse goPat ps <*> goPat p + V2.Term.PSequenceLiteral ps -> P.SequenceLiteral a <$> traverse goPat ps + V2.Term.PSequenceOp p1 op p2 -> P.SequenceOp a <$> goPat p1 <*> pure (goOp op) <*> goPat p2 + goOp = \case + V2.Term.PCons -> P.Cons + V2.Term.PSnoc -> P.Snoc + V2.Term.PConcat -> P.Concat + a = Ann.External + +runDB :: Connection -> MaybeT (ReaderT Connection IO) a -> IO (Maybe a) +runDB conn action = flip runReaderT conn $ runMaybeT action diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index a40e374452..32234f075d 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -246,7 +246,8 @@ library -- v2 unison-core, 
unison-codebase, - unison-codebase-sqlite + unison-codebase-sqlite, + unison-util ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures diff --git a/stack.yaml b/stack.yaml index a1605871ee..ec8fdef831 100644 --- a/stack.yaml +++ b/stack.yaml @@ -42,4 +42,4 @@ extra-deps: ghc-options: # All packages - "$locals": -haddock -Wall -Wno-name-shadowing -Werror -Wno-type-defaults -Wno-missing-pattern-synonym-signatures #-freverse-errors + "$locals": -haddock -Wall -Wno-name-shadowing -Werror -Wno-type-defaults -Wno-missing-pattern-synonym-signatures -fprint-expanded-synonyms #-freverse-errors diff --git a/unison-core/src/Unison/ABT.hs b/unison-core/src/Unison/ABT.hs index a4219f1d5f..1e1cfb4b46 100644 --- a/unison-core/src/Unison/ABT.hs +++ b/unison-core/src/Unison/ABT.hs @@ -495,6 +495,14 @@ transform f tm = case out tm of in tm' (annotation tm) (f subterms') Cycle body -> cycle' (annotation tm) (transform f body) +transformM :: (Ord v, Monad m, Traversable g) + => (forall a. 
f a -> m (g a)) -> Term f v a -> m (Term g v a) +transformM f t = case out t of + Var v -> pure $ annotatedVar (annotation t) v + Abs v body -> abs' (annotation t) v <$> (transformM f body) + Tm subterms -> tm' (annotation t) <$> (traverse (transformM f) =<< f subterms) + Cycle body -> cycle' (annotation t) <$> (transformM f body) + -- Rebuild the tree annotations upward, starting from the leaves, -- using the Monoid to choose the annotation at intermediate nodes reannotateUp :: (Ord v, Foldable f, Functor f, Monoid b) diff --git a/unison-core/src/Unison/Hash.hs b/unison-core/src/Unison/Hash.hs index d6738540a3..1ad972ad79 100644 --- a/unison-core/src/Unison/Hash.hs +++ b/unison-core/src/Unison/Hash.hs @@ -1,7 +1,7 @@ {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE OverloadedStrings #-} -module Unison.Hash (Hash, toBytes, base32Hex, base32Hexs, fromBase32Hex, fromBytes, unsafeFromBase32Hex, showBase32Hex, validBase32HexChars) where +module Unison.Hash (Hash(Hash), toBytes, base32Hex, base32Hexs, fromBase32Hex, fromBytes, unsafeFromBase32Hex, showBase32Hex, validBase32HexChars) where import Unison.Prelude @@ -106,4 +106,3 @@ fromBytes = Hash showBase32Hex :: H.Hashable t => t -> String showBase32Hex = base32Hexs . 
H.accumulate' - diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs index 9515b6248d..576aa3850c 100644 --- a/unison-core/src/Unison/Reference.hs +++ b/unison-core/src/Unison/Reference.hs @@ -9,6 +9,7 @@ module Unison.Reference pattern Derived, pattern DerivedId, Id(..), + Size, derivedBase32Hex, Component, members, components, From b1504fa97061e8009ee7d02c7d51f8be8f4515e8 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 22 Oct 2020 14:05:07 -0400 Subject: [PATCH 028/225] wip --- .../U/Codebase/Sqlite/Operations.hs | 21 +- codebase2/codebase/U/Codebase/Decl.hs | 1 + codebase2/core/U/Core/ABT.hs | 8 + .../U/Util/Serialization.hs | 4 + .../src/Unison/Codebase/SqliteCodebase.hs | 167 ++----------- .../Codebase/SqliteCodebase/Conversions.hs | 233 ++++++++++++++++++ .../unison-parser-typechecker.cabal | 1 + 7 files changed, 288 insertions(+), 147 deletions(-) create mode 100644 parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 7311239e84..48a3539727 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -40,6 +40,7 @@ import U.Codebase.WatchKind (WatchKind) import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import U.Util.Serialization (getFromBytes) +import qualified U.Util.Serialization as S loadTermComponentByHash :: DB m => Base32Hex -> m (Maybe [C.Term Symbol]) loadTermComponentByHash = error "todo" @@ -86,6 +87,10 @@ loadHashByObjectId = fmap H.fromBase32Hex . 
m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId +decodeComponentLengthOnly :: Applicative f => ByteString -> MaybeT f Word64 +decodeComponentLengthOnly = m' "decodeComponentLengthOnly" + (fmap pure $ getFromBytes S.lengthFramedArray) + decodeTermElement :: Applicative f => Word64 -> ByteString -> MaybeT f (LocalIds, S.Term.Term) decodeTermElement i = m' @@ -98,6 +103,14 @@ decodeDeclElement i = ("getDeclElement: " ++ show i ++ ") fromBytes:") (pure . getFromBytes (S.lookupDeclElement i)) +-- * legacy conversion helpers +getCycleLen :: DB m => H.Hash -> MaybeT m Word64 +getCycleLen h = fmap fromIntegral $ + hashToObjectId >=> loadObjectById >=> decodeComponentLengthOnly $ h + +getDeclTypeByReference :: DB m => C.Reference.Id -> MaybeT m C.Decl.DeclType +getDeclTypeByReference = fmap C.Decl.declType . loadDeclByReference + -- * meat and veggies loadTermByReference :: DB m => C.Reference.Id -> MaybeT m (C.Term Symbol) @@ -119,8 +132,8 @@ loadTermByReference (C.Reference.Id h i) = do substTypeLink = substTypeRef pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) -loadTypeOfTermByTermHash :: DB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) -loadTypeOfTermByTermHash r = do +loadTypeOfTermByTermReference :: DB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) +loadTypeOfTermByTermReference r = do -- convert query reference by looking up db ids r' <- C.Reference.idH hashToObjectId r -- load "type of term" blob for the reference @@ -130,8 +143,8 @@ loadTypeOfTermByTermHash r = do -- convert the result type by looking up db ids C.Type.rtraverse s2cReference typ -loadDeclByHash :: DB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) -loadDeclByHash (C.Reference.Id h i) = do +loadDeclByReference :: DB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) +loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- do hashToObjectId >=> loadObjectById >=> 
decodeDeclElement i $ h diff --git a/codebase2/codebase/U/Codebase/Decl.hs b/codebase2/codebase/U/Codebase/Decl.hs index 349269b915..6f0df95cb1 100644 --- a/codebase2/codebase/U/Codebase/Decl.hs +++ b/codebase2/codebase/U/Codebase/Decl.hs @@ -21,6 +21,7 @@ data DeclType = Data | Effect type Decl v = DeclR TypeRef v type TypeRef = Reference' Text (Maybe Hash) +type Type v = TypeR TypeRef v data Modifier = Structural | Unique Text deriving (Eq, Ord, Show) diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index e9148d5375..07cafb74d9 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} -- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html {-# LANGUAGE DeriveFoldable #-} @@ -37,6 +38,13 @@ data ABT f v r data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a) } deriving (Functor, Foldable, Traversable) +amap :: (Functor f, Foldable f) => (a -> a') -> Term f v a -> Term f v a' +amap f (Term fv a out) = Term fv (f a) $ case out of + Var v -> Var v + Tm fa -> Tm (amap f <$> fa) + Cycle r -> Cycle (amap f r) + Abs v body -> Abs v (amap f body) + vmap :: (Functor f, Foldable f, Ord v') => (v -> v') -> Term f v a -> Term f v' a vmap f (Term _ a out) = case out of Var v -> var a (f v) diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index a9c58bcb2b..96d62f7964 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -35,6 +35,7 @@ import Prelude hiding (readFile, writeFile) import Data.Map (Map) import qualified Data.Map as Map import Control.Applicative (Applicative(liftA2)) +import GHC.Word (Word64) type Get a = forall m. 
MonadGet m => m a @@ -194,6 +195,9 @@ lookupFramedArray getA index = do skip (Vector.unsafeIndex offsets index) Just <$> getA +lengthFramedArray :: MonadGet m => m Word64 +lengthFramedArray = getVarInt + unsafeFramedArrayLookup :: MonadGet m => m a -> Int -> m a unsafeFramedArrayLookup getA index = do offsets <- getVector getVarInt diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index d68cf743bb..80cedd8363 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE ViewPatterns #-} {-# LANGUAGE ScopedTypeVariables #-} module Unison.Codebase.SqliteCodebase where @@ -5,52 +6,38 @@ module Unison.Codebase.SqliteCodebase where -- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) import Control.Monad.Reader (ReaderT (runReaderT)) -import qualified Data.ByteString.Short as SBS import Data.Set (Set) import Data.Text (Text) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite import System.FilePath (()) -import qualified U.Codebase.Decl as V2.Decl -import qualified U.Codebase.Reference as V2 -import qualified U.Codebase.Reference as V2.Reference -import qualified U.Codebase.Referent as V2 -import qualified U.Codebase.Sqlite.Symbol as V2 -import qualified U.Codebase.Term as V2.Term -import qualified U.Core.ABT as V2.ABT -import qualified U.Util.Hash as V2 -import qualified U.Util.Hash as V2.Hash -import qualified Unison.ABT as V1.ABT +import qualified U.Codebase.Reference as C.Reference +import qualified U.Codebase.Sqlite.Operations as Ops import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 import Unison.Codebase.Branch (Branch) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import 
Unison.Codebase.SqliteCodebase.Conversions (decltype2to1, hash1to2, term2to1) import Unison.Codebase.SyncMode (SyncMode) -import qualified Unison.ConstructorType as CT import Unison.DataDeclaration (Decl) -import Unison.Hash (Hash) -import qualified Unison.Hash as V1 import Unison.Parser (Ann) -import qualified Unison.Parser as Ann -import qualified Unison.Pattern as P import Unison.Prelude (MaybeT (runMaybeT)) import Unison.Reference (Reference) import qualified Unison.Reference as Reference -import qualified Unison.Reference as V1 -import qualified Unison.Reference as V1.Reference import qualified Unison.Referent as Referent -import qualified Unison.Referent as V1 import Unison.ShortHash (ShortHash) import Unison.Symbol (Symbol) -import qualified Unison.Symbol as V1 import Unison.Term (Term) -import qualified Unison.Term as V1.Term import Unison.Type (Type) -import qualified Unison.Type as V1.Type import qualified Unison.UnisonFile as UF -import qualified Unison.Var as Var +import Unison.Hash (Hash) +import qualified Unison.ConstructorType as CT +import qualified Unison.Builtin as Builtins +import qualified Data.Map as Map +import Control.Monad.Trans.Maybe (MaybeT(MaybeT)) +import U.Codebase.Sqlite.Queries (DB) sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do @@ -81,8 +68,20 @@ sqliteCodebase root = do branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) - getTerm (Reference.Id _h _i _n) = error "todo" - -- runDB . fmap (term2to1 h) $ Ops.loadTermByReference (C.Reference.Id h i) + getTerm (Reference.Id h1@(hash1to2 -> h2) i _n) = + runDB conn $ do + term2 <- Ops.loadTermByReference (C.Reference.Id h2 i) + term2to1 h1 getCycleLen getDeclType term2 + + getCycleLen :: DB m => Hash -> MaybeT m Reference.Size + getCycleLen = Ops.getCycleLen . 
hash1to2 + getDeclType :: DB m => C.Reference.Reference -> MaybeT m CT.ConstructorType + getDeclType = \case + C.Reference.ReferenceBuiltin t -> + MaybeT (pure $ Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType) + C.Reference.ReferenceDerived i -> getDeclTypeById i + getDeclTypeById :: DB m => C.Reference.Id -> MaybeT m CT.ConstructorType + getDeclTypeById = fmap decltype2to1 . Ops.getDeclTypeByReference getTypeOfTermImpl = error "todo" getTypeDeclaration = error "todo" putTerm = error "todo" @@ -141,123 +140,5 @@ sqliteCodebase root = do -- x :: DB m => MaybeT m (Term Symbol) -> MaybeT m (Term Symbol Ann) -- x = error "not implemented" -term2to1 :: forall m. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.Term V2.Symbol -> m (V1.Term.Term V1.Symbol Ann) -term2to1 h lookupSize lookupCT tm = - V1.ABT.transformM (termF2to1 h lookupSize lookupCT) - . V1.ABT.vmap symbol2to1 - . V1.ABT.amap (const Ann.External) - $ abt2to1 tm - -symbol2to1 :: V2.Symbol -> V1.Symbol -symbol2to1 (V2.Symbol w t) = V1.Symbol w (Var.User t) - -abt2to1 :: Functor f => V2.ABT.Term f v a -> V1.ABT.Term f v a -abt2to1 (V2.ABT.Term fv a out) = V1.ABT.Term fv a (go out) - where - go = \case - V2.ABT.Cycle body -> V1.ABT.Cycle (abt2to1 body) - V2.ABT.Abs v body -> V1.ABT.Abs v (abt2to1 body) - V2.ABT.Var v -> V1.ABT.Var v - V2.ABT.Tm tm -> V1.ABT.Tm (abt2to1 <$> tm) - -abt1to2 :: Functor f => V1.ABT.Term f v a -> V2.ABT.Term f v a -abt1to2 (V1.ABT.Term fv a out) = V2.ABT.Term fv a (go out) - where - go = \case - V1.ABT.Cycle body -> V2.ABT.Cycle (abt1to2 body) - V1.ABT.Abs v body -> V2.ABT.Abs v (abt1to2 body) - V1.ABT.Var v -> V2.ABT.Var v - V1.ABT.Tm tm -> V2.ABT.Tm (abt1to2 <$> tm) - -rreference2to1 :: Applicative m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Reference' Text (Maybe V2.Hash) -> m V1.Reference -rreference2to1 h lookupSize = \case - V2.ReferenceBuiltin t -> pure $ V1.Reference.Builtin t - V2.ReferenceDerived i 
-> V1.Reference.DerivedId <$> rreferenceid2to1 h lookupSize i - -rreferenceid2to1 :: Functor m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Reference.Id' (Maybe V2.Hash) -> m V1.Reference.Id -rreferenceid2to1 h lookupSize (V2.Reference.Id oh i) = - V1.Reference.Id h' i <$> lookupSize h' - where - h' = maybe h hash2to1 oh - -reference2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> V2.Reference -> m V1.Reference -reference2to1 lookupSize = \case - V2.ReferenceBuiltin t -> pure $ V1.Reference.Builtin t - V2.ReferenceDerived i -> V1.Reference.DerivedId <$> referenceid2to1 lookupSize i - -referenceid2to1 :: Functor m => (Hash -> m V1.Reference.Size) -> V2.Reference.Id -> m V1.Reference.Id -referenceid2to1 lookupSize (V2.Reference.Id h i) = - V1.Reference.Id sh i <$> lookupSize sh - where - sh = hash2to1 h - -rreferent2to1 :: Applicative m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.ReferentH -> m V1.Referent -rreferent2to1 h lookupSize lookupCT = \case - V2.Ref r -> V1.Ref <$> rreference2to1 h lookupSize r - V2.Con r i -> V1.Con <$> reference2to1 lookupSize r <*> pure (fromIntegral i) <*> lookupCT r - -hash2to1 :: V2.Hash.Hash -> Hash -hash2to1 (V2.Hash.Hash sbs) = V1.Hash (SBS.fromShort sbs) - -ttype2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> V2.Term.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) -ttype2to1 = undefined - -dtype2to1 :: Monad m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Decl.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) -dtype2to1 = undefined - -termF2to1 :: forall m a. 
Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) -termF2to1 h lookupSize lookupCT = go - where - go :: V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) - go = \case - V2.Term.Int i -> pure $ V1.Term.Int i - V2.Term.Nat n -> pure $ V1.Term.Nat n - V2.Term.Float d -> pure $ V1.Term.Float d - V2.Term.Boolean b -> pure $ V1.Term.Boolean b - V2.Term.Text t -> pure $ V1.Term.Text t - V2.Term.Char c -> pure $ V1.Term.Char c - V2.Term.Ref r -> V1.Term.Ref <$> rreference2to1 h lookupSize r - V2.Term.Constructor r i -> - V1.Term.Constructor <$> reference2to1 lookupSize r <*> pure (fromIntegral i) - V2.Term.Request r i -> - V1.Term.Request <$> reference2to1 lookupSize r <*> pure (fromIntegral i) - V2.Term.Handle a a4 -> pure $ V1.Term.Handle a a4 - V2.Term.App a a4 -> pure $ V1.Term.App a a4 - V2.Term.Ann a t2 -> V1.Term.Ann a <$> ttype2to1 lookupSize t2 - V2.Term.Sequence sa -> pure $ V1.Term.Sequence sa - V2.Term.If a a4 a5 -> pure $ V1.Term.If a a4 a5 - V2.Term.And a a4 -> pure $ V1.Term.And a a4 - V2.Term.Or a a4 -> pure $ V1.Term.Or a a4 - V2.Term.Lam a -> pure $ V1.Term.Lam a - V2.Term.LetRec as a -> pure $ V1.Term.LetRec False as a - V2.Term.Let a a4 -> pure $ V1.Term.Let False a a4 - V2.Term.Match a cases -> V1.Term.Match a <$> traverse goCase cases - V2.Term.TermLink rr -> V1.Term.TermLink <$> rreferent2to1 h lookupSize lookupCT rr - V2.Term.TypeLink r -> V1.Term.TypeLink <$> reference2to1 lookupSize r - goCase = \case - V2.Term.MatchCase pat cond body -> - V1.Term.MatchCase <$> (goPat pat) <*> pure cond <*> pure body - goPat = \case - V2.Term.PUnbound -> pure $ P.Unbound a - V2.Term.PVar -> pure $ P.Var a - V2.Term.PBoolean b -> pure $ P.Boolean a b - V2.Term.PInt i -> pure $ P.Int a i - V2.Term.PNat n -> pure $ P.Nat a n - V2.Term.PFloat d -> pure $ P.Float a d - V2.Term.PText t -> pure $ P.Text a t - V2.Term.PChar c -> pure $ P.Char a c - 
V2.Term.PConstructor r i ps -> - P.Constructor a <$> reference2to1 lookupSize r <*> pure i <*> (traverse goPat ps) - V2.Term.PAs p -> P.As a <$> goPat p - V2.Term.PEffectPure p -> P.EffectPure a <$> goPat p - V2.Term.PEffectBind r i ps p -> P.EffectBind a <$> reference2to1 lookupSize r <*> pure i <*> traverse goPat ps <*> goPat p - V2.Term.PSequenceLiteral ps -> P.SequenceLiteral a <$> traverse goPat ps - V2.Term.PSequenceOp p1 op p2 -> P.SequenceOp a <$> goPat p1 <*> pure (goOp op) <*> goPat p2 - goOp = \case - V2.Term.PCons -> P.Cons - V2.Term.PSnoc -> P.Snoc - V2.Term.PConcat -> P.Concat - a = Ann.External - runDB :: Connection -> MaybeT (ReaderT Connection IO) a -> IO (Maybe a) runDB conn action = flip runReaderT conn $ runMaybeT action diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs new file mode 100644 index 0000000000..8d81ac65fe --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -0,0 +1,233 @@ +module Unison.Codebase.SqliteCodebase.Conversions where + +import qualified Data.ByteString.Short as SBS +import Data.Text (Text) +import qualified U.Codebase.Decl as V2.Decl +import qualified U.Codebase.Kind as V2.Kind +import qualified U.Codebase.Reference as V2 +import qualified U.Codebase.Reference as V2.Reference +import qualified U.Codebase.Referent as V2 +import qualified U.Codebase.Sqlite.Symbol as V2 +import qualified U.Codebase.Term as V2.Term +import qualified U.Codebase.Type as V2.Type +import qualified U.Core.ABT as V2.ABT +import qualified U.Util.Hash as V2 +import qualified U.Util.Hash as V2.Hash +import qualified Unison.ABT as V1.ABT +import qualified Unison.ConstructorType as CT +import Unison.Hash (Hash) +import qualified Unison.Hash as V1 +import qualified Unison.Kind as V1.Kind +import Unison.Parser (Ann) +import qualified Unison.Parser as Ann +import qualified Unison.Pattern as P +import qualified 
Unison.Reference as V1 +import qualified Unison.Reference as V1.Reference +import qualified Unison.Referent as V1 +import qualified Unison.Symbol as V1 +import qualified Unison.Term as V1.Term +import qualified Unison.Type as V1.Type +import qualified Unison.Var as Var + +decltype2to1 :: V2.Decl.DeclType -> CT.ConstructorType +decltype2to1 = \case + V2.Decl.Data -> CT.Data + V2.Decl.Effect -> CT.Effect + +decltype1to2 :: CT.ConstructorType -> V2.Decl.DeclType +decltype1to2 = \case + CT.Data -> V2.Decl.Data + CT.Effect -> V2.Decl.Effect + +term2to1 :: forall m. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.Term V2.Symbol -> m (V1.Term.Term V1.Symbol Ann) +term2to1 h lookupSize lookupCT tm = + V1.ABT.transformM (termF2to1 h lookupSize lookupCT) + . V1.ABT.vmap symbol2to1 + . V1.ABT.amap (const Ann.External) + $ abt2to1 tm + +symbol2to1 :: V2.Symbol -> V1.Symbol +symbol2to1 (V2.Symbol i t) = V1.Symbol i (Var.User t) + +symbol1to2 :: V1.Symbol -> V2.Symbol +symbol1to2 (V1.Symbol i (Var.User t)) = V2.Symbol i t +symbol1to2 x = error $ "unimplemented: symbol1to2 " ++ show x + +abt2to1 :: Functor f => V2.ABT.Term f v a -> V1.ABT.Term f v a +abt2to1 (V2.ABT.Term fv a out) = V1.ABT.Term fv a (go out) + where + go = \case + V2.ABT.Cycle body -> V1.ABT.Cycle (abt2to1 body) + V2.ABT.Abs v body -> V1.ABT.Abs v (abt2to1 body) + V2.ABT.Var v -> V1.ABT.Var v + V2.ABT.Tm tm -> V1.ABT.Tm (abt2to1 <$> tm) + +abt1to2 :: Functor f => V1.ABT.Term f v a -> V2.ABT.Term f v a +abt1to2 (V1.ABT.Term fv a out) = V2.ABT.Term fv a (go out) + where + go = \case + V1.ABT.Cycle body -> V2.ABT.Cycle (abt1to2 body) + V1.ABT.Abs v body -> V2.ABT.Abs v (abt1to2 body) + V1.ABT.Var v -> V2.ABT.Var v + V1.ABT.Tm tm -> V2.ABT.Tm (abt1to2 <$> tm) + +rreference2to1 :: Applicative m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Reference' Text (Maybe V2.Hash) -> m V1.Reference +rreference2to1 h lookupSize = \case + V2.ReferenceBuiltin t -> pure $ 
V1.Reference.Builtin t + V2.ReferenceDerived i -> V1.Reference.DerivedId <$> rreferenceid2to1 h lookupSize i + +rreference1to2 :: Hash -> V1.Reference -> V2.Reference' Text (Maybe V2.Hash) +rreference1to2 h = \case + V1.Reference.Builtin t -> V2.ReferenceBuiltin t + V1.Reference.DerivedId i -> V2.ReferenceDerived (rreferenceid1to2 h i) + +rreferenceid2to1 :: Functor m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Reference.Id' (Maybe V2.Hash) -> m V1.Reference.Id +rreferenceid2to1 h lookupSize (V2.Reference.Id oh i) = + V1.Reference.Id h' i <$> lookupSize h' + where + h' = maybe h hash2to1 oh + +rreferenceid1to2 :: Hash -> V1.Reference.Id -> V2.Reference.Id' (Maybe V2.Hash) +rreferenceid1to2 h (V1.Reference.Id h' i _n) = V2.Reference.Id oh i + where + oh = if h == h' then Nothing else Just (hash1to2 h') + +hash1to2 :: Hash -> V2.Hash +hash1to2 (V1.Hash bs) = V2.Hash.Hash (SBS.toShort bs) + +reference2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> V2.Reference -> m V1.Reference +reference2to1 lookupSize = \case + V2.ReferenceBuiltin t -> pure $ V1.Reference.Builtin t + V2.ReferenceDerived i -> V1.Reference.DerivedId <$> referenceid2to1 lookupSize i + +reference1to2 :: V1.Reference -> V2.Reference +reference1to2 = \case + V1.Reference.Builtin t -> V2.ReferenceBuiltin t + V1.Reference.DerivedId i -> V2.ReferenceDerived (referenceid1to2 i) + +referenceid1to2 :: V1.Reference.Id -> V2.Reference.Id +referenceid1to2 (V1.Reference.Id h i _n) = V2.Reference.Id (hash1to2 h) i + +referenceid2to1 :: Functor m => (Hash -> m V1.Reference.Size) -> V2.Reference.Id -> m V1.Reference.Id +referenceid2to1 lookupSize (V2.Reference.Id h i) = + V1.Reference.Id sh i <$> lookupSize sh + where + sh = hash2to1 h + +rreferent2to1 :: Applicative m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.ReferentH -> m V1.Referent +rreferent2to1 h lookupSize lookupCT = \case + V2.Ref r -> V1.Ref <$> rreference2to1 h lookupSize r + V2.Con r i -> V1.Con 
<$> reference2to1 lookupSize r <*> pure (fromIntegral i) <*> lookupCT r + +rreferent1to2 :: Hash -> V1.Referent -> V2.ReferentH +rreferent1to2 h = \case + V1.Ref r -> V2.Ref (rreference1to2 h r) + V1.Con r i _ct -> V2.Con (reference1to2 r) (fromIntegral i) + +hash2to1 :: V2.Hash.Hash -> Hash +hash2to1 (V2.Hash.Hash sbs) = V1.Hash (SBS.fromShort sbs) + +ttype2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> V2.Term.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) +ttype2to1 lookupSize = type2to1' (reference2to1 lookupSize) + +dtype2to1 :: Monad m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Decl.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) +dtype2to1 h lookupSize = type2to1' (rreference2to1 h lookupSize) + +type2to1' :: Monad m => (r -> m V1.Reference) -> V2.Type.TypeR r V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) +type2to1' convertRef = + V1.ABT.transformM (typeF2to1 convertRef) + . V1.ABT.vmap symbol2to1 + . V1.ABT.amap (const Ann.External) + . abt2to1 + where + typeF2to1 :: Applicative m => (r -> m V1.Reference) -> V2.Type.F' r a -> m (V1.Type.F a) + typeF2to1 convertRef = \case + V2.Type.Ref r -> V1.Type.Ref <$> convertRef r + V2.Type.Arrow i o -> pure $ V1.Type.Arrow i o + V2.Type.Ann a k -> pure $ V1.Type.Ann a (convertKind k) + V2.Type.App f x -> pure $ V1.Type.App f x + V2.Type.Effect e b -> pure $ V1.Type.Effect e b + V2.Type.Effects as -> pure $ V1.Type.Effects as + V2.Type.Forall a -> pure $ V1.Type.Forall a + V2.Type.IntroOuter a -> pure $ V1.Type.IntroOuter a + where + convertKind = \case + V2.Kind.Star -> V1.Kind.Star + V2.Kind.Arrow i o -> V1.Kind.Arrow (convertKind i) (convertKind o) + +type1to2' :: (V1.Reference -> r) -> V1.Type.Type V1.Symbol a -> V2.Type.TypeR r V2.Symbol +type1to2' convertRef = + V2.ABT.transform (typeF1to2' convertRef) + . V2.ABT.vmap symbol1to2 + . V2.ABT.amap (const ()) + . 
abt1to2 + +typeF1to2' :: (V1.Reference -> r) -> V1.Type.F a -> V2.Type.F' r a +typeF1to2' convertRef = \case + V1.Type.Ref r -> V2.Type.Ref (convertRef r) + V1.Type.Arrow i o -> V2.Type.Arrow i o + V1.Type.Ann a k -> V2.Type.Ann a (convertKind k) + V1.Type.App f x -> V2.Type.App f x + V1.Type.Effect e b -> V2.Type.Effect e b + V1.Type.Effects as -> V2.Type.Effects as + V1.Type.Forall a -> V2.Type.Forall a + V1.Type.IntroOuter a -> V2.Type.IntroOuter a + where + convertKind = \case + V1.Kind.Star -> V2.Kind.Star + V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) + + +termF2to1 :: forall m a. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) +termF2to1 h lookupSize lookupCT = go + where + go :: V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) + go = \case + V2.Term.Int i -> pure $ V1.Term.Int i + V2.Term.Nat n -> pure $ V1.Term.Nat n + V2.Term.Float d -> pure $ V1.Term.Float d + V2.Term.Boolean b -> pure $ V1.Term.Boolean b + V2.Term.Text t -> pure $ V1.Term.Text t + V2.Term.Char c -> pure $ V1.Term.Char c + V2.Term.Ref r -> V1.Term.Ref <$> rreference2to1 h lookupSize r + V2.Term.Constructor r i -> + V1.Term.Constructor <$> reference2to1 lookupSize r <*> pure (fromIntegral i) + V2.Term.Request r i -> + V1.Term.Request <$> reference2to1 lookupSize r <*> pure (fromIntegral i) + V2.Term.Handle a a4 -> pure $ V1.Term.Handle a a4 + V2.Term.App a a4 -> pure $ V1.Term.App a a4 + V2.Term.Ann a t2 -> V1.Term.Ann a <$> ttype2to1 lookupSize t2 + V2.Term.Sequence sa -> pure $ V1.Term.Sequence sa + V2.Term.If a a4 a5 -> pure $ V1.Term.If a a4 a5 + V2.Term.And a a4 -> pure $ V1.Term.And a a4 + V2.Term.Or a a4 -> pure $ V1.Term.Or a a4 + V2.Term.Lam a -> pure $ V1.Term.Lam a + V2.Term.LetRec as a -> pure $ V1.Term.LetRec False as a + V2.Term.Let a a4 -> pure $ V1.Term.Let False a a4 + V2.Term.Match a cases -> V1.Term.Match a <$> traverse goCase cases + 
V2.Term.TermLink rr -> V1.Term.TermLink <$> rreferent2to1 h lookupSize lookupCT rr + V2.Term.TypeLink r -> V1.Term.TypeLink <$> reference2to1 lookupSize r + goCase = \case + V2.Term.MatchCase pat cond body -> + V1.Term.MatchCase <$> (goPat pat) <*> pure cond <*> pure body + goPat = \case + V2.Term.PUnbound -> pure $ P.Unbound a + V2.Term.PVar -> pure $ P.Var a + V2.Term.PBoolean b -> pure $ P.Boolean a b + V2.Term.PInt i -> pure $ P.Int a i + V2.Term.PNat n -> pure $ P.Nat a n + V2.Term.PFloat d -> pure $ P.Float a d + V2.Term.PText t -> pure $ P.Text a t + V2.Term.PChar c -> pure $ P.Char a c + V2.Term.PConstructor r i ps -> + P.Constructor a <$> reference2to1 lookupSize r <*> pure i <*> (traverse goPat ps) + V2.Term.PAs p -> P.As a <$> goPat p + V2.Term.PEffectPure p -> P.EffectPure a <$> goPat p + V2.Term.PEffectBind r i ps p -> P.EffectBind a <$> reference2to1 lookupSize r <*> pure i <*> traverse goPat ps <*> goPat p + V2.Term.PSequenceLiteral ps -> P.SequenceLiteral a <$> traverse goPat ps + V2.Term.PSequenceOp p1 op p2 -> P.SequenceOp a <$> goPat p1 <*> pure (goOp op) <*> goPat p2 + goOp = \case + V2.Term.PCons -> P.Cons + V2.Term.PSnoc -> P.Snoc + V2.Term.PConcat -> P.Concat + a = Ann.External diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 32234f075d..3d8eb7ecba 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -97,6 +97,7 @@ library Unison.Codebase.Serialization.V1 Unison.Codebase.ShortBranchHash Unison.Codebase.SqliteCodebase + Unison.Codebase.SqliteCodebase.Conversions Unison.Codebase.SyncMode Unison.Codebase.TermEdit Unison.Codebase.TranscriptParser From cc9f6250b14e5036b92c7b77c4f4631afa8a2adf Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 22 Oct 2020 22:27:23 -0400 Subject: [PATCH 029/225] Q.objectIdByBase32Prefix --- .../U/Codebase/Sqlite/Operations.hs | 2 +- .../U/Codebase/Sqlite/Queries.hs | 38 
+++- codebase2/codebase/U/Codebase/ShortHash.hs | 8 + .../src/Unison/Codebase/SqliteCodebase.hs | 39 ++-- .../Codebase/SqliteCodebase/Conversions.hs | 187 +++++++++++------- 5 files changed, 175 insertions(+), 99 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 48a3539727..69e4ceafb8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -146,7 +146,7 @@ loadTypeOfTermByTermReference r = do loadDeclByReference :: DB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob - (localIds, C.Decl.DataDeclaration dt m b ct) <- do + (localIds, C.Decl.DataDeclaration dt m b ct) <- hashToObjectId >=> loadObjectById >=> decodeDeclElement i $ h -- look up the text and hashes that are used by the term diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index b7726acdeb..2f4240dd07 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -21,22 +21,21 @@ import Data.String.Here.Uninterpolated (here) import Data.Text (Text) import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple (SQLData, (:.) 
(..), Connection, FromRow, Only (..), ToRow (..)) -import Database.SQLite.Simple.FromField -import Database.SQLite.Simple.ToField +import Database.SQLite.Simple.FromField ( FromField ) +import Database.SQLite.Simple.ToField ( ToField ) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent -import U.Codebase.Sqlite.ObjectType +import U.Codebase.Sqlite.ObjectType ( ObjectType ) import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hashable (Hashable) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash -import U.Codebase.Sqlite.DbId +import U.Codebase.Sqlite.DbId ( HashId(..), ObjectId(..), TextId ) import U.Codebase.Reference (Reference') -- * types type DB m = (MonadIO m, MonadReader Connection m) - newtype TypeId = TypeId ObjectId deriving (FromField, ToField) via ObjectId newtype TermId = TermCycleId ObjectId deriving (FromField, ToField) via ObjectId newtype DeclId = DeclCycleId ObjectId deriving (FromField, ToField) via ObjectId @@ -47,6 +46,7 @@ newtype NamespaceHashId = NamespaceHashId ObjectId deriving (Hashable, FromField -- type DerivedReferent = Referent.Id ObjectId ObjectId -- type DerivedReference = Reference.Id ObjectId type TypeHashReference = Reference' TextId HashId + -- * main squeeze saveHash :: DB m => Base32Hex -> m HashId @@ -231,12 +231,30 @@ addToDependentsIndex dependency dependent = execute sql (dependency :. dependent getDependentsForDependency :: DB m => Reference.Reference -> m [Reference.Id] getDependentsForDependency dependency = query sql dependency where sql = [here| - SELECT FROM dependents_index ( - dependent_object_id, dependent_component_index - ) WHERE dependency_builtin = ? - AND dependency_object_id = ? - AND dependency_component_index = ? + SELECT dependent_object_id, dependent_component_index + FROM dependents_index + WHERE dependency_builtin = ? + AND dependency_object_id = ? + AND dependency_component_index = ? 
+|] + +objectIdByBase32Prefix :: DB m => ObjectType -> Text -> m [ObjectId] +objectIdByBase32Prefix objType prefix = queryList sql (objType, prefix <> "%") where sql = [here| + SELECT object.id FROM object + INNER JOIN hash_object ON hash_object.object_id = object.id + INNER JOIN hash ON hash_object.hash_id = hash.id + WHERE object.type_id = ? + AND hash.base32 LIKE ? |] +-- alternatively +-- [here| +-- SELECT object.id +-- FROM object, hash, hash_object +-- WHERE object.id = hash_object.object_id +-- AND hash.id = hash_object.hash_id +-- AND object.type_id = ? +-- AND hash.base32 LIKE ? +-- |] -- * helper functions queryList :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f [b] diff --git a/codebase2/codebase/U/Codebase/ShortHash.hs b/codebase2/codebase/U/Codebase/ShortHash.hs index 548c6d9166..e5baed834e 100644 --- a/codebase2/codebase/U/Codebase/ShortHash.hs +++ b/codebase2/codebase/U/Codebase/ShortHash.hs @@ -7,6 +7,14 @@ module U.Codebase.ShortHash where import Data.Text (Text) import Data.Word (Word64) + +-- ##Text.++ +-- ^^^^^^^-- builtin + +-- #abc123.a#0 +-- ^ ^ ^-cid +-- | \-cycle +-- \-- prefix data ShortHash = Builtin Text | ShortHash { prefix :: Text, cycle :: Maybe Word64, cid :: Maybe Word64 } diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 80cedd8363..6a8d722863 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -19,7 +19,7 @@ import Unison.Codebase.Branch (Branch) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import Unison.Codebase.SqliteCodebase.Conversions (decltype2to1, hash1to2, term2to1) +import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv import Unison.Codebase.SyncMode (SyncMode) import Unison.DataDeclaration (Decl) import Unison.Parser 
(Ann) @@ -42,11 +42,7 @@ import U.Codebase.Sqlite.Queries (DB) sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do conn :: Sqlite.Connection <- Sqlite.open $ root "v2" "unison.sqlite3" - let getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) - getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) - putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () - getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) + let getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) putRootBranch :: Branch IO -> IO () rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) @@ -68,24 +64,41 @@ sqliteCodebase root = do branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) - getTerm (Reference.Id h1@(hash1to2 -> h2) i _n) = - runDB conn $ do + getTerm (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = + runDB conn do term2 <- Ops.loadTermByReference (C.Reference.Id h2 i) - term2to1 h1 getCycleLen getDeclType term2 + Cv.term2to1 h1 getCycleLen getDeclType term2 getCycleLen :: DB m => Hash -> MaybeT m Reference.Size - getCycleLen = Ops.getCycleLen . hash1to2 + getCycleLen = Ops.getCycleLen . Cv.hash1to2 + getDeclType :: DB m => C.Reference.Reference -> MaybeT m CT.ConstructorType getDeclType = \case C.Reference.ReferenceBuiltin t -> MaybeT (pure $ Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType) C.Reference.ReferenceDerived i -> getDeclTypeById i + getDeclTypeById :: DB m => C.Reference.Id -> MaybeT m CT.ConstructorType - getDeclTypeById = fmap decltype2to1 . Ops.getDeclTypeByReference - getTypeOfTermImpl = error "todo" - getTypeDeclaration = error "todo" + getDeclTypeById = fmap Cv.decltype2to1 . 
Ops.getDeclTypeByReference + + getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) + getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = + runDB conn do + type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) + Cv.ttype2to1 getCycleLen type2 + + getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) + getTypeDeclaration (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = + runDB conn do + decl2 <- Ops.loadDeclByReference (C.Reference.Id h2 i) + Cv.decl2to1 h1 getCycleLen decl2 + + putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () putTerm = error "todo" + + putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () putTypeDeclaration = error "todo" + getRootBranch = error "todo" putRootBranch = error "todo" rootBranchUpdates = error "todo" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 8d81ac65fe..4c4a9fc8c3 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -1,7 +1,7 @@ module Unison.Codebase.SqliteCodebase.Conversions where import qualified Data.ByteString.Short as SBS -import Data.Text (Text) +import Data.Text (Text, pack) import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Kind as V2.Kind import qualified U.Codebase.Reference as V2 @@ -15,6 +15,7 @@ import qualified U.Util.Hash as V2 import qualified U.Util.Hash as V2.Hash import qualified Unison.ABT as V1.ABT import qualified Unison.ConstructorType as CT +import qualified Unison.DataDeclaration as V1.Decl import Unison.Hash (Hash) import qualified Unison.Hash as V1 import qualified Unison.Kind as V1.Kind @@ -36,8 +37,8 @@ decltype2to1 = \case decltype1to2 :: CT.ConstructorType -> V2.Decl.DeclType decltype1to2 = \case - CT.Data -> V2.Decl.Data - CT.Effect -> V2.Decl.Effect + CT.Data -> V2.Decl.Data + CT.Effect -> 
V2.Decl.Effect term2to1 :: forall m. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.Term V2.Symbol -> m (V1.Term.Term V1.Symbol Ann) term2to1 h lookupSize lookupCT tm = @@ -45,6 +46,91 @@ term2to1 h lookupSize lookupCT tm = . V1.ABT.vmap symbol2to1 . V1.ABT.amap (const Ann.External) $ abt2to1 tm + where + termF2to1 :: forall m a. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) + termF2to1 h lookupSize lookupCT = go + where + go :: V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) + go = \case + V2.Term.Int i -> pure $ V1.Term.Int i + V2.Term.Nat n -> pure $ V1.Term.Nat n + V2.Term.Float d -> pure $ V1.Term.Float d + V2.Term.Boolean b -> pure $ V1.Term.Boolean b + V2.Term.Text t -> pure $ V1.Term.Text t + V2.Term.Char c -> pure $ V1.Term.Char c + V2.Term.Ref r -> V1.Term.Ref <$> rreference2to1 h lookupSize r + V2.Term.Constructor r i -> + V1.Term.Constructor <$> reference2to1 lookupSize r <*> pure (fromIntegral i) + V2.Term.Request r i -> + V1.Term.Request <$> reference2to1 lookupSize r <*> pure (fromIntegral i) + V2.Term.Handle a a4 -> pure $ V1.Term.Handle a a4 + V2.Term.App a a4 -> pure $ V1.Term.App a a4 + V2.Term.Ann a t2 -> V1.Term.Ann a <$> ttype2to1 lookupSize t2 + V2.Term.Sequence sa -> pure $ V1.Term.Sequence sa + V2.Term.If a a4 a5 -> pure $ V1.Term.If a a4 a5 + V2.Term.And a a4 -> pure $ V1.Term.And a a4 + V2.Term.Or a a4 -> pure $ V1.Term.Or a a4 + V2.Term.Lam a -> pure $ V1.Term.Lam a + V2.Term.LetRec as a -> pure $ V1.Term.LetRec False as a + V2.Term.Let a a4 -> pure $ V1.Term.Let False a a4 + V2.Term.Match a cases -> V1.Term.Match a <$> traverse goCase cases + V2.Term.TermLink rr -> V1.Term.TermLink <$> rreferent2to1 h lookupSize lookupCT rr + V2.Term.TypeLink r -> V1.Term.TypeLink <$> reference2to1 lookupSize r + goCase = \case + V2.Term.MatchCase pat cond body -> + V1.Term.MatchCase 
<$> (goPat pat) <*> pure cond <*> pure body + goPat = \case + V2.Term.PUnbound -> pure $ P.Unbound a + V2.Term.PVar -> pure $ P.Var a + V2.Term.PBoolean b -> pure $ P.Boolean a b + V2.Term.PInt i -> pure $ P.Int a i + V2.Term.PNat n -> pure $ P.Nat a n + V2.Term.PFloat d -> pure $ P.Float a d + V2.Term.PText t -> pure $ P.Text a t + V2.Term.PChar c -> pure $ P.Char a c + V2.Term.PConstructor r i ps -> + P.Constructor a <$> reference2to1 lookupSize r <*> pure i <*> (traverse goPat ps) + V2.Term.PAs p -> P.As a <$> goPat p + V2.Term.PEffectPure p -> P.EffectPure a <$> goPat p + V2.Term.PEffectBind r i ps p -> P.EffectBind a <$> reference2to1 lookupSize r <*> pure i <*> traverse goPat ps <*> goPat p + V2.Term.PSequenceLiteral ps -> P.SequenceLiteral a <$> traverse goPat ps + V2.Term.PSequenceOp p1 op p2 -> P.SequenceOp a <$> goPat p1 <*> pure (goOp op) <*> goPat p2 + goOp = \case + V2.Term.PCons -> P.Cons + V2.Term.PSnoc -> P.Snoc + V2.Term.PConcat -> P.Concat + a = Ann.External + +decl2to1 :: Monad m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Decl.Decl V2.Symbol -> m (V1.Decl.Decl V1.Symbol Ann) +decl2to1 h lookupSize (V2.Decl.DataDeclaration dt m bound cts) = + goCT dt + <$> V1.Decl.DataDeclaration (goMod m) Ann.External (symbol2to1 <$> bound) + <$> cts' + where + goMod = \case + V2.Decl.Structural -> V1.Decl.Structural + V2.Decl.Unique t -> V1.Decl.Unique t + goCT = \case + V2.Decl.Data -> Right + V2.Decl.Effect -> Left . V1.Decl.EffectDeclaration + cts' = traverse mkCtor (zip cts [0 ..]) + mkCtor (type1, i) = do + type2 <- dtype2to1 h lookupSize type1 + pure $ (Ann.External, V1.symbol . 
pack $ "Constructor" ++ show i, type2) + +decl1to2 :: Hash -> V1.Decl.Decl V1.Symbol a -> V2.Decl.Decl V2.Symbol +decl1to2 h decl1 = case V1.Decl.asDataDecl decl1 of + V1.Decl.DataDeclaration m _ann bound cts -> + V2.Decl.DataDeclaration + (decltype1to2 $ V1.Decl.constructorType decl1) + (goMod m) + (symbol1to2 <$> bound) + cts' + where + goMod = \case + V1.Decl.Structural -> V2.Decl.Structural + V1.Decl.Unique t -> V2.Decl.Unique t + cts' = [dtype1to2 h t | (_, _, t) <- cts] symbol2to1 :: V2.Symbol -> V1.Symbol symbol2to1 (V2.Symbol i t) = V1.Symbol i (Var.User t) @@ -155,79 +241,30 @@ type2to1' convertRef = V2.Kind.Star -> V1.Kind.Star V2.Kind.Arrow i o -> V1.Kind.Arrow (convertKind i) (convertKind o) +dtype1to2 :: Hash -> V1.Type.Type V1.Symbol a -> V2.Type.TypeD V2.Symbol +dtype1to2 h = type1to2' (rreference1to2 h) + +ttype1to2 :: V1.Type.Type V1.Symbol a -> V2.Type.TypeT V2.Symbol +ttype1to2 = type1to2' reference1to2 + type1to2' :: (V1.Reference -> r) -> V1.Type.Type V1.Symbol a -> V2.Type.TypeR r V2.Symbol type1to2' convertRef = V2.ABT.transform (typeF1to2' convertRef) - . V2.ABT.vmap symbol1to2 - . V2.ABT.amap (const ()) - . abt1to2 - -typeF1to2' :: (V1.Reference -> r) -> V1.Type.F a -> V2.Type.F' r a -typeF1to2' convertRef = \case - V1.Type.Ref r -> V2.Type.Ref (convertRef r) - V1.Type.Arrow i o -> V2.Type.Arrow i o - V1.Type.Ann a k -> V2.Type.Ann a (convertKind k) - V1.Type.App f x -> V2.Type.App f x - V1.Type.Effect e b -> V2.Type.Effect e b - V1.Type.Effects as -> V2.Type.Effects as - V1.Type.Forall a -> V2.Type.Forall a - V1.Type.IntroOuter a -> V2.Type.IntroOuter a + . V2.ABT.vmap symbol1to2 + . V2.ABT.amap (const ()) + . abt1to2 where - convertKind = \case - V1.Kind.Star -> V2.Kind.Star - V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) - - -termF2to1 :: forall m a. 
Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) -termF2to1 h lookupSize lookupCT = go - where - go :: V2.Term.F V2.Symbol a -> m (V1.Term.F V1.Symbol Ann Ann a) - go = \case - V2.Term.Int i -> pure $ V1.Term.Int i - V2.Term.Nat n -> pure $ V1.Term.Nat n - V2.Term.Float d -> pure $ V1.Term.Float d - V2.Term.Boolean b -> pure $ V1.Term.Boolean b - V2.Term.Text t -> pure $ V1.Term.Text t - V2.Term.Char c -> pure $ V1.Term.Char c - V2.Term.Ref r -> V1.Term.Ref <$> rreference2to1 h lookupSize r - V2.Term.Constructor r i -> - V1.Term.Constructor <$> reference2to1 lookupSize r <*> pure (fromIntegral i) - V2.Term.Request r i -> - V1.Term.Request <$> reference2to1 lookupSize r <*> pure (fromIntegral i) - V2.Term.Handle a a4 -> pure $ V1.Term.Handle a a4 - V2.Term.App a a4 -> pure $ V1.Term.App a a4 - V2.Term.Ann a t2 -> V1.Term.Ann a <$> ttype2to1 lookupSize t2 - V2.Term.Sequence sa -> pure $ V1.Term.Sequence sa - V2.Term.If a a4 a5 -> pure $ V1.Term.If a a4 a5 - V2.Term.And a a4 -> pure $ V1.Term.And a a4 - V2.Term.Or a a4 -> pure $ V1.Term.Or a a4 - V2.Term.Lam a -> pure $ V1.Term.Lam a - V2.Term.LetRec as a -> pure $ V1.Term.LetRec False as a - V2.Term.Let a a4 -> pure $ V1.Term.Let False a a4 - V2.Term.Match a cases -> V1.Term.Match a <$> traverse goCase cases - V2.Term.TermLink rr -> V1.Term.TermLink <$> rreferent2to1 h lookupSize lookupCT rr - V2.Term.TypeLink r -> V1.Term.TypeLink <$> reference2to1 lookupSize r - goCase = \case - V2.Term.MatchCase pat cond body -> - V1.Term.MatchCase <$> (goPat pat) <*> pure cond <*> pure body - goPat = \case - V2.Term.PUnbound -> pure $ P.Unbound a - V2.Term.PVar -> pure $ P.Var a - V2.Term.PBoolean b -> pure $ P.Boolean a b - V2.Term.PInt i -> pure $ P.Int a i - V2.Term.PNat n -> pure $ P.Nat a n - V2.Term.PFloat d -> pure $ P.Float a d - V2.Term.PText t -> pure $ P.Text a t - V2.Term.PChar c -> pure $ P.Char a c - 
V2.Term.PConstructor r i ps -> - P.Constructor a <$> reference2to1 lookupSize r <*> pure i <*> (traverse goPat ps) - V2.Term.PAs p -> P.As a <$> goPat p - V2.Term.PEffectPure p -> P.EffectPure a <$> goPat p - V2.Term.PEffectBind r i ps p -> P.EffectBind a <$> reference2to1 lookupSize r <*> pure i <*> traverse goPat ps <*> goPat p - V2.Term.PSequenceLiteral ps -> P.SequenceLiteral a <$> traverse goPat ps - V2.Term.PSequenceOp p1 op p2 -> P.SequenceOp a <$> goPat p1 <*> pure (goOp op) <*> goPat p2 - goOp = \case - V2.Term.PCons -> P.Cons - V2.Term.PSnoc -> P.Snoc - V2.Term.PConcat -> P.Concat - a = Ann.External + typeF1to2' :: (V1.Reference -> r) -> V1.Type.F a -> V2.Type.F' r a + typeF1to2' convertRef = \case + V1.Type.Ref r -> V2.Type.Ref (convertRef r) + V1.Type.Arrow i o -> V2.Type.Arrow i o + V1.Type.Ann a k -> V2.Type.Ann a (convertKind k) + V1.Type.App f x -> V2.Type.App f x + V1.Type.Effect e b -> V2.Type.Effect e b + V1.Type.Effects as -> V2.Type.Effects as + V1.Type.Forall a -> V2.Type.Forall a + V1.Type.IntroOuter a -> V2.Type.IntroOuter a + where + convertKind = \case + V1.Kind.Star -> V2.Kind.Star + V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) From d7f4a501b1f1bcc2b507407c226c17a319a38464 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 23 Oct 2020 12:20:15 -0400 Subject: [PATCH 030/225] {term,decl}ReferencesByPrefix --- .../U/Codebase/Sqlite/Operations.hs | 52 +++++++++++++------ .../src/Unison/Codebase/SqliteCodebase.hs | 17 ++++-- .../Codebase/SqliteCodebase/Conversions.hs | 7 +++ unison-core/src/Unison/Reference.hs | 1 + 4 files changed, 59 insertions(+), 18 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 69e4ceafb8..4c26ebfb3b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -5,8 +5,8 @@ module U.Codebase.Sqlite.Operations where -import 
Control.Monad ((>=>)) -import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) +import Control.Monad (join, (>=>)) +import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) import Data.ByteString (ByteString) @@ -21,11 +21,12 @@ import qualified U.Codebase.Decl as C.Decl import qualified U.Codebase.Reference as C import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Referent as C.Referent -import U.Codebase.ShortHash (ShortBranchHash, ShortHash) +import U.Codebase.ShortHash (ShortBranchHash) import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds (LocalIds) import qualified U.Codebase.Sqlite.LocalIds as LocalIds +import qualified U.Codebase.Sqlite.ObjectType as OT import U.Codebase.Sqlite.Queries (DB) import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Reference as S @@ -39,6 +40,7 @@ import qualified U.Codebase.Type as C.Type import U.Codebase.WatchKind (WatchKind) import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H +import qualified U.Util.Monoid as Monoid import U.Util.Serialization (getFromBytes) import qualified U.Util.Serialization as S @@ -88,8 +90,10 @@ loadHashByObjectId = . m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId decodeComponentLengthOnly :: Applicative f => ByteString -> MaybeT f Word64 -decodeComponentLengthOnly = m' "decodeComponentLengthOnly" - (fmap pure $ getFromBytes S.lengthFramedArray) +decodeComponentLengthOnly = + m' + "decodeComponentLengthOnly" + (fmap pure $ getFromBytes S.lengthFramedArray) decodeTermElement :: Applicative f => Word64 -> ByteString -> MaybeT f (LocalIds, S.Term.Term) decodeTermElement i = @@ -104,9 +108,11 @@ decodeDeclElement i = (pure . 
getFromBytes (S.lookupDeclElement i)) -- * legacy conversion helpers + getCycleLen :: DB m => H.Hash -> MaybeT m Word64 -getCycleLen h = fmap fromIntegral $ - hashToObjectId >=> loadObjectById >=> decodeComponentLengthOnly $ h +getCycleLen h = + fmap fromIntegral $ + hashToObjectId >=> loadObjectById >=> decodeComponentLengthOnly $ h getDeclTypeByReference :: DB m => C.Reference.Id -> MaybeT m C.Decl.DeclType getDeclTypeByReference = fmap C.Decl.declType . loadDeclByReference @@ -181,14 +187,30 @@ termsHavingType = error "todo" termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) termsMentioningType = error "todo" -termReferencesByPrefix :: DB m => ShortHash -> m (Set C.Reference.Id) -termReferencesByPrefix = error "todo" - -typeReferencesByPrefix :: DB m => ShortHash -> m (Set C.Reference.Id) -typeReferencesByPrefix = error "todo" - -termReferentsByPrefix :: DB m => ShortHash -> m (Set C.Referent.Id) -termReferentsByPrefix = error "todo" +componentReferencesByPrefix :: DB m => OT.ObjectType -> Text -> Maybe Word64 -> m (Set C.Reference.Id) +componentReferencesByPrefix ot b32prefix componentIndex = do + oIds :: [Db.ObjectId] <- Q.objectIdByBase32Prefix ot b32prefix + let filteredComponent l = case componentIndex of + Nothing -> l + Just qi -> [x | x@(C.Reference.Id _ i) <- l, i == qi] + fmap Monoid.fromMaybe . runMaybeT $ + Set.fromList . join + <$> traverse (fmap filteredComponent . componentByObjectId) oIds + +termReferencesByPrefix :: DB m => Text -> Maybe Word64 -> m (Set C.Reference.Id) +termReferencesByPrefix = componentReferencesByPrefix OT.TermComponent + +declReferencesByPrefix :: DB m => Text -> Maybe Word64 -> m (Set C.Reference.Id) +declReferencesByPrefix = componentReferencesByPrefix OT.DeclComponent + +componentByObjectId :: DB m => Db.ObjectId -> MaybeT m [C.Reference.Id] +componentByObjectId id = do + len <- loadObjectById id >>= decodeComponentLengthOnly + hash <- loadHashByObjectId id + pure [C.Reference.Id hash i | i <- [0 .. 
len - 1]] + +-- termReferentsByPrefix :: DB m => ShortHash -> m (Set C.Referent.Id) +-- termReferentsByPrefix = error "todo" branchHashesByPrefix :: DB m => ShortBranchHash -> m (Set C.Reference.Id) branchHashesByPrefix = error "todo" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 6a8d722863..0f26d12868 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -38,6 +38,10 @@ import qualified Unison.Builtin as Builtins import qualified Data.Map as Map import Control.Monad.Trans.Maybe (MaybeT(MaybeT)) import U.Codebase.Sqlite.Queries (DB) +import qualified Unison.ShortHash as ShortHash +import qualified Data.Set as Set +import Control.Monad.Trans (MonadTrans(lift)) +import qualified U.Util.Monoid as Monoid sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do @@ -57,8 +61,6 @@ sqliteCodebase root = do termsOfTypeImpl :: Reference -> IO (Set Referent.Id) termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) hashLength :: IO Int - termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) - typeReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) termReferentsByPrefix :: ShortHash -> IO (Set Referent.Id) branchHashLength :: IO Int branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) @@ -114,8 +116,17 @@ sqliteCodebase root = do termsOfTypeImpl = error "todo" termsMentioningTypeImpl = error "todo" hashLength = error "todo" - termReferencesByPrefix = error "todo" + + termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + termReferencesByPrefix (ShortHash.Builtin _) = pure mempty + termReferencesByPrefix (ShortHash.ShortHash prefix cycle _cid) = + Monoid.fromMaybe <$> runDB conn do + refs <- lift $ Ops.termReferencesByPrefix prefix (Cv.shortHashSuffix1to2 <$> cycle) + Set.fromList <$> traverse (Cv.referenceid2to1 getCycleLen) 
(Set.toList refs) + + typeReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) typeReferencesByPrefix = error "todo" + termReferentsByPrefix = error "todo" branchHashLength = error "todo" branchHashesByPrefix = error "todo" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 4c4a9fc8c3..05ecd1907b 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -1,6 +1,7 @@ module Unison.Codebase.SqliteCodebase.Conversions where import qualified Data.ByteString.Short as SBS +import Data.Either (fromRight) import Data.Text (Text, pack) import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Kind as V2.Kind @@ -139,6 +140,12 @@ symbol1to2 :: V1.Symbol -> V2.Symbol symbol1to2 (V1.Symbol i (Var.User t)) = V2.Symbol i t symbol1to2 x = error $ "unimplemented: symbol1to2 " ++ show x +shortHashSuffix1to2 :: Text -> V1.Reference.Pos +shortHashSuffix1to2 = + fst + . fromRight (error "todo: move suffix parsing to frontend") + . 
V1.Reference.readSuffix + abt2to1 :: Functor f => V2.ABT.Term f v a -> V1.ABT.Term f v a abt2to1 (V2.ABT.Term fv a out) = V1.ABT.Term fv a (go out) where diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs index 576aa3850c..b0cbfedffa 100644 --- a/unison-core/src/Unison/Reference.hs +++ b/unison-core/src/Unison/Reference.hs @@ -9,6 +9,7 @@ module Unison.Reference pattern Derived, pattern DerivedId, Id(..), + Pos, Size, derivedBase32Hex, Component, members, From c15a9ce3573eee7cc80336a4071315e6bfbaddf0 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 23 Oct 2020 12:55:01 -0400 Subject: [PATCH 031/225] termReferentsByPrefix --- .../src/Unison/Codebase/SqliteCodebase.hs | 51 ++++++++++++++----- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 0f26d12868..f5d1bc89da 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -23,7 +23,7 @@ import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv import Unison.Codebase.SyncMode (SyncMode) import Unison.DataDeclaration (Decl) import Unison.Parser (Ann) -import Unison.Prelude (MaybeT (runMaybeT)) +import Unison.Prelude (fromMaybe, MaybeT (runMaybeT)) import Unison.Reference (Reference) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent @@ -42,6 +42,11 @@ import qualified Unison.ShortHash as ShortHash import qualified Data.Set as Set import Control.Monad.Trans (MonadTrans(lift)) import qualified U.Util.Monoid as Monoid +import qualified U.Codebase.Sqlite.ObjectType as OT +import Data.Word (Word64) +import qualified U.Codebase.Decl as V2.Decl +import Control.Monad (join) +import qualified Data.Foldable as Foldable sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do @@ -61,7 
+66,6 @@ sqliteCodebase root = do termsOfTypeImpl :: Reference -> IO (Set Referent.Id) termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) hashLength :: IO Int - termReferentsByPrefix :: ShortHash -> IO (Set Referent.Id) branchHashLength :: IO Int branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) @@ -115,20 +119,43 @@ sqliteCodebase root = do appendReflog = error "todo" termsOfTypeImpl = error "todo" termsMentioningTypeImpl = error "todo" - hashLength = error "todo" - termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) - termReferencesByPrefix (ShortHash.Builtin _) = pure mempty - termReferencesByPrefix (ShortHash.ShortHash prefix cycle _cid) = + hashLength = pure 10 + branchHashLength = pure 10 + + defnReferencesByPrefix :: OT.ObjectType -> ShortHash -> IO (Set Reference.Id) + defnReferencesByPrefix _ (ShortHash.Builtin _) = pure mempty + defnReferencesByPrefix ot (ShortHash.ShortHash prefix cycle _cid) = Monoid.fromMaybe <$> runDB conn do - refs <- lift $ Ops.termReferencesByPrefix prefix (Cv.shortHashSuffix1to2 <$> cycle) + refs <- lift $ Ops.componentReferencesByPrefix ot prefix (Cv.shortHashSuffix1to2 <$> cycle) Set.fromList <$> traverse (Cv.referenceid2to1 getCycleLen) (Set.toList refs) - typeReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) - typeReferencesByPrefix = error "todo" + termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + termReferencesByPrefix = defnReferencesByPrefix OT.TermComponent + + declReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + declReferencesByPrefix = defnReferencesByPrefix OT.DeclComponent + + -- this implementation is wrong; it should filter by ctor id if provided + termReferentsByPrefix :: ShortHash -> IO (Set Referent.Id) + termReferentsByPrefix sh = do + terms <- termReferencesByPrefix sh + let termReferents = Set.map Referent.Ref' terms + decls <- declReferencesByPrefix sh + declReferents <- Set.fromList . 
join <$> traverse go (Foldable.toList decls) + pure (termReferents <> declReferents) + where + getDeclCtorCount :: DB m => Reference.Id -> m (CT.ConstructorType, Word64) + getDeclCtorCount (Reference.Id (Cv.hash1to2 -> h2) i _n) = do + -- this is a database integrity error if the decl doesn't exist in the database + decl20 <- runMaybeT $ Ops.loadDeclByReference (C.Reference.Id h2 i) + let decl2 = fromMaybe (error "database integrity error") decl20 + pure (Cv.decltype2to1 $ V2.Decl.declType decl2, + fromIntegral . length $ V2.Decl.constructorTypes decl2) + go rid = do + (ct, ctorCount) <- getDeclCtorCount rid + pure [Referent.Con' rid (fromIntegral cid) ct | cid <- [0..ctorCount - 1]] - termReferentsByPrefix = error "todo" - branchHashLength = error "todo" branchHashesByPrefix = error "todo" let finalizer = Sqlite.close conn pure $ @@ -155,7 +182,7 @@ sqliteCodebase root = do termsMentioningTypeImpl hashLength termReferencesByPrefix - typeReferencesByPrefix + declReferencesByPrefix termReferentsByPrefix branchHashLength branchHashesByPrefix From b3471226316086eed9d84bbb6011615edff4e964 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 23 Oct 2020 13:08:02 -0400 Subject: [PATCH 032/225] getReflog / easy stuff --- .../src/Unison/Codebase/SqliteCodebase.hs | 45 ++++++++++++++++--- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index f5d1bc89da..b082d2fade 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,5 +1,6 @@ {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE OverloadedStrings #-} module Unison.Codebase.SqliteCodebase where @@ -47,6 +48,10 @@ import Data.Word (Word64) import qualified U.Codebase.Decl as V2.Decl import Control.Monad (join) import qualified Data.Foldable as Foldable +import qualified Data.Text.IO as 
TextIO +import qualified Data.Text as Text +import UnliftIO (MonadUnliftIO, catchIO) +import UnliftIO (MonadIO(liftIO)) sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do @@ -61,12 +66,8 @@ sqliteCodebase root = do watches :: UF.WatchKind -> IO [Reference.Id] getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () - getReflog :: IO [Reflog.Entry] - appendReflog :: Text -> Branch IO -> Branch IO -> IO () termsOfTypeImpl :: Reference -> IO (Set Referent.Id) termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) - hashLength :: IO Int - branchHashLength :: IO Int branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) @@ -115,12 +116,38 @@ sqliteCodebase root = do watches = error "todo" getWatch = error "todo" putWatch = error "todo" - getReflog = error "todo" - appendReflog = error "todo" + + getReflog :: IO [Reflog.Entry] + getReflog = + (do contents <- TextIO.readFile (reflogPath root) + let lines = Text.lines contents + let entries = parseEntry <$> lines + pure entries) `catchIO` const (pure []) + where + parseEntry t = fromMaybe (err t) (Reflog.fromText t) + err t = error $ + "I couldn't understand this line in " ++ reflogPath root ++ "\n\n" ++ + Text.unpack t + + appendReflog :: Text -> Branch IO -> Branch IO -> IO () + appendReflog reason old new = + let + t = Reflog.toText $ + Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason + in TextIO.appendFile (reflogPath root) (t <> "\n") + + + reflogPath :: CodebasePath -> FilePath + reflogPath root = root "reflog" + + termsOfTypeImpl = error "todo" termsMentioningTypeImpl = error "todo" + hashLength :: IO Int hashLength = pure 10 + + branchHashLength :: IO Int branchHashLength = pure 10 defnReferencesByPrefix :: OT.ObjectType -> ShortHash -> IO (Set Reference.Id) @@ -152,7 +179,7 @@ sqliteCodebase 
root = do let decl2 = fromMaybe (error "database integrity error") decl20 pure (Cv.decltype2to1 $ V2.Decl.declType decl2, fromIntegral . length $ V2.Decl.constructorTypes decl2) - go rid = do + go rid = runDB' conn do (ct, ctorCount) <- getDeclCtorCount rid pure [Referent.Con' rid (fromIntegral cid) ct | cid <- [0..ctorCount - 1]] @@ -193,3 +220,7 @@ sqliteCodebase root = do runDB :: Connection -> MaybeT (ReaderT Connection IO) a -> IO (Maybe a) runDB conn action = flip runReaderT conn $ runMaybeT action + +runDB' :: Connection -> MaybeT (ReaderT Connection IO) a -> IO a +runDB' conn action = flip runReaderT conn $ fmap err $ runMaybeT action + where err = fromMaybe (error "database consistency error") From 4f36a277da86de73bf4c4127ae52522452314db8 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 23 Oct 2020 18:04:32 -0400 Subject: [PATCH 033/225] prep for buffered component writing --- .../src/Unison/Codebase/SqliteCodebase.hs | 129 ++++++++++++------ 1 file changed, 91 insertions(+), 38 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index b082d2fade..a65a294aae 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,19 +1,35 @@ -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE ViewPatterns #-} module Unison.Codebase.SqliteCodebase where -- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) +import Control.Concurrent.STM +import Control.Monad (join) import Control.Monad.Reader (ReaderT (runReaderT)) +import Control.Monad.Trans (MonadTrans (lift)) +import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) +import qualified Data.Foldable as Foldable +import Data.Map (Map) +import qualified Data.Map as Map import Data.Set (Set) +import qualified Data.Set as Set import 
Data.Text (Text) +import qualified Data.Text as Text +import qualified Data.Text.IO as TextIO +import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite import System.FilePath (()) +import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Reference as C.Reference +import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Operations as Ops +import U.Codebase.Sqlite.Queries (DB) +import qualified U.Util.Monoid as Monoid +import qualified Unison.Builtin as Builtins import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 import Unison.Codebase.Branch (Branch) @@ -22,40 +38,49 @@ import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv import Unison.Codebase.SyncMode (SyncMode) +import qualified Unison.ConstructorType as CT import Unison.DataDeclaration (Decl) +import Unison.Hash (Hash) import Unison.Parser (Ann) -import Unison.Prelude (fromMaybe, MaybeT (runMaybeT)) +import Unison.Prelude (MaybeT (runMaybeT), fromMaybe) import Unison.Reference (Reference) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent import Unison.ShortHash (ShortHash) +import qualified Unison.ShortHash as ShortHash import Unison.Symbol (Symbol) import Unison.Term (Term) import Unison.Type (Type) import qualified Unison.UnisonFile as UF -import Unison.Hash (Hash) -import qualified Unison.ConstructorType as CT -import qualified Unison.Builtin as Builtins -import qualified Data.Map as Map -import Control.Monad.Trans.Maybe (MaybeT(MaybeT)) -import U.Codebase.Sqlite.Queries (DB) -import qualified Unison.ShortHash as ShortHash -import qualified Data.Set as Set -import Control.Monad.Trans (MonadTrans(lift)) -import qualified U.Util.Monoid as Monoid -import qualified U.Codebase.Sqlite.ObjectType as OT -import Data.Word 
(Word64) -import qualified U.Codebase.Decl as V2.Decl -import Control.Monad (join) -import qualified Data.Foldable as Foldable -import qualified Data.Text.IO as TextIO -import qualified Data.Text as Text -import UnliftIO (MonadUnliftIO, catchIO) -import UnliftIO (MonadIO(liftIO)) +import UnliftIO (catchIO) + +-- 1) buffer up the component +-- 2) in the event that the component is complete, then what? +-- * can write component provided all of its dependency components are complete. +-- if dependency not complete, +-- register yourself to be written when that dependency is complete + +-- an entry for a single hash +data BufferEntry a = BufferEntry + { -- First, you are waiting for the cycle to fill up with all elements + -- Then, you check: are all dependencies of the cycle in the db? + -- If yes: write yourself to database and trigger check of dependents + -- If no: just wait, do nothing + beComponentTargetSize :: Maybe Word64, + beComponent :: Map Reference.Pos a, + beMissingDependencies :: Set Hash, + beWaitingDependents :: Set Hash + } + +type TermBufferEntry = BufferEntry (Term Symbol Ann, Type Symbol Ann) + +type DeclBufferEntry = BufferEntry (Decl Symbol Ann) sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do conn :: Sqlite.Connection <- Sqlite.open $ root "v2" "unison.sqlite3" + termBuffer :: TVar (Map Hash TermBufferEntry) <- newTVarIO Map.empty + declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty let getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) putRootBranch :: Branch IO -> IO () rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) @@ -101,7 +126,30 @@ sqliteCodebase root = do Cv.decl2to1 h1 getCycleLen decl2 putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTerm = error "todo" + putTerm r@(Reference.Id h i n) = error "todo" + updateBufferEntry termBuffer h $ \be -> error "todo" + +-- data BufferEntry a = BufferEntry +-- { -- First, you 
are waiting for the cycle to fill up with all elements +-- -- Then, you check: are all dependencies of the cycle in the db? +-- -- If yes: write yourself to database and trigger check of dependents +-- -- If no: just wait, do nothing +-- beComponentTargetSize :: Maybe Word64, +-- beComponent :: Map Reference.Pos a, +-- beMissingDependencies :: Set Hash, +-- beWaitingDependents :: Set Hash +-- } + + updateBufferEntry :: + TVar (Map Hash (BufferEntry a)) -> + Hash -> + -- this signature may need to change + (BufferEntry a -> (BufferEntry a, b)) -> + IO [b] + updateBufferEntry = error "todo" + + tryWriteBuffer :: Hash -> TVar (Map Hash (BufferEntry a)) -> IO () + tryWriteBuffer = error "todo" putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () putTypeDeclaration = error "todo" @@ -119,28 +167,30 @@ sqliteCodebase root = do getReflog :: IO [Reflog.Entry] getReflog = - (do contents <- TextIO.readFile (reflogPath root) + ( do + contents <- TextIO.readFile (reflogPath root) let lines = Text.lines contents let entries = parseEntry <$> lines - pure entries) `catchIO` const (pure []) + pure entries + ) + `catchIO` const (pure []) where parseEntry t = fromMaybe (err t) (Reflog.fromText t) - err t = error $ - "I couldn't understand this line in " ++ reflogPath root ++ "\n\n" ++ - Text.unpack t + err t = + error $ + "I couldn't understand this line in " ++ reflogPath root ++ "\n\n" + ++ Text.unpack t appendReflog :: Text -> Branch IO -> Branch IO -> IO () appendReflog reason old new = - let - t = Reflog.toText $ - Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason - in TextIO.appendFile (reflogPath root) (t <> "\n") - + let t = + Reflog.toText $ + Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason + in TextIO.appendFile (reflogPath root) (t <> "\n") reflogPath :: CodebasePath -> FilePath reflogPath root = root "reflog" - termsOfTypeImpl = error "todo" termsMentioningTypeImpl = error "todo" @@ -177,11 +227,13 @@ sqliteCodebase root = do -- 
this is a database integrity error if the decl doesn't exist in the database decl20 <- runMaybeT $ Ops.loadDeclByReference (C.Reference.Id h2 i) let decl2 = fromMaybe (error "database integrity error") decl20 - pure (Cv.decltype2to1 $ V2.Decl.declType decl2, - fromIntegral . length $ V2.Decl.constructorTypes decl2) + pure + ( Cv.decltype2to1 $ V2.Decl.declType decl2, + fromIntegral . length $ V2.Decl.constructorTypes decl2 + ) go rid = runDB' conn do (ct, ctorCount) <- getDeclCtorCount rid - pure [Referent.Con' rid (fromIntegral cid) ct | cid <- [0..ctorCount - 1]] + pure [Referent.Con' rid (fromIntegral cid) ct | cid <- [0 .. ctorCount - 1]] branchHashesByPrefix = error "todo" let finalizer = Sqlite.close conn @@ -223,4 +275,5 @@ runDB conn action = flip runReaderT conn $ runMaybeT action runDB' :: Connection -> MaybeT (ReaderT Connection IO) a -> IO a runDB' conn action = flip runReaderT conn $ fmap err $ runMaybeT action - where err = fromMaybe (error "database consistency error") + where + err = fromMaybe (error "database consistency error") From 258acb906ce5962636d509326afef6a9cc63a766 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sun, 25 Oct 2020 02:35:26 -0400 Subject: [PATCH 034/225] added some MonadError --- .../U/Codebase/Sqlite/Operations.hs | 211 ++++++++++++------ .../U/Codebase/Sqlite/Queries.hs | 112 ++++++---- .../U/Codebase/Sqlite/Serialization.hs | 4 +- codebase2/codebase/U/Codebase/Reference.hs | 6 +- codebase2/codebase/U/Codebase/Referent.hs | 8 +- .../src/Unison/Codebase/SqliteCodebase.hs | 127 ++++++----- .../Codebase/SqliteCodebase/Conversions.hs | 10 + stack.yaml | 2 +- unison-core/src/Unison/Referent.hs | 6 +- 9 files changed, 304 insertions(+), 182 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 4c26ebfb3b..941710e388 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -1,21 +1,26 @@ {-# LANGUAGE BlockArguments #-} +{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} module U.Codebase.Sqlite.Operations where -import Control.Monad (join, (>=>)) +import Control.Monad (join) +import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) import Data.ByteString (ByteString) +import Data.Bytes.Get (runGetS) import Data.Functor ((<&>)) import Data.Set (Set) import qualified Data.Set as Set import Data.Text (Text) import qualified Data.Vector as Vector import Data.Word (Word64) +import U.Codebase.Decl (ConstructorId) import qualified U.Codebase.Decl as C import qualified U.Codebase.Decl as C.Decl import qualified U.Codebase.Reference as C @@ -41,9 +46,39 @@ import U.Codebase.WatchKind (WatchKind) import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import qualified U.Util.Monoid as Monoid -import U.Util.Serialization (getFromBytes) +import U.Util.Serialization (Get, getFromBytes) import qualified U.Util.Serialization as S +type Err m = MonadError Error m + +type EDB m = (Err m, DB m) + +type ErrString = String + +data DecodeError + = ErrTermElement Word64 + | ErrDeclElement Word64 + | ErrFramedArrayLen + deriving (Show) + +data Error + = DecodeError DecodeError ByteString ErrString + | DatabaseIntegrityError Q.Integrity + | LegacyUnknownCycleLen H.Hash + | LegacyUnknownConstructorType H.Hash C.Reference.Pos + deriving (Show) + +getFromBytesOr :: Err m => DecodeError -> Get a -> ByteString -> m a +getFromBytesOr e get bs = case runGetS get bs of + Left err -> throwError (DecodeError e bs err) + Right a -> pure a + +liftQ :: Err m => ExceptT Q.Integrity m a -> m a +liftQ a = + 
runExceptT a >>= \case + Left e -> throwError (DatabaseIntegrityError e) + Right a -> pure a + loadTermComponentByHash :: DB m => Base32Hex -> m (Maybe [C.Term Symbol]) loadTermComponentByHash = error "todo" @@ -58,72 +93,65 @@ m' msg f a = MaybeT do Nothing -> error $ "nothing: " ++ msg ++ " " ++ show a Just b -> Just b -c2sReference :: DB m => C.Reference -> MaybeT m S.Reference +c2sReference :: EDB m => C.Reference -> MaybeT m S.Reference c2sReference = bitraverse lookupTextId hashToObjectId -s2cReference :: DB m => S.Reference -> MaybeT m C.Reference +s2cReference :: EDB m => S.Reference -> m C.Reference s2cReference = bitraverse loadTextById loadHashByObjectId -c2sReferenceId :: DB m => C.Reference.Id -> MaybeT m S.Reference.Id +c2sReferenceId :: EDB m => C.Reference.Id -> MaybeT m S.Reference.Id c2sReferenceId = C.Reference.idH hashToObjectId -s2cReferenceId :: DB m => S.Reference.Id -> MaybeT m C.Reference.Id +s2cReferenceId :: EDB m => S.Reference.Id -> m C.Reference.Id s2cReferenceId = C.Reference.idH loadHashByObjectId lookupTextId :: DB m => Text -> MaybeT m Db.TextId -lookupTextId = m' "Q.loadText" Q.loadText - -loadTextById :: DB m => Db.TextId -> MaybeT m Text -loadTextById = m' "Q.loadTextById" Q.loadTextById - -hashToObjectId :: DB m => H.Hash -> MaybeT m Db.ObjectId -hashToObjectId = - m' "Q.loadHashId" Q.loadHashId . H.toBase32Hex - >=> m' "Q.objectIdByPrimaryHashId" Q.objectIdByPrimaryHashId - -loadObjectById :: DB m => Db.ObjectId -> MaybeT m ByteString -loadObjectById = m' "Q.loadObjectById" Q.loadObjectById - -loadHashByObjectId :: DB m => Db.ObjectId -> MaybeT m H.Hash -loadHashByObjectId = - fmap H.fromBase32Hex - . 
m' "Q.loadPrimaryHashByObjectId" Q.loadPrimaryHashByObjectId - -decodeComponentLengthOnly :: Applicative f => ByteString -> MaybeT f Word64 -decodeComponentLengthOnly = - m' - "decodeComponentLengthOnly" - (fmap pure $ getFromBytes S.lengthFramedArray) - -decodeTermElement :: Applicative f => Word64 -> ByteString -> MaybeT f (LocalIds, S.Term.Term) -decodeTermElement i = - m' - ("getTermElement: " ++ show i ++ ") fromBytes:") - (fmap pure $ getFromBytes $ S.lookupTermElement i) - -decodeDeclElement :: Applicative f => Word64 -> ByteString -> MaybeT f (LocalIds, S.Decl.Decl Symbol) -decodeDeclElement i = - m' - ("getDeclElement: " ++ show i ++ ") fromBytes:") - (pure . getFromBytes (S.lookupDeclElement i)) +lookupTextId = m Q.loadText + +loadTextById :: EDB m => Db.TextId -> m Text +loadTextById = liftQ . Q.loadTextById + +-- ok to fail +hashToObjectId :: EDB m => H.Hash -> MaybeT m Db.ObjectId +hashToObjectId h = do + hashId <- MaybeT $ Q.loadHashId . H.toBase32Hex $ h + liftQ $ Q.objectIdByPrimaryHashId hashId + +loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash +loadHashByObjectId = fmap H.fromBase32Hex . liftQ . 
Q.loadPrimaryHashByObjectId + +decodeComponentLengthOnly :: Err m => ByteString -> m Word64 +decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen S.lengthFramedArray + +decodeTermElement :: Err m => C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Term) +decodeTermElement i = getFromBytesOr (ErrTermElement i) (S.lookupTermElement i) + +decodeDeclElement :: Err m => Word64 -> ByteString -> m (LocalIds, S.Decl.Decl Symbol) +decodeDeclElement i = getFromBytesOr (ErrDeclElement i) (S.lookupDeclElement i) -- * legacy conversion helpers -getCycleLen :: DB m => H.Hash -> MaybeT m Word64 -getCycleLen h = - fmap fromIntegral $ - hashToObjectId >=> loadObjectById >=> decodeComponentLengthOnly $ h +getCycleLen :: EDB m => H.Hash -> m Word64 +getCycleLen h = do + runMaybeT (hashToObjectId h) + >>= maybe (throwError $ LegacyUnknownCycleLen h) pure + >>= liftQ . Q.loadObjectById + >>= decodeComponentLengthOnly + >>= pure . fromIntegral -getDeclTypeByReference :: DB m => C.Reference.Id -> MaybeT m C.Decl.DeclType -getDeclTypeByReference = fmap C.Decl.declType . loadDeclByReference +getDeclTypeByReference :: EDB m => C.Reference.Id -> m C.Decl.DeclType +getDeclTypeByReference r@(C.Reference.Id h pos) = + runMaybeT (loadDeclByReference r) + >>= maybe (throwError $ LegacyUnknownConstructorType h pos) pure + >>= pure . C.Decl.declType -- * meat and veggies -loadTermByReference :: DB m => C.Reference.Id -> MaybeT m (C.Term Symbol) +loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) loadTermByReference (C.Reference.Id h i) = do -- retrieve and deserialize the blob (localIds, term) <- - hashToObjectId >=> loadObjectById >=> decodeTermElement i $ h + hashToObjectId h >>= liftQ . 
Q.loadObjectById >>= decodeTermElement i -- look up the text and hashes that are used by the term texts <- traverse loadTextById $ LocalIds.textLookup localIds @@ -138,7 +166,7 @@ loadTermByReference (C.Reference.Id h i) = do substTypeLink = substTypeRef pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) -loadTypeOfTermByTermReference :: DB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) +loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) loadTypeOfTermByTermReference r = do -- convert query reference by looking up db ids r' <- C.Reference.idH hashToObjectId r @@ -149,11 +177,11 @@ loadTypeOfTermByTermReference r = do -- convert the result type by looking up db ids C.Type.rtraverse s2cReference typ -loadDeclByReference :: DB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) +loadDeclByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- - hashToObjectId >=> loadObjectById >=> decodeDeclElement i $ h + hashToObjectId h >>= liftQ . Q.loadObjectById >>= decodeDeclElement i -- look up the text and hashes that are used by the term texts <- traverse loadTextById $ LocalIds.textLookup localIds @@ -187,28 +215,73 @@ termsHavingType = error "todo" termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) termsMentioningType = error "todo" -componentReferencesByPrefix :: DB m => OT.ObjectType -> Text -> Maybe Word64 -> m (Set C.Reference.Id) -componentReferencesByPrefix ot b32prefix componentIndex = do +-- something kind of funny here. 
first, we don't need to enumerate all the reference pos if we're just picking one +-- second, it would be nice if we could leave these as S.References a little longer +-- so that we remember how to blow up if they're missing +componentReferencesByPrefix :: EDB m => OT.ObjectType -> Text -> Maybe C.Reference.Pos -> m [S.Reference.Id] +componentReferencesByPrefix ot b32prefix pos = do oIds :: [Db.ObjectId] <- Q.objectIdByBase32Prefix ot b32prefix - let filteredComponent l = case componentIndex of - Nothing -> l - Just qi -> [x | x@(C.Reference.Id _ i) <- l, i == qi] + let test = maybe (const True) (==) pos + let filterComponent l = [x | x@(C.Reference.Id _ pos) <- l, test pos] fmap Monoid.fromMaybe . runMaybeT $ - Set.fromList . join - <$> traverse (fmap filteredComponent . componentByObjectId) oIds - -termReferencesByPrefix :: DB m => Text -> Maybe Word64 -> m (Set C.Reference.Id) -termReferencesByPrefix = componentReferencesByPrefix OT.TermComponent - -declReferencesByPrefix :: DB m => Text -> Maybe Word64 -> m (Set C.Reference.Id) -declReferencesByPrefix = componentReferencesByPrefix OT.DeclComponent - -componentByObjectId :: DB m => Db.ObjectId -> MaybeT m [C.Reference.Id] + join <$> traverse (fmap filterComponent . 
componentByObjectIdS) oIds + +termReferencesByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Reference.Id] +termReferencesByPrefix t w = + componentReferencesByPrefix OT.TermComponent t w + >>= traverse (C.Reference.idH loadHashByObjectId) + +declReferencesByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Reference.Id] +declReferencesByPrefix t w = + componentReferencesByPrefix OT.DeclComponent t w + >>= traverse (C.Reference.idH loadHashByObjectId) + +termReferentsByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Referent.Id] +termReferentsByPrefix b32prefix pos = + fmap C.Referent.RefId <$> termReferencesByPrefix b32prefix pos + +-- todo: simplify this if we stop caring about constructor type +-- todo: remove the cycle length once we drop it from Unison.Reference +declReferentsByPrefix :: + EDB m => + Text -> + Maybe C.Reference.Pos -> + Maybe ConstructorId -> + m [(H.Hash, C.Reference.Pos, Word64, C.DeclType, [C.Decl.ConstructorId])] +declReferentsByPrefix b32prefix pos cid = do + componentReferencesByPrefix OT.DeclComponent b32prefix pos + >>= traverse (loadConstructors cid) + where + loadConstructors :: EDB m => Maybe Word64 -> S.Reference.Id -> m (H.Hash, C.Reference.Pos, Word64, C.DeclType, [ConstructorId]) + loadConstructors cid rid@(C.Reference.Id oId pos) = do + (dt, len, ctorCount) <- getDeclCtorCount rid + h <- loadHashByObjectId oId + let test :: ConstructorId -> Bool + test = maybe (const True) (==) cid + cids = [cid | cid <- [0 :: ConstructorId .. ctorCount - 1], test cid] + pure (h, pos, len, dt, cids) + getDeclCtorCount :: EDB m => S.Reference.Id -> m (C.Decl.DeclType, Word64, ConstructorId) + getDeclCtorCount (C.Reference.Id r i) = do + bs <- liftQ (Q.loadObjectById r) + len <- decodeComponentLengthOnly bs + (_localIds, decl) <- decodeDeclElement i bs + pure (C.Decl.declType decl, len, fromIntegral $ length (C.Decl.constructorTypes decl)) + +-- (localIds, C.Decl.DataDeclaration dt m b ct) <- +-- hashToObjectId h >>= liftQ . 
Q.loadObjectById >>= decodeDeclElement i + +-- consider getting rid of this function, or making it produce [S.Reference.Id] +componentByObjectId :: EDB m => Db.ObjectId -> m [C.Reference.Id] componentByObjectId id = do - len <- loadObjectById id >>= decodeComponentLengthOnly + len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly hash <- loadHashByObjectId id pure [C.Reference.Id hash i | i <- [0 .. len - 1]] +componentByObjectIdS :: EDB m => Db.ObjectId -> m [S.Reference.Id] +componentByObjectIdS id = do + len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly + pure [C.Reference.Id id i | i <- [0 .. len - 1]] + -- termReferentsByPrefix :: DB m => ShortHash -> m (Set C.Referent.Id) -- termReferentsByPrefix = error "todo" @@ -216,7 +289,7 @@ branchHashesByPrefix :: DB m => ShortBranchHash -> m (Set C.Reference.Id) branchHashesByPrefix = error "todo" -- | returns a list of known definitions referencing `r` -dependents :: DB m => C.Reference -> MaybeT m (Set C.Reference.Id) +dependents :: EDB m => C.Reference -> MaybeT m (Set C.Reference.Id) dependents r = do r' <- c2sReference r sIds :: [S.Reference.Id] <- Q.getDependentsForDependency r' diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 2f4240dd07..8df9648eea 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -32,16 +33,36 @@ import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash import U.Codebase.Sqlite.DbId ( HashId(..), ObjectId(..), TextId ) import U.Codebase.Reference (Reference') +import Control.Monad.Trans.Maybe (runMaybeT, MaybeT(MaybeT)) +import Control.Monad.Except (runExceptT, ExceptT, throwError, MonadError) -- * types type DB m = (MonadIO m, MonadReader Connection m) - 
-newtype TypeId = TypeId ObjectId deriving (FromField, ToField) via ObjectId -newtype TermId = TermCycleId ObjectId deriving (FromField, ToField) via ObjectId -newtype DeclId = DeclCycleId ObjectId deriving (FromField, ToField) via ObjectId -newtype CausalHashId = CausalHashId HashId deriving (Hashable, FromField, ToField) via HashId -newtype CausalOldHashId = CausalOldHashId HashId deriving (Hashable, FromField, ToField) via HashId -newtype NamespaceHashId = NamespaceHashId ObjectId deriving (Hashable, FromField, ToField) via ObjectId +type EDB m = (DB m, MonadError Integrity m) + +data Integrity + = UnknownHashId HashId + | UnknownTextId TextId + | UnknownObjectId ObjectId + | UnknownCausalOldHashId CausalOldHashId + | NoObjectForHashId HashId + deriving Show + +-- |discard errors that you're sure are impossible +noError :: (Monad m, Show e) => ExceptT e m a -> m a +noError a = runExceptT a >>= \case + Left e -> error $ "unexpected error: " ++ show e + Right a -> pure a + +orError :: MonadError Integrity m => (a -> Integrity) -> a -> Maybe b -> m b +orError fe a = maybe (throwError $ fe a) pure + +newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via ObjectId +newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId +newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId +newtype CausalHashId = CausalHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId +newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId +newtype NamespaceHashId = NamespaceHashId ObjectId deriving Show deriving (Hashable, FromField, ToField) via ObjectId -- type DerivedReferent = Referent.Id ObjectId ObjectId -- type DerivedReference = Reference.Id ObjectId @@ -60,20 +81,21 @@ loadHashId :: DB m => Base32Hex -> m (Maybe HashId) loadHashId base32 = queryOnly sql (Only base32) where sql = [here| SELECT id 
FROM hash WHERE base32 = ? |] -loadHashById :: DB m => HashId -> m (Maybe Base32Hex) -loadHashById h = queryOnly sql (Only h) +loadHashById :: EDB m => HashId -> m Base32Hex +loadHashById h = queryOnly sql (Only h) >>= orError UnknownHashId h where sql = [here| SELECT base32 FROM hash WHERE id = ? |] saveText :: DB m => Text -> m TextId saveText t = execute sql (Only t) >> queryOne (loadText t) where sql = [here| INSERT OR IGNORE INTO text (text) VALUES (?) |] +-- ok to return Nothing loadText :: DB m => Text -> m (Maybe TextId) loadText t = queryOnly sql (Only t) where sql = [here| SELECT id FROM text WHERE text = ? |] -loadTextById :: DB m => TextId -> m (Maybe Text) -loadTextById h = queryOnly sql (Only h) +loadTextById :: EDB m => TextId -> m Text +loadTextById h = queryOnly sql (Only h) >>= orError UnknownTextId h where sql = [here| SELECT text FROM text WHERE id = ? |] saveHashObject :: DB m => HashId -> ObjectId -> Int -> m () @@ -85,47 +107,55 @@ saveHashObject hId oId version = execute sql (hId, oId, version) where saveObject :: DB m => HashId -> ObjectType -> ByteString -> m ObjectId saveObject h t blob = - execute sql (h, t, blob) >> queryOne (objectIdByPrimaryHashId h) + execute sql (h, t, blob) >> noError (objectIdByPrimaryHashId h) where sql = [here| INSERT OR IGNORE INTO object (primary_hash_id, type_id, bytes) VALUES (?, ?, ?) |] -loadObjectById :: DB m => ObjectId -> m (Maybe ByteString) -loadObjectById oId = queryOnly sql (Only oId) where sql = [here| +loadObjectById :: EDB m => ObjectId -> m ByteString +loadObjectById oId = queryOnly sql (Only oId) >>= orError UnknownObjectId oId + where sql = [here| SELECT bytes FROM object WHERE id = ? 
|] -objectIdByPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) -objectIdByPrimaryHashId h = queryOnly sql (Only h) where sql = [here| +objectIdByPrimaryHashId :: EDB m => HashId -> m ObjectId +objectIdByPrimaryHashId h = queryOnly sql (Only h) >>= orError UnknownHashId h + where sql = [here| SELECT id FROM object WHERE primary_hash_id = ? |] -objectIdByAnyHash :: DB m => Base32Hex -> m (Maybe ObjectId) -objectIdByAnyHash h = queryOnly sql (Only h) where sql = [here| - SELECT object.id - FROM hash - INNER JOIN hash_object ON hash_object.hash_id = hash.id - INNER JOIN object ON hash_object.object_id = object.id - WHERE hash.base32 = ? -|] +objectIdByAnyHashId :: EDB m => HashId -> m ObjectId +objectIdByAnyHashId h = + queryOnly sql (Only h) >>= orError NoObjectForHashId h where sql = [here| + SELECT object_id FROM hash_object WHERE hash_id = ? + |] -loadPrimaryHashByObjectId :: DB m => ObjectId -> m (Maybe Base32Hex) -loadPrimaryHashByObjectId oId = queryOnly sql (Only oId) where sql = [here| +-- objectIdByAnyHash :: DB m => Base32Hex -> m (Maybe ObjectId) +-- objectIdByAnyHash h = queryOnly sql (Only h) where sql = [here| +-- SELECT object.id +-- FROM hash +-- INNER JOIN hash_object ON hash_object.hash_id = hash.id +-- INNER JOIN object ON hash_object.object_id = object.id +-- WHERE hash.base32 = ? +-- |] + +-- error to return Nothing +loadPrimaryHashByObjectId :: EDB m => ObjectId -> m Base32Hex +loadPrimaryHashByObjectId oId = queryOnly sql (Only oId) >>= orError UnknownObjectId oId + where sql = [here| SELECT hash.base32 FROM hash INNER JOIN hash_object ON hash_object.hash_id = hash.id WHERE hash_object.object_id = ? 
|] -objectAndPrimaryHashByAnyHash :: DB m => Base32Hex -> m (Maybe (Base32Hex, ObjectId)) -objectAndPrimaryHashByAnyHash h = queryMaybe sql (Only h) where sql = [here| - SELECT object.primary_hash_id, object.id - FROM hash - INNER JOIN hash_object ON hash_object.hash_id = hash.id - INNER JOIN object ON hash_object.objectId = object.id - WHERE hash.base32 = ? -|] +objectAndPrimaryHashByAnyHash :: EDB m => Base32Hex -> m (Maybe (Base32Hex, ObjectId)) +objectAndPrimaryHashByAnyHash h = runMaybeT do + hashId <- MaybeT $ loadHashId h + oId <- objectIdByAnyHashId hashId + base32 <- loadPrimaryHashByObjectId oId + pure (base32, oId) objectExistsWithHash :: DB m => Base32Hex -> m Bool objectExistsWithHash h = queryExists sql (Only h) where @@ -147,6 +177,7 @@ saveCausal self value = execute sql (self, value) where sql = [here| INSERT OR IGNORE INTO causal (self_hash_id, value_hash_id) VALUES (?, ?) |] +-- error to return Nothing loadCausalValueHash :: DB m => CausalHashId -> m (Maybe NamespaceHashId) loadCausalValueHash hash = queryOnly sql (Only hash) where sql = [here| SELECT value_hash_id FROM causal WHERE self_hash_id = ? @@ -157,13 +188,17 @@ saveCausalOld v1 v2 = execute sql (v1, v2) where sql = [here| INSERT OR IGNORE INTO causal_old (old_hash_id, new_hash_id) VALUES (?, ?) |] -loadCausalHashIdByCausalOldHash :: DB m => CausalOldHashId -> m (Maybe CausalHashId) -loadCausalHashIdByCausalOldHash id = queryOnly sql (Only id) where sql = [here| +-- error to return Nothing +loadCausalHashIdByCausalOldHash :: EDB m => CausalOldHashId -> m CausalHashId +loadCausalHashIdByCausalOldHash id = + queryOnly sql (Only id) >>= orError UnknownCausalOldHashId id where sql = [here| SELECT new_hash_id FROM causal_old where old_hash_id = ? 
|] -loadOldCausalValueHash :: DB m => CausalOldHashId -> m (Maybe NamespaceHashId) -loadOldCausalValueHash id = queryOnly sql (Only id) where sql = [here| +-- error to return Nothing +loadOldCausalValueHash :: EDB m => CausalOldHashId -> m NamespaceHashId +loadOldCausalValueHash id = + queryOnly sql (Only id) >>= orError UnknownCausalOldHashId id where sql = [here| SELECT value_hash_id FROM causal INNER JOIN causal_old ON self_hash_id = new_hash_id WHERE old_hash_id = ? @@ -186,6 +221,7 @@ saveTypeOfTerm r blob = execute sql (r :. Only blob) where sql = [here| VALUES (?, ?, ?) |] +-- possible application error to return Nothing loadTypeOfTerm :: DB m => Reference.Id -> m (Maybe ByteString) loadTypeOfTerm r = queryOnly sql r where sql = [here| SELECT bytes FROM type_of_term diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 93d0035269..30edf6253e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -315,7 +315,7 @@ getTermElement = getABT getSymbol getUnit getF 2 -> pure Term.PConcat tag -> unknownTag "SeqOp" tag -lookupTermElement :: MonadGet m => Reference.ComponentIndex -> m (LocalIds, TermFormat.Term) +lookupTermElement :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term) lookupTermElement = unsafeFramedArrayLookup (getPair getLocalIds getTermElement) . fromIntegral @@ -385,7 +385,7 @@ getDeclElement = other -> unknownTag "DeclModifier" other lookupDeclElement :: - MonadGet m => Reference.ComponentIndex -> m (LocalIds, DeclFormat.Decl Symbol) + MonadGet m => Reference.Pos -> m (LocalIds, DeclFormat.Decl Symbol) lookupDeclElement = unsafeFramedArrayLookup (getPair getLocalIds getDeclElement) . 
fromIntegral diff --git a/codebase2/codebase/U/Codebase/Reference.hs b/codebase2/codebase/U/Codebase/Reference.hs index 88848a9f6b..ed4cdf4519 100644 --- a/codebase2/codebase/U/Codebase/Reference.hs +++ b/codebase2/codebase/U/Codebase/Reference.hs @@ -26,13 +26,13 @@ data Reference' t h | ReferenceDerived (Id' h) deriving (Eq, Ord, Show) -pattern Derived :: h -> ComponentIndex -> Reference' t h +pattern Derived :: h -> Pos -> Reference' t h pattern Derived h i = ReferenceDerived (Id h i) {-# COMPLETE ReferenceBuiltin, Derived #-} -type ComponentIndex = Word64 -data Id' h = Id h ComponentIndex +type Pos = Word64 +data Id' h = Id h Pos deriving (Eq, Ord, Show, Functor, Foldable, Traversable) t :: Traversal (Reference' t h) (Reference' t' h) t t' diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs index 48da3793fa..10d5fe4f19 100644 --- a/codebase2/codebase/U/Codebase/Referent.hs +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -11,26 +11,24 @@ import U.Codebase.Reference (Reference, Reference') import qualified U.Codebase.Reference as Reference import U.Util.Hash (Hash) import U.Util.Hashable (Hashable (..)) -import Data.Word (Word64) import qualified U.Util.Hashable as Hashable import Data.Bifunctor (Bifunctor(..)) import Data.Bifoldable (Bifoldable(..)) import Data.Bitraversable (Bitraversable(..)) +import U.Codebase.Decl (ConstructorId) type Referent = Referent' Reference Reference type ReferentH = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) -type ConstructorIndex = Word64 - data Referent' rTm rTp = Ref rTm - | Con rTp ConstructorIndex + | Con rTp ConstructorId deriving (Eq, Ord, Show, Bitraversable) type Id = Id' Hash Hash data Id' hTm hTp = RefId (Reference.Id' hTm) - | ConId (Reference.Id' hTp) ConstructorIndex + | ConId (Reference.Id' hTp) ConstructorId deriving (Eq, Ord, Show, Bitraversable) instance (Hashable rTm, Hashable rTp) => Hashable (Referent' rTm rTp) where diff --git 
a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index a65a294aae..5d2ccf3bee 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -7,11 +7,10 @@ module Unison.Codebase.SqliteCodebase where -- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) import Control.Concurrent.STM -import Control.Monad (join) +import Control.Monad ((>=>)) +import Control.Monad.Except (ExceptT, runExceptT) import Control.Monad.Reader (ReaderT (runReaderT)) -import Control.Monad.Trans (MonadTrans (lift)) -import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) -import qualified Data.Foldable as Foldable +import Control.Monad.Trans.Maybe (MaybeT) import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) @@ -23,11 +22,10 @@ import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite import System.FilePath (()) -import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Sqlite.ObjectType as OT +import U.Codebase.Sqlite.Operations (EDB) import qualified U.Codebase.Sqlite.Operations as Ops -import U.Codebase.Sqlite.Queries (DB) import qualified U.Util.Monoid as Monoid import qualified Unison.Builtin as Builtins import Unison.Codebase (CodebasePath) @@ -47,6 +45,7 @@ import Unison.Reference (Reference) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent import Unison.ShortHash (ShortHash) +import qualified Unison.ShortHash as SH import qualified Unison.ShortHash as ShortHash import Unison.Symbol (Symbol) import Unison.Term (Term) @@ -80,7 +79,7 @@ sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do conn :: Sqlite.Connection <- Sqlite.open $ root "v2" "unison.sqlite3" termBuffer :: TVar (Map Hash 
TermBufferEntry) <- newTVarIO Map.empty - declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty + _declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty let getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) putRootBranch :: Branch IO -> IO () rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) @@ -97,60 +96,70 @@ sqliteCodebase root = do getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) getTerm (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = - runDB conn do + runDB' conn do term2 <- Ops.loadTermByReference (C.Reference.Id h2 i) Cv.term2to1 h1 getCycleLen getDeclType term2 - getCycleLen :: DB m => Hash -> MaybeT m Reference.Size + getCycleLen :: EDB m => Hash -> m Reference.Size getCycleLen = Ops.getCycleLen . Cv.hash1to2 - getDeclType :: DB m => C.Reference.Reference -> MaybeT m CT.ConstructorType + getDeclType :: EDB m => C.Reference.Reference -> m CT.ConstructorType getDeclType = \case C.Reference.ReferenceBuiltin t -> - MaybeT (pure $ Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType) + let err = + error $ + "I don't know about the builtin type ##" + ++ show t + ++ ", but I need to know whether it's Data or Effect in order to construct a V1 TermLink for a constructor." + in pure . fromMaybe err $ + Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType C.Reference.ReferenceDerived i -> getDeclTypeById i - getDeclTypeById :: DB m => C.Reference.Id -> MaybeT m CT.ConstructorType + getDeclTypeById :: EDB m => C.Reference.Id -> m CT.ConstructorType getDeclTypeById = fmap Cv.decltype2to1 . 
Ops.getDeclTypeByReference getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = - runDB conn do + runDB' conn do type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) Cv.ttype2to1 getCycleLen type2 getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) getTypeDeclaration (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = - runDB conn do + runDB' conn do decl2 <- Ops.loadDeclByReference (C.Reference.Id h2 i) Cv.decl2to1 h1 getCycleLen decl2 putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTerm r@(Reference.Id h i n) = error "todo" - updateBufferEntry termBuffer h $ \be -> error "todo" - --- data BufferEntry a = BufferEntry --- { -- First, you are waiting for the cycle to fill up with all elements --- -- Then, you check: are all dependencies of the cycle in the db? --- -- If yes: write yourself to database and trigger check of dependents --- -- If no: just wait, do nothing --- beComponentTargetSize :: Maybe Word64, --- beComponent :: Map Reference.Pos a, --- beMissingDependencies :: Set Hash, --- beWaitingDependents :: Set Hash --- } + putTerm _r@(Reference.Id h _i _n) = error + "todo" + updateBufferEntry + termBuffer + h + $ \_be -> error "todo" + + -- data BufferEntry a = BufferEntry + -- { -- First, you are waiting for the cycle to fill up with all elements + -- -- Then, you check: are all dependencies of the cycle in the db? 
+ -- -- If yes: write yourself to database and trigger check of dependents + -- -- If no: just wait, do nothing + -- beComponentTargetSize :: Maybe Word64, + -- beComponent :: Map Reference.Pos a, + -- beMissingDependencies :: Set Hash, + -- beWaitingDependents :: Set Hash + -- } updateBufferEntry :: TVar (Map Hash (BufferEntry a)) -> Hash -> -- this signature may need to change (BufferEntry a -> (BufferEntry a, b)) -> - IO [b] + IO b updateBufferEntry = error "todo" - tryWriteBuffer :: Hash -> TVar (Map Hash (BufferEntry a)) -> IO () - tryWriteBuffer = error "todo" - + _tryWriteBuffer :: Hash -> TVar (Map Hash (BufferEntry a)) -> IO () + _tryWriteBuffer _h = error "todo" --do + -- isMissingDependencies <- allM putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () putTypeDeclaration = error "todo" @@ -202,9 +211,13 @@ sqliteCodebase root = do defnReferencesByPrefix :: OT.ObjectType -> ShortHash -> IO (Set Reference.Id) defnReferencesByPrefix _ (ShortHash.Builtin _) = pure mempty - defnReferencesByPrefix ot (ShortHash.ShortHash prefix cycle _cid) = - Monoid.fromMaybe <$> runDB conn do - refs <- lift $ Ops.componentReferencesByPrefix ot prefix (Cv.shortHashSuffix1to2 <$> cycle) + defnReferencesByPrefix ot (ShortHash.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) _cid) = + Monoid.fromMaybe <$> runDB' conn do + refs <- do + Ops.componentReferencesByPrefix ot prefix cycle + >>= traverse (C.Reference.idH Ops.loadHashByObjectId) + >>= pure . 
Set.fromList + Set.fromList <$> traverse (Cv.referenceid2to1 getCycleLen) (Set.toList refs) termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) @@ -213,27 +226,19 @@ sqliteCodebase root = do declReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) declReferencesByPrefix = defnReferencesByPrefix OT.DeclComponent - -- this implementation is wrong; it should filter by ctor id if provided - termReferentsByPrefix :: ShortHash -> IO (Set Referent.Id) - termReferentsByPrefix sh = do - terms <- termReferencesByPrefix sh - let termReferents = Set.map Referent.Ref' terms - decls <- declReferencesByPrefix sh - declReferents <- Set.fromList . join <$> traverse go (Foldable.toList decls) - pure (termReferents <> declReferents) - where - getDeclCtorCount :: DB m => Reference.Id -> m (CT.ConstructorType, Word64) - getDeclCtorCount (Reference.Id (Cv.hash1to2 -> h2) i _n) = do - -- this is a database integrity error if the decl doesn't exist in the database - decl20 <- runMaybeT $ Ops.loadDeclByReference (C.Reference.Id h2 i) - let decl2 = fromMaybe (error "database integrity error") decl20 - pure - ( Cv.decltype2to1 $ V2.Decl.declType decl2, - fromIntegral . length $ V2.Decl.constructorTypes decl2 - ) - go rid = runDB' conn do - (ct, ctorCount) <- getDeclCtorCount rid - pure [Referent.Con' rid (fromIntegral cid) ct | cid <- [0 .. ctorCount - 1]] + referentsByPrefix :: ShortHash -> IO (Set Referent.Id) + referentsByPrefix SH.Builtin {} = pure mempty + referentsByPrefix (SH.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) cid) = runDB conn do + termReferents <- + Ops.termReferentsByPrefix prefix cycle + >>= traverse (Cv.referentid2to1 getCycleLen getDeclType) + declReferents' <- Ops.declReferentsByPrefix prefix cycle (read . Text.unpack <$> cid) + let declReferents = + [ Referent.Con' (Reference.Id (Cv.hash2to1 h) pos len) (fromIntegral cid) (Cv.decltype2to1 ct) + | (h, pos, len, ct, cids) <- declReferents', + cid <- cids + ] + pure . 
Set.fromList $ termReferents <> declReferents branchHashesByPrefix = error "todo" let finalizer = Sqlite.close conn @@ -262,7 +267,7 @@ sqliteCodebase root = do hashLength termReferencesByPrefix declReferencesByPrefix - termReferentsByPrefix + referentsByPrefix branchHashLength branchHashesByPrefix ) @@ -270,10 +275,10 @@ sqliteCodebase root = do -- x :: DB m => MaybeT m (Term Symbol) -> MaybeT m (Term Symbol Ann) -- x = error "not implemented" -runDB :: Connection -> MaybeT (ReaderT Connection IO) a -> IO (Maybe a) -runDB conn action = flip runReaderT conn $ runMaybeT action +runDB' :: Connection -> MaybeT (ReaderT Connection (ExceptT Ops.Error IO)) a -> IO (Maybe a) +runDB' conn = runDB conn . runMaybeT -runDB' :: Connection -> MaybeT (ReaderT Connection IO) a -> IO a -runDB' conn action = flip runReaderT conn $ fmap err $ runMaybeT action +runDB :: Connection -> ReaderT Connection (ExceptT Ops.Error IO) a -> IO a +runDB conn = (runExceptT >=> err) . flip runReaderT conn where - err = fromMaybe (error "database consistency error") + err = \case Left err -> error $ show err; Right a -> pure a diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 05ecd1907b..8062cceff1 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -8,6 +8,7 @@ import qualified U.Codebase.Kind as V2.Kind import qualified U.Codebase.Reference as V2 import qualified U.Codebase.Reference as V2.Reference import qualified U.Codebase.Referent as V2 +import qualified U.Codebase.Referent as V2.Referent import qualified U.Codebase.Sqlite.Symbol as V2 import qualified U.Codebase.Term as V2.Term import qualified U.Codebase.Type as V2.Type @@ -26,6 +27,7 @@ import qualified Unison.Pattern as P import qualified Unison.Reference as V1 import qualified Unison.Reference as V1.Reference import 
qualified Unison.Referent as V1 +import qualified Unison.Referent as V1.Referent import qualified Unison.Symbol as V1 import qualified Unison.Term as V1.Term import qualified Unison.Type as V1.Type @@ -217,6 +219,14 @@ rreferent1to2 h = \case V1.Ref r -> V2.Ref (rreference1to2 h r) V1.Con r i _ct -> V2.Con (reference1to2 r) (fromIntegral i) +referentid2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Referent.Id -> m V1.Referent.Id +referentid2to1 lookupSize lookupCT = \case + V2.RefId r -> V1.Ref' <$> referenceid2to1 lookupSize r + V2.ConId r i -> + V1.Con' <$> referenceid2to1 lookupSize r + <*> pure (fromIntegral i) + <*> lookupCT (V2.ReferenceDerived r) + hash2to1 :: V2.Hash.Hash -> Hash hash2to1 (V2.Hash.Hash sbs) = V1.Hash (SBS.fromShort sbs) diff --git a/stack.yaml b/stack.yaml index ec8fdef831..40eab1c2f7 100644 --- a/stack.yaml +++ b/stack.yaml @@ -9,7 +9,7 @@ packages: - parser-typechecker - unison-core -- codebase-convert-1to2 +# - codebase-convert-1to2 - codebase1/codebase - codebase2/codebase - codebase2/codebase-sqlite diff --git a/unison-core/src/Unison/Referent.hs b/unison-core/src/Unison/Referent.hs index 700e84ed02..ddefe350fc 100644 --- a/unison-core/src/Unison/Referent.hs +++ b/unison-core/src/Unison/Referent.hs @@ -42,7 +42,7 @@ toShortHash :: Referent -> ShortHash toShortHash = \case Ref r -> R.toShortHash r Con r i _ -> patternShortHash r i - + toShortHashId :: Id -> ShortHash toShortHashId = toShortHash . 
fromId @@ -84,9 +84,9 @@ toReference' :: Referent' r -> r toReference' = \case Ref' r -> r Con' r _i _t -> r - + fromId :: Id -> Referent -fromId = fmap R.DerivedId +fromId = fmap R.DerivedId toTypeReference :: Referent -> Maybe Reference toTypeReference = \case From 7ef216a87cb7b069d45ea49d1e634444db41e868 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sun, 25 Oct 2020 15:28:32 -0400 Subject: [PATCH 035/225] SqliteCodebase.putTerm and helpers --- .../U/Codebase/Sqlite/Operations.hs | 23 ++-- .../src/Unison/Codebase/SqliteCodebase.hs | 101 ++++++++++++++---- .../Codebase/SqliteCodebase/Conversions.hs | 8 ++ unison-core/src/Unison/Referent.hs | 5 + unison-core/src/Unison/Term.hs | 15 +++ 5 files changed, 117 insertions(+), 35 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 941710e388..8914509c0e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -48,6 +48,7 @@ import qualified U.Util.Hash as H import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get, getFromBytes) import qualified U.Util.Serialization as S +import Data.Maybe (isJust) type Err m = MonadError Error m @@ -117,6 +118,9 @@ hashToObjectId h = do hashId <- MaybeT $ Q.loadHashId . H.toBase32Hex $ h liftQ $ Q.objectIdByPrimaryHashId hashId +objectExistsForHash :: EDB m => H.Hash -> m Bool +objectExistsForHash h = isJust <$> runMaybeT (hashToObjectId h) + loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash loadHashByObjectId = fmap H.fromBase32Hex . liftQ . 
Q.loadPrimaryHashByObjectId @@ -194,11 +198,11 @@ loadDeclByReference (C.Reference.Id h i) = do substTypeRef = bimap substText (fmap substHash) pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here -saveTerm :: DB m => C.Reference.Id -> C.Term Symbol -> C.Term.Type Symbol -> m () -saveTerm = error "todo" +saveTermComponent :: H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m () +saveTermComponent = error "todo" -saveDecl :: DB m => C.Reference.Id -> C.Decl Symbol -> m () -saveDecl = error "todo" +saveDeclComponent :: DB m => H.Hash -> [C.Decl Symbol] -> m () +saveDeclComponent = error "todo" listWatches :: DB m => WatchKind -> m [C.Reference.Id] listWatches = error "todo" @@ -224,7 +228,7 @@ componentReferencesByPrefix ot b32prefix pos = do let test = maybe (const True) (==) pos let filterComponent l = [x | x@(C.Reference.Id _ pos) <- l, test pos] fmap Monoid.fromMaybe . runMaybeT $ - join <$> traverse (fmap filterComponent . componentByObjectIdS) oIds + join <$> traverse (fmap filterComponent . componentByObjectId) oIds termReferencesByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Reference.Id] termReferencesByPrefix t w = @@ -270,15 +274,8 @@ declReferentsByPrefix b32prefix pos cid = do -- (localIds, C.Decl.DataDeclaration dt m b ct) <- -- hashToObjectId h >>= liftQ . Q.loadObjectById >>= decodeDeclElement i --- consider getting rid of this function, or making it produce [S.Reference.Id] -componentByObjectId :: EDB m => Db.ObjectId -> m [C.Reference.Id] +componentByObjectId :: EDB m => Db.ObjectId -> m [S.Reference.Id] componentByObjectId id = do - len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly - hash <- loadHashByObjectId id - pure [C.Reference.Id hash i | i <- [0 .. len - 1]] - -componentByObjectIdS :: EDB m => Db.ObjectId -> m [S.Reference.Id] -componentByObjectIdS id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. 
len - 1]] diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 5d2ccf3bee..da91506287 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} @@ -6,11 +7,13 @@ module Unison.Codebase.SqliteCodebase where -- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) -import Control.Concurrent.STM -import Control.Monad ((>=>)) +import Control.Monad (filterM, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) +import Control.Monad.Extra (ifM, unlessM) import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.Trans.Maybe (MaybeT) +import Data.Bifunctor (Bifunctor (first), second) +import Data.Foldable (traverse_) import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) @@ -40,7 +43,7 @@ import qualified Unison.ConstructorType as CT import Unison.DataDeclaration (Decl) import Unison.Hash (Hash) import Unison.Parser (Ann) -import Unison.Prelude (MaybeT (runMaybeT), fromMaybe) +import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, traceM) import Unison.Reference (Reference) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent @@ -49,9 +52,13 @@ import qualified Unison.ShortHash as SH import qualified Unison.ShortHash as ShortHash import Unison.Symbol (Symbol) import Unison.Term (Term) +import qualified Unison.Term as Term import Unison.Type (Type) +import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF -import UnliftIO (catchIO) +import UnliftIO (MonadIO, catchIO) +import UnliftIO.STM +import Data.Foldable (Foldable(toList)) -- 1) buffer up the component -- 2) in the event that the component is complete, then what? 
@@ -70,6 +77,7 @@ data BufferEntry a = BufferEntry beMissingDependencies :: Set Hash, beWaitingDependents :: Set Hash } + deriving (Show) type TermBufferEntry = BufferEntry (Term Symbol Ann, Type Symbol Ann) @@ -79,7 +87,7 @@ sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) sqliteCodebase root = do conn :: Sqlite.Connection <- Sqlite.open $ root "v2" "unison.sqlite3" termBuffer :: TVar (Map Hash TermBufferEntry) <- newTVarIO Map.empty - _declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty + declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty let getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) putRootBranch :: Branch IO -> IO () rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) @@ -110,7 +118,7 @@ sqliteCodebase root = do error $ "I don't know about the builtin type ##" ++ show t - ++ ", but I need to know whether it's Data or Effect in order to construct a V1 TermLink for a constructor." + ++ ", but I've been asked for it's ConstructorType." in pure . fromMaybe err $ Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType C.Reference.ReferenceDerived i -> getDeclTypeById i @@ -131,12 +139,32 @@ sqliteCodebase root = do Cv.decl2to1 h1 getCycleLen decl2 putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTerm _r@(Reference.Id h _i _n) = error - "todo" - updateBufferEntry - termBuffer - h - $ \_be -> error "todo" + putTerm _r@(Reference.Id h@(Cv.hash1to2 -> h2) i n) tm tp = + runDB conn $ + unlessM + (Ops.objectExistsForHash h2) + ( withBuffer termBuffer h \(BufferEntry size comp missing waiting) -> do + let size' = Just n + pure $ + ifM + ((==) <$> size <*> size') + (pure ()) + (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') + let comp' = Map.insert i (tm, tp) comp + missingTerms' <- + filterM + (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) + [h | Reference.Derived h _i _n <- Set.toList $ Term.termDependencies tm] + missingTypes' <- + filterM (fmap not . Ops.objectExistsForHash . Cv.hash1to2) $ + [h | Reference.Derived h _i _n <- Set.toList $ Term.typeDependencies tm] + ++ [h | Reference.Derived h _i _n <- Set.toList $ Type.dependencies tp] + let missing' = missing <> Set.fromList (missingTerms' <> missingTypes') + traverse (addBufferDependent h termBuffer) missingTerms' + traverse (addBufferDependent h declBuffer) missingTypes' + putBuffer termBuffer h (BufferEntry size' comp' missing' waiting) + tryFlushTermBuffer h + ) -- data BufferEntry a = BufferEntry -- { -- First, you are waiting for the cycle to fill up with all elements @@ -148,17 +176,46 @@ sqliteCodebase root = do -- beMissingDependencies :: Set Hash, -- beWaitingDependents :: Set Hash -- } + putBuffer :: (MonadIO m, Show a) => TVar (Map Hash (BufferEntry a)) -> Hash -> BufferEntry a -> m () + putBuffer tv h e = do + traceM $ "putBuffer " ++ show h ++ " " ++ show e + atomically $ modifyTVar tv (Map.insert h e) + + withBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> (BufferEntry a -> m b) -> m b + withBuffer tv h f = + Map.lookup h <$> readTVarIO tv >>= \case + Just e -> f e + Nothing -> f (BufferEntry Nothing Map.empty Set.empty Set.empty) + + removeBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> m () + removeBuffer tv h = atomically $ modifyTVar tv (Map.delete h) + + addBufferDependent :: (MonadIO m, Show a) => Hash -> TVar (Map Hash (BufferEntry a)) -> Hash -> m () + addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do + putBuffer tv dependency be {beWaitingDependents = Set.insert dependent $ beWaitingDependents be} + tryFlushTermBuffer :: EDB m => Hash -> m () + tryFlushTermBuffer h@(Cv.hash1to2 -> h2) = + -- skip if it has already been flushed + unlessM (Ops.objectExistsForHash h2) $ + withBuffer + termBuffer + h + \(BufferEntry size comp (Set.toList -> 
missing) waiting) -> do + missing' <- + filterM + (fmap not . Ops.objectExistsForHash . Cv.hash1to2) + missing + if null missing' && size == Just (fromIntegral (length comp)) + then do + Ops.saveTermComponent h2 $ + first (Cv.term1to2 h) . second Cv.ttype1to2 <$> toList comp + removeBuffer termBuffer h + traverse_ tryFlushTermBuffer waiting + else -- update + + putBuffer termBuffer h $ + BufferEntry size comp (Set.fromList missing') waiting - updateBufferEntry :: - TVar (Map Hash (BufferEntry a)) -> - Hash -> - -- this signature may need to change - (BufferEntry a -> (BufferEntry a, b)) -> - IO b - updateBufferEntry = error "todo" - - _tryWriteBuffer :: Hash -> TVar (Map Hash (BufferEntry a)) -> IO () - _tryWriteBuffer _h = error "todo" --do -- isMissingDependencies <- allM putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () putTypeDeclaration = error "todo" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 8062cceff1..c01af7be5a 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -43,6 +43,14 @@ decltype1to2 = \case CT.Data -> V2.Decl.Data CT.Effect -> V2.Decl.Effect +term1to2 :: Hash -> V1.Term.Term V1.Symbol Ann -> V2.Term.Term V2.Symbol +term1to2 h = + V2.ABT.transform (termF1to2 h) + . V2.ABT.vmap symbol1to2 + . V2.ABT.amap (const ()) + . abt1to2 + where termF1to2 = undefined + term2to1 :: forall m. 
Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.Term V2.Symbol -> m (V1.Term.Term V1.Symbol Ann) term2to1 h lookupSize lookupCT tm = V1.ABT.transformM (termF2to1 h lookupSize lookupCT) diff --git a/unison-core/src/Unison/Referent.hs b/unison-core/src/Unison/Referent.hs index ddefe350fc..1807137f0c 100644 --- a/unison-core/src/Unison/Referent.hs +++ b/unison-core/src/Unison/Referent.hs @@ -123,6 +123,11 @@ fromText t = either (const Nothing) Just $ cidPart' = Text.takeWhileEnd (/= '#') t cidPart = Text.drop 1 cidPart' +fold :: (r -> a) -> (r -> Int -> ConstructorType -> a) -> Referent' r -> a +fold fr fc = \case + Ref' r -> fr r + Con' r i ct -> fc r i ct + instance Hashable Referent where tokens (Ref r) = [H.Tag 0] ++ H.tokens r tokens (Con r i dt) = [H.Tag 2] ++ H.tokens r ++ H.tokens (fromIntegral i :: Word64) ++ H.tokens dt diff --git a/unison-core/src/Unison/Term.hs b/unison-core/src/Unison/Term.hs index 1d41513b50..9486b2abc6 100644 --- a/unison-core/src/Unison/Term.hs +++ b/unison-core/src/Unison/Term.hs @@ -822,6 +822,21 @@ unReqOrCtor _ = Nothing dependencies :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference dependencies t = Set.map (LD.fold id Referent.toReference) (labeledDependencies t) +termDependencies :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference +termDependencies = + Set.fromList + . mapMaybe + ( LD.fold + (\_typeRef -> Nothing) + ( Referent.fold + (\termRef -> Just termRef) + (\_typeConRef _i _ct -> Nothing) + ) + ) + . toList + . labeledDependencies + +-- gets types from annotations and constructors typeDependencies :: (Ord v, Ord vt) => Term2 vt at ap v a -> Set Reference typeDependencies = Set.fromList . mapMaybe (LD.fold Just (const Nothing)) . toList . 
labeledDependencies From 9f72a53c0afb9133f01823c288ef492077045e93 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 26 Oct 2020 18:52:16 -0400 Subject: [PATCH 036/225] flesh out SqliteCodebase.putTypeDeclaration --- .../src/Unison/Codebase/SqliteCodebase.hs | 95 ++++++++++++++----- 1 file changed, 69 insertions(+), 26 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index da91506287..1073425c5b 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -13,7 +13,7 @@ import Control.Monad.Extra (ifM, unlessM) import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.Trans.Maybe (MaybeT) import Data.Bifunctor (Bifunctor (first), second) -import Data.Foldable (traverse_) +import Data.Foldable (Foldable (toList), traverse_) import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) @@ -29,6 +29,7 @@ import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Sqlite.ObjectType as OT import U.Codebase.Sqlite.Operations (EDB) import qualified U.Codebase.Sqlite.Operations as Ops +import qualified U.Util.Hash as H2 import qualified U.Util.Monoid as Monoid import qualified Unison.Builtin as Builtins import Unison.Codebase (CodebasePath) @@ -41,6 +42,7 @@ import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv import Unison.Codebase.SyncMode (SyncMode) import qualified Unison.ConstructorType as CT import Unison.DataDeclaration (Decl) +import qualified Unison.DataDeclaration as Decl import Unison.Hash (Hash) import Unison.Parser (Ann) import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, traceM) @@ -58,7 +60,6 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import UnliftIO (MonadIO, catchIO) import UnliftIO.STM -import Data.Foldable (Foldable(toList)) -- 1) buffer up the component -- 2) in the event that the 
component is complete, then what? @@ -139,7 +140,7 @@ sqliteCodebase root = do Cv.decl2to1 h1 getCycleLen decl2 putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTerm _r@(Reference.Id h@(Cv.hash1to2 -> h2) i n) tm tp = + putTerm (Reference.Id h@(Cv.hash1to2 -> h2) i n) tm tp = runDB conn $ unlessM (Ops.objectExistsForHash h2) @@ -193,32 +194,74 @@ sqliteCodebase root = do addBufferDependent :: (MonadIO m, Show a) => Hash -> TVar (Map Hash (BufferEntry a)) -> Hash -> m () addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do putBuffer tv dependency be {beWaitingDependents = Set.insert dependent $ beWaitingDependents be} - tryFlushTermBuffer :: EDB m => Hash -> m () - tryFlushTermBuffer h@(Cv.hash1to2 -> h2) = + + tryFlushBuffer :: + (EDB m, Show a) => + TVar (Map Hash (BufferEntry a)) -> + (H2.Hash -> [a] -> m ()) -> + (Hash -> m ()) -> + Hash -> + m () + tryFlushBuffer b saveComponent tryWaiting h@(Cv.hash1to2 -> h2) = -- skip if it has already been flushed unlessM (Ops.objectExistsForHash h2) $ - withBuffer - termBuffer - h - \(BufferEntry size comp (Set.toList -> missing) waiting) -> do - missing' <- - filterM - (fmap not . Ops.objectExistsForHash . Cv.hash1to2) - missing - if null missing' && size == Just (fromIntegral (length comp)) - then do - Ops.saveTermComponent h2 $ - first (Cv.term1to2 h) . second Cv.ttype1to2 <$> toList comp - removeBuffer termBuffer h - traverse_ tryFlushTermBuffer waiting - else -- update - - putBuffer termBuffer h $ - BufferEntry size comp (Set.fromList missing') waiting - - -- isMissingDependencies <- allM + withBuffer b h try + where + try (BufferEntry size comp (Set.toList -> missing) waiting) = do + missing' <- + filterM + (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) + missing + if null missing' && size == Just (fromIntegral (length comp)) + then do + saveComponent h2 (toList comp) + removeBuffer b h + traverse_ tryWaiting waiting + else -- update + + putBuffer b h $ + BufferEntry size comp (Set.fromList missing') waiting + + tryFlushTermBuffer :: EDB m => Hash -> m () + tryFlushTermBuffer h = + tryFlushBuffer + termBuffer + ( \h2 -> + Ops.saveTermComponent h2 + . fmap (first (Cv.term1to2 h) . second Cv.ttype1to2) + ) + tryFlushTermBuffer + h + + tryFlushDeclBuffer :: EDB m => Hash -> m () + tryFlushDeclBuffer h = + tryFlushBuffer + declBuffer + (\h2 -> Ops.saveDeclComponent h2 . fmap (Cv.decl1to2 h)) + (\h -> tryFlushTermBuffer h >> tryFlushDeclBuffer h) + h + putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () - putTypeDeclaration = error "todo" + putTypeDeclaration (Reference.Id h@(Cv.hash1to2 -> h2) i n) decl = + runDB conn $ + unlessM + (Ops.objectExistsForHash h2) + ( withBuffer declBuffer h \(BufferEntry size comp missing waiting) -> do + let size' = Just n + pure $ + ifM + ((==) <$> size <*> size') + (pure ()) + (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') + let comp' = Map.insert i decl comp + moreMissing <- + filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) $ + [h | Reference.Derived h _i _n <- Set.toList $ Decl.declDependencies decl] + let missing' = missing <> Set.fromList moreMissing + traverse (addBufferDependent h declBuffer) moreMissing + putBuffer declBuffer h (BufferEntry size' comp' missing' waiting) + tryFlushDeclBuffer h + ) getRootBranch = error "todo" putRootBranch = error "todo" From 5e2238f62280e343df9529d33f868f0fc2a8a050 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Oct 2020 09:36:32 -0400 Subject: [PATCH 037/225] wip --- .../src/Unison/Codebase/SqliteCodebase.hs | 16 ++-- .../Codebase/SqliteCodebase/Conversions.hs | 91 +++++++++++++++---- 2 files changed, 79 insertions(+), 28 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 1073425c5b..b8f6ef9f1e 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -195,6 +195,8 @@ sqliteCodebase root = do addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do putBuffer tv dependency be {beWaitingDependents = Set.insert dependent $ beWaitingDependents be} + -- |if all of the dependencies are in the codebase and this component is + -- complete, then save this component! tryFlushBuffer :: (EDB m, Show a) => TVar (Map Hash (BufferEntry a)) -> @@ -202,24 +204,22 @@ sqliteCodebase root = do (Hash -> m ()) -> Hash -> m () - tryFlushBuffer b saveComponent tryWaiting h@(Cv.hash1to2 -> h2) = + tryFlushBuffer buf saveComponent tryWaiting h@(Cv.hash1to2 -> h2) = -- skip if it has already been flushed - unlessM (Ops.objectExistsForHash h2) $ - withBuffer b h try + unlessM (Ops.objectExistsForHash h2) $ withBuffer buf h try where - try (BufferEntry size comp (Set.toList -> missing) waiting) = do + try (BufferEntry size comp (Set.delete h -> missing) waiting) = do missing' <- filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) - missing + (toList missing) if null missing' && size == Just (fromIntegral (length comp)) then do saveComponent h2 (toList comp) - removeBuffer b h + removeBuffer buf h traverse_ tryWaiting waiting else -- update - - putBuffer b h $ + putBuffer buf h $ BufferEntry size comp (Set.fromList missing') waiting tryFlushTermBuffer :: EDB m => Hash -> m () diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index c01af7be5a..337521c374 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -23,7 +23,7 @@ import qualified Unison.Hash as V1 import qualified Unison.Kind as V1.Kind import Unison.Parser (Ann) import qualified Unison.Parser as Ann -import qualified Unison.Pattern as P +import qualified Unison.Pattern as V1.Pattern import qualified Unison.Reference as V1 import qualified Unison.Reference as V1.Reference import qualified Unison.Referent as V1 @@ -45,11 +45,62 @@ decltype1to2 = \case term1to2 :: Hash -> V1.Term.Term V1.Symbol Ann -> V2.Term.Term V2.Symbol term1to2 h = - V2.ABT.transform (termF1to2 h) + V2.ABT.transform termF1to2 . V2.ABT.vmap symbol1to2 . V2.ABT.amap (const ()) . 
abt1to2 - where termF1to2 = undefined + where + termF1to2 :: V1.Term.F V1.Symbol Ann Ann a -> V2.Term.F V2.Symbol a + termF1to2 = go + go = \case + V1.Term.Int i -> V2.Term.Int i + V1.Term.Nat n -> V2.Term.Nat n + V1.Term.Float f -> V2.Term.Float f + V1.Term.Boolean b -> V2.Term.Boolean b + V1.Term.Text t -> V2.Term.Text t + V1.Term.Char c -> V2.Term.Char c + V1.Term.Ref r -> V2.Term.Ref (rreference1to2 h r) + V1.Term.Constructor r i -> V2.Term.Constructor (reference1to2 r) (fromIntegral i) + V1.Term.Request r i -> V2.Term.Constructor (reference1to2 r) (fromIntegral i) + V1.Term.Handle b h -> V2.Term.Handle b h + V1.Term.App f a -> V2.Term.App f a + V1.Term.Ann e t -> V2.Term.Ann e (ttype1to2 t) + V1.Term.Sequence as -> V2.Term.Sequence as + V1.Term.If c t f -> V2.Term.If c t f + V1.Term.And a b -> V2.Term.And a b + V1.Term.Or a b -> V2.Term.Or a b + V1.Term.Lam a -> V2.Term.Lam a + V1.Term.LetRec _ bs body -> V2.Term.LetRec bs body + V1.Term.Let _ b body -> V2.Term.Let b body + V1.Term.Match e cases -> V2.Term.Match e (goCase <$> cases) + V1.Term.TermLink r -> V2.Term.TermLink (rreferent1to2 h r) + V1.Term.TypeLink r -> V2.Term.TypeLink (reference1to2 r) + V1.Term.Blank _ -> error "can't serialize term with blanks" + goCase (V1.Term.MatchCase p g b) = + V2.Term.MatchCase (goPat p) g b + goPat :: V1.Pattern.Pattern a -> V2.Term.Pattern Text V2.Reference + goPat = \case + V1.Pattern.Unbound _ -> V2.Term.PUnbound + V1.Pattern.Var _ -> V2.Term.PVar + V1.Pattern.Boolean _ b -> V2.Term.PBoolean b + V1.Pattern.Int _ i -> V2.Term.PInt i + V1.Pattern.Nat _ n -> V2.Term.PNat n + V1.Pattern.Float _ d -> V2.Term.PFloat d + V1.Pattern.Text _ t -> V2.Term.PText t + V1.Pattern.Char _ c -> V2.Term.PChar c + V1.Pattern.Constructor _ r i ps -> + V2.Term.PConstructor (reference1to2 r) i (goPat <$> ps) + V1.Pattern.As _ p -> V2.Term.PAs (goPat p) + V1.Pattern.EffectPure _ p -> V2.Term.PEffectPure (goPat p) + V1.Pattern.EffectBind _ r i ps k -> + V2.Term.PEffectBind (reference1to2 r) 
i (goPat <$> ps) (goPat k) + V1.Pattern.SequenceLiteral _ ps -> V2.Term.PSequenceLiteral (goPat <$> ps) + V1.Pattern.SequenceOp _ p op p2 -> + V2.Term.PSequenceOp (goPat p) (goSeqOp op) (goPat p2) + goSeqOp = \case + V1.Pattern.Cons -> V2.Term.PCons + V1.Pattern.Snoc -> V2.Term.PSnoc + V1.Pattern.Concat -> V2.Term.PConcat term2to1 :: forall m. Monad m => Hash -> (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Term.Term V2.Symbol -> m (V1.Term.Term V1.Symbol Ann) term2to1 h lookupSize lookupCT tm = @@ -91,25 +142,25 @@ term2to1 h lookupSize lookupCT tm = V2.Term.MatchCase pat cond body -> V1.Term.MatchCase <$> (goPat pat) <*> pure cond <*> pure body goPat = \case - V2.Term.PUnbound -> pure $ P.Unbound a - V2.Term.PVar -> pure $ P.Var a - V2.Term.PBoolean b -> pure $ P.Boolean a b - V2.Term.PInt i -> pure $ P.Int a i - V2.Term.PNat n -> pure $ P.Nat a n - V2.Term.PFloat d -> pure $ P.Float a d - V2.Term.PText t -> pure $ P.Text a t - V2.Term.PChar c -> pure $ P.Char a c + V2.Term.PUnbound -> pure $ V1.Pattern.Unbound a + V2.Term.PVar -> pure $ V1.Pattern.Var a + V2.Term.PBoolean b -> pure $ V1.Pattern.Boolean a b + V2.Term.PInt i -> pure $ V1.Pattern.Int a i + V2.Term.PNat n -> pure $ V1.Pattern.Nat a n + V2.Term.PFloat d -> pure $ V1.Pattern.Float a d + V2.Term.PText t -> pure $ V1.Pattern.Text a t + V2.Term.PChar c -> pure $ V1.Pattern.Char a c V2.Term.PConstructor r i ps -> - P.Constructor a <$> reference2to1 lookupSize r <*> pure i <*> (traverse goPat ps) - V2.Term.PAs p -> P.As a <$> goPat p - V2.Term.PEffectPure p -> P.EffectPure a <$> goPat p - V2.Term.PEffectBind r i ps p -> P.EffectBind a <$> reference2to1 lookupSize r <*> pure i <*> traverse goPat ps <*> goPat p - V2.Term.PSequenceLiteral ps -> P.SequenceLiteral a <$> traverse goPat ps - V2.Term.PSequenceOp p1 op p2 -> P.SequenceOp a <$> goPat p1 <*> pure (goOp op) <*> goPat p2 + V1.Pattern.Constructor a <$> reference2to1 lookupSize r <*> pure i <*> (traverse goPat ps) + 
V2.Term.PAs p -> V1.Pattern.As a <$> goPat p + V2.Term.PEffectPure p -> V1.Pattern.EffectPure a <$> goPat p + V2.Term.PEffectBind r i ps p -> V1.Pattern.EffectBind a <$> reference2to1 lookupSize r <*> pure i <*> traverse goPat ps <*> goPat p + V2.Term.PSequenceLiteral ps -> V1.Pattern.SequenceLiteral a <$> traverse goPat ps + V2.Term.PSequenceOp p1 op p2 -> V1.Pattern.SequenceOp a <$> goPat p1 <*> pure (goOp op) <*> goPat p2 goOp = \case - V2.Term.PCons -> P.Cons - V2.Term.PSnoc -> P.Snoc - V2.Term.PConcat -> P.Concat + V2.Term.PCons -> V1.Pattern.Cons + V2.Term.PSnoc -> V1.Pattern.Snoc + V2.Term.PConcat -> V1.Pattern.Concat a = Ann.External decl2to1 :: Monad m => Hash -> (Hash -> m V1.Reference.Size) -> V2.Decl.Decl V2.Symbol -> m (V1.Decl.Decl V1.Symbol Ann) From ac7f23e3fc67c507b63cc2f0846b023b183f5931 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 29 Oct 2020 00:10:58 -0400 Subject: [PATCH 038/225] finished up saveWatch, loadWatch, listWatches --- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 2 +- .../U/Codebase/Sqlite/LocalIds.hs | 19 +- .../U/Codebase/Sqlite/Operations.hs | 216 ++++++++++++++++-- .../U/Codebase/Sqlite/Queries.hs | 54 ++++- .../U/Codebase/Sqlite/Reference.hs | 8 +- .../U/Codebase/Sqlite/Serialization.hs | 177 +++++++------- .../U/Codebase/Sqlite/Term/Format.hs | 6 +- codebase2/codebase-sqlite/sql/create.sql | 22 +- codebase2/codebase/U/Codebase/WatchKind.hs | 2 +- .../src/Unison/Codebase/SqliteCodebase.hs | 71 +++--- .../Codebase/SqliteCodebase/Conversions.hs | 15 ++ .../unison-parser-typechecker.cabal | 3 +- 12 files changed, 436 insertions(+), 159 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 885d46ec54..2616a2f527 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -15,7 +15,7 @@ import Data.Bits (Bits) newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Show) 
deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 newtype TextId = TextId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 -newtype HashId = HashId Word64 deriving (Eq, Ord, Show) deriving (Hashable, FromField, ToField) via Word64 +newtype HashId = HashId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 newtype PatchId = PatchId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index d529e7b3f7..21e47a76cf 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -2,9 +2,22 @@ module U.Codebase.Sqlite.LocalIds where import Data.Vector (Vector) import U.Codebase.Sqlite.DbId +import Data.Bitraversable (Bitraversable(bitraverse)) +import Data.Bifoldable (Bifoldable(bifoldMap)) +import Data.Bifunctor (Bifunctor(bimap)) -- |A mapping between index ids that are local to an object and the ids in the database -data LocalIds = LocalIds - { textLookup :: Vector TextId, - objectLookup :: Vector ObjectId +data LocalIds' t h = LocalIds + { textLookup :: Vector t, + defnLookup :: Vector h } + +type LocalIds = LocalIds' TextId ObjectId +type WatchLocalIds = LocalIds' TextId HashId + +instance Bitraversable LocalIds' where + bitraverse f g (LocalIds t d) = LocalIds <$> traverse f t <*> traverse g d +instance Bifoldable LocalIds' where + bifoldMap f g (LocalIds t d) = foldMap f t <> foldMap g d +instance Bifunctor LocalIds' where + bimap f g (LocalIds t d) = LocalIds (f <$> t) (g <$> d) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 8914509c0e..57d750073b 100644 
--- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -7,14 +7,25 @@ module U.Codebase.Sqlite.Operations where +import Control.Lens (Lens') +import qualified Control.Lens as Lens import Control.Monad (join) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) +import Control.Monad.State (MonadState, evalStateT) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) +import Control.Monad.Writer (MonadWriter, runWriterT) +import qualified Control.Monad.Writer as Writer import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) import Data.ByteString (ByteString) import Data.Bytes.Get (runGetS) -import Data.Functor ((<&>)) +import qualified Data.Foldable as Foldable +import Data.Functor (void, (<&>)) +import Data.Map (Map) +import qualified Data.Map as Map +import Data.Maybe (isJust) +import Data.Sequence (Seq) +import qualified Data.Sequence as Seq import Data.Set (Set) import qualified Data.Set as Set import Data.Text (Text) @@ -29,7 +40,7 @@ import qualified U.Codebase.Referent as C.Referent import U.Codebase.ShortHash (ShortBranchHash) import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl -import U.Codebase.Sqlite.LocalIds (LocalIds) +import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), WatchLocalIds) import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT import U.Codebase.Sqlite.Queries (DB) @@ -43,12 +54,12 @@ import qualified U.Codebase.Term as C import qualified U.Codebase.Term as C.Term import qualified U.Codebase.Type as C.Type import U.Codebase.WatchKind (WatchKind) +import qualified U.Core.ABT as ABT import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get, getFromBytes) import qualified U.Util.Serialization as S -import 
Data.Maybe (isJust) type Err m = MonadError Error m @@ -124,6 +135,9 @@ objectExistsForHash h = isJust <$> runMaybeT (hashToObjectId h) loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash loadHashByObjectId = fmap H.fromBase32Hex . liftQ . Q.loadPrimaryHashByObjectId +loadHashByHashId :: EDB m => Db.HashId -> m H.Hash +loadHashByHashId = fmap H.fromBase32Hex . liftQ . Q.loadHashById + decodeComponentLengthOnly :: Err m => ByteString -> m Word64 decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen S.lengthFramedArray @@ -152,14 +166,24 @@ getDeclTypeByReference r@(C.Reference.Id h pos) = -- * meat and veggies loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) -loadTermByReference (C.Reference.Id h i) = do - -- retrieve and deserialize the blob - (localIds, term) <- - hashToObjectId h >>= liftQ . Q.loadObjectById >>= decodeTermElement i +loadTermByReference (C.Reference.Id h i) = + hashToObjectId h + >>= liftQ . Q.loadObjectById + -- retrieve and deserialize the blob + >>= decodeTermElement i + >>= uncurry s2cTerm + +s2cTerm :: EDB m => LocalIds -> S.Term.Term -> MaybeT m (C.Term Symbol) +s2cTerm = x2cTerm loadTextById loadHashByObjectId + +w2cTerm :: EDB m => WatchLocalIds -> S.Term.Term -> MaybeT m (C.Term Symbol) +w2cTerm = x2cTerm loadTextById loadHashByHashId +x2cTerm :: Monad m => (t -> m Text) -> (d -> m H.Hash) -> LocalIds' t d -> S.Term.Term -> m (C.Term Symbol) +x2cTerm loadText loadHash localIds term = do -- look up the text and hashes that are used by the term - texts <- traverse loadTextById $ LocalIds.textLookup localIds - hashes <- traverse loadHashByObjectId $ LocalIds.objectLookup localIds + texts <- traverse loadText $ LocalIds.textLookup localIds + hashes <- traverse loadHash $ LocalIds.defnLookup localIds -- substitute the text and hashes back into the term let substText (S.Term.LocalTextId w) = texts Vector.! 
fromIntegral w @@ -170,6 +194,154 @@ loadTermByReference (C.Reference.Id h i) = do substTypeLink = substTypeRef pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) +c2xTerm :: forall m t d. Monad m => (Text -> m t) -> (H.Hash -> m d) -> C.Term Symbol -> m (LocalIds' t d, S.Term.Term) +c2xTerm saveText saveDefn tm = + done =<< (runWriterT . flip evalStateT mempty) (ABT.transformM go tm) + where + go :: forall m a. (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text S.Term.LocalTextId, Map H.Hash S.Term.LocalDefnId) m) => C.Term.F Symbol a -> m (S.Term.F a) + go = \case + C.Term.Int n -> pure $ C.Term.Int n + C.Term.Nat n -> pure $ C.Term.Nat n + C.Term.Float n -> pure $ C.Term.Float n + C.Term.Boolean b -> pure $ C.Term.Boolean b + C.Term.Text t -> C.Term.Text <$> lookupText t + C.Term.Char ch -> pure $ C.Term.Char ch + C.Term.Ref r -> + C.Term.Ref <$> bitraverse lookupText (traverse lookupDefn) r + C.Term.Constructor typeRef cid -> + C.Term.Constructor + <$> bitraverse lookupText lookupDefn typeRef + <*> pure cid + C.Term.Request typeRef cid -> + C.Term.Request <$> bitraverse lookupText lookupDefn typeRef <*> pure cid + C.Term.Handle a a2 -> pure $ C.Term.Handle a a2 + C.Term.App a a2 -> pure $ C.Term.App a a2 + C.Term.Ann a typ -> C.Term.Ann a <$> ABT.transformM goType typ + C.Term.Sequence as -> pure $ C.Term.Sequence as + C.Term.If c t f -> pure $ C.Term.If c t f + C.Term.And a a2 -> pure $ C.Term.And a a2 + C.Term.Or a a2 -> pure $ C.Term.Or a a2 + C.Term.Lam a -> pure $ C.Term.Lam a + C.Term.LetRec bs a -> pure $ C.Term.LetRec bs a + C.Term.Let a a2 -> pure $ C.Term.Let a a2 + C.Term.Match a cs -> C.Term.Match a <$> traverse goCase cs + C.Term.TermLink r -> + C.Term.TermLink + <$> bitraverse + (bitraverse lookupText (traverse lookupDefn)) + (bitraverse lookupText lookupDefn) + r + C.Term.TypeLink r -> + C.Term.TypeLink <$> bitraverse lookupText lookupDefn r + goType :: + forall m a. 
+ (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text S.Term.LocalTextId, Map H.Hash S.Term.LocalDefnId) m) => + C.Type.FT a -> + m (S.Term.FT a) + goType = \case + C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText lookupDefn r + C.Type.Arrow i o -> pure $ C.Type.Arrow i o + C.Type.Ann a k -> pure $ C.Type.Ann a k + C.Type.App f a -> pure $ C.Type.App f a + C.Type.Effect e a -> pure $ C.Type.Effect e a + C.Type.Effects es -> pure $ C.Type.Effects es + C.Type.Forall a -> pure $ C.Type.Forall a + C.Type.IntroOuter a -> pure $ C.Type.IntroOuter a + goCase :: + forall m w s a. + ( MonadState s m, + MonadWriter w m, + Lens.Field1 s s (Map Text S.Term.LocalTextId) (Map Text S.Term.LocalTextId), + Lens.Field1 w w (Seq Text) (Seq Text), + Lens.Field2 s s (Map H.Hash S.Term.LocalDefnId) (Map H.Hash S.Term.LocalDefnId), + Lens.Field2 w w (Seq H.Hash) (Seq H.Hash) + ) => + C.Term.MatchCase Text C.Term.TypeRef a -> + m (C.Term.MatchCase S.Term.LocalTextId S.Term.TypeRef a) + goCase = \case + C.Term.MatchCase pat guard body -> + C.Term.MatchCase <$> goPat pat <*> pure guard <*> pure body + goPat :: + forall m s w. 
+ ( MonadState s m, + MonadWriter w m, + Lens.Field1 s s (Map Text S.Term.LocalTextId) (Map Text S.Term.LocalTextId), + Lens.Field1 w w (Seq Text) (Seq Text), + Lens.Field2 s s (Map H.Hash S.Term.LocalDefnId) (Map H.Hash S.Term.LocalDefnId), + Lens.Field2 w w (Seq H.Hash) (Seq H.Hash) + ) => + C.Term.Pattern Text C.Term.TypeRef -> + m (C.Term.Pattern S.Term.LocalTextId S.Term.TypeRef) + goPat = \case + C.Term.PUnbound -> pure $ C.Term.PUnbound + C.Term.PVar -> pure $ C.Term.PVar + C.Term.PBoolean b -> pure $ C.Term.PBoolean b + C.Term.PInt i -> pure $ C.Term.PInt i + C.Term.PNat n -> pure $ C.Term.PNat n + C.Term.PFloat d -> pure $ C.Term.PFloat d + C.Term.PText t -> C.Term.PText <$> lookupText t + C.Term.PChar c -> pure $ C.Term.PChar c + C.Term.PConstructor r i ps -> C.Term.PConstructor <$> bitraverse lookupText lookupDefn r <*> pure i <*> traverse goPat ps + C.Term.PAs p -> C.Term.PAs <$> goPat p + C.Term.PEffectPure p -> C.Term.PEffectPure <$> goPat p + C.Term.PEffectBind r i bindings k -> C.Term.PEffectBind <$> bitraverse lookupText lookupDefn r <*> pure i <*> traverse goPat bindings <*> goPat k + C.Term.PSequenceLiteral ps -> C.Term.PSequenceLiteral <$> traverse goPat ps + C.Term.PSequenceOp l op r -> C.Term.PSequenceOp <$> goPat l <*> pure op <*> goPat r + lookupText :: + forall m s w t. + ( MonadState s m, + MonadWriter w m, + Lens.Field1 s s (Map t S.Term.LocalTextId) (Map t S.Term.LocalTextId), + Lens.Field1 w w (Seq t) (Seq t), + Ord t + ) => + t -> + m S.Term.LocalTextId + lookupText = lookup Lens._1 Lens._1 S.Term.LocalTextId + lookupDefn :: + forall m s w d. + ( MonadState s m, + MonadWriter w m, + Lens.Field2 s s (Map d S.Term.LocalDefnId) (Map d S.Term.LocalDefnId), + Lens.Field2 w w (Seq d) (Seq d), + Ord d + ) => + d -> + m S.Term.LocalDefnId + lookupDefn = lookup Lens._2 Lens._2 S.Term.LocalDefnId + lookup :: + forall m s w t t'. 
+ (MonadState s m, MonadWriter w m, Ord t) => + Lens' s (Map t t') -> + Lens' w (Seq t) -> + (Word64 -> t') -> + t -> + m t' + lookup stateLens writerLens mk t = do + map <- Lens.use stateLens + case Map.lookup t map of + Nothing -> do + let id = mk . fromIntegral $ Map.size map + stateLens Lens.%= Map.insert t id + Writer.tell $ Lens.set writerLens (Seq.singleton t) mempty + pure id + Just t' -> pure t' + done :: (S.Term.Term, (Seq Text, Seq H.Hash)) -> m (LocalIds' t d, S.Term.Term) + done (tm, (localTextValues, localDefnValues)) = do + textIds <- traverse saveText localTextValues + defnIds <- traverse saveDefn localDefnValues + let ids = + LocalIds + (Vector.fromList (Foldable.toList textIds)) + (Vector.fromList (Foldable.toList defnIds)) + pure (ids, void tm) + +c2wTerm :: DB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) +c2wTerm = c2xTerm Q.saveText Q.saveHashHash + +c2sTerm :: EDB m => C.Term Symbol -> MaybeT m (LocalIds, S.Term.Term) +c2sTerm = c2xTerm Q.saveText hashToObjectId + loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) loadTypeOfTermByTermReference r = do -- convert query reference by looking up db ids @@ -189,7 +361,7 @@ loadDeclByReference (C.Reference.Id h i) = do -- look up the text and hashes that are used by the term texts <- traverse loadTextById $ LocalIds.textLookup localIds - hashes <- traverse loadHashByObjectId $ LocalIds.objectLookup localIds + hashes <- traverse loadHashByObjectId $ LocalIds.defnLookup localIds -- substitute the text and hashes back into the term let substText (S.Decl.LocalTextId w) = texts Vector.! 
fromIntegral w @@ -204,14 +376,22 @@ saveTermComponent = error "todo" saveDeclComponent :: DB m => H.Hash -> [C.Decl Symbol] -> m () saveDeclComponent = error "todo" -listWatches :: DB m => WatchKind -> m [C.Reference.Id] -listWatches = error "todo" - -loadWatch :: DB m => WatchKind -> C.Reference.Id -> m (Maybe (C.Term Symbol)) -loadWatch = error "todo" - -saveWatch :: DB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () -saveWatch = error "todo" +listWatches :: EDB m => WatchKind -> m [C.Reference.Id] +listWatches k = Q.loadWatchesByWatchKind k >>= traverse s2cReferenceId + +loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) +loadWatch k r = + C.Reference.idH Q.saveHashHash r + >>= MaybeT . Q.loadWatch k + >>= MaybeT . pure . getFromBytes (S.getPair S.getLocalIds S.getTermElement) + >>= uncurry s2cTerm + +saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () +saveWatch w r t = do + rs <- C.Reference.idH Q.saveHashHash r + wterm <- c2wTerm t + let bytes = S.putBytes (S.putPair S.putLocalIds S.putTermElement) wterm + Q.saveWatch w rs bytes termsHavingType :: DB m => C.Reference -> m (Set C.Referent.Id) termsHavingType = error "todo" diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 8df9648eea..1f02fb2ad8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -23,7 +23,7 @@ import Data.Text (Text) import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple (SQLData, (:.) (..), Connection, FromRow, Only (..), ToRow (..)) import Database.SQLite.Simple.FromField ( FromField ) -import Database.SQLite.Simple.ToField ( ToField ) +import Database.SQLite.Simple.ToField ( ToField(..) 
) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent import U.Codebase.Sqlite.ObjectType ( ObjectType ) @@ -35,6 +35,8 @@ import U.Codebase.Sqlite.DbId ( HashId(..), ObjectId(..), TextId ) import U.Codebase.Reference (Reference') import Control.Monad.Trans.Maybe (runMaybeT, MaybeT(MaybeT)) import Control.Monad.Except (runExceptT, ExceptT, throwError, MonadError) +import U.Codebase.WatchKind (WatchKind) +import qualified U.Codebase.WatchKind as WatchKind -- * types type DB m = (MonadIO m, MonadReader Connection m) @@ -46,6 +48,8 @@ data Integrity | UnknownObjectId ObjectId | UnknownCausalOldHashId CausalOldHashId | NoObjectForHashId HashId + | NoNamespaceRoot + | MultipleNamespaceRoots [CausalHashId] deriving Show -- |discard errors that you're sure are impossible @@ -215,19 +219,58 @@ loadCausalParents h = queryList sql (Only h) where sql = [here| SELECT parent_id FROM causal_parent WHERE causal_id = ? |] +loadNamespaceRoot :: EDB m => m CausalHashId +loadNamespaceRoot = queryList sql () >>= \case + [] -> throwError NoNamespaceRoot + [id] -> pure id + ids -> throwError (MultipleNamespaceRoots ids) + where sql = "SELECT causal_id FROM namespace_root" + +setNamespaceRoot :: DB m => CausalHashId -> m () +setNamespaceRoot id = execute sql (Only id) where sql = [here| + INSERT OR REPLACE INTO namespace_root VALUES (?) +|] + saveTypeOfTerm :: DB m => Reference.Id -> ByteString -> m () saveTypeOfTerm r blob = execute sql (r :. Only blob) where sql = [here| INSERT OR IGNORE INTO type_of_term VALUES (?, ?, ?) |] --- possible application error to return Nothing loadTypeOfTerm :: DB m => Reference.Id -> m (Maybe ByteString) loadTypeOfTerm r = queryOnly sql r where sql = [here| SELECT bytes FROM type_of_term WHERE object_id = ? AND component_index = ? |] +-- +saveWatch :: DB m => WatchKind -> Reference.IdH -> ByteString -> m () +saveWatch k r blob = execute sql (r :. Only blob) >> execute sql2 (r :. 
Only k) + where + sql = [here| + INSERT OR IGNORE + INTO watch_result (hash_id, component_index, result) + VALUES (?, ?, ?) + |] + sql2 = [here| + INSERT OR IGNORE + INTO watch (hash_id, component_index, watch_kind_id) + VALUES (?, ?, ?) + |] + +loadWatch :: DB m => WatchKind -> Reference.IdH -> m (Maybe ByteString) +loadWatch k r = queryOnly sql (Only k :. r) where sql = [here| + SELECT bytes FROM watch + WHERE watch_kind_id = ? + AND hash_id = ? + AND component_index = ? + |] + +loadWatchesByWatchKind :: DB m => WatchKind -> m [Reference.Id] +loadWatchesByWatchKind k = query sql (Only k) where sql = [here| + SELECT object_id, component_index FROM watch WHERE watch_kind_id = ? +|] + -- * Index-building addToTypeIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeIndex tp tm = execute sql (tp :. tm) where sql = [here| @@ -307,7 +350,7 @@ queryOne = fmap fromJust queryExists :: (DB m, ToRow q) => SQLite.Query -> q -> m Bool queryExists q r = not . null . map (id @SQLData) <$> queryList q r -query :: (DB m, ToRow q, SQLite.FromRow r) => SQLite.Query -> q -> m [r] +query :: (DB m, ToRow q, FromRow r) => SQLite.Query -> q -> m [r] query q r = do c <- ask; liftIO $ SQLite.query c q r execute :: (DB m, ToRow q) => SQLite.Query -> q -> m () execute q r = do c <- ask; liftIO $ SQLite.execute c q r @@ -319,3 +362,8 @@ headMay (a:_) = Just a -- * orphan instances deriving via Text instance ToField Base32Hex deriving via Text instance FromField Base32Hex + +instance ToField WatchKind where + toField = \case + WatchKind.RegularWatch -> SQLite.SQLInteger 0 + WatchKind.TestWatch -> SQLite.SQLInteger 1 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index a7971d739a..22ce195e8c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -8,11 +8,14 @@ import U.Codebase.Sqlite.DbId import 
U.Codebase.Reference (Reference'(ReferenceBuiltin, ReferenceDerived), Id'(Id)) import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(toRow)) import Database.SQLite.Simple.FromRow (FromRow(fromRow), field) +import Database.SQLite.Simple.ToField (ToField) +import Database.SQLite.Simple.FromField (FromField) type Reference = Reference' TextId ObjectId type Id = Id' ObjectId type ReferenceH = Reference' TextId HashId +type IdH = Id' HashId -- * Orphan instances instance ToRow (Reference' TextId HashId) where @@ -27,10 +30,9 @@ instance ToRow (Reference' TextId ObjectId) where ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] ReferenceDerived (Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) -instance ToRow Id where - -- | builtinId, hashId, componentIndex +instance ToField h => ToRow (Id' h) where toRow = \case Id h i -> toRow (Only h) ++ toRow (Only i) -instance FromRow Id where +instance FromField h => FromRow (Id' h) where fromRow = Id <$> field <*> field diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 30edf6253e..0f1795dd17 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -117,10 +117,10 @@ put/get/write/read - [ ] add to type mentions index -} -putLocalIds :: MonadPut m => LocalIds -> m () +putLocalIds :: (MonadPut m, Integral t, Bits t, Integral d, Bits d) => LocalIds' t d -> m () putLocalIds LocalIds {..} = do putFoldable putVarInt textLookup - putFoldable putVarInt objectLookup + putFoldable putVarInt defnLookup getLocalIds :: MonadGet m => m LocalIds getLocalIds = @@ -149,94 +149,93 @@ putTermComponent :: m () putTermComponent (TermFormat.LocallyIndexedComponent v) = putFramedArray (putPair putLocalIds putTermElement) v + +putTermElement :: MonadPut m => TermFormat.Term -> m () +putTermElement = putABT putSymbol putUnit putF where - putF :: MonadPut m => 
(a -> m ()) -> TermFormat.F a -> m () - putF putChild = \case - Term.Int n -> - putWord8 0 *> putInt n - Term.Nat n -> - putWord8 1 *> putNat n - Term.Float n -> - putWord8 2 *> putFloat n - Term.Boolean b -> - putWord8 3 *> putBoolean b - Term.Text t -> - putWord8 4 *> putVarInt t - Term.Ref r -> - putWord8 5 *> putRecursiveReference r - Term.Constructor r cid -> - putWord8 6 *> putReference r *> putVarInt cid - Term.Request r cid -> - putWord8 7 *> putReference r *> putVarInt cid - Term.Handle h a -> - putWord8 8 *> putChild h *> putChild a - Term.App f arg -> - putWord8 9 *> putChild f *> putChild arg - Term.Ann e t -> - putWord8 10 *> putChild e *> putType putReference putSymbol t - Term.Sequence vs -> - putWord8 11 *> putFoldable putChild vs - Term.If cond t f -> - putWord8 12 *> putChild cond *> putChild t *> putChild f - Term.And x y -> - putWord8 13 *> putChild x *> putChild y - Term.Or x y -> - putWord8 14 *> putChild x *> putChild y - Term.Lam body -> - putWord8 15 *> putChild body - Term.LetRec bs body -> - putWord8 16 *> putFoldable putChild bs *> putChild body - Term.Let b body -> - putWord8 17 *> putChild b *> putChild body - Term.Match s cases -> - putWord8 18 *> putChild s *> putFoldable (putMatchCase putChild) cases - Term.Char c -> - putWord8 19 *> putChar c - Term.TermLink r -> - putWord8 20 *> putReferent putRecursiveReference putReference r - Term.TypeLink r -> - putWord8 21 *> putReference r - putTermElement :: MonadPut m => TermFormat.Term -> m () - putTermElement = putABT putSymbol putUnit putF - putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase TermFormat.LocalTextId TermFormat.TypeRef a -> m () - putMatchCase putChild (Term.MatchCase pat guard body) = - putPattern pat *> putMaybe putChild guard *> putChild body - where - putPattern :: MonadPut m => Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef -> m () - putPattern p = case p of - Term.PUnbound -> putWord8 0 - Term.PVar -> putWord8 1 - Term.PBoolean b -> putWord8 2 *> 
putBoolean b - Term.PInt n -> putWord8 3 *> putInt n - Term.PNat n -> putWord8 4 *> putNat n - Term.PFloat n -> putWord8 5 *> putFloat n - Term.PConstructor r cid ps -> - putWord8 6 - *> putReference r - *> putVarInt cid - *> putFoldable putPattern ps - Term.PAs p -> putWord8 7 *> putPattern p - Term.PEffectPure p -> putWord8 8 *> putPattern p - Term.PEffectBind r cid args k -> - putWord8 9 - *> putReference r - *> putVarInt cid - *> putFoldable putPattern args - *> putPattern k - Term.PSequenceLiteral ps -> - putWord8 10 *> putFoldable putPattern ps - Term.PSequenceOp l op r -> - putWord8 11 - *> putPattern l - *> putSeqOp op - *> putPattern r - Term.PText t -> putWord8 12 *> putVarInt t - Term.PChar c -> putWord8 13 *> putChar c - where - putSeqOp :: MonadPut m => Term.SeqOp -> m () - putSeqOp Term.PCons = putWord8 0 - putSeqOp Term.PSnoc = putWord8 1 - putSeqOp Term.PConcat = putWord8 2 + putF :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () + putF putChild = \case + Term.Int n -> + putWord8 0 *> putInt n + Term.Nat n -> + putWord8 1 *> putNat n + Term.Float n -> + putWord8 2 *> putFloat n + Term.Boolean b -> + putWord8 3 *> putBoolean b + Term.Text t -> + putWord8 4 *> putVarInt t + Term.Ref r -> + putWord8 5 *> putRecursiveReference r + Term.Constructor r cid -> + putWord8 6 *> putReference r *> putVarInt cid + Term.Request r cid -> + putWord8 7 *> putReference r *> putVarInt cid + Term.Handle h a -> + putWord8 8 *> putChild h *> putChild a + Term.App f arg -> + putWord8 9 *> putChild f *> putChild arg + Term.Ann e t -> + putWord8 10 *> putChild e *> putType putReference putSymbol t + Term.Sequence vs -> + putWord8 11 *> putFoldable putChild vs + Term.If cond t f -> + putWord8 12 *> putChild cond *> putChild t *> putChild f + Term.And x y -> + putWord8 13 *> putChild x *> putChild y + Term.Or x y -> + putWord8 14 *> putChild x *> putChild y + Term.Lam body -> + putWord8 15 *> putChild body + Term.LetRec bs body -> + putWord8 16 *> putFoldable putChild bs 
*> putChild body + Term.Let b body -> + putWord8 17 *> putChild b *> putChild body + Term.Match s cases -> + putWord8 18 *> putChild s *> putFoldable (putMatchCase putChild) cases + Term.Char c -> + putWord8 19 *> putChar c + Term.TermLink r -> + putWord8 20 *> putReferent putRecursiveReference putReference r + Term.TypeLink r -> + putWord8 21 *> putReference r + putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase TermFormat.LocalTextId TermFormat.TypeRef a -> m () + putMatchCase putChild (Term.MatchCase pat guard body) = + putPattern pat *> putMaybe putChild guard *> putChild body + putPattern :: MonadPut m => Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef -> m () + putPattern p = case p of + Term.PUnbound -> putWord8 0 + Term.PVar -> putWord8 1 + Term.PBoolean b -> putWord8 2 *> putBoolean b + Term.PInt n -> putWord8 3 *> putInt n + Term.PNat n -> putWord8 4 *> putNat n + Term.PFloat n -> putWord8 5 *> putFloat n + Term.PConstructor r cid ps -> + putWord8 6 + *> putReference r + *> putVarInt cid + *> putFoldable putPattern ps + Term.PAs p -> putWord8 7 *> putPattern p + Term.PEffectPure p -> putWord8 8 *> putPattern p + Term.PEffectBind r cid args k -> + putWord8 9 + *> putReference r + *> putVarInt cid + *> putFoldable putPattern args + *> putPattern k + Term.PSequenceLiteral ps -> + putWord8 10 *> putFoldable putPattern ps + Term.PSequenceOp l op r -> + putWord8 11 + *> putPattern l + *> putSeqOp op + *> putPattern r + Term.PText t -> putWord8 12 *> putVarInt t + Term.PChar c -> putWord8 13 *> putChar c + putSeqOp :: MonadPut m => Term.SeqOp -> m () + putSeqOp Term.PCons = putWord8 0 + putSeqOp Term.PSnoc = putWord8 1 + putSeqOp Term.PConcat = putWord8 2 getTermComponent :: MonadGet m => m TermFormat.LocallyIndexedComponent getTermComponent = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 63d290ed90..d448b980c2 100644 --- 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -13,6 +13,7 @@ import qualified U.Codebase.Term as Term import qualified U.Core.ABT as ABT import qualified U.Codebase.Type as Type import qualified U.Codebase.Sqlite.Reference as Sqlite +import U.Codebase.Sqlite.DbId (ObjectId, TextId) newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 @@ -24,8 +25,9 @@ type TypeRef = Reference' LocalTextId LocalDefnId type TermLink = Referent' TermRef TypeRef type TypeLink = TypeRef -newtype LocallyIndexedComponent = - LocallyIndexedComponent (Vector (LocalIds, Term)) +type LocallyIndexedComponent = LocallyIndexedComponent' TextId ObjectId +newtype LocallyIndexedComponent' t d = + LocallyIndexedComponent (Vector (LocalIds' t d, Term)) type F = Term.F' LocalTextId TermRef TypeRef TermLink TypeLink Symbol diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 0b7e5330f2..521681ab15 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -65,6 +65,12 @@ CREATE TABLE causal ( -- causalHash : Hash = hash(new Causal(valueHash, parentCausalHashes)) -- db.saveCausal(selfHash = causalHash, valueHash, parentCausalHashes) +CREATE TABLE namespace_root ( + -- a dummy pk because + -- id INTEGER PRIMARY KEY NOT NULL, + causal_id INTEGER PRIMARY KEY NOT NULL REFERENCES causal(self_hash_id) +); + CREATE TABLE causal_parent ( id INTEGER PRIMARY KEY NOT NULL, causal_id INTEGER NOT NULL REFERENCES causal(self_hash_id), @@ -87,19 +93,21 @@ CREATE TABLE type_of_term ( PRIMARY KEY (object_id, component_index) ); -CREATE TABLE watch ( - object_id INTEGER NOT NULL REFERENCES object(id), +CREATE TABLE watch_result ( + hash_id INTEGER NOT NULL REFERENCES object(id), component_index 
INTEGER NOT NULL, result BLOB NOT NULL, - PRIMARY KEY (object_id, component_index) + PRIMARY KEY (hash_id, component_index) ); -CREATE TABLE watch_kind ( - object_id INTEGER NOT NULL REFERENCES object(id), +CREATE TABLE watch ( + hash_id INTEGER NOT NULL REFERENCES object(id), component_index INTEGER NOT NULL, - description_id INTEGER NOT NULL REFERENCES watch_kind_description(id), - PRIMARY KEY (object_id, component_index, watch_kind_id) + watch_kind_id INTEGER NOT NULL REFERENCES watch_kind_description(id), + PRIMARY KEY (hash_id, component_index, watch_kind_id) ); +CREATE INDEX watch_kind ON watch(watch_kind_id); + CREATE TABLE watch_kind_description ( id PRIMARY KEY INTEGER UNIQUE NOT NULL, description TEXT UNIQUE NOT NULL diff --git a/codebase2/codebase/U/Codebase/WatchKind.hs b/codebase2/codebase/U/Codebase/WatchKind.hs index 13df429b13..ba5bc4080e 100644 --- a/codebase2/codebase/U/Codebase/WatchKind.hs +++ b/codebase2/codebase/U/Codebase/WatchKind.hs @@ -1,3 +1,3 @@ module U.Codebase.WatchKind where -data WatchKind = RegularWatch | TestWatch deriving (Eq, Ord, Show) \ No newline at end of file +data WatchKind = RegularWatch | TestWatch deriving (Eq, Ord, Show) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index b8f6ef9f1e..e23130f282 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -89,21 +89,7 @@ sqliteCodebase root = do conn :: Sqlite.Connection <- Sqlite.open $ root "v2" "unison.sqlite3" termBuffer :: TVar (Map Hash TermBufferEntry) <- newTVarIO Map.empty declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty - let getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) - putRootBranch :: Branch IO -> IO () - rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) - getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) - dependentsImpl :: Reference -> IO (Set 
Reference.Id) - syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () - syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () - watches :: UF.WatchKind -> IO [Reference.Id] - getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) - putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () - termsOfTypeImpl :: Reference -> IO (Set Referent.Id) - termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) - branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) - - getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) + let getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) getTerm (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = runDB' conn do term2 <- Ops.loadTermByReference (C.Reference.Id h2 i) @@ -167,16 +153,6 @@ sqliteCodebase root = do tryFlushTermBuffer h ) - -- data BufferEntry a = BufferEntry - -- { -- First, you are waiting for the cycle to fill up with all elements - -- -- Then, you check: are all dependencies of the cycle in the db? - -- -- If yes: write yourself to database and trigger check of dependents - -- -- If no: just wait, do nothing - -- beComponentTargetSize :: Maybe Word64, - -- beComponent :: Map Reference.Pos a, - -- beMissingDependencies :: Set Hash, - -- beWaitingDependents :: Set Hash - -- } putBuffer :: (MonadIO m, Show a) => TVar (Map Hash (BufferEntry a)) -> Hash -> BufferEntry a -> m () putBuffer tv h e = do traceM $ "putBuffer " ++ show h ++ " " ++ show e @@ -194,9 +170,6 @@ sqliteCodebase root = do addBufferDependent :: (MonadIO m, Show a) => Hash -> TVar (Map Hash (BufferEntry a)) -> Hash -> m () addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do putBuffer tv dependency be {beWaitingDependents = Set.insert dependent $ beWaitingDependents be} - - -- |if all of the dependencies are in the codebase and this component is - -- complete, then save this component! 
tryFlushBuffer :: (EDB m, Show a) => TVar (Map Hash (BufferEntry a)) -> @@ -219,6 +192,7 @@ sqliteCodebase root = do removeBuffer buf h traverse_ tryWaiting waiting else -- update + putBuffer buf h $ BufferEntry size comp (Set.fromList missing') waiting @@ -263,16 +237,46 @@ sqliteCodebase root = do tryFlushDeclBuffer h ) + getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) getRootBranch = error "todo" + + putRootBranch :: Branch IO -> IO () putRootBranch = error "todo" + + rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) rootBranchUpdates = error "todo" + + getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) getBranchForHash = error "todo" + + dependentsImpl :: Reference -> IO (Set Reference.Id) dependentsImpl = error "todo" + + syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () syncFromDirectory = error "todo" + + syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () syncToDirectory = error "todo" - watches = error "todo" - getWatch = error "todo" - putWatch = error "todo" + + watches :: UF.WatchKind -> IO [Reference.Id] + watches w = + runDB conn $ + Ops.listWatches (Cv.watchKind1to2 w) + >>= traverse (Cv.referenceid2to1 getCycleLen) + + -- getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) + getWatch k r@(Reference.Id h _i _n) = + runDB' conn $ + Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) + >>= Cv.term2to1 h getCycleLen getDeclType + + putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () + putWatch k r@(Reference.Id h _i _n) tm = + runDB conn $ + Ops.saveWatch + (Cv.watchKind1to2 k) + (Cv.referenceid1to2 r) + (Cv.term1to2 h tm) getReflog :: IO [Reflog.Entry] getReflog = @@ -300,7 +304,10 @@ sqliteCodebase root = do reflogPath :: CodebasePath -> FilePath reflogPath root = root "reflog" + termsOfTypeImpl :: Reference -> IO (Set Referent.Id) termsOfTypeImpl = error "todo" + + termsMentioningTypeImpl :: Reference -> IO (Set 
Referent.Id) termsMentioningTypeImpl = error "todo" hashLength :: IO Int @@ -340,7 +347,9 @@ sqliteCodebase root = do ] pure . Set.fromList $ termReferents <> declReferents + branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) branchHashesByPrefix = error "todo" + let finalizer = Sqlite.close conn pure $ ( finalizer, diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 337521c374..708f22b6a4 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -32,6 +32,9 @@ import qualified Unison.Symbol as V1 import qualified Unison.Term as V1.Term import qualified Unison.Type as V1.Type import qualified Unison.Var as Var +import qualified Unison.Var as V1.Var +import qualified U.Codebase.WatchKind as V2 +import qualified U.Codebase.WatchKind as V2.WatchKind decltype2to1 :: V2.Decl.DeclType -> CT.ConstructorType decltype2to1 = \case @@ -43,6 +46,18 @@ decltype1to2 = \case CT.Data -> V2.Decl.Data CT.Effect -> V2.Decl.Effect +watchKind1to2 :: V1.Var.WatchKind -> V2.WatchKind +watchKind1to2 = \case + V1.Var.RegularWatch -> V2.WatchKind.RegularWatch + V1.Var.TestWatch -> V2.WatchKind.TestWatch + other -> error $ "What kind of watchkind is " ++ other ++ "?" 
+ +watchKind2to1 :: V2.WatchKind -> V1.Var.WatchKind +watchKind2to1 = \case + V2.WatchKind.RegularWatch -> V1.Var.RegularWatch + V2.WatchKind.TestWatch -> V1.Var.TestWatch + + term1to2 :: Hash -> V1.Term.Term V1.Symbol Ann -> V2.Term.Term V2.Symbol term1to2 h = V2.ABT.transform termF1to2 diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 3d8eb7ecba..503430fba5 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -248,7 +248,8 @@ library unison-core, unison-codebase, unison-codebase-sqlite, - unison-util + unison-util, + unison-util-serialization ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures From 89f0c72ffe075961357dc1a2fe3adac58eab269c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 29 Oct 2020 09:28:22 -0400 Subject: [PATCH 039/225] add basic finalizer error for nonempty com.buffers --- .../src/Unison/Codebase/SqliteCodebase.hs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index e23130f282..bbcc6398a1 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -78,7 +78,7 @@ data BufferEntry a = BufferEntry beMissingDependencies :: Set Hash, beWaitingDependents :: Set Hash } - deriving (Show) + deriving (Eq, Show) type TermBufferEntry = BufferEntry (Term Symbol Ann, Type Symbol Ann) @@ -350,7 +350,17 @@ sqliteCodebase root = do branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) branchHashesByPrefix = error "todo" - let finalizer = Sqlite.close conn + let finalizer = do + Sqlite.close conn + decls <- readTVarIO declBuffer + terms <- readTVarIO termBuffer + let printBuffer header b = + if b /= mempty + then putStrLn header >> putStrLn "" 
>> print b else pure () + printBuffer "Decls:" decls + printBuffer "Terms:" terms + + pure $ ( finalizer, Codebase1.Codebase From e9ddc18c72fa75724d161f7517c327bed1136bae Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 30 Oct 2020 13:14:56 -0400 Subject: [PATCH 040/225] branchHashesByPrefix --- .../U/Codebase/Sqlite/Branch/Diff.hs | 18 +-- .../U/Codebase/Sqlite/Branch/Format.hs | 6 +- .../U/Codebase/Sqlite/Branch/Full.hs | 12 +- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 42 +++++-- .../U/Codebase/Sqlite/Operations.hs | 19 ++- .../U/Codebase/Sqlite/Patch/Diff.hs | 23 ++-- .../U/Codebase/Sqlite/Queries.hs | 53 ++++---- .../U/Codebase/Sqlite/Serialization.hs | 2 +- codebase2/codebase/U/Codebase/Branch.hs | 9 +- codebase2/codebase/U/Codebase/Codebase.hs | 119 ++++++++---------- codebase2/codebase/U/Codebase/HashTags.hs | 11 ++ codebase2/codebase/U/Codebase/Reflog.hs | 2 +- codebase2/codebase/unison-codebase.cabal | 1 + .../src/Unison/Codebase/SqliteCodebase.hs | 14 ++- .../Codebase/SqliteCodebase/Conversions.hs | 11 +- 15 files changed, 194 insertions(+), 148 deletions(-) create mode 100644 codebase2/codebase/U/Codebase/HashTags.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs index 84fd801497..0dcb13eeb9 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs @@ -2,22 +2,24 @@ module U.Codebase.Sqlite.Branch.Diff where import Data.Map (Map) import Data.Set (Set) -import U.Codebase.Sqlite.DbId -import U.Codebase.Sqlite.Reference -import U.Codebase.Sqlite.Referent -import U.Codebase.Sqlite.Patch.Diff +import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, TextId) +import U.Codebase.Sqlite.Patch.Diff (PatchDiff) +import U.Codebase.Sqlite.Reference (Reference) +import U.Codebase.Sqlite.Referent (Referent) type NameSegment = TextId + type Metadata = Reference -data PatchOp = PatchRemove | 
PatchAdd | PatchEdit PatchDiff -data AddRemove a = AddRemove { add :: Set a, remove :: Set a } + +data PatchOp = PatchRemove | PatchAdd PatchObjectId | PatchEdit PatchDiff + +data AddRemove a = AddRemove {add :: Set a, remove :: Set a} data Diff = Diff - { reference :: BranchId, + { reference :: BranchObjectId, terms :: Map NameSegment (AddRemove Referent), types :: Map NameSegment (AddRemove Reference), termMetadata :: Map NameSegment (Map Referent (AddRemove Metadata)), typeMetadata :: Map NameSegment (Map Reference (AddRemove Metadata)), patches :: Map NameSegment PatchOp } - diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs index 2c86fede8c..d3e89d41fa 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs @@ -3,6 +3,6 @@ module U.Codebase.Sqlite.Branch.Format where import U.Codebase.Sqlite.Branch.Full import U.Codebase.Sqlite.Branch.Diff -data BranchFormat - = Full Branch - | Diff Diff \ No newline at end of file +data BranchFormat + = Full Branch + | Diff Diff diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs index 9e3d1be211..187ee56dd5 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs @@ -1,16 +1,16 @@ module U.Codebase.Sqlite.Branch.Full where import Data.Map (Map) -import U.Codebase.Sqlite.Referent -import U.Codebase.Sqlite.Reference -import U.Codebase.Sqlite.DbId -import U.Codebase.Sqlite.Branch.MetadataSet +import U.Codebase.Sqlite.Branch.MetadataSet (MetadataSetFormat) +import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, TextId) +import U.Codebase.Sqlite.Reference (Reference) +import U.Codebase.Sqlite.Referent (Referent) type NameSegment = TextId data Branch = Branch { terms :: Map NameSegment (Map 
Referent MetadataSetFormat), types :: Map NameSegment (Map Reference MetadataSetFormat), - patches :: Map NameSegment PatchId, - children :: Map NameSegment BranchId + patches :: Map NameSegment PatchObjectId, + children :: Map NameSegment BranchObjectId } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 2616a2f527..f925b6aba4 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -1,22 +1,42 @@ -{-# LANGUAGE UndecidableInstances #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE TypeFamilies #-} -{-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} module U.Codebase.Sqlite.DbId where +import Data.Bits (Bits) +import Data.Word (Word64) import Database.SQLite.Simple.FromField import Database.SQLite.Simple.ToField -import Data.Word (Word64) import U.Util.Hashable (Hashable) -import Data.Bits (Bits) -newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 -newtype TextId = TextId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 -newtype HashId = HashId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 +newtype ObjectId = ObjectId Word64 deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 + +newtype TextId = TextId Word64 deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 + +newtype HashId = HashId Word64 deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) 
via Word64 + +newtype PatchObjectId = PatchObjectId Word64 deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId + +newtype BranchObjectId = BranchObjectId ObjectId deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId + +newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId -newtype PatchId = PatchId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId +newtype CausalHashId = CausalId { unCausalHashId :: HashId } deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId -newtype BranchId = BranchId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 +newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via ObjectId +newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId +newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId +-- newtype CausalHashId = CausalHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId +newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId +newtype NamespaceHashId = NamespaceHashId ObjectId deriving Show deriving (Hashable, FromField, ToField) via ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 57d750073b..db31252e89 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -34,10 +34,11 @@ import Data.Word (Word64) import U.Codebase.Decl (ConstructorId) import 
qualified U.Codebase.Decl as C import qualified U.Codebase.Decl as C.Decl +import U.Codebase.HashTags (CausalHash(..), BranchHash(..)) import qualified U.Codebase.Reference as C import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Referent as C.Referent -import U.Codebase.ShortHash (ShortBranchHash) +import U.Codebase.ShortHash (ShortBranchHash (ShortBranchHash)) import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), WatchLocalIds) @@ -459,11 +460,19 @@ componentByObjectId id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. len - 1]] --- termReferentsByPrefix :: DB m => ShortHash -> m (Set C.Referent.Id) --- termReferentsByPrefix = error "todo" +-- loadBranchById :: DB m => Db.BranchId -> m C.Branch -branchHashesByPrefix :: DB m => ShortBranchHash -> m (Set C.Reference.Id) -branchHashesByPrefix = error "todo" +branchHashesByPrefix :: EDB m => ShortBranchHash -> m (Set BranchHash) +branchHashesByPrefix (ShortBranchHash b32prefix) = do + hashIds <- Q.namespaceHashIdByBase32Prefix b32prefix + b32s <- traverse (liftQ . Q.loadHashById . Db.unBranchHashId) hashIds + pure $ Set.fromList . fmap BranchHash . fmap H.fromBase32Hex $ b32s + +causalHashesByPrefix :: EDB m => ShortBranchHash -> m (Set CausalHash) +causalHashesByPrefix (ShortBranchHash b32prefix) = do + hashIds <- Q.causalHashIdByBase32Prefix b32prefix + b32s <- traverse (liftQ . Q.loadHashById . Db.unCausalHashId) hashIds + pure $ Set.fromList . fmap CausalHash . 
fmap H.fromBase32Hex $ b32s -- | returns a list of known definitions referencing `r` dependents :: EDB m => C.Reference -> MaybeT m (Set C.Reference.Id) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index 8f1cdb48ce..d6deed156f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -1,17 +1,18 @@ module U.Codebase.Sqlite.Patch.Diff where import Data.Map (Map) -import U.Codebase.Sqlite.Referent -import U.Codebase.Sqlite.Reference -import U.Codebase.Sqlite.Patch.TermEdit -import U.Codebase.Sqlite.Patch.TypeEdit -import U.Codebase.Sqlite.DbId import Data.Set (Set) +import U.Codebase.Sqlite.DbId (PatchObjectId) +import U.Codebase.Sqlite.Patch.TermEdit (TermEdit) +import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit) +import U.Codebase.Sqlite.Reference (Reference) +import U.Codebase.Sqlite.Referent (Referent) data PatchDiff = PatchDiff - { reference :: PatchId - , addedTermEdits :: Map Referent TermEdit - , addedTypeEdits :: Map Reference TypeEdit - , removedTermEdits :: Set Referent - , removedTypeEdits :: Set Reference - } deriving (Eq, Ord, Show) + { reference :: PatchObjectId, + addedTermEdits :: Map Referent TermEdit, + addedTypeEdits :: Map Reference TypeEdit, + removedTermEdits :: Set Referent, + removedTypeEdits :: Set Reference + } + deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 1f02fb2ad8..067bba40c8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -13,30 +13,29 @@ module U.Codebase.Sqlite.Queries where +import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad.IO.Class (MonadIO) import Control.Monad.Reader (MonadReader (ask)) -import Data.ByteString (ByteString) import 
Control.Monad.Trans (MonadIO (liftIO)) +import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) +import Data.ByteString (ByteString) import Data.Maybe (fromJust) import Data.String.Here.Uninterpolated (here) import Data.Text (Text) +import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (..), (:.) (..)) import qualified Database.SQLite.Simple as SQLite -import Database.SQLite.Simple (SQLData, (:.) (..), Connection, FromRow, Only (..), ToRow (..)) -import Database.SQLite.Simple.FromField ( FromField ) -import Database.SQLite.Simple.ToField ( ToField(..) ) +import Database.SQLite.Simple.FromField (FromField) +import Database.SQLite.Simple.ToField (ToField (..)) +import U.Codebase.Reference (Reference') +import U.Codebase.Sqlite.DbId (BranchHashId, CausalHashId, CausalOldHashId, HashId (..), NamespaceHashId, ObjectId (..), TextId) +import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent -import U.Codebase.Sqlite.ObjectType ( ObjectType ) +import U.Codebase.WatchKind (WatchKind) +import qualified U.Codebase.WatchKind as WatchKind import U.Util.Base32Hex (Base32Hex (..)) -import U.Util.Hashable (Hashable) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash -import U.Codebase.Sqlite.DbId ( HashId(..), ObjectId(..), TextId ) -import U.Codebase.Reference (Reference') -import Control.Monad.Trans.Maybe (runMaybeT, MaybeT(MaybeT)) -import Control.Monad.Except (runExceptT, ExceptT, throwError, MonadError) -import U.Codebase.WatchKind (WatchKind) -import qualified U.Codebase.WatchKind as WatchKind -- * types type DB m = (MonadIO m, MonadReader Connection m) @@ -61,13 +60,6 @@ noError a = runExceptT a >>= \case orError :: MonadError Integrity m => (a -> Integrity) -> a -> Maybe b -> m b orError fe a = maybe (throwError $ fe a) pure -newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via ObjectId -newtype 
TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId -newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId -newtype CausalHashId = CausalHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId -newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId -newtype NamespaceHashId = NamespaceHashId ObjectId deriving Show deriving (Hashable, FromField, ToField) via ObjectId - -- type DerivedReferent = Referent.Id ObjectId ObjectId -- type DerivedReference = Reference.Id ObjectId type TypeHashReference = Reference' TextId HashId @@ -325,15 +317,20 @@ objectIdByBase32Prefix objType prefix = queryList sql (objType, prefix <> "%") w WHERE object.type_id = ? AND hash.base32 LIKE ? |] --- alternatively --- [here| --- SELECT object.id --- FROM object, hash, hash_object --- WHERE object.id = hash_object.object_id --- AND hash.id = hash_object.hash_id --- AND object.type_id = ? --- AND hash.base32 LIKE ? --- |] + +causalHashIdByBase32Prefix :: DB m => Text -> m [CausalHashId] +causalHashIdByBase32Prefix prefix = queryList sql (Only $ prefix <> "%") where sql = [here| + SELECT self_hash_id FROM causal + INNER JOIN hash ON id = self_hash_id + WHERE base32 LIKE ? +|] + +namespaceHashIdByBase32Prefix :: DB m => Text -> m [BranchHashId] +namespaceHashIdByBase32Prefix prefix = queryList sql (Only $ prefix <> "%") where sql = [here| + SELECT value_hash_id FROM causal + INNER JOIN hash ON id = value_hash_id + WHERE base32 LIKE ? 
+|] -- * helper functions queryList :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f [b] diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 0f1795dd17..71ddefdb4b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -413,7 +413,7 @@ putBranchFormat = \case putFoldable put adds putFoldable put removes putPatchOp BranchDiff.PatchRemove = putWord8 0 - putPatchOp BranchDiff.PatchAdd = putWord8 1 + putPatchOp (BranchDiff.PatchAdd pId) = putWord8 1 *> putVarInt pId putPatchOp (BranchDiff.PatchEdit d) = putWord8 2 *> putPatchDiff d putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index 6149a95a73..c974632f10 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -3,18 +3,15 @@ module U.Codebase.Branch where import Data.Map (Map) import Data.Set (Set) import Data.Text (Text) +import U.Codebase.HashTags (PatchHash) import U.Codebase.Reference (Reference) import U.Codebase.Referent (Referent) import U.Codebase.TermEdit (TermEdit) import U.Codebase.TypeEdit (TypeEdit) -import U.Util.Hash (Hash) newtype NameSegment = NameSegment Text -newtype EditHash = EditHash Hash -newtype CausalHash = CausalHash Hash -newtype BranchHash = BranchHash Hash + newtype MdValues = MdValues (Set Reference) -newtype PatchHash = PatchHash Hash data Branch m = Branch { terms :: Map NameSegment (Map Referent (m MdValues)), @@ -26,4 +23,4 @@ data Branch m = Branch data Patch = Patch { termEdits :: Map Referent TermEdit, typeEdits :: Map Reference TypeEdit - } \ No newline at end of file + } diff --git a/codebase2/codebase/U/Codebase/Codebase.hs b/codebase2/codebase/U/Codebase/Codebase.hs index b3dd52d6d3..4d9e1d725b 100644 --- 
a/codebase2/codebase/U/Codebase/Codebase.hs +++ b/codebase2/codebase/U/Codebase/Codebase.hs @@ -1,81 +1,72 @@ {-# LANGUAGE RankNTypes #-} + module U.Codebase.Codebase where -import qualified U.Codebase.Reference as Reference -import qualified U.Codebase.Referent as Referent +import Data.Set (Set) +import Data.Text (Text) +import U.Codebase.Branch (Branch) import U.Codebase.Causal (Causal) +import U.Codebase.Decl (Decl) +import U.Codebase.HashTags (BranchHash, CausalHash) import U.Codebase.Reference (Reference) +import qualified U.Codebase.Reference as Reference +import qualified U.Codebase.Referent as Referent +import qualified U.Codebase.Reflog as Reflog +import U.Codebase.ShortHash (ShortBranchHash, ShortHash) import U.Codebase.Term (Term) import U.Codebase.Type (TypeT) -import U.Codebase.Decl (Decl) import U.Codebase.WatchKind (WatchKind) -import U.Codebase.Branch (Branch) -import qualified U.Codebase.Reflog as Reflog -import U.Codebase.ShortHash (ShortBranchHash, ShortHash) import U.Util.Hash (Hash) -import Data.Text (Text) -import Data.Set (Set) -newtype BranchHash = BranchHash Hash -newtype CausalHash = CausalHash Hash newtype CodebasePath = CodebasePath FilePath -data SyncMode = SyncShortCircuit | SyncComplete - -data Codebase m v = Codebase { - getTerm :: Reference.Id -> m (Maybe (Term v)), - getTypeOfTerm :: Reference.Id -> m (Maybe (TypeT v)), - getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v)), - - putTerm :: Reference.Id -> Term v -> TypeT v -> m (), - putTypeDeclaration :: Reference.Id -> Decl v -> m (), - - getBranch :: BranchHash -> m (Maybe (Branch m)), - getRootBranch :: m (Either GetRootBranchError (Branch m)), - putRootBranch :: Branch m -> m (), - getBranchForCausal :: CausalHash -> m (Maybe (Branch m)), - - -- |Supports syncing from a current or older codebase format - syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), - -- |Only writes the latest codebase format - syncToDirectory :: CodebasePath -> SyncMode -> Branch m 
-> m (), - -- ^ maybe return type needs to reflect failure if remote codebase has an old version - - -- |Watch expressions are part of the codebase, the `Reference.Id` is - -- the hash of the source of the watch expression, and the `Term v a` - -- is the evaluated result of the expression, decompiled to a term. - watches :: WatchKind -> m [Reference.Id], - getWatch :: WatchKind -> Reference.Id -> m (Maybe (Term v)), - putWatch :: WatchKind -> Reference.Id -> Term v -> m (), - - getReflog :: m [Reflog.Entry], - appendReflog :: Text -> Branch m -> Branch m -> m (), - - -- |the nicely-named versions will utilize these, and add builtins to the result set - termsHavingType :: Reference -> m (Set Referent.Id), - termsMentioningType :: Reference -> m (Set Referent.Id), - - -- |number of base32 characters needed to distinguish any two hashes in the codebase; - -- we don't have to compute it separately for different namespaces - hashLength :: m Int, - termReferencesByPrefix :: ShortHash -> m (Set Reference.Id), - typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id), - termReferentsByPrefix :: ShortHash -> m (Set Referent.Id), - branchHashesByPrefix :: ShortBranchHash -> m (Set BranchHash), +data SyncMode = SyncShortCircuit | SyncComplete - -- - lca :: (forall he e. 
[Causal m CausalHash he e] -> m (Maybe BranchHash)), - dependents :: Reference -> m (Maybe (Set Reference.Id)), - termDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), - declDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)) --, - -- -- |terms, types, patches, and branches - -- branchDependencies :: - -- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), - -- -- |the "new" terms and types mentioned in a patch - -- patchDependencies :: EditHash -> m (Set Reference, Set Reference) -} +data Codebase m v = Codebase + { getTerm :: Reference.Id -> m (Maybe (Term v)), + getTypeOfTerm :: Reference.Id -> m (Maybe (TypeT v)), + getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v)), + putTerm :: Reference.Id -> Term v -> TypeT v -> m (), + putTypeDeclaration :: Reference.Id -> Decl v -> m (), + getBranch :: BranchHash -> m (Maybe (Branch m)), + getRootBranch :: m (Either GetRootBranchError (Branch m)), + putRootBranch :: Branch m -> m (), + getBranchForCausal :: CausalHash -> m (Maybe (Branch m)), + -- | Supports syncing from a current or older codebase format + syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), + -- | Only writes the latest codebase format + syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), + -- | Watch expressions are part of the codebase, the `Reference.Id` is + -- the hash of the source of the watch expression, and the `Term v a` + -- is the evaluated result of the expression, decompiled to a term. 
+ watches :: WatchKind -> m [Reference.Id], + getWatch :: WatchKind -> Reference.Id -> m (Maybe (Term v)), + putWatch :: WatchKind -> Reference.Id -> Term v -> m (), + getReflog :: m [Reflog.Entry], + appendReflog :: Text -> Branch m -> Branch m -> m (), + -- | the nicely-named versions will utilize these, and add builtins to the result set + termsHavingType :: Reference -> m (Set Referent.Id), + termsMentioningType :: Reference -> m (Set Referent.Id), + -- | number of base32 characters needed to distinguish any two hashes in the codebase; + -- we don't have to compute it separately for different namespaces + hashLength :: m Int, + termReferencesByPrefix :: ShortHash -> m (Set Reference.Id), + typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id), + termReferentsByPrefix :: ShortHash -> m (Set Referent.Id), + branchHashesByPrefix :: ShortBranchHash -> m (Set BranchHash), + -- + lca :: (forall he e. [Causal m CausalHash he e] -> m (Maybe BranchHash)), + dependents :: Reference -> m (Maybe (Set Reference.Id)), + termDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), + declDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)) --, + -- -- |terms, types, patches, and branches + -- branchDependencies :: + -- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), + -- -- |the "new" terms and types mentioned in a patch + -- patchDependencies :: EditHash -> m (Set Reference, Set Reference) + } data GetRootBranchError = NoRootBranch | CouldntLoadRootBranch Hash - deriving Show + deriving (Show) diff --git a/codebase2/codebase/U/Codebase/HashTags.hs b/codebase2/codebase/U/Codebase/HashTags.hs new file mode 100644 index 0000000000..d9c5b8a93e --- /dev/null +++ b/codebase2/codebase/U/Codebase/HashTags.hs @@ -0,0 +1,11 @@ +module U.Codebase.HashTags where + +import U.Util.Hash (Hash) + +newtype BranchHash = BranchHash { unBranchHash :: Hash } deriving (Eq, Ord, Show) + +newtype CausalHash = CausalHash { unCausalHash :: Hash } deriving (Eq, Ord, Show) + 
+newtype EditHash = EditHash { unEditHash :: Hash } deriving (Eq, Ord, Show) + +newtype PatchHash = PatchHash { unPatchHash :: Hash } deriving (Eq, Ord, Show) diff --git a/codebase2/codebase/U/Codebase/Reflog.hs b/codebase2/codebase/U/Codebase/Reflog.hs index e70567ba8a..8fc7616acc 100644 --- a/codebase2/codebase/U/Codebase/Reflog.hs +++ b/codebase2/codebase/U/Codebase/Reflog.hs @@ -3,7 +3,7 @@ module U.Codebase.Reflog where import Data.Text (Text) -import U.Codebase.Branch (BranchHash) +import U.Codebase.HashTags (BranchHash) data Entry = Entry {from :: BranchHash, to :: BranchHash, reason :: Text} diff --git a/codebase2/codebase/unison-codebase.cabal b/codebase2/codebase/unison-codebase.cabal index 672d9ee8e7..bb4b99f953 100644 --- a/codebase2/codebase/unison-codebase.cabal +++ b/codebase2/codebase/unison-codebase.cabal @@ -19,6 +19,7 @@ library U.Codebase.Causal U.Codebase.Codebase U.Codebase.Decl + U.Codebase.HashTags U.Codebase.Kind U.Codebase.Reference U.Codebase.Referent diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index bbcc6398a1..64a36c4e66 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -60,6 +60,8 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import UnliftIO (MonadIO, catchIO) import UnliftIO.STM +import qualified Unison.Codebase.Causal as Causal +import U.Codebase.HashTags (CausalHash(unCausalHash)) -- 1) buffer up the component -- 2) in the event that the component is complete, then what? @@ -348,7 +350,17 @@ sqliteCodebase root = do pure . Set.fromList $ termReferents <> declReferents branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) - branchHashesByPrefix = error "todo" + branchHashesByPrefix sh = runDB conn do + -- bs <- Ops.branchHashesByPrefix sh + cs <- Ops.causalHashesByPrefix (Cv.sbh1to2 sh) + pure $ Set.map (Causal.RawHash . 
Cv.hash2to1 . unCausalHash) cs + + -- Do we want to include causal hashes here or just namespace hashes? + -- Could we expose just one or the other of them to the user? + -- Git uses commit hashes and tree hashes (analogous to causal hashes + -- and namespace hashes, respectively), but the user is presented + -- primarily with commit hashes. + -- Arya leaning towards doing the same for Unison. let finalizer = do Sqlite.close conn diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 708f22b6a4..0ef2c128fa 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -9,13 +9,17 @@ import qualified U.Codebase.Reference as V2 import qualified U.Codebase.Reference as V2.Reference import qualified U.Codebase.Referent as V2 import qualified U.Codebase.Referent as V2.Referent +import qualified U.Codebase.ShortHash as V2 import qualified U.Codebase.Sqlite.Symbol as V2 import qualified U.Codebase.Term as V2.Term import qualified U.Codebase.Type as V2.Type +import qualified U.Codebase.WatchKind as V2 +import qualified U.Codebase.WatchKind as V2.WatchKind import qualified U.Core.ABT as V2.ABT import qualified U.Util.Hash as V2 import qualified U.Util.Hash as V2.Hash import qualified Unison.ABT as V1.ABT +import qualified Unison.Codebase.ShortBranchHash as V1 import qualified Unison.ConstructorType as CT import qualified Unison.DataDeclaration as V1.Decl import Unison.Hash (Hash) @@ -31,10 +35,11 @@ import qualified Unison.Referent as V1.Referent import qualified Unison.Symbol as V1 import qualified Unison.Term as V1.Term import qualified Unison.Type as V1.Type -import qualified Unison.Var as Var import qualified Unison.Var as V1.Var -import qualified U.Codebase.WatchKind as V2 -import qualified U.Codebase.WatchKind as V2.WatchKind +import qualified Unison.Var as Var + 
+sbh1to2 :: V1.ShortBranchHash -> V2.ShortBranchHash +sbh1to2 (V1.ShortBranchHash b32) = V2.ShortBranchHash b32 decltype2to1 :: V2.Decl.DeclType -> CT.ConstructorType decltype2to1 = \case From 19499b501d4a63fa55e437254fcbaeee0e6e4b49 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sun, 1 Nov 2020 18:49:30 -0500 Subject: [PATCH 041/225] flesh out some Branch stuff --- .../U/Codebase/Sqlite/Branch/Diff.hs | 26 +- .../U/Codebase/Sqlite/Branch/Format.hs | 9 +- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 4 +- .../U/Codebase/Sqlite/Operations.hs | 225 +++++++++++++++++- .../U/Codebase/Sqlite/Patch/Diff.hs | 10 +- .../U/Codebase/Sqlite/Patch/Format.hs | 3 +- .../U/Codebase/Sqlite/Patch/Full.hs | 11 +- .../U/Codebase/Sqlite/Patch/TermEdit.hs | 4 +- .../U/Codebase/Sqlite/Serialization.hs | 53 +++-- codebase2/codebase/U/Codebase/Branch.hs | 8 +- codebase2/codebase/U/Codebase/TermEdit.hs | 1 - .../U/Util/Serialization.hs | 5 + 12 files changed, 298 insertions(+), 61 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs index 0dcb13eeb9..4d0d1f7755 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs @@ -1,9 +1,12 @@ +{-# LANGUAGE ViewPatterns #-} + module U.Codebase.Sqlite.Branch.Diff where +import qualified Data.List as List import Data.Map (Map) +import qualified Data.Map as Map import Data.Set (Set) import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, TextId) -import U.Codebase.Sqlite.Patch.Diff (PatchDiff) import U.Codebase.Sqlite.Reference (Reference) import U.Codebase.Sqlite.Referent (Referent) @@ -11,15 +14,20 @@ type NameSegment = TextId type Metadata = Reference -data PatchOp = PatchRemove | PatchAdd PatchObjectId | PatchEdit PatchDiff +data PatchOp = PatchRemove | PatchAddReplace PatchObjectId +data DefinitionOp = RemoveDef | AddDefWithMetadata (Set Metadata) | AlterDefMetadata 
(AddRemove Metadata) +data ChildOp = ChildRemove | ChildAddReplace BranchObjectId +type AddRemove a = Map a Bool -data AddRemove a = AddRemove {add :: Set a, remove :: Set a} +addsRemoves :: AddRemove a -> ([a], [a]) +addsRemoves map = + let (fmap fst -> adds, fmap fst -> removes) = + List.partition snd (Map.toList map) + in (adds, removes) data Diff = Diff - { reference :: BranchObjectId, - terms :: Map NameSegment (AddRemove Referent), - types :: Map NameSegment (AddRemove Reference), - termMetadata :: Map NameSegment (Map Referent (AddRemove Metadata)), - typeMetadata :: Map NameSegment (Map Reference (AddRemove Metadata)), - patches :: Map NameSegment PatchOp + { terms :: Map NameSegment (Map Referent DefinitionOp), + types :: Map NameSegment (Map Reference DefinitionOp), + patches :: Map NameSegment PatchOp, + children :: Map NameSegment ChildOp } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs index d3e89d41fa..f1392a308c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs @@ -1,8 +1,7 @@ module U.Codebase.Sqlite.Branch.Format where -import U.Codebase.Sqlite.Branch.Full -import U.Codebase.Sqlite.Branch.Diff +import U.Codebase.Sqlite.Branch.Diff ( Diff ) +import U.Codebase.Sqlite.Branch.Full ( Branch ) +import U.Codebase.Sqlite.DbId (BranchObjectId) -data BranchFormat - = Full Branch - | Diff Diff +data BranchFormat = Full Branch | Diff BranchObjectId Diff diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index f925b6aba4..410855ab44 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -22,10 +22,10 @@ newtype TextId = TextId Word64 deriving (Eq, Ord, Show) newtype HashId = HashId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, 
Integral, Bits, Hashable, FromField, ToField) via Word64 -newtype PatchObjectId = PatchObjectId Word64 deriving (Eq, Ord, Show) +newtype PatchObjectId = PatchObjectId { unPatchObjectId :: ObjectId } deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId -newtype BranchObjectId = BranchObjectId ObjectId deriving (Eq, Ord, Show) +newtype BranchObjectId = BranchObjectId { unBranchObjectId :: ObjectId } deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index db31252e89..2f39708540 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -1,9 +1,12 @@ +{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ViewPatterns #-} module U.Codebase.Sqlite.Operations where @@ -23,6 +26,7 @@ import qualified Data.Foldable as Foldable import Data.Functor (void, (<&>)) import Data.Map (Map) import qualified Data.Map as Map +import qualified Data.Map.Merge.Lazy as Map import Data.Maybe (isJust) import Data.Sequence (Seq) import qualified Data.Sequence as Seq @@ -31,35 +35,59 @@ import qualified Data.Set as Set import Data.Text (Text) import qualified Data.Vector as Vector import Data.Word (Word64) +import qualified U.Codebase.Branch as C +import qualified U.Codebase.Branch as C.Branch import U.Codebase.Decl (ConstructorId) import qualified U.Codebase.Decl as C import qualified U.Codebase.Decl as C.Decl -import U.Codebase.HashTags (CausalHash(..), 
BranchHash(..)) +import U.Codebase.HashTags (BranchHash (..), CausalHash (..), PatchHash (..)) import qualified U.Codebase.Reference as C import qualified U.Codebase.Reference as C.Reference +import qualified U.Codebase.Referent as C import qualified U.Codebase.Referent as C.Referent import U.Codebase.ShortHash (ShortBranchHash (ShortBranchHash)) +import qualified U.Codebase.Sqlite.Branch.Diff as S.Branch +import qualified U.Codebase.Sqlite.Branch.Diff as S.Branch.Diff +import qualified U.Codebase.Sqlite.Branch.Diff as S.BranchDiff +import qualified U.Codebase.Sqlite.Branch.Format as S +import qualified U.Codebase.Sqlite.Branch.Format as S.BranchFormat +import qualified U.Codebase.Sqlite.Branch.Full as S +import qualified U.Codebase.Sqlite.Branch.Full as S.Branch.Full +import qualified U.Codebase.Sqlite.Branch.MetadataSet as S +import qualified U.Codebase.Sqlite.Branch.MetadataSet as S.MetadataSet import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), WatchLocalIds) import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT +import qualified U.Codebase.Sqlite.Patch.Diff as S +import qualified U.Codebase.Sqlite.Patch.Format as S.PatchFormat +import qualified U.Codebase.Sqlite.Patch.Full as S +import qualified U.Codebase.Sqlite.Patch.TermEdit as S +import qualified U.Codebase.Sqlite.Patch.TermEdit as S.TermEdit +import qualified U.Codebase.Sqlite.Patch.TypeEdit as S +import qualified U.Codebase.Sqlite.Patch.TypeEdit as S.TypeEdit import U.Codebase.Sqlite.Queries (DB) import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Reference as S import qualified U.Codebase.Sqlite.Reference as S.Reference +import qualified U.Codebase.Sqlite.Referent as S import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sqlite.Symbol (Symbol) import qualified U.Codebase.Sqlite.Term.Format 
as S.Term import qualified U.Codebase.Term as C import qualified U.Codebase.Term as C.Term +import qualified U.Codebase.TermEdit as C +import qualified U.Codebase.TermEdit as C.TermEdit import qualified U.Codebase.Type as C.Type +import qualified U.Codebase.TypeEdit as C +import qualified U.Codebase.TypeEdit as C.TypeEdit import U.Codebase.WatchKind (WatchKind) import qualified U.Core.ABT as ABT import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import qualified U.Util.Monoid as Monoid -import U.Util.Serialization (Get, getFromBytes) +import U.Util.Serialization (Get) import qualified U.Util.Serialization as S type Err m = MonadError Error m @@ -72,6 +100,10 @@ data DecodeError = ErrTermElement Word64 | ErrDeclElement Word64 | ErrFramedArrayLen + | ErrTypeOfTerm C.Reference.Id + | ErrWatch WatchKind C.Reference.Id + | ErrBranch Db.BranchObjectId + | ErrPatch Db.PatchObjectId deriving (Show) data Error @@ -118,6 +150,31 @@ c2sReferenceId = C.Reference.idH hashToObjectId s2cReferenceId :: EDB m => S.Reference.Id -> m C.Reference.Id s2cReferenceId = C.Reference.idH loadHashByObjectId +s2cReferent :: EDB m => S.Referent -> m C.Referent +s2cReferent = bitraverse s2cReference s2cReference + +s2cTermEdit :: EDB m => S.TermEdit -> m C.TermEdit +s2cTermEdit = \case + S.TermEdit.Replace r t -> C.TermEdit.Replace <$> s2cReference r <*> pure (s2cTyping t) + S.TermEdit.Deprecate -> pure C.TermEdit.Deprecate + +s2cTyping :: S.TermEdit.Typing -> C.TermEdit.Typing +s2cTyping = \case + S.TermEdit.Same -> C.TermEdit.Same + S.TermEdit.Subtype -> C.TermEdit.Subtype + S.TermEdit.Different -> C.TermEdit.Different + +s2cTypeEdit :: EDB m => S.TypeEdit -> m C.TypeEdit +s2cTypeEdit = \case + S.TypeEdit.Replace r -> C.TypeEdit.Replace <$> s2cReference r + S.TypeEdit.Deprecate -> pure C.TypeEdit.Deprecate + +s2cBranch :: EDB m => S.Branch -> m (C.Branch m) +s2cBranch = error "todo" + +s2cPatch :: EDB m => S.Patch -> m C.Patch +s2cPatch = error "todo" + lookupTextId :: DB m 
=> Text -> MaybeT m Db.TextId lookupTextId = m Q.loadText @@ -350,7 +407,7 @@ loadTypeOfTermByTermReference r = do -- load "type of term" blob for the reference bytes <- m' "Q.loadTypeOfTerm" Q.loadTypeOfTerm r' -- deserialize the blob into the type - typ <- m' "getTypeFromBytes" (fmap pure $ getFromBytes $ S.getType S.getReference) bytes + typ <- getFromBytesOr (ErrTypeOfTerm r) (S.getType S.getReference) bytes -- convert the result type by looking up db ids C.Type.rtraverse s2cReference typ @@ -384,7 +441,7 @@ loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) loadWatch k r = C.Reference.idH Q.saveHashHash r >>= MaybeT . Q.loadWatch k - >>= MaybeT . pure . getFromBytes (S.getPair S.getLocalIds S.getTermElement) + >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getLocalIds S.getTermElement) >>= uncurry s2cTerm saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () @@ -460,7 +517,165 @@ componentByObjectId id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. len - 1]] --- loadBranchById :: DB m => Db.BranchId -> m C.Branch +loadBranchByObjectId :: EDB m => Db.BranchObjectId -> m (C.Branch m) +loadBranchByObjectId id = do + deserializeBranchObject id >>= \case + S.BranchFormat.Full f -> doFull f + S.BranchFormat.Diff r d -> doDiff r [d] + where + deserializeBranchObject :: EDB m => Db.BranchObjectId -> m S.BranchFormat + deserializeBranchObject id = + (liftQ . Q.loadObjectById) (Db.unBranchObjectId id) + >>= getFromBytesOr (ErrBranch id) S.getBranchFormat + deserializePatchObject id = + (liftQ . 
Q.loadObjectById) (Db.unPatchObjectId id) + >>= getFromBytesOr (ErrPatch id) S.getPatchFormat + doFull :: EDB m => S.Branch.Full.Branch -> m (C.Branch m) + doFull (S.Branch.Full.Branch tms tps patches children) = + C.Branch + <$> doTerms tms + <*> doTypes tps + <*> doPatches patches + <*> doChildren children + where + bitraverseMap :: (Applicative f, Ord b) => (a -> f b) -> (c -> f d) -> Map a c -> f (Map b d) + bitraverseMap f g = fmap Map.fromList . traverse (bitraverse f g) . Map.toList + traverseSet :: (Applicative f, Ord b) => (a -> f b) -> Set a -> f (Set b) + traverseSet f = fmap Set.fromList . traverse f . Set.toList + -- is there a way to make these tidier? + doTerms :: forall m. EDB m => Map Db.TextId (Map S.Referent S.MetadataSetFormat) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) + doTerms = + bitraverseMap + (fmap C.Branch.NameSegment . loadTextById) + ( bitraverseMap s2cReferent \case + S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> traverseSet s2cReference rs + ) + doTypes :: forall m. EDB m => Map Db.TextId (Map S.Reference S.MetadataSetFormat) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) + doTypes = + bitraverseMap + (fmap C.Branch.NameSegment . loadTextById) + ( bitraverseMap s2cReference \case + S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> traverseSet s2cReference rs + ) + doPatches :: forall m. EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.Branch.NameSegment (PatchHash, m C.Patch)) + doPatches = bitraverseMap (fmap C.Branch.NameSegment . 
loadTextById) \patchId -> do + h <- PatchHash <$> loadHashByObjectId (Db.unPatchObjectId patchId) + let patch :: m C.Patch + patch = do + deserializePatchObject patchId >>= \case + S.PatchFormat.Full (S.Patch termEdits typeEdits) -> + C.Patch <$> bitraverseMap s2cReferent (traverseSet s2cTermEdit) termEdits <*> bitraverseMap s2cReference (traverseSet s2cTypeEdit) typeEdits + S.PatchFormat.Diff ref d -> doDiff ref [d] + doDiff ref ds = + deserializePatchObject ref >>= \case + S.PatchFormat.Full f -> s2cPatch (joinFull f ds) + S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) + joinFull f [] = f + joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds + where + f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) + addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) + addRemove add del src = + (Map.unionWith (<>) add (Map.differenceWith remove src del)) + remove :: Ord b => Set b -> Set b -> Maybe (Set b) + remove src del = + let diff = Set.difference src del + in if diff == mempty then Nothing else Just diff + pure (h, patch) + + doChildren :: EDB m => Map Db.TextId Db.BranchObjectId -> m (Map C.Branch.NameSegment (m (C.Branch m))) + doChildren = bitraverseMap (fmap C.NameSegment . loadTextById) (pure . 
loadBranchByObjectId) + doDiff :: EDB m => Db.BranchObjectId -> [S.Branch.Diff] -> m (C.Branch m) + doDiff ref ds = + deserializeBranchObject ref >>= \case + S.BranchFormat.Full f -> s2cBranch (joinFull f ds) + S.BranchFormat.Diff ref' d' -> doDiff ref' (d' : ds) + where + joinFull :: S.Branch -> [S.Branch.Diff] -> S.Branch + joinFull f [] = f + joinFull + (S.Branch.Full.Branch tms tps patches children) + (S.Branch.Diff tms' tps' patches' children' : ds) = joinFull f' ds + where + f' = + S.Branch.Full.Branch + (mergeDefns tms tms') + (mergeDefns tps tps') + (mergePatches patches patches') + (mergeChildren children children') + mergeChildren :: + Map S.Branch.Full.NameSegment Db.BranchObjectId -> + Map S.BranchDiff.NameSegment S.BranchDiff.ChildOp -> + Map S.Branch.Full.NameSegment Db.BranchObjectId + mergeChildren = + Map.merge + Map.preserveMissing + (Map.mapMissing fromChildOp) + (Map.zipWithMaybeMatched mergeChildOp) + mergeChildOp :: + S.Branch.NameSegment -> + Db.BranchObjectId -> + S.BranchDiff.ChildOp -> + Maybe Db.BranchObjectId + mergeChildOp = + const . 
const \case + S.BranchDiff.ChildAddReplace id -> Just id + S.BranchDiff.ChildRemove -> Nothing + fromChildOp :: S.Branch.NameSegment -> S.BranchDiff.ChildOp -> Db.BranchObjectId + fromChildOp = const \case + S.BranchDiff.ChildAddReplace id -> id + S.BranchDiff.ChildRemove -> error "diff tries to remove a nonexistent child" + mergePatches :: + Map S.Branch.Full.NameSegment Db.PatchObjectId -> + Map S.BranchDiff.NameSegment S.BranchDiff.PatchOp -> + Map S.Branch.Full.NameSegment Db.PatchObjectId + mergePatches = + Map.merge Map.preserveMissing (Map.mapMissing fromPatchOp) (Map.zipWithMaybeMatched mergePatchOp) + fromPatchOp :: S.Branch.NameSegment -> S.BranchDiff.PatchOp -> Db.PatchObjectId + fromPatchOp = const \case + S.BranchDiff.PatchAddReplace id -> id + S.BranchDiff.PatchRemove -> error "diff tries to remove a nonexistent child" + mergePatchOp :: S.Branch.NameSegment -> Db.PatchObjectId -> S.BranchDiff.PatchOp -> Maybe Db.PatchObjectId + mergePatchOp = + const . const \case + S.BranchDiff.PatchAddReplace id -> Just id + S.BranchDiff.PatchRemove -> Nothing + mergeDefns :: + Ord r => + Map S.BranchDiff.NameSegment (Map r S.MetadataSet.MetadataSetFormat) -> + Map S.BranchDiff.NameSegment (Map r S.BranchDiff.DefinitionOp) -> + Map S.BranchDiff.NameSegment (Map r S.MetadataSet.MetadataSetFormat) + mergeDefns = + Map.merge + Map.preserveMissing + (Map.mapMissing (const (fmap fromDefnOp))) + (Map.zipWithMatched (const mergeDefnOp)) + fromDefnOp :: S.BranchDiff.DefinitionOp -> S.MetadataSet.MetadataSetFormat + fromDefnOp = \case + S.Branch.Diff.AddDefWithMetadata md -> S.MetadataSet.Inline md + S.Branch.Diff.RemoveDef -> error "diff tries to remove a nonexistent definition" + S.Branch.Diff.AlterDefMetadata _md -> error "diff tries to change metadata for a nonexistent definition" + mergeDefnOp :: + Ord r => + Map r S.MetadataSet.MetadataSetFormat -> + Map r S.BranchDiff.DefinitionOp -> + Map r S.MetadataSet.MetadataSetFormat + mergeDefnOp = + Map.merge + 
Map.preserveMissing + (Map.mapMissing (const fromDefnOp)) + (Map.zipWithMaybeMatched (const mergeDefnOp')) + mergeDefnOp' :: + S.MetadataSet.MetadataSetFormat -> + S.BranchDiff.DefinitionOp -> + Maybe S.MetadataSet.MetadataSetFormat + mergeDefnOp' (S.MetadataSet.Inline md) = \case + S.Branch.Diff.AddDefWithMetadata _md -> + error "diff tries to create a child that already exists" + S.Branch.Diff.RemoveDef -> Nothing + S.Branch.Diff.AlterDefMetadata md' -> + let (Set.fromList -> adds, Set.fromList -> removes) = S.BranchDiff.addsRemoves md' + in Just . S.MetadataSet.Inline $ (Set.union adds $ Set.difference md removes) branchHashesByPrefix :: EDB m => ShortBranchHash -> m (Set BranchHash) branchHashesByPrefix (ShortBranchHash b32prefix) = do diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index d6deed156f..2ce1f4274d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -2,17 +2,15 @@ module U.Codebase.Sqlite.Patch.Diff where import Data.Map (Map) import Data.Set (Set) -import U.Codebase.Sqlite.DbId (PatchObjectId) import U.Codebase.Sqlite.Patch.TermEdit (TermEdit) import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit) import U.Codebase.Sqlite.Reference (Reference) import U.Codebase.Sqlite.Referent (Referent) data PatchDiff = PatchDiff - { reference :: PatchObjectId, - addedTermEdits :: Map Referent TermEdit, - addedTypeEdits :: Map Reference TypeEdit, - removedTermEdits :: Set Referent, - removedTypeEdits :: Set Reference + { addedTermEdits :: Map Referent (Set TermEdit), + addedTypeEdits :: Map Reference (Set TypeEdit), + removedTermEdits :: Map Referent (Set TermEdit), + removedTypeEdits :: Map Reference (Set TypeEdit) } deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs index 
55cbf9106e..eb2261574a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs @@ -2,5 +2,6 @@ module U.Codebase.Sqlite.Patch.Format where import U.Codebase.Sqlite.Patch.Diff import U.Codebase.Sqlite.Patch.Full +import U.Codebase.Sqlite.DbId (PatchObjectId) -data PatchFormat = Full Patch | Diff PatchDiff +data PatchFormat = Full Patch | Diff PatchObjectId PatchDiff diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs index 6d5c70033d..2b1dafdcd7 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs @@ -1,11 +1,12 @@ module U.Codebase.Sqlite.Patch.Full where import Data.Map (Map) -import U.Codebase.Sqlite.Types +import Data.Set (Set) import U.Codebase.Sqlite.Patch.TermEdit import U.Codebase.Sqlite.Patch.TypeEdit +import U.Codebase.Sqlite.Types -data Patch = Patch { - termEdits :: Map Referent TermEdit, - typeEdits :: Map Reference TypeEdit -} +data Patch = Patch + { termEdits :: Map Referent (Set TermEdit), + typeEdits :: Map Reference (Set TypeEdit) + } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs index bccc8160af..12d2fd39ef 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs @@ -1,8 +1,8 @@ module U.Codebase.Sqlite.Patch.TermEdit where -import U.Codebase.Sqlite.Referent (Referent) +import U.Codebase.Sqlite.Reference (Reference) -data TermEdit = Replace Referent Typing | Deprecate +data TermEdit = Replace Reference Typing | Deprecate deriving (Eq, Ord, Show) -- Replacements with the Same type can be automatically propagated. 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 71ddefdb4b..17e9c93f2b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE ViewPatterns #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} @@ -38,6 +39,7 @@ import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT import U.Util.Serialization import Prelude hiding (getChar, putChar) +import U.Codebase.Sqlite.DbId (PatchObjectId) putABT :: (MonadPut m, Foldable f, Functor f, Ord v) => @@ -391,7 +393,7 @@ lookupDeclElement = putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () putBranchFormat = \case BranchFormat.Full b -> putWord8 0 *> putBranchFull b - BranchFormat.Diff d -> putWord8 1 *> putBranchDiff d + BranchFormat.Diff r d -> putWord8 1 *> putBranchDiff r d where putReferent' = putReferent putReference putReference putBranchFull (BranchFull.Branch terms types patches children) = do @@ -401,51 +403,60 @@ putBranchFormat = \case putMap putVarInt putVarInt children putMetadataSetFormat = \case MetadataSet.Inline s -> putWord8 0 *> putFoldable putReference s - putBranchDiff (BranchDiff.Diff ref terms types termMD typeMD patches) = do + putBranchDiff ref (BranchDiff.Diff terms types patches children) = do putVarInt ref - putMap putVarInt (putAddRemove putReferent') terms - putMap putVarInt (putAddRemove putReference) types - putMap putVarInt (putMap putReferent' (putAddRemove putReference)) termMD - putMap putVarInt (putMap putReference (putAddRemove putReference)) typeMD + putMap putVarInt (putMap putReferent' putDiffOp) terms + putMap putVarInt (putMap putReference putDiffOp) types putMap putVarInt putPatchOp patches + putMap putVarInt putChildOp children where - putAddRemove put (BranchDiff.AddRemove adds removes) = do + 
putAddRemove put map = do + let (adds, removes) = BranchDiff.addsRemoves map putFoldable put adds putFoldable put removes - putPatchOp BranchDiff.PatchRemove = putWord8 0 - putPatchOp (BranchDiff.PatchAdd pId) = putWord8 1 *> putVarInt pId - putPatchOp (BranchDiff.PatchEdit d) = putWord8 2 *> putPatchDiff d + putPatchOp = \case + BranchDiff.PatchRemove -> putWord8 0 + BranchDiff.PatchAddReplace pId -> putWord8 1 *> putVarInt pId + putDiffOp = \case + BranchDiff.RemoveDef -> putWord8 0 + BranchDiff.AddDefWithMetadata md -> putWord8 1 *> putFoldable putReference md + BranchDiff.AlterDefMetadata md -> putWord8 2 *> putAddRemove putReference md + putChildOp = \case + BranchDiff.ChildRemove -> putWord8 0 + BranchDiff.ChildAddReplace b -> putWord8 1 *> putVarInt b putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () putPatchFormat = \case PatchFormat.Full p -> putWord8 0 *> putPatchFull p - PatchFormat.Diff p -> putWord8 1 *> putPatchDiff p + PatchFormat.Diff r p -> putWord8 1 *> putPatchDiff r p + +getPatchFormat :: MonadGet m => m PatchFormat.PatchFormat +getPatchFormat = error "todo" putPatchFull :: MonadPut m => PatchFull.Patch -> m () putPatchFull (PatchFull.Patch termEdits typeEdits) = do - putMap putReferent' putTermEdit termEdits - putMap putReference putTypeEdit typeEdits + putMap putReferent' (putFoldable putTermEdit) termEdits + putMap putReference (putFoldable putTypeEdit) typeEdits where putReferent' = putReferent putReference putReference -putPatchDiff :: MonadPut m => PatchDiff.PatchDiff -> m () -putPatchDiff (PatchDiff.PatchDiff r atm atp rtm rtp) = do +putPatchDiff :: MonadPut m => PatchObjectId -> PatchDiff.PatchDiff -> m () +putPatchDiff r (PatchDiff.PatchDiff atm atp rtm rtp) = do putVarInt r - putMap putReferent' putTermEdit atm - putMap putReference putTypeEdit atp - putFoldable putReferent' rtm - putFoldable putReference rtp + putMap putReferent' (putFoldable putTermEdit) atm + putMap putReference (putFoldable putTypeEdit) atp + putMap 
putReferent' (putFoldable putTermEdit) rtm + putMap putReference (putFoldable putTypeEdit) rtp where putReferent' = putReferent putReference putReference putTermEdit :: MonadPut m => TermEdit.TermEdit -> m () putTermEdit TermEdit.Deprecate = putWord8 0 -putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReferent' r *> putTyping t +putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReference r *> putTyping t where putTyping TermEdit.Same = putWord8 0 putTyping TermEdit.Subtype = putWord8 1 putTyping TermEdit.Different = putWord8 2 - putReferent' = putReferent putReference putReference putTypeEdit :: MonadPut m => TypeEdit.TypeEdit -> m () putTypeEdit TypeEdit.Deprecate = putWord8 0 diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index c974632f10..ca2ba3c789 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -9,9 +9,9 @@ import U.Codebase.Referent (Referent) import U.Codebase.TermEdit (TermEdit) import U.Codebase.TypeEdit (TypeEdit) -newtype NameSegment = NameSegment Text +newtype NameSegment = NameSegment Text deriving (Eq, Ord, Show) -newtype MdValues = MdValues (Set Reference) +newtype MdValues = MdValues (Set Reference) deriving (Eq, Ord, Show) data Branch m = Branch { terms :: Map NameSegment (Map Referent (m MdValues)), @@ -21,6 +21,6 @@ data Branch m = Branch } data Patch = Patch - { termEdits :: Map Referent TermEdit, - typeEdits :: Map Reference TypeEdit + { termEdits :: Map Referent (Set TermEdit), + typeEdits :: Map Reference (Set TypeEdit) } diff --git a/codebase2/codebase/U/Codebase/TermEdit.hs b/codebase2/codebase/U/Codebase/TermEdit.hs index 625cb4cb72..90bbcbfc09 100644 --- a/codebase2/codebase/U/Codebase/TermEdit.hs +++ b/codebase2/codebase/U/Codebase/TermEdit.hs @@ -21,4 +21,3 @@ instance Hashable Typing where instance Hashable TermEdit where tokens (Replace r t) = [H.Tag 0] ++ H.tokens r ++ H.tokens t tokens Deprecate = [H.Tag 1] - diff --git 
a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 96d62f7964..387ea3bff1 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -36,6 +36,8 @@ import Data.Map (Map) import qualified Data.Map as Map import Control.Applicative (Applicative(liftA2)) import GHC.Word (Word64) +import Data.Set (Set) +import qualified Data.Set as Set type Get a = forall m. MonadGet m => m a @@ -210,6 +212,9 @@ putMap putA putB m = putFoldable (putPair putA putB) (Map.toList m) getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) getMap getA getB = Map.fromList <$> getList (getPair getA getB) +getSet :: (MonadGet m, Ord a) => m a -> m (Set a) +getSet getA = Set.fromList <$> getList getA + putPair :: MonadPut m => (a -> m ()) -> (b -> m ()) -> (a,b) -> m () putPair putA putB (a,b) = putA a *> putB b From a5f8d2fc3c8f8943406f8aa377a5e9202b3c4d23 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 3 Nov 2020 22:50:28 -0500 Subject: [PATCH 042/225] saveTermComponent --- .../U/Codebase/Sqlite/LocalIds.hs | 21 +- .../U/Codebase/Sqlite/Operations.hs | 190 ++++--- .../U/Codebase/Sqlite/Queries.hs | 13 - .../U/Codebase/Sqlite/Serialization.hs | 465 ++++++++++-------- .../U/Codebase/Sqlite/Term/Format.hs | 12 +- codebase2/codebase-sqlite/sql/create.sql | 7 - .../unison-codebase-sqlite.cabal | 1 + .../U/Util/Serialization.hs | 9 +- codebase2/util/U/Util/Lens.hs | 8 + codebase2/util/unison-util.cabal | 2 + .../src/Unison/Codebase/SqliteCodebase.hs | 25 +- 11 files changed, 420 insertions(+), 333 deletions(-) create mode 100644 codebase2/util/U/Util/Lens.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index 21e47a76cf..f77c1fb2d1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -1,23 +1,36 @@ 
+{-# LANGUAGE DerivingVia #-} + module U.Codebase.Sqlite.LocalIds where +import Data.Bifoldable (Bifoldable (bifoldMap)) +import Data.Bifunctor (Bifunctor (bimap)) +import Data.Bitraversable (Bitraversable (bitraverse)) +import Data.Bits (Bits) import Data.Vector (Vector) +import Data.Word (Word64) import U.Codebase.Sqlite.DbId -import Data.Bitraversable (Bitraversable(bitraverse)) -import Data.Bifoldable (Bifoldable(bifoldMap)) -import Data.Bifunctor (Bifunctor(bimap)) --- |A mapping between index ids that are local to an object and the ids in the database +-- | A mapping between index ids that are local to an object and the ids in the database data LocalIds' t h = LocalIds { textLookup :: Vector t, defnLookup :: Vector h } type LocalIds = LocalIds' TextId ObjectId + type WatchLocalIds = LocalIds' TextId HashId +-- | represents an index into a textLookup +newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + +-- | represents an index into a defnLookup +newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + instance Bitraversable LocalIds' where bitraverse f g (LocalIds t d) = LocalIds <$> traverse f t <*> traverse g d + instance Bifoldable LocalIds' where bifoldMap f g (LocalIds t d) = foldMap f t <> foldMap g d + instance Bifunctor LocalIds' where bimap f g (LocalIds t d) = LocalIds (f <$> t) (g <$> d) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 2f39708540..8c846d824a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -33,6 +33,7 @@ import qualified Data.Sequence as Seq import Data.Set (Set) import qualified Data.Set as Set import Data.Text (Text) +import Data.Tuple.Extra (uncurry3) import qualified Data.Vector as Vector import Data.Word (Word64) import qualified U.Codebase.Branch as C 
@@ -57,7 +58,7 @@ import qualified U.Codebase.Sqlite.Branch.MetadataSet as S import qualified U.Codebase.Sqlite.Branch.MetadataSet as S.MetadataSet import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl -import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), WatchLocalIds) +import U.Codebase.Sqlite.LocalIds (LocalDefnId (..), LocalIds, LocalIds' (..), LocalTextId (..), WatchLocalIds) import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Patch.Diff as S @@ -84,11 +85,11 @@ import qualified U.Codebase.TypeEdit as C import qualified U.Codebase.TypeEdit as C.TypeEdit import U.Codebase.WatchKind (WatchKind) import qualified U.Core.ABT as ABT -import U.Util.Base32Hex (Base32Hex) import qualified U.Util.Hash as H import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S +import qualified U.Util.Lens as Lens type Err m = MonadError Error m @@ -109,6 +110,8 @@ data DecodeError data Error = DecodeError DecodeError ByteString ErrString | DatabaseIntegrityError Q.Integrity + | UnknownDependency H.Hash + | UnknownText Text | LegacyUnknownCycleLen H.Hash | LegacyUnknownConstructorType H.Hash C.Reference.Pos deriving (Show) @@ -124,9 +127,6 @@ liftQ a = Left e -> throwError (DatabaseIntegrityError e) Right a -> pure a -loadTermComponentByHash :: DB m => Base32Hex -> m (Maybe [C.Term Symbol]) -loadTermComponentByHash = error "todo" - -- * helpers m :: (a -> f (Maybe b)) -> a -> MaybeT f b @@ -175,17 +175,20 @@ s2cBranch = error "todo" s2cPatch :: EDB m => S.Patch -> m C.Patch s2cPatch = error "todo" -lookupTextId :: DB m => Text -> MaybeT m Db.TextId -lookupTextId = m Q.loadText +lookupTextId :: EDB m => Text -> m Db.TextId +lookupTextId t = Q.loadText t >>= \case + Just textId -> pure textId + Nothing -> throwError $ UnknownText t loadTextById :: EDB m => Db.TextId -> m Text 
loadTextById = liftQ . Q.loadTextById --- ok to fail -hashToObjectId :: EDB m => H.Hash -> MaybeT m Db.ObjectId +-- returns Nothing if no ObjectId for Hash h +hashToObjectId :: EDB m => H.Hash -> m Db.ObjectId hashToObjectId h = do - hashId <- MaybeT $ Q.loadHashId . H.toBase32Hex $ h - liftQ $ Q.objectIdByPrimaryHashId hashId + (Q.loadHashId . H.toBase32Hex) h >>= \case + Just hashId -> liftQ $ Q.objectIdByPrimaryHashId hashId + Nothing -> throwError $ UnknownDependency h objectExistsForHash :: EDB m => H.Hash -> m Bool objectExistsForHash h = isJust <$> runMaybeT (hashToObjectId h) @@ -199,8 +202,14 @@ loadHashByHashId = fmap H.fromBase32Hex . liftQ . Q.loadHashById decodeComponentLengthOnly :: Err m => ByteString -> m Word64 decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen S.lengthFramedArray -decodeTermElement :: Err m => C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Term) -decodeTermElement i = getFromBytesOr (ErrTermElement i) (S.lookupTermElement i) +decodeTermElementWithType :: Err m => C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Term, S.Term.Type) +decodeTermElementWithType i = getFromBytesOr (ErrTermElement i) (S.lookupTermElement i) + +decodeTermElementDiscardingTerm :: Err m => C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Type) +decodeTermElementDiscardingTerm i = getFromBytesOr (ErrTermElement i) (S.lookupTermElementDiscardingTerm i) + +decodeTermElementDiscardingType :: Err m => C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Term) +decodeTermElementDiscardingType i = getFromBytesOr (ErrTermElement i) (S.lookupTermElementDiscardingType i) decodeDeclElement :: Err m => Word64 -> ByteString -> m (LocalIds, S.Decl.Decl Symbol) decodeDeclElement i = getFromBytesOr (ErrDeclElement i) (S.lookupDeclElement i) @@ -223,40 +232,73 @@ getDeclTypeByReference r@(C.Reference.Id h pos) = -- * meat and veggies +loadTermWithTypeByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol, C.Term.Type Symbol) 
+loadTermWithTypeByReference (C.Reference.Id h i) = + hashToObjectId h + >>= liftQ . Q.loadObjectById + -- retrieve and deserialize the blob + >>= decodeTermElementWithType i + >>= uncurry3 s2cTermWithType + loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) loadTermByReference (C.Reference.Id h i) = hashToObjectId h >>= liftQ . Q.loadObjectById -- retrieve and deserialize the blob - >>= decodeTermElement i + >>= decodeTermElementDiscardingType i >>= uncurry s2cTerm +s2cTermWithType :: EDB m => LocalIds -> S.Term.Term -> S.Term.Type -> MaybeT m (C.Term Symbol, C.Term.Type Symbol) +s2cTermWithType ids tm tp = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids + pure (x2cTerm substText substHash tm, x2cTType substText substHash tp) + s2cTerm :: EDB m => LocalIds -> S.Term.Term -> MaybeT m (C.Term Symbol) -s2cTerm = x2cTerm loadTextById loadHashByObjectId +s2cTerm ids tm = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids + pure $ x2cTerm substText substHash tm + +s2cTypeOfTerm :: EDB m => LocalIds -> S.Term.Type -> MaybeT m (C.Term.Type Symbol) +s2cTypeOfTerm ids tp = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids + pure $ x2cTType substText substHash tp w2cTerm :: EDB m => WatchLocalIds -> S.Term.Term -> MaybeT m (C.Term Symbol) -w2cTerm = x2cTerm loadTextById loadHashByHashId +w2cTerm ids tm = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByHashId ids + pure $ x2cTerm substText substHash tm -x2cTerm :: Monad m => (t -> m Text) -> (d -> m H.Hash) -> LocalIds' t d -> S.Term.Term -> m (C.Term Symbol) -x2cTerm loadText loadHash localIds term = do - -- look up the text and hashes that are used by the term +localIdsToLookups :: Monad m => (t -> m Text) -> (d -> m H.Hash) -> LocalIds' t d -> m (LocalTextId -> Text, LocalDefnId -> H.Hash) +localIdsToLookups loadText loadHash localIds = do texts <- traverse loadText $ 
LocalIds.textLookup localIds hashes <- traverse loadHash $ LocalIds.defnLookup localIds + let substText (LocalTextId w) = texts Vector.! fromIntegral w + substHash (LocalDefnId w) = hashes Vector.! fromIntegral w + pure (substText, substHash) +x2cTerm :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Term -> C.Term Symbol +x2cTerm substText substHash = -- substitute the text and hashes back into the term - let substText (S.Term.LocalTextId w) = texts Vector.! fromIntegral w - substHash (S.Term.LocalDefnId w) = hashes Vector.! fromIntegral w - substTermRef = bimap substText (fmap substHash) - substTypeRef = bimap substText substHash - substTermLink = bimap substTermRef substTypeRef - substTypeLink = substTypeRef - pure (C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id term) - -c2xTerm :: forall m t d. Monad m => (Text -> m t) -> (H.Hash -> m d) -> C.Term Symbol -> m (LocalIds' t d, S.Term.Term) -c2xTerm saveText saveDefn tm = - done =<< (runWriterT . flip evalStateT mempty) (ABT.transformM go tm) + C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id + where + substTermRef = bimap substText (fmap substHash) + substTypeRef = bimap substText substHash + substTermLink = bimap substTermRef substTypeRef + substTypeLink = substTypeRef + +x2cTType :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Type -> C.Term.Type Symbol +x2cTType substText substHash = C.Type.rmap (bimap substText substHash) + +-- | Shared implementation for preparing term definition+type or watch expression result for database. +-- The Type is optional, because we don't store them for watch expression results. +c2xTerm :: forall m t d. Monad m => (Text -> m t) -> (H.Hash -> m d) -> C.Term Symbol -> Maybe (C.Term.Type Symbol) -> m (LocalIds' t d, S.Term.Term, Maybe (S.Term.Type)) +c2xTerm saveText saveDefn tm tp = + done =<< (runWriterT . 
flip evalStateT mempty) do + sterm <- ABT.transformM go tm + stype <- traverse (ABT.transformM goType) tp + pure (sterm, stype) where - go :: forall m a. (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text S.Term.LocalTextId, Map H.Hash S.Term.LocalDefnId) m) => C.Term.F Symbol a -> m (S.Term.F a) + go :: forall m a. (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text LocalTextId, Map H.Hash LocalDefnId) m) => C.Term.F Symbol a -> m (S.Term.F a) go = \case C.Term.Int n -> pure $ C.Term.Int n C.Term.Nat n -> pure $ C.Term.Nat n @@ -293,7 +335,7 @@ c2xTerm saveText saveDefn tm = C.Term.TypeLink <$> bitraverse lookupText lookupDefn r goType :: forall m a. - (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text S.Term.LocalTextId, Map H.Hash S.Term.LocalDefnId) m) => + (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text LocalTextId, Map H.Hash LocalDefnId) m) => C.Type.FT a -> m (S.Term.FT a) goType = \case @@ -309,13 +351,13 @@ c2xTerm saveText saveDefn tm = forall m w s a. ( MonadState s m, MonadWriter w m, - Lens.Field1 s s (Map Text S.Term.LocalTextId) (Map Text S.Term.LocalTextId), - Lens.Field1 w w (Seq Text) (Seq Text), - Lens.Field2 s s (Map H.Hash S.Term.LocalDefnId) (Map H.Hash S.Term.LocalDefnId), - Lens.Field2 w w (Seq H.Hash) (Seq H.Hash) + Lens.Field1' s (Map Text LocalTextId), + Lens.Field1' w (Seq Text), + Lens.Field2' s (Map H.Hash LocalDefnId), + Lens.Field2' w (Seq H.Hash) ) => C.Term.MatchCase Text C.Term.TypeRef a -> - m (C.Term.MatchCase S.Term.LocalTextId S.Term.TypeRef a) + m (C.Term.MatchCase LocalTextId S.Term.TypeRef a) goCase = \case C.Term.MatchCase pat guard body -> C.Term.MatchCase <$> goPat pat <*> pure guard <*> pure body @@ -323,13 +365,13 @@ c2xTerm saveText saveDefn tm = forall m s w. 
( MonadState s m, MonadWriter w m, - Lens.Field1 s s (Map Text S.Term.LocalTextId) (Map Text S.Term.LocalTextId), - Lens.Field1 w w (Seq Text) (Seq Text), - Lens.Field2 s s (Map H.Hash S.Term.LocalDefnId) (Map H.Hash S.Term.LocalDefnId), - Lens.Field2 w w (Seq H.Hash) (Seq H.Hash) + Lens.Field1' s (Map Text LocalTextId), + Lens.Field1' w (Seq Text), + Lens.Field2' s (Map H.Hash LocalDefnId), + Lens.Field2' w (Seq H.Hash) ) => C.Term.Pattern Text C.Term.TypeRef -> - m (C.Term.Pattern S.Term.LocalTextId S.Term.TypeRef) + m (C.Term.Pattern LocalTextId S.Term.TypeRef) goPat = \case C.Term.PUnbound -> pure $ C.Term.PUnbound C.Term.PVar -> pure $ C.Term.PVar @@ -349,24 +391,24 @@ c2xTerm saveText saveDefn tm = forall m s w t. ( MonadState s m, MonadWriter w m, - Lens.Field1 s s (Map t S.Term.LocalTextId) (Map t S.Term.LocalTextId), - Lens.Field1 w w (Seq t) (Seq t), + Lens.Field1' s (Map t LocalTextId), + Lens.Field1' w (Seq t), Ord t ) => t -> - m S.Term.LocalTextId - lookupText = lookup Lens._1 Lens._1 S.Term.LocalTextId + m LocalTextId + lookupText = lookup Lens._1 Lens._1 LocalTextId lookupDefn :: forall m s w d. ( MonadState s m, MonadWriter w m, - Lens.Field2 s s (Map d S.Term.LocalDefnId) (Map d S.Term.LocalDefnId), - Lens.Field2 w w (Seq d) (Seq d), + Lens.Field2' s (Map d LocalDefnId), + Lens.Field2' w (Seq d), Ord d ) => d -> - m S.Term.LocalDefnId - lookupDefn = lookup Lens._2 Lens._2 S.Term.LocalDefnId + m LocalDefnId + lookupDefn = lookup Lens._2 Lens._2 LocalDefnId lookup :: forall m s w t t'. 
(MonadState s m, MonadWriter w m, Ord t) => @@ -384,32 +426,30 @@ c2xTerm saveText saveDefn tm = Writer.tell $ Lens.set writerLens (Seq.singleton t) mempty pure id Just t' -> pure t' - done :: (S.Term.Term, (Seq Text, Seq H.Hash)) -> m (LocalIds' t d, S.Term.Term) - done (tm, (localTextValues, localDefnValues)) = do + done :: ((S.Term.Term, Maybe S.Term.Type), (Seq Text, Seq H.Hash)) -> m (LocalIds' t d, S.Term.Term, Maybe S.Term.Type) + done ((tm, tp), (localTextValues, localDefnValues)) = do textIds <- traverse saveText localTextValues defnIds <- traverse saveDefn localDefnValues let ids = LocalIds (Vector.fromList (Foldable.toList textIds)) (Vector.fromList (Foldable.toList defnIds)) - pure (ids, void tm) + pure (ids, void tm, void <$> tp) c2wTerm :: DB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) -c2wTerm = c2xTerm Q.saveText Q.saveHashHash +c2wTerm tm = c2xTerm Q.saveText Q.saveHashHash tm Nothing <&> \(w, tm, _) -> (w, tm) -c2sTerm :: EDB m => C.Term Symbol -> MaybeT m (LocalIds, S.Term.Term) -c2sTerm = c2xTerm Q.saveText hashToObjectId +-- |returns `Nothing` if a hash dependency is missing +c2sTerm :: EDB m => C.Term Symbol -> C.Term.Type Symbol -> m (LocalIds, S.Term.Term, S.Term.Type) +c2sTerm tm tp = c2xTerm Q.saveText hashToObjectId tm (Just tp) <&> \(w, tm, Just tp) -> (w, tm, tp) loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) -loadTypeOfTermByTermReference r = do - -- convert query reference by looking up db ids - r' <- C.Reference.idH hashToObjectId r - -- load "type of term" blob for the reference - bytes <- m' "Q.loadTypeOfTerm" Q.loadTypeOfTerm r' - -- deserialize the blob into the type - typ <- getFromBytesOr (ErrTypeOfTerm r) (S.getType S.getReference) bytes - -- convert the result type by looking up db ids - C.Type.rtraverse s2cReference typ +loadTypeOfTermByTermReference (C.Reference.Id h i) = + hashToObjectId h + >>= liftQ . 
Q.loadObjectById + -- retrieve and deserialize the blob + >>= decodeTermElementDiscardingTerm i + >>= uncurry s2cTypeOfTerm loadDeclByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) loadDeclByReference (C.Reference.Id h i) = do @@ -428,8 +468,12 @@ loadDeclByReference (C.Reference.Id h i) = do substTypeRef = bimap substText (fmap substHash) pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here -saveTermComponent :: H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m () -saveTermComponent = error "todo" +saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m Db.ObjectId +saveTermComponent h terms = do + sTermElements <- traverse (uncurry c2sTerm) terms + hashId <- Q.saveHashHash h + let bytes = S.putBytes S.putTermComponent (S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements) + Q.saveObject hashId OT.TermComponent bytes saveDeclComponent :: DB m => H.Hash -> [C.Decl Symbol] -> m () saveDeclComponent = error "todo" @@ -441,21 +485,21 @@ loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) loadWatch k r = C.Reference.idH Q.saveHashHash r >>= MaybeT . 
Q.loadWatch k - >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getLocalIds S.getTermElement) + >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getLocalIds S.getTerm) >>= uncurry s2cTerm saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () saveWatch w r t = do rs <- C.Reference.idH Q.saveHashHash r wterm <- c2wTerm t - let bytes = S.putBytes (S.putPair S.putLocalIds S.putTermElement) wterm + let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm Q.saveWatch w rs bytes -termsHavingType :: DB m => C.Reference -> m (Set C.Referent.Id) -termsHavingType = error "todo" +-- termsHavingType :: DB m => C.Reference -> m (Set C.Referent.Id) +-- termsHavingType = error "todo" -termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) -termsMentioningType = error "todo" +-- termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) +-- termsMentioningType = error "todo" -- something kind of funny here. first, we don't need to enumerate all the reference pos if we're just picking one -- second, it would be nice if we could leave these as S.References a little longer diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 067bba40c8..be8eaa9526 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -223,19 +223,6 @@ setNamespaceRoot id = execute sql (Only id) where sql = [here| INSERT OR REPLACE INTO namespace_root VALUES (?) |] -saveTypeOfTerm :: DB m => Reference.Id -> ByteString -> m () -saveTypeOfTerm r blob = execute sql (r :. Only blob) where sql = [here| - INSERT OR IGNORE INTO type_of_term - VALUES (?, ?, ?) - |] - -loadTypeOfTerm :: DB m => Reference.Id -> m (Maybe ByteString) -loadTypeOfTerm r = queryOnly sql r where sql = [here| - SELECT bytes FROM type_of_term - WHERE object_id = ? AND component_index = ? 
-|] - --- saveWatch :: DB m => WatchKind -> Reference.IdH -> ByteString -> m () saveWatch k r blob = execute sql (r :. Only blob) >> execute sql2 (r :. Only k) where diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 17e9c93f2b..c743f85c51 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -1,7 +1,8 @@ -{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ViewPatterns #-} module U.Codebase.Sqlite.Serialization where @@ -25,8 +26,9 @@ import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as BranchFull import qualified U.Codebase.Sqlite.Branch.MetadataSet as MetadataSet +import U.Codebase.Sqlite.DbId (PatchObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat -import U.Codebase.Sqlite.LocalIds +import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId) import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff import qualified U.Codebase.Sqlite.Patch.Format as PatchFormat import qualified U.Codebase.Sqlite.Patch.Full as PatchFull @@ -39,7 +41,6 @@ import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT import U.Util.Serialization import Prelude hiding (getChar, putChar) -import U.Codebase.Sqlite.DbId (PatchObjectId) putABT :: (MonadPut m, Foldable f, Functor f, Ord v) => @@ -52,11 +53,12 @@ putABT putVar putA putF abt = putFoldable putVar fvs *> go (annotateBound abt) where fvs = Set.toList $ ABT.freeVars abt - go (ABT.Term _ (a, env) abt) = putA a *> case abt of - ABT.Var v -> putWord8 0 *> putVarRef env v - ABT.Tm f -> putWord8 1 *> putF go f - ABT.Abs v body -> putWord8 2 *> putVar v *> go body - 
ABT.Cycle body -> putWord8 3 *> go body + go (ABT.Term _ (a, env) abt) = + putA a *> case abt of + ABT.Var v -> putWord8 0 *> putVarRef env v + ABT.Tm f -> putWord8 1 *> putF go f + ABT.Abs v body -> putWord8 2 *> putVar v *> go body + ABT.Cycle body -> putWord8 3 *> go body annotateBound :: (Ord v, Functor f, Foldable f) => ABT.Term f v a -> ABT.Term f v (a, [v]) annotateBound = go [] where @@ -141,203 +143,226 @@ putTermFormat = \case TermFormat.Term c -> putWord8 0 *> putTermComponent c getTermFormat :: MonadGet m => m TermFormat.TermFormat -getTermFormat = getWord8 >>= \case - 0 -> TermFormat.Term <$> getTermComponent - other -> unknownTag "getTermFormat" other +getTermFormat = + getWord8 >>= \case + 0 -> TermFormat.Term <$> getTermComponent + other -> unknownTag "getTermFormat" other putTermComponent :: MonadPut m => TermFormat.LocallyIndexedComponent -> m () putTermComponent (TermFormat.LocallyIndexedComponent v) = - putFramedArray (putPair putLocalIds putTermElement) v - -putTermElement :: MonadPut m => TermFormat.Term -> m () -putTermElement = putABT putSymbol putUnit putF + putFramedArray + ( \(localIds, term, typ) -> + putLocalIds localIds >> putFramed putTerm term >> putFramed putTType typ + ) + v + +putTerm :: MonadPut m => TermFormat.Term -> m () +putTerm = putABT putSymbol putUnit putF where - putF :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () - putF putChild = \case - Term.Int n -> - putWord8 0 *> putInt n - Term.Nat n -> - putWord8 1 *> putNat n - Term.Float n -> - putWord8 2 *> putFloat n - Term.Boolean b -> - putWord8 3 *> putBoolean b - Term.Text t -> - putWord8 4 *> putVarInt t - Term.Ref r -> - putWord8 5 *> putRecursiveReference r - Term.Constructor r cid -> - putWord8 6 *> putReference r *> putVarInt cid - Term.Request r cid -> - putWord8 7 *> putReference r *> putVarInt cid - Term.Handle h a -> - putWord8 8 *> putChild h *> putChild a - Term.App f arg -> - putWord8 9 *> putChild f *> putChild arg - Term.Ann e t -> - putWord8 10 *> 
putChild e *> putType putReference putSymbol t - Term.Sequence vs -> - putWord8 11 *> putFoldable putChild vs - Term.If cond t f -> - putWord8 12 *> putChild cond *> putChild t *> putChild f - Term.And x y -> - putWord8 13 *> putChild x *> putChild y - Term.Or x y -> - putWord8 14 *> putChild x *> putChild y - Term.Lam body -> - putWord8 15 *> putChild body - Term.LetRec bs body -> - putWord8 16 *> putFoldable putChild bs *> putChild body - Term.Let b body -> - putWord8 17 *> putChild b *> putChild body - Term.Match s cases -> - putWord8 18 *> putChild s *> putFoldable (putMatchCase putChild) cases - Term.Char c -> - putWord8 19 *> putChar c - Term.TermLink r -> - putWord8 20 *> putReferent putRecursiveReference putReference r - Term.TypeLink r -> - putWord8 21 *> putReference r - putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase TermFormat.LocalTextId TermFormat.TypeRef a -> m () - putMatchCase putChild (Term.MatchCase pat guard body) = - putPattern pat *> putMaybe putChild guard *> putChild body - putPattern :: MonadPut m => Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef -> m () - putPattern p = case p of - Term.PUnbound -> putWord8 0 - Term.PVar -> putWord8 1 - Term.PBoolean b -> putWord8 2 *> putBoolean b - Term.PInt n -> putWord8 3 *> putInt n - Term.PNat n -> putWord8 4 *> putNat n - Term.PFloat n -> putWord8 5 *> putFloat n - Term.PConstructor r cid ps -> - putWord8 6 - *> putReference r - *> putVarInt cid - *> putFoldable putPattern ps - Term.PAs p -> putWord8 7 *> putPattern p - Term.PEffectPure p -> putWord8 8 *> putPattern p - Term.PEffectBind r cid args k -> - putWord8 9 - *> putReference r - *> putVarInt cid - *> putFoldable putPattern args - *> putPattern k - Term.PSequenceLiteral ps -> - putWord8 10 *> putFoldable putPattern ps - Term.PSequenceOp l op r -> - putWord8 11 - *> putPattern l - *> putSeqOp op - *> putPattern r - Term.PText t -> putWord8 12 *> putVarInt t - Term.PChar c -> putWord8 13 *> putChar c - putSeqOp :: MonadPut m 
=> Term.SeqOp -> m () - putSeqOp Term.PCons = putWord8 0 - putSeqOp Term.PSnoc = putWord8 1 - putSeqOp Term.PConcat = putWord8 2 + putF :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () + putF putChild = \case + Term.Int n -> + putWord8 0 *> putInt n + Term.Nat n -> + putWord8 1 *> putNat n + Term.Float n -> + putWord8 2 *> putFloat n + Term.Boolean b -> + putWord8 3 *> putBoolean b + Term.Text t -> + putWord8 4 *> putVarInt t + Term.Ref r -> + putWord8 5 *> putRecursiveReference r + Term.Constructor r cid -> + putWord8 6 *> putReference r *> putVarInt cid + Term.Request r cid -> + putWord8 7 *> putReference r *> putVarInt cid + Term.Handle h a -> + putWord8 8 *> putChild h *> putChild a + Term.App f arg -> + putWord8 9 *> putChild f *> putChild arg + Term.Ann e t -> + putWord8 10 *> putChild e *> putTType t + Term.Sequence vs -> + putWord8 11 *> putFoldable putChild vs + Term.If cond t f -> + putWord8 12 *> putChild cond *> putChild t *> putChild f + Term.And x y -> + putWord8 13 *> putChild x *> putChild y + Term.Or x y -> + putWord8 14 *> putChild x *> putChild y + Term.Lam body -> + putWord8 15 *> putChild body + Term.LetRec bs body -> + putWord8 16 *> putFoldable putChild bs *> putChild body + Term.Let b body -> + putWord8 17 *> putChild b *> putChild body + Term.Match s cases -> + putWord8 18 *> putChild s *> putFoldable (putMatchCase putChild) cases + Term.Char c -> + putWord8 19 *> putChar c + Term.TermLink r -> + putWord8 20 *> putReferent putRecursiveReference putReference r + Term.TypeLink r -> + putWord8 21 *> putReference r + putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase LocalTextId TermFormat.TypeRef a -> m () + putMatchCase putChild (Term.MatchCase pat guard body) = + putPattern pat *> putMaybe putChild guard *> putChild body + putPattern :: MonadPut m => Term.Pattern LocalTextId TermFormat.TypeRef -> m () + putPattern p = case p of + Term.PUnbound -> putWord8 0 + Term.PVar -> putWord8 1 + Term.PBoolean b -> putWord8 2 *> 
putBoolean b + Term.PInt n -> putWord8 3 *> putInt n + Term.PNat n -> putWord8 4 *> putNat n + Term.PFloat n -> putWord8 5 *> putFloat n + Term.PConstructor r cid ps -> + putWord8 6 + *> putReference r + *> putVarInt cid + *> putFoldable putPattern ps + Term.PAs p -> putWord8 7 *> putPattern p + Term.PEffectPure p -> putWord8 8 *> putPattern p + Term.PEffectBind r cid args k -> + putWord8 9 + *> putReference r + *> putVarInt cid + *> putFoldable putPattern args + *> putPattern k + Term.PSequenceLiteral ps -> + putWord8 10 *> putFoldable putPattern ps + Term.PSequenceOp l op r -> + putWord8 11 + *> putPattern l + *> putSeqOp op + *> putPattern r + Term.PText t -> putWord8 12 *> putVarInt t + Term.PChar c -> putWord8 13 *> putChar c + putSeqOp :: MonadPut m => Term.SeqOp -> m () + putSeqOp Term.PCons = putWord8 0 + putSeqOp Term.PSnoc = putWord8 1 + putSeqOp Term.PConcat = putWord8 2 getTermComponent :: MonadGet m => m TermFormat.LocallyIndexedComponent getTermComponent = TermFormat.LocallyIndexedComponent - <$> getFramedArray (getPair getLocalIds getTermElement) + <$> getFramedArray (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) -getTermElement :: MonadGet m => m TermFormat.Term -getTermElement = getABT getSymbol getUnit getF +getTerm :: MonadGet m => m TermFormat.Term +getTerm = getABT getSymbol getUnit getF where getF :: MonadGet m => m a -> m (TermFormat.F a) - getF getChild = getWord8 >>= \case - 0 -> Term.Int <$> getInt - 1 -> Term.Nat <$> getNat - 2 -> Term.Float <$> getFloat - 3 -> Term.Boolean <$> getBoolean - 4 -> Term.Text <$> getVarInt - 5 -> Term.Ref <$> getRecursiveReference - 6 -> Term.Constructor <$> getReference <*> getVarInt - 7 -> Term.Request <$> getReference <*> getVarInt - 8 -> Term.Handle <$> getChild <*> getChild - 9 -> Term.App <$> getChild <*> getChild - 10 -> Term.Ann <$> getChild <*> getType getReference - 11 -> Term.Sequence <$> getSequence getChild - 12 -> Term.If <$> getChild <*> getChild <*> getChild - 13 -> Term.And 
<$> getChild <*> getChild - 14 -> Term.Or <$> getChild <*> getChild - 15 -> Term.Lam <$> getChild - 16 -> Term.LetRec <$> getList getChild <*> getChild - 17 -> Term.Let <$> getChild <*> getChild - 18 -> - Term.Match - <$> getChild - <*> getList - (Term.MatchCase <$> getPattern <*> getMaybe getChild <*> getChild) - 19 -> Term.Char <$> getChar - 20 -> Term.TermLink <$> getReferent - 21 -> Term.TypeLink <$> getReference - tag -> unknownTag "getTerm" tag + getF getChild = + getWord8 >>= \case + 0 -> Term.Int <$> getInt + 1 -> Term.Nat <$> getNat + 2 -> Term.Float <$> getFloat + 3 -> Term.Boolean <$> getBoolean + 4 -> Term.Text <$> getVarInt + 5 -> Term.Ref <$> getRecursiveReference + 6 -> Term.Constructor <$> getReference <*> getVarInt + 7 -> Term.Request <$> getReference <*> getVarInt + 8 -> Term.Handle <$> getChild <*> getChild + 9 -> Term.App <$> getChild <*> getChild + 10 -> Term.Ann <$> getChild <*> getType getReference + 11 -> Term.Sequence <$> getSequence getChild + 12 -> Term.If <$> getChild <*> getChild <*> getChild + 13 -> Term.And <$> getChild <*> getChild + 14 -> Term.Or <$> getChild <*> getChild + 15 -> Term.Lam <$> getChild + 16 -> Term.LetRec <$> getList getChild <*> getChild + 17 -> Term.Let <$> getChild <*> getChild + 18 -> + Term.Match + <$> getChild + <*> getList + (Term.MatchCase <$> getPattern <*> getMaybe getChild <*> getChild) + 19 -> Term.Char <$> getChar + 20 -> Term.TermLink <$> getReferent + 21 -> Term.TypeLink <$> getReference + tag -> unknownTag "getTerm" tag where getReferent :: MonadGet m => m (Referent' TermFormat.TermRef TermFormat.TypeRef) - getReferent = getWord8 >>= \case - 0 -> Referent.Ref <$> getRecursiveReference - 1 -> Referent.Con <$> getReference <*> getVarInt - x -> unknownTag "getTermComponent" x - getPattern :: MonadGet m => m (Term.Pattern TermFormat.LocalTextId TermFormat.TypeRef) - getPattern = getWord8 >>= \case - 0 -> pure Term.PUnbound - 1 -> pure Term.PVar - 2 -> Term.PBoolean <$> getBoolean - 3 -> Term.PInt <$> 
getInt - 4 -> Term.PNat <$> getNat - 5 -> Term.PFloat <$> getFloat - 6 -> Term.PConstructor <$> getReference <*> getVarInt <*> getList getPattern - 7 -> Term.PAs <$> getPattern - 8 -> Term.PEffectPure <$> getPattern - 9 -> - Term.PEffectBind - <$> getReference - <*> getVarInt - <*> getList getPattern - <*> getPattern - 10 -> Term.PSequenceLiteral <$> getList getPattern - 11 -> - Term.PSequenceOp - <$> getPattern - <*> getSeqOp - <*> getPattern - 12 -> Term.PText <$> getVarInt - 13 -> Term.PChar <$> getChar - x -> unknownTag "Pattern" x + getReferent = + getWord8 >>= \case + 0 -> Referent.Ref <$> getRecursiveReference + 1 -> Referent.Con <$> getReference <*> getVarInt + x -> unknownTag "getTermComponent" x + getPattern :: MonadGet m => m (Term.Pattern LocalTextId TermFormat.TypeRef) + getPattern = + getWord8 >>= \case + 0 -> pure Term.PUnbound + 1 -> pure Term.PVar + 2 -> Term.PBoolean <$> getBoolean + 3 -> Term.PInt <$> getInt + 4 -> Term.PNat <$> getNat + 5 -> Term.PFloat <$> getFloat + 6 -> Term.PConstructor <$> getReference <*> getVarInt <*> getList getPattern + 7 -> Term.PAs <$> getPattern + 8 -> Term.PEffectPure <$> getPattern + 9 -> + Term.PEffectBind + <$> getReference + <*> getVarInt + <*> getList getPattern + <*> getPattern + 10 -> Term.PSequenceLiteral <$> getList getPattern + 11 -> + Term.PSequenceOp + <$> getPattern + <*> getSeqOp + <*> getPattern + 12 -> Term.PText <$> getVarInt + 13 -> Term.PChar <$> getChar + x -> unknownTag "Pattern" x where getSeqOp :: MonadGet m => m Term.SeqOp - getSeqOp = getWord8 >>= \case - 0 -> pure Term.PCons - 1 -> pure Term.PSnoc - 2 -> pure Term.PConcat - tag -> unknownTag "SeqOp" tag - -lookupTermElement :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term) + getSeqOp = + getWord8 >>= \case + 0 -> pure Term.PCons + 1 -> pure Term.PSnoc + 2 -> pure Term.PConcat + tag -> unknownTag "SeqOp" tag + +lookupTermElement :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term, TermFormat.Type) lookupTermElement = 
- unsafeFramedArrayLookup (getPair getLocalIds getTermElement) . fromIntegral + unsafeFramedArrayLookup (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) . fromIntegral + +lookupTermElementDiscardingType :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term) +lookupTermElementDiscardingType = + unsafeFramedArrayLookup ((,) <$> getLocalIds <*> getFramed getTerm <* skipFramed) . fromIntegral + +lookupTermElementDiscardingTerm :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Type) +lookupTermElementDiscardingTerm = + unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getFramed getTType) . fromIntegral + + +getTType :: MonadGet m => m TermFormat.Type +getTType = getType getReference getType :: MonadGet m => m r -> m (Type.TypeR r Symbol) getType getReference = getABT getSymbol getUnit go where - go getChild = getWord8 >>= \case - 0 -> Type.Ref <$> getReference - 1 -> Type.Arrow <$> getChild <*> getChild - 2 -> Type.Ann <$> getChild <*> getKind - 3 -> Type.App <$> getChild <*> getChild - 4 -> Type.Effect <$> getChild <*> getChild - 5 -> Type.Effects <$> getList getChild - 6 -> Type.Forall <$> getChild - 7 -> Type.IntroOuter <$> getChild - tag -> unknownTag "getType" tag + go getChild = + getWord8 >>= \case + 0 -> Type.Ref <$> getReference + 1 -> Type.Arrow <$> getChild <*> getChild + 2 -> Type.Ann <$> getChild <*> getKind + 3 -> Type.App <$> getChild <*> getChild + 4 -> Type.Effect <$> getChild <*> getChild + 5 -> Type.Effects <$> getList getChild + 6 -> Type.Forall <$> getChild + 7 -> Type.IntroOuter <$> getChild + tag -> unknownTag "getType" tag getKind :: MonadGet m => m Kind - getKind = getWord8 >>= \case - 0 -> pure Kind.Star - 1 -> Kind.Arrow <$> getKind <*> getKind - tag -> unknownTag "getKind" tag + getKind = + getWord8 >>= \case + 0 -> pure Kind.Star + 1 -> Kind.Arrow <$> getKind <*> getKind + tag -> unknownTag "getKind" tag putDeclFormat :: MonadPut m => DeclFormat.DeclFormat -> m () putDeclFormat = \case @@ -352,16 
+377,17 @@ putDeclFormat = \case putDeclType declType putModifier modifier putFoldable putSymbol bound - putFoldable (putType putRecursiveReference putSymbol) constructorTypes + putFoldable putDType constructorTypes putDeclType Decl.Data = putWord8 0 putDeclType Decl.Effect = putWord8 1 putModifier Decl.Structural = putWord8 0 putModifier (Decl.Unique t) = putWord8 1 *> putText t getDeclFormat :: MonadGet m => m DeclFormat.DeclFormat -getDeclFormat = getWord8 >>= \case - 0 -> DeclFormat.Decl <$> getDeclComponent - other -> unknownTag "DeclFormat" other +getDeclFormat = + getWord8 >>= \case + 0 -> DeclFormat.Decl <$> getDeclComponent + other -> unknownTag "DeclFormat" other where getDeclComponent :: MonadGet m => m DeclFormat.LocallyIndexedComponent getDeclComponent = @@ -376,14 +402,16 @@ getDeclElement = <*> getList getSymbol <*> getList (getType getRecursiveReference) where - getDeclType = getWord8 >>= \case - 0 -> pure Decl.Data - 1 -> pure Decl.Effect - other -> unknownTag "DeclType" other - getModifier = getWord8 >>= \case - 0 -> pure Decl.Structural - 1 -> Decl.Unique <$> getText - other -> unknownTag "DeclModifier" other + getDeclType = + getWord8 >>= \case + 0 -> pure Decl.Data + 1 -> pure Decl.Effect + other -> unknownTag "DeclType" other + getModifier = + getWord8 >>= \case + 0 -> pure Decl.Structural + 1 -> Decl.Unique <$> getText + other -> unknownTag "DeclModifier" other lookupDeclElement :: MonadGet m => Reference.Pos -> m (LocalIds, DeclFormat.Decl Symbol) @@ -463,10 +491,11 @@ putTypeEdit TypeEdit.Deprecate = putWord8 0 putTypeEdit (TypeEdit.Replace r) = putWord8 1 *> putReference r getBranchFormat :: MonadGet m => m BranchFormat.BranchFormat -getBranchFormat = getWord8 >>= \case - 0 -> getBranchFull - 1 -> getBranchDiff - other -> unknownTag "BranchFormat" other +getBranchFormat = + getWord8 >>= \case + 0 -> getBranchFull + 1 -> getBranchDiff + other -> unknownTag "BranchFormat" other where getBranchFull = error "todo" getBranchDiff = error "todo" 
@@ -500,10 +529,11 @@ putReference = \case getReference :: (MonadGet m, Integral t, Bits t, Integral r, Bits r) => m (Reference' t r) -getReference = getWord8 >>= \case - 0 -> ReferenceBuiltin <$> getVarInt - 1 -> ReferenceDerived <$> (Reference.Id <$> getVarInt <*> getVarInt) - x -> unknownTag "getRecursiveReference" x +getReference = + getWord8 >>= \case + 0 -> ReferenceBuiltin <$> getVarInt + 1 -> ReferenceDerived <$> (Reference.Id <$> getVarInt <*> getVarInt) + x -> unknownTag "getRecursiveReference" x putRecursiveReference :: (MonadPut m, Integral t, Bits t, Integral r, Bits r) => @@ -518,10 +548,11 @@ putRecursiveReference = \case getRecursiveReference :: (MonadGet m, Integral t, Bits t, Integral r, Bits r) => m (Reference' t (Maybe r)) -getRecursiveReference = getWord8 >>= \case - 0 -> ReferenceBuiltin <$> getVarInt - 1 -> ReferenceDerived <$> (Reference.Id <$> getMaybe getVarInt <*> getVarInt) - x -> unknownTag "getRecursiveReference" x +getRecursiveReference = + getWord8 >>= \case + 0 -> ReferenceBuiltin <$> getVarInt + 1 -> ReferenceDerived <$> (Reference.Id <$> getMaybe getVarInt <*> getVarInt) + x -> unknownTag "getRecursiveReference" x putInt :: MonadPut m => Int64 -> m () putInt = serializeBE @@ -546,10 +577,17 @@ putBoolean False = putWord8 0 putBoolean True = putWord8 1 getBoolean :: MonadGet m => m Bool -getBoolean = getWord8 >>= \case - 0 -> pure False - 1 -> pure True - x -> unknownTag "Boolean" x +getBoolean = + getWord8 >>= \case + 0 -> pure False + 1 -> pure True + x -> unknownTag "Boolean" x + +putTType :: MonadPut m => TermFormat.Type -> m () +putTType = putType putReference putSymbol + +putDType :: MonadPut m => DeclFormat.Type Symbol -> m () +putDType = putType putRecursiveReference putSymbol putType :: (MonadPut m, Ord v) => @@ -585,10 +623,11 @@ putMaybe putA = \case Just a -> putWord8 1 *> putA a getMaybe :: MonadGet m => m a -> m (Maybe a) -getMaybe getA = getWord8 >>= \tag -> case tag of - 0 -> pure Nothing - 1 -> Just <$> getA - _ -> 
unknownTag "Maybe" tag +getMaybe getA = + getWord8 >>= \tag -> case tag of + 0 -> pure Nothing + 1 -> Just <$> getA + _ -> unknownTag "Maybe" tag unknownTag :: (MonadGet m, Show a) => String -> a -> m x unknownTag msg tag = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index d448b980c2..02d7e97059 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -2,32 +2,26 @@ module U.Codebase.Sqlite.Term.Format where -import Data.Bits (Bits) import Data.Vector (Vector) -import Data.Word (Word64) import U.Codebase.Reference (Reference') import U.Codebase.Referent (Referent') import U.Codebase.Sqlite.LocalIds -import U.Codebase.Sqlite.Symbol + ( LocalIds', LocalTextId, LocalDefnId ) +import U.Codebase.Sqlite.Symbol ( Symbol ) import qualified U.Codebase.Term as Term import qualified U.Core.ABT as ABT import qualified U.Codebase.Type as Type import qualified U.Codebase.Sqlite.Reference as Sqlite import U.Codebase.Sqlite.DbId (ObjectId, TextId) -newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 -newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 - type TermRef = Reference' LocalTextId (Maybe LocalDefnId) - type TypeRef = Reference' LocalTextId LocalDefnId - type TermLink = Referent' TermRef TypeRef type TypeLink = TypeRef type LocallyIndexedComponent = LocallyIndexedComponent' TextId ObjectId newtype LocallyIndexedComponent' t d = - LocallyIndexedComponent (Vector (LocalIds' t d, Term)) + LocallyIndexedComponent (Vector (LocalIds' t d, Term, Type)) type F = Term.F' LocalTextId TermRef TypeRef TermLink TypeLink Symbol diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 521681ab15..d09a25c4ca 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ 
b/codebase2/codebase-sqlite/sql/create.sql @@ -86,13 +86,6 @@ CREATE TABLE causal_old ( new_hash_id INTEGER NOT NULL REFERENCES hash(id) ); -CREATE TABLE type_of_term ( - object_id INTEGER NOT NULL REFERENCES object(id), - component_index INTEGER NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (object_id, component_index) -); - CREATE TABLE watch_result ( hash_id INTEGER NOT NULL REFERENCES object(id), component_index INTEGER NOT NULL, diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index d60f982361..7745023d38 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -45,6 +45,7 @@ library bytes, bytestring, containers, + extra, here, lens, mtl, diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 387ea3bff1..99c60ce1c4 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -34,7 +34,7 @@ import UnliftIO.Directory (createDirectoryIfMissing, doesFileExist) import Prelude hiding (readFile, writeFile) import Data.Map (Map) import qualified Data.Map as Map -import Control.Applicative (Applicative(liftA2)) +import Control.Applicative (liftA3, Applicative(liftA2)) import GHC.Word (Word64) import Data.Set (Set) import qualified Data.Set as Set @@ -151,11 +151,11 @@ getSequence getA = do length <- getVarInt Seq.replicateM length getA -getFramed :: MonadGet m => Get a -> m (Maybe a) +getFramed :: MonadGet m => Get a -> m a getFramed get = do size <- getVarInt bytes <- getByteString size - pure $ getFromBytes get bytes + either fail pure $ runGetS get bytes putFramed :: MonadPut m => Put a -> a -> m () putFramed put a = do @@ -220,3 +220,6 @@ putPair putA putB (a,b) = putA a *> putB b getPair :: MonadGet m => m a -> m b -> m (a,b) getPair = liftA2 (,) + +getTuple3 :: MonadGet m => m 
a -> m b -> m c -> m (a,b,c) +getTuple3 = liftA3 (,,) diff --git a/codebase2/util/U/Util/Lens.hs b/codebase2/util/U/Util/Lens.hs new file mode 100644 index 0000000000..edf525505f --- /dev/null +++ b/codebase2/util/U/Util/Lens.hs @@ -0,0 +1,8 @@ +{-# LANGUAGE ConstraintKinds #-} + +module U.Util.Lens where + +import qualified Control.Lens as Lens + +type Field1' s a = Lens.Field1 s s a a +type Field2' s a = Lens.Field2 s s a a diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal index 594f7dbd9c..c3a77a47ee 100644 --- a/codebase2/util/unison-util.cabal +++ b/codebase2/util/unison-util.cabal @@ -19,6 +19,7 @@ library U.Util.Components U.Util.Hash U.Util.Hashable + U.Util.Lens U.Util.Monoid U.Util.Relation -- other-modules: @@ -28,6 +29,7 @@ library bytestring, containers, cryptonite, + lens, memory, text, sandi diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 64a36c4e66..fd0490be5f 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -14,6 +14,7 @@ import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.Trans.Maybe (MaybeT) import Data.Bifunctor (Bifunctor (first), second) import Data.Foldable (Foldable (toList), traverse_) +import Data.Functor (void) import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) @@ -25,6 +26,9 @@ import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite import System.FilePath (()) +-- import qualified U.Codebase.Sqlite.Operations' as Ops + +import U.Codebase.HashTags (CausalHash (unCausalHash)) import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Sqlite.ObjectType as OT import U.Codebase.Sqlite.Operations (EDB) @@ -36,6 +40,7 @@ import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 import 
Unison.Codebase.Branch (Branch) import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Causal as Causal import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv @@ -60,8 +65,6 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import UnliftIO (MonadIO, catchIO) import UnliftIO.STM -import qualified Unison.Codebase.Causal as Causal -import U.Codebase.HashTags (CausalHash(unCausalHash)) -- 1) buffer up the component -- 2) in the event that the component is complete, then what? @@ -203,7 +206,7 @@ sqliteCodebase root = do tryFlushBuffer termBuffer ( \h2 -> - Ops.saveTermComponent h2 + void . Ops.saveTermComponent h2 . fmap (first (Cv.term1to2 h) . second Cv.ttype1to2) ) tryFlushTermBuffer @@ -355,12 +358,12 @@ sqliteCodebase root = do cs <- Ops.causalHashesByPrefix (Cv.sbh1to2 sh) pure $ Set.map (Causal.RawHash . Cv.hash2to1 . unCausalHash) cs - -- Do we want to include causal hashes here or just namespace hashes? - -- Could we expose just one or the other of them to the user? - -- Git uses commit hashes and tree hashes (analogous to causal hashes - -- and namespace hashes, respectively), but the user is presented - -- primarily with commit hashes. - -- Arya leaning towards doing the same for Unison. + -- Do we want to include causal hashes here or just namespace hashes? + -- Could we expose just one or the other of them to the user? + -- Git uses commit hashes and tree hashes (analogous to causal hashes + -- and namespace hashes, respectively), but the user is presented + -- primarily with commit hashes. + -- Arya leaning towards doing the same for Unison. 
let finalizer = do Sqlite.close conn @@ -368,11 +371,11 @@ sqliteCodebase root = do terms <- readTVarIO termBuffer let printBuffer header b = if b /= mempty - then putStrLn header >> putStrLn "" >> print b else pure () + then putStrLn header >> putStrLn "" >> print b + else pure () printBuffer "Decls:" decls printBuffer "Terms:" terms - pure $ ( finalizer, Codebase1.Codebase From 239690c584be37950ed0aa4722d621de6985c06d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 4 Nov 2020 00:22:40 -0500 Subject: [PATCH 043/225] saveDeclComponent --- .../U/Codebase/Sqlite/Decl/Format.hs | 12 +- .../U/Codebase/Sqlite/Operations.hs | 172 +++++++++++------- .../src/Unison/Codebase/SqliteCodebase.hs | 2 +- 3 files changed, 112 insertions(+), 74 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs index 37f7cea30d..61f284bbcb 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs @@ -2,12 +2,10 @@ module U.Codebase.Sqlite.Decl.Format where -import Data.Bits (Bits) import Data.Vector (Vector) -import Data.Word (Word64) import U.Codebase.Decl (DeclR) import U.Codebase.Reference (Reference') -import U.Codebase.Sqlite.LocalIds (LocalIds) +import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalIds, LocalTextId) import U.Codebase.Sqlite.Symbol (Symbol) import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT @@ -22,10 +20,8 @@ data LocallyIndexedComponent type Decl v = DeclR TypeRef v -type Type v = ABT.Term (Type.F' TypeRef) v () +type Type v = ABT.Term F v () -type TypeRef = Reference' LocalTextId (Maybe LocalTypeId) +type F = Type.F' TypeRef -newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 - -newtype LocalTypeId = LocalTypeId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +type TypeRef = Reference' LocalTextId 
(Maybe LocalDefnId) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 8c846d824a..a0bbc0a2bd 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -86,10 +86,10 @@ import qualified U.Codebase.TypeEdit as C.TypeEdit import U.Codebase.WatchKind (WatchKind) import qualified U.Core.ABT as ABT import qualified U.Util.Hash as H +import qualified U.Util.Lens as Lens import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S -import qualified U.Util.Lens as Lens type Err m = MonadError Error m @@ -176,9 +176,10 @@ s2cPatch :: EDB m => S.Patch -> m C.Patch s2cPatch = error "todo" lookupTextId :: EDB m => Text -> m Db.TextId -lookupTextId t = Q.loadText t >>= \case - Just textId -> pure textId - Nothing -> throwError $ UnknownText t +lookupTextId t = + Q.loadText t >>= \case + Just textId -> pure textId + Nothing -> throwError $ UnknownText t loadTextById :: EDB m => Db.TextId -> m Text loadTextById = liftQ . 
Q.loadTextById @@ -304,16 +305,16 @@ c2xTerm saveText saveDefn tm tp = C.Term.Nat n -> pure $ C.Term.Nat n C.Term.Float n -> pure $ C.Term.Float n C.Term.Boolean b -> pure $ C.Term.Boolean b - C.Term.Text t -> C.Term.Text <$> lookupText t + C.Term.Text t -> C.Term.Text <$> lookupText_ t C.Term.Char ch -> pure $ C.Term.Char ch C.Term.Ref r -> - C.Term.Ref <$> bitraverse lookupText (traverse lookupDefn) r + C.Term.Ref <$> bitraverse lookupText_ (traverse lookupDefn_) r C.Term.Constructor typeRef cid -> C.Term.Constructor - <$> bitraverse lookupText lookupDefn typeRef + <$> bitraverse lookupText_ lookupDefn_ typeRef <*> pure cid C.Term.Request typeRef cid -> - C.Term.Request <$> bitraverse lookupText lookupDefn typeRef <*> pure cid + C.Term.Request <$> bitraverse lookupText_ lookupDefn_ typeRef <*> pure cid C.Term.Handle a a2 -> pure $ C.Term.Handle a a2 C.Term.App a a2 -> pure $ C.Term.App a a2 C.Term.Ann a typ -> C.Term.Ann a <$> ABT.transformM goType typ @@ -328,18 +329,18 @@ c2xTerm saveText saveDefn tm tp = C.Term.TermLink r -> C.Term.TermLink <$> bitraverse - (bitraverse lookupText (traverse lookupDefn)) - (bitraverse lookupText lookupDefn) + (bitraverse lookupText_ (traverse lookupDefn_)) + (bitraverse lookupText_ lookupDefn_) r C.Term.TypeLink r -> - C.Term.TypeLink <$> bitraverse lookupText lookupDefn r + C.Term.TypeLink <$> bitraverse lookupText_ lookupDefn_ r goType :: forall m a. 
(MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text LocalTextId, Map H.Hash LocalDefnId) m) => C.Type.FT a -> m (S.Term.FT a) goType = \case - C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText lookupDefn r + C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText_ lookupDefn_ r C.Type.Arrow i o -> pure $ C.Type.Arrow i o C.Type.Ann a k -> pure $ C.Type.Ann a k C.Type.App f a -> pure $ C.Type.App f a @@ -352,7 +353,7 @@ c2xTerm saveText saveDefn tm tp = ( MonadState s m, MonadWriter w m, Lens.Field1' s (Map Text LocalTextId), - Lens.Field1' w (Seq Text), + Lens.Field1' w (Seq Text), Lens.Field2' s (Map H.Hash LocalDefnId), Lens.Field2' w (Seq H.Hash) ) => @@ -366,7 +367,7 @@ c2xTerm saveText saveDefn tm tp = ( MonadState s m, MonadWriter w m, Lens.Field1' s (Map Text LocalTextId), - Lens.Field1' w (Seq Text), + Lens.Field1' w (Seq Text), Lens.Field2' s (Map H.Hash LocalDefnId), Lens.Field2' w (Seq H.Hash) ) => @@ -379,53 +380,15 @@ c2xTerm saveText saveDefn tm tp = C.Term.PInt i -> pure $ C.Term.PInt i C.Term.PNat n -> pure $ C.Term.PNat n C.Term.PFloat d -> pure $ C.Term.PFloat d - C.Term.PText t -> C.Term.PText <$> lookupText t + C.Term.PText t -> C.Term.PText <$> lookupText_ t C.Term.PChar c -> pure $ C.Term.PChar c - C.Term.PConstructor r i ps -> C.Term.PConstructor <$> bitraverse lookupText lookupDefn r <*> pure i <*> traverse goPat ps + C.Term.PConstructor r i ps -> C.Term.PConstructor <$> bitraverse lookupText_ lookupDefn_ r <*> pure i <*> traverse goPat ps C.Term.PAs p -> C.Term.PAs <$> goPat p C.Term.PEffectPure p -> C.Term.PEffectPure <$> goPat p - C.Term.PEffectBind r i bindings k -> C.Term.PEffectBind <$> bitraverse lookupText lookupDefn r <*> pure i <*> traverse goPat bindings <*> goPat k + C.Term.PEffectBind r i bindings k -> C.Term.PEffectBind <$> bitraverse lookupText_ lookupDefn_ r <*> pure i <*> traverse goPat bindings <*> goPat k C.Term.PSequenceLiteral ps -> C.Term.PSequenceLiteral <$> traverse goPat ps C.Term.PSequenceOp l op r -> 
C.Term.PSequenceOp <$> goPat l <*> pure op <*> goPat r - lookupText :: - forall m s w t. - ( MonadState s m, - MonadWriter w m, - Lens.Field1' s (Map t LocalTextId), - Lens.Field1' w (Seq t), - Ord t - ) => - t -> - m LocalTextId - lookupText = lookup Lens._1 Lens._1 LocalTextId - lookupDefn :: - forall m s w d. - ( MonadState s m, - MonadWriter w m, - Lens.Field2' s (Map d LocalDefnId), - Lens.Field2' w (Seq d), - Ord d - ) => - d -> - m LocalDefnId - lookupDefn = lookup Lens._2 Lens._2 LocalDefnId - lookup :: - forall m s w t t'. - (MonadState s m, MonadWriter w m, Ord t) => - Lens' s (Map t t') -> - Lens' w (Seq t) -> - (Word64 -> t') -> - t -> - m t' - lookup stateLens writerLens mk t = do - map <- Lens.use stateLens - case Map.lookup t map of - Nothing -> do - let id = mk . fromIntegral $ Map.size map - stateLens Lens.%= Map.insert t id - Writer.tell $ Lens.set writerLens (Seq.singleton t) mempty - pure id - Just t' -> pure t' + done :: ((S.Term.Term, Maybe S.Term.Type), (Seq Text, Seq H.Hash)) -> m (LocalIds' t d, S.Term.Term, Maybe S.Term.Type) done ((tm, tp), (localTextValues, localDefnValues)) = do textIds <- traverse saveText localTextValues @@ -436,13 +399,83 @@ c2xTerm saveText saveDefn tm tp = (Vector.fromList (Foldable.toList defnIds)) pure (ids, void tm, void <$> tp) +lookupText_ :: + ( MonadState s m, + MonadWriter w m, + Lens.Field1' s (Map t LocalTextId), + Lens.Field1' w (Seq t), + Ord t + ) => + t -> + m LocalTextId +lookupText_ = lookup_ Lens._1 Lens._1 LocalTextId + +lookupDefn_ :: + ( MonadState s m, + MonadWriter w m, + Lens.Field2' s (Map d LocalDefnId), + Lens.Field2' w (Seq d), + Ord d + ) => + d -> + m LocalDefnId +lookupDefn_ = lookup_ Lens._2 Lens._2 LocalDefnId + +-- | shared implementation of lookupTextHelper and lookupDefnHelper +-- Look up a value in the LUT, or append it. 
+lookup_ :: + (MonadState s m, MonadWriter w m, Ord t) => + Lens' s (Map t t') -> + Lens' w (Seq t) -> + (Word64 -> t') -> + t -> + m t' +lookup_ stateLens writerLens mk t = do + map <- Lens.use stateLens + case Map.lookup t map of + Nothing -> do + let id = mk . fromIntegral $ Map.size map + stateLens Lens.%= Map.insert t id + Writer.tell $ Lens.set writerLens (Seq.singleton t) mempty + pure id + Just t' -> pure t' + c2wTerm :: DB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) c2wTerm tm = c2xTerm Q.saveText Q.saveHashHash tm Nothing <&> \(w, tm, _) -> (w, tm) --- |returns `Nothing` if a hash dependency is missing c2sTerm :: EDB m => C.Term Symbol -> C.Term.Type Symbol -> m (LocalIds, S.Term.Term, S.Term.Type) c2sTerm tm tp = c2xTerm Q.saveText hashToObjectId tm (Just tp) <&> \(w, tm, Just tp) -> (w, tm, tp) +c2sDecl :: forall m t d. EDB m => (Text -> m t) -> (H.Hash -> m d) -> C.Decl Symbol -> m (LocalIds' t d, S.Decl.Decl Symbol) +c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do + done =<< (runWriterT . flip evalStateT mempty) do + cts' <- traverse (ABT.transformM goType) cts + pure (C.Decl.DataDeclaration dt m b cts') + where + goType :: + forall m a. 
+ (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text LocalTextId, Map H.Hash LocalDefnId) m) => + C.Type.FD a -> + m (S.Decl.F a) + goType = \case + C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText_ (traverse lookupDefn_) r + C.Type.Arrow i o -> pure $ C.Type.Arrow i o + C.Type.Ann a k -> pure $ C.Type.Ann a k + C.Type.App f a -> pure $ C.Type.App f a + C.Type.Effect e a -> pure $ C.Type.Effect e a + C.Type.Effects es -> pure $ C.Type.Effects es + C.Type.Forall a -> pure $ C.Type.Forall a + C.Type.IntroOuter a -> pure $ C.Type.IntroOuter a + done :: (S.Decl.Decl Symbol, (Seq Text, Seq H.Hash)) -> m (LocalIds' t d, S.Decl.Decl Symbol) + done (decl, (localTextValues, localDefnValues)) = do + textIds <- traverse saveText localTextValues + defnIds <- traverse saveDefn localDefnValues + let ids = + LocalIds + (Vector.fromList (Foldable.toList textIds)) + (Vector.fromList (Foldable.toList defnIds)) + pure (ids, decl) + loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) loadTypeOfTermByTermReference (C.Reference.Id h i) = hashToObjectId h @@ -462,8 +495,8 @@ loadDeclByReference (C.Reference.Id h i) = do hashes <- traverse loadHashByObjectId $ LocalIds.defnLookup localIds -- substitute the text and hashes back into the term - let substText (S.Decl.LocalTextId w) = texts Vector.! fromIntegral w - substHash (S.Decl.LocalTypeId w) = hashes Vector.! fromIntegral w + let substText (LocalTextId w) = texts Vector.! fromIntegral w + substHash (LocalDefnId w) = hashes Vector.! 
fromIntegral w substTypeRef :: S.Decl.TypeRef -> C.Decl.TypeRef substTypeRef = bimap substText (fmap substHash) pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here @@ -475,8 +508,17 @@ saveTermComponent h terms = do let bytes = S.putBytes S.putTermComponent (S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements) Q.saveObject hashId OT.TermComponent bytes -saveDeclComponent :: DB m => H.Hash -> [C.Decl Symbol] -> m () -saveDeclComponent = error "todo" +saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId +saveDeclComponent h decls = do + sDeclElements <- traverse (c2sDecl Q.saveText hashToObjectId) decls + hashId <- Q.saveHashHash h + let bytes = + S.putBytes + S.putDeclFormat + ( S.Decl.Decl . S.Decl.LocallyIndexedComponent $ + Vector.fromList sDeclElements + ) + Q.saveObject hashId OT.DeclComponent bytes listWatches :: EDB m => WatchKind -> m [C.Reference.Id] listWatches k = Q.loadWatchesByWatchKind k >>= traverse s2cReferenceId @@ -495,11 +537,11 @@ saveWatch w r t = do let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm Q.saveWatch w rs bytes --- termsHavingType :: DB m => C.Reference -> m (Set C.Referent.Id) --- termsHavingType = error "todo" +termsHavingType :: DB m => C.Reference -> m (Set C.Referent.Id) +termsHavingType = error "todo" --- termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) --- termsMentioningType = error "todo" +termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) +termsMentioningType = error "todo" -- something kind of funny here. 
first, we don't need to enumerate all the reference pos if we're just picking one -- second, it would be nice if we could leave these as S.References a little longer diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index fd0490be5f..e746caceee 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -216,7 +216,7 @@ sqliteCodebase root = do tryFlushDeclBuffer h = tryFlushBuffer declBuffer - (\h2 -> Ops.saveDeclComponent h2 . fmap (Cv.decl1to2 h)) + (\h2 -> void . Ops.saveDeclComponent h2 . fmap (Cv.decl1to2 h)) (\h -> tryFlushTermBuffer h >> tryFlushDeclBuffer h) h From 7d0472969450fae872cdcd7bd741db561ac9b67a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 10 Nov 2020 13:56:32 -0500 Subject: [PATCH 044/225] add LUT to Branch & make children required deps. --- .../U/Codebase/Sqlite/Branch/Diff.hs | 56 +++-- .../U/Codebase/Sqlite/Branch/Format.hs | 18 +- .../U/Codebase/Sqlite/Branch/Full.hs | 41 ++-- .../U/Codebase/Sqlite/Branch/MetadataSet.hs | 6 +- .../U/Codebase/Sqlite/Causal.hs | 9 +- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 5 +- .../U/Codebase/Sqlite/LocalIds.hs | 4 + .../U/Codebase/Sqlite/Operations.hs | 198 +++++++++++++----- .../U/Codebase/Sqlite/Queries.hs | 91 ++++---- .../U/Codebase/Sqlite/Reference.hs | 4 + .../U/Codebase/Sqlite/Referent.hs | 2 + .../U/Codebase/Sqlite/Serialization.hs | 22 +- codebase2/codebase-sqlite/sql/create.sql | 9 +- codebase2/codebase/U/Codebase/Branch.hs | 5 +- codebase2/codebase/U/Codebase/Causal.hs | 7 + codebase2/util/U/Util/Map.hs | 12 ++ codebase2/util/U/Util/Set.hs | 8 + codebase2/util/unison-util.cabal | 2 + 18 files changed, 347 insertions(+), 152 deletions(-) create mode 100644 codebase2/util/U/Util/Map.hs create mode 100644 codebase2/util/U/Util/Set.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs index 4d0d1f7755..399bb5b9b3 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs @@ -1,3 +1,7 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE ViewPatterns #-} module U.Codebase.Sqlite.Branch.Diff where @@ -6,28 +10,42 @@ import qualified Data.List as List import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) -import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, TextId) -import U.Codebase.Sqlite.Reference (Reference) -import U.Codebase.Sqlite.Referent (Referent) +import U.Codebase.Reference (Reference') +import U.Codebase.Referent (Referent') +import U.Codebase.Sqlite.DbId (BranchObjectId, CausalHashId, ObjectId, PatchObjectId, TextId) +import U.Codebase.Sqlite.LocalIds (LocalBranchObjectId, LocalCausalHashId, LocalDefnId, LocalPatchObjectId, LocalTextId) +import qualified U.Util.Map as Map +import Data.Bifunctor (Bifunctor(bimap)) +import qualified Data.Set as Set -type NameSegment = TextId +type LocalDiff = Diff' LocalTextId LocalDefnId LocalPatchObjectId (LocalBranchObjectId, LocalCausalHashId) +type Diff = Diff' TextId ObjectId PatchObjectId (BranchObjectId, CausalHashId) -type Metadata = Reference - -data PatchOp = PatchRemove | PatchAddReplace PatchObjectId -data DefinitionOp = RemoveDef | AddDefWithMetadata (Set Metadata) | AlterDefMetadata (AddRemove Metadata) -data ChildOp = ChildRemove | ChildAddReplace BranchObjectId +data DefinitionOp' r = RemoveDef | AddDefWithMetadata (Set r) | AlterDefMetadata (AddRemove r) +data PatchOp' p = PatchRemove | PatchAddReplace p deriving Functor +data ChildOp' c = ChildRemove | ChildAddReplace c deriving Functor type AddRemove a = Map a Bool +type LocalDefinitionOp = DefinitionOp' (Metadata LocalTextId LocalDefnId) +type LocalPatchOp = PatchOp' 
LocalPatchObjectId +type LocalChildOp = ChildOp' (LocalBranchObjectId, LocalCausalHashId) + +type DefinitionOp = DefinitionOp' (Metadata TextId ObjectId) +type PatchOp = PatchOp' PatchObjectId +type ChildOp = ChildOp' (BranchObjectId, CausalHashId) + addsRemoves :: AddRemove a -> ([a], [a]) -addsRemoves map = - let (fmap fst -> adds, fmap fst -> removes) = - List.partition snd (Map.toList map) - in (adds, removes) - -data Diff = Diff - { terms :: Map NameSegment (Map Referent DefinitionOp), - types :: Map NameSegment (Map Reference DefinitionOp), - patches :: Map NameSegment PatchOp, - children :: Map NameSegment ChildOp +addsRemoves map = (adds, removes) + where + (fmap fst -> adds, fmap fst -> removes) = List.partition snd (Map.toList map) + +type Referent'' t h = Referent' (Reference' t h) (Reference' t h) + +data Diff' t h p c = Diff + { terms :: Map t (Map (Referent'' t h) (DefinitionOp' (Metadata t h))), + types :: Map t (Map (Reference' t h) (DefinitionOp' (Metadata t h))), + patches :: Map t (PatchOp' p), + children :: Map t (ChildOp' c) } + +type Metadata t h = Reference' t h diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs index f1392a308c..0f257c769d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs @@ -1,7 +1,17 @@ module U.Codebase.Sqlite.Branch.Format where -import U.Codebase.Sqlite.Branch.Diff ( Diff ) -import U.Codebase.Sqlite.Branch.Full ( Branch ) -import U.Codebase.Sqlite.DbId (BranchObjectId) +import Data.Vector (Vector) +import U.Codebase.Sqlite.Branch.Diff (LocalDiff) +import U.Codebase.Sqlite.Branch.Full (LocalBranch) +import U.Codebase.Sqlite.DbId (CausalHashId, BranchObjectId, ObjectId, PatchObjectId, TextId) -data BranchFormat = Full Branch | Diff BranchObjectId Diff +data BranchFormat + = Full BranchLocalIds LocalBranch + | Diff BranchObjectId BranchLocalIds 
LocalDiff + +data BranchLocalIds = LocalIds + { branchTextLookup :: Vector TextId, + branchDefnLookup :: Vector ObjectId, + branchPatchLookup :: Vector PatchObjectId, + branchChildLookup :: Vector CausalHashId + } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs index 187ee56dd5..d429fea06c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs @@ -1,16 +1,33 @@ +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} + module U.Codebase.Sqlite.Branch.Full where import Data.Map (Map) -import U.Codebase.Sqlite.Branch.MetadataSet (MetadataSetFormat) -import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, TextId) -import U.Codebase.Sqlite.Reference (Reference) -import U.Codebase.Sqlite.Referent (Referent) - -type NameSegment = TextId - -data Branch = Branch - { terms :: Map NameSegment (Map Referent MetadataSetFormat), - types :: Map NameSegment (Map Reference MetadataSetFormat), - patches :: Map NameSegment PatchObjectId, - children :: Map NameSegment BranchObjectId +import Data.Set (Set) +import U.Codebase.Reference (Reference') +import U.Codebase.Referent (Referent') +import U.Codebase.Sqlite.DbId (BranchObjectId, CausalHashId, ObjectId, PatchObjectId, TextId) +import U.Codebase.Sqlite.LocalIds (LocalBranchObjectId, LocalCausalHashId, LocalDefnId, LocalPatchObjectId, LocalTextId) +import qualified U.Util.Map as Map +import Data.Bifunctor (Bifunctor(bimap)) +import qualified Data.Set as Set + +type LocalBranch = Branch' LocalTextId LocalDefnId LocalPatchObjectId (LocalBranchObjectId, LocalCausalHashId) + +type DbBranch = Branch' TextId ObjectId PatchObjectId (BranchObjectId, CausalHashId) + +type Referent'' t h = Referent' (Reference' t h) (Reference' t h) + +data Branch' t h p c = Branch + { terms :: Map t (Map (Referent'' t h) (MetadataSetFormat' t h)), + types :: Map t (Map 
(Reference' t h) (MetadataSetFormat' t h)), + patches :: Map t p, + children :: Map t c } + +type LocalMetadataSet = MetadataSetFormat' LocalTextId LocalDefnId + +type DbMetadataSet = MetadataSetFormat' TextId ObjectId + +data MetadataSetFormat' t h = Inline (Set (Reference' t h)) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs index 50cc8ff5d7..a7628d0c70 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs @@ -1,6 +1,6 @@ module U.Codebase.Sqlite.Branch.MetadataSet where -import Data.Set (Set) -import U.Codebase.Sqlite.Reference (Reference) +-- import Data.Set (Set) +-- import U.Codebase.Sqlite.Reference (Reference) -data MetadataSetFormat = Inline (Set Reference) \ No newline at end of file +-- data MetadataSetFormat = Inline (Set Reference) \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs index c6deaddc3b..c49bac897d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs @@ -1,7 +1,12 @@ module U.Codebase.Sqlite.Causal where - data Causal hc he = RawCausal { valueHash :: he, parentHashes :: [hc] -} \ No newline at end of file +} + +data CausalHead id hc he = RawCausalHead { + headValueId :: id, + headValueHash :: he, + headParentHashes :: [hc] +} diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 410855ab44..6184b12f1f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -34,9 +34,8 @@ newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, O newtype CausalHashId = CausalId { unCausalHashId :: HashId } deriving (Eq, Ord, Show) 
deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId +newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId + newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via ObjectId newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId --- newtype CausalHashId = CausalHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId -newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId -newtype NamespaceHashId = NamespaceHashId ObjectId deriving Show deriving (Hashable, FromField, ToField) via ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index f77c1fb2d1..d54802a2f1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -26,6 +26,10 @@ newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Int -- | represents an index into a defnLookup newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalPatchObjectId = LocalPatchObjectId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalBranchObjectId = LocalBranchObjectId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalCausalHashId = LocalCausalHashId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + instance Bitraversable LocalIds' where bitraverse f g (LocalIds t d) = LocalIds <$> traverse f t <*> traverse g d diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index a0bbc0a2bd..82d40572c1 
100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -38,6 +38,7 @@ import qualified Data.Vector as Vector import Data.Word (Word64) import qualified U.Codebase.Branch as C import qualified U.Codebase.Branch as C.Branch +import qualified U.Codebase.Causal as C import U.Codebase.Decl (ConstructorId) import qualified U.Codebase.Decl as C import qualified U.Codebase.Decl as C.Decl @@ -54,11 +55,18 @@ import qualified U.Codebase.Sqlite.Branch.Format as S import qualified U.Codebase.Sqlite.Branch.Format as S.BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as S import qualified U.Codebase.Sqlite.Branch.Full as S.Branch.Full -import qualified U.Codebase.Sqlite.Branch.MetadataSet as S -import qualified U.Codebase.Sqlite.Branch.MetadataSet as S.MetadataSet +import qualified U.Codebase.Sqlite.Branch.Full as S.MetadataSet import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl -import U.Codebase.Sqlite.LocalIds (LocalDefnId (..), LocalIds, LocalIds' (..), LocalTextId (..), WatchLocalIds) +import U.Codebase.Sqlite.LocalIds + ( LocalCausalHashId (..), + LocalDefnId (..), + LocalIds, + LocalIds' (..), + LocalPatchObjectId (..), + LocalTextId (..), + WatchLocalIds, + ) import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Patch.Diff as S @@ -87,9 +95,11 @@ import U.Codebase.WatchKind (WatchKind) import qualified U.Core.ABT as ABT import qualified U.Util.Hash as H import qualified U.Util.Lens as Lens +import qualified U.Util.Map as Map import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S +import qualified U.Util.Set as Set type Err m = MonadError Error m @@ -169,7 +179,7 @@ s2cTypeEdit = \case S.TypeEdit.Replace r -> C.TypeEdit.Replace <$> s2cReference r S.TypeEdit.Deprecate -> pure 
C.TypeEdit.Deprecate -s2cBranch :: EDB m => S.Branch -> m (C.Branch m) +s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) s2cBranch = error "todo" s2cPatch :: EDB m => S.Patch -> m C.Patch @@ -184,11 +194,10 @@ lookupTextId t = loadTextById :: EDB m => Db.TextId -> m Text loadTextById = liftQ . Q.loadTextById --- returns Nothing if no ObjectId for Hash h hashToObjectId :: EDB m => H.Hash -> m Db.ObjectId hashToObjectId h = do (Q.loadHashId . H.toBase32Hex) h >>= \case - Just hashId -> liftQ $ Q.objectIdByPrimaryHashId hashId + Just hashId -> liftQ $ Q.expectObjectIdForPrimaryHashId hashId Nothing -> throwError $ UnknownDependency h objectExistsForHash :: EDB m => H.Hash -> m Bool @@ -603,11 +612,26 @@ componentByObjectId id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. len - 1]] +lookupLocalText :: S.BranchFormat.BranchLocalIds -> LocalTextId -> Db.TextId +lookupLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! fromIntegral w + +lookupLocalDefn :: S.BranchFormat.BranchLocalIds -> LocalDefnId -> Db.ObjectId +lookupLocalDefn li (LocalDefnId w) = S.BranchFormat.branchDefnLookup li Vector.! fromIntegral w + +lookupLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLookup li Vector.! fromIntegral w + +lookupLocalChild li (LocalCausalHashId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w + +loadBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) +loadBranchByCausalHashId id = do + (liftQ . 
Q.loadBranchObjectIdByCausalHashId) id + >>= traverse loadBranchByObjectId + loadBranchByObjectId :: EDB m => Db.BranchObjectId -> m (C.Branch m) loadBranchByObjectId id = do deserializeBranchObject id >>= \case - S.BranchFormat.Full f -> doFull f - S.BranchFormat.Diff r d -> doDiff r [d] + S.BranchFormat.Full li f -> doFull (l2sFull li f) + S.BranchFormat.Diff r li d -> doDiff r [l2sDiff li d] where deserializeBranchObject :: EDB m => Db.BranchObjectId -> m S.BranchFormat deserializeBranchObject id = @@ -616,7 +640,11 @@ loadBranchByObjectId id = do deserializePatchObject id = (liftQ . Q.loadObjectById) (Db.unPatchObjectId id) >>= getFromBytesOr (ErrPatch id) S.getPatchFormat - doFull :: EDB m => S.Branch.Full.Branch -> m (C.Branch m) + + l2sFull :: S.BranchFormat.BranchLocalIds -> S.LocalBranch -> S.DbBranch + l2sFull li = error "todo" + + doFull :: EDB m => S.Branch.Full.DbBranch -> m (C.Branch m) doFull (S.Branch.Full.Branch tms tps patches children) = C.Branch <$> doTerms tms @@ -624,33 +652,28 @@ loadBranchByObjectId id = do <*> doPatches patches <*> doChildren children where - bitraverseMap :: (Applicative f, Ord b) => (a -> f b) -> (c -> f d) -> Map a c -> f (Map b d) - bitraverseMap f g = fmap Map.fromList . traverse (bitraverse f g) . Map.toList - traverseSet :: (Applicative f, Ord b) => (a -> f b) -> Set a -> f (Set b) - traverseSet f = fmap Set.fromList . traverse f . Set.toList - -- is there a way to make these tidier? - doTerms :: forall m. EDB m => Map Db.TextId (Map S.Referent S.MetadataSetFormat) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) + doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) doTerms = - bitraverseMap + Map.bitraverse (fmap C.Branch.NameSegment . 
loadTextById) - ( bitraverseMap s2cReferent \case - S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> traverseSet s2cReference rs + ( Map.bitraverse s2cReferent \case + S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs ) - doTypes :: forall m. EDB m => Map Db.TextId (Map S.Reference S.MetadataSetFormat) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) + doTypes :: forall m. EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) doTypes = - bitraverseMap + Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) - ( bitraverseMap s2cReference \case - S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> traverseSet s2cReference rs + ( Map.bitraverse s2cReference \case + S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs ) doPatches :: forall m. EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.Branch.NameSegment (PatchHash, m C.Patch)) - doPatches = bitraverseMap (fmap C.Branch.NameSegment . loadTextById) \patchId -> do - h <- PatchHash <$> loadHashByObjectId (Db.unPatchObjectId patchId) + doPatches = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \patchId -> do + h <- PatchHash <$> (loadHashByObjectId . 
Db.unPatchObjectId) patchId let patch :: m C.Patch patch = do deserializePatchObject patchId >>= \case S.PatchFormat.Full (S.Patch termEdits typeEdits) -> - C.Patch <$> bitraverseMap s2cReferent (traverseSet s2cTermEdit) termEdits <*> bitraverseMap s2cReference (traverseSet s2cTypeEdit) typeEdits + C.Patch <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits S.PatchFormat.Diff ref d -> doDiff ref [d] doDiff ref ds = deserializePatchObject ref >>= \case @@ -669,16 +692,79 @@ loadBranchByObjectId id = do in if diff == mempty then Nothing else Just diff pure (h, patch) - doChildren :: EDB m => Map Db.TextId Db.BranchObjectId -> m (Map C.Branch.NameSegment (m (C.Branch m))) - doChildren = bitraverseMap (fmap C.NameSegment . loadTextById) (pure . loadBranchByObjectId) + doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) + doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) (pure . loadCausalHead) + -- doFull :: EDB m => S.BranchFormat.BranchLocalIds -> S.Branch.Full.LocalBranch -> m (C.Branch m) + -- doFull li (S.Branch.Full.Branch tms tps patches children) = + -- C.Branch + -- <$> doTerms tms + -- <*> doTypes tps + -- <*> doPatches patches + -- <*> doChildren children + -- where + -- localToDbReference :: S.LocalReference -> S.Reference + -- localToDbReference = bitraverse (lookupLocalText li) (lookupLocalDefn li) + -- Map.bitraverse :: (Applicative f, Ord b) => (a -> f b) -> (c -> f d) -> Map a c -> f (Map b d) + -- Map.bitraverse f g = fmap Map.fromList . traverse (bitraverse f g) . Map.toList + -- Set.traverse :: (Applicative f, Ord b) => (a -> f b) -> Set a -> f (Set b) + -- Set.traverse f = fmap Set.fromList . traverse f . Set.toList + -- -- |convert the term namespace from localids to C. 
ones + -- doTerms :: EDB m => Map LocalTextId (Map S.LocalReferent S.LocalMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) + -- doTerms = + -- Map.bitraverse + -- (fmap C.Branch.NameSegment . loadTextById . lookupLocalText) + -- ( Map.bitraverse s2cReferent \case + -- S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse (s2cReference . localToDbReference) rs + -- ) + -- -- |convert the types namespace from localids to C. ones + -- doTypes :: forall m. EDB m => Map LocalTextId (Map S.LocalReference S.LocalMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) + -- doTypes = + -- Map.bitraverse + -- (fmap C.Branch.NameSegment . loadTextById . lookupLocalText) + -- ( Map.bitraverse s2cReference \case + -- S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse (s2cReference . localToDbReference) rs + -- ) + -- -- |convert the patches namespace from using localids to C. ones + -- doPatches :: forall m. EDB m => Map LocalTextId LocalPatchObjectId -> m (Map C.Branch.NameSegment (PatchHash, m C.Patch)) + -- doPatches = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \patchId -> do + -- h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId . 
lookupLocalPatch) patchId + -- let patch :: m C.Patch + -- patch = do + -- deserializePatchObject patchId >>= \case + -- S.PatchFormat.Full (S.Patch termEdits typeEdits) -> + -- C.Patch <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits + -- S.PatchFormat.Diff ref d -> doDiff ref [d] + -- doDiff ref ds = + -- deserializePatchObject ref >>= \case + -- S.PatchFormat.Full f -> s2cPatch (joinFull f ds) + -- S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) + -- joinFull f [] = f + -- joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds + -- where + -- f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) + -- addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) + -- addRemove add del src = + -- (Map.unionWith (<>) add (Map.differenceWith remove src del)) + -- remove :: Ord b => Set b -> Set b -> Maybe (Set b) + -- remove src del = + -- let diff = Set.difference src del + -- in if diff == mempty then Nothing else Just diff + -- pure (h, patch) + + -- doChildren :: EDB m => Map Db.TextId Db.BranchObjectId -> m (Map C.Branch.NameSegment (m (C.Branch m))) + -- doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) (pure . 
loadBranchByObjectId) + + l2sDiff :: S.BranchFormat.BranchLocalIds -> S.Branch.LocalDiff -> S.Branch.Diff + l2sDiff li = error "todo" + doDiff :: EDB m => Db.BranchObjectId -> [S.Branch.Diff] -> m (C.Branch m) - doDiff ref ds = + doDiff ref lds = deserializeBranchObject ref >>= \case - S.BranchFormat.Full f -> s2cBranch (joinFull f ds) - S.BranchFormat.Diff ref' d' -> doDiff ref' (d' : ds) + S.BranchFormat.Full li f -> joinFull (l2sFull li f) lds + S.BranchFormat.Diff ref' li' d' -> doDiff ref' (l2sDiff li' d' : lds) where - joinFull :: S.Branch -> [S.Branch.Diff] -> S.Branch - joinFull f [] = f + joinFull :: EDB m => S.DbBranch -> [S.Branch.Diff] -> m (C.Branch m) + joinFull f [] = doFull f joinFull (S.Branch.Full.Branch tms tps patches children) (S.Branch.Diff tms' tps' patches' children' : ds) = joinFull f' ds @@ -690,71 +776,74 @@ loadBranchByObjectId id = do (mergePatches patches patches') (mergeChildren children children') mergeChildren :: - Map S.Branch.Full.NameSegment Db.BranchObjectId -> - Map S.BranchDiff.NameSegment S.BranchDiff.ChildOp -> - Map S.Branch.Full.NameSegment Db.BranchObjectId + Ord ns => + Map ns (Db.BranchObjectId, Db.CausalHashId) -> + Map ns S.BranchDiff.ChildOp -> + Map ns (Db.BranchObjectId, Db.CausalHashId) mergeChildren = Map.merge Map.preserveMissing (Map.mapMissing fromChildOp) (Map.zipWithMaybeMatched mergeChildOp) mergeChildOp :: - S.Branch.NameSegment -> - Db.BranchObjectId -> + ns -> + (Db.BranchObjectId, Db.CausalHashId) -> S.BranchDiff.ChildOp -> - Maybe Db.BranchObjectId + Maybe (Db.BranchObjectId, Db.CausalHashId) mergeChildOp = const . 
const \case S.BranchDiff.ChildAddReplace id -> Just id S.BranchDiff.ChildRemove -> Nothing - fromChildOp :: S.Branch.NameSegment -> S.BranchDiff.ChildOp -> Db.BranchObjectId + fromChildOp :: ns -> S.BranchDiff.ChildOp -> (Db.BranchObjectId, Db.CausalHashId) fromChildOp = const \case S.BranchDiff.ChildAddReplace id -> id S.BranchDiff.ChildRemove -> error "diff tries to remove a nonexistent child" mergePatches :: - Map S.Branch.Full.NameSegment Db.PatchObjectId -> - Map S.BranchDiff.NameSegment S.BranchDiff.PatchOp -> - Map S.Branch.Full.NameSegment Db.PatchObjectId + Ord ns => + Map ns Db.PatchObjectId -> + Map ns S.BranchDiff.PatchOp -> + Map ns Db.PatchObjectId mergePatches = Map.merge Map.preserveMissing (Map.mapMissing fromPatchOp) (Map.zipWithMaybeMatched mergePatchOp) - fromPatchOp :: S.Branch.NameSegment -> S.BranchDiff.PatchOp -> Db.PatchObjectId + fromPatchOp :: ns -> S.BranchDiff.PatchOp -> Db.PatchObjectId fromPatchOp = const \case S.BranchDiff.PatchAddReplace id -> id S.BranchDiff.PatchRemove -> error "diff tries to remove a nonexistent child" - mergePatchOp :: S.Branch.NameSegment -> Db.PatchObjectId -> S.BranchDiff.PatchOp -> Maybe Db.PatchObjectId + mergePatchOp :: ns -> Db.PatchObjectId -> S.BranchDiff.PatchOp -> Maybe Db.PatchObjectId mergePatchOp = const . 
const \case S.BranchDiff.PatchAddReplace id -> Just id S.BranchDiff.PatchRemove -> Nothing + mergeDefns :: - Ord r => - Map S.BranchDiff.NameSegment (Map r S.MetadataSet.MetadataSetFormat) -> - Map S.BranchDiff.NameSegment (Map r S.BranchDiff.DefinitionOp) -> - Map S.BranchDiff.NameSegment (Map r S.MetadataSet.MetadataSetFormat) + (Ord ns, Ord r) => + Map ns (Map r S.MetadataSet.DbMetadataSet) -> + Map ns (Map r S.BranchDiff.DefinitionOp) -> + Map ns (Map r S.MetadataSet.DbMetadataSet) mergeDefns = Map.merge Map.preserveMissing (Map.mapMissing (const (fmap fromDefnOp))) (Map.zipWithMatched (const mergeDefnOp)) - fromDefnOp :: S.BranchDiff.DefinitionOp -> S.MetadataSet.MetadataSetFormat + fromDefnOp :: S.BranchDiff.DefinitionOp -> S.MetadataSet.DbMetadataSet fromDefnOp = \case S.Branch.Diff.AddDefWithMetadata md -> S.MetadataSet.Inline md S.Branch.Diff.RemoveDef -> error "diff tries to remove a nonexistent definition" S.Branch.Diff.AlterDefMetadata _md -> error "diff tries to change metadata for a nonexistent definition" mergeDefnOp :: Ord r => - Map r S.MetadataSet.MetadataSetFormat -> + Map r S.MetadataSet.DbMetadataSet -> Map r S.BranchDiff.DefinitionOp -> - Map r S.MetadataSet.MetadataSetFormat + Map r S.MetadataSet.DbMetadataSet mergeDefnOp = Map.merge Map.preserveMissing (Map.mapMissing (const fromDefnOp)) (Map.zipWithMaybeMatched (const mergeDefnOp')) mergeDefnOp' :: - S.MetadataSet.MetadataSetFormat -> + S.MetadataSet.DbMetadataSet -> S.BranchDiff.DefinitionOp -> - Maybe S.MetadataSet.MetadataSetFormat + Maybe S.MetadataSet.DbMetadataSet mergeDefnOp' (S.MetadataSet.Inline md) = \case S.Branch.Diff.AddDefWithMetadata _md -> error "diff tries to create a child that already exists" @@ -763,6 +852,9 @@ loadBranchByObjectId id = do let (Set.fromList -> adds, Set.fromList -> removes) = S.BranchDiff.addsRemoves md' in Just . 
S.MetadataSet.Inline $ (Set.union adds $ Set.difference md removes) +loadCausalHead :: (Db.BranchObjectId, Db.CausalHashId) -> C.CausalHead m CausalHash BranchHash (C.Branch m) +loadCausalHead = error "not implemented" + branchHashesByPrefix :: EDB m => ShortBranchHash -> m (Set BranchHash) branchHashesByPrefix (ShortBranchHash b32prefix) = do hashIds <- Q.namespaceHashIdByBase32Prefix b32prefix diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index be8eaa9526..3ce4b5a9d8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -27,7 +27,7 @@ import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) import U.Codebase.Reference (Reference') -import U.Codebase.Sqlite.DbId (BranchHashId, CausalHashId, CausalOldHashId, HashId (..), NamespaceHashId, ObjectId (..), TextId) +import U.Codebase.Sqlite.DbId (BranchObjectId(..), BranchHashId(..), CausalHashId, CausalOldHashId, HashId (..), ObjectId (..), TextId) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent @@ -45,6 +45,7 @@ data Integrity = UnknownHashId HashId | UnknownTextId TextId | UnknownObjectId ObjectId + | UnknownCausalHashId CausalHashId | UnknownCausalOldHashId CausalOldHashId | NoObjectForHashId HashId | NoNamespaceRoot @@ -52,13 +53,16 @@ data Integrity deriving Show -- |discard errors that you're sure are impossible -noError :: (Monad m, Show e) => ExceptT e m a -> m a -noError a = runExceptT a >>= \case - Left e -> error $ "unexpected error: " ++ show e +noExcept :: (Monad m, Show e) => ExceptT e m a -> m a +noExcept a = runExceptT a >>= \case Right a -> pure a + Left e -> error $ "unexpected error: " ++ show e + +-- noMaybe :: Maybe a -> a +-- noMaybe 
= fromMaybe (error "unexpected Nothing") -orError :: MonadError Integrity m => (a -> Integrity) -> a -> Maybe b -> m b -orError fe a = maybe (throwError $ fe a) pure +orError :: MonadError Integrity m => Integrity -> Maybe b -> m b +orError e = maybe (throwError e) pure -- type DerivedReferent = Referent.Id ObjectId ObjectId -- type DerivedReference = Reference.Id ObjectId @@ -78,20 +82,19 @@ loadHashId base32 = queryOnly sql (Only base32) where sql = [here| SELECT id FROM hash WHERE base32 = ? |] loadHashById :: EDB m => HashId -> m Base32Hex -loadHashById h = queryOnly sql (Only h) >>= orError UnknownHashId h +loadHashById h = queryOnly sql (Only h) >>= orError (UnknownHashId h) where sql = [here| SELECT base32 FROM hash WHERE id = ? |] saveText :: DB m => Text -> m TextId saveText t = execute sql (Only t) >> queryOne (loadText t) where sql = [here| INSERT OR IGNORE INTO text (text) VALUES (?) |] --- ok to return Nothing loadText :: DB m => Text -> m (Maybe TextId) loadText t = queryOnly sql (Only t) where sql = [here| SELECT id FROM text WHERE text = ? |] loadTextById :: EDB m => TextId -> m Text -loadTextById h = queryOnly sql (Only h) >>= orError UnknownTextId h +loadTextById h = queryOnly sql (Only h) >>= orError (UnknownTextId h) where sql = [here| SELECT text FROM text WHERE id = ? 
|] saveHashObject :: DB m => HashId -> ObjectId -> Int -> m () @@ -103,7 +106,7 @@ saveHashObject hId oId version = execute sql (hId, oId, version) where saveObject :: DB m => HashId -> ObjectType -> ByteString -> m ObjectId saveObject h t blob = - execute sql (h, t, blob) >> noError (objectIdByPrimaryHashId h) + execute sql (h, t, blob) >> queryOne (maybeObjectIdPrimaryHashId h) where sql = [here| INSERT OR IGNORE INTO object (primary_hash_id, type_id, bytes) @@ -111,45 +114,43 @@ saveObject h t blob = |] loadObjectById :: EDB m => ObjectId -> m ByteString -loadObjectById oId = queryOnly sql (Only oId) >>= orError UnknownObjectId oId +loadObjectById oId = queryOnly sql (Only oId) >>= orError (UnknownObjectId oId) where sql = [here| SELECT bytes FROM object WHERE id = ? |] -objectIdByPrimaryHashId :: EDB m => HashId -> m ObjectId -objectIdByPrimaryHashId h = queryOnly sql (Only h) >>= orError UnknownHashId h - where sql = [here| +-- |Not all hashes have corresponding objects; e.g., hashes of term types +expectObjectIdForPrimaryHashId :: EDB m => HashId -> m ObjectId +expectObjectIdForPrimaryHashId h = + maybeObjectIdPrimaryHashId h >>= orError (UnknownHashId h) + +maybeObjectIdPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) +maybeObjectIdPrimaryHashId h = queryOnly sql (Only h) where sql = [here| SELECT id FROM object WHERE primary_hash_id = ? |] -objectIdByAnyHashId :: EDB m => HashId -> m ObjectId -objectIdByAnyHashId h = - queryOnly sql (Only h) >>= orError NoObjectForHashId h where sql = [here| +expectObjectIdForAnyHashId :: EDB m => HashId -> m ObjectId +expectObjectIdForAnyHashId h = + maybeObjectIdForAnyHashId h >>= orError (NoObjectForHashId h) + +maybeObjectIdForAnyHashId :: DB m => HashId -> m (Maybe ObjectId) +maybeObjectIdForAnyHashId h = queryOnly sql (Only h) where sql = [here| SELECT object_id FROM hash_object WHERE hash_id = ? 
|] --- objectIdByAnyHash :: DB m => Base32Hex -> m (Maybe ObjectId) --- objectIdByAnyHash h = queryOnly sql (Only h) where sql = [here| --- SELECT object.id --- FROM hash --- INNER JOIN hash_object ON hash_object.hash_id = hash.id --- INNER JOIN object ON hash_object.object_id = object.id --- WHERE hash.base32 = ? --- |] - --- error to return Nothing +-- |All objects have corresponding hashes. loadPrimaryHashByObjectId :: EDB m => ObjectId -> m Base32Hex -loadPrimaryHashByObjectId oId = queryOnly sql (Only oId) >>= orError UnknownObjectId oId +loadPrimaryHashByObjectId oId = queryOnly sql (Only oId) >>= orError (UnknownObjectId oId) where sql = [here| SELECT hash.base32 - FROM hash INNER JOIN hash_object ON hash_object.hash_id = hash.id - WHERE hash_object.object_id = ? + FROM hash INNER JOIN object ON object.primary_hash_id = hash.id + WHERE object.id = ? |] objectAndPrimaryHashByAnyHash :: EDB m => Base32Hex -> m (Maybe (Base32Hex, ObjectId)) objectAndPrimaryHashByAnyHash h = runMaybeT do - hashId <- MaybeT $ loadHashId h - oId <- objectIdByAnyHashId hashId + hashId <- MaybeT $ loadHashId h -- hash may not exist + oId <- MaybeT $ maybeObjectIdForAnyHashId hashId -- hash may not correspond to object base32 <- loadPrimaryHashByObjectId oId pure (base32, oId) @@ -168,33 +169,37 @@ updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| -- |Maybe we would generalize this to something other than NamespaceHash if we -- end up wanting to store other kinds of Causals here too. -saveCausal :: DB m => CausalHashId -> NamespaceHashId -> m () -saveCausal self value = execute sql (self, value) where sql = [here| - INSERT OR IGNORE INTO causal (self_hash_id, value_hash_id) VALUES (?, ?) +saveCausal :: DB m => CausalHashId -> BranchHashId -> Maybe BranchObjectId -> m () +saveCausal self value oid = execute sql (self, value, oid) where sql = [here| + INSERT OR IGNORE INTO causal (self_hash_id, value_hash_id, value_object_id) + VALUES (?, ?,, ?) 
|] --- error to return Nothing -loadCausalValueHash :: DB m => CausalHashId -> m (Maybe NamespaceHashId) -loadCausalValueHash hash = queryOnly sql (Only hash) where sql = [here| +loadCausalValueHash :: EDB m => CausalHashId -> m BranchHashId +loadCausalValueHash id = + queryOnly sql (Only id) >>= orError (UnknownCausalHashId id) where sql = [here| SELECT value_hash_id FROM causal WHERE self_hash_id = ? |] +loadBranchObjectIdByCausalHashId :: EDB m => CausalHashId -> m (Maybe BranchObjectId) +loadBranchObjectIdByCausalHashId id = queryOnly sql (Only id) where sql = [here| + SELECT value_object_id FROM causal WHERE self_hash_id = ? +|] + saveCausalOld :: DB m => HashId -> CausalHashId -> m () saveCausalOld v1 v2 = execute sql (v1, v2) where sql = [here| INSERT OR IGNORE INTO causal_old (old_hash_id, new_hash_id) VALUES (?, ?) |] --- error to return Nothing loadCausalHashIdByCausalOldHash :: EDB m => CausalOldHashId -> m CausalHashId loadCausalHashIdByCausalOldHash id = - queryOnly sql (Only id) >>= orError UnknownCausalOldHashId id where sql = [here| + queryOnly sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| SELECT new_hash_id FROM causal_old where old_hash_id = ? |] --- error to return Nothing -loadOldCausalValueHash :: EDB m => CausalOldHashId -> m NamespaceHashId +loadOldCausalValueHash :: EDB m => CausalOldHashId -> m BranchHashId loadOldCausalValueHash id = - queryOnly sql (Only id) >>= orError UnknownCausalOldHashId id where sql = [here| + queryOnly sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| SELECT value_hash_id FROM causal INNER JOIN causal_old ON self_hash_id = new_hash_id WHERE old_hash_id = ? 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index 22ce195e8c..e9768972d6 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -10,10 +10,14 @@ import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(toRow)) import Database.SQLite.Simple.FromRow (FromRow(fromRow), field) import Database.SQLite.Simple.ToField (ToField) import Database.SQLite.Simple.FromField (FromField) +import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalTextId) type Reference = Reference' TextId ObjectId type Id = Id' ObjectId +type LocalReference = Reference' LocalTextId LocalDefnId +type LocalId = Id' LocalDefnId + type ReferenceH = Reference' TextId HashId type IdH = Id' HashId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs index 459132c322..9dd76b1944 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -16,6 +16,8 @@ import qualified U.Codebase.Reference as Reference type Referent = Referent' Sqlite.Reference Sqlite.Reference type Id = Id' ObjectId ObjectId +type LocalReferent = Referent' Sqlite.LocalReference Sqlite.LocalReference + instance ToRow Id where -- | objectId, componentIndex, constructorIndex toRow = \case diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index c743f85c51..a58ac5ef03 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -25,7 +25,6 @@ import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as 
BranchFull -import qualified U.Codebase.Sqlite.Branch.MetadataSet as MetadataSet import U.Codebase.Sqlite.DbId (PatchObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId) @@ -420,19 +419,26 @@ lookupDeclElement = putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () putBranchFormat = \case - BranchFormat.Full b -> putWord8 0 *> putBranchFull b - BranchFormat.Diff r d -> putWord8 1 *> putBranchDiff r d + BranchFormat.Full li b -> putWord8 0 *> putBranchFull li b + BranchFormat.Diff r li d -> putWord8 1 *> putBranchDiff r li d where putReferent' = putReferent putReference putReference - putBranchFull (BranchFull.Branch terms types patches children) = do + putBranchLocalIds (BranchFormat.LocalIds ts os ps cs) = do + putFoldable putVarInt ts + putFoldable putVarInt os + putFoldable putVarInt ps + putFoldable putVarInt cs + putBranchFull li (BranchFull.Branch terms types patches children) = do + putBranchLocalIds li putMap putVarInt (putMap putReferent' putMetadataSetFormat) terms putMap putVarInt (putMap putReference putMetadataSetFormat) types putMap putVarInt putVarInt patches - putMap putVarInt putVarInt children + putMap putVarInt (putPair putVarInt putVarInt) children putMetadataSetFormat = \case - MetadataSet.Inline s -> putWord8 0 *> putFoldable putReference s - putBranchDiff ref (BranchDiff.Diff terms types patches children) = do + BranchFull.Inline s -> putWord8 0 *> putFoldable putReference s + putBranchDiff ref li (BranchDiff.Diff terms types patches children) = do putVarInt ref + putBranchLocalIds li putMap putVarInt (putMap putReferent' putDiffOp) terms putMap putVarInt (putMap putReference putDiffOp) types putMap putVarInt putPatchOp patches @@ -451,7 +457,7 @@ putBranchFormat = \case BranchDiff.AlterDefMetadata md -> putWord8 2 *> putAddRemove putReference md putChildOp = \case BranchDiff.ChildRemove -> putWord8 0 - BranchDiff.ChildAddReplace b -> 
putWord8 1 *> putVarInt b + BranchDiff.ChildAddReplace b -> putWord8 1 *> putPair putVarInt putVarInt b putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () putPatchFormat = \case diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index d09a25c4ca..a31d1ad78e 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -50,15 +50,18 @@ CREATE INDEX object_type_id ON object(type_id); -- `causal` references value hash ids instead of value ids, in case you want -- to be able to drop values and keep just the causal spine. --- to be able to drop values and keep just the causal spine. -- This implementation keeps the hash of the dropped values, although I could -- see an argument to drop them too and just use NULL, but I thought it better -- to not lose their identities. CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), - -- intentionally not object_id, see above - value_hash_id INTEGER NOT NULL REFERENCES hash(id) + value_hash_id INTEGER NOT NULL REFERENCES hash(id), + value_object_id INTEGER NULL REFERENCES object(id), + gc_generation INTEGER NOT NULL ); +CREATE INDEX causal_value_hash_id ON causal(value_hash_id); +CREATE INDEX causal_gc_generation ON causal(gc_generation); + -- valueHash : Hash = hash(value) -- db.saveValue(valueHash, value) diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index ca2ba3c789..c2d83a2120 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -3,7 +3,8 @@ module U.Codebase.Branch where import Data.Map (Map) import Data.Set (Set) import Data.Text (Text) -import U.Codebase.HashTags (PatchHash) +import U.Codebase.Causal (CausalHead) +import U.Codebase.HashTags (BranchHash, CausalHash, PatchHash) import U.Codebase.Reference (Reference) import U.Codebase.Referent (Referent) import U.Codebase.TermEdit (TermEdit) @@ -17,7 
+18,7 @@ data Branch m = Branch { terms :: Map NameSegment (Map Referent (m MdValues)), types :: Map NameSegment (Map Reference (m MdValues)), patches :: Map NameSegment (PatchHash, m Patch), - children :: Map NameSegment (m (Branch m)) + children :: Map NameSegment (CausalHead m CausalHash BranchHash (Branch m)) } data Patch = Patch diff --git a/codebase2/codebase/U/Codebase/Causal.hs b/codebase2/codebase/U/Codebase/Causal.hs index c4c0875d94..131d15d78d 100644 --- a/codebase2/codebase/U/Codebase/Causal.hs +++ b/codebase2/codebase/U/Codebase/Causal.hs @@ -9,3 +9,10 @@ data Causal m hc he e = Causal parents :: Map hc (m (Causal m hc he e)), value :: m (Maybe e) } + +data CausalHead m hc he e = CausalHead + { headCausalHash :: hc, + headValueHash :: he, + headParents :: Map hc (m (Causal m hc he e)), + headValue :: m e + } diff --git a/codebase2/util/U/Util/Map.hs b/codebase2/util/U/Util/Map.hs new file mode 100644 index 0000000000..0c24812d95 --- /dev/null +++ b/codebase2/util/U/Util/Map.hs @@ -0,0 +1,12 @@ +module U.Util.Map where + +import qualified Data.Bifunctor as B +import qualified Data.Bitraversable as B +import Data.Map (Map) +import qualified Data.Map as Map + +bimap :: Ord a' => (a -> a') -> (b -> b') -> Map a b -> Map a' b' +bimap fa fb = Map.fromList . map (B.bimap fa fb) . Map.toList + +bitraverse :: (Applicative f, Ord a') => (a -> f a') -> (b -> f b') -> Map a b -> f (Map a' b') +bitraverse fa fb = fmap Map.fromList . traverse (B.bitraverse fa fb) . Map.toList diff --git a/codebase2/util/U/Util/Set.hs b/codebase2/util/U/Util/Set.hs new file mode 100644 index 0000000000..cef2712246 --- /dev/null +++ b/codebase2/util/U/Util/Set.hs @@ -0,0 +1,8 @@ +module U.Util.Set where + +import Data.Set (Set) +import qualified Data.Set as Set +import qualified Data.Traversable as T + +traverse :: (Applicative f, Ord b) => (a -> f b) -> Set a -> f (Set b) +traverse f = fmap Set.fromList . T.traverse f . 
Set.toList diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal index c3a77a47ee..a8958cbf8d 100644 --- a/codebase2/util/unison-util.cabal +++ b/codebase2/util/unison-util.cabal @@ -20,8 +20,10 @@ library U.Util.Hash U.Util.Hashable U.Util.Lens + U.Util.Map U.Util.Monoid U.Util.Relation + U.Util.Set -- other-modules: -- other-extensions: build-depends: From af5273fed9505851ea0aadadd45ff57e8e7f1e9d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 13 Nov 2020 13:12:14 -0500 Subject: [PATCH 045/225] l2sFull and l2sDiff --- .../U/Codebase/Sqlite/Branch/Diff.hs | 23 ++++++++++++++++--- .../U/Codebase/Sqlite/Branch/Format.hs | 2 +- .../U/Codebase/Sqlite/Branch/Full.hs | 16 +++++++++++-- .../U/Codebase/Sqlite/LocalIds.hs | 3 +-- .../U/Codebase/Sqlite/Operations.hs | 16 ++++++++----- .../U/Codebase/Sqlite/Serialization.hs | 6 ++--- 6 files changed, 49 insertions(+), 17 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs index 399bb5b9b3..c7b88b61b9 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs @@ -13,12 +13,12 @@ import Data.Set (Set) import U.Codebase.Reference (Reference') import U.Codebase.Referent (Referent') import U.Codebase.Sqlite.DbId (BranchObjectId, CausalHashId, ObjectId, PatchObjectId, TextId) -import U.Codebase.Sqlite.LocalIds (LocalBranchObjectId, LocalCausalHashId, LocalDefnId, LocalPatchObjectId, LocalTextId) +import U.Codebase.Sqlite.LocalIds (LocalBranchChildId, LocalDefnId, LocalPatchObjectId, LocalTextId) import qualified U.Util.Map as Map import Data.Bifunctor (Bifunctor(bimap)) import qualified Data.Set as Set -type LocalDiff = Diff' LocalTextId LocalDefnId LocalPatchObjectId (LocalBranchObjectId, LocalCausalHashId) +type LocalDiff = Diff' LocalTextId LocalDefnId LocalPatchObjectId LocalBranchChildId type Diff = Diff' TextId ObjectId 
PatchObjectId (BranchObjectId, CausalHashId) data DefinitionOp' r = RemoveDef | AddDefWithMetadata (Set r) | AlterDefMetadata (AddRemove r) @@ -28,7 +28,7 @@ type AddRemove a = Map a Bool type LocalDefinitionOp = DefinitionOp' (Metadata LocalTextId LocalDefnId) type LocalPatchOp = PatchOp' LocalPatchObjectId -type LocalChildOp = ChildOp' (LocalBranchObjectId, LocalCausalHashId) +type LocalChildOp = ChildOp' LocalBranchChildId type DefinitionOp = DefinitionOp' (Metadata TextId ObjectId) type PatchOp = PatchOp' PatchObjectId @@ -49,3 +49,20 @@ data Diff' t h p c = Diff } type Metadata t h = Reference' t h + +quadmap :: (Ord t', Ord h') => (t -> t') -> (h -> h') -> (p -> p') -> (c -> c') -> Diff' t h p c -> Diff' t' h' p' c' +quadmap ft fh fp fc (Diff terms types patches children) = + Diff + (Map.bimap ft (Map.bimap doReferent doDefnOp) terms) + (Map.bimap ft (Map.bimap doReference doDefnOp) types) + (Map.bimap ft doPatchOp patches) + (Map.bimap ft doChildOp children) + where + doReferent = bimap doReference doReference + doReference = bimap ft fh + doDefnOp = \case + RemoveDef -> RemoveDef + AddDefWithMetadata rs -> AddDefWithMetadata (Set.map doReference rs) + AlterDefMetadata ar -> AlterDefMetadata (Map.mapKeys doReference ar) + doPatchOp = fmap fp + doChildOp = fmap fc diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs index 0f257c769d..7248dd92de 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs @@ -13,5 +13,5 @@ data BranchLocalIds = LocalIds { branchTextLookup :: Vector TextId, branchDefnLookup :: Vector ObjectId, branchPatchLookup :: Vector PatchObjectId, - branchChildLookup :: Vector CausalHashId + branchChildLookup :: Vector (BranchObjectId, CausalHashId) } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs index d429fea06c..b1f0e04dd7 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs @@ -8,12 +8,12 @@ import Data.Set (Set) import U.Codebase.Reference (Reference') import U.Codebase.Referent (Referent') import U.Codebase.Sqlite.DbId (BranchObjectId, CausalHashId, ObjectId, PatchObjectId, TextId) -import U.Codebase.Sqlite.LocalIds (LocalBranchObjectId, LocalCausalHashId, LocalDefnId, LocalPatchObjectId, LocalTextId) +import U.Codebase.Sqlite.LocalIds (LocalBranchChildId, LocalDefnId, LocalPatchObjectId, LocalTextId) import qualified U.Util.Map as Map import Data.Bifunctor (Bifunctor(bimap)) import qualified Data.Set as Set -type LocalBranch = Branch' LocalTextId LocalDefnId LocalPatchObjectId (LocalBranchObjectId, LocalCausalHashId) +type LocalBranch = Branch' LocalTextId LocalDefnId LocalPatchObjectId LocalBranchChildId type DbBranch = Branch' TextId ObjectId PatchObjectId (BranchObjectId, CausalHashId) @@ -31,3 +31,15 @@ type LocalMetadataSet = MetadataSetFormat' LocalTextId LocalDefnId type DbMetadataSet = MetadataSetFormat' TextId ObjectId data MetadataSetFormat' t h = Inline (Set (Reference' t h)) + +quadmap :: forall t h p c t' h' p' c'. (Ord t', Ord h') => (t -> t') -> (h -> h') -> (p -> p') -> (c -> c') -> Branch' t h p c -> Branch' t' h' p' c' +quadmap ft fh fp fc (Branch terms types patches children) = + Branch + (Map.bimap ft doTerms terms) + (Map.bimap ft doTypes types) + (Map.bimap ft fp patches) + (Map.bimap ft fc children) + where + doTerms = Map.bimap (bimap (bimap ft fh) (bimap ft fh)) doMetadata + doTypes = Map.bimap (bimap ft fh) doMetadata + doMetadata (Inline s) = Inline . 
Set.map (bimap ft fh) $ s diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index d54802a2f1..01fbf0fa37 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -27,8 +27,7 @@ newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Int newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 newtype LocalPatchObjectId = LocalPatchObjectId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 -newtype LocalBranchObjectId = LocalBranchObjectId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 -newtype LocalCausalHashId = LocalCausalHashId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalBranchChildId = LocalBranchChildId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 instance Bitraversable LocalIds' where bitraverse f g (LocalIds t d) = LocalIds <$> traverse f t <*> traverse g d diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 82d40572c1..e442742360 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -59,7 +59,7 @@ import qualified U.Codebase.Sqlite.Branch.Full as S.MetadataSet import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds - ( LocalCausalHashId (..), + (LocalBranchChildId (..), LocalDefnId (..), LocalIds, LocalIds' (..), @@ -100,6 +100,7 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +import U.Codebase.Sqlite.Branch.Format (BranchLocalIds) type Err m = MonadError Error m @@ -612,15 +613,17 
@@ componentByObjectId id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. len - 1]] -lookupLocalText :: S.BranchFormat.BranchLocalIds -> LocalTextId -> Db.TextId +lookupLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId lookupLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! fromIntegral w -lookupLocalDefn :: S.BranchFormat.BranchLocalIds -> LocalDefnId -> Db.ObjectId +lookupLocalDefn :: S.BranchLocalIds -> LocalDefnId -> Db.ObjectId lookupLocalDefn li (LocalDefnId w) = S.BranchFormat.branchDefnLookup li Vector.! fromIntegral w +lookupLocalPatch :: BranchLocalIds -> LocalPatchObjectId -> Db.PatchObjectId lookupLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLookup li Vector.! fromIntegral w -lookupLocalChild li (LocalCausalHashId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w +lookupLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) +lookupLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w loadBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) loadBranchByCausalHashId id = do @@ -642,7 +645,8 @@ loadBranchByObjectId id = do >>= getFromBytesOr (ErrPatch id) S.getPatchFormat l2sFull :: S.BranchFormat.BranchLocalIds -> S.LocalBranch -> S.DbBranch - l2sFull li = error "todo" + l2sFull li = + S.Branch.Full.quadmap (lookupLocalText li) (lookupLocalDefn li) (lookupLocalPatch li) (lookupLocalChild li) doFull :: EDB m => S.Branch.Full.DbBranch -> m (C.Branch m) doFull (S.Branch.Full.Branch tms tps patches children) = @@ -755,7 +759,7 @@ loadBranchByObjectId id = do -- doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) (pure . 
loadBranchByObjectId) l2sDiff :: S.BranchFormat.BranchLocalIds -> S.Branch.LocalDiff -> S.Branch.Diff - l2sDiff li = error "todo" + l2sDiff li = S.BranchDiff.quadmap (lookupLocalText li) (lookupLocalDefn li) (lookupLocalPatch li) (lookupLocalChild li) doDiff :: EDB m => Db.BranchObjectId -> [S.Branch.Diff] -> m (C.Branch m) doDiff ref lds = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index a58ac5ef03..17c90bdcc5 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -427,13 +427,13 @@ putBranchFormat = \case putFoldable putVarInt ts putFoldable putVarInt os putFoldable putVarInt ps - putFoldable putVarInt cs + putFoldable (putPair putVarInt putVarInt) cs putBranchFull li (BranchFull.Branch terms types patches children) = do putBranchLocalIds li putMap putVarInt (putMap putReferent' putMetadataSetFormat) terms putMap putVarInt (putMap putReference putMetadataSetFormat) types putMap putVarInt putVarInt patches - putMap putVarInt (putPair putVarInt putVarInt) children + putMap putVarInt putVarInt children putMetadataSetFormat = \case BranchFull.Inline s -> putWord8 0 *> putFoldable putReference s putBranchDiff ref li (BranchDiff.Diff terms types patches children) = do @@ -457,7 +457,7 @@ putBranchFormat = \case BranchDiff.AlterDefMetadata md -> putWord8 2 *> putAddRemove putReference md putChildOp = \case BranchDiff.ChildRemove -> putWord8 0 - BranchDiff.ChildAddReplace b -> putWord8 1 *> putPair putVarInt putVarInt b + BranchDiff.ChildAddReplace b -> putWord8 1 *> putVarInt b putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () putPatchFormat = \case From 4526ae03423f2f41e61e26631653f0d7415082ac Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 13 Nov 2020 16:04:35 -0500 Subject: [PATCH 046/225] fleshing out s2cBranch --- .../U/Codebase/Sqlite/Operations.hs | 77 
+++++++++++++++---- .../U/Codebase/Sqlite/Queries.hs | 4 +- 2 files changed, 66 insertions(+), 15 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index e442742360..427fd87f84 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE TupleSections #-} {-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE ConstraintKinds #-} @@ -12,7 +13,7 @@ module U.Codebase.Sqlite.Operations where import Control.Lens (Lens') import qualified Control.Lens as Lens -import Control.Monad (join) +import Control.Monad (join, (<=<)) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad.State (MonadState, evalStateT) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) @@ -51,6 +52,7 @@ import U.Codebase.ShortHash (ShortBranchHash (ShortBranchHash)) import qualified U.Codebase.Sqlite.Branch.Diff as S.Branch import qualified U.Codebase.Sqlite.Branch.Diff as S.Branch.Diff import qualified U.Codebase.Sqlite.Branch.Diff as S.BranchDiff +import U.Codebase.Sqlite.Branch.Format (BranchLocalIds) import qualified U.Codebase.Sqlite.Branch.Format as S import qualified U.Codebase.Sqlite.Branch.Format as S.BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as S @@ -59,7 +61,7 @@ import qualified U.Codebase.Sqlite.Branch.Full as S.MetadataSet import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds - (LocalBranchChildId (..), + ( LocalBranchChildId (..), LocalDefnId (..), LocalIds, LocalIds' (..), @@ -100,7 +102,6 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import U.Codebase.Sqlite.Branch.Format (BranchLocalIds) type Err m = 
MonadError Error m @@ -140,15 +141,6 @@ liftQ a = -- * helpers -m :: (a -> f (Maybe b)) -> a -> MaybeT f b -m = fmap MaybeT - -m' :: (Functor f, Show a) => String -> (a -> f (Maybe b)) -> a -> MaybeT f b -m' msg f a = MaybeT do - f a <&> \case - Nothing -> error $ "nothing: " ++ msg ++ " " ++ show a - Just b -> Just b - c2sReference :: EDB m => C.Reference -> MaybeT m S.Reference c2sReference = bitraverse lookupTextId hashToObjectId @@ -181,7 +173,57 @@ s2cTypeEdit = \case S.TypeEdit.Deprecate -> pure C.TypeEdit.Deprecate s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) -s2cBranch = error "todo" +s2cBranch (S.Branch.Full.Branch tms tps patches children) = + C.Branch + <$> doTerms tms + <*> doTypes tps + <*> doPatches patches + <*> doChildren children + where + doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Referent (m C.MdValues))) + doTerms = + Map.bitraverse + (fmap C.NameSegment . loadTextById) + ( Map.bitraverse s2cReferent \case + S.MetadataSet.Inline rs -> + pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs + ) + doTypes :: EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Reference (m C.MdValues))) + doTypes = + Map.bitraverse + (fmap C.NameSegment . loadTextById) + ( Map.bitraverse s2cReference \case + S.MetadataSet.Inline rs -> + pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs + ) + doPatches :: Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) + doPatches = error "not implemented" + + doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) + doChildren = Map.bitraverse (fmap C.NameSegment . 
loadTextById) \(boId, chId) -> + C.CausalHead <$> loadCausalHashById chId + <*> loadValueHashByCausalHashId chId + <*> headParents chId + <*> pure (loadBranchByObjectId boId) + where + headParents :: EDB m => Db.CausalHashId -> m (Map CausalHash (m (C.Causal m CausalHash BranchHash (C.Branch m)))) + headParents chId = do + parentsChIds <- Q.loadCausalParents chId + fmap Map.fromList $ traverse pairParent parentsChIds + pairParent :: EDB m => Db.CausalHashId -> m (CausalHash, m (C.Causal m CausalHash BranchHash (C.Branch m))) + pairParent chId = do + h <- loadCausalHashById chId + pure (h, loadCausal chId) + loadCausal :: EDB m => Db.CausalHashId -> m (C.Causal m CausalHash BranchHash (C.Branch m)) + loadCausal chId = do + C.Causal <$> loadCausalHashById chId + <*> loadValueHashByCausalHashId chId + <*> headParents chId + <*> pure (loadValue chId) + loadValue :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) + loadValue chId = do + boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId + traverse loadBranchByObjectId boId s2cPatch :: EDB m => S.Patch -> m C.Patch s2cPatch = error "todo" @@ -210,6 +252,15 @@ loadHashByObjectId = fmap H.fromBase32Hex . liftQ . Q.loadPrimaryHashByObjectId loadHashByHashId :: EDB m => Db.HashId -> m H.Hash loadHashByHashId = fmap H.fromBase32Hex . liftQ . Q.loadHashById +loadCausalHashById :: EDB m => Db.CausalHashId -> m CausalHash +loadCausalHashById = fmap (CausalHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unCausalHashId + +loadValueHashByCausalHashId :: EDB m => Db.CausalHashId -> m BranchHash +loadValueHashByCausalHashId = loadValueHashById <=< liftQ . Q.loadCausalValueHashId + where + loadValueHashById :: EDB m => Db.BranchHashId -> m BranchHash + loadValueHashById = fmap (BranchHash . H.fromBase32Hex) . liftQ . Q.loadHashById . 
Db.unBranchHashId + decodeComponentLengthOnly :: Err m => ByteString -> m Word64 decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen S.lengthFramedArray diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 3ce4b5a9d8..fc3623efac 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -175,8 +175,8 @@ saveCausal self value oid = execute sql (self, value, oid) where sql = [here| VALUES (?, ?,, ?) |] -loadCausalValueHash :: EDB m => CausalHashId -> m BranchHashId -loadCausalValueHash id = +loadCausalValueHashId :: EDB m => CausalHashId -> m BranchHashId +loadCausalValueHashId id = queryOnly sql (Only id) >>= orError (UnknownCausalHashId id) where sql = [here| SELECT value_hash_id FROM causal WHERE self_hash_id = ? |] From 570a953dc7e6897cd61f3eeb32d3c0307f333cd3 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 13 Nov 2020 16:41:10 -0500 Subject: [PATCH 047/225] flesh out doPatches --- .../U/Codebase/Sqlite/Operations.hs | 46 +++++++++++++++---- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 427fd87f84..ceeeb6a2da 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -102,6 +102,7 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +import qualified U.Codebase.Sqlite.Patch.Format as S type Err m = MonadError Error m @@ -196,8 +197,31 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs ) - doPatches :: Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) - 
doPatches = error "not implemented" + doPatches :: EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) + doPatches = Map.bitraverse (fmap C.NameSegment . loadTextById) \patchId -> do + h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId) patchId + let patch :: EDB m => m C.Patch + patch = do + deserializePatchObject patchId >>= \case + S.PatchFormat.Full (S.Patch termEdits typeEdits) -> + C.Patch <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits + S.PatchFormat.Diff ref d -> doDiff ref [d] + doDiff ref ds = + deserializePatchObject ref >>= \case + S.PatchFormat.Full f -> s2cPatch (joinFull f ds) + S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) + joinFull f [] = f + joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds + where + f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) + addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) + addRemove add del src = + (Map.unionWith (<>) add (Map.differenceWith remove src del)) + remove :: Ord b => Set b -> Set b -> Maybe (Set b) + remove src del = + let diff = Set.difference src del + in if diff == mempty then Nothing else Just diff + pure (h, patch) doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) \(boId, chId) -> @@ -681,19 +705,21 @@ loadBranchByCausalHashId id = do (liftQ . Q.loadBranchObjectIdByCausalHashId) id >>= traverse loadBranchByObjectId +deserializePatchObject :: EDB m => Db.PatchObjectId -> m S.PatchFormat +deserializePatchObject id = + (liftQ . 
Q.loadObjectById) (Db.unPatchObjectId id) + >>= getFromBytesOr (ErrPatch id) S.getPatchFormat + loadBranchByObjectId :: EDB m => Db.BranchObjectId -> m (C.Branch m) loadBranchByObjectId id = do deserializeBranchObject id >>= \case - S.BranchFormat.Full li f -> doFull (l2sFull li f) + S.BranchFormat.Full li f -> s2cBranch (l2sFull li f) S.BranchFormat.Diff r li d -> doDiff r [l2sDiff li d] where deserializeBranchObject :: EDB m => Db.BranchObjectId -> m S.BranchFormat deserializeBranchObject id = (liftQ . Q.loadObjectById) (Db.unBranchObjectId id) >>= getFromBytesOr (ErrBranch id) S.getBranchFormat - deserializePatchObject id = - (liftQ . Q.loadObjectById) (Db.unPatchObjectId id) - >>= getFromBytesOr (ErrPatch id) S.getPatchFormat l2sFull :: S.BranchFormat.BranchLocalIds -> S.LocalBranch -> S.DbBranch l2sFull li = @@ -707,21 +733,21 @@ loadBranchByObjectId id = do <*> doPatches patches <*> doChildren children where - doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) + doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Referent (m C.MdValues))) doTerms = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) ( Map.bitraverse s2cReferent \case S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs ) - doTypes :: forall m. EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) + doTypes :: forall m. EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Reference (m C.MdValues))) doTypes = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) ( Map.bitraverse s2cReference \case S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs ) - doPatches :: forall m. 
EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.Branch.NameSegment (PatchHash, m C.Patch)) + doPatches :: forall m. EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) doPatches = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \patchId -> do h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId) patchId let patch :: m C.Patch @@ -819,7 +845,7 @@ loadBranchByObjectId id = do S.BranchFormat.Diff ref' li' d' -> doDiff ref' (l2sDiff li' d' : lds) where joinFull :: EDB m => S.DbBranch -> [S.Branch.Diff] -> m (C.Branch m) - joinFull f [] = doFull f + joinFull f [] = s2cBranch f joinFull (S.Branch.Full.Branch tms tps patches children) (S.Branch.Diff tms' tps' patches' children' : ds) = joinFull f' ds From 03e7e05305dfa0e8c9303f48b6aaf7224e17bb43 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 13 Nov 2020 16:46:54 -0500 Subject: [PATCH 048/225] deduplicate some code --- .../U/Codebase/Sqlite/Operations.hs | 120 +----------------- 1 file changed, 6 insertions(+), 114 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index ceeeb6a2da..1a52b5e6c1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -1,4 +1,3 @@ -{-# LANGUAGE TupleSections #-} {-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE ConstraintKinds #-} @@ -6,6 +5,7 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TupleSections #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} @@ -72,6 +72,7 @@ import U.Codebase.Sqlite.LocalIds import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Patch.Diff as S +import qualified U.Codebase.Sqlite.Patch.Format as S import qualified 
U.Codebase.Sqlite.Patch.Format as S.PatchFormat import qualified U.Codebase.Sqlite.Patch.Full as S import qualified U.Codebase.Sqlite.Patch.TermEdit as S @@ -102,7 +103,6 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import qualified U.Codebase.Sqlite.Patch.Format as S type Err m = MonadError Error m @@ -204,7 +204,9 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = patch = do deserializePatchObject patchId >>= \case S.PatchFormat.Full (S.Patch termEdits typeEdits) -> - C.Patch <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits + C.Patch + <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits + <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits S.PatchFormat.Diff ref d -> doDiff ref [d] doDiff ref ds = deserializePatchObject ref >>= \case @@ -220,7 +222,7 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = remove :: Ord b => Set b -> Set b -> Maybe (Set b) remove src del = let diff = Set.difference src del - in if diff == mempty then Nothing else Just diff + in if diff == mempty then Nothing else Just diff pure (h, patch) doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) @@ -725,116 +727,6 @@ loadBranchByObjectId id = do l2sFull li = S.Branch.Full.quadmap (lookupLocalText li) (lookupLocalDefn li) (lookupLocalPatch li) (lookupLocalChild li) - doFull :: EDB m => S.Branch.Full.DbBranch -> m (C.Branch m) - doFull (S.Branch.Full.Branch tms tps patches children) = - C.Branch - <$> doTerms tms - <*> doTypes tps - <*> doPatches patches - <*> doChildren children - where - doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Referent (m C.MdValues))) - doTerms = - 
Map.bitraverse - (fmap C.Branch.NameSegment . loadTextById) - ( Map.bitraverse s2cReferent \case - S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs - ) - doTypes :: forall m. EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Reference (m C.MdValues))) - doTypes = - Map.bitraverse - (fmap C.Branch.NameSegment . loadTextById) - ( Map.bitraverse s2cReference \case - S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs - ) - doPatches :: forall m. EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) - doPatches = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \patchId -> do - h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId) patchId - let patch :: m C.Patch - patch = do - deserializePatchObject patchId >>= \case - S.PatchFormat.Full (S.Patch termEdits typeEdits) -> - C.Patch <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits - S.PatchFormat.Diff ref d -> doDiff ref [d] - doDiff ref ds = - deserializePatchObject ref >>= \case - S.PatchFormat.Full f -> s2cPatch (joinFull f ds) - S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) - joinFull f [] = f - joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds - where - f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) - addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) - addRemove add del src = - (Map.unionWith (<>) add (Map.differenceWith remove src del)) - remove :: Ord b => Set b -> Set b -> Maybe (Set b) - remove src del = - let diff = Set.difference src del - in if diff == mempty then Nothing else Just diff - pure (h, patch) - - doChildren :: EDB m => Map Db.TextId 
(Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) - doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) (pure . loadCausalHead) - -- doFull :: EDB m => S.BranchFormat.BranchLocalIds -> S.Branch.Full.LocalBranch -> m (C.Branch m) - -- doFull li (S.Branch.Full.Branch tms tps patches children) = - -- C.Branch - -- <$> doTerms tms - -- <*> doTypes tps - -- <*> doPatches patches - -- <*> doChildren children - -- where - -- localToDbReference :: S.LocalReference -> S.Reference - -- localToDbReference = bitraverse (lookupLocalText li) (lookupLocalDefn li) - -- Map.bitraverse :: (Applicative f, Ord b) => (a -> f b) -> (c -> f d) -> Map a c -> f (Map b d) - -- Map.bitraverse f g = fmap Map.fromList . traverse (bitraverse f g) . Map.toList - -- Set.traverse :: (Applicative f, Ord b) => (a -> f b) -> Set a -> f (Set b) - -- Set.traverse f = fmap Set.fromList . traverse f . Set.toList - -- -- |convert the term namespace from localids to C. ones - -- doTerms :: EDB m => Map LocalTextId (Map S.LocalReferent S.LocalMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) - -- doTerms = - -- Map.bitraverse - -- (fmap C.Branch.NameSegment . loadTextById . lookupLocalText) - -- ( Map.bitraverse s2cReferent \case - -- S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse (s2cReference . localToDbReference) rs - -- ) - -- -- |convert the types namespace from localids to C. ones - -- doTypes :: forall m. EDB m => Map LocalTextId (Map S.LocalReference S.LocalMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) - -- doTypes = - -- Map.bitraverse - -- (fmap C.Branch.NameSegment . loadTextById . lookupLocalText) - -- ( Map.bitraverse s2cReference \case - -- S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse (s2cReference . localToDbReference) rs - -- ) - -- -- |convert the patches namespace from using localids to C. 
ones - -- doPatches :: forall m. EDB m => Map LocalTextId LocalPatchObjectId -> m (Map C.Branch.NameSegment (PatchHash, m C.Patch)) - -- doPatches = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \patchId -> do - -- h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId . lookupLocalPatch) patchId - -- let patch :: m C.Patch - -- patch = do - -- deserializePatchObject patchId >>= \case - -- S.PatchFormat.Full (S.Patch termEdits typeEdits) -> - -- C.Patch <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits - -- S.PatchFormat.Diff ref d -> doDiff ref [d] - -- doDiff ref ds = - -- deserializePatchObject ref >>= \case - -- S.PatchFormat.Full f -> s2cPatch (joinFull f ds) - -- S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) - -- joinFull f [] = f - -- joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds - -- where - -- f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) - -- addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) - -- addRemove add del src = - -- (Map.unionWith (<>) add (Map.differenceWith remove src del)) - -- remove :: Ord b => Set b -> Set b -> Maybe (Set b) - -- remove src del = - -- let diff = Set.difference src del - -- in if diff == mempty then Nothing else Just diff - -- pure (h, patch) - - -- doChildren :: EDB m => Map Db.TextId Db.BranchObjectId -> m (Map C.Branch.NameSegment (m (C.Branch m))) - -- doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) (pure . 
loadBranchByObjectId) - l2sDiff :: S.BranchFormat.BranchLocalIds -> S.Branch.LocalDiff -> S.Branch.Diff l2sDiff li = S.BranchDiff.quadmap (lookupLocalText li) (lookupLocalDefn li) (lookupLocalPatch li) (lookupLocalChild li) From 8b554a73a6445dd298b25e3ee0ac0f291e2559de Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 13 Nov 2020 17:00:15 -0500 Subject: [PATCH 049/225] wip --- .../codebase-sqlite/U/Codebase/Sqlite/Operations.hs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 1a52b5e6c1..dcde1032f0 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -203,10 +203,7 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = let patch :: EDB m => m C.Patch patch = do deserializePatchObject patchId >>= \case - S.PatchFormat.Full (S.Patch termEdits typeEdits) -> - C.Patch - <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits - <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits + S.PatchFormat.Full p -> s2cPatch p S.PatchFormat.Diff ref d -> doDiff ref [d] doDiff ref ds = deserializePatchObject ref >>= \case @@ -252,7 +249,10 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = traverse loadBranchByObjectId boId s2cPatch :: EDB m => S.Patch -> m C.Patch -s2cPatch = error "todo" +s2cPatch (S.Patch termEdits typeEdits) = + C.Patch + <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits + <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits lookupTextId :: EDB m => Text -> m Db.TextId lookupTextId t = @@ -825,9 +825,6 @@ loadBranchByObjectId id = do let (Set.fromList -> adds, Set.fromList -> removes) = S.BranchDiff.addsRemoves md' in Just . 
S.MetadataSet.Inline $ (Set.union adds $ Set.difference md removes) -loadCausalHead :: (Db.BranchObjectId, Db.CausalHashId) -> C.CausalHead m CausalHash BranchHash (C.Branch m) -loadCausalHead = error "not implemented" - branchHashesByPrefix :: EDB m => ShortBranchHash -> m (Set BranchHash) branchHashesByPrefix (ShortBranchHash b32prefix) = do hashIds <- Q.namespaceHashIdByBase32Prefix b32prefix From 20d6da38517769e434eb52c85ed98c0b24949f56 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 19 Nov 2020 15:54:33 -0500 Subject: [PATCH 050/225] wip --- .../U/Codebase/Sqlite/Branch/Format.hs | 1 + .../U/Codebase/Sqlite/Operations.hs | 33 ++++-- .../U/Codebase/Sqlite/Queries.hs | 30 ++++- .../U/Codebase/Sqlite/Serialization.hs | 109 +++++++++++++++++- .../U/Codebase/Sqlite/SyncEntity.hs | 24 ++++ codebase2/codebase-sqlite/sql/create.sql | 1 - .../unison-codebase-sqlite.cabal | 1 + codebase2/codebase/U/Codebase/HashTags.hs | 2 + .../U/Util/Serialization.hs | 58 ++++++---- .../unison-util-serialization.cabal | 5 +- 10 files changed, 221 insertions(+), 43 deletions(-) create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs index 7248dd92de..2e23fd409e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs @@ -5,6 +5,7 @@ import U.Codebase.Sqlite.Branch.Diff (LocalDiff) import U.Codebase.Sqlite.Branch.Full (LocalBranch) import U.Codebase.Sqlite.DbId (CausalHashId, BranchObjectId, ObjectId, PatchObjectId, TextId) +-- |you can use the exact same `BranchLocalIds` when converting between `Full` and `Diff` data BranchFormat = Full BranchLocalIds LocalBranch | Diff BranchObjectId BranchLocalIds LocalDiff diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 
dcde1032f0..29b0582585 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -43,7 +43,7 @@ import qualified U.Codebase.Causal as C import U.Codebase.Decl (ConstructorId) import qualified U.Codebase.Decl as C import qualified U.Codebase.Decl as C.Decl -import U.Codebase.HashTags (BranchHash (..), CausalHash (..), PatchHash (..)) +import U.Codebase.HashTags (BranchHash (..), CausalHash (..), DefnHash (..), EditHash, PatchHash (..)) import qualified U.Codebase.Reference as C import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Referent as C @@ -103,6 +103,7 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +import qualified U.Codebase.Sqlite.SyncEntity as SE type Err m = MonadError Error m @@ -118,6 +119,7 @@ data DecodeError | ErrWatch WatchKind C.Reference.Id | ErrBranch Db.BranchObjectId | ErrPatch Db.PatchObjectId + | ErrObjectDependencies OT.ObjectType Db.ObjectId deriving (Show) data Error @@ -148,7 +150,7 @@ c2sReference = bitraverse lookupTextId hashToObjectId s2cReference :: EDB m => S.Reference -> m C.Reference s2cReference = bitraverse loadTextById loadHashByObjectId -c2sReferenceId :: EDB m => C.Reference.Id -> MaybeT m S.Reference.Id +c2sReferenceId :: EDB m => C.Reference.Id -> m S.Reference.Id c2sReferenceId = C.Reference.idH hashToObjectId s2cReferenceId :: EDB m => S.Reference.Id -> m C.Reference.Id @@ -847,18 +849,31 @@ dependents r = do cIds <- traverse s2cReferenceId sIds pure $ Set.fromList cIds --- * Sync-related dependency queries - -termDependencies :: DB m => C.Reference.Id -> m (Maybe (Set C.Reference.Id)) -termDependencies = error "todo" +-- | returns empty set for unknown inputs; doesn't distinguish between term and decl +derivedDependencies :: EDB m => C.Reference.Id -> m (Set C.Reference.Id) +derivedDependencies cid = do + 
sid <- c2sReferenceId cid + sids <- Q.getDependencyIdsForDependent sid + cids <- traverse s2cReferenceId sids + pure $ Set.fromList cids -declDependencies :: DB m => C.Reference.Id -> m (Maybe (Set C.Reference.Id)) -declDependencies = error "todo" +-- * Sync-related dependency queries +objectDependencies :: EDB m => Db.ObjectId -> m SE.SyncEntitySeq +objectDependencies oid = do + (ot, bs) <- liftQ $ Q.loadObjectWithTypeById oid + let getOrError = getFromBytesOr (ErrObjectDependencies ot oid) + case ot of + OT.TermComponent -> getOrError S.getTermComponentSyncEntities bs + OT.DeclComponent -> getOrError S.getDeclComponentSyncEntities bs + OT.Namespace -> getOrError S.getBranchSyncEntities bs + OT.Patch -> getOrError S.getPatchSyncEntities bs -- branchDependencies :: -- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), -- -- |the "new" terms and types mentioned in a patch --- patchDependencies :: EditHash -> m (Set Reference, Set Reference) + +-- patchDependencies :: EditHash -> m (Maybe (Set DefnHash)) +-- patchDependencies h = error "todo" -- getBranchByAnyHash :: -- getBranchByBranchHash :: DB m => BranchHash -> m (Maybe (Branch m)) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index fc3623efac..f81f16abd9 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -36,6 +36,7 @@ import qualified U.Codebase.WatchKind as WatchKind import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash +import UnliftIO (withRunInIO, MonadUnliftIO) -- * types type DB m = (MonadIO m, MonadReader Connection m) @@ -119,6 +120,12 @@ loadObjectById oId = queryOnly sql (Only oId) >>= orError (UnknownObjectId oId) SELECT bytes FROM object WHERE id = ? 
|] +loadObjectWithTypeById :: EDB m => ObjectId -> m (ObjectType, ByteString) +loadObjectWithTypeById oId = queryMaybe sql (Only oId) >>= orError (UnknownObjectId oId) + where sql = [here| + SELECT type_id, bytes FROM object WHERE id = ? + |] + -- |Not all hashes have corresponding objects; e.g., hashes of term types expectObjectIdForPrimaryHashId :: EDB m => HashId -> m ObjectId expectObjectIdForPrimaryHashId h = @@ -169,10 +176,10 @@ updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| -- |Maybe we would generalize this to something other than NamespaceHash if we -- end up wanting to store other kinds of Causals here too. -saveCausal :: DB m => CausalHashId -> BranchHashId -> Maybe BranchObjectId -> m () -saveCausal self value oid = execute sql (self, value, oid) where sql = [here| - INSERT OR IGNORE INTO causal (self_hash_id, value_hash_id, value_object_id) - VALUES (?, ?,, ?) +saveCausal :: DB m => CausalHashId -> BranchHashId -> m () +saveCausal self value = execute sql (self, value) where sql = [here| + INSERT OR IGNORE INTO causal (self_hash_id, value_hash_id) + VALUES (?, ?) |] loadCausalValueHashId :: EDB m => CausalHashId -> m BranchHashId @@ -301,6 +308,15 @@ getDependentsForDependency dependency = query sql dependency where sql = [here| AND dependency_component_index = ? |] +getDependencyIdsForDependent :: DB m => Reference.Id -> m [Reference.Id] +getDependencyIdsForDependent dependent = query sql dependent where sql = [here| + SELECT dependency_object_id, dependency_component_index + FROM dependents_index + WHERE dependency_builtin IS NULL + AND dependent_object_id = ? + AND dependent_component_index = ?
+|] + objectIdByBase32Prefix :: DB m => ObjectType -> Text -> m [ObjectId] objectIdByBase32Prefix objType prefix = queryList sql (objType, prefix <> "%") where sql = [here| SELECT object.id FROM object @@ -344,6 +360,12 @@ query q r = do c <- ask; liftIO $ SQLite.query c q r execute :: (DB m, ToRow q) => SQLite.Query -> q -> m () execute q r = do c <- ask; liftIO $ SQLite.execute c q r +-- |transaction that blocks +withImmediateTransaction :: (DB m, MonadUnliftIO m) => m a -> m a +withImmediateTransaction action = do + c <- ask + withRunInIO \run -> SQLite.withImmediateTransaction c (run action) + headMay :: [a] -> Maybe a headMay [] = Nothing headMay (a:_) = Just a diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 17c90bdcc5..053b3e2f6e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -1,7 +1,9 @@ +{-# LANGUAGE TypeApplications #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE TupleSections #-} {-# LANGUAGE ViewPatterns #-} module U.Codebase.Sqlite.Serialization where @@ -13,7 +15,11 @@ import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) import Data.List (elemIndex) +import Data.Sequence (Seq) +import qualified Data.Sequence as Seq import qualified Data.Set as Set +import Data.Vector (Vector) +import qualified Data.Vector as Vector import Data.Word (Word64) import qualified U.Codebase.Decl as Decl import U.Codebase.Kind (Kind) @@ -25,7 +31,7 @@ import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as BranchFull -import 
U.Codebase.Sqlite.DbId (PatchObjectId) +import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, unBranchObjectId, unPatchObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId) import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff @@ -34,6 +40,7 @@ import qualified U.Codebase.Sqlite.Patch.Full as PatchFull import qualified U.Codebase.Sqlite.Patch.TermEdit as TermEdit import qualified U.Codebase.Sqlite.Patch.TypeEdit as TypeEdit import U.Codebase.Sqlite.Symbol +import qualified U.Codebase.Sqlite.SyncEntity as SE import qualified U.Codebase.Sqlite.Term.Format as TermFormat import qualified U.Codebase.Term as Term import qualified U.Codebase.Type as Type @@ -338,7 +345,6 @@ lookupTermElementDiscardingTerm :: MonadGet m => Reference.Pos -> m (LocalIds, T lookupTermElementDiscardingTerm = unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getFramed getTType) . fromIntegral - getTType :: MonadGet m => m TermFormat.Type getTType = getType getReference @@ -501,10 +507,96 @@ getBranchFormat = getWord8 >>= \case 0 -> getBranchFull 1 -> getBranchDiff - other -> unknownTag "BranchFormat" other + x -> unknownTag "getBranchFormat" x where - getBranchFull = error "todo" - getBranchDiff = error "todo" + getReferent' = getReferent getReference getReference + getBranchFull = + BranchFormat.Full <$> getBranchLocalIds <*> getLocalBranch + where + getLocalBranch = + BranchFull.Branch + <$> getMap getVarInt (getMap getReferent' getMetadataSetFormat) + <*> getMap getVarInt (getMap getReference getMetadataSetFormat) + <*> getMap getVarInt getVarInt + <*> getMap getVarInt getVarInt + + getMetadataSetFormat = + getWord8 >>= \case + 0 -> BranchFull.Inline <$> getSet getReference + x -> unknownTag "getMetadataSetFormat" x + getBranchDiff = + BranchFormat.Diff + <$> getVarInt + <*> getBranchLocalIds + <*> getLocalBranchDiff + where + getLocalBranchDiff = + BranchDiff.Diff + <$> 
getMap getVarInt (getMap getReferent' getDiffOp) + <*> getMap getVarInt (getMap getReference getDiffOp) + <*> getMap getVarInt getPatchOp + <*> getMap getVarInt getChildOp + getDiffOp = + getWord8 >>= \case + 0 -> pure BranchDiff.RemoveDef + 1 -> BranchDiff.AddDefWithMetadata <$> getSet getReference + 2 -> BranchDiff.AlterDefMetadata <$> getAddRemove getReference + x -> unknownTag "getDiffOp" x + getAddRemove get = do + adds <- getMap get (pure True) + -- and removes: + addToExistingMap get (pure False) adds + getPatchOp = getWord8 >>= \case + 0 -> pure BranchDiff.PatchRemove + 1 -> BranchDiff.PatchAddReplace <$> getVarInt + x -> unknownTag "getPatchOp" x + getChildOp = getWord8 >>= \case + 0 -> pure BranchDiff.ChildRemove + 1 -> BranchDiff.ChildAddReplace <$> getVarInt + x -> unknownTag "getChildOp" x + +getBranchLocalIds :: MonadGet m => m BranchFormat.BranchLocalIds +getBranchLocalIds = + BranchFormat.LocalIds + <$> getVector getVarInt + <*> getVector getVarInt + <*> getVector getVarInt + <*> getVector (getPair getVarInt getVarInt) + +getBranchSyncEntities :: MonadGet m => m SE.SyncEntitySeq +getBranchSyncEntities = + getWord8 >>= \case + -- Full + 0 -> getLocalIds + -- Diff + 1 -> do + id <- getVarInt @_ @BranchObjectId + SE.addObjectId (unBranchObjectId id) <$> getLocalIds + x -> unknownTag "getBranchSyncEntities" x + where + getLocalIds = branchLocalIdsToLocalDeps <$> getBranchLocalIds + branchLocalIdsToLocalDeps (BranchFormat.LocalIds ts os ps bcs) = + SE.SyncEntity + (vec2seq ts) + ( vec2seq os + <> vec2seq (Vector.map unPatchObjectId ps) + <> vec2seq (Vector.map unBranchObjectId bos) + ) + mempty + (vec2seq chs) + where + (bos, chs) = Vector.unzip bcs + vec2seq :: Vector a -> Seq a + vec2seq v = Seq.fromFunction (length v) (v Vector.!) 
+ +getTermComponentSyncEntities :: m SE.SyncEntitySeq +getTermComponentSyncEntities = error "not implemented" + +getDeclComponentSyncEntities :: m SE.SyncEntitySeq +getDeclComponentSyncEntities = error "not implemented" + +getPatchSyncEntities :: m SE.SyncEntitySeq +getPatchSyncEntities = error "not implemented" getSymbol :: MonadGet m => m Symbol getSymbol = Symbol <$> getVarInt <*> getText @@ -532,6 +624,13 @@ putReference = \case ReferenceDerived (Reference.Id r index) -> putWord8 1 *> putVarInt r *> putVarInt index +getReferent :: MonadGet m => m r1 -> m r2 -> m (Referent' r1 r2) +getReferent getRefRef getConRef = + getWord8 >>= \case + 0 -> Referent.Ref <$> getRefRef + 1 -> Referent.Con <$> getConRef <*> getVarInt + x -> unknownTag "getReferent" x + getReference :: (MonadGet m, Integral t, Bits t, Integral r, Bits r) => m (Reference' t r) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs new file mode 100644 index 0000000000..e216bb6f4e --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs @@ -0,0 +1,24 @@ +module U.Codebase.Sqlite.SyncEntity where + +import Data.Sequence (Seq) +import qualified Data.Sequence as Seq +import qualified U.Codebase.Sqlite.DbId as Db + +-- | Stuff you'll need to sync +data SyncEntity' f = SyncEntity + { -- | strings that need to be synced + text :: f Db.TextId, + -- | objects that need to be synced + objects :: f Db.ObjectId, + -- | hashes that need to be synced (comparable to weak refs) + hashes :: f Db.HashId, + -- | causals that need to be synced (these are not weak refs). + -- causals are relational instead of objects because we + -- ... wanted to use sqlite for LCA?? 
+ causals :: f Db.CausalHashId + } + +type SyncEntitySeq = SyncEntity' Seq + +addObjectId :: Db.ObjectId -> SyncEntitySeq -> SyncEntitySeq +addObjectId id s = s {objects = id Seq.<| objects s} diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index a31d1ad78e..d5096a2a96 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -56,7 +56,6 @@ CREATE INDEX object_type_id ON object(type_id); CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), value_hash_id INTEGER NOT NULL REFERENCES hash(id), - value_object_id INTEGER NULL REFERENCES object(id), gc_generation INTEGER NOT NULL ); CREATE INDEX causal_value_hash_id ON causal(value_hash_id); diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index 7745023d38..53fa9cdad7 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -35,6 +35,7 @@ library U.Codebase.Sqlite.Referent U.Codebase.Sqlite.Serialization U.Codebase.Sqlite.Symbol + U.Codebase.Sqlite.SyncEntity U.Codebase.Sqlite.Term.Format U.Codebase.Sqlite.Types diff --git a/codebase2/codebase/U/Codebase/HashTags.hs b/codebase2/codebase/U/Codebase/HashTags.hs index d9c5b8a93e..39efdc0651 100644 --- a/codebase2/codebase/U/Codebase/HashTags.hs +++ b/codebase2/codebase/U/Codebase/HashTags.hs @@ -9,3 +9,5 @@ newtype CausalHash = CausalHash { unCausalHash :: Hash } deriving (Eq, Ord, Show newtype EditHash = EditHash { unEditHash :: Hash } deriving (Eq, Ord, Show) newtype PatchHash = PatchHash { unPatchHash :: Hash } deriving (Eq, Ord, Show) + +newtype DefnHash = DefnHash { unDefnHash :: Hash } deriving (Eq, Ord, Show) diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 99c60ce1c4..0c1e84de92 100644 --- 
a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -7,19 +7,24 @@ module U.Util.Serialization where -import Control.Monad (replicateM) -import Data.Bits ((.|.), Bits, clearBit, setBit, shiftL, shiftR, testBit) +import Control.Applicative (Applicative (liftA2), liftA3) +import Control.Monad (foldM, replicateM) +import Data.Bits (Bits, clearBit, setBit, shiftL, shiftR, testBit, (.|.)) import Data.ByteString (ByteString, readFile, writeFile) import qualified Data.ByteString as BS -import qualified Data.ByteString.Short as BSS import Data.ByteString.Short (ShortByteString) +import qualified Data.ByteString.Short as BSS import Data.Bytes.Get (MonadGet, getByteString, getBytes, getWord8, runGetS, skip) import Data.Bytes.Put (MonadPut, putByteString, putWord8, runPutS) import Data.Bytes.VarInt (VarInt (VarInt)) import Data.Foldable (Foldable (toList), traverse_) import Data.List.Extra (dropEnd) +import Data.Map (Map) +import qualified Data.Map as Map import Data.Sequence (Seq) import qualified Data.Sequence as Seq +import Data.Set (Set) +import qualified Data.Set as Set import Data.Text (Text) import Data.Text.Encoding (decodeUtf8, encodeUtf8) import Data.Text.Short (ShortText) @@ -28,16 +33,13 @@ import qualified Data.Text.Short.Unsafe as TSU import Data.Vector (Vector) import qualified Data.Vector as Vector import Data.Word (Word8) +import GHC.Word (Word64) import System.FilePath (takeDirectory) import UnliftIO (MonadIO, liftIO) import UnliftIO.Directory (createDirectoryIfMissing, doesFileExist) import Prelude hiding (readFile, writeFile) -import Data.Map (Map) -import qualified Data.Map as Map -import Control.Applicative (liftA3, Applicative(liftA2)) -import GHC.Word (Word64) -import Data.Set (Set) -import qualified Data.Set as Set + +-- import qualified U.Util.Monoid as Monoid type Get a = forall m. 
MonadGet m => m a @@ -151,6 +153,27 @@ getSequence getA = do length <- getVarInt Seq.replicateM length getA +getSet :: (MonadGet m, Ord a) => m a -> m (Set a) +getSet getA = do + length <- getVarInt + -- avoid materializing intermediate list + foldM (\s ma -> Set.insert <$> ma <*> pure s) mempty (replicate length getA) + +putMap :: MonadPut m => (a -> m ()) -> (b -> m ()) -> Map a b -> m () +putMap putA putB m = putFoldable (putPair putA putB) (Map.toList m) + +addToExistingMap :: (MonadGet m, Ord a) => m a -> m b -> Map a b -> m (Map a b) +addToExistingMap getA getB map = do + length <- getVarInt + -- avoid materializing intermediate list + foldM + (\s (ma, mb) -> Map.insert <$> ma <*> mb <*> pure s) + map + (replicate length (getA, getB)) + +getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) +getMap getA getB = addToExistingMap getA getB mempty + getFramed :: MonadGet m => Get a -> m a getFramed get = do size <- getVarInt @@ -206,20 +229,11 @@ unsafeFramedArrayLookup getA index = do skip (Vector.unsafeIndex offsets index) getA -putMap :: MonadPut m => (a -> m ()) -> (b -> m ()) -> Map a b -> m () -putMap putA putB m = putFoldable (putPair putA putB) (Map.toList m) - -getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) -getMap getA getB = Map.fromList <$> getList (getPair getA getB) - -getSet :: (MonadGet m, Ord a) => m a -> m (Set a) -getSet getA = Set.fromList <$> getList getA - -putPair :: MonadPut m => (a -> m ()) -> (b -> m ()) -> (a,b) -> m () -putPair putA putB (a,b) = putA a *> putB b +putPair :: MonadPut m => (a -> m ()) -> (b -> m ()) -> (a, b) -> m () +putPair putA putB (a, b) = putA a *> putB b -getPair :: MonadGet m => m a -> m b -> m (a,b) +getPair :: MonadGet m => m a -> m b -> m (a, b) getPair = liftA2 (,) -getTuple3 :: MonadGet m => m a -> m b -> m c -> m (a,b,c) +getTuple3 :: MonadGet m => m a -> m b -> m c -> m (a, b, c) getTuple3 = liftA3 (,,) diff --git a/codebase2/util-serialization/unison-util-serialization.cabal 
b/codebase2/util-serialization/unison-util-serialization.cabal index 45c782ac26..f19c84819b 100644 --- a/codebase2/util-serialization/unison-util-serialization.cabal +++ b/codebase2/util-serialization/unison-util-serialization.cabal @@ -18,7 +18,7 @@ library U.Util.Serialization -- other-modules: -- other-extensions: - build-depends: + build-depends: base, bytes, bytestring, @@ -28,6 +28,7 @@ library text, text-short, unliftio, - vector + vector, + unison-util hs-source-dirs: . default-language: Haskell2010 From 44259bab3c02650a575de2046e9282351e67b31a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 20 Nov 2020 20:57:04 -0500 Subject: [PATCH 051/225] working through Patch --- .../U/Codebase/Sqlite/LocalIds.hs | 2 + .../U/Codebase/Sqlite/Operations.hs | 37 ++-- .../U/Codebase/Sqlite/Patch/Diff.hs | 21 +- .../U/Codebase/Sqlite/Patch/Format.hs | 17 +- .../U/Codebase/Sqlite/Patch/Full.hs | 19 +- .../U/Codebase/Sqlite/Patch/TermEdit.hs | 10 +- .../U/Codebase/Sqlite/Patch/TypeEdit.hs | 10 +- .../U/Codebase/Sqlite/Referent.hs | 1 + .../U/Codebase/Sqlite/Serialization.hs | 208 +++++++++++++----- .../U/Codebase/Sqlite/SyncEntity.hs | 19 ++ .../U/Util/Serialization.hs | 9 +- 11 files changed, 259 insertions(+), 94 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index 01fbf0fa37..db52f0c54b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -26,6 +26,8 @@ newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Int -- | represents an index into a defnLookup newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalHashId = LocalHashId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + newtype LocalPatchObjectId = LocalPatchObjectId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 
newtype LocalBranchChildId = LocalBranchChildId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 29b0582585..4a23199a0f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -43,7 +43,7 @@ import qualified U.Codebase.Causal as C import U.Codebase.Decl (ConstructorId) import qualified U.Codebase.Decl as C import qualified U.Codebase.Decl as C.Decl -import U.Codebase.HashTags (BranchHash (..), CausalHash (..), DefnHash (..), EditHash, PatchHash (..)) +import U.Codebase.HashTags (BranchHash (..), CausalHash (..), PatchHash (..)) import qualified U.Codebase.Reference as C import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Referent as C @@ -86,6 +86,7 @@ import qualified U.Codebase.Sqlite.Reference as S.Reference import qualified U.Codebase.Sqlite.Referent as S import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sqlite.Symbol (Symbol) +import qualified U.Codebase.Sqlite.SyncEntity as SE import qualified U.Codebase.Sqlite.Term.Format as S.Term import qualified U.Codebase.Term as C import qualified U.Codebase.Term as C.Term @@ -103,7 +104,6 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import qualified U.Codebase.Sqlite.SyncEntity as SE type Err m = MonadError Error m @@ -150,6 +150,9 @@ c2sReference = bitraverse lookupTextId hashToObjectId s2cReference :: EDB m => S.Reference -> m C.Reference s2cReference = bitraverse loadTextById loadHashByObjectId +h2cReference :: EDB m => S.ReferenceH -> m C.Reference +h2cReference = bitraverse loadTextById loadHashByHashId + c2sReferenceId :: EDB m => C.Reference.Id -> m S.Reference.Id c2sReferenceId = C.Reference.idH hashToObjectId @@ 
-159,6 +162,9 @@ s2cReferenceId = C.Reference.idH loadHashByObjectId s2cReferent :: EDB m => S.Referent -> m C.Referent s2cReferent = bitraverse s2cReference s2cReference +h2cReferent :: EDB m => S.ReferentH -> m C.Referent +h2cReferent = bitraverse h2cReference h2cReference + s2cTermEdit :: EDB m => S.TermEdit -> m C.TermEdit s2cTermEdit = \case S.TermEdit.Replace r t -> C.TermEdit.Replace <$> s2cReference r <*> pure (s2cTyping t) @@ -203,18 +209,18 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = doPatches = Map.bitraverse (fmap C.NameSegment . loadTextById) \patchId -> do h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId) patchId let patch :: EDB m => m C.Patch - patch = do - deserializePatchObject patchId >>= \case - S.PatchFormat.Full p -> s2cPatch p - S.PatchFormat.Diff ref d -> doDiff ref [d] - doDiff ref ds = - deserializePatchObject ref >>= \case - S.PatchFormat.Full f -> s2cPatch (joinFull f ds) - S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) + patch = error "todo" + -- deserializePatchObject patchId >>= \case + -- S.PatchFormat.Full p -> s2cPatch p + -- S.PatchFormat.Diff ref d -> doDiff ref [d] + doDiff ref ds = error "todo" + -- deserializePatchObject ref >>= \case + -- S.PatchFormat.Full f -> s2cPatch (joinFull f ds) + -- S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) joinFull f [] = f joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds where - f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) + f' = error "todo" -- S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) addRemove add del src = (Map.unionWith (<>) add (Map.differenceWith remove src del)) @@ -253,8 +259,8 @@ s2cBranch (S.Branch.Full.Branch tms 
tps patches children) = s2cPatch :: EDB m => S.Patch -> m C.Patch s2cPatch (S.Patch termEdits typeEdits) = C.Patch - <$> Map.bitraverse s2cReferent (Set.traverse s2cTermEdit) termEdits - <*> Map.bitraverse s2cReference (Set.traverse s2cTypeEdit) typeEdits + <$> Map.bitraverse h2cReferent (Set.traverse s2cTermEdit) termEdits + <*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits lookupTextId :: EDB m => Text -> m Db.TextId lookupTextId t = @@ -858,13 +864,14 @@ derivedDependencies cid = do pure $ Set.fromList cids -- * Sync-related dependency queries + objectDependencies :: EDB m => Db.ObjectId -> m SE.SyncEntitySeq objectDependencies oid = do (ot, bs) <- liftQ $ Q.loadObjectWithTypeById oid let getOrError = getFromBytesOr (ErrObjectDependencies ot oid) case ot of - OT.TermComponent -> getOrError S.getTermComponentSyncEntities bs - OT.DeclComponent -> getOrError S.getDeclComponentSyncEntities bs + OT.TermComponent -> getOrError S.getComponentSyncEntities bs + OT.DeclComponent -> getOrError S.getComponentSyncEntities bs OT.Namespace -> getOrError S.getBranchSyncEntities bs OT.Patch -> getOrError S.getPatchSyncEntities bs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index 2ce1f4274d..fdd06f6c8a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -2,15 +2,18 @@ module U.Codebase.Sqlite.Patch.Diff where import Data.Map (Map) import Data.Set (Set) -import U.Codebase.Sqlite.Patch.TermEdit (TermEdit) -import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit) -import U.Codebase.Sqlite.Reference (Reference) -import U.Codebase.Sqlite.Referent (Referent) +import U.Codebase.Reference (Reference') +import U.Codebase.Referent (Referent') +import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) +import U.Codebase.Sqlite.Patch.TermEdit (TermEdit') +import 
U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit') -data PatchDiff = PatchDiff - { addedTermEdits :: Map Referent (Set TermEdit), - addedTypeEdits :: Map Reference (Set TypeEdit), - removedTermEdits :: Map Referent (Set TermEdit), - removedTypeEdits :: Map Reference (Set TypeEdit) +type LocalPatchDiff = PatchDiff' LocalTextId LocalHashId LocalDefnId + +data PatchDiff' t h d = PatchDiff + { addedTermEdits :: Map (Referent' (Reference' t h) (Reference' t h)) (Set (TermEdit' t d)), + addedTypeEdits :: Map (Reference' t h) (Set (TypeEdit' t d)), + removedTermEdits :: Map (Referent' (Reference' t h) (Reference' t h)) (Set (TermEdit' t d)), + removedTypeEdits :: Map (Reference' t h) (Set (TypeEdit' t d)) } deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs index eb2261574a..e04cce00ee 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Format.hs @@ -1,7 +1,16 @@ module U.Codebase.Sqlite.Patch.Format where -import U.Codebase.Sqlite.Patch.Diff -import U.Codebase.Sqlite.Patch.Full -import U.Codebase.Sqlite.DbId (PatchObjectId) +import Data.Vector (Vector) +import U.Codebase.Sqlite.DbId (HashId, ObjectId, PatchObjectId, TextId) +import U.Codebase.Sqlite.Patch.Diff (LocalPatchDiff) +import U.Codebase.Sqlite.Patch.Full (LocalPatch) -data PatchFormat = Full Patch | Diff PatchObjectId PatchDiff +data PatchFormat + = Full PatchLocalIds LocalPatch + | Diff PatchObjectId PatchLocalIds LocalPatchDiff + +data PatchLocalIds = LocalIds + { patchTextLookup :: Vector TextId, + patchHashLookup :: Vector HashId, + patchDefnLookup :: Vector ObjectId + } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs index 2b1dafdcd7..7b07334f15 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs +++ 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs @@ -2,11 +2,18 @@ module U.Codebase.Sqlite.Patch.Full where import Data.Map (Map) import Data.Set (Set) -import U.Codebase.Sqlite.Patch.TermEdit -import U.Codebase.Sqlite.Patch.TypeEdit -import U.Codebase.Sqlite.Types +import U.Codebase.Reference (Reference') +import U.Codebase.Referent (Referent') +import qualified U.Codebase.Sqlite.DbId as Db +import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) +import U.Codebase.Sqlite.Patch.TermEdit (TermEdit') +import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit') -data Patch = Patch - { termEdits :: Map Referent (Set TermEdit), - typeEdits :: Map Reference (Set TypeEdit) +type Patch = Patch' Db.TextId Db.HashId Db.ObjectId + +type LocalPatch = Patch' LocalTextId LocalHashId LocalDefnId + +data Patch' t h o = Patch + { termEdits :: Map (Referent' (Reference' t h) (Reference' t h)) (Set (TermEdit' t o)), + typeEdits :: Map (Reference' t h) (Set (TypeEdit' t o)) } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs index 12d2fd39ef..e49365c4b3 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs @@ -1,8 +1,14 @@ module U.Codebase.Sqlite.Patch.TermEdit where -import U.Codebase.Sqlite.Reference (Reference) +import U.Codebase.Reference (Reference') +import qualified U.Codebase.Sqlite.DbId as Db +import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalTextId) -data TermEdit = Replace Reference Typing | Deprecate +type TermEdit = TermEdit' Db.TextId Db.ObjectId + +type LocalTermEdit = TermEdit' LocalTextId LocalDefnId + +data TermEdit' t h = Replace (Reference' t h) Typing | Deprecate deriving (Eq, Ord, Show) -- Replacements with the Same type can be automatically propagated. 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs index cd3e3035ce..df40ef049d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs @@ -1,6 +1,12 @@ module U.Codebase.Sqlite.Patch.TypeEdit where -import U.Codebase.Sqlite.Reference (Reference) +import U.Codebase.Reference (Reference') +import qualified U.Codebase.Sqlite.DbId as Db +import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalTextId) -data TypeEdit = Replace Reference | Deprecate +type LocalTypeEdit = TypeEdit' LocalTextId LocalDefnId + +type TypeEdit = TypeEdit' Db.TextId Db.ObjectId + +data TypeEdit' t h = Replace (Reference' t h) | Deprecate deriving (Eq, Ord, Show) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs index 9dd76b1944..1616b405b2 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -14,6 +14,7 @@ import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Reference as Reference type Referent = Referent' Sqlite.Reference Sqlite.Reference +type ReferentH = Referent' Sqlite.ReferenceH Sqlite.ReferenceH type Id = Id' ObjectId ObjectId type LocalReferent = Referent' Sqlite.LocalReference Sqlite.LocalReference diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 053b3e2f6e..4bc5fcf2d9 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -1,15 +1,16 @@ -{-# LANGUAGE TypeApplications #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TupleSections #-} +{-# LANGUAGE TypeApplications #-} {-# 
LANGUAGE ViewPatterns #-} module U.Codebase.Sqlite.Serialization where import Data.Bits (Bits) -import Data.Bytes.Get (MonadGet, getWord8) +import qualified Data.ByteString as BS +import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) import Data.Bytes.Put (MonadPut, putWord8) import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE, serialize) import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) @@ -33,7 +34,7 @@ import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as BranchFull import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, unBranchObjectId, unPatchObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat -import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId) +import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId, WatchLocalIds) import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff import qualified U.Codebase.Sqlite.Patch.Format as PatchFormat import qualified U.Codebase.Sqlite.Patch.Full as PatchFull @@ -45,6 +46,7 @@ import qualified U.Codebase.Sqlite.Term.Format as TermFormat import qualified U.Codebase.Term as Term import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT +import qualified U.Util.Monoid as Monoid import U.Util.Serialization import Prelude hiding (getChar, putChar) @@ -211,7 +213,7 @@ putTerm = putABT putSymbol putUnit putF Term.Char c -> putWord8 19 *> putChar c Term.TermLink r -> - putWord8 20 *> putReferent putRecursiveReference putReference r + putWord8 20 *> putReferent' putRecursiveReference putReference r Term.TypeLink r -> putWord8 21 *> putReference r putMatchCase :: MonadPut m => (a -> m ()) -> Term.MatchCase LocalTextId TermFormat.TypeRef a -> m () @@ -428,7 +430,6 @@ putBranchFormat = \case BranchFormat.Full li b -> putWord8 0 *> putBranchFull li b BranchFormat.Diff r li d -> putWord8 1 *> putBranchDiff r li d where - putReferent' = putReferent 
putReference putReference putBranchLocalIds (BranchFormat.LocalIds ts os ps cs) = do putFoldable putVarInt ts putFoldable putVarInt os @@ -436,7 +437,7 @@ putBranchFormat = \case putFoldable (putPair putVarInt putVarInt) cs putBranchFull li (BranchFull.Branch terms types patches children) = do putBranchLocalIds li - putMap putVarInt (putMap putReferent' putMetadataSetFormat) terms + putMap putVarInt (putMap putReferent putMetadataSetFormat) terms putMap putVarInt (putMap putReference putMetadataSetFormat) types putMap putVarInt putVarInt patches putMap putVarInt putVarInt children @@ -445,7 +446,7 @@ putBranchFormat = \case putBranchDiff ref li (BranchDiff.Diff terms types patches children) = do putVarInt ref putBranchLocalIds li - putMap putVarInt (putMap putReferent' putDiffOp) terms + putMap putVarInt (putMap putReferent putDiffOp) terms putMap putVarInt (putMap putReference putDiffOp) types putMap putVarInt putPatchOp patches putMap putVarInt putChildOp children @@ -467,30 +468,76 @@ putBranchFormat = \case putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () putPatchFormat = \case - PatchFormat.Full p -> putWord8 0 *> putPatchFull p - PatchFormat.Diff r p -> putWord8 1 *> putPatchDiff r p + PatchFormat.Full ids p -> putWord8 0 *> putPatchFull ids p + PatchFormat.Diff r ids p -> putWord8 1 *> putPatchDiff r ids p getPatchFormat :: MonadGet m => m PatchFormat.PatchFormat -getPatchFormat = error "todo" +getPatchFormat = + getWord8 >>= \case + 0 -> PatchFormat.Full <$> getPatchLocalIds <*> getPatchFull + 1 -> PatchFormat.Diff <$> getVarInt <*> getPatchLocalIds <*> getPatchDiff + x -> unknownTag "getPatchFormat" x + where + getPatchFull :: MonadGet m => m PatchFull.LocalPatch + getPatchFull = + PatchFull.Patch + <$> getMap getReferent (getSet getTermEdit) + <*> getMap getReference (getSet getTypeEdit) + getPatchDiff :: MonadGet m => m PatchDiff.LocalPatchDiff + getPatchDiff = + PatchDiff.PatchDiff + <$> getMap getReferent (getSet getTermEdit) + <*> 
getMap getReference (getSet getTypeEdit) + <*> getMap getReferent (getSet getTermEdit) + <*> getMap getReference (getSet getTypeEdit) + getTermEdit :: MonadGet m => m TermEdit.LocalTermEdit + getTermEdit = + getWord8 >>= \case + 0 -> pure TermEdit.Deprecate + 1 -> TermEdit.Replace <$> getReference <*> getTyping + x -> unknownTag "getTermEdit" x + getTyping :: MonadGet m => m TermEdit.Typing + getTyping = + getWord8 >>= \case + 0 -> pure TermEdit.Same + 1 -> pure TermEdit.Subtype + 2 -> pure TermEdit.Different + x -> unknownTag "getTyping" x + getTypeEdit :: MonadGet m => m TypeEdit.LocalTypeEdit + getTypeEdit = getWord8 >>= \case + 0 -> pure TypeEdit.Deprecate + 1 -> TypeEdit.Replace <$> getReference + x -> unknownTag "getTypeEdit" x + +getPatchLocalIds :: MonadGet m => m PatchFormat.PatchLocalIds +getPatchLocalIds = + PatchFormat.LocalIds + <$> getVector getVarInt + <*> getVector getVarInt + <*> getVector getVarInt -putPatchFull :: MonadPut m => PatchFull.Patch -> m () -putPatchFull (PatchFull.Patch termEdits typeEdits) = do - putMap putReferent' (putFoldable putTermEdit) termEdits +putPatchFull :: MonadPut m => PatchFormat.PatchLocalIds -> PatchFull.LocalPatch -> m () +putPatchFull ids (PatchFull.Patch termEdits typeEdits) = do + putPatchLocalIds ids + putMap putReferent (putFoldable putTermEdit) termEdits putMap putReference (putFoldable putTypeEdit) typeEdits - where - putReferent' = putReferent putReference putReference -putPatchDiff :: MonadPut m => PatchObjectId -> PatchDiff.PatchDiff -> m () -putPatchDiff r (PatchDiff.PatchDiff atm atp rtm rtp) = do +putPatchDiff :: MonadPut m => PatchObjectId -> PatchFormat.PatchLocalIds -> PatchDiff.LocalPatchDiff -> m () +putPatchDiff r ids (PatchDiff.PatchDiff atm atp rtm rtp) = do putVarInt r - putMap putReferent' (putFoldable putTermEdit) atm + putPatchLocalIds ids + putMap putReferent (putFoldable putTermEdit) atm putMap putReference (putFoldable putTypeEdit) atp - putMap putReferent' (putFoldable putTermEdit) rtm + 
putMap putReferent (putFoldable putTermEdit) rtm putMap putReference (putFoldable putTypeEdit) rtp - where - putReferent' = putReferent putReference putReference -putTermEdit :: MonadPut m => TermEdit.TermEdit -> m () +putPatchLocalIds :: MonadPut m => PatchFormat.PatchLocalIds -> m () +putPatchLocalIds (PatchFormat.LocalIds ts hs os) = do + putFoldable putVarInt ts + putFoldable putVarInt hs + putFoldable putVarInt os + +putTermEdit :: MonadPut m => TermEdit.LocalTermEdit -> m () putTermEdit TermEdit.Deprecate = putWord8 0 putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReference r *> putTyping t where @@ -498,7 +545,7 @@ putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReference r *> putTyping t putTyping TermEdit.Subtype = putWord8 1 putTyping TermEdit.Different = putWord8 2 -putTypeEdit :: MonadPut m => TypeEdit.TypeEdit -> m () +putTypeEdit :: MonadPut m => TypeEdit.LocalTypeEdit -> m () putTypeEdit TypeEdit.Deprecate = putWord8 0 putTypeEdit (TypeEdit.Replace r) = putWord8 1 *> putReference r @@ -509,13 +556,12 @@ getBranchFormat = 1 -> getBranchDiff x -> unknownTag "getBranchFormat" x where - getReferent' = getReferent getReference getReference getBranchFull = BranchFormat.Full <$> getBranchLocalIds <*> getLocalBranch where getLocalBranch = BranchFull.Branch - <$> getMap getVarInt (getMap getReferent' getMetadataSetFormat) + <$> getMap getVarInt (getMap getReferent getMetadataSetFormat) <*> getMap getVarInt (getMap getReference getMetadataSetFormat) <*> getMap getVarInt getVarInt <*> getMap getVarInt getVarInt @@ -532,7 +578,7 @@ getBranchFormat = where getLocalBranchDiff = BranchDiff.Diff - <$> getMap getVarInt (getMap getReferent' getDiffOp) + <$> getMap getVarInt (getMap getReferent getDiffOp) <*> getMap getVarInt (getMap getReference getDiffOp) <*> getMap getVarInt getPatchOp <*> getMap getVarInt getChildOp @@ -546,14 +592,16 @@ getBranchFormat = adds <- getMap get (pure True) -- and removes: addToExistingMap get (pure False) adds - getPatchOp 
= getWord8 >>= \case - 0 -> pure BranchDiff.PatchRemove - 1 -> BranchDiff.PatchAddReplace <$> getVarInt - x -> unknownTag "getPatchOp" x - getChildOp = getWord8 >>= \case - 0 -> pure BranchDiff.ChildRemove - 1 -> BranchDiff.ChildAddReplace <$> getVarInt - x -> unknownTag "getChildOp" x + getPatchOp = + getWord8 >>= \case + 0 -> pure BranchDiff.PatchRemove + 1 -> BranchDiff.PatchAddReplace <$> getVarInt + x -> unknownTag "getPatchOp" x + getChildOp = + getWord8 >>= \case + 0 -> pure BranchDiff.ChildRemove + 1 -> BranchDiff.ChildAddReplace <$> getVarInt + x -> unknownTag "getChildOp" x getBranchLocalIds :: MonadGet m => m BranchFormat.BranchLocalIds getBranchLocalIds = @@ -567,15 +615,15 @@ getBranchSyncEntities :: MonadGet m => m SE.SyncEntitySeq getBranchSyncEntities = getWord8 >>= \case -- Full - 0 -> getLocalIds + 0 -> getDeps -- Diff 1 -> do id <- getVarInt @_ @BranchObjectId - SE.addObjectId (unBranchObjectId id) <$> getLocalIds + SE.addObjectId (unBranchObjectId id) <$> getDeps x -> unknownTag "getBranchSyncEntities" x where - getLocalIds = branchLocalIdsToLocalDeps <$> getBranchLocalIds - branchLocalIdsToLocalDeps (BranchFormat.LocalIds ts os ps bcs) = + getDeps = localIdsToDeps <$> getBranchLocalIds + localIdsToDeps (BranchFormat.LocalIds ts os ps bcs) = SE.SyncEntity (vec2seq ts) ( vec2seq os @@ -586,17 +634,44 @@ getBranchSyncEntities = (vec2seq chs) where (bos, chs) = Vector.unzip bcs - vec2seq :: Vector a -> Seq a - vec2seq v = Seq.fromFunction (length v) (v Vector.!) - -getTermComponentSyncEntities :: m SE.SyncEntitySeq -getTermComponentSyncEntities = error "not implemented" - -getDeclComponentSyncEntities :: m SE.SyncEntitySeq -getDeclComponentSyncEntities = error "not implemented" -getPatchSyncEntities :: m SE.SyncEntitySeq -getPatchSyncEntities = error "not implemented" +vec2seq :: Vector a -> Seq a +vec2seq v = Seq.fromFunction (length v) (v Vector.!) 
+ +localIdsToLocalDeps :: LocalIds -> SE.SyncEntitySeq +localIdsToLocalDeps (LocalIds ts os) = + SE.SyncEntity (vec2seq ts) (vec2seq os) mempty mempty + +watchLocalIdsToLocalDeps :: WatchLocalIds -> SE.SyncEntitySeq +watchLocalIdsToLocalDeps (LocalIds ts hs) = + SE.SyncEntity (vec2seq ts) mempty (vec2seq hs) mempty + +-- the same implementation currently works for term component and type component +getComponentSyncEntities :: MonadGet m => m SE.SyncEntitySeq +getComponentSyncEntities = do + offsets <- getList (getVarInt @_ @Int) + componentBytes <- getByteString (last offsets) + let get1 (start, end) = do + let bytes = BS.drop start $ BS.take end componentBytes + either fail pure $ runGetS getLocalIds bytes + Monoid.foldMapM (fmap localIdsToLocalDeps . get1) (zip offsets (tail offsets)) + +getPatchSyncEntities :: MonadGet m => m SE.SyncEntitySeq +getPatchSyncEntities = + getWord8 >>= \case + 0 -> getDeps + 1 -> do + id <- getVarInt @_ @PatchObjectId + SE.addObjectId (unPatchObjectId id) <$> getDeps + x -> unknownTag "getPatchSyncEntities" x + where + getDeps = localIdsToDeps <$> getPatchLocalIds + localIdsToDeps (PatchFormat.LocalIds ts hs os) = + SE.SyncEntity + (vec2seq ts) + (vec2seq os) + (vec2seq hs) + mempty getSymbol :: MonadGet m => m Symbol getSymbol = Symbol <$> getVarInt <*> getText @@ -604,8 +679,23 @@ getSymbol = Symbol <$> getVarInt <*> getText putSymbol :: MonadPut m => Symbol -> m () putSymbol (Symbol n t) = putVarInt n >> putText t -putReferent :: MonadPut m => (r1 -> m ()) -> (r2 -> m ()) -> Referent' r1 r2 -> m () -putReferent putRefRef putConRef = \case +putReferent :: + ( MonadPut m, + Integral t1, + Integral h1, + Bits t1, + Bits h1, + Integral t2, + Integral h2, + Bits t2, + Bits h2 + ) => + Referent' (Reference' t1 h1) (Reference' t2 h2) -> + m () +putReferent = putReferent' putReference putReference + +putReferent' :: MonadPut m => (r1 -> m ()) -> (r2 -> m ()) -> Referent' r1 r2 -> m () +putReferent' putRefRef putConRef = \case Referent.Ref 
r -> do putWord8 0 putRefRef r @@ -624,13 +714,27 @@ putReference = \case ReferenceDerived (Reference.Id r index) -> putWord8 1 *> putVarInt r *> putVarInt index -getReferent :: MonadGet m => m r1 -> m r2 -> m (Referent' r1 r2) -getReferent getRefRef getConRef = +getReferent' :: MonadGet m => m r1 -> m r2 -> m (Referent' r1 r2) +getReferent' getRefRef getConRef = getWord8 >>= \case 0 -> Referent.Ref <$> getRefRef 1 -> Referent.Con <$> getConRef <*> getVarInt x -> unknownTag "getReferent" x +getReferent :: + ( MonadGet m, + Integral t1, + Integral h1, + Bits t1, + Bits h1, + Integral t2, + Integral h2, + Bits t2, + Bits h2 + ) => + m (Referent' (Reference' t1 h1) (Reference' t2 h2)) +getReferent = getReferent' getReference getReference + getReference :: (MonadGet m, Integral t, Bits t, Integral r, Bits r) => m (Reference' t r) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs index e216bb6f4e..755f060b28 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs @@ -1,3 +1,7 @@ +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeSynonymInstances #-} +{-# LANGUAGE RankNTypes #-} + module U.Codebase.Sqlite.SyncEntity where import Data.Sequence (Seq) @@ -22,3 +26,18 @@ type SyncEntitySeq = SyncEntity' Seq addObjectId :: Db.ObjectId -> SyncEntitySeq -> SyncEntitySeq addObjectId id s = s {objects = id Seq.<| objects s} + +append :: (forall a. 
f a -> f a -> f a) -> SyncEntity' f -> SyncEntity' f -> SyncEntity' f +append (<>) a b = + SyncEntity + (text a <> text b) + (objects a <> objects b) + (hashes a <> hashes b) + (causals a <> causals b) + +instance Semigroup SyncEntitySeq where + (<>) = append (<>) + +instance Monoid SyncEntitySeq where + mempty = SyncEntity mempty mempty mempty mempty + mappend = (<>) diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 0c1e84de92..d264cc7305 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE TypeApplications #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} @@ -18,7 +19,6 @@ import Data.Bytes.Get (MonadGet, getByteString, getBytes, getWord8, runGetS, ski import Data.Bytes.Put (MonadPut, putByteString, putWord8, runPutS) import Data.Bytes.VarInt (VarInt (VarInt)) import Data.Foldable (Foldable (toList), traverse_) -import Data.List.Extra (dropEnd) import Data.Map (Map) import qualified Data.Map as Map import Data.Sequence (Seq) @@ -198,14 +198,15 @@ putFramedArray :: (MonadPut m, Foldable f) => Put a -> f a -> m () putFramedArray put (toList -> as) = do let bss = fmap (putBytes put) as let lengths = fmap BS.length bss - let offsets = scanl (+) 0 (dropEnd 1 lengths) + let offsets = scanl (+) 0 lengths putFoldable putVarInt offsets traverse_ putByteString bss getFramedArray :: MonadGet m => m a -> m (Vector a) getFramedArray getA = do offsets :: [Int] <- getList getVarInt - let count = length offsets - 1 + _end <- getVarInt @_ @Int + let count = length offsets Vector.replicateM count getA -- | Look up a 0-based index in a framed array, O(num array elements), @@ -214,7 +215,7 @@ getFramedArray getA = do lookupFramedArray :: MonadGet m => m a -> Int -> m (Maybe a) lookupFramedArray getA index = do offsets <- getVector getVarInt - if index > 
Vector.length offsets + if index > Vector.length offsets - 1 then pure Nothing else do skip (Vector.unsafeIndex offsets index) From 7f27c4a671114fd8e21b9f5f09d2174aaaf52a95 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sun, 22 Nov 2020 11:09:44 -0500 Subject: [PATCH 052/225] working on serializing patches --- .../U/Codebase/Sqlite/LocalIds.hs | 5 + .../U/Codebase/Sqlite/Operations.hs | 182 ++++++++++++------ .../U/Codebase/Sqlite/Patch/Diff.hs | 28 ++- .../U/Codebase/Sqlite/Patch/Full.hs | 19 +- .../U/Codebase/Sqlite/Patch/TermEdit.hs | 15 ++ .../U/Codebase/Sqlite/Patch/TypeEdit.hs | 15 ++ 6 files changed, 207 insertions(+), 57 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index db52f0c54b..668891dbe6 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -26,11 +26,16 @@ newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Int -- | represents an index into a defnLookup newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +-- | a local index to a hash, used when the corresponding object is allowed to be absent newtype LocalHashId = LocalHashId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 newtype LocalPatchObjectId = LocalPatchObjectId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + newtype LocalBranchChildId = LocalBranchChildId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +-- | causal hashes are treated differently from HashIds, which don't have dependencies +newtype LocalCausalHashId = LocalCausalHashId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 + instance Bitraversable LocalIds' where bitraverse f g (LocalIds t d) = LocalIds <$> traverse f t <*> traverse g d diff --git 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 4a23199a0f..87ce971890 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -63,6 +63,7 @@ import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds ( LocalBranchChildId (..), LocalDefnId (..), + LocalHashId (..), LocalIds, LocalIds' (..), LocalPatchObjectId (..), @@ -75,6 +76,7 @@ import qualified U.Codebase.Sqlite.Patch.Diff as S import qualified U.Codebase.Sqlite.Patch.Format as S import qualified U.Codebase.Sqlite.Patch.Format as S.PatchFormat import qualified U.Codebase.Sqlite.Patch.Full as S +import qualified U.Codebase.Sqlite.Patch.Full as S.Patch.Full import qualified U.Codebase.Sqlite.Patch.TermEdit as S import qualified U.Codebase.Sqlite.Patch.TermEdit as S.TermEdit import qualified U.Codebase.Sqlite.Patch.TypeEdit as S @@ -104,6 +106,7 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +import qualified U.Codebase.Sqlite.Patch.Diff as S.PatchDiff type Err m = MonadError Error m @@ -142,7 +145,42 @@ liftQ a = Left e -> throwError (DatabaseIntegrityError e) Right a -> pure a --- * helpers +-- * Database lookups + +lookupTextId :: EDB m => Text -> m Db.TextId +lookupTextId t = + Q.loadText t >>= \case + Just textId -> pure textId + Nothing -> throwError $ UnknownText t + +loadTextById :: EDB m => Db.TextId -> m Text +loadTextById = liftQ . Q.loadTextById + +hashToObjectId :: EDB m => H.Hash -> m Db.ObjectId +hashToObjectId h = do + (Q.loadHashId . H.toBase32Hex) h >>= \case + Just hashId -> liftQ $ Q.expectObjectIdForPrimaryHashId hashId + Nothing -> throwError $ UnknownDependency h + +objectExistsForHash :: EDB m => H.Hash -> m Bool +objectExistsForHash h = isJust <$> runMaybeT ((Q.loadHashId . 
H.toBase32Hex) h) + +loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash +loadHashByObjectId = fmap H.fromBase32Hex . liftQ . Q.loadPrimaryHashByObjectId + +loadHashByHashId :: EDB m => Db.HashId -> m H.Hash +loadHashByHashId = fmap H.fromBase32Hex . liftQ . Q.loadHashById + +loadCausalHashById :: EDB m => Db.CausalHashId -> m CausalHash +loadCausalHashById = fmap (CausalHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unCausalHashId + +loadValueHashByCausalHashId :: EDB m => Db.CausalHashId -> m BranchHash +loadValueHashByCausalHashId = loadValueHashById <=< liftQ . Q.loadCausalValueHashId + where + loadValueHashById :: EDB m => Db.BranchHashId -> m BranchHash + loadValueHashById = fmap (BranchHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unBranchHashId + +-- * Reference transformations c2sReference :: EDB m => C.Reference -> MaybeT m S.Reference c2sReference = bitraverse lookupTextId hashToObjectId @@ -150,21 +188,35 @@ c2sReference = bitraverse lookupTextId hashToObjectId s2cReference :: EDB m => S.Reference -> m C.Reference s2cReference = bitraverse loadTextById loadHashByObjectId -h2cReference :: EDB m => S.ReferenceH -> m C.Reference -h2cReference = bitraverse loadTextById loadHashByHashId - c2sReferenceId :: EDB m => C.Reference.Id -> m S.Reference.Id c2sReferenceId = C.Reference.idH hashToObjectId s2cReferenceId :: EDB m => S.Reference.Id -> m C.Reference.Id s2cReferenceId = C.Reference.idH loadHashByObjectId +h2cReference :: EDB m => S.ReferenceH -> m C.Reference +h2cReference = bitraverse loadTextById loadHashByHashId + s2cReferent :: EDB m => S.Referent -> m C.Referent s2cReferent = bitraverse s2cReference s2cReference h2cReferent :: EDB m => S.ReferentH -> m C.Referent h2cReferent = bitraverse h2cReference h2cReference +saveReferent :: EDB m => C.Referent -> m S.Referent +saveReferent = bitraverse saveReference saveReference + +saveReference :: EDB m => C.Reference -> m S.Reference +saveReference = bitraverse Q.saveText hashToObjectId + 
+saveReferentH :: DB m => C.Referent -> m S.ReferentH +saveReferentH = bitraverse saveReferenceH saveReferenceH + +saveReferenceH :: DB m => C.Reference -> m S.ReferenceH +saveReferenceH = bitraverse Q.saveText Q.saveHashHash + +-- * Edits transformations + s2cTermEdit :: EDB m => S.TermEdit -> m C.TermEdit s2cTermEdit = \case S.TermEdit.Replace r t -> C.TermEdit.Replace <$> s2cReference r <*> pure (s2cTyping t) @@ -176,11 +228,28 @@ s2cTyping = \case S.TermEdit.Subtype -> C.TermEdit.Subtype S.TermEdit.Different -> C.TermEdit.Different +c2sTyping :: C.TermEdit.Typing -> S.TermEdit.Typing +c2sTyping = \case + C.TermEdit.Same -> S.TermEdit.Same + C.TermEdit.Subtype -> S.TermEdit.Subtype + C.TermEdit.Different -> S.TermEdit.Different + s2cTypeEdit :: EDB m => S.TypeEdit -> m C.TypeEdit s2cTypeEdit = \case S.TypeEdit.Replace r -> C.TypeEdit.Replace <$> s2cReference r S.TypeEdit.Deprecate -> pure C.TypeEdit.Deprecate +saveTermEdit :: EDB m => C.TermEdit -> m S.TermEdit +saveTermEdit = \case + C.TermEdit.Replace r t -> S.TermEdit.Replace <$> saveReference r <*> pure (c2sTyping t) + C.TermEdit.Deprecate -> pure S.TermEdit.Deprecate + +saveTypeEdit :: EDB m => C.TypeEdit -> m S.TypeEdit +saveTypeEdit = \case + C.TypeEdit.Replace r -> S.TypeEdit.Replace <$> saveReference r + C.TypeEdit.Deprecate -> pure S.TypeEdit.Deprecate + +-- * Branch transformation s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) s2cBranch (S.Branch.Full.Branch tms tps patches children) = C.Branch @@ -209,18 +278,20 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = doPatches = Map.bitraverse (fmap C.NameSegment . loadTextById) \patchId -> do h <- PatchHash <$> (loadHashByObjectId . 
Db.unPatchObjectId) patchId let patch :: EDB m => m C.Patch - patch = error "todo" - -- deserializePatchObject patchId >>= \case - -- S.PatchFormat.Full p -> s2cPatch p - -- S.PatchFormat.Diff ref d -> doDiff ref [d] - doDiff ref ds = error "todo" - -- deserializePatchObject ref >>= \case - -- S.PatchFormat.Full f -> s2cPatch (joinFull f ds) - -- S.PatchFormat.Diff ref' d' -> doDiff ref' (d' : ds) - joinFull f [] = f + patch = + deserializePatchObject patchId >>= \case + S.PatchFormat.Full li p -> s2cPatch (l2sPatchFull li p) + S.PatchFormat.Diff ref li d -> doDiff ref [l2sPatchDiff li d] + doDiff :: EDB m => Db.PatchObjectId -> [S.PatchDiff] -> m C.Patch + doDiff ref ds = + deserializePatchObject ref >>= \case + S.PatchFormat.Full li f -> joinFull (l2sPatchFull li f) ds + S.PatchFormat.Diff ref' li' d' -> doDiff ref' (l2sPatchDiff li' d' : ds) + joinFull :: EDB m => S.Patch -> [S.PatchDiff] -> m C.Patch + joinFull f [] = s2cPatch f joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds where - f' = error "todo" -- S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) + f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) addRemove add del src = (Map.unionWith (<>) add (Map.differenceWith remove src del)) @@ -256,44 +327,47 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId traverse loadBranchByObjectId boId + +-- saveBranch :: EDB m => C.Branch m -> m S.DbBranch +-- saveBranch = error "todo" + +-- * Patch transformation s2cPatch :: EDB m => S.Patch -> m C.Patch s2cPatch (S.Patch termEdits typeEdits) = C.Patch <$> Map.bitraverse h2cReferent (Set.traverse s2cTermEdit) termEdits <*> 
Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits -lookupTextId :: EDB m => Text -> m Db.TextId -lookupTextId t = - Q.loadText t >>= \case - Just textId -> pure textId - Nothing -> throwError $ UnknownText t +savePatchHashes :: EDB m => C.Patch -> m S.Patch +savePatchHashes (C.Patch termEdits typeEdits) = + S.Patch + <$> Map.bitraverse saveReferentH (Set.traverse saveTermEdit) termEdits + <*> Map.bitraverse saveReferenceH (Set.traverse saveTypeEdit) typeEdits -loadTextById :: EDB m => Db.TextId -> m Text -loadTextById = liftQ . Q.loadTextById +-- implementation detail of loadPatchById? +lookupPatchLocalText :: S.PatchLocalIds -> LocalTextId -> Db.TextId +lookupPatchLocalText li (LocalTextId w) = S.PatchFormat.patchTextLookup li Vector.! fromIntegral w -hashToObjectId :: EDB m => H.Hash -> m Db.ObjectId -hashToObjectId h = do - (Q.loadHashId . H.toBase32Hex) h >>= \case - Just hashId -> liftQ $ Q.expectObjectIdForPrimaryHashId hashId - Nothing -> throwError $ UnknownDependency h +lookupPatchLocalHash :: S.PatchLocalIds -> LocalHashId -> Db.HashId +lookupPatchLocalHash li (LocalHashId w) = S.PatchFormat.patchHashLookup li Vector.! fromIntegral w -objectExistsForHash :: EDB m => H.Hash -> m Bool -objectExistsForHash h = isJust <$> runMaybeT (hashToObjectId h) - -loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash -loadHashByObjectId = fmap H.fromBase32Hex . liftQ . Q.loadPrimaryHashByObjectId +lookupPatchLocalDefn :: S.PatchLocalIds -> LocalDefnId -> Db.ObjectId +lookupPatchLocalDefn li (LocalDefnId w) = S.PatchFormat.patchDefnLookup li Vector.! fromIntegral w -loadHashByHashId :: EDB m => Db.HashId -> m H.Hash -loadHashByHashId = fmap H.fromBase32Hex . liftQ . 
Q.loadHashById +l2sPatchFull :: S.PatchFormat.PatchLocalIds -> S.LocalPatch -> S.Patch +l2sPatchFull li = + S.Patch.Full.trimap + (lookupPatchLocalText li) + (lookupPatchLocalHash li) + (lookupPatchLocalDefn li) -loadCausalHashById :: EDB m => Db.CausalHashId -> m CausalHash -loadCausalHashById = fmap (CausalHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unCausalHashId +l2sPatchDiff :: S.PatchFormat.PatchLocalIds -> S.LocalPatchDiff -> S.PatchDiff +l2sPatchDiff li = S.PatchDiff.trimap + (lookupPatchLocalText li) + (lookupPatchLocalHash li) + (lookupPatchLocalDefn li) -loadValueHashByCausalHashId :: EDB m => Db.CausalHashId -> m BranchHash -loadValueHashByCausalHashId = loadValueHashById <=< liftQ . Q.loadCausalValueHashId - where - loadValueHashById :: EDB m => Db.BranchHashId -> m BranchHash - loadValueHashById = fmap (BranchHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unBranchHashId +-- * Deserialization helpers decodeComponentLengthOnly :: Err m => ByteString -> m Word64 decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen S.lengthFramedArray @@ -326,7 +400,7 @@ getDeclTypeByReference r@(C.Reference.Id h pos) = >>= maybe (throwError $ LegacyUnknownConstructorType h pos) pure >>= pure . C.Decl.declType --- * meat and veggies +-- * Codebase operations loadTermWithTypeByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol, C.Term.Type Symbol) loadTermWithTypeByReference (C.Reference.Id h i) = @@ -698,17 +772,17 @@ componentByObjectId id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. len - 1]] -lookupLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId -lookupLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! fromIntegral w +lookupBranchLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId +lookupBranchLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! 
fromIntegral w -lookupLocalDefn :: S.BranchLocalIds -> LocalDefnId -> Db.ObjectId -lookupLocalDefn li (LocalDefnId w) = S.BranchFormat.branchDefnLookup li Vector.! fromIntegral w +lookupBranchLocalDefn :: S.BranchLocalIds -> LocalDefnId -> Db.ObjectId +lookupBranchLocalDefn li (LocalDefnId w) = S.BranchFormat.branchDefnLookup li Vector.! fromIntegral w -lookupLocalPatch :: BranchLocalIds -> LocalPatchObjectId -> Db.PatchObjectId -lookupLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLookup li Vector.! fromIntegral w +lookupBranchLocalPatch :: BranchLocalIds -> LocalPatchObjectId -> Db.PatchObjectId +lookupBranchLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLookup li Vector.! fromIntegral w -lookupLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) -lookupLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w +lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) +lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! 
fromIntegral w loadBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) loadBranchByCausalHashId id = do @@ -733,16 +807,16 @@ loadBranchByObjectId id = do l2sFull :: S.BranchFormat.BranchLocalIds -> S.LocalBranch -> S.DbBranch l2sFull li = - S.Branch.Full.quadmap (lookupLocalText li) (lookupLocalDefn li) (lookupLocalPatch li) (lookupLocalChild li) + S.Branch.Full.quadmap (lookupBranchLocalText li) (lookupBranchLocalDefn li) (lookupBranchLocalPatch li) (lookupBranchLocalChild li) l2sDiff :: S.BranchFormat.BranchLocalIds -> S.Branch.LocalDiff -> S.Branch.Diff - l2sDiff li = S.BranchDiff.quadmap (lookupLocalText li) (lookupLocalDefn li) (lookupLocalPatch li) (lookupLocalChild li) + l2sDiff li = S.BranchDiff.quadmap (lookupBranchLocalText li) (lookupBranchLocalDefn li) (lookupBranchLocalPatch li) (lookupBranchLocalChild li) doDiff :: EDB m => Db.BranchObjectId -> [S.Branch.Diff] -> m (C.Branch m) - doDiff ref lds = + doDiff ref ds = deserializeBranchObject ref >>= \case - S.BranchFormat.Full li f -> joinFull (l2sFull li f) lds - S.BranchFormat.Diff ref' li' d' -> doDiff ref' (l2sDiff li' d' : lds) + S.BranchFormat.Full li f -> joinFull (l2sFull li f) ds + S.BranchFormat.Diff ref' li' d' -> doDiff ref' (l2sDiff li' d' : ds) where joinFull :: EDB m => S.DbBranch -> [S.Branch.Diff] -> m (C.Branch m) joinFull f [] = s2cBranch f diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index fdd06f6c8a..bddb8c8b13 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -1,19 +1,43 @@ module U.Codebase.Sqlite.Patch.Diff where +import Data.Bifunctor (Bifunctor (bimap)) import Data.Map (Map) import Data.Set (Set) +import qualified Data.Set as Set import U.Codebase.Reference (Reference') import U.Codebase.Referent (Referent') +import qualified U.Codebase.Sqlite.DbId as Db import 
U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) import U.Codebase.Sqlite.Patch.TermEdit (TermEdit') import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit') +import qualified U.Util.Map as Map + +type PatchDiff = PatchDiff' Db.TextId Db.HashId Db.ObjectId type LocalPatchDiff = PatchDiff' LocalTextId LocalHashId LocalDefnId +type Referent'' t h = Referent' (Reference' t h) (Reference' t h) + data PatchDiff' t h d = PatchDiff - { addedTermEdits :: Map (Referent' (Reference' t h) (Reference' t h)) (Set (TermEdit' t d)), + { addedTermEdits :: Map (Referent'' t h) (Set (TermEdit' t d)), addedTypeEdits :: Map (Reference' t h) (Set (TypeEdit' t d)), - removedTermEdits :: Map (Referent' (Reference' t h) (Reference' t h)) (Set (TermEdit' t d)), + removedTermEdits :: Map (Referent'' t h) (Set (TermEdit' t d)), removedTypeEdits :: Map (Reference' t h) (Set (TypeEdit' t d)) } deriving (Eq, Ord, Show) + +trimap :: + (Ord t', Ord h', Ord d') => + (t -> t') -> + (h -> h') -> + (d -> d') -> + PatchDiff' t h d -> + PatchDiff' t' h' d' +trimap ft fh fd (PatchDiff atm atp rtm rtp) = + PatchDiff + (Map.bimap bimapReferent (Set.map (bimap ft fd)) atm) + (Map.bimap (bimap ft fh) (Set.map (bimap ft fd)) atp) + (Map.bimap bimapReferent (Set.map (bimap ft fd)) rtm) + (Map.bimap (bimap ft fh) (Set.map (bimap ft fd)) rtp) + where + bimapReferent = bimap (bimap ft fh) (bimap ft fh) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs index 7b07334f15..06750e7104 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Full.hs @@ -1,19 +1,36 @@ module U.Codebase.Sqlite.Patch.Full where +import Data.Bifunctor (Bifunctor (bimap)) import Data.Map (Map) import Data.Set (Set) +import qualified Data.Set as Set import U.Codebase.Reference (Reference') import U.Codebase.Referent (Referent') import qualified U.Codebase.Sqlite.DbId as Db import 
U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) import U.Codebase.Sqlite.Patch.TermEdit (TermEdit') import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit') +import qualified U.Util.Map as Map type Patch = Patch' Db.TextId Db.HashId Db.ObjectId type LocalPatch = Patch' LocalTextId LocalHashId LocalDefnId +type Referent'' t h = Referent' (Reference' t h) (Reference' t h) + data Patch' t h o = Patch - { termEdits :: Map (Referent' (Reference' t h) (Reference' t h)) (Set (TermEdit' t o)), + { termEdits :: Map (Referent'' t h) (Set (TermEdit' t o)), typeEdits :: Map (Reference' t h) (Set (TypeEdit' t o)) } + +trimap :: + (Ord t', Ord h', Ord o') => + (t -> t') -> + (h -> h') -> + (o -> o') -> + Patch' t h o -> + Patch' t' h' o' +trimap ft fh fo (Patch tms tps) = + Patch + (Map.bimap (bimap (bimap ft fh) (bimap ft fh)) (Set.map (bimap ft fo)) tms) + (Map.bimap (bimap ft fh) (Set.map (bimap ft fo)) tps) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs index e49365c4b3..724f15257b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs @@ -1,5 +1,8 @@ module U.Codebase.Sqlite.Patch.TermEdit where +import Data.Bifoldable (Bifoldable (bifoldMap)) +import Data.Bifunctor (Bifunctor (bimap)) +import Data.Bitraversable (Bitraversable (bitraverse)) import U.Codebase.Reference (Reference') import qualified U.Codebase.Sqlite.DbId as Db import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalTextId) @@ -16,3 +19,15 @@ data TermEdit' t h = Replace (Reference' t h) Typing | Deprecate -- Replacements of a Different type need to be manually propagated by the programmer. 
data Typing = Same | Subtype | Different deriving (Eq, Ord, Show) + +instance Bifunctor TermEdit' where + bimap f g (Replace r t) = Replace (bimap f g r) t + bimap _ _ Deprecate = Deprecate + +instance Bifoldable TermEdit' where + bifoldMap f g (Replace r _t) = bifoldMap f g r + bifoldMap _ _ Deprecate = mempty + +instance Bitraversable TermEdit' where + bitraverse f g (Replace r t) = Replace <$> bitraverse f g r <*> pure t + bitraverse _ _ Deprecate = pure Deprecate diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs index df40ef049d..c2c0233a8f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TypeEdit.hs @@ -1,5 +1,8 @@ module U.Codebase.Sqlite.Patch.TypeEdit where +import Data.Bifoldable (Bifoldable (bifoldMap)) +import Data.Bifunctor (Bifunctor (bimap)) +import Data.Bitraversable (Bitraversable (bitraverse)) import U.Codebase.Reference (Reference') import qualified U.Codebase.Sqlite.DbId as Db import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalTextId) @@ -10,3 +13,15 @@ type TypeEdit = TypeEdit' Db.TextId Db.ObjectId data TypeEdit' t h = Replace (Reference' t h) | Deprecate deriving (Eq, Ord, Show) + +instance Bifunctor TypeEdit' where + bimap f g (Replace r) = Replace (bimap f g r) + bimap _ _ Deprecate = Deprecate + +instance Bifoldable TypeEdit' where + bifoldMap f g (Replace r) = bifoldMap f g r + bifoldMap _ _ Deprecate = mempty + +instance Bitraversable TypeEdit' where + bitraverse f g (Replace r) = Replace <$> bitraverse f g r + bitraverse _ _ Deprecate = pure Deprecate From b878ade060a4ee0431d4da6d518d7dc70879e3ed Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 23 Nov 2020 16:14:25 -0500 Subject: [PATCH 053/225] patch loading and patch diff and type index stuff --- .../U/Codebase/Sqlite/Operations.hs | 125 +++++++++++++----- .../U/Codebase/Sqlite/Patch/Diff.hs | 14 +- 
.../U/Codebase/Sqlite/Queries.hs | 27 ++++ .../U/Codebase/Sqlite/Referent.hs | 22 ++- codebase2/codebase/U/Codebase/Branch.hs | 4 +- codebase2/codebase/U/Codebase/Codebase.hs | 6 +- 6 files changed, 153 insertions(+), 45 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 87ce971890..da84ae15f3 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -73,6 +73,7 @@ import U.Codebase.Sqlite.LocalIds import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Patch.Diff as S +import qualified U.Codebase.Sqlite.Patch.Diff as S.PatchDiff import qualified U.Codebase.Sqlite.Patch.Format as S import qualified U.Codebase.Sqlite.Patch.Format as S.PatchFormat import qualified U.Codebase.Sqlite.Patch.Full as S @@ -106,7 +107,8 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import qualified U.Codebase.Sqlite.Patch.Diff as S.PatchDiff +import Data.Functor.Identity (Identity) +import qualified U.Codebase.Sqlite.Referent as S.Referent type Err m = MonadError Error m @@ -182,7 +184,9 @@ loadValueHashByCausalHashId = loadValueHashById <=< liftQ . 
Q.loadCausalValueHas -- * Reference transformations -c2sReference :: EDB m => C.Reference -> MaybeT m S.Reference +-- ** read existing references + +c2sReference :: EDB m => C.Reference -> m S.Reference c2sReference = bitraverse lookupTextId hashToObjectId s2cReference :: EDB m => S.Reference -> m C.Reference @@ -197,15 +201,24 @@ s2cReferenceId = C.Reference.idH loadHashByObjectId h2cReference :: EDB m => S.ReferenceH -> m C.Reference h2cReference = bitraverse loadTextById loadHashByHashId +c2hReference :: DB m => C.Reference -> MaybeT m S.ReferenceH +c2hReference = bitraverse (MaybeT . Q.loadText) (MaybeT . Q.loadHashIdByHash) + s2cReferent :: EDB m => S.Referent -> m C.Referent s2cReferent = bitraverse s2cReference s2cReference +s2cReferentId :: EDB m => S.Referent.Id -> m C.Referent.Id +s2cReferentId = bitraverse loadHashByObjectId loadHashByObjectId + h2cReferent :: EDB m => S.ReferentH -> m C.Referent h2cReferent = bitraverse h2cReference h2cReference +-- ** write new references + saveReferent :: EDB m => C.Referent -> m S.Referent saveReferent = bitraverse saveReference saveReference +-- | a referenced object must necessarily exist in the db already saveReference :: EDB m => C.Reference -> m S.Reference saveReference = bitraverse Q.saveText hashToObjectId @@ -250,6 +263,7 @@ saveTypeEdit = \case C.TypeEdit.Deprecate -> pure S.TypeEdit.Deprecate -- * Branch transformation + s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) s2cBranch (S.Branch.Full.Branch tms tps patches children) = C.Branch @@ -277,29 +291,7 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = doPatches :: EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) doPatches = Map.bitraverse (fmap C.NameSegment . loadTextById) \patchId -> do h <- PatchHash <$> (loadHashByObjectId . 
Db.unPatchObjectId) patchId - let patch :: EDB m => m C.Patch - patch = - deserializePatchObject patchId >>= \case - S.PatchFormat.Full li p -> s2cPatch (l2sPatchFull li p) - S.PatchFormat.Diff ref li d -> doDiff ref [l2sPatchDiff li d] - doDiff :: EDB m => Db.PatchObjectId -> [S.PatchDiff] -> m C.Patch - doDiff ref ds = - deserializePatchObject ref >>= \case - S.PatchFormat.Full li f -> joinFull (l2sPatchFull li f) ds - S.PatchFormat.Diff ref' li' d' -> doDiff ref' (l2sPatchDiff li' d' : ds) - joinFull :: EDB m => S.Patch -> [S.PatchDiff] -> m C.Patch - joinFull f [] = s2cPatch f - joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds - where - f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) - addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) - addRemove add del src = - (Map.unionWith (<>) add (Map.differenceWith remove src del)) - remove :: Ord b => Set b -> Set b -> Maybe (Set b) - remove src del = - let diff = Set.difference src del - in if diff == mempty then Nothing else Just diff - pure (h, patch) + pure (h, loadPatchById patchId) doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) doChildren = Map.bitraverse (fmap C.NameSegment . 
loadTextById) \(boId, chId) -> @@ -327,11 +319,42 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId traverse loadBranchByObjectId boId - --- saveBranch :: EDB m => C.Branch m -> m S.DbBranch --- saveBranch = error "todo" +saveRootBranch :: EDB m => C.Branch.Root m -> m (Db.BranchObjectId, Db.CausalHashId) +saveRootBranch (C.CausalHead _hc _he _parents _me) = error "todo" + where + _c2sBranch :: EDB m => C.Branch m -> m S.DbBranch + _c2sBranch = error "todo" -- * Patch transformation + +loadPatchById :: EDB m => Db.PatchObjectId -> m C.Patch +loadPatchById patchId = + deserializePatchObject patchId >>= \case + S.PatchFormat.Full li p -> s2cPatch (l2sPatchFull li p) + S.PatchFormat.Diff ref li d -> doDiff ref [l2sPatchDiff li d] + where + doDiff :: EDB m => Db.PatchObjectId -> [S.PatchDiff] -> m C.Patch + doDiff ref ds = + deserializePatchObject ref >>= \case + S.PatchFormat.Full li f -> joinFull (l2sPatchFull li f) ds + S.PatchFormat.Diff ref' li' d' -> doDiff ref' (l2sPatchDiff li' d' : ds) + joinFull :: EDB m => S.Patch -> [S.PatchDiff] -> m C.Patch + joinFull f [] = s2cPatch f + joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds + where + f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) + addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) + addRemove add del src = + (Map.unionWith (<>) add (Map.differenceWith remove src del)) + remove :: Ord b => Set b -> Set b -> Maybe (Set b) + remove src del = + let diff = Set.difference src del + in if diff == mempty then Nothing else Just diff + +savePatch :: DB m => C.Patch -> m Db.PatchObjectId +savePatch (C.Patch termEdits typeEdits) = do + error "todo" + s2cPatch :: EDB m => S.Patch -> m C.Patch s2cPatch (S.Patch termEdits typeEdits) = C.Patch @@ 
-344,6 +367,25 @@ savePatchHashes (C.Patch termEdits typeEdits) = <$> Map.bitraverse saveReferentH (Set.traverse saveTermEdit) termEdits <*> Map.bitraverse saveReferenceH (Set.traverse saveTypeEdit) typeEdits +-- | produces a diff +-- diff = full - ref; full = diff + ref +diffPatch :: S.LocalPatch -> S.LocalPatch -> S.LocalPatchDiff +diffPatch (S.Patch fullTerms fullTypes) (S.Patch refTerms refTypes) = + (S.PatchDiff addTermEdits addTypeEdits removeTermEdits removeTypeEdits) + where + -- add: present in full. but absent in ref. + addTermEdits = Map.merge Map.preserveMissing Map.dropMissing addDiffSet fullTerms refTerms + addTypeEdits = Map.merge Map.preserveMissing Map.dropMissing addDiffSet fullTypes refTypes + -- remove: present in ref. but absent in full. + removeTermEdits = Map.merge Map.dropMissing Map.preserveMissing removeDiffSet fullTerms refTerms + removeTypeEdits = Map.merge Map.dropMissing Map.preserveMissing removeDiffSet fullTypes refTypes + -- things that are present in full but absent in ref + addDiffSet, removeDiffSet :: + (Ord k, Ord a) => Map.WhenMatched Identity k (Set a) (Set a) (Set a) + addDiffSet = Map.zipWithMatched (const Set.difference) + removeDiffSet = Map.zipWithMatched (const (flip Set.difference)) + + -- implementation detail of loadPatchById? lookupPatchLocalText :: S.PatchLocalIds -> LocalTextId -> Db.TextId lookupPatchLocalText li (LocalTextId w) = S.PatchFormat.patchTextLookup li Vector.! 
fromIntegral w @@ -362,7 +404,8 @@ l2sPatchFull li = (lookupPatchLocalDefn li) l2sPatchDiff :: S.PatchFormat.PatchLocalIds -> S.LocalPatchDiff -> S.PatchDiff -l2sPatchDiff li = S.PatchDiff.trimap +l2sPatchDiff li = + S.PatchDiff.trimap (lookupPatchLocalText li) (lookupPatchLocalHash li) (lookupPatchLocalDefn li) @@ -706,11 +749,25 @@ saveWatch w r t = do let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm Q.saveWatch w rs bytes -termsHavingType :: DB m => C.Reference -> m (Set C.Referent.Id) -termsHavingType = error "todo" - -termsMentioningType :: DB m => C.Reference -> m (Set C.Referent.Id) -termsMentioningType = error "todo" +termsHavingType :: EDB m => C.Reference -> m (Set C.Referent.Id) +termsHavingType cTypeRef = do + maySet <- runMaybeT $ do + sTypeRef <- c2hReference cTypeRef + sIds <- Q.getReferentsByType sTypeRef + traverse s2cReferentId sIds + pure case maySet of + Nothing -> mempty + Just set -> Set.fromList set + +termsMentioningType :: EDB m => C.Reference -> m (Set C.Referent.Id) +termsMentioningType cTypeRef = do + maySet <- runMaybeT $ do + sTypeRef <- c2hReference cTypeRef + sIds <- Q.getReferentsByTypeMention sTypeRef + traverse s2cReferentId sIds + pure case maySet of + Nothing -> mempty + Just set -> Set.fromList set -- something kind of funny here. 
first, we don't need to enumerate all the reference pos if we're just picking one -- second, it would be nice if we could leave these as S.References a little longer diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index bddb8c8b13..613f70a5a4 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -11,6 +11,7 @@ import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) import U.Codebase.Sqlite.Patch.TermEdit (TermEdit') import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit') import qualified U.Util.Map as Map +import qualified Data.Monoid as Monoid type PatchDiff = PatchDiff' Db.TextId Db.HashId Db.ObjectId @@ -18,14 +19,25 @@ type LocalPatchDiff = PatchDiff' LocalTextId LocalHashId LocalDefnId type Referent'' t h = Referent' (Reference' t h) (Reference' t h) +-- | diff. = min. - sub. data PatchDiff' t h d = PatchDiff - { addedTermEdits :: Map (Referent'' t h) (Set (TermEdit' t d)), + { -- | elements present in min. but absent in sub. + addedTermEdits :: Map (Referent'' t h) (Set (TermEdit' t d)), addedTypeEdits :: Map (Reference' t h) (Set (TypeEdit' t d)), + -- | elements missing in min. but present in sub. removedTermEdits :: Map (Referent'' t h) (Set (TermEdit' t d)), removedTypeEdits :: Map (Reference' t h) (Set (TypeEdit' t d)) } deriving (Eq, Ord, Show) +-- | the number of dbids in the patch, an approximation to disk size +idcount :: PatchDiff' t h d -> Int +idcount (PatchDiff atm atp rtm rtp) = + go atm + go atp + go rtm + go rtp + where + go :: Foldable f => f (Set a) -> Int + go fsa = (Monoid.getSum . foldMap (Monoid.Sum . 
Set.size)) fsa + length fsa + trimap :: (Ord t', Ord h', Ord d') => (t -> t') -> diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index f81f16abd9..d570621f7f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -82,6 +82,9 @@ loadHashId :: DB m => Base32Hex -> m (Maybe HashId) loadHashId base32 = queryOnly sql (Only base32) where sql = [here| SELECT id FROM hash WHERE base32 = ? |] +loadHashIdByHash :: DB m => Hash -> m (Maybe HashId) +loadHashIdByHash = loadHashId . Hash.toBase32Hex + loadHashById :: EDB m => HashId -> m Base32Hex loadHashById h = queryOnly sql (Only h) >>= orError (UnknownHashId h) where sql = [here| SELECT base32 FROM hash WHERE id = ? |] @@ -275,6 +278,18 @@ addToTypeIndex tp tm = execute sql (tp :. tm) where sql = [here| ) VALUES (?, ?, ?, ?, ?, ?) |] +getReferentsByType :: DB m => Reference' TextId HashId -> m [Referent.Id] +getReferentsByType r = query sql r where sql = [here| + SELECT + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index + FROM find_type_index + WHERE type_reference_builtin = ? + AND type_reference_hash_id = ? + AND type_reference_component_index = ? +|] + addToTypeMentionsIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| INSERT OR IGNORE INTO find_type_mentions_index ( @@ -287,6 +302,18 @@ addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| ) VALUES (?, ?, ?, ?, ?, ?) |] +getReferentsByTypeMention :: DB m => Reference' TextId HashId -> m [Referent.Id] +getReferentsByTypeMention r = query sql r where sql = [here| + SELECT + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index + FROM find_type_mentions_index + WHERE type_reference_builtin = ? + AND type_reference_hash_id = ? 
+ AND type_reference_component_index = ? +|] + addToDependentsIndex :: DB m => Reference.Reference -> Reference.Id -> m () addToDependentsIndex dependency dependent = execute sql (dependency :. dependent) where sql = [here| diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs index 1616b405b2..1f59096a8a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -1,26 +1,34 @@ {-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE TypeSynonymInstances #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE TypeSynonymInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} module U.Codebase.Sqlite.Referent where -import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(..)) - +import Control.Applicative (liftA3) +import Database.SQLite.Simple (FromRow (..), Only (..), SQLData (..), ToRow (..), field) +import qualified U.Codebase.Reference as Reference import U.Codebase.Referent (Id', Referent') -import qualified U.Codebase.Sqlite.Reference as Sqlite -import U.Codebase.Sqlite.DbId (ObjectId) import qualified U.Codebase.Referent as Referent -import qualified U.Codebase.Reference as Reference +import U.Codebase.Sqlite.DbId (ObjectId) +import qualified U.Codebase.Sqlite.Reference as Sqlite type Referent = Referent' Sqlite.Reference Sqlite.Reference + type ReferentH = Referent' Sqlite.ReferenceH Sqlite.ReferenceH + type Id = Id' ObjectId ObjectId type LocalReferent = Referent' Sqlite.LocalReference Sqlite.LocalReference instance ToRow Id where - -- | objectId, componentIndex, constructorIndex toRow = \case Referent.RefId (Reference.Id h i) -> toRow (Only h) ++ toRow (Only i) ++ [SQLNull] Referent.ConId (Reference.Id h i) cid -> toRow (Only h) ++ toRow (Only i) ++ toRow (Only cid) + +instance FromRow Id where + fromRow = liftA3 mkId field field field + where + mkId h i mayCid = case mayCid of + Nothing -> Referent.RefId (Reference.Id h i) + Just 
cid -> Referent.ConId (Reference.Id h i) cid diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index c2d83a2120..524f3a0884 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -14,11 +14,13 @@ newtype NameSegment = NameSegment Text deriving (Eq, Ord, Show) newtype MdValues = MdValues (Set Reference) deriving (Eq, Ord, Show) +type Root m = CausalHead m CausalHash BranchHash (Branch m) + data Branch m = Branch { terms :: Map NameSegment (Map Referent (m MdValues)), types :: Map NameSegment (Map Reference (m MdValues)), patches :: Map NameSegment (PatchHash, m Patch), - children :: Map NameSegment (CausalHead m CausalHash BranchHash (Branch m)) + children :: Map NameSegment (Root m) } data Patch = Patch diff --git a/codebase2/codebase/U/Codebase/Codebase.hs b/codebase2/codebase/U/Codebase/Codebase.hs index 4d9e1d725b..5cb35dfa96 100644 --- a/codebase2/codebase/U/Codebase/Codebase.hs +++ b/codebase2/codebase/U/Codebase/Codebase.hs @@ -4,10 +4,10 @@ module U.Codebase.Codebase where import Data.Set (Set) import Data.Text (Text) -import U.Codebase.Branch (Branch) +import U.Codebase.Branch (Branch, Patch) import U.Codebase.Causal (Causal) import U.Codebase.Decl (Decl) -import U.Codebase.HashTags (BranchHash, CausalHash) +import U.Codebase.HashTags (BranchHash, CausalHash, PatchHash) import U.Codebase.Reference (Reference) import qualified U.Codebase.Reference as Reference import qualified U.Codebase.Referent as Referent @@ -28,6 +28,8 @@ data Codebase m v = Codebase getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v)), putTerm :: Reference.Id -> Term v -> TypeT v -> m (), putTypeDeclaration :: Reference.Id -> Decl v -> m (), + getPatch :: PatchHash -> m Patch, + putPatch :: PatchHash -> Patch -> m (), getBranch :: BranchHash -> m (Maybe (Branch m)), getRootBranch :: m (Either GetRootBranchError (Branch m)), putRootBranch :: Branch m -> m (), From 
c7d565d49da6487202982d041b2f6ae4471e5b60 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 2 Dec 2020 11:15:15 -0500 Subject: [PATCH 054/225] v1 codebase dependentsImpl, etc. --- .../U/Codebase/Sqlite/Operations.hs | 151 +++++++++++------- .../U/Codebase/Sqlite/Reference.hs | 20 ++- .../U/Codebase/Sqlite/Referent.hs | 1 + codebase2/util/U/Util/Lens.hs | 1 + .../src/Unison/Codebase/SqliteCodebase.hs | 6 +- 5 files changed, 116 insertions(+), 63 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index da84ae15f3..174b0b4210 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -25,6 +25,7 @@ import Data.ByteString (ByteString) import Data.Bytes.Get (runGetS) import qualified Data.Foldable as Foldable import Data.Functor (void, (<&>)) +import Data.Functor.Identity (Identity) import Data.Map (Map) import qualified Data.Map as Map import qualified Data.Map.Merge.Lazy as Map @@ -87,6 +88,7 @@ import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Reference as S import qualified U.Codebase.Sqlite.Reference as S.Reference import qualified U.Codebase.Sqlite.Referent as S +import qualified U.Codebase.Sqlite.Referent as S.Referent import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sqlite.Symbol (Symbol) import qualified U.Codebase.Sqlite.SyncEntity as SE @@ -107,8 +109,6 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import Data.Functor.Identity (Identity) -import qualified U.Codebase.Sqlite.Referent as S.Referent type Err m = MonadError Error m @@ -213,21 +213,6 @@ s2cReferentId = bitraverse loadHashByObjectId loadHashByObjectId h2cReferent :: EDB m => S.ReferentH -> m C.Referent h2cReferent = bitraverse h2cReference h2cReference --- ** write 
new references - -saveReferent :: EDB m => C.Referent -> m S.Referent -saveReferent = bitraverse saveReference saveReference - --- | a referenced object must necessarily exist in the db already -saveReference :: EDB m => C.Reference -> m S.Reference -saveReference = bitraverse Q.saveText hashToObjectId - -saveReferentH :: DB m => C.Referent -> m S.ReferentH -saveReferentH = bitraverse saveReferenceH saveReferenceH - -saveReferenceH :: DB m => C.Reference -> m S.ReferenceH -saveReferenceH = bitraverse Q.saveText Q.saveHashHash - -- * Edits transformations s2cTermEdit :: EDB m => S.TermEdit -> m C.TermEdit @@ -252,16 +237,6 @@ s2cTypeEdit = \case S.TypeEdit.Replace r -> C.TypeEdit.Replace <$> s2cReference r S.TypeEdit.Deprecate -> pure C.TypeEdit.Deprecate -saveTermEdit :: EDB m => C.TermEdit -> m S.TermEdit -saveTermEdit = \case - C.TermEdit.Replace r t -> S.TermEdit.Replace <$> saveReference r <*> pure (c2sTyping t) - C.TermEdit.Deprecate -> pure S.TermEdit.Deprecate - -saveTypeEdit :: EDB m => C.TypeEdit -> m S.TypeEdit -saveTypeEdit = \case - C.TypeEdit.Replace r -> S.TypeEdit.Replace <$> saveReference r - C.TypeEdit.Deprecate -> pure S.TypeEdit.Deprecate - -- * Branch transformation s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) @@ -351,9 +326,12 @@ loadPatchById patchId = let diff = Set.difference src del in if diff == mempty then Nothing else Just diff -savePatch :: DB m => C.Patch -> m Db.PatchObjectId -savePatch (C.Patch termEdits typeEdits) = do - error "todo" +savePatch :: EDB m => PatchHash -> C.Patch -> m Db.PatchObjectId +savePatch h c = do + (li, lPatch) <- c2lPatch c + hashId <- Q.saveHashHash (unPatchHash h) + let bytes = S.putBytes S.putPatchFormat $ S.PatchFormat.Full li lPatch + Db.PatchObjectId <$> Q.saveObject hashId OT.Patch bytes s2cPatch :: EDB m => S.Patch -> m C.Patch s2cPatch (S.Patch termEdits typeEdits) = @@ -361,11 +339,74 @@ s2cPatch (S.Patch termEdits typeEdits) = <$> Map.bitraverse h2cReferent (Set.traverse s2cTermEdit) termEdits 
<*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits -savePatchHashes :: EDB m => C.Patch -> m S.Patch -savePatchHashes (C.Patch termEdits typeEdits) = - S.Patch - <$> Map.bitraverse saveReferentH (Set.traverse saveTermEdit) termEdits - <*> Map.bitraverse saveReferenceH (Set.traverse saveTypeEdit) typeEdits +-- | assumes that all relevant values are already in the DB +c2lPatch :: EDB m => C.Patch -> m (S.PatchLocalIds, S.LocalPatch) +c2lPatch (C.Patch termEdits typeEdits) = + done =<< (runWriterT . flip evalStateT startState) do + S.Patch + <$> Map.bitraverse saveReferentH (Set.traverse saveTermEdit) termEdits + <*> Map.bitraverse saveReferenceH (Set.traverse saveTypeEdit) typeEdits + where + startState = mempty @(Map Text LocalTextId, Map H.Hash LocalHashId, Map H.Hash LocalDefnId) + done :: + EDB m => + (a, (Seq Text, Seq H.Hash, Seq H.Hash)) -> + m (S.PatchFormat.PatchLocalIds, a) + done (lPatch, (textValues, hashValues, defnValues)) = do + textIds <- liftQ $ traverse Q.saveText textValues + hashIds <- liftQ $ traverse Q.saveHashHash hashValues + objectIds <- traverse hashToObjectId defnValues + let ids = + S.PatchFormat.LocalIds + (Vector.fromList (Foldable.toList textIds)) + (Vector.fromList (Foldable.toList hashIds)) + (Vector.fromList (Foldable.toList objectIds)) + pure (ids, lPatch) + + lookupText :: + ( MonadState s m, + MonadWriter w m, + Lens.Field1' s (Map t LocalTextId), + Lens.Field1' w (Seq t), + Ord t + ) => + t -> + m LocalTextId + lookupText = lookup_ Lens._1 Lens._1 LocalTextId + + lookupHash :: + ( MonadState s m, + MonadWriter w m, + Lens.Field2' s (Map d LocalHashId), + Lens.Field2' w (Seq d), + Ord d + ) => + d -> + m LocalHashId + lookupHash = lookup_ Lens._2 Lens._2 LocalHashId + + lookupDefn :: + ( MonadState s m, + MonadWriter w m, + Lens.Field3' s (Map d LocalDefnId), + Lens.Field3' w (Seq d), + Ord d + ) => + d -> + m LocalDefnId + lookupDefn = lookup_ Lens._3 Lens._3 LocalDefnId + + saveTermEdit = \case + 
C.TermEdit.Replace r t -> S.TermEdit.Replace <$> saveReference r <*> pure (c2sTyping t) + C.TermEdit.Deprecate -> pure S.TermEdit.Deprecate + + saveTypeEdit = \case + C.TypeEdit.Replace r -> S.TypeEdit.Replace <$> saveReference r + C.TypeEdit.Deprecate -> pure S.TypeEdit.Deprecate + + saveReference = bitraverse lookupText lookupDefn + saveReferenceH = bitraverse lookupText lookupHash + saveReferentH = bitraverse saveReferenceH saveReferenceH -- | produces a diff -- diff = full - ref; full = diff + ref @@ -380,12 +421,12 @@ diffPatch (S.Patch fullTerms fullTypes) (S.Patch refTerms refTypes) = removeTermEdits = Map.merge Map.dropMissing Map.preserveMissing removeDiffSet fullTerms refTerms removeTypeEdits = Map.merge Map.dropMissing Map.preserveMissing removeDiffSet fullTypes refTypes -- things that are present in full but absent in ref - addDiffSet, removeDiffSet :: - (Ord k, Ord a) => Map.WhenMatched Identity k (Set a) (Set a) (Set a) + addDiffSet, + removeDiffSet :: + (Ord k, Ord a) => Map.WhenMatched Identity k (Set a) (Set a) (Set a) addDiffSet = Map.zipWithMatched (const Set.difference) removeDiffSet = Map.zipWithMatched (const (flip Set.difference)) - -- implementation detail of loadPatchById? lookupPatchLocalText :: S.PatchLocalIds -> LocalTextId -> Db.TextId lookupPatchLocalText li (LocalTextId w) = S.PatchFormat.patchTextLookup li Vector.! 
fromIntegral w @@ -517,16 +558,16 @@ c2xTerm saveText saveDefn tm tp = C.Term.Nat n -> pure $ C.Term.Nat n C.Term.Float n -> pure $ C.Term.Float n C.Term.Boolean b -> pure $ C.Term.Boolean b - C.Term.Text t -> C.Term.Text <$> lookupText_ t + C.Term.Text t -> C.Term.Text <$> lookupText t C.Term.Char ch -> pure $ C.Term.Char ch C.Term.Ref r -> - C.Term.Ref <$> bitraverse lookupText_ (traverse lookupDefn_) r + C.Term.Ref <$> bitraverse lookupText (traverse lookupDefn) r C.Term.Constructor typeRef cid -> C.Term.Constructor - <$> bitraverse lookupText_ lookupDefn_ typeRef + <$> bitraverse lookupText lookupDefn typeRef <*> pure cid C.Term.Request typeRef cid -> - C.Term.Request <$> bitraverse lookupText_ lookupDefn_ typeRef <*> pure cid + C.Term.Request <$> bitraverse lookupText lookupDefn typeRef <*> pure cid C.Term.Handle a a2 -> pure $ C.Term.Handle a a2 C.Term.App a a2 -> pure $ C.Term.App a a2 C.Term.Ann a typ -> C.Term.Ann a <$> ABT.transformM goType typ @@ -541,18 +582,18 @@ c2xTerm saveText saveDefn tm tp = C.Term.TermLink r -> C.Term.TermLink <$> bitraverse - (bitraverse lookupText_ (traverse lookupDefn_)) - (bitraverse lookupText_ lookupDefn_) + (bitraverse lookupText (traverse lookupDefn)) + (bitraverse lookupText lookupDefn) r C.Term.TypeLink r -> - C.Term.TypeLink <$> bitraverse lookupText_ lookupDefn_ r + C.Term.TypeLink <$> bitraverse lookupText lookupDefn r goType :: forall m a. 
(MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text LocalTextId, Map H.Hash LocalDefnId) m) => C.Type.FT a -> m (S.Term.FT a) goType = \case - C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText_ lookupDefn_ r + C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText lookupDefn r C.Type.Arrow i o -> pure $ C.Type.Arrow i o C.Type.Ann a k -> pure $ C.Type.Ann a k C.Type.App f a -> pure $ C.Type.App f a @@ -592,12 +633,12 @@ c2xTerm saveText saveDefn tm tp = C.Term.PInt i -> pure $ C.Term.PInt i C.Term.PNat n -> pure $ C.Term.PNat n C.Term.PFloat d -> pure $ C.Term.PFloat d - C.Term.PText t -> C.Term.PText <$> lookupText_ t + C.Term.PText t -> C.Term.PText <$> lookupText t C.Term.PChar c -> pure $ C.Term.PChar c - C.Term.PConstructor r i ps -> C.Term.PConstructor <$> bitraverse lookupText_ lookupDefn_ r <*> pure i <*> traverse goPat ps + C.Term.PConstructor r i ps -> C.Term.PConstructor <$> bitraverse lookupText lookupDefn r <*> pure i <*> traverse goPat ps C.Term.PAs p -> C.Term.PAs <$> goPat p C.Term.PEffectPure p -> C.Term.PEffectPure <$> goPat p - C.Term.PEffectBind r i bindings k -> C.Term.PEffectBind <$> bitraverse lookupText_ lookupDefn_ r <*> pure i <*> traverse goPat bindings <*> goPat k + C.Term.PEffectBind r i bindings k -> C.Term.PEffectBind <$> bitraverse lookupText lookupDefn r <*> pure i <*> traverse goPat bindings <*> goPat k C.Term.PSequenceLiteral ps -> C.Term.PSequenceLiteral <$> traverse goPat ps C.Term.PSequenceOp l op r -> C.Term.PSequenceOp <$> goPat l <*> pure op <*> goPat r @@ -611,7 +652,8 @@ c2xTerm saveText saveDefn tm tp = (Vector.fromList (Foldable.toList defnIds)) pure (ids, void tm, void <$> tp) -lookupText_ :: +lookupText :: + forall m s w t. ( MonadState s m, MonadWriter w m, Lens.Field1' s (Map t LocalTextId), @@ -620,9 +662,10 @@ lookupText_ :: ) => t -> m LocalTextId -lookupText_ = lookup_ Lens._1 Lens._1 LocalTextId +lookupText = lookup_ Lens._1 Lens._1 LocalTextId -lookupDefn_ :: +lookupDefn :: + forall m s w d. 
( MonadState s m, MonadWriter w m, Lens.Field2' s (Map d LocalDefnId), @@ -631,7 +674,7 @@ lookupDefn_ :: ) => d -> m LocalDefnId -lookupDefn_ = lookup_ Lens._2 Lens._2 LocalDefnId +lookupDefn = lookup_ Lens._2 Lens._2 LocalDefnId -- | shared implementation of lookupTextHelper and lookupDefnHelper -- Look up a value in the LUT, or append it. @@ -670,7 +713,7 @@ c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do C.Type.FD a -> m (S.Decl.F a) goType = \case - C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText_ (traverse lookupDefn_) r + C.Type.Ref r -> C.Type.Ref <$> bitraverse lookupText (traverse lookupDefn) r C.Type.Arrow i o -> pure $ C.Type.Arrow i o C.Type.Ann a k -> pure $ C.Type.Ann a k C.Type.App f a -> pure $ C.Type.App f a diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index e9768972d6..043aae3a01 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -4,32 +4,36 @@ module U.Codebase.Sqlite.Reference where -import U.Codebase.Sqlite.DbId -import U.Codebase.Reference (Reference'(ReferenceBuiltin, ReferenceDerived), Id'(Id)) -import Database.SQLite.Simple (SQLData(..), Only(..), ToRow(toRow)) -import Database.SQLite.Simple.FromRow (FromRow(fromRow), field) -import Database.SQLite.Simple.ToField (ToField) +import Database.SQLite.Simple (Only (..), SQLData (..), ToRow (toRow)) import Database.SQLite.Simple.FromField (FromField) -import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalTextId) +import Database.SQLite.Simple.FromRow (FromRow (fromRow), field) +import Database.SQLite.Simple.ToField (ToField) +import U.Codebase.Reference (Id' (Id), Reference' (ReferenceBuiltin, ReferenceDerived)) +import U.Codebase.Sqlite.DbId (HashId, ObjectId, TextId) +import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) type Reference = Reference' TextId ObjectId + type Id = Id' 
ObjectId +type LocalReferenceH = Reference' LocalTextId LocalHashId + type LocalReference = Reference' LocalTextId LocalDefnId + type LocalId = Id' LocalDefnId type ReferenceH = Reference' TextId HashId + type IdH = Id' HashId -- * Orphan instances + instance ToRow (Reference' TextId HashId) where - -- | builtinId, hashId, componentIndex toRow = \case ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] ReferenceDerived (Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) instance ToRow (Reference' TextId ObjectId) where - -- | builtinId, hashId, componentIndex toRow = \case ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] ReferenceDerived (Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs index 1f59096a8a..1b3c276896 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Referent.hs @@ -20,6 +20,7 @@ type ReferentH = Referent' Sqlite.ReferenceH Sqlite.ReferenceH type Id = Id' ObjectId ObjectId type LocalReferent = Referent' Sqlite.LocalReference Sqlite.LocalReference +type LocalReferentH = Referent' Sqlite.LocalReferenceH Sqlite.LocalReferenceH instance ToRow Id where toRow = \case diff --git a/codebase2/util/U/Util/Lens.hs b/codebase2/util/U/Util/Lens.hs index edf525505f..e915a89d88 100644 --- a/codebase2/util/U/Util/Lens.hs +++ b/codebase2/util/U/Util/Lens.hs @@ -6,3 +6,4 @@ import qualified Control.Lens as Lens type Field1' s a = Lens.Field1 s s a a type Field2' s a = Lens.Field2 s s a a +type Field3' s a = Lens.Field3 s s a a diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index e746caceee..5380f72640 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -35,6 +35,7 @@ import 
U.Codebase.Sqlite.Operations (EDB) import qualified U.Codebase.Sqlite.Operations as Ops import qualified U.Util.Hash as H2 import qualified U.Util.Monoid as Monoid +import qualified U.Util.Set as Set import qualified Unison.Builtin as Builtins import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 @@ -255,7 +256,10 @@ sqliteCodebase root = do getBranchForHash = error "todo" dependentsImpl :: Reference -> IO (Set Reference.Id) - dependentsImpl = error "todo" + dependentsImpl r = + runDB conn $ + Set.traverse (Cv.referenceid2to1 getCycleLen) + =<< Ops.dependents (Cv.reference1to2 r) syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () syncFromDirectory = error "todo" From 42a3b66b8de78efffc012357fb2b59364e08cb66 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 5 Dec 2020 10:27:18 -0500 Subject: [PATCH 055/225] some cleanup + rearranging for haddock --- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 2 +- .../U/Codebase/Sqlite/Operations.hs | 675 +++++++++--------- codebase2/codebase/U/Codebase/Branch.hs | 1 + .../src/Unison/Codebase/SqliteCodebase.hs | 10 +- .../Codebase/SqliteCodebase/Conversions.hs | 38 +- questions.md | 158 ++++ 6 files changed, 560 insertions(+), 324 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 6184b12f1f..8519d72352 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -31,7 +31,7 @@ newtype BranchObjectId = BranchObjectId { unBranchObjectId :: ObjectId } derivin newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId -newtype CausalHashId = CausalId { unCausalHashId :: HashId } deriving (Eq, Ord, Show) +newtype CausalHashId = CausalHashId { unCausalHashId :: HashId } deriving (Eq, Ord, Show) deriving (Num, 
Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 174b0b4210..5e1180d6b4 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -110,6 +110,7 @@ import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +-- * Error handling type Err m = MonadError Error m type EDB m = (Err m, DB m) @@ -158,14 +159,21 @@ lookupTextId t = loadTextById :: EDB m => Db.TextId -> m Text loadTextById = liftQ . Q.loadTextById +-- | Q: Any Hash that UCM gets ahold of should already exist in the DB? +-- because it came from a sync or from a save +hashToHashId :: EDB m => H.Hash -> m Db.HashId +hashToHashId = liftQ . Q.expectHashIdByHash + hashToObjectId :: EDB m => H.Hash -> m Db.ObjectId hashToObjectId h = do (Q.loadHashId . H.toBase32Hex) h >>= \case Just hashId -> liftQ $ Q.expectObjectIdForPrimaryHashId hashId Nothing -> throwError $ UnknownDependency h -objectExistsForHash :: EDB m => H.Hash -> m Bool -objectExistsForHash h = isJust <$> runMaybeT ((Q.loadHashId . H.toBase32Hex) h) +objectExistsForHash :: DB m => H.Hash -> m Bool +objectExistsForHash h = isJust <$> runMaybeT do + id <- MaybeT . Q.loadHashId . H.toBase32Hex $ h + Q.maybeObjectIdForAnyHashId id loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash loadHashByObjectId = fmap H.fromBase32Hex . liftQ . 
Q.loadPrimaryHashByObjectId @@ -213,7 +221,7 @@ s2cReferentId = bitraverse loadHashByObjectId loadHashByObjectId h2cReferent :: EDB m => S.ReferentH -> m C.Referent h2cReferent = bitraverse h2cReference h2cReference --- * Edits transformations +-- ** Edits transformations s2cTermEdit :: EDB m => S.TermEdit -> m C.TermEdit s2cTermEdit = \case @@ -237,108 +245,6 @@ s2cTypeEdit = \case S.TypeEdit.Replace r -> C.TypeEdit.Replace <$> s2cReference r S.TypeEdit.Deprecate -> pure C.TypeEdit.Deprecate --- * Branch transformation - -s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) -s2cBranch (S.Branch.Full.Branch tms tps patches children) = - C.Branch - <$> doTerms tms - <*> doTypes tps - <*> doPatches patches - <*> doChildren children - where - doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Referent (m C.MdValues))) - doTerms = - Map.bitraverse - (fmap C.NameSegment . loadTextById) - ( Map.bitraverse s2cReferent \case - S.MetadataSet.Inline rs -> - pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs - ) - doTypes :: EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Reference (m C.MdValues))) - doTypes = - Map.bitraverse - (fmap C.NameSegment . loadTextById) - ( Map.bitraverse s2cReference \case - S.MetadataSet.Inline rs -> - pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs - ) - doPatches :: EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) - doPatches = Map.bitraverse (fmap C.NameSegment . loadTextById) \patchId -> do - h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId) patchId - pure (h, loadPatchById patchId) - - doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) - doChildren = Map.bitraverse (fmap C.NameSegment . 
loadTextById) \(boId, chId) -> - C.CausalHead <$> loadCausalHashById chId - <*> loadValueHashByCausalHashId chId - <*> headParents chId - <*> pure (loadBranchByObjectId boId) - where - headParents :: EDB m => Db.CausalHashId -> m (Map CausalHash (m (C.Causal m CausalHash BranchHash (C.Branch m)))) - headParents chId = do - parentsChIds <- Q.loadCausalParents chId - fmap Map.fromList $ traverse pairParent parentsChIds - pairParent :: EDB m => Db.CausalHashId -> m (CausalHash, m (C.Causal m CausalHash BranchHash (C.Branch m))) - pairParent chId = do - h <- loadCausalHashById chId - pure (h, loadCausal chId) - loadCausal :: EDB m => Db.CausalHashId -> m (C.Causal m CausalHash BranchHash (C.Branch m)) - loadCausal chId = do - C.Causal <$> loadCausalHashById chId - <*> loadValueHashByCausalHashId chId - <*> headParents chId - <*> pure (loadValue chId) - loadValue :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) - loadValue chId = do - boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId - traverse loadBranchByObjectId boId - -saveRootBranch :: EDB m => C.Branch.Root m -> m (Db.BranchObjectId, Db.CausalHashId) -saveRootBranch (C.CausalHead _hc _he _parents _me) = error "todo" - where - _c2sBranch :: EDB m => C.Branch m -> m S.DbBranch - _c2sBranch = error "todo" - --- * Patch transformation - -loadPatchById :: EDB m => Db.PatchObjectId -> m C.Patch -loadPatchById patchId = - deserializePatchObject patchId >>= \case - S.PatchFormat.Full li p -> s2cPatch (l2sPatchFull li p) - S.PatchFormat.Diff ref li d -> doDiff ref [l2sPatchDiff li d] - where - doDiff :: EDB m => Db.PatchObjectId -> [S.PatchDiff] -> m C.Patch - doDiff ref ds = - deserializePatchObject ref >>= \case - S.PatchFormat.Full li f -> joinFull (l2sPatchFull li f) ds - S.PatchFormat.Diff ref' li' d' -> doDiff ref' (l2sPatchDiff li' d' : ds) - joinFull :: EDB m => S.Patch -> [S.PatchDiff] -> m C.Patch - joinFull f [] = s2cPatch f - joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits 
addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds - where - f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) - addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) - addRemove add del src = - (Map.unionWith (<>) add (Map.differenceWith remove src del)) - remove :: Ord b => Set b -> Set b -> Maybe (Set b) - remove src del = - let diff = Set.difference src del - in if diff == mempty then Nothing else Just diff - -savePatch :: EDB m => PatchHash -> C.Patch -> m Db.PatchObjectId -savePatch h c = do - (li, lPatch) <- c2lPatch c - hashId <- Q.saveHashHash (unPatchHash h) - let bytes = S.putBytes S.putPatchFormat $ S.PatchFormat.Full li lPatch - Db.PatchObjectId <$> Q.saveObject hashId OT.Patch bytes - -s2cPatch :: EDB m => S.Patch -> m C.Patch -s2cPatch (S.Patch termEdits typeEdits) = - C.Patch - <$> Map.bitraverse h2cReferent (Set.traverse s2cTermEdit) termEdits - <*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits - -- | assumes that all relevant values are already in the DB c2lPatch :: EDB m => C.Patch -> m (S.PatchLocalIds, S.LocalPatch) c2lPatch (C.Patch termEdits typeEdits) = @@ -427,30 +333,6 @@ diffPatch (S.Patch fullTerms fullTypes) (S.Patch refTerms refTypes) = addDiffSet = Map.zipWithMatched (const Set.difference) removeDiffSet = Map.zipWithMatched (const (flip Set.difference)) --- implementation detail of loadPatchById? -lookupPatchLocalText :: S.PatchLocalIds -> LocalTextId -> Db.TextId -lookupPatchLocalText li (LocalTextId w) = S.PatchFormat.patchTextLookup li Vector.! fromIntegral w - -lookupPatchLocalHash :: S.PatchLocalIds -> LocalHashId -> Db.HashId -lookupPatchLocalHash li (LocalHashId w) = S.PatchFormat.patchHashLookup li Vector.! fromIntegral w - -lookupPatchLocalDefn :: S.PatchLocalIds -> LocalDefnId -> Db.ObjectId -lookupPatchLocalDefn li (LocalDefnId w) = S.PatchFormat.patchDefnLookup li Vector.! 
fromIntegral w - -l2sPatchFull :: S.PatchFormat.PatchLocalIds -> S.LocalPatch -> S.Patch -l2sPatchFull li = - S.Patch.Full.trimap - (lookupPatchLocalText li) - (lookupPatchLocalHash li) - (lookupPatchLocalDefn li) - -l2sPatchDiff :: S.PatchFormat.PatchLocalIds -> S.LocalPatchDiff -> S.PatchDiff -l2sPatchDiff li = - S.PatchDiff.trimap - (lookupPatchLocalText li) - (lookupPatchLocalHash li) - (lookupPatchLocalDefn li) - -- * Deserialization helpers decodeComponentLengthOnly :: Err m => ByteString -> m Word64 @@ -484,66 +366,22 @@ getDeclTypeByReference r@(C.Reference.Id h pos) = >>= maybe (throwError $ LegacyUnknownConstructorType h pos) pure >>= pure . C.Decl.declType --- * Codebase operations - -loadTermWithTypeByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol, C.Term.Type Symbol) -loadTermWithTypeByReference (C.Reference.Id h i) = - hashToObjectId h - >>= liftQ . Q.loadObjectById - -- retrieve and deserialize the blob - >>= decodeTermElementWithType i - >>= uncurry3 s2cTermWithType - -loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) -loadTermByReference (C.Reference.Id h i) = - hashToObjectId h - >>= liftQ . 
Q.loadObjectById - -- retrieve and deserialize the blob - >>= decodeTermElementDiscardingType i - >>= uncurry s2cTerm - -s2cTermWithType :: EDB m => LocalIds -> S.Term.Term -> S.Term.Type -> MaybeT m (C.Term Symbol, C.Term.Type Symbol) -s2cTermWithType ids tm tp = do - (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids - pure (x2cTerm substText substHash tm, x2cTType substText substHash tp) - -s2cTerm :: EDB m => LocalIds -> S.Term.Term -> MaybeT m (C.Term Symbol) -s2cTerm ids tm = do - (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids - pure $ x2cTerm substText substHash tm - -s2cTypeOfTerm :: EDB m => LocalIds -> S.Term.Type -> MaybeT m (C.Term.Type Symbol) -s2cTypeOfTerm ids tp = do - (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids - pure $ x2cTType substText substHash tp - -w2cTerm :: EDB m => WatchLocalIds -> S.Term.Term -> MaybeT m (C.Term Symbol) -w2cTerm ids tm = do - (substText, substHash) <- localIdsToLookups loadTextById loadHashByHashId ids - pure $ x2cTerm substText substHash tm - -localIdsToLookups :: Monad m => (t -> m Text) -> (d -> m H.Hash) -> LocalIds' t d -> m (LocalTextId -> Text, LocalDefnId -> H.Hash) -localIdsToLookups loadText loadHash localIds = do - texts <- traverse loadText $ LocalIds.textLookup localIds - hashes <- traverse loadHash $ LocalIds.defnLookup localIds - let substText (LocalTextId w) = texts Vector.! fromIntegral w - substHash (LocalDefnId w) = hashes Vector.! fromIntegral w - pure (substText, substHash) +componentByObjectId :: EDB m => Db.ObjectId -> m [S.Reference.Id] +componentByObjectId id = do + len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly + pure [C.Reference.Id id i | i <- [0 .. 
len - 1]] -x2cTerm :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Term -> C.Term Symbol -x2cTerm substText substHash = - -- substitute the text and hashes back into the term - C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id - where - substTermRef = bimap substText (fmap substHash) - substTypeRef = bimap substText substHash - substTermLink = bimap substTermRef substTypeRef - substTypeLink = substTypeRef +-- * Codebase operations -x2cTType :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Type -> C.Term.Type Symbol -x2cTType substText substHash = C.Type.rmap (bimap substText substHash) +-- ** Saving & loading terms +saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m Db.ObjectId +saveTermComponent h terms = do + sTermElements <- traverse (uncurry c2sTerm) terms + hashId <- Q.saveHashHash h + let bytes = S.putBytes S.putTermComponent (S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements) + Q.saveObject hashId OT.TermComponent bytes --- | Shared implementation for preparing term definition+type or watch expression result for database. +-- | implementation detail of c2{s,w}Term -- The Type is optional, because we don't store them for watch expression results. c2xTerm :: forall m t d. Monad m => (Text -> m t) -> (H.Hash -> m d) -> C.Term Symbol -> Maybe (C.Term.Type Symbol) -> m (LocalIds' t d, S.Term.Term, Maybe (S.Term.Type)) c2xTerm saveText saveDefn tm tp = @@ -652,6 +490,69 @@ c2xTerm saveText saveDefn tm tp = (Vector.fromList (Foldable.toList defnIds)) pure (ids, void tm, void <$> tp) +loadTermWithTypeByReference :: EDB m => C.Reference.Id -> m (C.Term Symbol, C.Term.Type Symbol) +loadTermWithTypeByReference (C.Reference.Id h i) = + hashToObjectId h + >>= liftQ . 
Q.loadObjectById + -- retrieve and deserialize the blob + >>= decodeTermElementWithType i + >>= uncurry3 s2cTermWithType + +loadTermByReference :: EDB m => C.Reference.Id -> m (C.Term Symbol) +loadTermByReference (C.Reference.Id h i) = + hashToObjectId h + >>= liftQ . Q.loadObjectById + -- retrieve and deserialize the blob + >>= decodeTermElementDiscardingType i + >>= uncurry s2cTerm + +loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) +loadTypeOfTermByTermReference (C.Reference.Id h i) = + hashToObjectId h + >>= liftQ . Q.loadObjectById + -- retrieve and deserialize the blob + >>= decodeTermElementDiscardingTerm i + >>= uncurry s2cTypeOfTerm + +s2cTermWithType :: EDB m => LocalIds -> S.Term.Term -> S.Term.Type -> m (C.Term Symbol, C.Term.Type Symbol) +s2cTermWithType ids tm tp = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids + pure (x2cTerm substText substHash tm, x2cTType substText substHash tp) + +s2cTerm :: EDB m => LocalIds -> S.Term.Term -> m (C.Term Symbol) +s2cTerm ids tm = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids + pure $ x2cTerm substText substHash tm + +s2cTypeOfTerm :: EDB m => LocalIds -> S.Term.Type -> m (C.Term.Type Symbol) +s2cTypeOfTerm ids tp = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids + pure $ x2cTType substText substHash tp + +-- |implementation detail of {s,w}2c*Term* +localIdsToLookups :: Monad m => (t -> m Text) -> (d -> m H.Hash) -> LocalIds' t d -> m (LocalTextId -> Text, LocalDefnId -> H.Hash) +localIdsToLookups loadText loadHash localIds = do + texts <- traverse loadText $ LocalIds.textLookup localIds + hashes <- traverse loadHash $ LocalIds.defnLookup localIds + let substText (LocalTextId w) = texts Vector.! fromIntegral w + substHash (LocalDefnId w) = hashes Vector.! 
fromIntegral w + pure (substText, substHash) + +-- |implementation detail of {s,w}2c*Term* +x2cTerm :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Term -> C.Term Symbol +x2cTerm substText substHash = + -- substitute the text and hashes back into the term + C.Term.extraMap substText substTermRef substTypeRef substTermLink substTypeLink id + where + substTermRef = bimap substText (fmap substHash) + substTypeRef = bimap substText substHash + substTermLink = bimap substTermRef substTypeRef + substTypeLink = substTypeRef + +-- |implementation detail of {s,w}2c*Term* +x2cTType :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Type -> C.Term.Type Symbol +x2cTType substText substHash = C.Type.rmap (bimap substText substHash) + lookupText :: forall m s w t. ( MonadState s m, @@ -695,19 +596,57 @@ lookup_ stateLens writerLens mk t = do pure id Just t' -> pure t' +c2sTerm :: EDB m => C.Term Symbol -> C.Term.Type Symbol -> m (LocalIds, S.Term.Term, S.Term.Type) +c2sTerm tm tp = c2xTerm Q.saveText hashToObjectId tm (Just tp) <&> \(w, tm, Just tp) -> (w, tm, tp) + +-- *** Watch expressions +listWatches :: EDB m => WatchKind -> m [C.Reference.Id] +listWatches k = Q.loadWatchesByWatchKind k >>= traverse s2cReferenceId + +-- | returns Nothing is the expression isn't cached. +loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) +loadWatch k r = + C.Reference.idH Q.saveHashHash r + >>= MaybeT . 
Q.loadWatch k + >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getLocalIds S.getTerm) + >>= uncurry s2cTerm + +saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () +saveWatch w r t = do + rs <- C.Reference.idH Q.saveHashHash r + wterm <- c2wTerm t + let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm + Q.saveWatch w rs bytes + c2wTerm :: DB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) c2wTerm tm = c2xTerm Q.saveText Q.saveHashHash tm Nothing <&> \(w, tm, _) -> (w, tm) -c2sTerm :: EDB m => C.Term Symbol -> C.Term.Type Symbol -> m (LocalIds, S.Term.Term, S.Term.Type) -c2sTerm tm tp = c2xTerm Q.saveText hashToObjectId tm (Just tp) <&> \(w, tm, Just tp) -> (w, tm, tp) +w2cTerm :: EDB m => WatchLocalIds -> S.Term.Term -> m (C.Term Symbol) +w2cTerm ids tm = do + (substText, substHash) <- localIdsToLookups loadTextById loadHashByHashId ids + pure $ x2cTerm substText substHash tm -c2sDecl :: forall m t d. EDB m => (Text -> m t) -> (H.Hash -> m d) -> C.Decl Symbol -> m (LocalIds' t d, S.Decl.Decl Symbol) -c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do - done =<< (runWriterT . flip evalStateT mempty) do - cts' <- traverse (ABT.transformM goType) cts - pure (C.Decl.DataDeclaration dt m b cts') - where - goType :: +-- ** Saving & loading type decls + +saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId +saveDeclComponent h decls = do + sDeclElements <- traverse (c2sDecl Q.saveText hashToObjectId) decls + hashId <- Q.saveHashHash h + let bytes = + S.putBytes + S.putDeclFormat + ( S.Decl.Decl . S.Decl.LocallyIndexedComponent $ + Vector.fromList sDeclElements + ) + Q.saveObject hashId OT.DeclComponent bytes + +c2sDecl :: forall m t d. EDB m => (Text -> m t) -> (H.Hash -> m d) -> C.Decl Symbol -> m (LocalIds' t d, S.Decl.Decl Symbol) +c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do + done =<< (runWriterT . 
flip evalStateT mempty) do + cts' <- traverse (ABT.transformM goType) cts + pure (C.Decl.DataDeclaration dt m b cts') + where + goType :: forall m a. (MonadWriter (Seq Text, Seq H.Hash) m, MonadState (Map Text LocalTextId, Map H.Hash LocalDefnId) m) => C.Type.FD a -> @@ -731,14 +670,6 @@ c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do (Vector.fromList (Foldable.toList defnIds)) pure (ids, decl) -loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) -loadTypeOfTermByTermReference (C.Reference.Id h i) = - hashToObjectId h - >>= liftQ . Q.loadObjectById - -- retrieve and deserialize the blob - >>= decodeTermElementDiscardingTerm i - >>= uncurry s2cTypeOfTerm - loadDeclByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob @@ -756,121 +687,68 @@ loadDeclByReference (C.Reference.Id h i) = do substTypeRef = bimap substText (fmap substHash) pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here -saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m Db.ObjectId -saveTermComponent h terms = do - sTermElements <- traverse (uncurry c2sTerm) terms - hashId <- Q.saveHashHash h - let bytes = S.putBytes S.putTermComponent (S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements) - Q.saveObject hashId OT.TermComponent bytes - -saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId -saveDeclComponent h decls = do - sDeclElements <- traverse (c2sDecl Q.saveText hashToObjectId) decls - hashId <- Q.saveHashHash h - let bytes = - S.putBytes - S.putDeclFormat - ( S.Decl.Decl . 
S.Decl.LocallyIndexedComponent $ - Vector.fromList sDeclElements - ) - Q.saveObject hashId OT.DeclComponent bytes - -listWatches :: EDB m => WatchKind -> m [C.Reference.Id] -listWatches k = Q.loadWatchesByWatchKind k >>= traverse s2cReferenceId - -loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) -loadWatch k r = - C.Reference.idH Q.saveHashHash r - >>= MaybeT . Q.loadWatch k - >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getLocalIds S.getTerm) - >>= uncurry s2cTerm - -saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () -saveWatch w r t = do - rs <- C.Reference.idH Q.saveHashHash r - wterm <- c2wTerm t - let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm - Q.saveWatch w rs bytes - -termsHavingType :: EDB m => C.Reference -> m (Set C.Referent.Id) -termsHavingType cTypeRef = do - maySet <- runMaybeT $ do - sTypeRef <- c2hReference cTypeRef - sIds <- Q.getReferentsByType sTypeRef - traverse s2cReferentId sIds - pure case maySet of - Nothing -> mempty - Just set -> Set.fromList set - -termsMentioningType :: EDB m => C.Reference -> m (Set C.Referent.Id) -termsMentioningType cTypeRef = do - maySet <- runMaybeT $ do - sTypeRef <- c2hReference cTypeRef - sIds <- Q.getReferentsByTypeMention sTypeRef - traverse s2cReferentId sIds - pure case maySet of - Nothing -> mempty - Just set -> Set.fromList set - --- something kind of funny here. 
first, we don't need to enumerate all the reference pos if we're just picking one --- second, it would be nice if we could leave these as S.References a little longer --- so that we remember how to blow up if they're missing -componentReferencesByPrefix :: EDB m => OT.ObjectType -> Text -> Maybe C.Reference.Pos -> m [S.Reference.Id] -componentReferencesByPrefix ot b32prefix pos = do - oIds :: [Db.ObjectId] <- Q.objectIdByBase32Prefix ot b32prefix - let test = maybe (const True) (==) pos - let filterComponent l = [x | x@(C.Reference.Id _ pos) <- l, test pos] - fmap Monoid.fromMaybe . runMaybeT $ - join <$> traverse (fmap filterComponent . componentByObjectId) oIds - -termReferencesByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Reference.Id] -termReferencesByPrefix t w = - componentReferencesByPrefix OT.TermComponent t w - >>= traverse (C.Reference.idH loadHashByObjectId) - -declReferencesByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Reference.Id] -declReferencesByPrefix t w = - componentReferencesByPrefix OT.DeclComponent t w - >>= traverse (C.Reference.idH loadHashByObjectId) - -termReferentsByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Referent.Id] -termReferentsByPrefix b32prefix pos = - fmap C.Referent.RefId <$> termReferencesByPrefix b32prefix pos +-- * Branch transformation --- todo: simplify this if we stop caring about constructor type --- todo: remove the cycle length once we drop it from Unison.Reference -declReferentsByPrefix :: - EDB m => - Text -> - Maybe C.Reference.Pos -> - Maybe ConstructorId -> - m [(H.Hash, C.Reference.Pos, Word64, C.DeclType, [C.Decl.ConstructorId])] -declReferentsByPrefix b32prefix pos cid = do - componentReferencesByPrefix OT.DeclComponent b32prefix pos - >>= traverse (loadConstructors cid) +s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) +s2cBranch (S.Branch.Full.Branch tms tps patches children) = + C.Branch + <$> doTerms tms + <*> doTypes tps + <*> doPatches patches + <*> doChildren children where - 
loadConstructors :: EDB m => Maybe Word64 -> S.Reference.Id -> m (H.Hash, C.Reference.Pos, Word64, C.DeclType, [ConstructorId]) - loadConstructors cid rid@(C.Reference.Id oId pos) = do - (dt, len, ctorCount) <- getDeclCtorCount rid - h <- loadHashByObjectId oId - let test :: ConstructorId -> Bool - test = maybe (const True) (==) cid - cids = [cid | cid <- [0 :: ConstructorId .. ctorCount - 1], test cid] - pure (h, pos, len, dt, cids) - getDeclCtorCount :: EDB m => S.Reference.Id -> m (C.Decl.DeclType, Word64, ConstructorId) - getDeclCtorCount (C.Reference.Id r i) = do - bs <- liftQ (Q.loadObjectById r) - len <- decodeComponentLengthOnly bs - (_localIds, decl) <- decodeDeclElement i bs - pure (C.Decl.declType decl, len, fromIntegral $ length (C.Decl.constructorTypes decl)) + doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Referent (m C.MdValues))) + doTerms = + Map.bitraverse + (fmap C.NameSegment . loadTextById) + ( Map.bitraverse s2cReferent \case + S.MetadataSet.Inline rs -> + pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs + ) + doTypes :: EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Reference (m C.MdValues))) + doTypes = + Map.bitraverse + (fmap C.NameSegment . loadTextById) + ( Map.bitraverse s2cReference \case + S.MetadataSet.Inline rs -> + pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs + ) + doPatches :: EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) + doPatches = Map.bitraverse (fmap C.NameSegment . loadTextById) \patchId -> do + h <- PatchHash <$> (loadHashByObjectId . Db.unPatchObjectId) patchId + pure (h, loadPatchById patchId) --- (localIds, C.Decl.DataDeclaration dt m b ct) <- --- hashToObjectId h >>= liftQ . 
Q.loadObjectById >>= decodeDeclElement i + doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) + doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) \(boId, chId) -> + C.CausalHead <$> loadCausalHashById chId + <*> loadValueHashByCausalHashId chId + <*> headParents chId + <*> pure (loadBranchByObjectId boId) + where + headParents :: EDB m => Db.CausalHashId -> m (Map CausalHash (m (C.Causal m CausalHash BranchHash (C.Branch m)))) + headParents chId = do + parentsChIds <- Q.loadCausalParents chId + fmap Map.fromList $ traverse pairParent parentsChIds + pairParent :: EDB m => Db.CausalHashId -> m (CausalHash, m (C.Causal m CausalHash BranchHash (C.Branch m))) + pairParent chId = do + h <- loadCausalHashById chId + pure (h, loadCausal chId) + loadCausal :: EDB m => Db.CausalHashId -> m (C.Causal m CausalHash BranchHash (C.Branch m)) + loadCausal chId = do + C.Causal <$> loadCausalHashById chId + <*> loadValueHashByCausalHashId chId + <*> headParents chId + <*> pure (loadValue chId) + loadValue :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) + loadValue chId = do + boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId + traverse loadBranchByObjectId boId -componentByObjectId :: EDB m => Db.ObjectId -> m [S.Reference.Id] -componentByObjectId id = do - len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly - pure [C.Reference.Id id i | i <- [0 .. len - 1]] +saveRootBranch :: EDB m => C.Branch.Root m -> m (Db.BranchObjectId, Db.CausalHashId) +saveRootBranch (C.CausalHead _hc _he _parents _me) = error "todo" + where + _c2sBranch :: EDB m => C.Branch m -> m S.DbBranch + _c2sBranch = error "todo" lookupBranchLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId lookupBranchLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! 
fromIntegral w @@ -884,16 +762,16 @@ lookupBranchLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLoo lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w +loadBranchByCausalHash :: EDB m => H.Hash -> m (Maybe (C.Branch.Root m)) +loadBranchByCausalHash = error "todo" + -- loadBranchByCausalHashId <=< (fmap Db.CausalHashId . liftQ . Q.expectHashIdByHash) + +-- | is this even a thing? loading a branch by causal hash? yes I guess so. loadBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) loadBranchByCausalHashId id = do (liftQ . Q.loadBranchObjectIdByCausalHashId) id >>= traverse loadBranchByObjectId -deserializePatchObject :: EDB m => Db.PatchObjectId -> m S.PatchFormat -deserializePatchObject id = - (liftQ . Q.loadObjectById) (Db.unPatchObjectId id) - >>= getFromBytesOr (ErrPatch id) S.getPatchFormat - loadBranchByObjectId :: EDB m => Db.BranchObjectId -> m (C.Branch m) loadBranchByObjectId id = do deserializeBranchObject id >>= \case @@ -1007,6 +885,163 @@ loadBranchByObjectId id = do let (Set.fromList -> adds, Set.fromList -> removes) = S.BranchDiff.addsRemoves md' in Just . S.MetadataSet.Inline $ (Set.union adds $ Set.difference md removes) + lookupBranchLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId + lookupBranchLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! fromIntegral w + + lookupBranchLocalDefn :: S.BranchLocalIds -> LocalDefnId -> Db.ObjectId + lookupBranchLocalDefn li (LocalDefnId w) = S.BranchFormat.branchDefnLookup li Vector.! fromIntegral w + + lookupBranchLocalPatch :: BranchLocalIds -> LocalPatchObjectId -> Db.PatchObjectId + lookupBranchLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLookup li Vector.! 
fromIntegral w + + lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) + lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w + +-- * Patch transformation +loadPatchById :: EDB m => Db.PatchObjectId -> m C.Patch +loadPatchById patchId = + deserializePatchObject patchId >>= \case + S.PatchFormat.Full li p -> s2cPatch (l2sPatchFull li p) + S.PatchFormat.Diff ref li d -> doDiff ref [l2sPatchDiff li d] + where + doDiff :: EDB m => Db.PatchObjectId -> [S.PatchDiff] -> m C.Patch + doDiff ref ds = + deserializePatchObject ref >>= \case + S.PatchFormat.Full li f -> joinFull (l2sPatchFull li f) ds + S.PatchFormat.Diff ref' li' d' -> doDiff ref' (l2sPatchDiff li' d' : ds) + joinFull :: EDB m => S.Patch -> [S.PatchDiff] -> m C.Patch + joinFull f [] = s2cPatch f + joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds + where + f' = S.Patch (addRemove addedTermEdits removedTermEdits termEdits) (addRemove addedTypeEdits removedTypeEdits typeEdits) + addRemove :: (Ord a, Ord b) => Map a (Set b) -> Map a (Set b) -> Map a (Set b) -> Map a (Set b) + addRemove add del src = + (Map.unionWith (<>) add (Map.differenceWith remove src del)) + remove :: Ord b => Set b -> Set b -> Maybe (Set b) + remove src del = + let diff = Set.difference src del + in if diff == mempty then Nothing else Just diff + + -- implementation detail of loadPatchById? + lookupPatchLocalText :: S.PatchLocalIds -> LocalTextId -> Db.TextId + lookupPatchLocalText li (LocalTextId w) = S.PatchFormat.patchTextLookup li Vector.! fromIntegral w + + lookupPatchLocalHash :: S.PatchLocalIds -> LocalHashId -> Db.HashId + lookupPatchLocalHash li (LocalHashId w) = S.PatchFormat.patchHashLookup li Vector.! 
fromIntegral w + + lookupPatchLocalDefn :: S.PatchLocalIds -> LocalDefnId -> Db.ObjectId + lookupPatchLocalDefn li (LocalDefnId w) = S.PatchFormat.patchDefnLookup li Vector.! fromIntegral w + + l2sPatchFull :: S.PatchFormat.PatchLocalIds -> S.LocalPatch -> S.Patch + l2sPatchFull li = + S.Patch.Full.trimap + (lookupPatchLocalText li) + (lookupPatchLocalHash li) + (lookupPatchLocalDefn li) + + l2sPatchDiff :: S.PatchFormat.PatchLocalIds -> S.LocalPatchDiff -> S.PatchDiff + l2sPatchDiff li = + S.PatchDiff.trimap + (lookupPatchLocalText li) + (lookupPatchLocalHash li) + (lookupPatchLocalDefn li) + +savePatch :: EDB m => PatchHash -> C.Patch -> m Db.PatchObjectId +savePatch h c = do + (li, lPatch) <- c2lPatch c + hashId <- Q.saveHashHash (unPatchHash h) + let bytes = S.putBytes S.putPatchFormat $ S.PatchFormat.Full li lPatch + Db.PatchObjectId <$> Q.saveObject hashId OT.Patch bytes + +s2cPatch :: EDB m => S.Patch -> m C.Patch +s2cPatch (S.Patch termEdits typeEdits) = + C.Patch + <$> Map.bitraverse h2cReferent (Set.traverse s2cTermEdit) termEdits + <*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits + +deserializePatchObject :: EDB m => Db.PatchObjectId -> m S.PatchFormat +deserializePatchObject id = + (liftQ . 
Q.loadObjectById) (Db.unPatchObjectId id) + >>= getFromBytesOr (ErrPatch id) S.getPatchFormat + + +-- * Searches + +termsHavingType :: EDB m => C.Reference -> m (Set C.Referent.Id) +termsHavingType cTypeRef = do + maySet <- runMaybeT $ do + sTypeRef <- c2hReference cTypeRef + sIds <- Q.getReferentsByType sTypeRef + traverse s2cReferentId sIds + pure case maySet of + Nothing -> mempty + Just set -> Set.fromList set + +termsMentioningType :: EDB m => C.Reference -> m (Set C.Referent.Id) +termsMentioningType cTypeRef = do + maySet <- runMaybeT $ do + sTypeRef <- c2hReference cTypeRef + sIds <- Q.getReferentsByTypeMention sTypeRef + traverse s2cReferentId sIds + pure case maySet of + Nothing -> mempty + Just set -> Set.fromList set + +-- something kind of funny here. first, we don't need to enumerate all the reference pos if we're just picking one +-- second, it would be nice if we could leave these as S.References a little longer +-- so that we remember how to blow up if they're missing +componentReferencesByPrefix :: EDB m => OT.ObjectType -> Text -> Maybe C.Reference.Pos -> m [S.Reference.Id] +componentReferencesByPrefix ot b32prefix pos = do + oIds :: [Db.ObjectId] <- Q.objectIdByBase32Prefix ot b32prefix + let test = maybe (const True) (==) pos + let filterComponent l = [x | x@(C.Reference.Id _ pos) <- l, test pos] + fmap Monoid.fromMaybe . runMaybeT $ + join <$> traverse (fmap filterComponent . 
componentByObjectId) oIds + +termReferencesByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Reference.Id] +termReferencesByPrefix t w = + componentReferencesByPrefix OT.TermComponent t w + >>= traverse (C.Reference.idH loadHashByObjectId) + +declReferencesByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Reference.Id] +declReferencesByPrefix t w = + componentReferencesByPrefix OT.DeclComponent t w + >>= traverse (C.Reference.idH loadHashByObjectId) + +termReferentsByPrefix :: EDB m => Text -> Maybe Word64 -> m [C.Referent.Id] +termReferentsByPrefix b32prefix pos = + fmap C.Referent.RefId <$> termReferencesByPrefix b32prefix pos + +-- todo: simplify this if we stop caring about constructor type +-- todo: remove the cycle length once we drop it from Unison.Reference +declReferentsByPrefix :: + EDB m => + Text -> + Maybe C.Reference.Pos -> + Maybe ConstructorId -> + m [(H.Hash, C.Reference.Pos, Word64, C.DeclType, [C.Decl.ConstructorId])] +declReferentsByPrefix b32prefix pos cid = do + componentReferencesByPrefix OT.DeclComponent b32prefix pos + >>= traverse (loadConstructors cid) + where + loadConstructors :: EDB m => Maybe Word64 -> S.Reference.Id -> m (H.Hash, C.Reference.Pos, Word64, C.DeclType, [ConstructorId]) + loadConstructors cid rid@(C.Reference.Id oId pos) = do + (dt, len, ctorCount) <- getDeclCtorCount rid + h <- loadHashByObjectId oId + let test :: ConstructorId -> Bool + test = maybe (const True) (==) cid + cids = [cid | cid <- [0 :: ConstructorId .. ctorCount - 1], test cid] + pure (h, pos, len, dt, cids) + getDeclCtorCount :: EDB m => S.Reference.Id -> m (C.Decl.DeclType, Word64, ConstructorId) + getDeclCtorCount (C.Reference.Id r i) = do + bs <- liftQ (Q.loadObjectById r) + len <- decodeComponentLengthOnly bs + (_localIds, decl) <- decodeDeclElement i bs + pure (C.Decl.declType decl, len, fromIntegral $ length (C.Decl.constructorTypes decl)) + +-- (localIds, C.Decl.DataDeclaration dt m b ct) <- +-- hashToObjectId h >>= liftQ . 
Q.loadObjectById >>= decodeDeclElement i + branchHashesByPrefix :: EDB m => ShortBranchHash -> m (Set BranchHash) branchHashesByPrefix (ShortBranchHash b32prefix) = do hashIds <- Q.namespaceHashIdByBase32Prefix b32prefix @@ -1020,7 +1055,7 @@ causalHashesByPrefix (ShortBranchHash b32prefix) = do pure $ Set.fromList . fmap CausalHash . fmap H.fromBase32Hex $ b32s -- | returns a list of known definitions referencing `r` -dependents :: EDB m => C.Reference -> MaybeT m (Set C.Reference.Id) +dependents :: EDB m => C.Reference -> m (Set C.Reference.Id) dependents r = do r' <- c2sReference r sIds :: [S.Reference.Id] <- Q.getDependentsForDependency r' diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index 524f3a0884..a421030037 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -16,6 +16,7 @@ newtype MdValues = MdValues (Set Reference) deriving (Eq, Ord, Show) type Root m = CausalHead m CausalHash BranchHash (Branch m) +-- | V2.Branch is like V1.Branch0; I would rename it, at least temporarily, but too hard. data Branch m = Branch { terms :: Map NameSegment (Map Referent (m MdValues)), types :: Map NameSegment (Map Reference (m MdValues)), diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 5380f72640..5edcec1f94 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -252,8 +252,12 @@ sqliteCodebase root = do rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) rootBranchUpdates = error "todo" + -- if this blows up on cromulent hashes, then switch from `hashToHashId` + -- to one that returns Maybe. 
getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) - getBranchForHash = error "todo" + getBranchForHash _h = error "todo" + -- runDB conn $ + -- Cv.causalbranch2to1 $ Ops.loadBranchByCausalHash (Cv.hash1to2 h) dependentsImpl :: Reference -> IO (Set Reference.Id) dependentsImpl r = @@ -358,7 +362,9 @@ sqliteCodebase root = do branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) branchHashesByPrefix sh = runDB conn do - -- bs <- Ops.branchHashesByPrefix sh + ---- bs <- Ops.branchHashesByPrefix sh + -- given that a Branch is shallow, it's really `CausalHash` that you'd + -- refer to to specify a full namespace. cs <- Ops.causalHashesByPrefix (Cv.sbh1to2 sh) pure $ Set.map (Causal.RawHash . Cv.hash2to1 . unCausalHash) cs diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 0ef2c128fa..767c5f95e1 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -1,9 +1,17 @@ +{-# LANGUAGE ViewPatterns #-} + module Unison.Codebase.SqliteCodebase.Conversions where +import Data.Bifunctor (Bifunctor (bimap)) import qualified Data.ByteString.Short as SBS import Data.Either (fromRight) +import qualified Data.Map as Map import Data.Text (Text, pack) +import qualified U.Codebase.Branch as V2 +import qualified U.Codebase.Branch as V2.Branch +import qualified U.Codebase.Causal as V2 import qualified U.Codebase.Decl as V2.Decl +import qualified U.Codebase.HashTags as V2 import qualified U.Codebase.Kind as V2.Kind import qualified U.Codebase.Reference as V2 import qualified U.Codebase.Reference as V2.Reference @@ -19,6 +27,8 @@ import qualified U.Core.ABT as V2.ABT import qualified U.Util.Hash as V2 import qualified U.Util.Hash as V2.Hash import qualified Unison.ABT as V1.ABT +import qualified Unison.Codebase.Branch as V1.Branch +import qualified 
Unison.Codebase.Causal as V1.Causal import qualified Unison.Codebase.ShortBranchHash as V1 import qualified Unison.ConstructorType as CT import qualified Unison.DataDeclaration as V1.Decl @@ -62,7 +72,6 @@ watchKind2to1 = \case V2.WatchKind.RegularWatch -> V1.Var.RegularWatch V2.WatchKind.TestWatch -> V1.Var.TestWatch - term1to2 :: Hash -> V1.Term.Term V1.Symbol Ann -> V2.Term.Term V2.Symbol term1to2 h = V2.ABT.transform termF1to2 @@ -364,3 +373,30 @@ type1to2' convertRef = convertKind = \case V1.Kind.Star -> V2.Kind.Star V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) + + +-- type Root m = CausalHead m CausalHash BranchHash (Branch m) +-- newtype Branch m = Branch { _history :: Causal m Raw (Branch0 m) } +causalbranch2to1 :: forall m. Applicative m => (V2.CausalHash -> m (V2.Branch.Root m)) -> V2.Branch.Root m -> m (V1.Branch.Branch m) +causalbranch2to1 _loadParent (V2.CausalHead hc _he (Map.toList -> parents) me) = do + let currentHash = causalHash2to1 hc + causalHash2to1 :: V2.CausalHash -> V1.Causal.RawHash V1.Branch.Raw + causalHash2to1 = V1.Causal.RawHash . hash2to1 . 
V2.unCausalHash + loadParent1 :: + V2.Causal m V2.CausalHash V2.BranchHash (V2.Branch m) -> + V1.Causal.Causal m V1.Branch.Raw (V1.Branch.Branch0 m) + loadParent1 = error "todo" + V1.Branch.Branch <$> case parents of + [] -> V1.Causal.One currentHash <$> fmap branch2to1 me + [(hp, mp)] -> do + let parentHash = causalHash2to1 hp + V1.Causal.Cons currentHash + <$> fmap branch2to1 me + <*> pure (parentHash, loadParent1 <$> mp) + merge -> do + let tailsList = map (bimap causalHash2to1 (fmap loadParent1)) merge + e <- me + pure $ V1.Causal.Merge currentHash (branch2to1 e) (Map.fromList tailsList) + +branch2to1 :: V2.Branch m -> V1.Branch.Branch0 m +branch2to1 = error "todo" diff --git a/questions.md b/questions.md index 18459eae03..e5c6261cd9 100644 --- a/questions.md +++ b/questions.md @@ -1,3 +1,161 @@ +Task list: +- [x] error reporting for deserialization failures +- [x] error reporting for codebase consistency issues + +| thing | v1↔v2 | v2↔v2s | desc. | +|-----|-----|-----|---| +|`Reference` | ✔ ✔ | . . |builtin or user-defined term or type| +|Recursive `Reference` | ✔ ✔ | || +|`Reference.Id` | ✔ ✔ | . . |user-defined term or type| +|Recursive `Reference.Id` | ✔ ✔ | || +|`ReferenceH` | | . . | weak `Reference`| +|`Reference.IdH` | | . . | weak `Reference.Id` | +|`Referent` | | . . |builtin or user-defined function or constructor| +|Recursive `Referent` | ✔ ✔ | || +|`Referent.Id` | ✔← | . . | user-defined term or type | +|`ReferentH` | | . . | weak `Referent` | +|`Referent.IdH` | | . . 
| weak `Referent.Id` | +|`Hash` | ✔ ✔ | n/a | | +|`ShortBranchHash` | →✔ | | the base32 prefix of a branch/causal hash | +|`ConstructorType` | ✔ ✔ | | data vs ability | +|`WatchKind` | ✔ ✔ | | test vs regular | +|`Term.Term` | ✔ ✔ | | | +|`Decl.Decl` | ✔ ✔ | | | +|`Type` | ✔ ✔ | | | +|recursive `Type` | ✔ ✔ | | | +|`Symbol` | ✔ ✔ | | | +|`ShortHash` | | | | +|`ShortHash.cycle` | →✔ | n/a | read pos and discard length | +|`ShortHash.cid` | | n/a | haven't gotten to referents yet | +|`ABT.Term` | ✔ ✔ | n/a | | +|`Causal`/`Branch` | | | | +|`Branch0` | | | | + +## `Operations.hs` + +### Exists Check + +| Exists Check | name | +| ---------------- | --------------------- | +| `HashId` by `Hash` | `objectExistsForHash` | + +### Id Lookups +| ID lookups | Create if not found | Fail if not found | +| ---------------------------- | ------------------- | ----------------------------- | +| `Text -> TextId` | | `lookupTextId` | +| `TextId -> Text` | | `loadTextById` | +| `Hash -> HashId` | | `hashToHashId` | +| `Hash -> ObjectId` | | `hashToObjectId` | +| `ObjectId -> Hash` | | `loadHashByObjectId` | +| `HashId -> Hash` | | `loadHashByHashId` | +| `CausalHashId -> CausalHash` | | `loadCausalHashById` | +| `CausalHashId -> BranchHash` | | `loadValueHashByCausalHashId` | + +### V2 ↔ Sqlite tranversals + +| V2 ↔ Sqlite conversion traversals | name | notes | +| ------------------------------------------------------------ | ----------------------------------- | ----------------- | +| `Reference' Text Hash` ↔ `Reference' TextId ObjectId` | `c2sReference`, `s2cReference` | normal references | +| `Reference.Id' Hash` ↔ `Reference.Id' ObjectId` | `c2sReferenceId`, `s2cReferenceId` | normal user references | +| `Reference' Text Hash` ↔ `Reference' TextId HashId` | `c2hReference`, `h2cReference` | weak references | +| `Referent'' TextId ObjectId` → `Referent'' Text Hash` | `s2cReferent` | normal referent | +| `Referent'' TextId HashId` → `Referent'' Text Hash` | `h2cReferent` | weak 
referent | +| `Referent.Id' ObjectId` → `Referent.Id' Hash` | `s2cReferentId` | normal user referent | +| `TermEdit` | `s2cTermEdit` | | +| `TermEdit.Typing` | `c2sTyping`, `s2cTyping` | | +| `TypeEdit` | `s2cTypeEdit` | | +| `Branch0` | `s2cBranch` | | +| Branch diff | | todo | +| `Patch` | `s2cPatch`, `c2lPatch` | `l` = local ids | +| `Patch` diff | `diffPatch` | | +| User `Term` | `c2sTerm`, `s2cTermWithType`, `s2cTerm`, `s2cTypeOfTerm` | | +| watch expressions | `c2wTerm`, `w2cTerm` | | +| User `Decl` | `c2sDecl`, | | + +### Saving & loading? + +| Saving & loading objects | name | notes | +| ------------------------------------- | ---------------------------- | ----------------------------------------------------------- | +| `Patch` ↔ `PatchObjectId` | `savePatch`, `loadPatchById` | | +| `H.Hash -> m (Maybe (Branch.Root m))` | `loadBranchByCausalHash` | wip | +| `CausalHashId -> m (Maybe Branch)` | `loadBranchByCausalHashId` | equivalent to old `Branch0`, *not sure if actually useful?* | +| `BranchObjectId -> m (Branch m)` | `loadBranchByObjectId` | equivalent to old `Branch0` | + +### Deserialization helpers + +| +| +| + +| Deserialization helpers | | notes | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ----- | +| `decodeComponentLengthOnly` | `ByteString -> m Word64` | | +| `decodeTermElementWithType` | `C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Term, S.Term.Type)` | | +| `decodeTermElementDiscardingTerm`, `decodeTermElementDiscardingType` | `-> m (LocalIds, S.Term.Term)`, `-> m (LocalIds, S.Term.Type)` | | +| `decodeDeclElement` | `Word64 -> ByteString -> m (LocalIds, S.Decl.Decl Symbol)` | | +| `deserializePatchObject` | `PatchObjectId -> PatchFormat` | | + +### Reconstruct V1 data + +| Reconstruct V1 data | | +| ------------------------ | ------------------------------ | +| `getCycleLen` | `Hash -> m Word64` | +| `getDeclTypeByReference` | `Reference.Id -> 
DeclType` | +| `componentByObjectId` | `ObjectId -> m [Reference.Id]` | + +### Codebase-y operations + +|Codebase-y operations| type | notes | +| ----------------------------- | ----------------------------- | ----- | +| `loadTermWithTypeByReference` | `Reference.Id -> MaybeT m (Term, Type)` | | +| `loadTermByReference` | `Reference.Id -> MaybeT m Term` || +| `loadTypeOfTermByTermReference` | `Reference.Id -> Maybe T m Type` || +| `saveTermComponent` | `Hash -> [(Term, Type)] -> ObjectId` || +| `loadDeclByReference` | `Reference.Id -> MaybeT m Decl` || +| `saveDeclComponent` | `Hash -> [Decl] -> m ObjectId` || +| `loadWatch` | `WatchKind -> Reference.Id -> MaybeT m Term` || +| `saveWatch` | `WatchKind -> Reference.Id -> Term -> m ()` || +| `termsHavingType` | `Reference -> m (Set Reference.Id)` || +| `termsMentioningType` | " || +| `termReferencesByPrefix` | `Text -> Maybe Word64 -> m [Reference.Id]` || +| `declReferencesByPrefix` | `Text -> Maybe Word64 -> m [Reference.Id]` || +| `saveRootBranch` | `Branch.Root m -> m (Db.BranchObjectId, Db.CausalHashId)` |wip| + +### SqliteCodebase + +| operation | status | notes | +| ----------------------- | ------ | ----- | +| getTerm | ✔ | | +| getTypeOfTerm | ✔ | | +| getTypeDeclaration | ✔ | | +| putTerm | ✔ | | +| putTypeDeclaration | ✔ | | +| getRootBranch | todo | | +| putRootBranch | todo | | +| rootBranchUpdates | todo | | +| getBranchForHash | todo | | +| dependentsImpl | ✔ | | +| syncFromDirectory | todo | | +| syncToDirectory | todo | | +| watches | ✔ | | +| getWatch | ✔ | | +| putWatch | ✔ | | +| getReflog | ✔ | | +| appendRefLog | ✔ | | +| termsOfTypeImpl | todo | | +| termsMentioningTypeImpl | todo | | +| hashLength | ✔ | | +| termReferencesByPrefix | ✔ | | +| declReferencesByPrefix | ✔ | | +| referentsByPrefix | ✔ | | +| branchHashLength | ✔ | | +| branchHashesByPrefix | ✔ | | + + + + + + Question: should the dependents / dependency index be a Relation Reference Reference (like now) a Relation Referent 
Reference? Example, if you have `type Foo = Blah | Blah2 Nat`, From c2c2077237a6d4e8a9e99e55fe387722ab69bf2e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 7 Dec 2020 12:11:47 -0500 Subject: [PATCH 056/225] wip --- .../U/Codebase/Sqlite/Operations.hs | 119 +++++++++++------- .../U/Codebase/Sqlite/Queries.hs | 10 +- codebase2/codebase/U/Codebase/Branch.hs | 7 +- .../src/Unison/Codebase/SqliteCodebase.hs | 22 ++-- questions.md | 109 ++++++++-------- 5 files changed, 161 insertions(+), 106 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 5e1180d6b4..59db7b7e77 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -35,10 +35,10 @@ import qualified Data.Sequence as Seq import Data.Set (Set) import qualified Data.Set as Set import Data.Text (Text) +import Data.Traversable (for) import Data.Tuple.Extra (uncurry3) import qualified Data.Vector as Vector import Data.Word (Word64) -import qualified U.Codebase.Branch as C import qualified U.Codebase.Branch as C.Branch import qualified U.Codebase.Causal as C import U.Codebase.Decl (ConstructorId) @@ -59,6 +59,7 @@ import qualified U.Codebase.Sqlite.Branch.Format as S.BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as S import qualified U.Codebase.Sqlite.Branch.Full as S.Branch.Full import qualified U.Codebase.Sqlite.Branch.Full as S.MetadataSet +import U.Codebase.Sqlite.DbId (BranchHashId (unBranchHashId)) import qualified U.Codebase.Sqlite.DbId as Db import qualified U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds @@ -111,6 +112,7 @@ import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -- * Error handling + type Err m = MonadError Error m type EDB m = (Err m, DB m) @@ -133,6 +135,7 @@ data Error | DatabaseIntegrityError Q.Integrity | UnknownDependency H.Hash | UnknownText Text 
+ | ExpectedBranch CausalHash BranchHash | LegacyUnknownCycleLen H.Hash | LegacyUnknownConstructorType H.Hash C.Reference.Pos deriving (Show) @@ -161,9 +164,8 @@ loadTextById = liftQ . Q.loadTextById -- | Q: Any Hash that UCM gets ahold of should already exist in the DB? -- because it came from a sync or from a save -hashToHashId :: EDB m => H.Hash -> m Db.HashId -hashToHashId = liftQ . Q.expectHashIdByHash - +-- hashToHashId :: EDB m => H.Hash -> m Db.HashId +-- hashToHashId = liftQ . Q.expectHashIdByHash hashToObjectId :: EDB m => H.Hash -> m Db.ObjectId hashToObjectId h = do (Q.loadHashId . H.toBase32Hex) h >>= \case @@ -171,9 +173,10 @@ hashToObjectId h = do Nothing -> throwError $ UnknownDependency h objectExistsForHash :: DB m => H.Hash -> m Bool -objectExistsForHash h = isJust <$> runMaybeT do - id <- MaybeT . Q.loadHashId . H.toBase32Hex $ h - Q.maybeObjectIdForAnyHashId id +objectExistsForHash h = + isJust <$> runMaybeT do + id <- MaybeT . Q.loadHashId . H.toBase32Hex $ h + Q.maybeObjectIdForAnyHashId id loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash loadHashByObjectId = fmap H.fromBase32Hex . liftQ . Q.loadPrimaryHashByObjectId @@ -246,8 +249,8 @@ s2cTypeEdit = \case S.TypeEdit.Deprecate -> pure C.TypeEdit.Deprecate -- | assumes that all relevant values are already in the DB -c2lPatch :: EDB m => C.Patch -> m (S.PatchLocalIds, S.LocalPatch) -c2lPatch (C.Patch termEdits typeEdits) = +c2lPatch :: EDB m => C.Branch.Patch -> m (S.PatchLocalIds, S.LocalPatch) +c2lPatch (C.Branch.Patch termEdits typeEdits) = done =<< (runWriterT . 
flip evalStateT startState) do S.Patch <$> Map.bitraverse saveReferentH (Set.traverse saveTermEdit) termEdits @@ -374,6 +377,7 @@ componentByObjectId id = do -- * Codebase operations -- ** Saving & loading terms + saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m Db.ObjectId saveTermComponent h terms = do sTermElements <- traverse (uncurry c2sTerm) terms @@ -529,7 +533,7 @@ s2cTypeOfTerm ids tp = do (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids pure $ x2cTType substText substHash tp --- |implementation detail of {s,w}2c*Term* +-- | implementation detail of {s,w}2c*Term* localIdsToLookups :: Monad m => (t -> m Text) -> (d -> m H.Hash) -> LocalIds' t d -> m (LocalTextId -> Text, LocalDefnId -> H.Hash) localIdsToLookups loadText loadHash localIds = do texts <- traverse loadText $ LocalIds.textLookup localIds @@ -538,7 +542,7 @@ localIdsToLookups loadText loadHash localIds = do substHash (LocalDefnId w) = hashes Vector.! fromIntegral w pure (substText, substHash) --- |implementation detail of {s,w}2c*Term* +-- | implementation detail of {s,w}2c*Term* x2cTerm :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Term -> C.Term Symbol x2cTerm substText substHash = -- substitute the text and hashes back into the term @@ -549,7 +553,7 @@ x2cTerm substText substHash = substTermLink = bimap substTermRef substTypeRef substTypeLink = substTypeRef --- |implementation detail of {s,w}2c*Term* +-- | implementation detail of {s,w}2c*Term* x2cTType :: (LocalTextId -> Text) -> (LocalDefnId -> H.Hash) -> S.Term.Type -> C.Term.Type Symbol x2cTType substText substHash = C.Type.rmap (bimap substText substHash) @@ -600,6 +604,7 @@ c2sTerm :: EDB m => C.Term Symbol -> C.Term.Type Symbol -> m (LocalIds, S.Term.T c2sTerm tm tp = c2xTerm Q.saveText hashToObjectId tm (Just tp) <&> \(w, tm, Just tp) -> (w, tm, tp) -- *** Watch expressions + listWatches :: EDB m => WatchKind -> m [C.Reference.Id] listWatches k = 
Q.loadWatchesByWatchKind k >>= traverse s2cReferenceId @@ -689,65 +694,65 @@ loadDeclByReference (C.Reference.Id h i) = do -- * Branch transformation -s2cBranch :: EDB m => S.DbBranch -> m (C.Branch m) +s2cBranch :: EDB m => S.DbBranch -> m (C.Branch.Branch m) s2cBranch (S.Branch.Full.Branch tms tps patches children) = - C.Branch + C.Branch.Branch <$> doTerms tms <*> doTypes tps <*> doPatches patches <*> doChildren children where - doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Referent (m C.MdValues))) + doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) doTerms = Map.bitraverse - (fmap C.NameSegment . loadTextById) + (fmap C.Branch.NameSegment . loadTextById) ( Map.bitraverse s2cReferent \case S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs ) - doTypes :: EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.NameSegment (Map C.Reference (m C.MdValues))) + doTypes :: EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) doTypes = Map.bitraverse - (fmap C.NameSegment . loadTextById) + (fmap C.Branch.NameSegment . loadTextById) ( Map.bitraverse s2cReference \case S.MetadataSet.Inline rs -> pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs ) - doPatches :: EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.NameSegment (PatchHash, m C.Patch)) - doPatches = Map.bitraverse (fmap C.NameSegment . loadTextById) \patchId -> do + doPatches :: EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.Branch.NameSegment (PatchHash, m C.Branch.Patch)) + doPatches = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \patchId -> do h <- PatchHash <$> (loadHashByObjectId . 
Db.unPatchObjectId) patchId pure (h, loadPatchById patchId) - doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch m))) - doChildren = Map.bitraverse (fmap C.NameSegment . loadTextById) \(boId, chId) -> + doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch.Branch m))) + doChildren = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \(boId, chId) -> C.CausalHead <$> loadCausalHashById chId <*> loadValueHashByCausalHashId chId <*> headParents chId <*> pure (loadBranchByObjectId boId) where - headParents :: EDB m => Db.CausalHashId -> m (Map CausalHash (m (C.Causal m CausalHash BranchHash (C.Branch m)))) + headParents :: EDB m => Db.CausalHashId -> m (Map CausalHash (m (C.Causal m CausalHash BranchHash (C.Branch.Branch m)))) headParents chId = do parentsChIds <- Q.loadCausalParents chId fmap Map.fromList $ traverse pairParent parentsChIds - pairParent :: EDB m => Db.CausalHashId -> m (CausalHash, m (C.Causal m CausalHash BranchHash (C.Branch m))) + pairParent :: EDB m => Db.CausalHashId -> m (CausalHash, m (C.Causal m CausalHash BranchHash (C.Branch.Branch m))) pairParent chId = do h <- loadCausalHashById chId pure (h, loadCausal chId) - loadCausal :: EDB m => Db.CausalHashId -> m (C.Causal m CausalHash BranchHash (C.Branch m)) + loadCausal :: EDB m => Db.CausalHashId -> m (C.Causal m CausalHash BranchHash (C.Branch.Branch m)) loadCausal chId = do C.Causal <$> loadCausalHashById chId <*> loadValueHashByCausalHashId chId <*> headParents chId <*> pure (loadValue chId) - loadValue :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) + loadValue :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch.Branch m)) loadValue chId = do boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId traverse loadBranchByObjectId boId -saveRootBranch :: EDB m => C.Branch.Root m -> m 
(Db.BranchObjectId, Db.CausalHashId) +saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveRootBranch (C.CausalHead _hc _he _parents _me) = error "todo" where - _c2sBranch :: EDB m => C.Branch m -> m S.DbBranch + _c2sBranch :: EDB m => C.Branch.Branch m -> m S.DbBranch _c2sBranch = error "todo" lookupBranchLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId @@ -762,17 +767,47 @@ lookupBranchLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLoo lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w -loadBranchByCausalHash :: EDB m => H.Hash -> m (Maybe (C.Branch.Root m)) -loadBranchByCausalHash = error "todo" - -- loadBranchByCausalHashId <=< (fmap Db.CausalHashId . liftQ . Q.expectHashIdByHash) +-- fix me... +-- [ ] load a causal, allowing a missing value (C.Branch.Spine) +-- [x] load a causal and require its value (C.Branch.Causal) +-- [ ] load a causal, returning nothing if causal is unknown + +loadCausalSpineByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Spine m) +loadCausalSpineByCausalHashId = error "todo" + +loadCausalBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Causal m) +loadCausalBranchByCausalHashId id = + loadCausalSpineByCausalHashId id <&> \case + C.Causal hc he parents mme -> + C.CausalHead hc he parents $ mme >>= (Q.orError $ ExpectedBranch hc he) + +-- loadCausalBranchHeadByCausalHash :: EDB m => CausalHash -> m (Maybe (C.Branch.Causal m)) +-- loadCausalBranchHeadByCausalHash hc = do +-- hId <- (fmap Db.CausalHashId . liftQ . Q.expectHashIdByHash . 
unCausalHash) hc +-- loadCausalBranchByCausalHashId hId + +-- loadCausalBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch.Spine m)) +-- loadCausalBranchByCausalHashId id = runMaybeT do +-- namespace <- MaybeT loadBranchByCausalHashId id +-- parentHashIds <- Q.loadCausalParents id +-- loadParents <- for parentHashIds \hId -> do +-- h <- (fmap (CausalHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unCausalHashId) hId +-- pure (h, loadCausalBranchByCausalHashId hId >>= Q.orError ) +-- error "todo" + +-- hb <- +-- (fmap unBranchHashId . liftQ . Q.loadCausalValueHashId) id +-- >>= fmap (BranchHash . H.fromBase32Hex) . liftQ . Q.loadHashById + +-- C.CausalHead hc hb parentsMap (pure namespace) -- | is this even a thing? loading a branch by causal hash? yes I guess so. -loadBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch m)) +loadBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch.Branch m)) loadBranchByCausalHashId id = do (liftQ . 
Q.loadBranchObjectIdByCausalHashId) id >>= traverse loadBranchByObjectId -loadBranchByObjectId :: EDB m => Db.BranchObjectId -> m (C.Branch m) +loadBranchByObjectId :: EDB m => Db.BranchObjectId -> m (C.Branch.Branch m) loadBranchByObjectId id = do deserializeBranchObject id >>= \case S.BranchFormat.Full li f -> s2cBranch (l2sFull li f) @@ -790,13 +825,13 @@ loadBranchByObjectId id = do l2sDiff :: S.BranchFormat.BranchLocalIds -> S.Branch.LocalDiff -> S.Branch.Diff l2sDiff li = S.BranchDiff.quadmap (lookupBranchLocalText li) (lookupBranchLocalDefn li) (lookupBranchLocalPatch li) (lookupBranchLocalChild li) - doDiff :: EDB m => Db.BranchObjectId -> [S.Branch.Diff] -> m (C.Branch m) + doDiff :: EDB m => Db.BranchObjectId -> [S.Branch.Diff] -> m (C.Branch.Branch m) doDiff ref ds = deserializeBranchObject ref >>= \case S.BranchFormat.Full li f -> joinFull (l2sFull li f) ds S.BranchFormat.Diff ref' li' d' -> doDiff ref' (l2sDiff li' d' : ds) where - joinFull :: EDB m => S.DbBranch -> [S.Branch.Diff] -> m (C.Branch m) + joinFull :: EDB m => S.DbBranch -> [S.Branch.Diff] -> m (C.Branch.Branch m) joinFull f [] = s2cBranch f joinFull (S.Branch.Full.Branch tms tps patches children) @@ -898,18 +933,19 @@ loadBranchByObjectId id = do lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! 
fromIntegral w -- * Patch transformation -loadPatchById :: EDB m => Db.PatchObjectId -> m C.Patch + +loadPatchById :: EDB m => Db.PatchObjectId -> m C.Branch.Patch loadPatchById patchId = deserializePatchObject patchId >>= \case S.PatchFormat.Full li p -> s2cPatch (l2sPatchFull li p) S.PatchFormat.Diff ref li d -> doDiff ref [l2sPatchDiff li d] where - doDiff :: EDB m => Db.PatchObjectId -> [S.PatchDiff] -> m C.Patch + doDiff :: EDB m => Db.PatchObjectId -> [S.PatchDiff] -> m C.Branch.Patch doDiff ref ds = deserializePatchObject ref >>= \case S.PatchFormat.Full li f -> joinFull (l2sPatchFull li f) ds S.PatchFormat.Diff ref' li' d' -> doDiff ref' (l2sPatchDiff li' d' : ds) - joinFull :: EDB m => S.Patch -> [S.PatchDiff] -> m C.Patch + joinFull :: EDB m => S.Patch -> [S.PatchDiff] -> m C.Branch.Patch joinFull f [] = s2cPatch f joinFull (S.Patch termEdits typeEdits) (S.PatchDiff addedTermEdits addedTypeEdits removedTermEdits removedTypeEdits : ds) = joinFull f' ds where @@ -946,16 +982,16 @@ loadPatchById patchId = (lookupPatchLocalHash li) (lookupPatchLocalDefn li) -savePatch :: EDB m => PatchHash -> C.Patch -> m Db.PatchObjectId +savePatch :: EDB m => PatchHash -> C.Branch.Patch -> m Db.PatchObjectId savePatch h c = do (li, lPatch) <- c2lPatch c hashId <- Q.saveHashHash (unPatchHash h) let bytes = S.putBytes S.putPatchFormat $ S.PatchFormat.Full li lPatch Db.PatchObjectId <$> Q.saveObject hashId OT.Patch bytes -s2cPatch :: EDB m => S.Patch -> m C.Patch +s2cPatch :: EDB m => S.Patch -> m C.Branch.Patch s2cPatch (S.Patch termEdits typeEdits) = - C.Patch + C.Branch.Patch <$> Map.bitraverse h2cReferent (Set.traverse s2cTermEdit) termEdits <*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits @@ -964,7 +1000,6 @@ deserializePatchObject id = (liftQ . 
Q.loadObjectById) (Db.unPatchObjectId id) >>= getFromBytesOr (ErrPatch id) S.getPatchFormat - -- * Searches termsHavingType :: EDB m => C.Reference -> m (Set C.Referent.Id) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index d570621f7f..f930f668f3 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -48,6 +48,8 @@ data Integrity | UnknownObjectId ObjectId | UnknownCausalHashId CausalHashId | UnknownCausalOldHashId CausalOldHashId + | UnknownHash Hash + | UnknownText Text | NoObjectForHashId HashId | NoNamespaceRoot | MultipleNamespaceRoots [CausalHashId] @@ -62,7 +64,7 @@ noExcept a = runExceptT a >>= \case -- noMaybe :: Maybe a -> a -- noMaybe = fromMaybe (error "unexpected Nothing") -orError :: MonadError Integrity m => Integrity -> Maybe b -> m b +orError :: MonadError e m => e -> Maybe b -> m b orError e = maybe (throwError e) pure -- type DerivedReferent = Referent.Id ObjectId ObjectId @@ -85,6 +87,9 @@ loadHashId base32 = queryOnly sql (Only base32) loadHashIdByHash :: DB m => Hash -> m (Maybe HashId) loadHashIdByHash = loadHashId . Hash.toBase32Hex +expectHashIdByHash :: EDB m => Hash -> m HashId +expectHashIdByHash h = loadHashIdByHash h >>= orError (UnknownHash h) + loadHashById :: EDB m => HashId -> m Base32Hex loadHashById h = queryOnly sql (Only h) >>= orError (UnknownHashId h) where sql = [here| SELECT base32 FROM hash WHERE id = ? |] @@ -97,6 +102,9 @@ loadText :: DB m => Text -> m (Maybe TextId) loadText t = queryOnly sql (Only t) where sql = [here| SELECT id FROM text WHERE text = ? |] +expectText :: EDB m => Text -> m TextId +expectText t = loadText t >>= orError (UnknownText t) + loadTextById :: EDB m => TextId -> m Text loadTextById h = queryOnly sql (Only h) >>= orError (UnknownTextId h) where sql = [here| SELECT text FROM text WHERE id = ? 
|] diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index a421030037..a08312abee 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -4,6 +4,7 @@ import Data.Map (Map) import Data.Set (Set) import Data.Text (Text) import U.Codebase.Causal (CausalHead) +import qualified U.Codebase.Causal as C import U.Codebase.HashTags (BranchHash, CausalHash, PatchHash) import U.Codebase.Reference (Reference) import U.Codebase.Referent (Referent) @@ -14,14 +15,16 @@ newtype NameSegment = NameSegment Text deriving (Eq, Ord, Show) newtype MdValues = MdValues (Set Reference) deriving (Eq, Ord, Show) -type Root m = CausalHead m CausalHash BranchHash (Branch m) +type Causal m = CausalHead m CausalHash BranchHash (Branch m) + +type Spine m = C.Causal m CausalHash BranchHash (Branch m) -- | V2.Branch is like V1.Branch0; I would rename it, at least temporarily, but too hard. data Branch m = Branch { terms :: Map NameSegment (Map Referent (m MdValues)), types :: Map NameSegment (Map Reference (m MdValues)), patches :: Map NameSegment (PatchHash, m Patch), - children :: Map NameSegment (Root m) + children :: Map NameSegment (Causal m) } data Patch = Patch diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 5edcec1f94..7918f34d44 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -14,7 +14,9 @@ import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.Trans.Maybe (MaybeT) import Data.Bifunctor (Bifunctor (first), second) import Data.Foldable (Foldable (toList), traverse_) -import Data.Functor (void) +-- import qualified U.Codebase.Sqlite.Operations' as Ops + +import Data.Functor (void, (<&>)) import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) @@ -26,8 +28,6 @@ import Data.Word (Word64) import 
Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite import System.FilePath (()) --- import qualified U.Codebase.Sqlite.Operations' as Ops - import U.Codebase.HashTags (CausalHash (unCausalHash)) import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Sqlite.ObjectType as OT @@ -255,9 +255,9 @@ sqliteCodebase root = do -- if this blows up on cromulent hashes, then switch from `hashToHashId` -- to one that returns Maybe. getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) - getBranchForHash _h = error "todo" - -- runDB conn $ - -- Cv.causalbranch2to1 $ Ops.loadBranchByCausalHash (Cv.hash1to2 h) + getBranchForHash h = + runDB conn $ + Cv.causalbranch2to1 $ Ops.loadBranchByCausalHash (Cv.hash1to2 h) dependentsImpl :: Reference -> IO (Set Reference.Id) dependentsImpl r = @@ -318,10 +318,16 @@ sqliteCodebase root = do reflogPath root = root "reflog" termsOfTypeImpl :: Reference -> IO (Set Referent.Id) - termsOfTypeImpl = error "todo" + termsOfTypeImpl r = + runDB conn $ + Ops.termsHavingType (Cv.reference1to2 r) + >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) - termsMentioningTypeImpl = error "todo" + termsMentioningTypeImpl r = + runDB conn $ + Ops.termsMentioningType (Cv.reference1to2 r) + >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) hashLength :: IO Int hashLength = pure 10 diff --git a/questions.md b/questions.md index e5c6261cd9..fb9ec88849 100644 --- a/questions.md +++ b/questions.md @@ -1,6 +1,48 @@ -Task list: -- [x] error reporting for deserialization failures -- [x] error reporting for codebase consistency issues +next steps: + +- [ ] fix up `Operations.loadBranchByCausalHash`; currently it's getting a single namespace, but we need to somewhere get the causal history. 
+ - [ ] load a causal, allowing a missing value (C.Branch.Spine) + - [x] load a causal and require its value (C.Branch.Causal) + - [ ] load a causal, returning nothing if causal is unknown +- [ ] `SqliteCodebase.getRootBranch` +- [ ] `SqliteCodebase.getBranchForHash` +- [ ] `SqliteCodebase.putRootBranch` +- [ ] `SqliteCodebase.syncFromDirectory` +- [ ] `SqliteCodebase.syncToDirectory` +- [ ] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? + +### SqliteCodebase progress (V1 -> V2 adaptor) + +| operation | status | notes | +| ----------------------- | ------ | ----- | +| getTerm | ✔ | | +| getTypeOfTerm | ✔ | | +| getTypeDeclaration | ✔ | | +| putTerm | ✔ | | +| putTypeDeclaration | ✔ | | +| getRootBranch | todo | | +| putRootBranch | todo | | +| rootBranchUpdates | todo | | +| getBranchForHash | todo | | +| dependentsImpl | ✔ | | +| syncFromDirectory | todo | | +| syncToDirectory | todo | | +| watches | ✔ | | +| getWatch | ✔ | | +| putWatch | ✔ | | +| getReflog | ✔ | | +| appendRefLog | ✔ | | +| termsOfTypeImpl | ✔ | | +| termsMentioningTypeImpl | ✔ | | +| hashLength | ✔ | | +| termReferencesByPrefix | ✔ | | +| declReferencesByPrefix | ✔ | | +| referentsByPrefix | ✔ | | +| branchHashLength | ✔ | | +| branchHashesByPrefix | ✔ | | + + +## less organized stuff below | thing | v1↔v2 | v2↔v2s | desc. | |-----|-----|-----|---| @@ -74,19 +116,15 @@ Task list: ### Saving & loading? 
-| Saving & loading objects | name | notes | -| ------------------------------------- | ---------------------------- | ----------------------------------------------------------- | -| `Patch` ↔ `PatchObjectId` | `savePatch`, `loadPatchById` | | -| `H.Hash -> m (Maybe (Branch.Root m))` | `loadBranchByCausalHash` | wip | -| `CausalHashId -> m (Maybe Branch)` | `loadBranchByCausalHashId` | equivalent to old `Branch0`, *not sure if actually useful?* | -| `BranchObjectId -> m (Branch m)` | `loadBranchByObjectId` | equivalent to old `Branch0` | +| Saving & loading objects | name | notes | +| --------------------------------------- | ---------------------------- | ----------------------------------------------------------- | +| `Patch` ↔ `PatchObjectId` | `savePatch`, `loadPatchById` | | +| `H.Hash -> m (Maybe (Branch.Causal m))` | `loadBranchByCausalHash` | wip | +| `CausalHashId -> m (Maybe Branch)` | `loadBranchByCausalHashId` | equivalent to old `Branch0`, *not sure if actually useful?* | +| `BranchObjectId -> m (Branch m)` | `loadBranchByObjectId` | equivalent to old `Branch0` | ### Deserialization helpers -| -| -| - | Deserialization helpers | | notes | | ------------------------------------------------------------ | ------------------------------------------------------------ | ----- | | `decodeComponentLengthOnly` | `ByteString -> m Word64` | | @@ -119,50 +157,15 @@ Task list: | `termsMentioningType` | " || | `termReferencesByPrefix` | `Text -> Maybe Word64 -> m [Reference.Id]` || | `declReferencesByPrefix` | `Text -> Maybe Word64 -> m [Reference.Id]` || -| `saveRootBranch` | `Branch.Root m -> m (Db.BranchObjectId, Db.CausalHashId)` |wip| - -### SqliteCodebase - -| operation | status | notes | -| ----------------------- | ------ | ----- | -| getTerm | ✔ | | -| getTypeOfTerm | ✔ | | -| getTypeDeclaration | ✔ | | -| putTerm | ✔ | | -| putTypeDeclaration | ✔ | | -| getRootBranch | todo | | -| putRootBranch | todo | | -| rootBranchUpdates | todo | | -| 
getBranchForHash | todo | | -| dependentsImpl | ✔ | | -| syncFromDirectory | todo | | -| syncToDirectory | todo | | -| watches | ✔ | | -| getWatch | ✔ | | -| putWatch | ✔ | | -| getReflog | ✔ | | -| appendRefLog | ✔ | | -| termsOfTypeImpl | todo | | -| termsMentioningTypeImpl | todo | | -| hashLength | ✔ | | -| termReferencesByPrefix | ✔ | | -| declReferencesByPrefix | ✔ | | -| referentsByPrefix | ✔ | | -| branchHashLength | ✔ | | -| branchHashesByPrefix | ✔ | | - - - - - - -Question: should the dependents / dependency index be a Relation Reference Reference (like now) a Relation Referent Reference? +| `saveRootBranch` | `Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId)` |wip| -Example, if you have `type Foo = Blah | Blah2 Nat`, +## Questions: +Q: Should the dependents / dependency index include individual constructors? +Against: No, that index is mainly for refactors; dependencies within objects -Advantages: +For: e.g. `type Foo = Blah | Blah2 Nat`, * If patches can replace constructors (not just types or terms), then having the index keyed by `Referent` lets you efficiently target the definitions that use those constructors. * Also lets you find things that depend on `Blah2` (rather than depending on `Foo`). 
From 2085a814d3a36563d1b9ec6a9dbd121c3c8d5faa Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 15 Dec 2020 15:11:54 -0500 Subject: [PATCH 057/225] wip --- .../U/Codebase/Sqlite/Causal.hs | 18 +++--- .../U/Codebase/Sqlite/Operations.hs | 44 +++++--------- .../U/Codebase/Sqlite/Queries.hs | 59 ++++++++++--------- .../src/Unison/Codebase/Branch.hs | 4 +- .../src/Unison/Codebase/SqliteCodebase.hs | 11 ++-- .../Codebase/SqliteCodebase/Conversions.hs | 42 +++++++++---- questions.md | 11 ++-- 7 files changed, 101 insertions(+), 88 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs index c49bac897d..3b4fd09db0 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs @@ -1,12 +1,12 @@ module U.Codebase.Sqlite.Causal where -data Causal hc he = RawCausal { - valueHash :: he, - parentHashes :: [hc] -} +-- data Causal hc he = RawCausal { +-- valueHash :: he, +-- parentHashes :: [hc] +-- } -data CausalHead id hc he = RawCausalHead { - headValueId :: id, - headValueHash :: he, - headParentHashes :: [hc] -} +-- data CausalHead id hc he = RawCausalHead { +-- headValueId :: id, +-- headValueHash :: he, +-- headParentHashes :: [hc] +-- } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 59db7b7e77..ebd700ab16 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -59,7 +59,6 @@ import qualified U.Codebase.Sqlite.Branch.Format as S.BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as S import qualified U.Codebase.Sqlite.Branch.Full as S.Branch.Full import qualified U.Codebase.Sqlite.Branch.Full as S.MetadataSet -import U.Codebase.Sqlite.DbId (BranchHashId (unBranchHashId)) import qualified U.Codebase.Sqlite.DbId as Db import qualified 
U.Codebase.Sqlite.Decl.Format as S.Decl import U.Codebase.Sqlite.LocalIds @@ -767,39 +766,28 @@ lookupBranchLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLoo lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w --- fix me... --- [ ] load a causal, allowing a missing value (C.Branch.Spine) --- [x] load a causal and require its value (C.Branch.Causal) --- [ ] load a causal, returning nothing if causal is unknown - -loadCausalSpineByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Spine m) -loadCausalSpineByCausalHashId = error "todo" - loadCausalBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Causal m) loadCausalBranchByCausalHashId id = loadCausalSpineByCausalHashId id <&> \case C.Causal hc he parents mme -> C.CausalHead hc he parents $ mme >>= (Q.orError $ ExpectedBranch hc he) --- loadCausalBranchHeadByCausalHash :: EDB m => CausalHash -> m (Maybe (C.Branch.Causal m)) --- loadCausalBranchHeadByCausalHash hc = do --- hId <- (fmap Db.CausalHashId . liftQ . Q.expectHashIdByHash . unCausalHash) hc --- loadCausalBranchByCausalHashId hId - --- loadCausalBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch.Spine m)) --- loadCausalBranchByCausalHashId id = runMaybeT do --- namespace <- MaybeT loadBranchByCausalHashId id --- parentHashIds <- Q.loadCausalParents id --- loadParents <- for parentHashIds \hId -> do --- h <- (fmap (CausalHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unCausalHashId) hId --- pure (h, loadCausalBranchByCausalHashId hId >>= Q.orError ) --- error "todo" - --- hb <- --- (fmap unBranchHashId . liftQ . Q.loadCausalValueHashId) id --- >>= fmap (BranchHash . H.fromBase32Hex) . liftQ . 
Q.loadHashById - --- C.CausalHead hc hb parentsMap (pure namespace) +loadCausalBranchByCausalHash :: EDB m => CausalHash -> m (Maybe (C.Branch.Causal m)) +loadCausalBranchByCausalHash hc = do + Q.loadCausalHashIdByCausalHash hc >>= \case + Just chId -> Just <$> loadCausalBranchByCausalHashId chId + Nothing -> pure Nothing + +loadCausalSpineByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Spine m) +loadCausalSpineByCausalHashId id = do + hc <- loadCausalHashById id + hb <- loadValueHashByCausalHashId id + let loadNamespace = loadBranchByCausalHashId id + parentHashIds <- Q.loadCausalParents id + loadParents <- for parentHashIds \hId -> do + h <- loadCausalHashById hId + pure (h, loadCausalSpineByCausalHashId hId) + pure $ C.Causal hc hb (Map.fromList loadParents) loadNamespace -- | is this even a thing? loading a branch by causal hash? yes I guess so. loadBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch.Branch m)) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index f930f668f3..90fff8efc2 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -27,7 +27,7 @@ import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) import U.Codebase.Reference (Reference') -import U.Codebase.Sqlite.DbId (BranchObjectId(..), BranchHashId(..), CausalHashId, CausalOldHashId, HashId (..), ObjectId (..), TextId) +import U.Codebase.Sqlite.DbId (CausalHashId(..), BranchObjectId(..), BranchHashId(..), CausalOldHashId, HashId (..), ObjectId (..), TextId) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent @@ -37,6 +37,7 @@ import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified 
U.Util.Hash as Hash import UnliftIO (withRunInIO, MonadUnliftIO) +import U.Codebase.HashTags (unCausalHash, CausalHash) -- * types type DB m = (MonadIO m, MonadReader Connection m) @@ -61,14 +62,9 @@ noExcept a = runExceptT a >>= \case Right a -> pure a Left e -> error $ "unexpected error: " ++ show e --- noMaybe :: Maybe a -> a --- noMaybe = fromMaybe (error "unexpected Nothing") - orError :: MonadError e m => e -> Maybe b -> m b orError e = maybe (throwError e) pure --- type DerivedReferent = Referent.Id ObjectId ObjectId --- type DerivedReference = Reference.Id ObjectId type TypeHashReference = Reference' TextId HashId -- * main squeeze @@ -81,17 +77,21 @@ saveHashHash :: DB m => Hash -> m HashId saveHashHash = saveHash . Hash.toBase32Hex loadHashId :: DB m => Base32Hex -> m (Maybe HashId) -loadHashId base32 = queryOnly sql (Only base32) +loadHashId base32 = queryAtom sql (Only base32) where sql = [here| SELECT id FROM hash WHERE base32 = ? |] loadHashIdByHash :: DB m => Hash -> m (Maybe HashId) loadHashIdByHash = loadHashId . Hash.toBase32Hex +loadCausalHashIdByCausalHash :: DB m => CausalHash -> m (Maybe CausalHashId) +loadCausalHashIdByCausalHash = + (fmap . fmap) CausalHashId . loadHashIdByHash . unCausalHash + expectHashIdByHash :: EDB m => Hash -> m HashId expectHashIdByHash h = loadHashIdByHash h >>= orError (UnknownHash h) loadHashById :: EDB m => HashId -> m Base32Hex -loadHashById h = queryOnly sql (Only h) >>= orError (UnknownHashId h) +loadHashById h = queryAtom sql (Only h) >>= orError (UnknownHashId h) where sql = [here| SELECT base32 FROM hash WHERE id = ? |] saveText :: DB m => Text -> m TextId @@ -99,14 +99,14 @@ saveText t = execute sql (Only t) >> queryOne (loadText t) where sql = [here| INSERT OR IGNORE INTO text (text) VALUES (?) |] loadText :: DB m => Text -> m (Maybe TextId) -loadText t = queryOnly sql (Only t) +loadText t = queryAtom sql (Only t) where sql = [here| SELECT id FROM text WHERE text = ? 
|] expectText :: EDB m => Text -> m TextId expectText t = loadText t >>= orError (UnknownText t) loadTextById :: EDB m => TextId -> m Text -loadTextById h = queryOnly sql (Only h) >>= orError (UnknownTextId h) +loadTextById h = queryAtom sql (Only h) >>= orError (UnknownTextId h) where sql = [here| SELECT text FROM text WHERE id = ? |] saveHashObject :: DB m => HashId -> ObjectId -> Int -> m () @@ -126,7 +126,7 @@ saveObject h t blob = |] loadObjectById :: EDB m => ObjectId -> m ByteString -loadObjectById oId = queryOnly sql (Only oId) >>= orError (UnknownObjectId oId) +loadObjectById oId = queryAtom sql (Only oId) >>= orError (UnknownObjectId oId) where sql = [here| SELECT bytes FROM object WHERE id = ? |] @@ -143,7 +143,7 @@ expectObjectIdForPrimaryHashId h = maybeObjectIdPrimaryHashId h >>= orError (UnknownHashId h) maybeObjectIdPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) -maybeObjectIdPrimaryHashId h = queryOnly sql (Only h) where sql = [here| +maybeObjectIdPrimaryHashId h = queryAtom sql (Only h) where sql = [here| SELECT id FROM object WHERE primary_hash_id = ? |] @@ -152,13 +152,13 @@ expectObjectIdForAnyHashId h = maybeObjectIdForAnyHashId h >>= orError (NoObjectForHashId h) maybeObjectIdForAnyHashId :: DB m => HashId -> m (Maybe ObjectId) -maybeObjectIdForAnyHashId h = queryOnly sql (Only h) where sql = [here| +maybeObjectIdForAnyHashId h = queryAtom sql (Only h) where sql = [here| SELECT object_id FROM hash_object WHERE hash_id = ? |] -- |All objects have corresponding hashes. 
loadPrimaryHashByObjectId :: EDB m => ObjectId -> m Base32Hex -loadPrimaryHashByObjectId oId = queryOnly sql (Only oId) >>= orError (UnknownObjectId oId) +loadPrimaryHashByObjectId oId = queryAtom sql (Only oId) >>= orError (UnknownObjectId oId) where sql = [here| SELECT hash.base32 FROM hash INNER JOIN object ON object.primary_hash_id = hash.id @@ -195,12 +195,12 @@ saveCausal self value = execute sql (self, value) where sql = [here| loadCausalValueHashId :: EDB m => CausalHashId -> m BranchHashId loadCausalValueHashId id = - queryOnly sql (Only id) >>= orError (UnknownCausalHashId id) where sql = [here| + queryAtom sql (Only id) >>= orError (UnknownCausalHashId id) where sql = [here| SELECT value_hash_id FROM causal WHERE self_hash_id = ? |] loadBranchObjectIdByCausalHashId :: EDB m => CausalHashId -> m (Maybe BranchObjectId) -loadBranchObjectIdByCausalHashId id = queryOnly sql (Only id) where sql = [here| +loadBranchObjectIdByCausalHashId id = queryAtom sql (Only id) where sql = [here| SELECT value_object_id FROM causal WHERE self_hash_id = ? |] @@ -211,13 +211,13 @@ saveCausalOld v1 v2 = execute sql (v1, v2) where sql = [here| loadCausalHashIdByCausalOldHash :: EDB m => CausalOldHashId -> m CausalHashId loadCausalHashIdByCausalOldHash id = - queryOnly sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| + queryAtom sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| SELECT new_hash_id FROM causal_old where old_hash_id = ? |] loadOldCausalValueHash :: EDB m => CausalOldHashId -> m BranchHashId loadOldCausalValueHash id = - queryOnly sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| + queryAtom sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| SELECT value_hash_id FROM causal INNER JOIN causal_old ON self_hash_id = new_hash_id WHERE old_hash_id = ? 
@@ -230,12 +230,12 @@ saveCausalParent child parent = execute sql (child, parent) where |] loadCausalParents :: DB m => CausalHashId -> m [CausalHashId] -loadCausalParents h = queryList sql (Only h) where sql = [here| +loadCausalParents h = queryAtoms sql (Only h) where sql = [here| SELECT parent_id FROM causal_parent WHERE causal_id = ? |] loadNamespaceRoot :: EDB m => m CausalHashId -loadNamespaceRoot = queryList sql () >>= \case +loadNamespaceRoot = queryAtoms sql () >>= \case [] -> throwError NoNamespaceRoot [id] -> pure id ids -> throwError (MultipleNamespaceRoots ids) @@ -261,7 +261,7 @@ saveWatch k r blob = execute sql (r :. Only blob) >> execute sql2 (r :. Only k) |] loadWatch :: DB m => WatchKind -> Reference.IdH -> m (Maybe ByteString) -loadWatch k r = queryOnly sql (Only k :. r) where sql = [here| +loadWatch k r = queryAtom sql (Only k :. r) where sql = [here| SELECT bytes FROM watch WHERE watch_kind_id = ? AND hash_id = ? @@ -353,7 +353,7 @@ getDependencyIdsForDependent dependent = query sql dependent where sql = [here| |] objectIdByBase32Prefix :: DB m => ObjectType -> Text -> m [ObjectId] -objectIdByBase32Prefix objType prefix = queryList sql (objType, prefix <> "%") where sql = [here| +objectIdByBase32Prefix objType prefix = queryAtoms sql (objType, prefix <> "%") where sql = [here| SELECT object.id FROM object INNER JOIN hash_object ON hash_object.object_id = object.id INNER JOIN hash ON hash_object.hash_id = hash.id @@ -362,33 +362,34 @@ objectIdByBase32Prefix objType prefix = queryList sql (objType, prefix <> "%") w |] causalHashIdByBase32Prefix :: DB m => Text -> m [CausalHashId] -causalHashIdByBase32Prefix prefix = queryList sql (Only $ prefix <> "%") where sql = [here| +causalHashIdByBase32Prefix prefix = queryAtoms sql (Only $ prefix <> "%") where sql = [here| SELECT self_hash_id FROM causal INNER JOIN hash ON id = self_hash_id WHERE base32 LIKE ? 
|] namespaceHashIdByBase32Prefix :: DB m => Text -> m [BranchHashId] -namespaceHashIdByBase32Prefix prefix = queryList sql (Only $ prefix <> "%") where sql = [here| +namespaceHashIdByBase32Prefix prefix = queryAtoms sql (Only $ prefix <> "%") where sql = [here| SELECT value_hash_id FROM causal INNER JOIN hash ON id = value_hash_id WHERE base32 LIKE ? |] -- * helper functions -queryList :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f [b] -queryList q r = map fromOnly <$> query q r + +queryAtoms :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f [b] +queryAtoms q r = map fromOnly <$> query q r queryMaybe :: (DB f, ToRow q, FromRow b) => SQLite.Query -> q -> f (Maybe b) queryMaybe q r = headMay <$> query q r -queryOnly :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f (Maybe b) -queryOnly q r = fmap fromOnly <$> queryMaybe q r +queryAtom :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f (Maybe b) +queryAtom q r = fmap fromOnly <$> queryMaybe q r queryOne :: Functor f => f (Maybe b) -> f b queryOne = fmap fromJust queryExists :: (DB m, ToRow q) => SQLite.Query -> q -> m Bool -queryExists q r = not . null . map (id @SQLData) <$> queryList q r +queryExists q r = not . null . map (id @SQLData) <$> queryAtoms q r query :: (DB m, ToRow q, FromRow r) => SQLite.Query -> q -> m [r] query q r = do c <- ask; liftIO $ SQLite.query c q r diff --git a/parser-typechecker/src/Unison/Codebase/Branch.hs b/parser-typechecker/src/Unison/Codebase/Branch.hs index 0568038acd..fb96565a0e 100644 --- a/parser-typechecker/src/Unison/Codebase/Branch.hs +++ b/parser-typechecker/src/Unison/Codebase/Branch.hs @@ -9,6 +9,7 @@ module Unison.Codebase.Branch Branch(..) , Branch0(..) , MergeMode(..) + , UnwrappedBranch , Raw(..) 
, Star , Hash @@ -157,8 +158,9 @@ import Unison.HashQualified (HashQualified) import qualified Unison.LabeledDependency as LD import Unison.LabeledDependency (LabeledDependency) -newtype Branch m = Branch { _history :: Causal m Raw (Branch0 m) } +newtype Branch m = Branch { _history :: UnwrappedBranch m } deriving (Eq, Ord) +type UnwrappedBranch m = Causal m Raw (Branch0 m) type Hash = Causal.RawHash Raw type EditHash = Hash.Hash diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 7918f34d44..5be2c87a1e 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -16,7 +16,7 @@ import Data.Bifunctor (Bifunctor (first), second) import Data.Foldable (Foldable (toList), traverse_) -- import qualified U.Codebase.Sqlite.Operations' as Ops -import Data.Functor (void, (<&>)) +import Data.Functor (void) import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) @@ -255,9 +255,12 @@ sqliteCodebase root = do -- if this blows up on cromulent hashes, then switch from `hashToHashId` -- to one that returns Maybe. getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) - getBranchForHash h = - runDB conn $ - Cv.causalbranch2to1 $ Ops.loadBranchByCausalHash (Cv.hash1to2 h) + getBranchForHash h = runDB conn do + Ops.loadCausalBranchByCausalHash (Cv.branchHash1to2 h) >>= \case + Just b -> + pure . Just . 
Branch.transform (runDB conn) + =<< Cv.unsafecausalbranch2to1 b + Nothing -> pure Nothing dependentsImpl :: Reference -> IO (Set Reference.Id) dependentsImpl r = diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 767c5f95e1..f4ee13763f 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -6,8 +6,8 @@ import Data.Bifunctor (Bifunctor (bimap)) import qualified Data.ByteString.Short as SBS import Data.Either (fromRight) import qualified Data.Map as Map +import Data.Maybe (fromJust) import Data.Text (Text, pack) -import qualified U.Codebase.Branch as V2 import qualified U.Codebase.Branch as V2.Branch import qualified U.Codebase.Causal as V2 import qualified U.Codebase.Decl as V2.Decl @@ -278,6 +278,9 @@ rreferenceid1to2 h (V1.Reference.Id h' i _n) = V2.Reference.Id oh i hash1to2 :: Hash -> V2.Hash hash1to2 (V1.Hash bs) = V2.Hash.Hash (SBS.toShort bs) +branchHash1to2 :: V1.Branch.Hash -> V2.CausalHash +branchHash1to2 = V2.CausalHash . hash1to2 . V1.Causal.unRawHash + reference2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> V2.Reference -> m V1.Reference reference2to1 lookupSize = \case V2.ReferenceBuiltin t -> pure $ V1.Reference.Builtin t @@ -374,29 +377,42 @@ type1to2' convertRef = V1.Kind.Star -> V2.Kind.Star V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) - --- type Root m = CausalHead m CausalHash BranchHash (Branch m) --- newtype Branch m = Branch { _history :: Causal m Raw (Branch0 m) } -causalbranch2to1 :: forall m. 
Applicative m => (V2.CausalHash -> m (V2.Branch.Root m)) -> V2.Branch.Root m -> m (V1.Branch.Branch m) -causalbranch2to1 _loadParent (V2.CausalHead hc _he (Map.toList -> parents) me) = do +-- |forces loading v1 branches even if they may not exist +unsafecausalbranch2to1 :: Monad m => V2.Branch.Causal m -> m (V1.Branch.Branch m) +unsafecausalbranch2to1 (V2.CausalHead hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc causalHash2to1 :: V2.CausalHash -> V1.Causal.RawHash V1.Branch.Raw causalHash2to1 = V1.Causal.RawHash . hash2to1 . V2.unCausalHash - loadParent1 :: - V2.Causal m V2.CausalHash V2.BranchHash (V2.Branch m) -> - V1.Causal.Causal m V1.Branch.Raw (V1.Branch.Branch0 m) - loadParent1 = error "todo" V1.Branch.Branch <$> case parents of [] -> V1.Causal.One currentHash <$> fmap branch2to1 me [(hp, mp)] -> do let parentHash = causalHash2to1 hp V1.Causal.Cons currentHash <$> fmap branch2to1 me - <*> pure (parentHash, loadParent1 <$> mp) + <*> pure (parentHash, unsafecausalspine2to1 =<< mp) merge -> do - let tailsList = map (bimap causalHash2to1 (fmap loadParent1)) merge + let tailsList = map (bimap causalHash2to1 (unsafecausalspine2to1 =<<)) merge e <- me pure $ V1.Causal.Merge currentHash (branch2to1 e) (Map.fromList tailsList) -branch2to1 :: V2.Branch m -> V1.Branch.Branch0 m +-- |force loading a v1 branch even when it may not exist +unsafecausalspine2to1 :: forall m. Monad m => V2.Branch.Spine m -> m (V1.Branch.UnwrappedBranch m) +unsafecausalspine2to1 (V2.Causal hc _he (Map.toList -> parents) me) = do + let currentHash = causalHash2to1 hc + causalHash2to1 :: V2.CausalHash -> V1.Causal.RawHash V1.Branch.Raw + causalHash2to1 = V1.Causal.RawHash . hash2to1 . V2.unCausalHash + case parents of + [] -> V1.Causal.One currentHash <$> fmap (branch2to1 . fromJust) me + [(hp, mp)] -> do + let parentHash = causalHash2to1 hp + head <- fmap (branch2to1 . 
fromJust) me + let loadParent = unsafecausalspine2to1 =<< mp + pure $ + V1.Causal.Cons currentHash head (parentHash, loadParent) + merge -> do + let tailsList = map (bimap causalHash2to1 (unsafecausalspine2to1 =<<)) merge + e <- fromJust <$> me + pure $ V1.Causal.Merge currentHash (branch2to1 e) (Map.fromList tailsList) + +branch2to1 :: V2.Branch.Branch m -> V1.Branch.Branch0 m branch2to1 = error "todo" diff --git a/questions.md b/questions.md index fb9ec88849..d4203d85b0 100644 --- a/questions.md +++ b/questions.md @@ -1,11 +1,13 @@ next steps: -- [ ] fix up `Operations.loadBranchByCausalHash`; currently it's getting a single namespace, but we need to somewhere get the causal history. - - [ ] load a causal, allowing a missing value (C.Branch.Spine) +- [x] fix up `Operations.loadBranchByCausalHash`; currently it's getting a single namespace, but we need to somewhere get the causal history. + - [x] load a causal, allowing a missing value (C.Branch.Spine) - [x] load a causal and require its value (C.Branch.Causal) - - [ ] load a causal, returning nothing if causal is unknown + - [x] load a causal, returning nothing if causal is unknown +- [x] `SqliteCodebase.Conversions.unsafecausalbranch2to1` - [ ] `SqliteCodebase.getRootBranch` -- [ ] `SqliteCodebase.getBranchForHash` +- [x] `SqliteCodebase.getBranchForHash` +- [ ] `SqliteCodebase.Conversions.causalbranch1to2` - [ ] `SqliteCodebase.putRootBranch` - [ ] `SqliteCodebase.syncFromDirectory` - [ ] `SqliteCodebase.syncToDirectory` @@ -113,6 +115,7 @@ next steps: | User `Term` | `c2sTerm`, `s2cTermWithType`, `s2cTerm`, `s2cTypeOfTerm` | | | watch expressions | `c2wTerm`, `w2cTerm` | | | User `Decl` | `c2sDecl`, | | +| `Causal` | | todo | ### Saving & loading? 
From f5b3d820ff280a54e68a3c70dc50dfc265f0efb7 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 16 Dec 2020 16:33:01 -0500 Subject: [PATCH 058/225] getRootBranch --- .../U/Codebase/Sqlite/Operations.hs | 6 +++++ .../src/Unison/Codebase/SqliteCodebase.hs | 24 ++++++++++++++++--- .../Codebase/SqliteCodebase/Conversions.hs | 7 +++--- questions.md | 3 ++- 4 files changed, 32 insertions(+), 8 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index ebd700ab16..28b5df7fc1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -192,6 +192,9 @@ loadValueHashByCausalHashId = loadValueHashById <=< liftQ . Q.loadCausalValueHas loadValueHashById :: EDB m => Db.BranchHashId -> m BranchHash loadValueHashById = fmap (BranchHash . H.fromBase32Hex) . liftQ . Q.loadHashById . Db.unBranchHashId +-- loadRootCausalHash :: EDB m => m CausalHash +-- loadRootCausalHash = loadCausalHashById =<< liftQ Q.loadNamespaceRoot + -- * Reference transformations -- ** read existing references @@ -766,6 +769,9 @@ lookupBranchLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLoo lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! 
fromIntegral w +loadRootCausal :: EDB m => m (C.Branch.Causal m) +loadRootCausal = liftQ Q.loadNamespaceRoot >>= loadCausalBranchByCausalHashId + loadCausalBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Causal m) loadCausalBranchByCausalHashId id = loadCausalSpineByCausalHashId id <&> \case diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 5be2c87a1e..0c2c7f57d2 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -7,15 +7,16 @@ module Unison.Codebase.SqliteCodebase where -- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) +-- import qualified U.Codebase.Sqlite.Operations' as Ops + import Control.Monad (filterM, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) import Control.Monad.Extra (ifM, unlessM) import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.Trans.Maybe (MaybeT) import Data.Bifunctor (Bifunctor (first), second) +import qualified Data.Either.Combinators as Either import Data.Foldable (Foldable (toList), traverse_) --- import qualified U.Codebase.Sqlite.Operations' as Ops - import Data.Functor (void) import Data.Map (Map) import qualified Data.Map as Map @@ -66,6 +67,7 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import UnliftIO (MonadIO, catchIO) import UnliftIO.STM +import qualified U.Codebase.Sqlite.Queries as Q -- 1) buffer up the component -- 2) in the event that the component is complete, then what? @@ -244,7 +246,23 @@ sqliteCodebase root = do ) getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) - getRootBranch = error "todo" + getRootBranch = + fmap (Either.mapLeft err) + . runExceptT + . flip runReaderT conn + . 
fmap (Branch.transform (runDB conn)) + $ Cv.unsafecausalbranch2to1 =<< Ops.loadRootCausal + where + err :: Ops.Error -> Codebase1.GetRootBranchError + err = \case + Ops.DatabaseIntegrityError Q.NoNamespaceRoot -> + Codebase1.NoRootBranch + Ops.DecodeError (Ops.ErrBranch oId) _bytes _msg -> + Codebase1.CouldntParseRootBranch $ + "Couldn't decode " ++ show oId ++ ": " ++ _msg + Ops.ExpectedBranch ch _bh -> + Codebase1.CouldntLoadRootBranch $ Cv.causalHash2to1 ch + e -> error $ show e putRootBranch :: Branch IO -> IO () putRootBranch = error "todo" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index f4ee13763f..63626fd6f5 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -321,6 +321,9 @@ referentid2to1 lookupSize lookupCT = \case hash2to1 :: V2.Hash.Hash -> Hash hash2to1 (V2.Hash.Hash sbs) = V1.Hash (SBS.fromShort sbs) +causalHash2to1 :: V2.CausalHash -> V1.Causal.RawHash V1.Branch.Raw +causalHash2to1 = V1.Causal.RawHash . hash2to1 . V2.unCausalHash + ttype2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> V2.Term.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) ttype2to1 lookupSize = type2to1' (reference2to1 lookupSize) @@ -381,8 +384,6 @@ type1to2' convertRef = unsafecausalbranch2to1 :: Monad m => V2.Branch.Causal m -> m (V1.Branch.Branch m) unsafecausalbranch2to1 (V2.CausalHead hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc - causalHash2to1 :: V2.CausalHash -> V1.Causal.RawHash V1.Branch.Raw - causalHash2to1 = V1.Causal.RawHash . hash2to1 . V2.unCausalHash V1.Branch.Branch <$> case parents of [] -> V1.Causal.One currentHash <$> fmap branch2to1 me [(hp, mp)] -> do @@ -399,8 +400,6 @@ unsafecausalbranch2to1 (V2.CausalHead hc _he (Map.toList -> parents) me) = do unsafecausalspine2to1 :: forall m. 
Monad m => V2.Branch.Spine m -> m (V1.Branch.UnwrappedBranch m) unsafecausalspine2to1 (V2.Causal hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc - causalHash2to1 :: V2.CausalHash -> V1.Causal.RawHash V1.Branch.Raw - causalHash2to1 = V1.Causal.RawHash . hash2to1 . V2.unCausalHash case parents of [] -> V1.Causal.One currentHash <$> fmap (branch2to1 . fromJust) me [(hp, mp)] -> do diff --git a/questions.md b/questions.md index d4203d85b0..5da0dc64a3 100644 --- a/questions.md +++ b/questions.md @@ -5,7 +5,7 @@ next steps: - [x] load a causal and require its value (C.Branch.Causal) - [x] load a causal, returning nothing if causal is unknown - [x] `SqliteCodebase.Conversions.unsafecausalbranch2to1` -- [ ] `SqliteCodebase.getRootBranch` +- [x] `SqliteCodebase.getRootBranch` - [x] `SqliteCodebase.getBranchForHash` - [ ] `SqliteCodebase.Conversions.causalbranch1to2` - [ ] `SqliteCodebase.putRootBranch` @@ -161,6 +161,7 @@ next steps: | `termReferencesByPrefix` | `Text -> Maybe Word64 -> m [Reference.Id]` || | `declReferencesByPrefix` | `Text -> Maybe Word64 -> m [Reference.Id]` || | `saveRootBranch` | `Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId)` |wip| +| `loadRootCausal` | `m (C.Branch.Causal m)` || ## Questions: From 638245293bcf4fb68a8e3e448fd63b6950539102 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 17 Dec 2020 21:46:44 -0500 Subject: [PATCH 059/225] some saveRootBranch --- .../U/Codebase/Sqlite/Operations.hs | 19 ++++++++++++++++--- .../U/Codebase/Sqlite/Queries.hs | 12 +++++++++--- questions.md | 13 ++++++++----- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 28b5df7fc1..a4255d1736 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -752,10 +752,23 @@ s2cBranch (S.Branch.Full.Branch tms tps 
patches children) = traverse loadBranchByObjectId boId saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) -saveRootBranch (C.CausalHead _hc _he _parents _me) = error "todo" +saveRootBranch (C.CausalHead hc he _parents me) = do + chId <- liftQ (Q.saveCausalHash hc) + liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case + Just boId -> pure (boId, chId) + Nothing -> do + bhId <- liftQ (Q.saveBranchHash he) + sBranch <- c2sBranch =<< me + boId <- saveBranchObject bhId sBranch + liftQ (Q.saveCausal chId bhId) + pure (boId, chId) + where - _c2sBranch :: EDB m => C.Branch.Branch m -> m S.DbBranch - _c2sBranch = error "todo" + saveBranchObject :: Db.BranchHashId -> S.DbBranch -> m Db.BranchObjectId + saveBranchObject = error "todo" + c2sBranch :: C.Branch.Branch m -> m S.DbBranch + c2sBranch (C.Branch.Branch terms types patches children) = + error "todo" lookupBranchLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId lookupBranchLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! fromIntegral w diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 90fff8efc2..a8c8d62bda 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -26,8 +26,9 @@ import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (. 
import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) +import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') -import U.Codebase.Sqlite.DbId (CausalHashId(..), BranchObjectId(..), BranchHashId(..), CausalOldHashId, HashId (..), ObjectId (..), TextId) +import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, HashId (..), ObjectId (..), TextId) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent @@ -36,8 +37,7 @@ import qualified U.Codebase.WatchKind as WatchKind import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash -import UnliftIO (withRunInIO, MonadUnliftIO) -import U.Codebase.HashTags (unCausalHash, CausalHash) +import UnliftIO (MonadUnliftIO, withRunInIO) -- * types type DB m = (MonadIO m, MonadReader Connection m) @@ -83,6 +83,12 @@ loadHashId base32 = queryAtom sql (Only base32) loadHashIdByHash :: DB m => Hash -> m (Maybe HashId) loadHashIdByHash = loadHashId . Hash.toBase32Hex +saveCausalHash :: DB m => CausalHash -> m CausalHashId +saveCausalHash = fmap CausalHashId . saveHashHash . unCausalHash + +saveBranchHash :: DB m => BranchHash -> m BranchHashId +saveBranchHash = fmap BranchHashId . saveHashHash . unBranchHash + loadCausalHashIdByCausalHash :: DB m => CausalHash -> m (Maybe CausalHashId) loadCausalHashIdByCausalHash = (fmap . fmap) CausalHashId . loadHashIdByHash . 
unCausalHash diff --git a/questions.md b/questions.md index 5da0dc64a3..f44ab67193 100644 --- a/questions.md +++ b/questions.md @@ -7,11 +7,14 @@ next steps: - [x] `SqliteCodebase.Conversions.unsafecausalbranch2to1` - [x] `SqliteCodebase.getRootBranch` - [x] `SqliteCodebase.getBranchForHash` -- [ ] `SqliteCodebase.Conversions.causalbranch1to2` -- [ ] `SqliteCodebase.putRootBranch` -- [ ] `SqliteCodebase.syncFromDirectory` -- [ ] `SqliteCodebase.syncToDirectory` -- [ ] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? +- [ ] Writing a branch + - [ ] `SqliteCodebase.Conversions.causalbranch1to2` + - [ ] `SqliteCodebase.putRootBranch` +- [ ] Syncing a remote codebase + - [ ] `SqliteCodebase.syncFromDirectory` + - [ ] `SqliteCodebase.syncToDirectory` +- [ ] Managing external edit events? + - [ ] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? ### SqliteCodebase progress (V1 -> V2 adaptor) From 28f33dbfabbdc9676204fc31a63199c6c6361571 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 6 Jan 2021 11:04:37 -0500 Subject: [PATCH 060/225] saveRootBranch/patches --- .../U/Codebase/Sqlite/Operations.hs | 145 +++++++++++++++--- codebase2/util/U/Util/Lens.hs | 1 + 2 files changed, 123 insertions(+), 23 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index a4255d1736..5b47900291 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -3,6 +3,7 @@ {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE PartialTypeSignatures #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} @@ -15,9 +16,10 @@ import Control.Lens (Lens') import qualified Control.Lens as Lens import Control.Monad (join, (<=<)) import 
Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) -import Control.Monad.State (MonadState, evalStateT) +import Control.Monad.State (MonadState, StateT, evalStateT) +import qualified Control.Monad.Trans as Monad import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) -import Control.Monad.Writer (MonadWriter, runWriterT) +import Control.Monad.Writer (MonadWriter, WriterT, runWriterT) import qualified Control.Monad.Writer as Writer import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) @@ -109,6 +111,7 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +import Control.Monad.Trans (MonadTrans(lift)) -- * Error handling @@ -165,12 +168,25 @@ loadTextById = liftQ . Q.loadTextById -- because it came from a sync or from a save -- hashToHashId :: EDB m => H.Hash -> m Db.HashId -- hashToHashId = liftQ . Q.expectHashIdByHash -hashToObjectId :: EDB m => H.Hash -> m Db.ObjectId -hashToObjectId h = do + +-- | look up an existing object by its primary hash +primaryHashToExistingObjectId :: EDB m => H.Hash -> m Db.ObjectId +primaryHashToExistingObjectId h = do (Q.loadHashId . H.toBase32Hex) h >>= \case Just hashId -> liftQ $ Q.expectObjectIdForPrimaryHashId hashId Nothing -> throwError $ UnknownDependency h +primaryHashToExistingPatchObjectId :: EDB m => PatchHash -> m Db.PatchObjectId +primaryHashToExistingPatchObjectId = + fmap Db.PatchObjectId . primaryHashToExistingObjectId . unPatchHash + +primaryHashToMaybePatchObjectId :: EDB m => PatchHash -> m (Maybe Db.PatchObjectId) +primaryHashToMaybePatchObjectId = error "todo" + +primaryHashToExistingBranchObjectId :: EDB m => BranchHash -> m Db.BranchObjectId +primaryHashToExistingBranchObjectId = + fmap Db.BranchObjectId . primaryHashToExistingObjectId . 
unBranchHash + objectExistsForHash :: DB m => H.Hash -> m Bool objectExistsForHash h = isJust <$> runMaybeT do @@ -200,13 +216,13 @@ loadValueHashByCausalHashId = loadValueHashById <=< liftQ . Q.loadCausalValueHas -- ** read existing references c2sReference :: EDB m => C.Reference -> m S.Reference -c2sReference = bitraverse lookupTextId hashToObjectId +c2sReference = bitraverse lookupTextId primaryHashToExistingObjectId s2cReference :: EDB m => S.Reference -> m C.Reference s2cReference = bitraverse loadTextById loadHashByObjectId c2sReferenceId :: EDB m => C.Reference.Id -> m S.Reference.Id -c2sReferenceId = C.Reference.idH hashToObjectId +c2sReferenceId = C.Reference.idH primaryHashToExistingObjectId s2cReferenceId :: EDB m => S.Reference.Id -> m C.Reference.Id s2cReferenceId = C.Reference.idH loadHashByObjectId @@ -266,7 +282,7 @@ c2lPatch (C.Branch.Patch termEdits typeEdits) = done (lPatch, (textValues, hashValues, defnValues)) = do textIds <- liftQ $ traverse Q.saveText textValues hashIds <- liftQ $ traverse Q.saveHashHash hashValues - objectIds <- traverse hashToObjectId defnValues + objectIds <- traverse primaryHashToExistingObjectId defnValues let ids = S.PatchFormat.LocalIds (Vector.fromList (Foldable.toList textIds)) @@ -359,7 +375,7 @@ decodeDeclElement i = getFromBytesOr (ErrDeclElement i) (S.lookupDeclElement i) getCycleLen :: EDB m => H.Hash -> m Word64 getCycleLen h = do - runMaybeT (hashToObjectId h) + runMaybeT (primaryHashToExistingObjectId h) >>= maybe (throwError $ LegacyUnknownCycleLen h) pure >>= liftQ . Q.loadObjectById >>= decodeComponentLengthOnly @@ -498,7 +514,7 @@ c2xTerm saveText saveDefn tm tp = loadTermWithTypeByReference :: EDB m => C.Reference.Id -> m (C.Term Symbol, C.Term.Type Symbol) loadTermWithTypeByReference (C.Reference.Id h i) = - hashToObjectId h + primaryHashToExistingObjectId h >>= liftQ . 
Q.loadObjectById -- retrieve and deserialize the blob >>= decodeTermElementWithType i @@ -506,7 +522,7 @@ loadTermWithTypeByReference (C.Reference.Id h i) = loadTermByReference :: EDB m => C.Reference.Id -> m (C.Term Symbol) loadTermByReference (C.Reference.Id h i) = - hashToObjectId h + primaryHashToExistingObjectId h >>= liftQ . Q.loadObjectById -- retrieve and deserialize the blob >>= decodeTermElementDiscardingType i @@ -514,7 +530,7 @@ loadTermByReference (C.Reference.Id h i) = loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) loadTypeOfTermByTermReference (C.Reference.Id h i) = - hashToObjectId h + primaryHashToExistingObjectId h >>= liftQ . Q.loadObjectById -- retrieve and deserialize the blob >>= decodeTermElementDiscardingTerm i @@ -603,7 +619,7 @@ lookup_ stateLens writerLens mk t = do Just t' -> pure t' c2sTerm :: EDB m => C.Term Symbol -> C.Term.Type Symbol -> m (LocalIds, S.Term.Term, S.Term.Type) -c2sTerm tm tp = c2xTerm Q.saveText hashToObjectId tm (Just tp) <&> \(w, tm, Just tp) -> (w, tm, tp) +c2sTerm tm tp = c2xTerm Q.saveText primaryHashToExistingObjectId tm (Just tp) <&> \(w, tm, Just tp) -> (w, tm, tp) -- *** Watch expressions @@ -637,7 +653,7 @@ w2cTerm ids tm = do saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId saveDeclComponent h decls = do - sDeclElements <- traverse (c2sDecl Q.saveText hashToObjectId) decls + sDeclElements <- traverse (c2sDecl Q.saveText primaryHashToExistingObjectId) decls hashId <- Q.saveHashHash h let bytes = S.putBytes @@ -681,7 +697,7 @@ loadDeclByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- - hashToObjectId h >>= liftQ . Q.loadObjectById >>= decodeDeclElement i + primaryHashToExistingObjectId h >>= liftQ . 
Q.loadObjectById >>= decodeDeclElement i -- look up the text and hashes that are used by the term texts <- traverse loadTextById $ LocalIds.textLookup localIds @@ -751,6 +767,12 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId traverse loadBranchByObjectId boId +-- this maps from the key used by C.Branch to a local id +type BranchSavingState = (Map Text LocalTextId, Map H.Hash LocalDefnId, Map Db.PatchObjectId LocalPatchObjectId, Map (BranchHash, CausalHash) LocalBranchChildId) +type BranchSavingWriter = (Seq Text, Seq H.Hash, Seq Db.PatchObjectId, Seq (BranchHash, CausalHash)) +type BranchSavingConstraint m = (MonadState BranchSavingState m, MonadWriter BranchSavingWriter m) +type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter m) + saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveRootBranch (C.CausalHead hc he _parents me) = do chId <- liftQ (Q.saveCausalHash hc) @@ -758,17 +780,94 @@ saveRootBranch (C.CausalHead hc he _parents me) = do Just boId -> pure (boId, chId) Nothing -> do bhId <- liftQ (Q.saveBranchHash he) - sBranch <- c2sBranch =<< me - boId <- saveBranchObject bhId sBranch + (li, lBranch) <- c2lBranch =<< me + boId <- saveBranchObject bhId li lBranch liftQ (Q.saveCausal chId bhId) pure (boId, chId) - where - saveBranchObject :: Db.BranchHashId -> S.DbBranch -> m Db.BranchObjectId - saveBranchObject = error "todo" - c2sBranch :: C.Branch.Branch m -> m S.DbBranch - c2sBranch (C.Branch.Branch terms types patches children) = - error "todo" + c2lBranch :: EDB m => C.Branch.Branch m -> m (BranchLocalIds, S.Branch.Full.LocalBranch) + c2lBranch (C.Branch.Branch terms types patches children) = + done =<< (runWriterT . 
flip evalStateT startState) do + S.Branch + <$> Map.bitraverse saveNameSegment (Map.bitraverse saveReferent saveMetadata) terms + <*> Map.bitraverse saveNameSegment (Map.bitraverse saveReference saveMetadata) types + <*> Map.bitraverse saveNameSegment savePatch' patches + <*> Map.bitraverse saveNameSegment saveChild children + saveNameSegment (C.Branch.NameSegment t) = lookupText t + saveReference :: BranchSavingConstraint m => C.Reference.Reference -> m S.Reference.LocalReference + saveReference = bitraverse lookupText lookupDefn + saveReferent :: BranchSavingConstraint m => C.Referent.Referent -> m S.Referent.LocalReferent + saveReferent = bitraverse saveReference saveReference + saveMetadata :: Monad m => m C.Branch.MdValues -> BranchSavingMonad m S.Branch.Full.LocalMetadataSet + saveMetadata mm = do + C.Branch.MdValues s <- (lift . lift) mm + S.Branch.Full.Inline <$> Set.traverse saveReference s + savePatch' :: EDB m => (PatchHash, m C.Branch.Patch) -> BranchSavingMonad m LocalPatchObjectId + savePatch' (h, mp) = do + patchOID <- primaryHashToMaybePatchObjectId h >>= \case + Just patchOID -> pure patchOID + Nothing -> savePatch h =<< (lift . 
lift) mp + lookupPatch patchOID + saveChild :: C.Branch.Causal m -> BranchSavingMonad m LocalBranchChildId + saveChild = error "todo: save child" + lookupText :: + ( MonadState s m, + MonadWriter w m, + Lens.Field1' s (Map t LocalTextId), + Lens.Field1' w (Seq t), + Ord t + ) => + t -> + m LocalTextId + lookupText = lookup_ Lens._1 Lens._1 LocalTextId + lookupDefn :: + ( MonadState s m, + MonadWriter w m, + Lens.Field2' s (Map d LocalDefnId), + Lens.Field2' w (Seq d), + Ord d + ) => + d -> + m LocalDefnId + lookupDefn = lookup_ Lens._2 Lens._2 LocalDefnId + lookupPatch :: + ( MonadState s m, + MonadWriter w m, + Lens.Field3' s (Map p LocalPatchObjectId), + Lens.Field3' w (Seq p), + Ord p + ) => + p -> + m LocalPatchObjectId + lookupPatch = lookup_ Lens._3 Lens._3 LocalPatchObjectId + lookupChild :: + ( MonadState s m, + MonadWriter w m, + Lens.Field4' s (Map c LocalBranchChildId), + Lens.Field4' w (Seq c), + Ord c + ) => + c -> + m LocalBranchChildId + lookupChild = lookup_ Lens._4 Lens._4 LocalBranchChildId + startState = mempty @BranchSavingState + saveBranchObject :: DB m => Db.BranchHashId -> BranchLocalIds -> S.Branch.Full.LocalBranch -> m Db.BranchObjectId + saveBranchObject (Db.unBranchHashId -> hashId) li lBranch = do + let bytes = S.putBytes S.putBranchFormat $ S.BranchFormat.Full li lBranch + Db.BranchObjectId <$> Q.saveObject hashId OT.Namespace bytes + done :: EDB m => (a, BranchSavingWriter) -> m (BranchLocalIds, a) + done (lBranch, (textValues, defnHashes, patchHashes, branchCausalHashes)) = do + textIds <- liftQ $ traverse Q.saveText textValues + defnObjectIds <- traverse primaryHashToExistingObjectId defnHashes + patchObjectIds <- pure patchHashes + branchCausalIds <- traverse (bitraverse primaryHashToExistingBranchObjectId (liftQ . 
Q.saveCausalHash)) branchCausalHashes + let ids = + S.BranchFormat.LocalIds + (Vector.fromList (Foldable.toList textIds)) + (Vector.fromList (Foldable.toList defnObjectIds)) + (Vector.fromList (Foldable.toList patchObjectIds)) + (Vector.fromList (Foldable.toList branchCausalIds)) + pure (ids, lBranch) lookupBranchLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId lookupBranchLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! fromIntegral w @@ -1082,7 +1181,7 @@ declReferentsByPrefix b32prefix pos cid = do pure (C.Decl.declType decl, len, fromIntegral $ length (C.Decl.constructorTypes decl)) -- (localIds, C.Decl.DataDeclaration dt m b ct) <- --- hashToObjectId h >>= liftQ . Q.loadObjectById >>= decodeDeclElement i +-- primaryHashToExistingObjectId h >>= liftQ . Q.loadObjectById >>= decodeDeclElement i branchHashesByPrefix :: EDB m => ShortBranchHash -> m (Set BranchHash) branchHashesByPrefix (ShortBranchHash b32prefix) = do diff --git a/codebase2/util/U/Util/Lens.hs b/codebase2/util/U/Util/Lens.hs index e915a89d88..21f457c4d2 100644 --- a/codebase2/util/U/Util/Lens.hs +++ b/codebase2/util/U/Util/Lens.hs @@ -7,3 +7,4 @@ import qualified Control.Lens as Lens type Field1' s a = Lens.Field1 s s a a type Field2' s a = Lens.Field2 s s a a type Field3' s a = Lens.Field3 s s a a +type Field4' s a = Lens.Field4 s s a a From 9258d54394ac5dbba508532df5db853dfecf08c6 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 7 Jan 2021 13:22:47 -0500 Subject: [PATCH 061/225] start consolidating Causal and CausalHead --- .../U/Codebase/Sqlite/Operations.hs | 79 +++++++++++-------- .../U/Codebase/Sqlite/Queries.hs | 9 ++- codebase2/codebase/U/Codebase/Causal.hs | 19 +++-- questions.md | 4 +- 4 files changed, 69 insertions(+), 42 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 5b47900291..073ab4f005 100644 --- 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -17,7 +17,7 @@ import qualified Control.Lens as Lens import Control.Monad (join, (<=<)) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad.State (MonadState, StateT, evalStateT) -import qualified Control.Monad.Trans as Monad +import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Control.Monad.Writer (MonadWriter, WriterT, runWriterT) import qualified Control.Monad.Writer as Writer @@ -111,7 +111,6 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import Control.Monad.Trans (MonadTrans(lift)) -- * Error handling @@ -138,6 +137,7 @@ data Error | UnknownDependency H.Hash | UnknownText Text | ExpectedBranch CausalHash BranchHash + | ExpectedBranch' Db.CausalHashId | LegacyUnknownCycleLen H.Hash | LegacyUnknownConstructorType H.Hash C.Reference.Pos deriving (Show) @@ -178,14 +178,15 @@ primaryHashToExistingObjectId h = do primaryHashToExistingPatchObjectId :: EDB m => PatchHash -> m Db.PatchObjectId primaryHashToExistingPatchObjectId = - fmap Db.PatchObjectId . primaryHashToExistingObjectId . unPatchHash + fmap Db.PatchObjectId . primaryHashToExistingObjectId . unPatchHash primaryHashToMaybePatchObjectId :: EDB m => PatchHash -> m (Maybe Db.PatchObjectId) primaryHashToMaybePatchObjectId = error "todo" -primaryHashToExistingBranchObjectId :: EDB m => BranchHash -> m Db.BranchObjectId -primaryHashToExistingBranchObjectId = - fmap Db.BranchObjectId . primaryHashToExistingObjectId . unBranchHash +primaryHashToMaybeBranchObjectId :: DB m => BranchHash -> m (Maybe Db.BranchObjectId) +primaryHashToMaybeBranchObjectId = error "todo" + +-- (fmap . fmap) Db.BranchObjectId . liftQ . Q.maybeObjectIdPrimaryHashId . 
unBranchHash objectExistsForHash :: DB m => H.Hash -> m Bool objectExistsForHash h = @@ -762,20 +763,37 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = <*> loadValueHashByCausalHashId chId <*> headParents chId <*> pure (loadValue chId) - loadValue :: EDB m => Db.CausalHashId -> m (Maybe (C.Branch.Branch m)) - loadValue chId = do - boId <- liftQ $ Q.loadBranchObjectIdByCausalHashId chId - traverse loadBranchByObjectId boId + loadValue :: EDB m => Db.CausalHashId -> m (C.Branch.Branch m) + loadValue chId = + liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case + Nothing -> throwError (ExpectedBranch' chId) + Just boId -> loadBranchByObjectId boId -- this maps from the key used by C.Branch to a local id -type BranchSavingState = (Map Text LocalTextId, Map H.Hash LocalDefnId, Map Db.PatchObjectId LocalPatchObjectId, Map (BranchHash, CausalHash) LocalBranchChildId) -type BranchSavingWriter = (Seq Text, Seq H.Hash, Seq Db.PatchObjectId, Seq (BranchHash, CausalHash)) +type BranchSavingState = (Map Text LocalTextId, Map H.Hash LocalDefnId, Map Db.PatchObjectId LocalPatchObjectId, Map (Db.BranchObjectId, Db.CausalHashId) LocalBranchChildId) + +type BranchSavingWriter = (Seq Text, Seq H.Hash, Seq Db.PatchObjectId, Seq (Db.BranchObjectId, Db.CausalHashId)) + type BranchSavingConstraint m = (MonadState BranchSavingState m, MonadWriter BranchSavingWriter m) + type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter m) saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) -saveRootBranch (C.CausalHead hc he _parents me) = do +saveRootBranch (C.CausalHead hc he parents me) = do + -- check if we can skip the whole thing, by checking if there are causal parents for hc chId <- liftQ (Q.saveCausalHash hc) + liftQ (Q.loadCausalParents chId) >>= \case + [] -> do + parentCausalHashIds <- for (Map.toList parents) $ \(causalHash, mcausal) -> do + -- check if we can short circuit the parent before loading it, + -- 
by checking if there are causal parents for hc + parentChId <- liftQ (Q.saveCausalHash causalHash) + liftQ (Q.loadCausalParents parentChId) >>= \case + [] -> do c <- mcausal; snd <$> saveRootBranch c + _grandParents -> pure parentChId + liftQ (Q.saveCausalParents chId parentCausalHashIds) + _parents -> pure () + liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case Just boId -> pure (boId, chId) Nothing -> do @@ -804,12 +822,13 @@ saveRootBranch (C.CausalHead hc he _parents me) = do S.Branch.Full.Inline <$> Set.traverse saveReference s savePatch' :: EDB m => (PatchHash, m C.Branch.Patch) -> BranchSavingMonad m LocalPatchObjectId savePatch' (h, mp) = do - patchOID <- primaryHashToMaybePatchObjectId h >>= \case - Just patchOID -> pure patchOID - Nothing -> savePatch h =<< (lift . lift) mp + patchOID <- + primaryHashToMaybePatchObjectId h >>= \case + Just patchOID -> pure patchOID + Nothing -> savePatch h =<< (lift . lift) mp lookupPatch patchOID - saveChild :: C.Branch.Causal m -> BranchSavingMonad m LocalBranchChildId - saveChild = error "todo: save child" + saveChild :: EDB m => C.Branch.Causal m -> BranchSavingMonad m LocalBranchChildId + saveChild c = (lift . lift) (saveRootBranch c) >>= lookupChild lookupText :: ( MonadState s m, MonadWriter w m, @@ -856,11 +875,9 @@ saveRootBranch (C.CausalHead hc he _parents me) = do let bytes = S.putBytes S.putBranchFormat $ S.BranchFormat.Full li lBranch Db.BranchObjectId <$> Q.saveObject hashId OT.Namespace bytes done :: EDB m => (a, BranchSavingWriter) -> m (BranchLocalIds, a) - done (lBranch, (textValues, defnHashes, patchHashes, branchCausalHashes)) = do + done (lBranch, (textValues, defnHashes, patchObjectIds, branchCausalIds)) = do textIds <- liftQ $ traverse Q.saveText textValues defnObjectIds <- traverse primaryHashToExistingObjectId defnHashes - patchObjectIds <- pure patchHashes - branchCausalIds <- traverse (bitraverse primaryHashToExistingBranchObjectId (liftQ . 
Q.saveCausalHash)) branchCausalHashes let ids = S.BranchFormat.LocalIds (Vector.fromList (Foldable.toList textIds)) @@ -882,29 +899,25 @@ lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObje lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w loadRootCausal :: EDB m => m (C.Branch.Causal m) -loadRootCausal = liftQ Q.loadNamespaceRoot >>= loadCausalBranchByCausalHashId - -loadCausalBranchByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Causal m) -loadCausalBranchByCausalHashId id = - loadCausalSpineByCausalHashId id <&> \case - C.Causal hc he parents mme -> - C.CausalHead hc he parents $ mme >>= (Q.orError $ ExpectedBranch hc he) +loadRootCausal = liftQ Q.loadNamespaceRoot >>= loadCausalByCausalHashId loadCausalBranchByCausalHash :: EDB m => CausalHash -> m (Maybe (C.Branch.Causal m)) loadCausalBranchByCausalHash hc = do Q.loadCausalHashIdByCausalHash hc >>= \case - Just chId -> Just <$> loadCausalBranchByCausalHashId chId + Just chId -> Just <$> loadCausalByCausalHashId chId Nothing -> pure Nothing -loadCausalSpineByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Spine m) -loadCausalSpineByCausalHashId id = do +loadCausalByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Spine m) +loadCausalByCausalHashId id = do hc <- loadCausalHashById id hb <- loadValueHashByCausalHashId id - let loadNamespace = loadBranchByCausalHashId id + let loadNamespace = loadBranchByCausalHashId id >>= \case + Nothing -> throwError (ExpectedBranch' id) + Just b -> pure b parentHashIds <- Q.loadCausalParents id loadParents <- for parentHashIds \hId -> do h <- loadCausalHashById hId - pure (h, loadCausalSpineByCausalHashId hId) + pure (h, loadCausalByCausalHashId hId) pure $ C.Causal hc hb (Map.fromList loadParents) loadNamespace -- | is this even a thing? loading a branch by causal hash? yes I guess so. 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index a8c8d62bda..cf8e652a0b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE TupleSections #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleInstances #-} @@ -230,7 +231,10 @@ loadOldCausalValueHash id = |] saveCausalParent :: DB m => CausalHashId -> CausalHashId -> m () -saveCausalParent child parent = execute sql (child, parent) where +saveCausalParent child parent = saveCausalParents child [parent] + +saveCausalParents :: DB m => CausalHashId -> [CausalHashId] -> m () +saveCausalParents child parents = executeMany sql $ (child,) <$> parents where sql = [here| INSERT OR IGNORE INTO causal_parent (causal_id, parent_id) VALUES (?, ?) |] @@ -402,6 +406,9 @@ query q r = do c <- ask; liftIO $ SQLite.query c q r execute :: (DB m, ToRow q) => SQLite.Query -> q -> m () execute q r = do c <- ask; liftIO $ SQLite.execute c q r +executeMany :: (DB m, ToRow q) => SQLite.Query -> [q] -> m () +executeMany q r = do c <- ask; liftIO $ SQLite.executeMany c q r + -- |transaction that blocks withImmediateTransaction :: (DB m, MonadUnliftIO m) => m a -> m a withImmediateTransaction action = do diff --git a/codebase2/codebase/U/Codebase/Causal.hs b/codebase2/codebase/U/Codebase/Causal.hs index 131d15d78d..5d6d7f68b8 100644 --- a/codebase2/codebase/U/Codebase/Causal.hs +++ b/codebase2/codebase/U/Codebase/Causal.hs @@ -1,18 +1,23 @@ +{-# LANGUAGE RankNTypes, PatternSynonyms #-} module U.Codebase.Causal where import Data.Map (Map) -- | Causal doesn't necessarily pre-load anything other than some hashes. 
-data Causal m hc he e = Causal - { causalHash :: hc, - valueHash :: he, - parents :: Map hc (m (Causal m hc he e)), - value :: m (Maybe e) - } +-- data Causal m hc he e = Causal +-- { causalHash :: hc, +-- valueHash :: he, +-- parents :: Map hc (m (Causal m hc he e)), +-- value :: m (Maybe e) +-- } +type Causal m hc he e = CausalHead m hc he e + +pattern Causal hc he hp hv = CausalHead hc he hp hv +{-# COMPLETE Causal #-} data CausalHead m hc he e = CausalHead { headCausalHash :: hc, headValueHash :: he, - headParents :: Map hc (m (Causal m hc he e)), + headParents :: Map hc (m (CausalHead m hc he e)), headValue :: m e } diff --git a/questions.md b/questions.md index f44ab67193..3dab21ca8c 100644 --- a/questions.md +++ b/questions.md @@ -15,6 +15,8 @@ next steps: - [ ] `SqliteCodebase.syncToDirectory` - [ ] Managing external edit events? - [ ] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? +- [ ] Implement relational metadata +- [ ] do the tag thing to make sure that causal hashes comes from a unique token string compared to other stuff in the codebase. (maybe `accumulate` should take a tag as its first argument, forcing us to audit all the call sites) ### SqliteCodebase progress (V1 -> V2 adaptor) @@ -47,7 +49,7 @@ next steps: | branchHashesByPrefix | ✔ | | -## less organized stuff below +## less organized stuff below | thing | v1↔v2 | v2↔v2s | desc. 
| |-----|-----|-----|---| From 9a8422172d58d5f04ce8e3603b8a2a96894b1b75 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 7 Jan 2021 13:54:04 -0500 Subject: [PATCH 062/225] eliminate C.Branch.Spine --- .../U/Codebase/Sqlite/Operations.hs | 2 +- codebase2/codebase/U/Codebase/Branch.hs | 3 -- .../src/Unison/Codebase/SqliteCodebase.hs | 4 +-- .../Codebase/SqliteCodebase/Conversions.hs | 31 +++++-------------- 4 files changed, 11 insertions(+), 29 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 073ab4f005..57a779e29c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -907,7 +907,7 @@ loadCausalBranchByCausalHash hc = do Just chId -> Just <$> loadCausalByCausalHashId chId Nothing -> pure Nothing -loadCausalByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Spine m) +loadCausalByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Causal m) loadCausalByCausalHashId id = do hc <- loadCausalHashById id hb <- loadValueHashByCausalHashId id diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index a08312abee..b930ad6ef9 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -4,7 +4,6 @@ import Data.Map (Map) import Data.Set (Set) import Data.Text (Text) import U.Codebase.Causal (CausalHead) -import qualified U.Codebase.Causal as C import U.Codebase.HashTags (BranchHash, CausalHash, PatchHash) import U.Codebase.Reference (Reference) import U.Codebase.Referent (Referent) @@ -17,8 +16,6 @@ newtype MdValues = MdValues (Set Reference) deriving (Eq, Ord, Show) type Causal m = CausalHead m CausalHash BranchHash (Branch m) -type Spine m = C.Causal m CausalHash BranchHash (Branch m) - -- | V2.Branch is like V1.Branch0; I would rename it, at least temporarily, but too hard. 
data Branch m = Branch { terms :: Map NameSegment (Map Referent (m MdValues)), diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 0c2c7f57d2..42d2bedf11 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -251,7 +251,7 @@ sqliteCodebase root = do . runExceptT . flip runReaderT conn . fmap (Branch.transform (runDB conn)) - $ Cv.unsafecausalbranch2to1 =<< Ops.loadRootCausal + $ Cv.causalbranch2to1 =<< Ops.loadRootCausal where err :: Ops.Error -> Codebase1.GetRootBranchError err = \case @@ -277,7 +277,7 @@ sqliteCodebase root = do Ops.loadCausalBranchByCausalHash (Cv.branchHash1to2 h) >>= \case Just b -> pure . Just . Branch.transform (runDB conn) - =<< Cv.unsafecausalbranch2to1 b + =<< Cv.causalbranch2to1 b Nothing -> pure Nothing dependentsImpl :: Reference -> IO (Set Reference.Id) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 63626fd6f5..a8dce31ec3 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -6,7 +6,6 @@ import Data.Bifunctor (Bifunctor (bimap)) import qualified Data.ByteString.Short as SBS import Data.Either (fromRight) import qualified Data.Map as Map -import Data.Maybe (fromJust) import Data.Text (Text, pack) import qualified U.Codebase.Branch as V2.Branch import qualified U.Codebase.Causal as V2 @@ -381,37 +380,23 @@ type1to2' convertRef = V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) -- |forces loading v1 branches even if they may not exist -unsafecausalbranch2to1 :: Monad m => V2.Branch.Causal m -> m (V1.Branch.Branch m) -unsafecausalbranch2to1 (V2.CausalHead hc _he (Map.toList -> parents) me) = do +causalbranch2to1 :: Monad m => V2.Branch.Causal m 
-> m (V1.Branch.Branch m) +causalbranch2to1 = fmap V1.Branch.Branch . causalbranch2to1' + +causalbranch2to1' :: Monad m => V2.Branch.Causal m -> m (V1.Branch.UnwrappedBranch m) +causalbranch2to1' (V2.CausalHead hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc - V1.Branch.Branch <$> case parents of + case parents of [] -> V1.Causal.One currentHash <$> fmap branch2to1 me [(hp, mp)] -> do let parentHash = causalHash2to1 hp V1.Causal.Cons currentHash <$> fmap branch2to1 me - <*> pure (parentHash, unsafecausalspine2to1 =<< mp) + <*> pure (parentHash, causalbranch2to1' =<< mp) merge -> do - let tailsList = map (bimap causalHash2to1 (unsafecausalspine2to1 =<<)) merge + let tailsList = map (bimap causalHash2to1 (causalbranch2to1' =<<)) merge e <- me pure $ V1.Causal.Merge currentHash (branch2to1 e) (Map.fromList tailsList) --- |force loading a v1 branch even when it may not exist -unsafecausalspine2to1 :: forall m. Monad m => V2.Branch.Spine m -> m (V1.Branch.UnwrappedBranch m) -unsafecausalspine2to1 (V2.Causal hc _he (Map.toList -> parents) me) = do - let currentHash = causalHash2to1 hc - case parents of - [] -> V1.Causal.One currentHash <$> fmap (branch2to1 . fromJust) me - [(hp, mp)] -> do - let parentHash = causalHash2to1 hp - head <- fmap (branch2to1 . 
fromJust) me - let loadParent = unsafecausalspine2to1 =<< mp - pure $ - V1.Causal.Cons currentHash head (parentHash, loadParent) - merge -> do - let tailsList = map (bimap causalHash2to1 (unsafecausalspine2to1 =<<)) merge - e <- fromJust <$> me - pure $ V1.Causal.Merge currentHash (branch2to1 e) (Map.fromList tailsList) - branch2to1 :: V2.Branch.Branch m -> V1.Branch.Branch0 m branch2to1 = error "todo" From cc80f74e7d2ed39e6637d016c927f5a83a6bf79d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 7 Jan 2021 13:58:05 -0500 Subject: [PATCH 063/225] rename CausalHead -> Causal --- codebase2/codebase/U/Codebase/Branch.hs | 4 ++-- codebase2/codebase/U/Codebase/Causal.hs | 22 ++++++---------------- 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index b930ad6ef9..a0ba3e719e 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -3,7 +3,7 @@ module U.Codebase.Branch where import Data.Map (Map) import Data.Set (Set) import Data.Text (Text) -import U.Codebase.Causal (CausalHead) +import qualified U.Codebase.Causal as C import U.Codebase.HashTags (BranchHash, CausalHash, PatchHash) import U.Codebase.Reference (Reference) import U.Codebase.Referent (Referent) @@ -14,7 +14,7 @@ newtype NameSegment = NameSegment Text deriving (Eq, Ord, Show) newtype MdValues = MdValues (Set Reference) deriving (Eq, Ord, Show) -type Causal m = CausalHead m CausalHash BranchHash (Branch m) +type Causal m = C.Causal m CausalHash BranchHash (Branch m) -- | V2.Branch is like V1.Branch0; I would rename it, at least temporarily, but too hard. 
data Branch m = Branch diff --git a/codebase2/codebase/U/Codebase/Causal.hs b/codebase2/codebase/U/Codebase/Causal.hs index 5d6d7f68b8..a432c159bc 100644 --- a/codebase2/codebase/U/Codebase/Causal.hs +++ b/codebase2/codebase/U/Codebase/Causal.hs @@ -3,21 +3,11 @@ module U.Codebase.Causal where import Data.Map (Map) --- | Causal doesn't necessarily pre-load anything other than some hashes. --- data Causal m hc he e = Causal --- { causalHash :: hc, --- valueHash :: he, --- parents :: Map hc (m (Causal m hc he e)), --- value :: m (Maybe e) --- } -type Causal m hc he e = CausalHead m hc he e +data Causal m hc he e = Causal + { causalHash :: hc, + valueHash :: he, + parents :: Map hc (m (Causal m hc he e)), + value :: m e + } -pattern Causal hc he hp hv = CausalHead hc he hp hv -{-# COMPLETE Causal #-} -data CausalHead m hc he e = CausalHead - { headCausalHash :: hc, - headValueHash :: he, - headParents :: Map hc (m (CausalHead m hc he e)), - headValue :: m e - } From d62c14ee98a293ae8b9791388a9d01470154ab98 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 7 Jan 2021 14:02:47 -0500 Subject: [PATCH 064/225] make Cv.branch2to1 monadic --- .../src/Unison/Codebase/SqliteCodebase/Conversions.hs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index a8dce31ec3..55cefcc679 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -387,16 +387,16 @@ causalbranch2to1' :: Monad m => V2.Branch.Causal m -> m (V1.Branch.UnwrappedBran causalbranch2to1' (V2.CausalHead hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc case parents of - [] -> V1.Causal.One currentHash <$> fmap branch2to1 me + [] -> V1.Causal.One currentHash <$> (me >>= branch2to1) [(hp, mp)] -> do let parentHash = causalHash2to1 
hp V1.Causal.Cons currentHash - <$> fmap branch2to1 me + <$> (me >>= branch2to1) <*> pure (parentHash, causalbranch2to1' =<< mp) merge -> do let tailsList = map (bimap causalHash2to1 (causalbranch2to1' =<<)) merge e <- me - pure $ V1.Causal.Merge currentHash (branch2to1 e) (Map.fromList tailsList) + V1.Causal.Merge currentHash <$> branch2to1 e <*> pure (Map.fromList tailsList) -branch2to1 :: V2.Branch.Branch m -> V1.Branch.Branch0 m +branch2to1 :: V2.Branch.Branch m -> m (V1.Branch.Branch0 m) branch2to1 = error "todo" From 6ebcd500da2f36fd3cfd75adf953895c7950f59a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 7 Jan 2021 14:09:56 -0500 Subject: [PATCH 065/225] rename CausalHead -> Causal more --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs | 11 ----------- .../codebase-sqlite/U/Codebase/Sqlite/Operations.hs | 6 +++--- .../src/Unison/Codebase/SqliteCodebase/Conversions.hs | 2 +- 3 files changed, 4 insertions(+), 15 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs index 3b4fd09db0..7996277b1f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs @@ -1,12 +1 @@ module U.Codebase.Sqlite.Causal where - --- data Causal hc he = RawCausal { --- valueHash :: he, --- parentHashes :: [hc] --- } - --- data CausalHead id hc he = RawCausalHead { --- headValueId :: id, --- headValueHash :: he, --- headParentHashes :: [hc] --- } diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 57a779e29c..075e934068 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -742,9 +742,9 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = h <- PatchHash <$> (loadHashByObjectId . 
Db.unPatchObjectId) patchId pure (h, loadPatchById patchId) - doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.CausalHead m CausalHash BranchHash (C.Branch.Branch m))) + doChildren :: EDB m => Map Db.TextId (Db.BranchObjectId, Db.CausalHashId) -> m (Map C.Branch.NameSegment (C.Causal m CausalHash BranchHash (C.Branch.Branch m))) doChildren = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \(boId, chId) -> - C.CausalHead <$> loadCausalHashById chId + C.Causal <$> loadCausalHashById chId <*> loadValueHashByCausalHashId chId <*> headParents chId <*> pure (loadBranchByObjectId boId) @@ -779,7 +779,7 @@ type BranchSavingConstraint m = (MonadState BranchSavingState m, MonadWriter Bra type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter m) saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) -saveRootBranch (C.CausalHead hc he parents me) = do +saveRootBranch (C.Causal hc he parents me) = do -- check if we can skip the whole thing, by checking if there are causal parents for hc chId <- liftQ (Q.saveCausalHash hc) liftQ (Q.loadCausalParents chId) >>= \case diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 55cefcc679..01ad7e77d7 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -384,7 +384,7 @@ causalbranch2to1 :: Monad m => V2.Branch.Causal m -> m (V1.Branch.Branch m) causalbranch2to1 = fmap V1.Branch.Branch . 
causalbranch2to1' causalbranch2to1' :: Monad m => V2.Branch.Causal m -> m (V1.Branch.UnwrappedBranch m) -causalbranch2to1' (V2.CausalHead hc _he (Map.toList -> parents) me) = do +causalbranch2to1' (V2.Causal hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc case parents of [] -> V1.Causal.One currentHash <$> (me >>= branch2to1) From ac1ee20d2d39548580d350547d3b35c763cedd1d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 11 Jan 2021 15:46:02 -0500 Subject: [PATCH 066/225] loading types for metadata --- .../U/Codebase/Sqlite/Operations.hs | 20 +++- .../U/Codebase/Sqlite/Queries.hs | 17 +++ .../U/Codebase/Sqlite/Reference.hs | 12 +++ codebase2/codebase/U/Codebase/Branch.hs | 4 +- .../src/Unison/Codebase/SqliteCodebase.hs | 4 +- .../Codebase/SqliteCodebase/Conversions.hs | 102 +++++++++++++++--- 6 files changed, 139 insertions(+), 20 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 075e934068..278fb21cf8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -140,6 +140,7 @@ data Error | ExpectedBranch' Db.CausalHashId | LegacyUnknownCycleLen H.Hash | LegacyUnknownConstructorType H.Hash C.Reference.Pos + | NeedTypeForBuiltinMetadata Text deriving (Show) getFromBytesOr :: Err m => DecodeError -> Get a -> ByteString -> m a @@ -721,13 +722,21 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = <*> doPatches patches <*> doChildren children where + loadMetadataType :: EDB m => S.Reference -> m C.Reference + loadMetadataType = \case + C.ReferenceBuiltin tId -> + loadTextById tId >>= throwError . 
NeedTypeForBuiltinMetadata + C.ReferenceDerived id -> + typeReferenceForTerm id >>= h2cReference + + loadTypesForMetadata rs = Map.fromList <$> traverse (\r -> (,) <$> s2cReference r <*> loadMetadataType r) (Foldable.toList rs) doTerms :: EDB m => Map Db.TextId (Map S.Referent S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Referent (m C.Branch.MdValues))) doTerms = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) ( Map.bitraverse s2cReferent \case S.MetadataSet.Inline rs -> - pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs + pure $ C.Branch.MdValues <$> loadTypesForMetadata rs ) doTypes :: EDB m => Map Db.TextId (Map S.Reference S.DbMetadataSet) -> m (Map C.Branch.NameSegment (Map C.Reference (m C.Branch.MdValues))) doTypes = @@ -735,7 +744,7 @@ s2cBranch (S.Branch.Full.Branch tms tps patches children) = (fmap C.Branch.NameSegment . loadTextById) ( Map.bitraverse s2cReference \case S.MetadataSet.Inline rs -> - pure $ C.Branch.MdValues <$> Set.traverse s2cReference rs + pure $ C.Branch.MdValues <$> loadTypesForMetadata rs ) doPatches :: EDB m => Map Db.TextId Db.PatchObjectId -> m (Map C.Branch.NameSegment (PatchHash, m C.Branch.Patch)) doPatches = Map.bitraverse (fmap C.Branch.NameSegment . loadTextById) \patchId -> do @@ -818,8 +827,8 @@ saveRootBranch (C.Causal hc he parents me) = do saveReferent = bitraverse saveReference saveReference saveMetadata :: Monad m => m C.Branch.MdValues -> BranchSavingMonad m S.Branch.Full.LocalMetadataSet saveMetadata mm = do - C.Branch.MdValues s <- (lift . lift) mm - S.Branch.Full.Inline <$> Set.traverse saveReference s + C.Branch.MdValues m <- (lift . 
lift) mm + S.Branch.Full.Inline <$> Set.traverse saveReference (Map.keysSet m) savePatch' :: EDB m => (PatchHash, m C.Branch.Patch) -> BranchSavingMonad m LocalPatchObjectId savePatch' (h, mp) = do patchOID <- @@ -1131,6 +1140,9 @@ termsHavingType cTypeRef = do Nothing -> mempty Just set -> Set.fromList set +typeReferenceForTerm :: EDB m => S.Reference.Id -> m S.ReferenceH +typeReferenceForTerm = liftQ . Q.getTypeReferenceForReference + termsMentioningType :: EDB m => C.Reference -> m (Set C.Referent.Id) termsMentioningType cTypeRef = do maySet <- runMaybeT $ do diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index cf8e652a0b..13fe08c50d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} @@ -33,6 +34,7 @@ import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHas import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent +import qualified U.Codebase.Referent as C.Referent import U.Codebase.WatchKind (WatchKind) import qualified U.Codebase.WatchKind as WatchKind import U.Util.Base32Hex (Base32Hex (..)) @@ -55,6 +57,7 @@ data Integrity | NoObjectForHashId HashId | NoNamespaceRoot | MultipleNamespaceRoots [CausalHashId] + | NoTypeIndexForTerm Referent.Id deriving Show -- |discard errors that you're sure are impossible @@ -308,6 +311,20 @@ getReferentsByType r = query sql r where sql = [here| AND type_reference_component_index = ? 
|] +getTypeReferenceForReference :: EDB m => Reference.Id -> m (Reference' TextId HashId) +getTypeReferenceForReference (C.Referent.RefId -> r) = + queryMaybe sql r >>= orError (NoTypeIndexForTerm r) + where sql = [here| + SELECT + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index + FROM find_type_index + WHERE term_referent_object_id = ? + AND term_referent_component_index = ? + AND term_referent_constructor_index = ? +|] + addToTypeMentionsIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| INSERT OR IGNORE INTO find_type_mentions_index ( diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index 043aae3a01..185d122a88 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -11,6 +11,7 @@ import Database.SQLite.Simple.ToField (ToField) import U.Codebase.Reference (Id' (Id), Reference' (ReferenceBuiltin, ReferenceDerived)) import U.Codebase.Sqlite.DbId (HashId, ObjectId, TextId) import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) +import Control.Applicative (liftA3) type Reference = Reference' TextId ObjectId @@ -33,6 +34,17 @@ instance ToRow (Reference' TextId HashId) where ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] ReferenceDerived (Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) +instance FromRow (Reference' TextId HashId) where + fromRow = liftA3 mkRef field field field + where + mkRef (Just t) Nothing Nothing = + ReferenceBuiltin t + mkRef Nothing (Just hashId) (Just componentIdx) = + ReferenceDerived (Id hashId componentIdx) + mkRef t h i = + error $ "invalid find_type_index type reference: " ++ str + where str = "(" ++ show t ++ ", " ++ show h ++ ", " ++ show i ++ ")" + instance ToRow (Reference' TextId ObjectId) where toRow = \case 
ReferenceBuiltin t -> toRow (Only t) ++ [SQLNull, SQLNull] diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index a0ba3e719e..6bba6df79f 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -12,7 +12,9 @@ import U.Codebase.TypeEdit (TypeEdit) newtype NameSegment = NameSegment Text deriving (Eq, Ord, Show) -newtype MdValues = MdValues (Set Reference) deriving (Eq, Ord, Show) +type MetadataType = Reference +type MetadataValue = Reference +data MdValues = MdValues (Map MetadataValue MetadataType) deriving (Eq, Ord, Show) type Causal m = C.Causal m CausalHash BranchHash (Branch m) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 42d2bedf11..a1f508bf82 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -251,7 +251,7 @@ sqliteCodebase root = do . runExceptT . flip runReaderT conn . fmap (Branch.transform (runDB conn)) - $ Cv.causalbranch2to1 =<< Ops.loadRootCausal + $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal where err :: Ops.Error -> Codebase1.GetRootBranchError err = \case @@ -277,7 +277,7 @@ sqliteCodebase root = do Ops.loadCausalBranchByCausalHash (Cv.branchHash1to2 h) >>= \case Just b -> pure . Just . 
Branch.transform (runDB conn) - =<< Cv.causalbranch2to1 b + =<< Cv.causalbranch2to1 getCycleLen getDeclType b Nothing -> pure Nothing dependentsImpl :: Reference -> IO (Set Reference.Id) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 01ad7e77d7..8b9e0ca1df 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -2,10 +2,13 @@ module Unison.Codebase.SqliteCodebase.Conversions where +import Control.Monad (foldM) import Data.Bifunctor (Bifunctor (bimap)) import qualified Data.ByteString.Short as SBS import Data.Either (fromRight) +import Data.Map (Map) import qualified Data.Map as Map +import qualified Data.Set as Set import Data.Text (Text, pack) import qualified U.Codebase.Branch as V2.Branch import qualified U.Codebase.Causal as V2 @@ -25,15 +28,19 @@ import qualified U.Codebase.WatchKind as V2.WatchKind import qualified U.Core.ABT as V2.ABT import qualified U.Util.Hash as V2 import qualified U.Util.Hash as V2.Hash +import qualified U.Util.Map as Map import qualified Unison.ABT as V1.ABT import qualified Unison.Codebase.Branch as V1.Branch import qualified Unison.Codebase.Causal as V1.Causal +import qualified Unison.Codebase.Metadata as V1.Metadata +import qualified Unison.Codebase.Patch as V1 import qualified Unison.Codebase.ShortBranchHash as V1 import qualified Unison.ConstructorType as CT import qualified Unison.DataDeclaration as V1.Decl import Unison.Hash (Hash) import qualified Unison.Hash as V1 import qualified Unison.Kind as V1.Kind +import qualified Unison.NameSegment as V1 import Unison.Parser (Ann) import qualified Unison.Parser as Ann import qualified Unison.Pattern as V1.Pattern @@ -44,6 +51,8 @@ import qualified Unison.Referent as V1.Referent import qualified Unison.Symbol as V1 import qualified Unison.Term as V1.Term import qualified 
Unison.Type as V1.Type +import qualified Unison.Util.Relation as Relation +import qualified Unison.Util.Star3 as V1.Star3 import qualified Unison.Var as V1.Var import qualified Unison.Var as Var @@ -309,6 +318,11 @@ rreferent1to2 h = \case V1.Ref r -> V2.Ref (rreference1to2 h r) V1.Con r i _ct -> V2.Con (reference1to2 r) (fromIntegral i) +referent2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Referent -> m V1.Referent +referent2to1 lookupSize lookupCT = \case + V2.Ref r -> V1.Ref <$> reference2to1 lookupSize r + V2.Con r i -> V1.Con <$> reference2to1 lookupSize r <*> pure (fromIntegral i) <*> lookupCT r + referentid2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Referent.Id -> m V1.Referent.Id referentid2to1 lookupSize lookupCT = \case V2.RefId r -> V1.Ref' <$> referenceid2to1 lookupSize r @@ -379,24 +393,86 @@ type1to2' convertRef = V1.Kind.Star -> V2.Kind.Star V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) --- |forces loading v1 branches even if they may not exist -causalbranch2to1 :: Monad m => V2.Branch.Causal m -> m (V1.Branch.Branch m) -causalbranch2to1 = fmap V1.Branch.Branch . causalbranch2to1' +-- | forces loading v1 branches even if they may not exist +causalbranch2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Branch.Causal m -> m (V1.Branch.Branch m) +causalbranch2to1 lookupSize lookupCT = fmap V1.Branch.Branch . 
causalbranch2to1' lookupSize lookupCT -causalbranch2to1' :: Monad m => V2.Branch.Causal m -> m (V1.Branch.UnwrappedBranch m) -causalbranch2to1' (V2.Causal hc _he (Map.toList -> parents) me) = do +causalbranch2to1' :: Monad m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Branch.Causal m -> m (V1.Branch.UnwrappedBranch m) +causalbranch2to1' lookupSize lookupCT (V2.Causal hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc case parents of - [] -> V1.Causal.One currentHash <$> (me >>= branch2to1) + [] -> V1.Causal.One currentHash <$> (me >>= branch2to1 lookupSize lookupCT) [(hp, mp)] -> do let parentHash = causalHash2to1 hp V1.Causal.Cons currentHash - <$> (me >>= branch2to1) - <*> pure (parentHash, causalbranch2to1' =<< mp) + <$> (me >>= branch2to1 lookupSize lookupCT) + <*> pure (parentHash, causalbranch2to1' lookupSize lookupCT =<< mp) merge -> do - let tailsList = map (bimap causalHash2to1 (causalbranch2to1' =<<)) merge + let tailsList = map (bimap causalHash2to1 (causalbranch2to1' lookupSize lookupCT =<<)) merge e <- me - V1.Causal.Merge currentHash <$> branch2to1 e <*> pure (Map.fromList tailsList) - -branch2to1 :: V2.Branch.Branch m -> m (V1.Branch.Branch0 m) -branch2to1 = error "todo" + V1.Causal.Merge currentHash <$> branch2to1 lookupSize lookupCT e <*> pure (Map.fromList tailsList) + +patch2to1 :: V2.Branch.Patch -> V1.Patch +patch2to1 = undefined -- todo + +edithash2to1 :: V2.PatchHash -> V1.Branch.EditHash +edithash2to1 = undefined -- todo + +namesegment2to1 :: V2.Branch.NameSegment -> V1.NameSegment +namesegment2to1 = undefined -- todo + +-- +branch2to1 :: + Monad m => + (Hash -> m V1.Reference.Size) -> + (V2.Reference -> m CT.ConstructorType) -> + V2.Branch.Branch m -> + m (V1.Branch.Branch0 m) +branch2to1 lookupSize lookupCT (V2.Branch.Branch v2terms v2types v2patches v2children) = do + v1terms <- toStar (reference2to1 lookupSize) =<< Map.bitraverse (pure . 
namesegment2to1) (Map.bitraverse (referent2to1 lookupSize lookupCT) id) v2terms + v1types <- toStar (reference2to1 lookupSize) =<< Map.bitraverse (pure . namesegment2to1) (Map.bitraverse (reference2to1 lookupSize) id) v2types + let v1patches = Map.bimap namesegment2to1 (bimap edithash2to1 (fmap patch2to1)) v2patches + v1children <- Map.bitraverse (pure . namesegment2to1) (causalbranch2to1 lookupSize lookupCT) v2children + pure $ V1.Branch.branch0 v1terms v1types v1children v1patches + where + toStar :: forall m name ref. (Monad m, Ord name, Ord ref) => (V2.Reference -> m V1.Reference) -> Map name (Map ref V2.Branch.MdValues) -> m (V1.Metadata.Star ref name) + toStar mdref2to1 m = foldM insert mempty (Map.toList m) + where + insert star (name, m) = foldM (insert' name) star (Map.toList m) + insert' :: name -> V1.Metadata.Star ref name -> (ref, V2.Branch.MdValues) -> m (V1.Metadata.Star ref name) + insert' name star (ref, V2.Branch.MdValues mdvals) = do + let facts = Set.singleton ref + names = Relation.singleton ref name + types :: Relation.Relation ref V1.Metadata.Type <- + Relation.insertManyRan ref <$> traverse mdref2to1 (Map.elems mdvals) <*> pure mempty + vals :: Relation.Relation ref (V1.Metadata.Type, V1.Metadata.Value) <- + Relation.insertManyRan ref <$> (traverse (\(t, v) -> (,) <$> mdref2to1 v <*> mdref2to1 t) (Map.toList mdvals)) <*> pure mempty + pure $ star <> V1.Star3.Star3 facts names types vals + +-- V2.Branch0 should have the metadata types, could bulk load with relational operations +-- type Star a n = Star3 a n Type (Type, Value) +-- type Star a n = Star3 a n Type (Reference, Reference) +-- MdValues is a Set V2.Reference + +-- (Name, TermRef, Metadata Type, Metadata Value) <-- decided not this (because name was too long/repetitive?) 
+-- (BranchId/Hash, TermRef, Metadata Type, Metadata Value) <-- what about this + +-- data V2.Branch m = Branch +-- { terms :: Map NameSegment (Map Referent (m MdValues)), +-- types :: Map NameSegment (Map Reference (m MdValues)), +-- patches :: Map NameSegment (PatchHash, m Patch), +-- children :: Map NameSegment (Causal m) +-- } +-- branch0 :: Metadata.Star Referent NameSegment +-- -> Metadata.Star Reference NameSegment +-- -> Map NameSegment (Branch m) +-- -> Map NameSegment (EditHash, m Patch) +-- -> Branch0 m + +-- type Metadata.Star a n = Star3 a n Type (Type, Value) + +-- data Star3 fact d1 d2 d3 +-- = Star3 { fact :: Set fact +-- , d1 :: Relation fact d1 +-- , d2 :: Relation fact d2 +-- , d3 :: Relation fact d3 } deriving (Eq,Ord,Show) From d88c944bce221a16e1851f8d1b7eb1f09033790c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 11 Jan 2021 16:03:22 -0500 Subject: [PATCH 067/225] primaryHashToMaybeObjectId helper stuff --- .../codebase-sqlite/U/Codebase/Sqlite/Operations.hs | 12 ++++++++++-- .../codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 8 ++++---- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 278fb21cf8..50b9387593 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -177,15 +177,23 @@ primaryHashToExistingObjectId h = do Just hashId -> liftQ $ Q.expectObjectIdForPrimaryHashId hashId Nothing -> throwError $ UnknownDependency h +primaryHashToMaybeObjectId :: DB m => H.Hash -> m (Maybe Db.ObjectId) +primaryHashToMaybeObjectId h = do + (Q.loadHashId . H.toBase32Hex) h >>= \case + Just hashId -> Q.maybeObjectIdForPrimaryHashId hashId + Nothing -> pure Nothing + primaryHashToExistingPatchObjectId :: EDB m => PatchHash -> m Db.PatchObjectId primaryHashToExistingPatchObjectId = fmap Db.PatchObjectId . primaryHashToExistingObjectId . 
unPatchHash primaryHashToMaybePatchObjectId :: EDB m => PatchHash -> m (Maybe Db.PatchObjectId) -primaryHashToMaybePatchObjectId = error "todo" +primaryHashToMaybePatchObjectId = + (fmap . fmap) Db.PatchObjectId . primaryHashToMaybeObjectId . unPatchHash primaryHashToMaybeBranchObjectId :: DB m => BranchHash -> m (Maybe Db.BranchObjectId) -primaryHashToMaybeBranchObjectId = error "todo" +primaryHashToMaybeBranchObjectId = + (fmap . fmap) Db.BranchObjectId . primaryHashToMaybeObjectId . unBranchHash -- (fmap . fmap) Db.BranchObjectId . liftQ . Q.maybeObjectIdPrimaryHashId . unBranchHash diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 13fe08c50d..0864743f3c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -128,7 +128,7 @@ saveHashObject hId oId version = execute sql (hId, oId, version) where saveObject :: DB m => HashId -> ObjectType -> ByteString -> m ObjectId saveObject h t blob = - execute sql (h, t, blob) >> queryOne (maybeObjectIdPrimaryHashId h) + execute sql (h, t, blob) >> queryOne (maybeObjectIdForPrimaryHashId h) where sql = [here| INSERT OR IGNORE INTO object (primary_hash_id, type_id, bytes) @@ -150,10 +150,10 @@ loadObjectWithTypeById oId = queryMaybe sql (Only oId) >>= orError (UnknownObjec -- |Not all hashes have corresponding objects; e.g., hashes of term types expectObjectIdForPrimaryHashId :: EDB m => HashId -> m ObjectId expectObjectIdForPrimaryHashId h = - maybeObjectIdPrimaryHashId h >>= orError (UnknownHashId h) + maybeObjectIdForPrimaryHashId h >>= orError (UnknownHashId h) -maybeObjectIdPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) -maybeObjectIdPrimaryHashId h = queryAtom sql (Only h) where sql = [here| +maybeObjectIdForPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) +maybeObjectIdForPrimaryHashId h = queryAtom sql (Only h) where sql = [here| SELECT id FROM object 
WHERE primary_hash_id = ? |] From f4fd0257e96d5224273676d2bb75c9d908ede65c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 11 Jan 2021 17:25:04 -0500 Subject: [PATCH 068/225] patch2to1 --- .../U/Codebase/Sqlite/Operations.hs | 5 ++- .../U/Codebase/Sqlite/Patch/TermEdit.hs | 11 +++-- .../U/Codebase/Sqlite/Serialization.hs | 4 +- codebase2/codebase/U/Codebase/TermEdit.hs | 4 +- .../Codebase/SqliteCodebase/Conversions.hs | 44 ++++++++++++++++--- 5 files changed, 52 insertions(+), 16 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 50b9387593..ae38f7c80b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -256,7 +256,7 @@ h2cReferent = bitraverse h2cReference h2cReference s2cTermEdit :: EDB m => S.TermEdit -> m C.TermEdit s2cTermEdit = \case - S.TermEdit.Replace r t -> C.TermEdit.Replace <$> s2cReference r <*> pure (s2cTyping t) + S.TermEdit.Replace r t -> C.TermEdit.Replace <$> s2cReferent r <*> pure (s2cTyping t) S.TermEdit.Deprecate -> pure C.TermEdit.Deprecate s2cTyping :: S.TermEdit.Typing -> C.TermEdit.Typing @@ -334,7 +334,7 @@ c2lPatch (C.Branch.Patch termEdits typeEdits) = lookupDefn = lookup_ Lens._3 Lens._3 LocalDefnId saveTermEdit = \case - C.TermEdit.Replace r t -> S.TermEdit.Replace <$> saveReference r <*> pure (c2sTyping t) + C.TermEdit.Replace r t -> S.TermEdit.Replace <$> saveReferent r <*> pure (c2sTyping t) C.TermEdit.Deprecate -> pure S.TermEdit.Deprecate saveTypeEdit = \case @@ -343,6 +343,7 @@ c2lPatch (C.Branch.Patch termEdits typeEdits) = saveReference = bitraverse lookupText lookupDefn saveReferenceH = bitraverse lookupText lookupHash + saveReferent = bitraverse saveReference saveReference saveReferentH = bitraverse saveReferenceH saveReferenceH -- | produces a diff diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs index 724f15257b..82a4029382 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/TermEdit.hs @@ -4,6 +4,7 @@ import Data.Bifoldable (Bifoldable (bifoldMap)) import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) import U.Codebase.Reference (Reference') +import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Sqlite.DbId as Db import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalTextId) @@ -11,7 +12,9 @@ type TermEdit = TermEdit' Db.TextId Db.ObjectId type LocalTermEdit = TermEdit' LocalTextId LocalDefnId -data TermEdit' t h = Replace (Reference' t h) Typing | Deprecate +type Referent' t h = Referent.Referent' (Reference' t h) (Reference' t h) + +data TermEdit' t h = Replace (Referent' t h) Typing | Deprecate deriving (Eq, Ord, Show) -- Replacements with the Same type can be automatically propagated. 
@@ -21,13 +24,13 @@ data Typing = Same | Subtype | Different deriving (Eq, Ord, Show) instance Bifunctor TermEdit' where - bimap f g (Replace r t) = Replace (bimap f g r) t + bimap f g (Replace r t) = Replace (bimap (bimap f g) (bimap f g) r) t bimap _ _ Deprecate = Deprecate instance Bifoldable TermEdit' where - bifoldMap f g (Replace r _t) = bifoldMap f g r + bifoldMap f g (Replace r _t) = bifoldMap (bifoldMap f g) (bifoldMap f g) r bifoldMap _ _ Deprecate = mempty instance Bitraversable TermEdit' where - bitraverse f g (Replace r t) = Replace <$> bitraverse f g r <*> pure t + bitraverse f g (Replace r t) = Replace <$> bitraverse (bitraverse f g) (bitraverse f g) r <*> pure t bitraverse _ _ Deprecate = pure Deprecate diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 4bc5fcf2d9..5f27925ada 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -494,7 +494,7 @@ getPatchFormat = getTermEdit = getWord8 >>= \case 0 -> pure TermEdit.Deprecate - 1 -> TermEdit.Replace <$> getReference <*> getTyping + 1 -> TermEdit.Replace <$> getReferent <*> getTyping x -> unknownTag "getTermEdit" x getTyping :: MonadGet m => m TermEdit.Typing getTyping = @@ -539,7 +539,7 @@ putPatchLocalIds (PatchFormat.LocalIds ts hs os) = do putTermEdit :: MonadPut m => TermEdit.LocalTermEdit -> m () putTermEdit TermEdit.Deprecate = putWord8 0 -putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReference r *> putTyping t +putTermEdit (TermEdit.Replace r t) = putWord8 1 *> putReferent r *> putTyping t where putTyping TermEdit.Same = putWord8 0 putTyping TermEdit.Subtype = putWord8 1 diff --git a/codebase2/codebase/U/Codebase/TermEdit.hs b/codebase2/codebase/U/Codebase/TermEdit.hs index 90bbcbfc09..d6137d0ce4 100644 --- a/codebase2/codebase/U/Codebase/TermEdit.hs +++ b/codebase2/codebase/U/Codebase/TermEdit.hs @@ -1,10 
+1,10 @@ module U.Codebase.TermEdit where import U.Util.Hashable (Hashable) -import U.Codebase.Reference (Reference) +import U.Codebase.Referent (Referent) import qualified U.Util.Hashable as H -data TermEdit = Replace Reference Typing | Deprecate +data TermEdit = Replace Referent Typing | Deprecate deriving (Eq, Ord, Show) -- Replacements with the Same type can be automatically propagated. diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 8b9e0ca1df..0810caf367 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -29,6 +29,7 @@ import qualified U.Core.ABT as V2.ABT import qualified U.Util.Hash as V2 import qualified U.Util.Hash as V2.Hash import qualified U.Util.Map as Map +import qualified U.Util.Set as Set import qualified Unison.ABT as V1.ABT import qualified Unison.Codebase.Branch as V1.Branch import qualified Unison.Codebase.Causal as V1.Causal @@ -55,6 +56,11 @@ import qualified Unison.Util.Relation as Relation import qualified Unison.Util.Star3 as V1.Star3 import qualified Unison.Var as V1.Var import qualified Unison.Var as Var +import Data.Bitraversable (Bitraversable(bitraverse)) +import qualified U.Codebase.TermEdit as V2.TermEdit +import qualified Unison.Codebase.TermEdit as V1.TermEdit +import qualified U.Codebase.TypeEdit as V2.TypeEdit +import qualified Unison.Codebase.TypeEdit as V1.TypeEdit sbh1to2 :: V1.ShortBranchHash -> V2.ShortBranchHash sbh1to2 (V1.ShortBranchHash b32) = V2.ShortBranchHash b32 @@ -412,16 +418,42 @@ causalbranch2to1' lookupSize lookupCT (V2.Causal hc _he (Map.toList -> parents) e <- me V1.Causal.Merge currentHash <$> branch2to1 lookupSize lookupCT e <*> pure (Map.fromList tailsList) -patch2to1 :: V2.Branch.Patch -> V1.Patch -patch2to1 = undefined -- todo +patch2to1 :: + forall m. 
+ Monad m => + (Hash -> m V1.Reference.Size) -> + V2.Branch.Patch -> + m V1.Patch +patch2to1 lookupSize (V2.Branch.Patch v2termedits v2typeedits) = do + termEdits <- Map.bitraverse referent2to1' (Set.traverse termedit2to1) v2termedits + typeEdits <- Map.bitraverse (reference2to1 lookupSize) (Set.traverse typeedit2to1) v2typeedits + pure $ V1.Patch (Relation.fromMultimap termEdits) (Relation.fromMultimap typeEdits) + where + referent2to1' :: V2.Referent -> m V1.Reference + referent2to1' = \case + V2.Referent.Ref r -> reference2to1 lookupSize r + V2.Referent.Con{} -> error "found referent on LHS when converting patch2to1" + termedit2to1 :: V2.TermEdit.TermEdit -> m V1.TermEdit.TermEdit + termedit2to1 = \case + V2.TermEdit.Replace (V2.Referent.Ref r) t -> + V1.TermEdit.Replace <$> reference2to1 lookupSize r <*> typing2to1 t + V2.TermEdit.Replace{} -> error "found referent on RHS when converting patch2to1" + V2.TermEdit.Deprecate -> pure V1.TermEdit.Deprecate + typeedit2to1 :: V2.TypeEdit.TypeEdit -> m V1.TypeEdit.TypeEdit + typeedit2to1 = \case + V2.TypeEdit.Replace r -> V1.TypeEdit.Replace <$> reference2to1 lookupSize r + V2.TypeEdit.Deprecate -> pure V1.TypeEdit.Deprecate + typing2to1 t = pure $ case t of + V2.TermEdit.Same -> V1.TermEdit.Same + V2.TermEdit.Subtype -> V1.TermEdit.Subtype + V2.TermEdit.Different -> V1.TermEdit.Different edithash2to1 :: V2.PatchHash -> V1.Branch.EditHash -edithash2to1 = undefined -- todo +edithash2to1 = hash2to1 . V2.unPatchHash namesegment2to1 :: V2.Branch.NameSegment -> V1.NameSegment -namesegment2to1 = undefined -- todo +namesegment2to1 (V2.Branch.NameSegment t) = V1.NameSegment t --- branch2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> @@ -431,7 +463,7 @@ branch2to1 :: branch2to1 lookupSize lookupCT (V2.Branch.Branch v2terms v2types v2patches v2children) = do v1terms <- toStar (reference2to1 lookupSize) =<< Map.bitraverse (pure . 
namesegment2to1) (Map.bitraverse (referent2to1 lookupSize lookupCT) id) v2terms v1types <- toStar (reference2to1 lookupSize) =<< Map.bitraverse (pure . namesegment2to1) (Map.bitraverse (reference2to1 lookupSize) id) v2types - let v1patches = Map.bimap namesegment2to1 (bimap edithash2to1 (fmap patch2to1)) v2patches + v1patches <- Map.bitraverse (pure . namesegment2to1) (bitraverse (pure . edithash2to1) (fmap (patch2to1 lookupSize))) v2patches v1children <- Map.bitraverse (pure . namesegment2to1) (causalbranch2to1 lookupSize lookupCT) v2children pure $ V1.Branch.branch0 v1terms v1types v1children v1patches where From da353a6a0a9104f1af5c17071b786b330f9a4f34 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 11 Jan 2021 17:58:52 -0500 Subject: [PATCH 069/225] update todos --- questions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/questions.md b/questions.md index 3dab21ca8c..13f8fc3f67 100644 --- a/questions.md +++ b/questions.md @@ -8,7 +8,7 @@ next steps: - [x] `SqliteCodebase.getRootBranch` - [x] `SqliteCodebase.getBranchForHash` - [ ] Writing a branch - - [ ] `SqliteCodebase.Conversions.causalbranch1to2` + - [x] `SqliteCodebase.Conversions.causalbranch1to2` - [ ] `SqliteCodebase.putRootBranch` - [ ] Syncing a remote codebase - [ ] `SqliteCodebase.syncFromDirectory` From 191413a21c8cc02e9b6688bba62825b4ae1ffb38 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 12 Jan 2021 10:28:56 -0500 Subject: [PATCH 070/225] unchange old reference hashing --- unison-core/src/Unison/Reference.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs index b0cbfedffa..d9618f1252 100644 --- a/unison-core/src/Unison/Reference.hs +++ b/unison-core/src/Unison/Reference.hs @@ -178,4 +178,4 @@ instance Show Reference where show = SH.toString . SH.take 5 . 
toShortHash instance Hashable.Hashable Reference where tokens (Builtin txt) = [Hashable.Tag 0, Hashable.Text txt] - tokens (DerivedId (Id h i _n)) = [Hashable.Tag 1, Hashable.Bytes (H.toBytes h), Hashable.Nat i] + tokens (DerivedId (Id h i n)) = [Hashable.Tag 1, Hashable.Bytes (H.toBytes h), Hashable.Nat i, Hashable.Nat n] From b3157875bdfbca5db4d787be21ddbdbb7da00605 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 12 Jan 2021 11:23:03 -0500 Subject: [PATCH 071/225] putRootBranch --- .../src/Unison/Codebase/SqliteCodebase.hs | 5 ++++- .../src/Unison/Codebase/SqliteCodebase/Conversions.hs | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index a1f508bf82..70e6ca02b4 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -68,6 +68,7 @@ import qualified Unison.UnisonFile as UF import UnliftIO (MonadIO, catchIO) import UnliftIO.STM import qualified U.Codebase.Sqlite.Queries as Q +import Control.Monad.Trans (MonadTrans(lift)) -- 1) buffer up the component -- 2) in the event that the component is complete, then what? @@ -265,7 +266,9 @@ sqliteCodebase root = do e -> error $ show e putRootBranch :: Branch IO -> IO () - putRootBranch = error "todo" + putRootBranch branch1 = runDB conn do + branch2 <- Cv.causalbranch1to2 (Branch.transform (lift . 
lift) branch1) + void $ Ops.saveRootBranch branch2 rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) rootBranchUpdates = error "todo" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 0810caf367..f165081e81 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -418,6 +418,16 @@ causalbranch2to1' lookupSize lookupCT (V2.Causal hc _he (Map.toList -> parents) e <- me V1.Causal.Merge currentHash <$> branch2to1 lookupSize lookupCT e <*> pure (Map.fromList tailsList) +causalbranch1to2 :: Monad m => V1.Branch.Branch m -> m (V2.Branch.Causal m) +causalbranch1to2 = error "todo" + +-- data V2.Branch m = Branch +-- { terms :: Map NameSegment (Map Referent (m MdValues)), +-- types :: Map NameSegment (Map Reference (m MdValues)), +-- patches :: Map NameSegment (PatchHash, m Patch), +-- children :: Map NameSegment (Causal m) +-- } + patch2to1 :: forall m. Monad m => From 4c16d9e67af7ad623a4a29b4dc03625cf4c9e2b6 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 12 Jan 2021 15:18:11 -0500 Subject: [PATCH 072/225] putRootBranch and causalbranch1to2 --- .../src/Unison/Codebase/SqliteCodebase.hs | 5 ++-- .../Codebase/SqliteCodebase/Conversions.hs | 25 +++++++++++++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 70e6ca02b4..e04bf7a0ff 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -266,9 +266,8 @@ sqliteCodebase root = do e -> error $ show e putRootBranch :: Branch IO -> IO () - putRootBranch branch1 = runDB conn do - branch2 <- Cv.causalbranch1to2 (Branch.transform (lift . 
lift) branch1) - void $ Ops.saveRootBranch branch2 + putRootBranch branch1 = runDB conn . + void . Ops.saveRootBranch . Cv.causalbranch1to2 $ Branch.transform (lift . lift) branch1 rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) rootBranchUpdates = error "todo" diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index f165081e81..fc37048060 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -418,8 +418,29 @@ causalbranch2to1' lookupSize lookupCT (V2.Causal hc _he (Map.toList -> parents) e <- me V1.Causal.Merge currentHash <$> branch2to1 lookupSize lookupCT e <*> pure (Map.fromList tailsList) -causalbranch1to2 :: Monad m => V1.Branch.Branch m -> m (V2.Branch.Causal m) -causalbranch1to2 = error "todo" + +causalbranch1to2 :: forall m. Monad m => V1.Branch.Branch m -> V2.Branch.Causal m +causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1to2 c + where + hash1to2cb :: V1.Branch.Hash -> (V2.CausalHash, V2.BranchHash) + hash1to2cb (V1.Causal.RawHash h) = (hc, hb) where + h2 = hash1to2 h + hc = V2.CausalHash h2 + hb = V2.BranchHash h2 + + hash1to2c :: V1.Branch.Hash -> V2.CausalHash + hash1to2c = V2.CausalHash . hash1to2 . V1.Causal.unRawHash + + causal1to2' = causal1to2 @m @V1.Branch.Raw @V2.CausalHash @V2.BranchHash @(V1.Branch.Branch0 m) @(V2.Branch.Branch m) + + causal1to2 :: forall m h h2c h2e e e2. 
(Monad m, Ord h2c) => (V1.Causal.RawHash h -> (h2c, h2e)) -> (V1.Causal.RawHash h -> h2c) -> (e -> m e2) -> V1.Causal.Causal m h e -> V2.Causal m h2c h2e e2 + causal1to2 h1to22 h1to2 e1to2 = \case + V1.Causal.One (h1to22 -> (hc, hb)) e -> V2.Causal hc hb Map.empty (e1to2 e) + V1.Causal.Cons (h1to22 -> (hc, hb)) e (ht, mt) -> V2.Causal hc hb (Map.singleton (h1to2 ht) (causal1to2 h1to22 h1to2 e1to2 <$> mt)) (e1to2 e) + V1.Causal.Merge (h1to22 -> (hc, hb)) e parents -> V2.Causal hc hb (Map.bimap h1to2 (causal1to2 h1to22 h1to2 e1to2 <$>) parents) (e1to2 e) + + branch1to2 :: forall m. V1.Branch.Branch0 m -> m (V2.Branch.Branch m) + branch1to2 = error "todo" -- data V2.Branch m = Branch -- { terms :: Map NameSegment (Map Referent (m MdValues)), From 999e5871a6d82300d973e5cea9467c1a14da8ae0 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 12 Jan 2021 15:19:06 -0500 Subject: [PATCH 073/225] dummy implementation for rootBranchUpdates --- parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index e04bf7a0ff..a093e27e85 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -270,7 +270,7 @@ sqliteCodebase root = do void . Ops.saveRootBranch . Cv.causalbranch1to2 $ Branch.transform (lift . lift) branch1 rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) - rootBranchUpdates = error "todo" + rootBranchUpdates = pure (pure (), pure mempty) -- if this blows up on cromulent hashes, then switch from `hashToHashId` -- to one that returns Maybe. 
From 45b878fc0dfcfb0720f1877790352d04e63faa88 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 12 Jan 2021 15:29:07 -0500 Subject: [PATCH 074/225] scaffolding for branch1to2 --- .../Codebase/SqliteCodebase/Conversions.hs | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index fc37048060..5173716878 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -439,15 +439,23 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 V1.Causal.Cons (h1to22 -> (hc, hb)) e (ht, mt) -> V2.Causal hc hb (Map.singleton (h1to2 ht) (causal1to2 h1to22 h1to2 e1to2 <$> mt)) (e1to2 e) V1.Causal.Merge (h1to22 -> (hc, hb)) e parents -> V2.Causal hc hb (Map.bimap h1to2 (causal1to2 h1to22 h1to2 e1to2 <$>) parents) (e1to2 e) - branch1to2 :: forall m. V1.Branch.Branch0 m -> m (V2.Branch.Branch m) - branch1to2 = error "todo" + branch1to2 :: forall m. 
Applicative m => V1.Branch.Branch0 m -> m (V2.Branch.Branch m) + branch1to2 b = do + terms <- pure $ doTerms (V1.Branch._terms b) + types <- pure $ doTypes (V1.Branch._types b) + patches <- pure $ doPatches (V1.Branch._edits b) + children <- pure $ doChildren (V1.Branch._children b) + pure $ V2.Branch.Branch terms types patches children + where + doTerms :: V1.Branch.Star V1.Referent.Referent V1.NameSegment -> Map V2.Branch.NameSegment (Map V2.Referent.Referent (m V2.Branch.MdValues)) + doTypes :: V1.Branch.Star V1.Reference.Reference V1.NameSegment -> Map V2.Branch.NameSegment (Map V2.Reference.Reference (m V2.Branch.MdValues)) + doPatches :: Map V1.NameSegment (V1.Branch.EditHash, m V1.Patch) -> Map V2.Branch.NameSegment (V2.PatchHash, m V2.Branch.Patch) + doChildren :: Map V1.NameSegment (V1.Branch.Branch m) -> Map V2.Branch.NameSegment (V2.Branch.Causal m) + doTerms = undefined + doTypes = undefined + doPatches = undefined + doChildren = undefined --- data V2.Branch m = Branch --- { terms :: Map NameSegment (Map Referent (m MdValues)), --- types :: Map NameSegment (Map Reference (m MdValues)), --- patches :: Map NameSegment (PatchHash, m Patch), --- children :: Map NameSegment (Causal m) --- } patch2to1 :: forall m. 
From c3365746d76d151f01be0e24ba010c940cc7a8e1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 12 Jan 2021 23:02:00 -0500 Subject: [PATCH 075/225] branch1to2/terms + reformatting --- .../Codebase/SqliteCodebase/Conversions.hs | 53 +++++++++++++------ 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 5173716878..55ac497ea3 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -4,8 +4,10 @@ module Unison.Codebase.SqliteCodebase.Conversions where import Control.Monad (foldM) import Data.Bifunctor (Bifunctor (bimap)) +import Data.Bitraversable (Bitraversable (bitraverse)) import qualified Data.ByteString.Short as SBS import Data.Either (fromRight) +import Data.Foldable (Foldable (toList)) import Data.Map (Map) import qualified Data.Map as Map import qualified Data.Set as Set @@ -22,7 +24,9 @@ import qualified U.Codebase.Referent as V2.Referent import qualified U.Codebase.ShortHash as V2 import qualified U.Codebase.Sqlite.Symbol as V2 import qualified U.Codebase.Term as V2.Term +import qualified U.Codebase.TermEdit as V2.TermEdit import qualified U.Codebase.Type as V2.Type +import qualified U.Codebase.TypeEdit as V2.TypeEdit import qualified U.Codebase.WatchKind as V2 import qualified U.Codebase.WatchKind as V2.WatchKind import qualified U.Core.ABT as V2.ABT @@ -36,6 +40,8 @@ import qualified Unison.Codebase.Causal as V1.Causal import qualified Unison.Codebase.Metadata as V1.Metadata import qualified Unison.Codebase.Patch as V1 import qualified Unison.Codebase.ShortBranchHash as V1 +import qualified Unison.Codebase.TermEdit as V1.TermEdit +import qualified Unison.Codebase.TypeEdit as V1.TypeEdit import qualified Unison.ConstructorType as CT import qualified Unison.DataDeclaration as V1.Decl import 
Unison.Hash (Hash) @@ -56,11 +62,6 @@ import qualified Unison.Util.Relation as Relation import qualified Unison.Util.Star3 as V1.Star3 import qualified Unison.Var as V1.Var import qualified Unison.Var as Var -import Data.Bitraversable (Bitraversable(bitraverse)) -import qualified U.Codebase.TermEdit as V2.TermEdit -import qualified Unison.Codebase.TermEdit as V1.TermEdit -import qualified U.Codebase.TypeEdit as V2.TypeEdit -import qualified Unison.Codebase.TypeEdit as V1.TypeEdit sbh1to2 :: V1.ShortBranchHash -> V2.ShortBranchHash sbh1to2 (V1.ShortBranchHash b32) = V2.ShortBranchHash b32 @@ -329,6 +330,11 @@ referent2to1 lookupSize lookupCT = \case V2.Ref r -> V1.Ref <$> reference2to1 lookupSize r V2.Con r i -> V1.Con <$> reference2to1 lookupSize r <*> pure (fromIntegral i) <*> lookupCT r +referent1to2 :: V1.Referent -> V2.Referent +referent1to2 = \case + V1.Ref r -> V2.Ref $ reference1to2 r + V1.Con r i _ct -> V2.Con (reference1to2 r) (fromIntegral i) + referentid2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Referent.Id -> m V1.Referent.Id referentid2to1 lookupSize lookupCT = \case V2.RefId r -> V1.Ref' <$> referenceid2to1 lookupSize r @@ -418,15 +424,15 @@ causalbranch2to1' lookupSize lookupCT (V2.Causal hc _he (Map.toList -> parents) e <- me V1.Causal.Merge currentHash <$> branch2to1 lookupSize lookupCT e <*> pure (Map.fromList tailsList) - causalbranch1to2 :: forall m. Monad m => V1.Branch.Branch m -> V2.Branch.Causal m causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1to2 c - where + where hash1to2cb :: V1.Branch.Hash -> (V2.CausalHash, V2.BranchHash) - hash1to2cb (V1.Causal.RawHash h) = (hc, hb) where - h2 = hash1to2 h - hc = V2.CausalHash h2 - hb = V2.BranchHash h2 + hash1to2cb (V1.Causal.RawHash h) = (hc, hb) + where + h2 = hash1to2 h + hc = V2.CausalHash h2 + hb = V2.BranchHash h2 hash1to2c :: V1.Branch.Hash -> V2.CausalHash hash1to2c = V2.CausalHash . hash1to2 . 
V1.Causal.unRawHash @@ -446,17 +452,29 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 patches <- pure $ doPatches (V1.Branch._edits b) children <- pure $ doChildren (V1.Branch._children b) pure $ V2.Branch.Branch terms types patches children - where + where doTerms :: V1.Branch.Star V1.Referent.Referent V1.NameSegment -> Map V2.Branch.NameSegment (Map V2.Referent.Referent (m V2.Branch.MdValues)) + doTerms s = + Map.fromList + [ (namesegment1to2 ns, m2) + | ns <- toList . Relation.ran $ V1.Star3.d1 s + , let m2 = + Map.fromList + [ (referent1to2 r, pure md) + | r <- toList . Relation.lookupRan ns $ V1.Star3.d1 s + , let + mdrefs1to2 (typeR1, valR1) = (reference1to2 valR1, reference1to2 typeR1) + md = V2.Branch.MdValues . Map.fromList . map mdrefs1to2 . toList . Relation.lookupDom r $ V1.Star3.d3 s + ] + ] + doTypes :: V1.Branch.Star V1.Reference.Reference V1.NameSegment -> Map V2.Branch.NameSegment (Map V2.Reference.Reference (m V2.Branch.MdValues)) doPatches :: Map V1.NameSegment (V1.Branch.EditHash, m V1.Patch) -> Map V2.Branch.NameSegment (V2.PatchHash, m V2.Branch.Patch) doChildren :: Map V1.NameSegment (V1.Branch.Branch m) -> Map V2.Branch.NameSegment (V2.Branch.Causal m) - doTerms = undefined doTypes = undefined doPatches = undefined doChildren = undefined - patch2to1 :: forall m. 
Monad m => @@ -471,12 +489,12 @@ patch2to1 lookupSize (V2.Branch.Patch v2termedits v2typeedits) = do referent2to1' :: V2.Referent -> m V1.Reference referent2to1' = \case V2.Referent.Ref r -> reference2to1 lookupSize r - V2.Referent.Con{} -> error "found referent on LHS when converting patch2to1" + V2.Referent.Con {} -> error "found referent on LHS when converting patch2to1" termedit2to1 :: V2.TermEdit.TermEdit -> m V1.TermEdit.TermEdit termedit2to1 = \case V2.TermEdit.Replace (V2.Referent.Ref r) t -> V1.TermEdit.Replace <$> reference2to1 lookupSize r <*> typing2to1 t - V2.TermEdit.Replace{} -> error "found referent on RHS when converting patch2to1" + V2.TermEdit.Replace {} -> error "found referent on RHS when converting patch2to1" V2.TermEdit.Deprecate -> pure V1.TermEdit.Deprecate typeedit2to1 :: V2.TypeEdit.TypeEdit -> m V1.TypeEdit.TypeEdit typeedit2to1 = \case @@ -493,6 +511,9 @@ edithash2to1 = hash2to1 . V2.unPatchHash namesegment2to1 :: V2.Branch.NameSegment -> V1.NameSegment namesegment2to1 (V2.Branch.NameSegment t) = V1.NameSegment t +namesegment1to2 :: V1.NameSegment -> V2.Branch.NameSegment +namesegment1to2 (V1.NameSegment t) = V2.Branch.NameSegment t + branch2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> From a65ce0af31b1f66803426fb705975e6aa7cd9710 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 13 Jan 2021 11:27:49 -0500 Subject: [PATCH 076/225] finish causalbranch1to2 --- .../Codebase/SqliteCodebase/Conversions.hs | 44 +++++++++++++++++-- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 55ac497ea3..453e90cec4 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -445,7 +445,7 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 V1.Causal.Cons (h1to22 
-> (hc, hb)) e (ht, mt) -> V2.Causal hc hb (Map.singleton (h1to2 ht) (causal1to2 h1to22 h1to2 e1to2 <$> mt)) (e1to2 e) V1.Causal.Merge (h1to22 -> (hc, hb)) e parents -> V2.Causal hc hb (Map.bimap h1to2 (causal1to2 h1to22 h1to2 e1to2 <$>) parents) (e1to2 e) - branch1to2 :: forall m. Applicative m => V1.Branch.Branch0 m -> m (V2.Branch.Branch m) + branch1to2 :: forall m. Monad m => V1.Branch.Branch0 m -> m (V2.Branch.Branch m) branch1to2 b = do terms <- pure $ doTerms (V1.Branch._terms b) types <- pure $ doTypes (V1.Branch._types b) @@ -453,6 +453,7 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 children <- pure $ doChildren (V1.Branch._children b) pure $ V2.Branch.Branch terms types patches children where + -- is there a more readable way to structure these that's also linear? doTerms :: V1.Branch.Star V1.Referent.Referent V1.NameSegment -> Map V2.Branch.NameSegment (Map V2.Referent.Referent (m V2.Branch.MdValues)) doTerms s = Map.fromList @@ -469,11 +470,25 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 ] doTypes :: V1.Branch.Star V1.Reference.Reference V1.NameSegment -> Map V2.Branch.NameSegment (Map V2.Reference.Reference (m V2.Branch.MdValues)) + doTypes s = + Map.fromList + [ (namesegment1to2 ns, m2) + | ns <- toList . Relation.ran $ V1.Star3.d1 s + , let m2 = + Map.fromList + [ (reference1to2 r, pure md) + | r <- toList . Relation.lookupRan ns $ V1.Star3.d1 s + , let + mdrefs1to2 (typeR1, valR1) = (reference1to2 valR1, reference1to2 typeR1) + md = V2.Branch.MdValues . Map.fromList . map mdrefs1to2 . toList . 
Relation.lookupDom r $ V1.Star3.d3 s + ] + ] + doPatches :: Map V1.NameSegment (V1.Branch.EditHash, m V1.Patch) -> Map V2.Branch.NameSegment (V2.PatchHash, m V2.Branch.Patch) + doPatches = Map.bimap namesegment1to2 (bimap edithash1to2 (fmap patch1to2)) + doChildren :: Map V1.NameSegment (V1.Branch.Branch m) -> Map V2.Branch.NameSegment (V2.Branch.Causal m) - doTypes = undefined - doPatches = undefined - doChildren = undefined + doChildren = Map.bimap namesegment1to2 causalbranch1to2 patch2to1 :: forall m. @@ -505,9 +520,30 @@ patch2to1 lookupSize (V2.Branch.Patch v2termedits v2typeedits) = do V2.TermEdit.Subtype -> V1.TermEdit.Subtype V2.TermEdit.Different -> V1.TermEdit.Different +patch1to2 :: V1.Patch -> V2.Branch.Patch +patch1to2 (V1.Patch v1termedits v1typeedits) = V2.Branch.Patch v2termedits v2typeedits + where + v2termedits = Map.bimap (V2.Referent.Ref . reference1to2) (Set.map termedit1to2) $ Relation.domain v1termedits + v2typeedits = Map.bimap reference1to2 (Set.map typeedit1to2) $ Relation.domain v1typeedits + termedit1to2 :: V1.TermEdit.TermEdit -> V2.TermEdit.TermEdit + termedit1to2 = \case + V1.TermEdit.Replace r t -> V2.TermEdit.Replace (V2.Referent.Ref (reference1to2 r)) (typing1to2 t) + V1.TermEdit.Deprecate -> V2.TermEdit.Deprecate + typeedit1to2 :: V1.TypeEdit.TypeEdit -> V2.TypeEdit.TypeEdit + typeedit1to2 = \case + V1.TypeEdit.Replace r -> V2.TypeEdit.Replace (reference1to2 r) + V1.TypeEdit.Deprecate -> V2.TypeEdit.Deprecate + typing1to2 = \case + V1.TermEdit.Same -> V2.TermEdit.Same + V1.TermEdit.Subtype -> V2.TermEdit.Subtype + V1.TermEdit.Different -> V2.TermEdit.Different + edithash2to1 :: V2.PatchHash -> V1.Branch.EditHash edithash2to1 = hash2to1 . V2.unPatchHash +edithash1to2 :: V1.Branch.EditHash -> V2.PatchHash +edithash1to2 = V2.PatchHash . 
hash1to2 + namesegment2to1 :: V2.Branch.NameSegment -> V1.NameSegment namesegment2to1 (V2.Branch.NameSegment t) = V1.NameSegment t From 2fddd805745dd8471a2db24af2e8d437e9a71607 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 19 Jan 2021 13:27:25 -0500 Subject: [PATCH 077/225] codebase existence check and setup --- .../U/Codebase/Sqlite/Queries.hs | 51 +- .../Unison/Codebase/Editor/HandleCommand.hs | 9 +- .../src/Unison/Codebase/SqliteCodebase.hs | 797 ++++++++++-------- .../src/Unison/Codebase/TranscriptParser.hs | 5 +- .../src/Unison/CommandLine/Main.hs | 4 +- parser-typechecker/tests/Unison/Test/Git.hs | 45 +- parser-typechecker/tests/Unison/Test/IO.hs | 28 +- parser-typechecker/unison/Main.hs | 71 +- questions.md | 12 +- 9 files changed, 576 insertions(+), 446 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 0864743f3c..6bf229f46e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -15,6 +15,7 @@ module U.Codebase.Sqlite.Queries where +import Control.Monad (filterM) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad.IO.Class (MonadIO) import Control.Monad.Reader (MonadReader (ask)) @@ -22,7 +23,7 @@ import Control.Monad.Trans (MonadIO (liftIO)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Data.ByteString (ByteString) import Data.Maybe (fromJust) -import Data.String.Here.Uninterpolated (here) +import Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (..), (:.) 
(..)) import qualified Database.SQLite.Simple as SQLite @@ -73,6 +74,49 @@ type TypeHashReference = Reference' TextId HashId -- * main squeeze +createSchema :: DB m => m () +createSchema = do + execute_ [hereFile|sql/create.sql|] + execute_ [hereFile|sql/create-index.sql|] + +setFlags :: DB m => m () +setFlags = execute_ [here| + PRAGMA foreign_keys = ON; +|] + +type SchemaType = String +type SchemaName = String +checkForMissingSchema :: DB m => m [(SchemaType, SchemaName)] +checkForMissingSchema = filterM missing schema + where + missing (t, n) = null @[] @(Only Int) <$> query sql (t,n) + sql = "SELECT 1 FROM sqlite_master WHERE type = ? and name = ?" + schema = + [("table", "hash") + ,("index", "hash_base32") + ,("table", "text") + ,("table", "hash_object") + ,("index", "hash_object_hash_id") + ,("index", "hash_object_object_id") + ,("table", "object_type_description") + ,("table", "object") + ,("index", "object_hash_id") + ,("index", "object_type_id") + ,("table", "causal") + ,("index", "causal_value_hash_id") + ,("index", "causal_gc_generation") + ,("table", "namespace_root") + ,("table", "causal_parent") + ,("index", "causal_parent_causal_id") + ,("index", "causal_parent_parent_id") + -- ,("table", "causal_old") + ,("table", "watch_result") + ,("table", "watch") + ,("index", "watch_kind") + ,("table", "watch_kind_description") + ] + +{- ORMOLU_DISABLE -} saveHash :: DB m => Base32Hex -> m HashId saveHash base32 = execute sql (Only base32) >> queryOne (loadHashId base32) where sql = [here| INSERT OR IGNORE INTO hash (base32) VALUES (?) |] @@ -401,6 +445,7 @@ namespaceHashIdByBase32Prefix prefix = queryAtoms sql (Only $ prefix <> "%") whe INNER JOIN hash ON id = value_hash_id WHERE base32 LIKE ? 
|] +{- ORMOLU_ENABLE -} -- * helper functions @@ -423,6 +468,9 @@ query q r = do c <- ask; liftIO $ SQLite.query c q r execute :: (DB m, ToRow q) => SQLite.Query -> q -> m () execute q r = do c <- ask; liftIO $ SQLite.execute c q r +execute_ :: DB m => SQLite.Query -> m () +execute_ q = do c <- ask; liftIO $ SQLite.execute_ c q + executeMany :: (DB m, ToRow q) => SQLite.Query -> [q] -> m () executeMany q r = do c <- ask; liftIO $ SQLite.executeMany c q r @@ -444,3 +492,4 @@ instance ToField WatchKind where toField = \case WatchKind.RegularWatch -> SQLite.SQLInteger 0 WatchKind.TestWatch -> SQLite.SQLInteger 1 + diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs index b37de00107..4a797577eb 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs @@ -88,10 +88,9 @@ commandLine -> (SourceName -> IO LoadSourceResult) -> Codebase IO v Ann -> (Int -> IO gen) - -> Branch.Cache IO -> Free (Command IO i v) a -> IO a -commandLine config awaitInput setBranchRef rt notifyUser notifyNumbered loadSource codebase rngGen branchCache = +commandLine config awaitInput setBranchRef rt notifyUser notifyNumbered loadSource codebase rngGen = Free.foldWithIndex go where go :: forall x . 
Int -> Command IO i v x -> IO x @@ -121,11 +120,11 @@ commandLine config awaitInput setBranchRef rt notifyUser notifyNumbered loadSour setBranchRef branch Codebase.putRootBranch codebase branch ViewRemoteBranch ns -> - runExceptT $ Git.viewRemoteBranch branchCache ns + runExceptT $ Git.viewRemoteBranch undefined ns ImportRemoteBranch ns syncMode -> - runExceptT $ Git.importRemoteBranch codebase branchCache ns syncMode + runExceptT $ Git.importRemoteBranch codebase undefined ns syncMode SyncRemoteRootBranch repo branch syncMode -> - runExceptT $ Git.pushGitRootBranch codebase branchCache branch repo syncMode + runExceptT $ Git.pushGitRootBranch codebase undefined branch repo syncMode LoadTerm r -> Codebase.getTerm codebase r LoadType r -> Codebase.getTypeDeclaration codebase r LoadTypeOfTerm r -> Codebase.getTypeOfTerm codebase r diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index a093e27e85..6956d2197d 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -2,6 +2,7 @@ {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE QuasiQuotes #-} module Unison.Codebase.SqliteCodebase where @@ -13,6 +14,7 @@ import Control.Monad (filterM, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) import Control.Monad.Extra (ifM, unlessM) import Control.Monad.Reader (ReaderT (runReaderT)) +import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT) import Data.Bifunctor (Bifunctor (first), second) import qualified Data.Either.Combinators as Either @@ -34,6 +36,7 @@ import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Sqlite.ObjectType as OT import U.Codebase.Sqlite.Operations (EDB) import qualified U.Codebase.Sqlite.Operations as Ops +import qualified U.Codebase.Sqlite.Queries as Q import qualified 
U.Util.Hash as H2 import qualified U.Util.Monoid as Monoid import qualified U.Util.Set as Set @@ -53,6 +56,7 @@ import qualified Unison.DataDeclaration as Decl import Unison.Hash (Hash) import Unison.Parser (Ann) import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, traceM) +import qualified Unison.PrettyTerminal as PT import Unison.Reference (Reference) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent @@ -65,10 +69,91 @@ import qualified Unison.Term as Term import Unison.Type (Type) import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Pretty as P import UnliftIO (MonadIO, catchIO) import UnliftIO.STM -import qualified U.Codebase.Sqlite.Queries as Q -import Control.Monad.Trans (MonadTrans(lift)) +import UnliftIO.Directory (getHomeDirectory) +import System.Directory (canonicalizePath) +import qualified UnliftIO.Environment as SysEnv +import Data.String (IsString(fromString)) +import qualified System.Exit as SysExit +import qualified Control.Monad.Extra as Monad +import qualified Control.Exception + + +codebasePath :: FilePath +codebasePath = ".unison" "v2" "unison.sqlite3" + +-- get the codebase in dir, or in the home directory if not provided. +getCodebaseOrExit :: Maybe FilePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) +getCodebaseOrExit mdir = do + dir <- getCodebaseDir mdir + progName <- SysEnv.getProgName + prettyDir <- P.string <$> canonicalizePath dir + let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir + sqliteCodebase dir >>= \case + Left _missingSchema -> do + PT.putPrettyLn' errMsg + SysExit.exitFailure + Right c -> pure c + +getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s +getNoCodebaseErrorMsg executable prettyDir mdir = + let secondLine = + case mdir of + Just dir -> "Run `" <> executable <> " -codebase " <> fromString dir + <> " init` to create one, then try again!" 
+ Nothing -> "Run `" <> executable <> " init` to create one there," + <> " then try again;" + <> " or `" <> executable <> " -codebase ` to load a codebase from someplace else!" + in + P.lines + [ "No codebase exists in " <> prettyDir <> "." + , secondLine ] + +initCodebaseAndExit :: Maybe FilePath -> IO () +initCodebaseAndExit mdir = do + dir <- getCodebaseDir mdir + (closeCodebase, _codebase) <- initCodebase dir + closeCodebase + SysExit.exitSuccess + +-- initializes a new codebase here (i.e. `ucm -codebase dir init`) +initCodebase :: FilePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) +initCodebase path = do + prettyDir <- P.string <$> canonicalizePath path + + Monad.whenM (codebaseExists path) do + PT.putPrettyLn' + . P.wrap + $ "It looks like " <> prettyDir <> " already exists." + SysExit.exitFailure + + PT.putPrettyLn' + . P.wrap + $ "Initializing a new codebase in: " + <> prettyDir + + -- run sql create scripts + Control.Exception.bracket + (unsafeGetConnection path) + Sqlite.close + (runReaderT Q.createSchema) + + Right (closeCodebase, theCodebase) <- sqliteCodebase path + Codebase1.initializeCodebase theCodebase + pure (closeCodebase, theCodebase) + +getCodebaseDir :: Maybe FilePath -> IO FilePath +getCodebaseDir = maybe getHomeDirectory pure + +-- checks if a db exists at `path` with the minimum schema +codebaseExists :: CodebasePath -> IO Bool +codebaseExists root = sqliteCodebase root >>= \case + Left _ -> pure False + Right (close, _codebase) -> close >> pure True + +-- and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) -- 1) buffer up the component -- 2) in the event that the component is complete, then what? 
@@ -93,360 +178,370 @@ type TermBufferEntry = BufferEntry (Term Symbol Ann, Type Symbol Ann) type DeclBufferEntry = BufferEntry (Decl Symbol Ann) -sqliteCodebase :: CodebasePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) +unsafeGetConnection :: CodebasePath -> IO Sqlite.Connection +unsafeGetConnection root = do + conn <- Sqlite.open $ root codebasePath + runReaderT Q.setFlags conn + pure conn + +sqliteCodebase :: CodebasePath -> IO (Either [(Q.SchemaType, Q.SchemaName)] (IO (), Codebase1.Codebase IO Symbol Ann)) sqliteCodebase root = do - conn :: Sqlite.Connection <- Sqlite.open $ root "v2" "unison.sqlite3" - termBuffer :: TVar (Map Hash TermBufferEntry) <- newTVarIO Map.empty - declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty - let getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) - getTerm (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = - runDB' conn do - term2 <- Ops.loadTermByReference (C.Reference.Id h2 i) - Cv.term2to1 h1 getCycleLen getDeclType term2 - - getCycleLen :: EDB m => Hash -> m Reference.Size - getCycleLen = Ops.getCycleLen . Cv.hash1to2 - - getDeclType :: EDB m => C.Reference.Reference -> m CT.ConstructorType - getDeclType = \case - C.Reference.ReferenceBuiltin t -> - let err = - error $ - "I don't know about the builtin type ##" - ++ show t - ++ ", but I've been asked for it's ConstructorType." - in pure . fromMaybe err $ - Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType - C.Reference.ReferenceDerived i -> getDeclTypeById i - - getDeclTypeById :: EDB m => C.Reference.Id -> m CT.ConstructorType - getDeclTypeById = fmap Cv.decltype2to1 . 
Ops.getDeclTypeByReference - - getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) - getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = - runDB' conn do - type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) - Cv.ttype2to1 getCycleLen type2 - - getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) - getTypeDeclaration (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = - runDB' conn do - decl2 <- Ops.loadDeclByReference (C.Reference.Id h2 i) - Cv.decl2to1 h1 getCycleLen decl2 - - putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTerm (Reference.Id h@(Cv.hash1to2 -> h2) i n) tm tp = - runDB conn $ - unlessM - (Ops.objectExistsForHash h2) - ( withBuffer termBuffer h \(BufferEntry size comp missing waiting) -> do - let size' = Just n - pure $ - ifM - ((==) <$> size <*> size') - (pure ()) - (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') - let comp' = Map.insert i (tm, tp) comp - missingTerms' <- + conn <- unsafeGetConnection root + runReaderT Q.checkForMissingSchema conn >>= \case + [] -> do + termBuffer :: TVar (Map Hash TermBufferEntry) <- newTVarIO Map.empty + declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty + let getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) + getTerm (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = + runDB' conn do + term2 <- Ops.loadTermByReference (C.Reference.Id h2 i) + Cv.term2to1 h1 getCycleLen getDeclType term2 + + getCycleLen :: EDB m => Hash -> m Reference.Size + getCycleLen = Ops.getCycleLen . Cv.hash1to2 + + getDeclType :: EDB m => C.Reference.Reference -> m CT.ConstructorType + getDeclType = \case + C.Reference.ReferenceBuiltin t -> + let err = + error $ + "I don't know about the builtin type ##" + ++ show t + ++ ", but I've been asked for it's ConstructorType." + in pure . 
fromMaybe err $ + Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType + C.Reference.ReferenceDerived i -> getDeclTypeById i + + getDeclTypeById :: EDB m => C.Reference.Id -> m CT.ConstructorType + getDeclTypeById = fmap Cv.decltype2to1 . Ops.getDeclTypeByReference + + getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) + getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = + runDB' conn do + type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) + Cv.ttype2to1 getCycleLen type2 + + getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) + getTypeDeclaration (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = + runDB' conn do + decl2 <- Ops.loadDeclByReference (C.Reference.Id h2 i) + Cv.decl2to1 h1 getCycleLen decl2 + + putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () + putTerm (Reference.Id h@(Cv.hash1to2 -> h2) i n) tm tp = + runDB conn $ + unlessM + (Ops.objectExistsForHash h2) + ( withBuffer termBuffer h \(BufferEntry size comp missing waiting) -> do + let size' = Just n + pure $ + ifM + ((==) <$> size <*> size') + (pure ()) + (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') + let comp' = Map.insert i (tm, tp) comp + missingTerms' <- + filterM + (fmap not . Ops.objectExistsForHash . Cv.hash1to2) + [h | Reference.Derived h _i _n <- Set.toList $ Term.termDependencies tm] + missingTypes' <- + filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) $ + [h | Reference.Derived h _i _n <- Set.toList $ Term.typeDependencies tm] + ++ [h | Reference.Derived h _i _n <- Set.toList $ Type.dependencies tp] + let missing' = missing <> Set.fromList (missingTerms' <> missingTypes') + traverse (addBufferDependent h termBuffer) missingTerms' + traverse (addBufferDependent h declBuffer) missingTypes' + putBuffer termBuffer h (BufferEntry size' comp' missing' waiting) + tryFlushTermBuffer h + ) + + putBuffer :: (MonadIO m, Show a) => TVar (Map Hash (BufferEntry a)) -> Hash -> BufferEntry a -> m () + putBuffer tv h e = do + traceM $ "putBuffer " ++ show h ++ " " ++ show e + atomically $ modifyTVar tv (Map.insert h e) + + withBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> (BufferEntry a -> m b) -> m b + withBuffer tv h f = + Map.lookup h <$> readTVarIO tv >>= \case + Just e -> f e + Nothing -> f (BufferEntry Nothing Map.empty Set.empty Set.empty) + + removeBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> m () + removeBuffer tv h = atomically $ modifyTVar tv (Map.delete h) + + addBufferDependent :: (MonadIO m, Show a) => Hash -> TVar (Map Hash (BufferEntry a)) -> Hash -> m () + addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do + putBuffer tv dependency be {beWaitingDependents = Set.insert dependent $ beWaitingDependents be} + tryFlushBuffer :: + (EDB m, Show a) => + TVar (Map Hash (BufferEntry a)) -> + (H2.Hash -> [a] -> m ()) -> + (Hash -> m ()) -> + Hash -> + m () + tryFlushBuffer buf saveComponent tryWaiting h@(Cv.hash1to2 -> h2) = + -- skip if it has already been flushed + unlessM (Ops.objectExistsForHash h2) $ withBuffer buf h try + where + try (BufferEntry size comp (Set.delete h -> missing) waiting) = do + missing' <- filterM (fmap not . Ops.objectExistsForHash . Cv.hash1to2) - [h | Reference.Derived h _i _n <- Set.toList $ Term.termDependencies tm] - missingTypes' <- - filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) $ - [h | Reference.Derived h _i _n <- Set.toList $ Term.typeDependencies tm] - ++ [h | Reference.Derived h _i _n <- Set.toList $ Type.dependencies tp] - let missing' = missing <> Set.fromList (missingTerms' <> missingTypes') - traverse (addBufferDependent h termBuffer) missingTerms' - traverse (addBufferDependent h declBuffer) missingTypes' - putBuffer termBuffer h (BufferEntry size' comp' missing' waiting) - tryFlushTermBuffer h + (toList missing) + if null missing' && size == Just (fromIntegral (length comp)) + then do + saveComponent h2 (toList comp) + removeBuffer buf h + traverse_ tryWaiting waiting + else -- update + + putBuffer buf h $ + BufferEntry size comp (Set.fromList missing') waiting + + tryFlushTermBuffer :: EDB m => Hash -> m () + tryFlushTermBuffer h = + tryFlushBuffer + termBuffer + ( \h2 -> + void . Ops.saveTermComponent h2 + . fmap (first (Cv.term1to2 h) . second Cv.ttype1to2) + ) + tryFlushTermBuffer + h + + tryFlushDeclBuffer :: EDB m => Hash -> m () + tryFlushDeclBuffer h = + tryFlushBuffer + declBuffer + (\h2 -> void . Ops.saveDeclComponent h2 . fmap (Cv.decl1to2 h)) + (\h -> tryFlushTermBuffer h >> tryFlushDeclBuffer h) + h + + putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () + putTypeDeclaration (Reference.Id h@(Cv.hash1to2 -> h2) i n) decl = + runDB conn $ + unlessM + (Ops.objectExistsForHash h2) + ( withBuffer declBuffer h \(BufferEntry size comp missing waiting) -> do + let size' = Just n + pure $ + ifM + ((==) <$> size <*> size') + (pure ()) + (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') + let comp' = Map.insert i decl comp + moreMissing <- + filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) $ + [h | Reference.Derived h _i _n <- Set.toList $ Decl.declDependencies decl] + let missing' = missing <> Set.fromList moreMissing + traverse (addBufferDependent h declBuffer) moreMissing + putBuffer declBuffer h (BufferEntry size' comp' missing' waiting) + tryFlushDeclBuffer h + ) + + getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) + getRootBranch = + fmap (Either.mapLeft err) + . runExceptT + . flip runReaderT conn + . fmap (Branch.transform (runDB conn)) + $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal + where + err :: Ops.Error -> Codebase1.GetRootBranchError + err = \case + Ops.DatabaseIntegrityError Q.NoNamespaceRoot -> + Codebase1.NoRootBranch + Ops.DecodeError (Ops.ErrBranch oId) _bytes _msg -> + Codebase1.CouldntParseRootBranch $ + "Couldn't decode " ++ show oId ++ ": " ++ _msg + Ops.ExpectedBranch ch _bh -> + Codebase1.CouldntLoadRootBranch $ Cv.causalHash2to1 ch + e -> error $ show e + + putRootBranch :: Branch IO -> IO () + putRootBranch branch1 = + runDB conn + . void + . Ops.saveRootBranch + . Cv.causalbranch1to2 + $ Branch.transform (lift . lift) branch1 + + rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) + rootBranchUpdates = pure (pure (), pure mempty) + + -- if this blows up on cromulent hashes, then switch from `hashToHashId` + -- to one that returns Maybe. + getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) + getBranchForHash h = runDB conn do + Ops.loadCausalBranchByCausalHash (Cv.branchHash1to2 h) >>= \case + Just b -> + pure . Just . 
Branch.transform (runDB conn) + =<< Cv.causalbranch2to1 getCycleLen getDeclType b + Nothing -> pure Nothing + + dependentsImpl :: Reference -> IO (Set Reference.Id) + dependentsImpl r = + runDB conn $ + Set.traverse (Cv.referenceid2to1 getCycleLen) + =<< Ops.dependents (Cv.reference1to2 r) + + syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () + syncFromDirectory = error "todo" + + syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () + syncToDirectory = error "todo" + + watches :: UF.WatchKind -> IO [Reference.Id] + watches w = + runDB conn $ + Ops.listWatches (Cv.watchKind1to2 w) + >>= traverse (Cv.referenceid2to1 getCycleLen) + + -- getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) + getWatch k r@(Reference.Id h _i _n) = + runDB' conn $ + Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) + >>= Cv.term2to1 h getCycleLen getDeclType + + putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () + putWatch k r@(Reference.Id h _i _n) tm = + runDB conn $ + Ops.saveWatch + (Cv.watchKind1to2 k) + (Cv.referenceid1to2 r) + (Cv.term1to2 h tm) + + getReflog :: IO [Reflog.Entry] + getReflog = + ( do + contents <- TextIO.readFile (reflogPath root) + let lines = Text.lines contents + let entries = parseEntry <$> lines + pure entries ) - - putBuffer :: (MonadIO m, Show a) => TVar (Map Hash (BufferEntry a)) -> Hash -> BufferEntry a -> m () - putBuffer tv h e = do - traceM $ "putBuffer " ++ show h ++ " " ++ show e - atomically $ modifyTVar tv (Map.insert h e) - - withBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> (BufferEntry a -> m b) -> m b - withBuffer tv h f = - Map.lookup h <$> readTVarIO tv >>= \case - Just e -> f e - Nothing -> f (BufferEntry Nothing Map.empty Set.empty Set.empty) - - removeBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> m () - removeBuffer tv h = atomically $ modifyTVar tv (Map.delete h) - - addBufferDependent :: (MonadIO m, Show 
a) => Hash -> TVar (Map Hash (BufferEntry a)) -> Hash -> m () - addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do - putBuffer tv dependency be {beWaitingDependents = Set.insert dependent $ beWaitingDependents be} - tryFlushBuffer :: - (EDB m, Show a) => - TVar (Map Hash (BufferEntry a)) -> - (H2.Hash -> [a] -> m ()) -> - (Hash -> m ()) -> - Hash -> - m () - tryFlushBuffer buf saveComponent tryWaiting h@(Cv.hash1to2 -> h2) = - -- skip if it has already been flushed - unlessM (Ops.objectExistsForHash h2) $ withBuffer buf h try - where - try (BufferEntry size comp (Set.delete h -> missing) waiting) = do - missing' <- - filterM - (fmap not . Ops.objectExistsForHash . Cv.hash1to2) - (toList missing) - if null missing' && size == Just (fromIntegral (length comp)) - then do - saveComponent h2 (toList comp) - removeBuffer buf h - traverse_ tryWaiting waiting - else -- update - - putBuffer buf h $ - BufferEntry size comp (Set.fromList missing') waiting - - tryFlushTermBuffer :: EDB m => Hash -> m () - tryFlushTermBuffer h = - tryFlushBuffer - termBuffer - ( \h2 -> - void . Ops.saveTermComponent h2 - . fmap (first (Cv.term1to2 h) . second Cv.ttype1to2) - ) - tryFlushTermBuffer - h - - tryFlushDeclBuffer :: EDB m => Hash -> m () - tryFlushDeclBuffer h = - tryFlushBuffer - declBuffer - (\h2 -> void . Ops.saveDeclComponent h2 . fmap (Cv.decl1to2 h)) - (\h -> tryFlushTermBuffer h >> tryFlushDeclBuffer h) - h - - putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () - putTypeDeclaration (Reference.Id h@(Cv.hash1to2 -> h2) i n) decl = - runDB conn $ - unlessM - (Ops.objectExistsForHash h2) - ( withBuffer declBuffer h \(BufferEntry size comp missing waiting) -> do - let size' = Just n - pure $ - ifM - ((==) <$> size <*> size') - (pure ()) - (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') - let comp' = Map.insert i decl comp - moreMissing <- - filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) $ - [h | Reference.Derived h _i _n <- Set.toList $ Decl.declDependencies decl] - let missing' = missing <> Set.fromList moreMissing - traverse (addBufferDependent h declBuffer) moreMissing - putBuffer declBuffer h (BufferEntry size' comp' missing' waiting) - tryFlushDeclBuffer h - ) - - getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) - getRootBranch = - fmap (Either.mapLeft err) - . runExceptT - . flip runReaderT conn - . fmap (Branch.transform (runDB conn)) - $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal - where - err :: Ops.Error -> Codebase1.GetRootBranchError - err = \case - Ops.DatabaseIntegrityError Q.NoNamespaceRoot -> - Codebase1.NoRootBranch - Ops.DecodeError (Ops.ErrBranch oId) _bytes _msg -> - Codebase1.CouldntParseRootBranch $ - "Couldn't decode " ++ show oId ++ ": " ++ _msg - Ops.ExpectedBranch ch _bh -> - Codebase1.CouldntLoadRootBranch $ Cv.causalHash2to1 ch - e -> error $ show e - - putRootBranch :: Branch IO -> IO () - putRootBranch branch1 = runDB conn . - void . Ops.saveRootBranch . Cv.causalbranch1to2 $ Branch.transform (lift . lift) branch1 - - rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) - rootBranchUpdates = pure (pure (), pure mempty) - - -- if this blows up on cromulent hashes, then switch from `hashToHashId` - -- to one that returns Maybe. - getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) - getBranchForHash h = runDB conn do - Ops.loadCausalBranchByCausalHash (Cv.branchHash1to2 h) >>= \case - Just b -> - pure . Just . 
Branch.transform (runDB conn) - =<< Cv.causalbranch2to1 getCycleLen getDeclType b - Nothing -> pure Nothing - - dependentsImpl :: Reference -> IO (Set Reference.Id) - dependentsImpl r = - runDB conn $ - Set.traverse (Cv.referenceid2to1 getCycleLen) - =<< Ops.dependents (Cv.reference1to2 r) - - syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () - syncFromDirectory = error "todo" - - syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () - syncToDirectory = error "todo" - - watches :: UF.WatchKind -> IO [Reference.Id] - watches w = - runDB conn $ - Ops.listWatches (Cv.watchKind1to2 w) - >>= traverse (Cv.referenceid2to1 getCycleLen) - - -- getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) - getWatch k r@(Reference.Id h _i _n) = - runDB' conn $ - Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) - >>= Cv.term2to1 h getCycleLen getDeclType - - putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () - putWatch k r@(Reference.Id h _i _n) tm = - runDB conn $ - Ops.saveWatch - (Cv.watchKind1to2 k) - (Cv.referenceid1to2 r) - (Cv.term1to2 h tm) - - getReflog :: IO [Reflog.Entry] - getReflog = - ( do - contents <- TextIO.readFile (reflogPath root) - let lines = Text.lines contents - let entries = parseEntry <$> lines - pure entries + `catchIO` const (pure []) + where + parseEntry t = fromMaybe (err t) (Reflog.fromText t) + err t = + error $ + "I couldn't understand this line in " ++ reflogPath root ++ "\n\n" + ++ Text.unpack t + + appendReflog :: Text -> Branch IO -> Branch IO -> IO () + appendReflog reason old new = + let t = + Reflog.toText $ + Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason + in TextIO.appendFile (reflogPath root) (t <> "\n") + + reflogPath :: CodebasePath -> FilePath + reflogPath root = root "reflog" + + termsOfTypeImpl :: Reference -> IO (Set Referent.Id) + termsOfTypeImpl r = + runDB conn $ + Ops.termsHavingType (Cv.reference1to2 r) + >>= 
Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) + + termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) + termsMentioningTypeImpl r = + runDB conn $ + Ops.termsMentioningType (Cv.reference1to2 r) + >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) + + hashLength :: IO Int + hashLength = pure 10 + + branchHashLength :: IO Int + branchHashLength = pure 10 + + defnReferencesByPrefix :: OT.ObjectType -> ShortHash -> IO (Set Reference.Id) + defnReferencesByPrefix _ (ShortHash.Builtin _) = pure mempty + defnReferencesByPrefix ot (ShortHash.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) _cid) = + Monoid.fromMaybe <$> runDB' conn do + refs <- do + Ops.componentReferencesByPrefix ot prefix cycle + >>= traverse (C.Reference.idH Ops.loadHashByObjectId) + >>= pure . Set.fromList + + Set.fromList <$> traverse (Cv.referenceid2to1 getCycleLen) (Set.toList refs) + + termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + termReferencesByPrefix = defnReferencesByPrefix OT.TermComponent + + declReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + declReferencesByPrefix = defnReferencesByPrefix OT.DeclComponent + + referentsByPrefix :: ShortHash -> IO (Set Referent.Id) + referentsByPrefix SH.Builtin {} = pure mempty + referentsByPrefix (SH.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) cid) = runDB conn do + termReferents <- + Ops.termReferentsByPrefix prefix cycle + >>= traverse (Cv.referentid2to1 getCycleLen getDeclType) + declReferents' <- Ops.declReferentsByPrefix prefix cycle (read . Text.unpack <$> cid) + let declReferents = + [ Referent.Con' (Reference.Id (Cv.hash2to1 h) pos len) (fromIntegral cid) (Cv.decltype2to1 ct) + | (h, pos, len, ct, cids) <- declReferents', + cid <- cids + ] + pure . 
Set.fromList $ termReferents <> declReferents + + branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) + branchHashesByPrefix sh = runDB conn do + -- given that a Branch is shallow, it's really `CausalHash` that you'd + -- refer to to specify a full namespace w/ history. + -- but do we want to be able to refer to a namespace without its history? + cs <- Ops.causalHashesByPrefix (Cv.sbh1to2 sh) + pure $ Set.map (Causal.RawHash . Cv.hash2to1 . unCausalHash) cs + + -- Do we want to include causal hashes here or just namespace hashes? + -- Could we expose just one or the other of them to the user? + -- Git uses commit hashes and tree hashes (analogous to causal hashes + -- and namespace hashes, respectively), but the user is presented + -- primarily with commit hashes. + -- Arya leaning towards doing the same for Unison. + + let finalizer = do + Sqlite.close conn + decls <- readTVarIO declBuffer + terms <- readTVarIO termBuffer + let printBuffer header b = + if b /= mempty + then putStrLn header >> putStrLn "" >> print b + else pure () + printBuffer "Decls:" decls + printBuffer "Terms:" terms + + pure . 
Right $ + ( finalizer, + Codebase1.Codebase + getTerm + getTypeOfTermImpl + getTypeDeclaration + putTerm + putTypeDeclaration + getRootBranch + putRootBranch + rootBranchUpdates + getBranchForHash + dependentsImpl + syncFromDirectory + syncToDirectory + watches + getWatch + putWatch + getReflog + appendReflog + termsOfTypeImpl + termsMentioningTypeImpl + hashLength + termReferencesByPrefix + declReferencesByPrefix + referentsByPrefix + branchHashLength + branchHashesByPrefix ) - `catchIO` const (pure []) - where - parseEntry t = fromMaybe (err t) (Reflog.fromText t) - err t = - error $ - "I couldn't understand this line in " ++ reflogPath root ++ "\n\n" - ++ Text.unpack t - - appendReflog :: Text -> Branch IO -> Branch IO -> IO () - appendReflog reason old new = - let t = - Reflog.toText $ - Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason - in TextIO.appendFile (reflogPath root) (t <> "\n") - - reflogPath :: CodebasePath -> FilePath - reflogPath root = root "reflog" - - termsOfTypeImpl :: Reference -> IO (Set Referent.Id) - termsOfTypeImpl r = - runDB conn $ - Ops.termsHavingType (Cv.reference1to2 r) - >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) - - termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) - termsMentioningTypeImpl r = - runDB conn $ - Ops.termsMentioningType (Cv.reference1to2 r) - >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) - - hashLength :: IO Int - hashLength = pure 10 - - branchHashLength :: IO Int - branchHashLength = pure 10 - - defnReferencesByPrefix :: OT.ObjectType -> ShortHash -> IO (Set Reference.Id) - defnReferencesByPrefix _ (ShortHash.Builtin _) = pure mempty - defnReferencesByPrefix ot (ShortHash.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) _cid) = - Monoid.fromMaybe <$> runDB' conn do - refs <- do - Ops.componentReferencesByPrefix ot prefix cycle - >>= traverse (C.Reference.idH Ops.loadHashByObjectId) - >>= pure . 
Set.fromList - - Set.fromList <$> traverse (Cv.referenceid2to1 getCycleLen) (Set.toList refs) - - termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) - termReferencesByPrefix = defnReferencesByPrefix OT.TermComponent - - declReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) - declReferencesByPrefix = defnReferencesByPrefix OT.DeclComponent - - referentsByPrefix :: ShortHash -> IO (Set Referent.Id) - referentsByPrefix SH.Builtin {} = pure mempty - referentsByPrefix (SH.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) cid) = runDB conn do - termReferents <- - Ops.termReferentsByPrefix prefix cycle - >>= traverse (Cv.referentid2to1 getCycleLen getDeclType) - declReferents' <- Ops.declReferentsByPrefix prefix cycle (read . Text.unpack <$> cid) - let declReferents = - [ Referent.Con' (Reference.Id (Cv.hash2to1 h) pos len) (fromIntegral cid) (Cv.decltype2to1 ct) - | (h, pos, len, ct, cids) <- declReferents', - cid <- cids - ] - pure . Set.fromList $ termReferents <> declReferents - - branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) - branchHashesByPrefix sh = runDB conn do - ---- bs <- Ops.branchHashesByPrefix sh - -- given that a Branch is shallow, it's really `CausalHash` that you'd - -- refer to to specify a full namespace. - cs <- Ops.causalHashesByPrefix (Cv.sbh1to2 sh) - pure $ Set.map (Causal.RawHash . Cv.hash2to1 . unCausalHash) cs - - -- Do we want to include causal hashes here or just namespace hashes? - -- Could we expose just one or the other of them to the user? - -- Git uses commit hashes and tree hashes (analogous to causal hashes - -- and namespace hashes, respectively), but the user is presented - -- primarily with commit hashes. - -- Arya leaning towards doing the same for Unison. 
- - let finalizer = do - Sqlite.close conn - decls <- readTVarIO declBuffer - terms <- readTVarIO termBuffer - let printBuffer header b = - if b /= mempty - then putStrLn header >> putStrLn "" >> print b - else pure () - printBuffer "Decls:" decls - printBuffer "Terms:" terms - - pure $ - ( finalizer, - Codebase1.Codebase - getTerm - getTypeOfTermImpl - getTypeDeclaration - putTerm - putTypeDeclaration - getRootBranch - putRootBranch - rootBranchUpdates - getBranchForHash - dependentsImpl - syncFromDirectory - syncToDirectory - watches - getWatch - putWatch - getReflog - appendReflog - termsOfTypeImpl - termsMentioningTypeImpl - hashLength - termReferencesByPrefix - declReferencesByPrefix - referentsByPrefix - branchHashLength - branchHashesByPrefix - ) - --- x :: DB m => MaybeT m (Term Symbol) -> MaybeT m (Term Symbol Ann) --- x = error "not implemented" + missingSchema -> pure . Left $ missingSchema runDB' :: Connection -> MaybeT (ReaderT Connection (ExceptT Ops.Error IO)) a -> IO (Maybe a) runDB' conn = runDB conn . 
runMaybeT diff --git a/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs b/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs index 98dee169f4..cb59a43672 100644 --- a/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs +++ b/parser-typechecker/src/Unison/Codebase/TranscriptParser.hs @@ -109,8 +109,8 @@ parse srcName txt = case P.parse (stanzas <* P.eof) srcName txt of Right a -> Right a Left e -> Left (show e) -run :: Maybe Bool -> FilePath -> FilePath -> [Stanza] -> Codebase IO Symbol Ann -> Branch.Cache IO -> IO Text -run newRt dir configFile stanzas codebase branchCache = do +run :: Maybe Bool -> FilePath -> FilePath -> [Stanza] -> Codebase IO Symbol Ann -> IO Text +run newRt dir configFile stanzas codebase = do let initialPath = Path.absoluteEmpty putPrettyLn $ P.lines [ asciiartUnison, "", @@ -296,7 +296,6 @@ run newRt dir configFile stanzas codebase branchCache = do loadPreviousUnisonBlock codebase rng - branchCache free case o of Nothing -> do diff --git a/parser-typechecker/src/Unison/CommandLine/Main.hs b/parser-typechecker/src/Unison/CommandLine/Main.hs index ba4f1fcd3c..6fe6194d05 100644 --- a/parser-typechecker/src/Unison/CommandLine/Main.hs +++ b/parser-typechecker/src/Unison/CommandLine/Main.hs @@ -161,10 +161,9 @@ main -> [Either Event Input] -> IO (Runtime v) -> Codebase IO v Ann - -> Branch.Cache IO -> String -> IO () -main dir defaultBaseLib initialPath (config,cancelConfig) initialInputs startRuntime codebase branchCache version = do +main dir defaultBaseLib initialPath (config,cancelConfig) initialInputs startRuntime codebase version = do dir' <- shortenDirectory dir root <- fromMaybe Branch.empty . 
rightMay <$> Codebase.getRootBranch codebase putPrettyLn $ case defaultBaseLib of @@ -247,7 +246,6 @@ main dir defaultBaseLib initialPath (config,cancelConfig) initialInputs startRun loadSourceFile codebase (const Random.getSystemDRG) - branchCache free case o of Nothing -> pure () diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs index f938ce75b9..ec941eed4d 100644 --- a/parser-typechecker/tests/Unison/Test/Git.hs +++ b/parser-typechecker/tests/Unison/Test/Git.hs @@ -27,7 +27,6 @@ import Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex as SlimCopyRegenerat import Unison.Codebase.FileCodebase.Common (SyncToDir, formatAnn) import Unison.Parser (Ann) import Unison.Symbol (Symbol) -import qualified Unison.Util.Cache as Cache import Unison.Var (Var) test :: Test () @@ -53,10 +52,9 @@ syncComplete = scope "syncComplete" $ do observe title expectation files = scope title . for_ files $ \path -> scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expectation - cache <- io Cache.nullCache - codebase <- io $ snd <$> initCodebase cache tmp "codebase" + codebase <- io $ snd <$> initCodebase tmp "codebase" - runTranscript_ tmp codebase cache [iTrim| + runTranscript_ tmp codebase [iTrim| ```ucm:hide .builtin> alias.type ##Nat Nat .builtin> alias.term ##Nat.+ Nat.+ @@ -113,10 +111,9 @@ syncTestResults = scope "syncTestResults" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "syncTestResults" targetDir <- io $ Temp.createTempDirectory tmp "target" - cache <- io Cache.nullCache - codebase <- io $ snd <$> initCodebase cache tmp "codebase" + codebase <- io $ snd <$> initCodebase tmp "codebase" - runTranscript_ tmp codebase cache [iTrim| + runTranscript_ tmp codebase [iTrim| ```ucm .> builtins.merge ``` @@ -161,7 +158,6 @@ test> tests.x = [Ok "Great!"] -- dependencies testPull :: Test () testPull = scope "pull" $ do - branchCache <- io $ Branch.boundedCache 4096 -- let's push 
a broader set of stuff, pull a narrower one (to a fresh codebase) -- and verify that we have the definitions we expected and don't have some of -- the ones we didn't expect. @@ -170,15 +166,15 @@ testPull = scope "pull" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" -- initialize author and user codebases - authorCodebase <- io $ snd <$> initCodebase branchCache tmp "author" - (userDir, userCodebase) <- io $ initCodebase branchCache tmp "user" + authorCodebase <- io $ snd <$> initCodebase tmp "author" + (userDir, userCodebase) <- io $ initCodebase tmp "user" -- initialize git repo let repo = tmp "repo.git" io $ "git" ["init", "--bare", Text.pack repo] -- run author/push transcript - runTranscript_ tmp authorCodebase branchCache [iTrim| + runTranscript_ tmp authorCodebase [iTrim| ```ucm:hide .builtin> alias.type ##Nat Nat .builtin> alias.term ##Nat.+ Nat.+ @@ -207,7 +203,7 @@ inside.y = c + c scope (makeTitle path) $ io (doesFileExist $ tmp "repo" path) >>= expect -- run user/pull transcript - runTranscript_ tmp userCodebase branchCache [iTrim| + runTranscript_ tmp userCodebase [iTrim| ```ucm:hide .builtin> alias.type ##Nat Nat .builtin> alias.term ##Nat.+ Nat.+ @@ -290,18 +286,18 @@ inside.y = c + c -- no: B, d: aocoe|52add| -- initialize a fresh codebase -initCodebaseDir :: Branch.Cache IO -> FilePath -> String -> IO CodebasePath -initCodebaseDir branchCache tmpDir name = fst <$> initCodebase branchCache tmpDir name +initCodebaseDir :: FilePath -> String -> IO CodebasePath +initCodebaseDir tmpDir name = fst <$> initCodebase tmpDir name -initCodebase :: Branch.Cache IO -> FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) -initCodebase branchCache tmpDir name = do +initCodebase :: FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) +initCodebase tmpDir name = do let codebaseDir = tmpDir name - c <- FC.initCodebase branchCache codebaseDir + c <- FC.initCodebase undefined codebaseDir pure 
(codebaseDir, c) -- run a transcript on an existing codebase -runTranscript_ :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> Branch.Cache IO -> String -> m () -runTranscript_ tmpDir c branchCache transcript = do +runTranscript_ :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> String -> m () +runTranscript_ tmpDir c transcript = do let configFile = tmpDir ".unisonConfig" -- transcript runner wants a "current directory" for I guess writing scratch files? let cwd = tmpDir "cwd" @@ -309,7 +305,7 @@ runTranscript_ tmpDir c branchCache transcript = do -- parse and run the transcript flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> - void . liftIO $ TR.run Nothing cwd configFile stanzas c branchCache >>= + void . liftIO $ TR.run Nothing cwd configFile stanzas c >>= when traceTranscriptOutput . traceM . Text.unpack -- goal of this test is to make sure that push works correctly: @@ -318,15 +314,14 @@ runTranscript_ tmpDir c branchCache transcript = do -- dependents, type, and type mentions indices. testPush :: Test () testPush = scope "push" $ do - branchCache <- io $ Branch.boundedCache 4096 tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-push" -- initialize a fresh codebase named "c" - (codebasePath, c) <- io $ initCodebase branchCache tmp "c" + (codebasePath, c) <- io $ initCodebase tmp "c" -- Run the "setup transcript" to do the adds and updates; everything short of -- pushing. - runTranscript_ tmp c branchCache setupTranscript + runTranscript_ tmp c setupTranscript -- now we'll try pushing multiple ways. for_ pushImplementations $ \(implName, impl) -> scope implName $ do @@ -335,8 +330,8 @@ testPush = scope "push" $ do io $ "git" ["init", "--bare", Text.pack repoGit] -- push one way! 
- codebase <- io $ FC.codebase1' impl branchCache V1.formatSymbol formatAnn codebasePath - runTranscript_ tmp codebase branchCache (pushTranscript repoGit) + codebase <- io $ FC.codebase1' impl (error "todo") V1.formatSymbol formatAnn codebasePath + runTranscript_ tmp codebase (pushTranscript repoGit) -- check out the resulting repo so we can inspect it io $ "git" ["clone", Text.pack repoGit, Text.pack $ tmp implName ] diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs index 0c79132787..65d829f595 100644 --- a/parser-typechecker/tests/Unison/Test/IO.hs +++ b/parser-typechecker/tests/Unison/Test/IO.hs @@ -14,8 +14,7 @@ import System.FilePath (()) import System.Directory (removeDirectoryRecursive) import Unison.Codebase (Codebase, CodebasePath) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.FileCodebase as FC +import qualified Unison.Codebase.SqliteCodebase as SqliteCodebase import qualified Unison.Codebase.TranscriptParser as TR import Unison.Parser (Ann) import Unison.Symbol (Symbol) @@ -33,11 +32,11 @@ test = scope "IO" . tests $ [ testHandleOps ] -- writes the read text to the result file which is then checked by the haskell. testHandleOps :: Test () testHandleOps = - withScopeAndTempDir "handleOps" $ \workdir codebase cache -> do + withScopeAndTempDir "handleOps" $ \workdir codebase -> do let myFile = workdir "handleOps.txt" resultFile = workdir "handleOps.result" expectedText = "Good Job!" 
:: Text.Text - runTranscript_ False workdir codebase cache [iTrim| + runTranscript_ False workdir codebase [iTrim| ```ucm:hide .> builtins.mergeio ``` @@ -79,11 +78,11 @@ main = 'let -- * Utilities -initCodebase :: Branch.Cache IO -> FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) -initCodebase branchCache tmpDir name = do +initCodebase :: FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) +initCodebase tmpDir name = do let codebaseDir = tmpDir name - c <- FC.initCodebase branchCache codebaseDir - pure (codebaseDir, c) + (finalize, c) <- SqliteCodebase.initCodebase codebaseDir + pure (codebaseDir, finalize, c) -- run a transcript on an existing codebase runTranscript_ @@ -91,10 +90,9 @@ runTranscript_ => Bool -> FilePath -> Codebase IO Symbol Ann - -> Branch.Cache IO -> String -> m () -runTranscript_ newRt tmpDir c branchCache transcript = do +runTranscript_ newRt tmpDir c transcript = do let configFile = tmpDir ".unisonConfig" let cwd = tmpDir "cwd" let err err = error $ "Parse error: \n" <> show err @@ -102,13 +100,13 @@ runTranscript_ newRt tmpDir c branchCache transcript = do -- parse and run the transcript flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> void . liftIO $ - TR.run (Just newRt) cwd configFile stanzas c branchCache + TR.run (Just newRt) cwd configFile stanzas c >>= traceM . 
Text.unpack -withScopeAndTempDir :: String -> (FilePath -> Codebase IO Symbol Ann -> Branch.Cache IO -> Test ()) -> Test () +withScopeAndTempDir :: String -> (FilePath -> Codebase IO Symbol Ann -> Test ()) -> Test () withScopeAndTempDir name body = scope name $ do tmp <- io (Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory name) - cache <- io $ Branch.boundedCache 4096 - (_, codebase) <- io $ initCodebase cache tmp "user" - body tmp codebase cache + (_, closeCodebase, codebase) <- io $ initCodebase tmp "user" + body tmp codebase + io $ closeCodebase io $ removeDirectoryRecursive tmp diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 375c88e819..2b7621f3ef 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -13,11 +13,9 @@ import Data.Configurator.Types ( Config ) import System.Directory ( getCurrentDirectory, removeDirectoryRecursive ) import System.Environment ( getArgs, getProgName ) import System.Mem.Weak ( deRefWeak ) -import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Editor.VersionParser as VP import Unison.Codebase.Execute ( execute ) -import qualified Unison.Codebase.FileCodebase as FileCodebase -import Unison.Codebase.FileCodebase.Common ( codebasePath ) +import qualified Unison.Codebase.SqliteCodebase as SqliteCodebase import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) import Unison.Codebase.Runtime ( Runtime ) import Unison.CommandLine ( watchConfig ) @@ -26,7 +24,6 @@ import qualified Unison.Runtime.Rt1IO as Rt1 import qualified Unison.Runtime.Interface as RTI import Unison.Symbol ( Symbol ) import qualified Unison.Codebase.Path as Path -import qualified Unison.Util.Cache as Cache import qualified Version import qualified Unison.Codebase.TranscriptParser as TR import qualified System.Path as Path @@ -143,81 +140,81 @@ main = do _ -> (Nothing, restargs0) currentDir <- getCurrentDirectory configFilePath <- 
getConfigFilePath mcodepath - config@(config_, _cancelConfig) <- + config <- catchIOError (watchConfig configFilePath) $ \_ -> Exit.die "Your .unisonConfig could not be loaded. Check that it's correct!" - branchCacheSize :: Word <- Config.lookupDefault 4096 config_ "NamespaceCacheSize" - branchCache <- Cache.semispaceCache branchCacheSize case restargs of [] -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath - launch currentDir mNewRun config theCodebase branchCache [] + (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath + launch currentDir mNewRun config theCodebase [] + closeCodebase [version] | isFlag "version" version -> putStrLn $ progName ++ " version: " ++ Version.gitDescribe [help] | isFlag "help" help -> PT.putPrettyLn (usage progName) - ["init"] -> FileCodebase.initCodebaseAndExit mcodepath + ["init"] -> SqliteCodebase.initCodebaseAndExit mcodepath "run" : [mainName] -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath + (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath runtime <- join . getStartRuntime mNewRun $ fst config execute theCodebase runtime mainName + closeCodebase "run.file" : file : [mainName] | isDotU file -> do e <- safeReadUtf8 file case e of Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I couldn't find that file or it is for some reason unreadable." 
Right contents -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath + (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath let fileEvent = Input.UnisonFileChanged (Text.pack file) contents - launch currentDir mNewRun config theCodebase branchCache [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] + launch currentDir mNewRun config theCodebase [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] + closeCodebase "run.pipe" : [mainName] -> do e <- safeReadUtf8StdIn case e of Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I had trouble reading this input." Right contents -> do - theCodebase <- FileCodebase.getCodebaseOrExit branchCache mcodepath + (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath let fileEvent = Input.UnisonFileChanged (Text.pack "") contents launch - currentDir mNewRun config theCodebase branchCache + currentDir mNewRun config theCodebase [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] + closeCodebase "transcript" : args' -> case args' of - "-save-codebase" : transcripts -> runTranscripts mNewRun branchCache False True mcodepath transcripts - _ -> runTranscripts mNewRun branchCache False False mcodepath args' + "-save-codebase" : transcripts -> runTranscripts mNewRun False True mcodepath transcripts + _ -> runTranscripts mNewRun False False mcodepath args' "transcript.fork" : args' -> case args' of - "-save-codebase" : transcripts -> runTranscripts mNewRun branchCache True True mcodepath transcripts - _ -> runTranscripts mNewRun branchCache True False mcodepath args' + "-save-codebase" : transcripts -> runTranscripts mNewRun True True mcodepath transcripts + _ -> runTranscripts mNewRun True False mcodepath args' _ -> do PT.putPrettyLn (usage progName) Exit.exitWith (Exit.ExitFailure 1) -prepareTranscriptDir :: Branch.Cache IO -> Bool -> Maybe FilePath -> IO FilePath -prepareTranscriptDir branchCache inFork mcodepath = do 
+prepareTranscriptDir :: Bool -> Maybe FilePath -> IO FilePath +prepareTranscriptDir inFork mcodepath = do tmp <- Temp.getCanonicalTemporaryDirectory >>= (`Temp.createTempDirectory` "transcript") unless inFork $ do PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." - _ <- FileCodebase.initCodebase branchCache tmp + _ <- SqliteCodebase.initCodebase tmp pure() - when inFork $ FileCodebase.getCodebaseOrExit branchCache mcodepath >> do - path <- FileCodebase.getCodebaseDir mcodepath + when inFork $ SqliteCodebase.getCodebaseOrExit mcodepath >> do + path <- SqliteCodebase.getCodebaseDir mcodepath PT.putPrettyLn $ P.lines [ P.wrap "Transcript will be run on a copy of the codebase at: ", "", P.indentN 2 (P.string path) ] - Path.copyDir (path FP. codebasePath) (tmp FP. codebasePath) + Path.copyDir (path FP. SqliteCodebase.codebasePath) (tmp FP. SqliteCodebase.codebasePath) pure tmp runTranscripts' :: Maybe Bool - -> Branch.Cache IO -> Maybe FilePath -> FilePath -> [String] -> IO Bool -runTranscripts' mNewRun branchCache mcodepath transcriptDir args = do +runTranscripts' mNewRun mcodepath transcriptDir args = do currentDir <- getCurrentDirectory - theCodebase <- FileCodebase.getCodebaseOrExit branchCache $ Just transcriptDir case args of args@(_:_) -> do for_ args $ \arg -> case arg of @@ -231,7 +228,9 @@ runTranscripts' mNewRun branchCache mcodepath transcriptDir args = do P.indentN 2 $ P.string err]) Right stanzas -> do configFilePath <- getConfigFilePath mcodepath - mdOut <- TR.run mNewRun transcriptDir configFilePath stanzas theCodebase branchCache + (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit $ Just transcriptDir + mdOut <- TR.run mNewRun transcriptDir configFilePath stanzas theCodebase + closeCodebase let out = currentDir FP. 
FP.addExtension (FP.dropExtension arg ++ ".output") (FP.takeExtension md) @@ -248,17 +247,16 @@ runTranscripts' mNewRun branchCache mcodepath transcriptDir args = do runTranscripts :: Maybe Bool - -> Branch.Cache IO -> Bool -> Bool -> Maybe FilePath -> [String] -> IO () -runTranscripts mNewRun branchCache inFork keepTemp mcodepath args = do +runTranscripts mNewRun inFork keepTemp mcodepath args = do progName <- getProgName - transcriptDir <- prepareTranscriptDir branchCache inFork mcodepath + transcriptDir <- prepareTranscriptDir inFork mcodepath completed <- - runTranscripts' mNewRun branchCache (Just transcriptDir) transcriptDir args + runTranscripts' mNewRun (Just transcriptDir) transcriptDir args when completed $ do unless keepTemp $ removeDirectoryRecursive transcriptDir when keepTemp $ PT.putPrettyLn $ @@ -288,12 +286,11 @@ launch -> Maybe Bool -> (Config, IO ()) -> _ - -> Branch.Cache IO -> [Either Input.Event Input.Input] -> IO () -launch dir newRun config code branchCache inputs = do +launch dir newRun config code inputs = do startRuntime <- getStartRuntime newRun $ fst config - CommandLine.main dir defaultBaseLib initialPath config inputs startRuntime code branchCache Version.gitDescribe + CommandLine.main dir defaultBaseLib initialPath config inputs startRuntime code Version.gitDescribe isMarkdown :: String -> Bool isMarkdown md = case FP.takeExtension md of @@ -310,7 +307,7 @@ isFlag :: String -> String -> Bool isFlag f arg = arg == f || arg == "-" ++ f || arg == "--" ++ f getConfigFilePath :: Maybe FilePath -> IO FilePath -getConfigFilePath mcodepath = (FP. ".unisonConfig") <$> FileCodebase.getCodebaseDir mcodepath +getConfigFilePath mcodepath = (FP. 
".unisonConfig") <$> SqliteCodebase.getCodebaseDir mcodepath defaultBaseLib :: Maybe RemoteNamespace defaultBaseLib = rightMay $ diff --git a/questions.md b/questions.md index 13f8fc3f67..91beca0191 100644 --- a/questions.md +++ b/questions.md @@ -7,9 +7,9 @@ next steps: - [x] `SqliteCodebase.Conversions.unsafecausalbranch2to1` - [x] `SqliteCodebase.getRootBranch` - [x] `SqliteCodebase.getBranchForHash` -- [ ] Writing a branch +- [x] Writing a branch - [x] `SqliteCodebase.Conversions.causalbranch1to2` - - [ ] `SqliteCodebase.putRootBranch` + - [x] `SqliteCodebase.putRootBranch` - [ ] Syncing a remote codebase - [ ] `SqliteCodebase.syncFromDirectory` - [ ] `SqliteCodebase.syncToDirectory` @@ -27,10 +27,10 @@ next steps: | getTypeDeclaration | ✔ | | | putTerm | ✔ | | | putTypeDeclaration | ✔ | | -| getRootBranch | todo | | -| putRootBranch | todo | | -| rootBranchUpdates | todo | | -| getBranchForHash | todo | | +| getRootBranch | ✔ | | +| putRootBranch | ✔ | | +| rootBranchUpdates | ✔ | | +| getBranchForHash | ✔ | | | dependentsImpl | ✔ | | | syncFromDirectory | todo | | | syncToDirectory | todo | | From 7132ad0af987be8ed0431a94f125e1c8d262abb7 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 21 Jan 2021 15:50:52 -0500 Subject: [PATCH 078/225] add tracing, FK constraint names, and at least one bug fix --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 13 +- .../U/Codebase/Sqlite/Operations.hs | 11 +- .../U/Codebase/Sqlite/Queries.hs | 183 +++++++++++------- .../codebase-sqlite/sql/create-index.sql | 76 ++++---- codebase2/codebase-sqlite/sql/create.sql | 39 ++-- codebase2/codebase/U/Codebase/HashTags.hs | 5 +- codebase2/util/U/Util/Hash.hs | 5 +- .../src/Unison/Codebase/SqliteCodebase.hs | 69 ++++--- 8 files changed, 233 insertions(+), 168 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index e1516d146d..be4d33c5e1 100644 --- 
a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -244,7 +244,7 @@ type V2DiskDeclComponent = V2.DeclFormat.LocallyIndexedComponent -- todo: this just converts a whole codebase, which we need to do locally \ -- but we also want some code that imports just a particular sub-branch. syncV1V2 :: forall m. MonadIO m => Connection -> CodebasePath -> m (Either FatalError ()) -syncV1V2 c rootDir = liftIO $ SQLite.withTransaction c . runExceptT . flip runReaderT c $ do +syncV1V2 c rootDir = liftIO $ {- SQLite.withTransaction c . -} runExceptT . flip runReaderT c $ do v1RootHash <- getV1RootBranchHash rootDir >>= maybe (throwError NoRootBranch) pure -- starting from the root namespace, convert all entities you can find convertEntities [Branch1 v1RootHash] @@ -1254,8 +1254,9 @@ convertDecl1 lookup1 lookup2 hash1 v1component = do -- -- rdId <- saveReferenceDerived idH -- -- Db.execute sql (Nothing, Just rdId) -- -- sql = [here| --- -- INSERT OR IGNORE INTO reference (builtin, reference_derived_id) +-- -- INSERT INTO reference (builtin, reference_derived_id) -- -- VALUES (?, ?) +-- -- ON CONFLICT DO NOTHING -- -- |] -- --loadReferenceByHashId :: DB m => ReferenceH HashId -> m (Maybe ReferenceId) @@ -1279,8 +1280,9 @@ convertDecl1 lookup1 lookup2 hash1 v1component = do -- -- insert hashId i >> fmap fromJust (loadReferenceDerivedByHashId r) where -- -- insert h i = liftIO $ execute sql (h, i) where -- -- sql = [here| --- -- INSERT OR IGNORE INTO reference_derived (hash_id, component_index) +-- -- INSERT INTO reference_derived (hash_id, component_index) -- -- VALUES (?, ?) 
+-- -- ON CONFLICT DO NOTHING -- -- |] -- -- -- --loadReferenceDerivedByHashId :: DB m => Reference.IdH Db.HashId -> m (Maybe Db.ReferenceDerivedId) @@ -1302,10 +1304,11 @@ convertDecl1 lookup1 lookup2 hash1 v1component = do -- -- fmap fromJust (loadReferenceDerivedByReferenceDerivedId r) -- -- where -- -- sql = [here| --- -- INSERT OR IGNORE INTO referent_derived +-- -- INSERT INTO referent_derived -- -- (reference_derived_id, constructor_id, constructor_type) -- -- VALUES (?, ?, ?) --- -- |] +-- -- ON CONFLICT DO NOTHING +-- -- |]s -- --loadReferentDerivedByReferenceDerivedId :: DB m => Referent' ReferenceDerivedId -> m (Maybe ReferentDerivedId) -- --loadReferentDerivedByReferenceDerivedId r = queryMaybe . query sql r where -- -- sql = [here| diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index ae38f7c80b..6dac562858 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -111,6 +111,7 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +import Debug.Trace -- * Error handling @@ -798,19 +799,19 @@ type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveRootBranch (C.Causal hc he parents me) = do + traceM $ "\nsaveRootBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) -- check if we can skip the whole thing, by checking if there are causal parents for hc chId <- liftQ (Q.saveCausalHash hc) - liftQ (Q.loadCausalParents chId) >>= \case + parentCausalHashIds <- liftQ (Q.loadCausalParents chId) >>= \case [] -> do - parentCausalHashIds <- for (Map.toList parents) $ \(causalHash, mcausal) -> do + for (Map.toList parents) $ \(causalHash, 
mcausal) -> do -- check if we can short circuit the parent before loading it, -- by checking if there are causal parents for hc parentChId <- liftQ (Q.saveCausalHash causalHash) liftQ (Q.loadCausalParents parentChId) >>= \case [] -> do c <- mcausal; snd <$> saveRootBranch c _grandParents -> pure parentChId - liftQ (Q.saveCausalParents chId parentCausalHashIds) - _parents -> pure () + parentCausalHashIds -> pure parentCausalHashIds liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case Just boId -> pure (boId, chId) @@ -819,6 +820,8 @@ saveRootBranch (C.Causal hc he parents me) = do (li, lBranch) <- c2lBranch =<< me boId <- saveBranchObject bhId li lBranch liftQ (Q.saveCausal chId bhId) + -- save the link between child and parents + liftQ (Q.saveCausalParents chId parentCausalHashIds) pure (boId, chId) where c2lBranch :: EDB m => C.Branch.Branch m -> m (BranchLocalIds, S.Branch.Full.LocalBranch) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 6bf229f46e..5908dbcefa 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -1,17 +1,17 @@ -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE TupleSections #-} {-# LANGUAGE BlockArguments #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE FlexibleInstances #-} -{-# OPTIONS_GHC -Wno-orphans #-} -{-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DerivingVia #-} {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE DerivingVia #-} {-# LANGUAGE QuasiQuotes #-} -{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ViewPatterns #-} +{-# OPTIONS_GHC -Wno-orphans #-} module U.Codebase.Sqlite.Queries where @@ -22,20 +22,24 @@ 
import Control.Monad.Reader (MonadReader (ask)) import Control.Monad.Trans (MonadIO (liftIO)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Data.ByteString (ByteString) +import Data.Foldable (traverse_) +import qualified Data.List.Extra as List import Data.Maybe (fromJust) +import Data.String (fromString) import Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (..), (:.) (..)) import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) +import Debug.Trace (traceM) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') +import qualified U.Codebase.Referent as C.Referent import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, HashId (..), ObjectId (..), TextId) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent -import qualified U.Codebase.Referent as C.Referent import U.Codebase.WatchKind (WatchKind) import qualified U.Codebase.WatchKind as WatchKind import U.Util.Base32Hex (Base32Hex (..)) @@ -44,7 +48,9 @@ import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, withRunInIO) -- * types + type DB m = (MonadIO m, MonadReader Connection m) + type EDB m = (DB m, MonadError Integrity m) data Integrity @@ -59,13 +65,14 @@ data Integrity | NoNamespaceRoot | MultipleNamespaceRoots [CausalHashId] | NoTypeIndexForTerm Referent.Id - deriving Show + deriving (Show) --- |discard errors that you're sure are impossible +-- | discard errors that you're sure are impossible noExcept :: (Monad m, Show e) => ExceptT e m a -> m a -noExcept a = runExceptT a >>= \case - Right a -> pure a - Left e -> error $ "unexpected error: " ++ 
show e +noExcept a = + runExceptT a >>= \case + Right a -> pure a + Left e -> error $ "unexpected error: " ++ show e orError :: MonadError e m => e -> Maybe b -> m b orError e = maybe (throwError e) pure @@ -74,52 +81,57 @@ type TypeHashReference = Reference' TextId HashId -- * main squeeze -createSchema :: DB m => m () +createSchema :: (DB m, MonadUnliftIO m) => m () createSchema = do - execute_ [hereFile|sql/create.sql|] - execute_ [hereFile|sql/create-index.sql|] + traceM "--- CREATING SCHEMA ---" + withImmediateTransaction . traverse_ (execute_ . fromString) $ + List.splitOn ";" [hereFile|sql/create.sql|] + <> List.splitOn ";" [hereFile|sql/create-index.sql|] setFlags :: DB m => m () -setFlags = execute_ [here| - PRAGMA foreign_keys = ON; -|] +setFlags = execute_ "PRAGMA foreign_keys = ON;" type SchemaType = String + type SchemaName = String + checkForMissingSchema :: DB m => m [(SchemaType, SchemaName)] checkForMissingSchema = filterM missing schema where - missing (t, n) = null @[] @(Only Int) <$> query sql (t,n) + missing (t, n) = null @[] @(Only Int) <$> query sql (t, n) sql = "SELECT 1 FROM sqlite_master WHERE type = ? and name = ?" 
- schema = - [("table", "hash") - ,("index", "hash_base32") - ,("table", "text") - ,("table", "hash_object") - ,("index", "hash_object_hash_id") - ,("index", "hash_object_object_id") - ,("table", "object_type_description") - ,("table", "object") - ,("index", "object_hash_id") - ,("index", "object_type_id") - ,("table", "causal") - ,("index", "causal_value_hash_id") - ,("index", "causal_gc_generation") - ,("table", "namespace_root") - ,("table", "causal_parent") - ,("index", "causal_parent_causal_id") - ,("index", "causal_parent_parent_id") - -- ,("table", "causal_old") - ,("table", "watch_result") - ,("table", "watch") - ,("index", "watch_kind") - ,("table", "watch_kind_description") + schema = + [ ("table", "hash"), + ("index", "hash_base32"), + ("table", "text"), + ("table", "hash_object"), + ("index", "hash_object_hash_id"), + ("index", "hash_object_object_id"), + ("table", "object_type_description"), + ("table", "object"), + ("index", "object_hash_id"), + ("index", "object_type_id"), + ("table", "causal"), + ("index", "causal_value_hash_id"), + ("index", "causal_gc_generation"), + ("table", "namespace_root"), + ("table", "causal_parent"), + ("index", "causal_parent_causal_id"), + ("index", "causal_parent_parent_id"), + -- ,("table", "causal_old") + ("table", "watch_result"), + ("table", "watch"), + ("index", "watch_kind"), + ("table", "watch_kind_description") ] {- ORMOLU_DISABLE -} saveHash :: DB m => Base32Hex -> m HashId saveHash base32 = execute sql (Only base32) >> queryOne (loadHashId base32) - where sql = [here| INSERT OR IGNORE INTO hash (base32) VALUES (?) |] + where sql = [here| + INSERT INTO hash (base32) VALUES (?) + ON CONFLICT DO NOTHING + |] saveHashHash :: DB m => Hash -> m HashId saveHashHash = saveHash . 
Hash.toBase32Hex @@ -150,7 +162,7 @@ loadHashById h = queryAtom sql (Only h) >>= orError (UnknownHashId h) saveText :: DB m => Text -> m TextId saveText t = execute sql (Only t) >> queryOne (loadText t) - where sql = [here| INSERT OR IGNORE INTO text (text) VALUES (?) |] + where sql = [here| INSERT INTO text (text) VALUES (?) ON CONFLICT DO NOTHING|] loadText :: DB m => Text -> m (Maybe TextId) loadText t = queryAtom sql (Only t) @@ -166,8 +178,9 @@ loadTextById h = queryAtom sql (Only h) >>= orError (UnknownTextId h) saveHashObject :: DB m => HashId -> ObjectId -> Int -> m () saveHashObject hId oId version = execute sql (hId, oId, version) where sql = [here| - INSERT OR IGNORE INTO hash_object (hash_id, object_id, version) + INSERT INTO hash_object (hash_id, object_id, version) VALUES (?, ?, ?) + ON CONFLICT DO NOTHING |] saveObject :: DB m => HashId -> ObjectType -> ByteString -> m ObjectId @@ -175,8 +188,9 @@ saveObject h t blob = execute sql (h, t, blob) >> queryOne (maybeObjectIdForPrimaryHashId h) where sql = [here| - INSERT OR IGNORE INTO object (primary_hash_id, type_id, bytes) + INSERT INTO object (primary_hash_id, type_id, bytes) VALUES (?, ?, ?) + ON CONFLICT DO NOTHING |] loadObjectById :: EDB m => ObjectId -> m ByteString @@ -243,8 +257,9 @@ updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| -- end up wanting to store other kinds of Causals here too. saveCausal :: DB m => CausalHashId -> BranchHashId -> m () saveCausal self value = execute sql (self, value) where sql = [here| - INSERT OR IGNORE INTO causal (self_hash_id, value_hash_id) + INSERT INTO causal (self_hash_id, value_hash_id) VALUES (?, ?) + ON CONFLICT DO NOTHING |] loadCausalValueHashId :: EDB m => CausalHashId -> m BranchHashId @@ -253,14 +268,18 @@ loadCausalValueHashId id = SELECT value_hash_id FROM causal WHERE self_hash_id = ? 
|] +-- todo: do a join here loadBranchObjectIdByCausalHashId :: EDB m => CausalHashId -> m (Maybe BranchObjectId) loadBranchObjectIdByCausalHashId id = queryAtom sql (Only id) where sql = [here| - SELECT value_object_id FROM causal WHERE self_hash_id = ? + SELECT object_id FROM hash_object + INNER JOIN causal ON hash_id = causal.value_hash_id + WHERE causal.self_hash_id = ? |] saveCausalOld :: DB m => HashId -> CausalHashId -> m () saveCausalOld v1 v2 = execute sql (v1, v2) where sql = [here| - INSERT OR IGNORE INTO causal_old (old_hash_id, new_hash_id) VALUES (?, ?) + INSERT INTO causal_old (old_hash_id, new_hash_id) VALUES (?, ?) + ON CONFLICT DO NOTHING |] loadCausalHashIdByCausalOldHash :: EDB m => CausalOldHashId -> m CausalHashId @@ -283,7 +302,8 @@ saveCausalParent child parent = saveCausalParents child [parent] saveCausalParents :: DB m => CausalHashId -> [CausalHashId] -> m () saveCausalParents child parents = executeMany sql $ (child,) <$> parents where sql = [here| - INSERT OR IGNORE INTO causal_parent (causal_id, parent_id) VALUES (?, ?) + INSERT INTO causal_parent (causal_id, parent_id) VALUES (?, ?) + ON CONFLICT DO NOTHING |] loadCausalParents :: DB m => CausalHashId -> m [CausalHashId] @@ -307,14 +327,14 @@ saveWatch :: DB m => WatchKind -> Reference.IdH -> ByteString -> m () saveWatch k r blob = execute sql (r :. Only blob) >> execute sql2 (r :. Only k) where sql = [here| - INSERT OR IGNORE - INTO watch_result (hash_id, component_index, result) + INSERT INTO watch_result (hash_id, component_index, result) VALUES (?, ?, ?) + ON CONFLICT DO NOTHING |] sql2 = [here| - INSERT OR IGNORE - INTO watch (hash_id, component_index, watch_kind_id) + INSERT INTO watch (hash_id, component_index, watch_kind_id) VALUES (?, ?, ?) 
+ ON CONFLICT DO NOTHING |] loadWatch :: DB m => WatchKind -> Reference.IdH -> m (Maybe ByteString) @@ -333,7 +353,7 @@ loadWatchesByWatchKind k = query sql (Only k) where sql = [here| -- * Index-building addToTypeIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeIndex tp tm = execute sql (tp :. tm) where sql = [here| - INSERT OR IGNORE INTO find_type_index ( + INSERT INTO find_type_index ( type_reference_builtin, type_reference_hash_id, type_reference_component_index, @@ -341,6 +361,7 @@ addToTypeIndex tp tm = execute sql (tp :. tm) where sql = [here| term_referent_component_index, term_referent_constructor_index ) VALUES (?, ?, ?, ?, ?, ?) + ON CONFLICT DO NOTHING |] getReferentsByType :: DB m => Reference' TextId HashId -> m [Referent.Id] @@ -371,7 +392,7 @@ getTypeReferenceForReference (C.Referent.RefId -> r) = addToTypeMentionsIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| - INSERT OR IGNORE INTO find_type_mentions_index ( + INSERT INTO find_type_mentions_index ( type_reference_builtin, type_reference_hash_id, type_reference_component_index, @@ -379,6 +400,7 @@ addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| term_referent_component_index, term_referent_constructor_index ) VALUES (?, ?, ?, ?, ?, ?) + ON CONFLICT DO NOTHING |] getReferentsByTypeMention :: DB m => Reference' TextId HashId -> m [Referent.Id] @@ -396,13 +418,14 @@ getReferentsByTypeMention r = query sql r where sql = [here| addToDependentsIndex :: DB m => Reference.Reference -> Reference.Id -> m () addToDependentsIndex dependency dependent = execute sql (dependency :. dependent) where sql = [here| - INSERT OR IGNORE INTO dependents_index ( + INSERT INTO dependents_index ( dependency_builtin, dependency_object_id, dependency_component_index, dependent_object_id, dependent_component_index ) VALUES (?, ?, ?, ?, ?) 
+ ON CONFLICT DO NOTHING |] getDependentsForDependency :: DB m => Reference.Reference -> m [Reference.Id] @@ -449,32 +472,45 @@ namespaceHashIdByBase32Prefix prefix = queryAtoms sql (Only $ prefix <> "%") whe -- * helper functions -queryAtoms :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f [b] +queryAtoms :: (DB f, ToRow q, FromField b, Show q) => SQLite.Query -> q -> f [b] queryAtoms q r = map fromOnly <$> query q r -queryMaybe :: (DB f, ToRow q, FromRow b) => SQLite.Query -> q -> f (Maybe b) + +queryMaybe :: (DB f, ToRow q, FromRow b, Show q) => SQLite.Query -> q -> f (Maybe b) queryMaybe q r = headMay <$> query q r -queryAtom :: (DB f, ToRow q, FromField b) => SQLite.Query -> q -> f (Maybe b) +queryAtom :: (DB f, ToRow q, FromField b, Show q) => SQLite.Query -> q -> f (Maybe b) queryAtom q r = fmap fromOnly <$> queryMaybe q r queryOne :: Functor f => f (Maybe b) -> f b queryOne = fmap fromJust -queryExists :: (DB m, ToRow q) => SQLite.Query -> q -> m Bool +queryExists :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m Bool queryExists q r = not . null . map (id @SQLData) <$> queryAtoms q r -query :: (DB m, ToRow q, FromRow r) => SQLite.Query -> q -> m [r] -query q r = do c <- ask; liftIO $ SQLite.query c q r -execute :: (DB m, ToRow q) => SQLite.Query -> q -> m () -execute q r = do c <- ask; liftIO $ SQLite.execute c q r +debugQuery :: Bool +debugQuery = True + +query :: (DB m, ToRow q, FromRow r, Show q) => SQLite.Query -> q -> m [r] +query q r = do + c <- ask + liftIO . queryTrace "query" q r $ SQLite.query c q r + +queryTrace :: (Applicative m, Show q) => String -> SQLite.Query -> q -> m a -> m a +queryTrace title query input m = + if debugQuery + then (traceM $ title ++ " " ++ show input ++ " -> " ++ show query ) *> m + else m + +execute :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m () +execute q r = do c <- ask; liftIO . 
queryTrace "execute" q r $ SQLite.execute c q r execute_ :: DB m => SQLite.Query -> m () -execute_ q = do c <- ask; liftIO $ SQLite.execute_ c q +execute_ q = do c <- ask; liftIO . queryTrace "execute_" q "" $ SQLite.execute_ c q -executeMany :: (DB m, ToRow q) => SQLite.Query -> [q] -> m () -executeMany q r = do c <- ask; liftIO $ SQLite.executeMany c q r +executeMany :: (DB m, ToRow q, Show q) => SQLite.Query -> [q] -> m () +executeMany q r = do c <- ask; liftIO . queryTrace "executeMany" q r $ SQLite.executeMany c q r --- |transaction that blocks +-- | transaction that blocks withImmediateTransaction :: (DB m, MonadUnliftIO m) => m a -> m a withImmediateTransaction action = do c <- ask @@ -482,14 +518,15 @@ withImmediateTransaction action = do headMay :: [a] -> Maybe a headMay [] = Nothing -headMay (a:_) = Just a +headMay (a : _) = Just a -- * orphan instances + deriving via Text instance ToField Base32Hex + deriving via Text instance FromField Base32Hex instance ToField WatchKind where toField = \case WatchKind.RegularWatch -> SQLite.SQLInteger 0 WatchKind.TestWatch -> SQLite.SQLInteger 1 - diff --git a/codebase2/codebase-sqlite/sql/create-index.sql b/codebase2/codebase-sqlite/sql/create-index.sql index 285732429c..921bc50775 100644 --- a/codebase2/codebase-sqlite/sql/create-index.sql +++ b/codebase2/codebase-sqlite/sql/create-index.sql @@ -2,67 +2,67 @@ -- references, because they may be arbitrary types, not just the head -- types that are stored in the codebase. 
CREATE TABLE find_type_index ( - type_reference_builtin INTEGER NULL REFERENCES text(id), - type_reference_hash_id INTEGER NULL REFERENCES hash(id), + type_reference_builtin INTEGER NULL CONSTRAINT find_type_index_fk1 REFERENCES text(id), + type_reference_hash_id INTEGER NULL CONSTRAINT find_type_index_fk2 REFERENCES hash(id), type_reference_component_index INTEGER NULL, - term_referent_object_id INTEGER NOT NULL REFERENCES hash(id), + term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_index_fk3 REFERENCES hash(id), term_referent_component_index INTEGER NOT NULL, term_referent_constructor_index INTEGER NULL, - UNIQUE ( - term_referent_derived_object_id, - term_referent_derived_component_index, - term_referent_derived_constructor_index + CONSTRAINT find_type_index_c1 UNIQUE ( + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index ), - CHECK ( - type_reference_builtin IS NULL = - type_reference_derived_hash_id IS NOT NULL + CONSTRAINT find_type_index_c2 CHECK ( + (type_reference_builtin IS NULL) = + (type_reference_hash_id IS NOT NULL) ), - CHECK ( - type_reference_derived_object_id IS NULL = - type_reference_derived_component_index IS NULL + CONSTRAINT find_type_index_c3 CHECK ( + (type_reference_hash_id IS NULL) = + (type_reference_component_index IS NULL) ) ); CREATE INDEX find_type_index_type ON find_type_index ( type_reference_builtin, - type_reference_derived_hash_id, - type_reference_derived_component_index + type_reference_hash_id, + type_reference_component_index ); CREATE TABLE find_type_mentions_index ( - type_reference_builtin INTEGER NULL REFERENCES text(id), - type_reference_hash_id INTEGER NULL REFERENCES hash(id), + type_reference_builtin INTEGER NULL CONSTRAINT find_type_mentions_index_fk1 REFERENCES text(id), + type_reference_hash_id INTEGER NULL CONSTRAINT find_type_mentions_index_fk2 REFERENCES hash(id), type_reference_component_index INTEGER NULL, - term_referent_object_id INTEGER NOT NULL 
REFERENCES hash(id), + term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_mentions_index_fk3 REFERENCES hash(id), term_referent_derived_component_index INTEGER NOT NULL, term_referent_constructor_index INTEGER NULL, - CHECK ( - type_reference_builtin IS NULL = - type_reference_derived_hash_id IS NOT NULL + CONSTRAINT find_type_mentions_index_c1 CHECK ( + (type_reference_builtin IS NULL) = + (type_reference_hash_id IS NOT NULL) ), - CHECK ( - type_reference_derived_hash_id IS NULL = - type_reference_derived_component_index IS NULL + CONSTRAINT find_type_mentions_index_c2 CHECK ( + (type_reference_hash_id IS NULL) = + (type_reference_component_index) IS NULL ) ); CREATE INDEX find_type_mentions_index_type ON find_type_mentions_index ( - type_reference_builtin INTEGER NULL REFERENCES text(id), - type_reference_hash_id INTEGER NULL REFERENCES hash(id), - type_reference_component_index INTEGER NULL + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index ); CREATE TABLE dependents_index ( - dependency_builtin INTEGER NULL REFERENCES text(id), - dependency_object_id INTEGER NULL REFERENCES hash(id), - dependency_component_index INTEGER NULL - dependent_object_id INTEGER NOT NULL REFERENCES hash(id), + dependency_builtin INTEGER NULL CONSTRAINT dependents_index_fk1 REFERENCES text(id), + dependency_object_id INTEGER NULL CONSTRAINT dependents_index_fk2 REFERENCES hash(id), + dependency_component_index INTEGER NULL, + dependent_object_id INTEGER NOT NULL CONSTRAINT dependents_index_fk3 REFERENCES hash(id), dependent_component_index INTEGER NOT NULL, - CHECK ( - type_reference_builtin IS NULL = - type_reference_derived_object_id IS NOT NULL + CONSTRAINT dependents_index_c1 CHECK ( + dependency_builtin IS NULL = + dependency_object_id IS NOT NULL ), - CHECK ( - type_reference_derived_object_id IS NULL = - type_reference_derived_component_index IS NULL + CONSTRAINT dependents_index_c2 CHECK ( + dependency_object_id IS NULL = + 
dependency_component_index IS NULL ) ); CREATE INDEX dependents_by_dependency ON dependents_index ( @@ -73,4 +73,4 @@ CREATE INDEX dependents_by_dependency ON dependents_index ( CREATE INDEX dependencies_by_dependent ON dependents_index ( dependent_object_id, dependent_component_index -); +) \ No newline at end of file diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index d5096a2a96..f26b82c879 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -18,10 +18,10 @@ CREATE TABLE text ( -- INNER JOIN hash ON hash_id = hash.id -- WHERE base32 LIKE 'a1b2c3%' CREATE TABLE hash_object ( - -- hashes are UNIQUE; many hashes correspond to one object + -- hashes are UNIQUE, many hashes correspond to one object -- (causal nodes are not considered objects atm) - hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), - object_id INTEGER NOT NULL REFERENCES object(id), + hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT hash_object_fk1 REFERENCES hash(id), + object_id INTEGER NOT NULL CONSTRAINT hash_object_fk2 REFERENCES object(id), hash_version INTEGER NOT NULL ); CREATE INDEX hash_object_hash_id ON hash_object(hash_id); @@ -40,11 +40,12 @@ INSERT INTO object_type_description (id, description) VALUES ; CREATE TABLE object ( - id INTEGER PRIMARY KEY, - primary_hash_id UNIQUE INTEGER NOT NULL REFERENCES hash(id), - type_id INTEGER NOT NULL REFERENCES object_type_description(id), + id INTEGER PRIMARY KEY NOT NULL, + primary_hash_id INTEGER UNIQUE NOT NULL CONSTRAINT object_fk1 REFERENCES hash(id), + type_id INTEGER NOT NULL CONSTRAINT object_fk2 REFERENCES object_type_description(id), bytes BLOB NOT NULL ); +-- Error: near "INTEGER": syntax error CREATE INDEX object_hash_id ON object(primary_hash_id); CREATE INDEX object_type_id ON object(type_id); @@ -54,9 +55,9 @@ CREATE INDEX object_type_id ON object(type_id); -- see an argument to drop them too and just use NULL, but I thought 
it better -- to not lose their identities. CREATE TABLE causal ( - self_hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), - value_hash_id INTEGER NOT NULL REFERENCES hash(id), - gc_generation INTEGER NOT NULL + self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), + value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), + gc_generation INTEGER NOT NULL DEFAULT 0 ); CREATE INDEX causal_value_hash_id ON causal(value_hash_id); CREATE INDEX causal_gc_generation ON causal(gc_generation); @@ -70,13 +71,13 @@ CREATE INDEX causal_gc_generation ON causal(gc_generation); CREATE TABLE namespace_root ( -- a dummy pk because -- id INTEGER PRIMARY KEY NOT NULL, - causal_id INTEGER PRIMARY KEY NOT NULL REFERENCES causal(self_hash_id) + causal_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT namespace_root_fk1 REFERENCES causal(self_hash_id) ); CREATE TABLE causal_parent ( id INTEGER PRIMARY KEY NOT NULL, - causal_id INTEGER NOT NULL REFERENCES causal(self_hash_id), - parent_id INTEGER NOT NULL REFERENCES causal(self_hash_id), + causal_id INTEGER NOT NULL CONSTRAINT causal_parent_fk1 REFERENCES causal(self_hash_id), + parent_id INTEGER NOT NULL CONSTRAINT causal_parent_fk2 REFERENCES causal(self_hash_id), UNIQUE(causal_id, parent_id) ); CREATE INDEX causal_parent_causal_id ON causal_parent(causal_id); @@ -84,29 +85,29 @@ CREATE INDEX causal_parent_parent_id ON causal_parent(parent_id); -- associate old (e.g. 
v1) causal hashes with new causal hashes CREATE TABLE causal_old ( - old_hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), - new_hash_id INTEGER NOT NULL REFERENCES hash(id) + old_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_old_fk1 REFERENCES hash(id), + new_hash_id INTEGER NOT NULL CONSTRAINT causal_old_fk2 REFERENCES hash(id) ); CREATE TABLE watch_result ( - hash_id INTEGER NOT NULL REFERENCES object(id), + hash_id INTEGER NOT NULL CONSTRAINT watch_result_fk1 REFERENCES object(id), component_index INTEGER NOT NULL, result BLOB NOT NULL, PRIMARY KEY (hash_id, component_index) ); CREATE TABLE watch ( - hash_id INTEGER NOT NULL REFERENCES object(id), + hash_id INTEGER NOT NULL CONSTRAINT watch_fk1 REFERENCES object(id), component_index INTEGER NOT NULL, - watch_kind_id INTEGER NOT NULL REFERENCES watch_kind_description(id), + watch_kind_id INTEGER NOT NULL CONSTRAINT watch_fk2 REFERENCES watch_kind_description(id), PRIMARY KEY (hash_id, component_index, watch_kind_id) ); CREATE INDEX watch_kind ON watch(watch_kind_id); CREATE TABLE watch_kind_description ( - id PRIMARY KEY INTEGER UNIQUE NOT NULL, + id INTEGER PRIMARY KEY UNIQUE NOT NULL, description TEXT UNIQUE NOT NULL ); INSERT INTO watch_kind_description (id, description) VALUES (0, "Regular"), -- won't be synced - (1, "Test"); -- will be synced + (1, "Test") -- will be synced diff --git a/codebase2/codebase/U/Codebase/HashTags.hs b/codebase2/codebase/U/Codebase/HashTags.hs index 39efdc0651..3299d3af38 100644 --- a/codebase2/codebase/U/Codebase/HashTags.hs +++ b/codebase2/codebase/U/Codebase/HashTags.hs @@ -4,10 +4,13 @@ import U.Util.Hash (Hash) newtype BranchHash = BranchHash { unBranchHash :: Hash } deriving (Eq, Ord, Show) -newtype CausalHash = CausalHash { unCausalHash :: Hash } deriving (Eq, Ord, Show) +newtype CausalHash = CausalHash { unCausalHash :: Hash } deriving (Eq, Ord) newtype EditHash = EditHash { unEditHash :: Hash } deriving (Eq, Ord, Show) newtype PatchHash = PatchHash { 
unPatchHash :: Hash } deriving (Eq, Ord, Show) newtype DefnHash = DefnHash { unDefnHash :: Hash } deriving (Eq, Ord, Show) + +instance Show CausalHash where + show h = "CausalHash (" ++ show (unCausalHash h) ++ ")" \ No newline at end of file diff --git a/codebase2/util/U/Util/Hash.hs b/codebase2/util/U/Util/Hash.hs index 5e1bcaa9f5..9060cde32e 100644 --- a/codebase2/util/U/Util/Hash.hs +++ b/codebase2/util/U/Util/Hash.hs @@ -22,7 +22,7 @@ import qualified U.Util.Base32Hex as Base32Hex import U.Util.Base32Hex (Base32Hex) -- | Hash which uniquely identifies a Unison type or term -newtype Hash = Hash {toShort :: ShortByteString} deriving (Eq, Ord, Generic, Show) +newtype Hash = Hash {toShort :: ShortByteString} deriving (Eq, Ord, Generic) toBase32Hex :: Hash -> Base32Hex toBase32Hex = Base32Hex.fromByteString . toBytes @@ -57,3 +57,6 @@ instance H.Accumulate Hash where fromBytes :: ByteString -> Hash fromBytes = Hash . B.Short.toShort + +instance Show Hash where + show h = "fromBase32Hex " ++ (show . Base32Hex.toText . 
toBase32Hex) h \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 6956d2197d..83bc267499 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,8 +1,9 @@ {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE QuasiQuotes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE QuasiQuotes #-} +{-# LANGUAGE TypeApplications #-} module Unison.Codebase.SqliteCodebase where @@ -10,9 +11,11 @@ module Unison.Codebase.SqliteCodebase where -- import qualified U.Codebase.Sqlite.Operations' as Ops +import qualified Control.Exception import Control.Monad (filterM, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) import Control.Monad.Extra (ifM, unlessM) +import qualified Control.Monad.Extra as Monad import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT) @@ -24,12 +27,15 @@ import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) import qualified Data.Set as Set +import Data.String (IsString (fromString)) import Data.Text (Text) import qualified Data.Text as Text import qualified Data.Text.IO as TextIO import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite +import System.Directory (canonicalizePath) +import qualified System.Exit as SysExit import System.FilePath (()) import U.Codebase.HashTags (CausalHash (unCausalHash)) import qualified U.Codebase.Reference as C.Reference @@ -71,15 +77,10 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import qualified Unison.Util.Pretty as P import UnliftIO (MonadIO, catchIO) -import UnliftIO.STM -import UnliftIO.Directory (getHomeDirectory) -import System.Directory (canonicalizePath) 
+import UnliftIO.Directory (createDirectoryIfMissing, getHomeDirectory) import qualified UnliftIO.Environment as SysEnv -import Data.String (IsString(fromString)) -import qualified System.Exit as SysExit -import qualified Control.Monad.Extra as Monad -import qualified Control.Exception - +import UnliftIO.STM +import qualified System.FilePath as FilePath codebasePath :: FilePath codebasePath = ".unison" "v2" "unison.sqlite3" @@ -101,15 +102,19 @@ getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePat getNoCodebaseErrorMsg executable prettyDir mdir = let secondLine = case mdir of - Just dir -> "Run `" <> executable <> " -codebase " <> fromString dir - <> " init` to create one, then try again!" - Nothing -> "Run `" <> executable <> " init` to create one there," - <> " then try again;" - <> " or `" <> executable <> " -codebase ` to load a codebase from someplace else!" - in - P.lines - [ "No codebase exists in " <> prettyDir <> "." - , secondLine ] + Just dir -> + "Run `" <> executable <> " -codebase " <> fromString dir + <> " init` to create one, then try again!" + Nothing -> + "Run `" <> executable <> " init` to create one there," + <> " then try again;" + <> " or `" + <> executable + <> " -codebase ` to load a codebase from someplace else!" + in P.lines + [ "No codebase exists in " <> prettyDir <> ".", + secondLine + ] initCodebaseAndExit :: Maybe FilePath -> IO () initCodebaseAndExit mdir = do @@ -121,6 +126,7 @@ initCodebaseAndExit mdir = do -- initializes a new codebase here (i.e. `ucm -codebase dir init`) initCodebase :: FilePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) initCodebase path = do + traceM $ "initCodebase " ++ path prettyDir <- P.string <$> canonicalizePath path Monad.whenM (codebaseExists path) do @@ -133,14 +139,17 @@ initCodebase path = do . 
P.wrap $ "Initializing a new codebase in: " <> prettyDir - + -- run sql create scripts - Control.Exception.bracket + createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) + Control.Exception.bracket (unsafeGetConnection path) - Sqlite.close + Sqlite.close (runReaderT Q.createSchema) - Right (closeCodebase, theCodebase) <- sqliteCodebase path + (closeCodebase, theCodebase) <- sqliteCodebase path >>= \case + Right x -> pure x + Left x -> error $ show x ++ " :) " Codebase1.initializeCodebase theCodebase pure (closeCodebase, theCodebase) @@ -149,9 +158,13 @@ getCodebaseDir = maybe getHomeDirectory pure -- checks if a db exists at `path` with the minimum schema codebaseExists :: CodebasePath -> IO Bool -codebaseExists root = sqliteCodebase root >>= \case - Left _ -> pure False - Right (close, _codebase) -> close >> pure True +codebaseExists root = do + traceM $ "codebaseExists " ++ root + Control.Exception.catch @Sqlite.SQLError + (sqliteCodebase root >>= \case + Left _ -> pure False + Right (close, _codebase) -> close >> pure True) + (const $ pure False) -- and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) @@ -180,12 +193,14 @@ type DeclBufferEntry = BufferEntry (Decl Symbol Ann) unsafeGetConnection :: CodebasePath -> IO Sqlite.Connection unsafeGetConnection root = do + traceM $ "unsafeGetconnection " ++ root ++ " -> " ++ (root codebasePath) conn <- Sqlite.open $ root codebasePath runReaderT Q.setFlags conn pure conn sqliteCodebase :: CodebasePath -> IO (Either [(Q.SchemaType, Q.SchemaName)] (IO (), Codebase1.Codebase IO Symbol Ann)) sqliteCodebase root = do + traceM $ "sqliteCodebase " ++ root conn <- unsafeGetConnection root runReaderT Q.checkForMissingSchema conn >>= \case [] -> do @@ -208,7 +223,7 @@ sqliteCodebase root = do "I don't know about the builtin type ##" ++ show t ++ ", but I've been asked for it's ConstructorType." - in pure . fromMaybe err $ + in pure . 
fromMaybe err $ Map.lookup (Reference.Builtin t) Builtins.builtinConstructorType C.Reference.ReferenceDerived i -> getDeclTypeById i @@ -432,7 +447,7 @@ sqliteCodebase root = do let t = Reflog.toText $ Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason - in TextIO.appendFile (reflogPath root) (t <> "\n") + in TextIO.appendFile (reflogPath root) (t <> "\n") reflogPath :: CodebasePath -> FilePath reflogPath root = root "reflog" From bc8f668f7d012e3e8ba1311eea1a236989edce7e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 25 Jan 2021 15:05:39 -0500 Subject: [PATCH 079/225] rootBranchUpdates is meant to block --- parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 83bc267499..381dcf270f 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -81,6 +81,7 @@ import UnliftIO.Directory (createDirectoryIfMissing, getHomeDirectory) import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM import qualified System.FilePath as FilePath +import qualified Control.Concurrent codebasePath :: FilePath codebasePath = ".unison" "v2" "unison.sqlite3" @@ -382,7 +383,12 @@ sqliteCodebase root = do $ Branch.transform (lift . lift) branch1 rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) - rootBranchUpdates = pure (pure (), pure mempty) + rootBranchUpdates = pure (cleanup, newRootsDiscovered) + where + newRootsDiscovered = do + Control.Concurrent.threadDelay maxBound -- hold off on returning + pure mempty -- returning nothing + cleanup = pure () -- if this blows up on cromulent hashes, then switch from `hashToHashId` -- to one that returns Maybe. 
From d640457ced5f536ca5a3015328b3b058ed1122b9 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 25 Jan 2021 16:15:57 -0500 Subject: [PATCH 080/225] Show instances for HashTags, LocalIds --- .../U/Codebase/Sqlite/LocalIds.hs | 12 +++++----- codebase2/codebase/U/Codebase/HashTags.hs | 22 ++++++++++++++----- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index 668891dbe6..915eb20b60 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -21,20 +21,20 @@ type LocalIds = LocalIds' TextId ObjectId type WatchLocalIds = LocalIds' TextId HashId -- | represents an index into a textLookup -newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalTextId = LocalTextId Word64 deriving (Eq, Ord, Show, Num, Real, Enum, Integral, Bits) via Word64 -- | represents an index into a defnLookup -newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalDefnId = LocalDefnId Word64 deriving (Eq, Ord, Show, Num, Real, Enum, Integral, Bits) via Word64 -- | a local index to a hash, used when the corresponding object is allowed to be absent -newtype LocalHashId = LocalHashId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalHashId = LocalHashId Word64 deriving (Eq, Ord, Show, Num, Real, Enum, Integral, Bits) via Word64 -newtype LocalPatchObjectId = LocalPatchObjectId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalPatchObjectId = LocalPatchObjectId Word64 deriving (Eq, Ord, Show, Num, Real, Enum, Integral, Bits) via Word64 -newtype LocalBranchChildId = LocalBranchChildId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalBranchChildId = LocalBranchChildId Word64 
deriving (Eq, Ord, Show, Num, Real, Enum, Integral, Bits) via Word64 -- | causal hashes are treated differently from HashIds, which don't have dependencies -newtype LocalCausalHashId = LocalCausalHashId Word64 deriving (Eq, Ord, Num, Real, Enum, Integral, Bits) via Word64 +newtype LocalCausalHashId = LocalCausalHashId Word64 deriving (Eq, Ord, Show, Num, Real, Enum, Integral, Bits) via Word64 instance Bitraversable LocalIds' where bitraverse f g (LocalIds t d) = LocalIds <$> traverse f t <*> traverse g d diff --git a/codebase2/codebase/U/Codebase/HashTags.hs b/codebase2/codebase/U/Codebase/HashTags.hs index 3299d3af38..98691b61d2 100644 --- a/codebase2/codebase/U/Codebase/HashTags.hs +++ b/codebase2/codebase/U/Codebase/HashTags.hs @@ -2,15 +2,27 @@ module U.Codebase.HashTags where import U.Util.Hash (Hash) -newtype BranchHash = BranchHash { unBranchHash :: Hash } deriving (Eq, Ord, Show) +newtype BranchHash = BranchHash { unBranchHash :: Hash } deriving (Eq, Ord) newtype CausalHash = CausalHash { unCausalHash :: Hash } deriving (Eq, Ord) -newtype EditHash = EditHash { unEditHash :: Hash } deriving (Eq, Ord, Show) +newtype EditHash = EditHash { unEditHash :: Hash } deriving (Eq, Ord) -newtype PatchHash = PatchHash { unPatchHash :: Hash } deriving (Eq, Ord, Show) +newtype PatchHash = PatchHash { unPatchHash :: Hash } deriving (Eq, Ord) -newtype DefnHash = DefnHash { unDefnHash :: Hash } deriving (Eq, Ord, Show) +newtype DefnHash = DefnHash { unDefnHash :: Hash } deriving (Eq, Ord) + +instance Show BranchHash where + show h = "BranchHash (" ++ show (unBranchHash h) ++ ")" instance Show CausalHash where - show h = "CausalHash (" ++ show (unCausalHash h) ++ ")" \ No newline at end of file + show h = "CausalHash (" ++ show (unCausalHash h) ++ ")" + +instance Show PatchHash where + show h = "PatchHash (" ++ show (unPatchHash h) ++ ")" + +instance Show EditHash where + show h = "EditHash (" ++ show (unEditHash h) ++ ")" + +instance Show DefnHash where + show h = "DefnHash 
(" ++ show (unDefnHash h) ++ ")" From e87ce9865ef8c2198a36bf83ce1539a80e8b109c Mon Sep 17 00:00:00 2001 From: Dan Doel Date: Mon, 25 Jan 2021 17:49:42 -0500 Subject: [PATCH 081/225] Fix Bitraversable instances in U.Codebase.Referent - They were being derived via DeriveAnyClass, but base currently contains an erroneous default definition of `bitraverse` which just loops. Implement them manually instead. --- codebase2/codebase/U/Codebase/Referent.hs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/codebase2/codebase/U/Codebase/Referent.hs b/codebase2/codebase/U/Codebase/Referent.hs index 10d5fe4f19..b79072d76b 100644 --- a/codebase2/codebase/U/Codebase/Referent.hs +++ b/codebase2/codebase/U/Codebase/Referent.hs @@ -1,6 +1,4 @@ {-# LANGUAGE LambdaCase #-} -{-# LANGUAGE DeriveAnyClass #-} -{-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeSynonymInstances #-} @@ -23,13 +21,13 @@ type ReferentH = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) data Referent' rTm rTp = Ref rTm | Con rTp ConstructorId - deriving (Eq, Ord, Show, Bitraversable) + deriving (Eq, Ord, Show) type Id = Id' Hash Hash data Id' hTm hTp = RefId (Reference.Id' hTm) | ConId (Reference.Id' hTp) ConstructorId - deriving (Eq, Ord, Show, Bitraversable) + deriving (Eq, Ord, Show) instance (Hashable rTm, Hashable rTp) => Hashable (Referent' rTm rTp) where tokens (Ref r) = Hashable.Tag 0 : Hashable.tokens r @@ -45,6 +43,11 @@ instance Bifoldable Referent' where Ref r -> f r Con r _ -> g r +instance Bitraversable Referent' where + bitraverse f g = \case + Ref r -> Ref <$> f r + Con r c -> flip Con c <$> g r + instance Bifunctor Id' where bimap f g = \case RefId r -> RefId (fmap f r) @@ -54,3 +57,8 @@ instance Bifoldable Id' where bifoldMap f g = \case RefId r -> foldMap f r ConId r _ -> foldMap g r + +instance Bitraversable Id' where + bitraverse f g = \case + RefId r -> RefId <$> traverse f r + ConId r c -> flip ConId c <$> 
traverse g r From e0087f6d08808c2551618996d0d6ac6d2a22b500 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 26 Jan 2021 17:08:06 -0500 Subject: [PATCH 082/225] fix set namespace_root, and add a bunch of debug logging --- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 20 +++++-- .../U/Codebase/Sqlite/Decl/Format.hs | 2 + .../U/Codebase/Sqlite/LocalIds.hs | 2 +- .../U/Codebase/Sqlite/Operations.hs | 21 +++++--- .../U/Codebase/Sqlite/Queries.hs | 52 ++++++++++++++----- .../U/Codebase/Sqlite/Serialization.hs | 7 ++- .../U/Codebase/Sqlite/Term/Format.hs | 1 + codebase2/codebase/U/Codebase/Decl.hs | 2 +- codebase2/codebase/U/Codebase/Term.hs | 6 +-- codebase2/core/U/Core/ABT.hs | 5 +- .../U/Util/Serialization.hs | 2 + parser-typechecker/src/Unison/Codebase.hs | 3 +- .../src/Unison/Codebase/SqliteCodebase.hs | 3 +- 13 files changed, 93 insertions(+), 33 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 8519d72352..c189a9c19c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -22,16 +22,16 @@ newtype TextId = TextId Word64 deriving (Eq, Ord, Show) newtype HashId = HashId Word64 deriving (Eq, Ord, Show) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 -newtype PatchObjectId = PatchObjectId { unPatchObjectId :: ObjectId } deriving (Eq, Ord, Show) +newtype PatchObjectId = PatchObjectId { unPatchObjectId :: ObjectId } deriving (Eq, Ord) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId -newtype BranchObjectId = BranchObjectId { unBranchObjectId :: ObjectId } deriving (Eq, Ord, Show) +newtype BranchObjectId = BranchObjectId { unBranchObjectId :: ObjectId } deriving (Eq, Ord) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via ObjectId -newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, Ord, 
Show) +newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, Ord) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId -newtype CausalHashId = CausalHashId { unCausalHashId :: HashId } deriving (Eq, Ord, Show) +newtype CausalHashId = CausalHashId { unCausalHashId :: HashId } deriving (Eq, Ord) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId @@ -39,3 +39,15 @@ newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashabl newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via ObjectId newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId + +instance Show PatchObjectId where + show h = "PatchObjectId (" ++ show (unPatchObjectId h) ++ ")" + +instance Show BranchObjectId where + show h = "BranchObjectId (" ++ show (unBranchObjectId h) ++ ")" + +instance Show BranchHashId where + show h = "BranchHashId (" ++ show (unBranchHashId h) ++ ")" + +instance Show CausalHashId where + show h = "CausalHashId (" ++ show (unCausalHashId h) ++ ")" diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs index 61f284bbcb..633f784b74 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Decl/Format.hs @@ -12,11 +12,13 @@ import qualified U.Core.ABT as ABT -- | Add new formats here data DeclFormat = Decl LocallyIndexedComponent + deriving Show -- | V1: Decls included `Hash`es inline -- V2: Instead of `Hash`, we use a smaller index. 
data LocallyIndexedComponent = LocallyIndexedComponent (Vector (LocalIds, Decl Symbol)) + deriving Show type Decl v = DeclR TypeRef v diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs index 915eb20b60..d7e7bf85ea 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/LocalIds.hs @@ -14,7 +14,7 @@ import U.Codebase.Sqlite.DbId data LocalIds' t h = LocalIds { textLookup :: Vector t, defnLookup :: Vector h - } + } deriving Show type LocalIds = LocalIds' TextId ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 6dac562858..0bba142ae1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -386,6 +386,7 @@ decodeDeclElement i = getFromBytesOr (ErrDeclElement i) (S.lookupDeclElement i) -- * legacy conversion helpers getCycleLen :: EDB m => H.Hash -> m Word64 +getCycleLen id | trace ("getCycleLen " ++ show id) False = undefined getCycleLen h = do runMaybeT (primaryHashToExistingObjectId h) >>= maybe (throwError $ LegacyUnknownCycleLen h) pure @@ -400,6 +401,7 @@ getDeclTypeByReference r@(C.Reference.Id h pos) = >>= pure . C.Decl.declType componentByObjectId :: EDB m => Db.ObjectId -> m [S.Reference.Id] +componentByObjectId id | trace ("componentByObjectId " ++ show id) False = undefined componentByObjectId id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. 
len - 1]] @@ -533,6 +535,7 @@ loadTermWithTypeByReference (C.Reference.Id h i) = >>= uncurry3 s2cTermWithType loadTermByReference :: EDB m => C.Reference.Id -> m (C.Term Symbol) +loadTermByReference id | trace ("loadTermByReference " ++ show id) False = undefined loadTermByReference (C.Reference.Id h i) = primaryHashToExistingObjectId h >>= liftQ . Q.loadObjectById @@ -541,6 +544,7 @@ loadTermByReference (C.Reference.Id h i) = >>= uncurry s2cTerm loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) +loadTypeOfTermByTermReference id | trace ("loadTypeOfTermByTermReference " ++ show id) False = undefined loadTypeOfTermByTermReference (C.Reference.Id h i) = primaryHashToExistingObjectId h >>= liftQ . Q.loadObjectById @@ -706,6 +710,7 @@ c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do pure (ids, decl) loadDeclByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) +loadDeclByReference id | trace ("loadDeclByReference " ++ show id) False = undefined loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- @@ -813,8 +818,8 @@ saveRootBranch (C.Causal hc he parents me) = do _grandParents -> pure parentChId parentCausalHashIds -> pure parentCausalHashIds - liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case - Just boId -> pure (boId, chId) + boId <- liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case + Just boId -> pure boId Nothing -> do bhId <- liftQ (Q.saveBranchHash he) (li, lBranch) <- c2lBranch =<< me @@ -822,7 +827,11 @@ saveRootBranch (C.Causal hc he parents me) = do liftQ (Q.saveCausal chId bhId) -- save the link between child and parents liftQ (Q.saveCausalParents chId parentCausalHashIds) - pure (boId, chId) + pure boId + + Q.setNamespaceRoot chId + pure (boId, chId) + where c2lBranch :: EDB m => C.Branch.Branch m -> m (BranchLocalIds, S.Branch.Full.LocalBranch) c2lBranch (C.Branch.Branch terms types patches children) = @@ 
-954,6 +963,7 @@ loadBranchByObjectId id = do S.BranchFormat.Diff r li d -> doDiff r [l2sDiff li d] where deserializeBranchObject :: EDB m => Db.BranchObjectId -> m S.BranchFormat + deserializeBranchObject id | trace ("deserializeBranchObject " ++ show id) False = undefined deserializeBranchObject id = (liftQ . Q.loadObjectById) (Db.unBranchObjectId id) >>= getFromBytesOr (ErrBranch id) S.getBranchFormat @@ -1136,6 +1146,7 @@ s2cPatch (S.Patch termEdits typeEdits) = <*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits deserializePatchObject :: EDB m => Db.PatchObjectId -> m S.PatchFormat +deserializePatchObject id | trace ("deserializePatchObject " ++ show id) False = undefined deserializePatchObject id = (liftQ . Q.loadObjectById) (Db.unPatchObjectId id) >>= getFromBytesOr (ErrPatch id) S.getPatchFormat @@ -1211,15 +1222,13 @@ declReferentsByPrefix b32prefix pos cid = do cids = [cid | cid <- [0 :: ConstructorId .. ctorCount - 1], test cid] pure (h, pos, len, dt, cids) getDeclCtorCount :: EDB m => S.Reference.Id -> m (C.Decl.DeclType, Word64, ConstructorId) + getDeclCtorCount id | trace ("getDeclCtorCount " ++ show id) False = undefined getDeclCtorCount (C.Reference.Id r i) = do bs <- liftQ (Q.loadObjectById r) len <- decodeComponentLengthOnly bs (_localIds, decl) <- decodeDeclElement i bs pure (C.Decl.declType decl, len, fromIntegral $ length (C.Decl.constructorTypes decl)) --- (localIds, C.Decl.DataDeclaration dt m b ct) <- --- primaryHashToExistingObjectId h >>= liftQ . 
Q.loadObjectById >>= decodeDeclElement i - branchHashesByPrefix :: EDB m => ShortBranchHash -> m (Set BranchHash) branchHashesByPrefix (ShortBranchHash b32prefix) = do hashIds <- Q.namespaceHashIdByBase32Prefix b32prefix diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 5908dbcefa..d14ca042aa 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -9,6 +9,7 @@ {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TemplateHaskell #-} {-# LANGUAGE TupleSections #-} +{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -32,7 +33,7 @@ import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (. import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) -import Debug.Trace (traceM) +import Debug.Trace (trace, traceM) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') import qualified U.Codebase.Referent as C.Referent @@ -194,7 +195,11 @@ saveObject h t blob = |] loadObjectById :: EDB m => ObjectId -> m ByteString -loadObjectById oId = queryAtom sql (Only oId) >>= orError (UnknownObjectId oId) +loadObjectById id | trace ("loadObjectById " ++ show id) False = undefined +loadObjectById oId = do + result <- queryAtom sql (Only oId) >>= orError (UnknownObjectId oId) + traceM $ "loadObjectById " ++ show oId ++ " = " ++ show result + pure result where sql = [here| SELECT bytes FROM object WHERE id = ? 
|] @@ -318,10 +323,14 @@ loadNamespaceRoot = queryAtoms sql () >>= \case ids -> throwError (MultipleNamespaceRoots ids) where sql = "SELECT causal_id FROM namespace_root" -setNamespaceRoot :: DB m => CausalHashId -> m () -setNamespaceRoot id = execute sql (Only id) where sql = [here| - INSERT OR REPLACE INTO namespace_root VALUES (?) -|] +setNamespaceRoot :: forall m. DB m => CausalHashId -> m () +setNamespaceRoot id = + query_ @m @(Only CausalHashId) "SELECT * FROM namespace_root" >>= \case + [] -> execute insert (Only id) + _ -> execute update (Only id) + where + insert = "INSERT INTO namespace_root VALUES (?)" + update = "UPDATE namespace_root SET causal_id = ?" saveWatch :: DB m => WatchKind -> Reference.IdH -> ByteString -> m () saveWatch k r blob = execute sql (r :. Only blob) >> execute sql2 (r :. Only k) @@ -472,13 +481,13 @@ namespaceHashIdByBase32Prefix prefix = queryAtoms sql (Only $ prefix <> "%") whe -- * helper functions -queryAtoms :: (DB f, ToRow q, FromField b, Show q) => SQLite.Query -> q -> f [b] +queryAtoms :: (DB f, ToRow q, FromField b, Show q, Show b) => SQLite.Query -> q -> f [b] queryAtoms q r = map fromOnly <$> query q r -queryMaybe :: (DB f, ToRow q, FromRow b, Show q) => SQLite.Query -> q -> f (Maybe b) +queryMaybe :: (DB f, ToRow q, FromRow b, Show q, Show b) => SQLite.Query -> q -> f (Maybe b) queryMaybe q r = headMay <$> query q r -queryAtom :: (DB f, ToRow q, FromField b, Show q) => SQLite.Query -> q -> f (Maybe b) +queryAtom :: (DB f, ToRow q, FromField b, Show q, Show b) => SQLite.Query -> q -> f (Maybe b) queryAtom q r = fmap fromOnly <$> queryMaybe q r queryOne :: Functor f => f (Maybe b) -> f b @@ -490,16 +499,31 @@ queryExists q r = not . null . map (id @SQLData) <$> queryAtoms q r debugQuery :: Bool debugQuery = True -query :: (DB m, ToRow q, FromRow r, Show q) => SQLite.Query -> q -> m [r] +query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] query q r = do c <- ask liftIO . 
queryTrace "query" q r $ SQLite.query c q r -queryTrace :: (Applicative m, Show q) => String -> SQLite.Query -> q -> m a -> m a +query_ :: (DB m, FromRow r, Show r) => SQLite.Query -> m [r] +query_ q = do + c <- ask + liftIO . queryTrace_ "query" q $ SQLite.query_ c q + +queryTrace :: (Monad m, Show q, Show a) => String -> SQLite.Query -> q -> m a -> m a queryTrace title query input m = - if debugQuery - then (traceM $ title ++ " " ++ show input ++ " -> " ++ show query ) *> m - else m + if debugQuery then do + a <- m + traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a + pure a + else m + +queryTrace_ :: (Monad m, Show a) => String -> SQLite.Query -> m a -> m a +queryTrace_ title query m = + if debugQuery then do + a <- m + traceM $ title ++ " " ++ show query ++ "\n output: " ++ show a + pure a + else m execute :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m () execute q r = do c <- ask; liftIO . queryTrace "execute" q r $ SQLite.execute c q r diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 5f27925ada..a78c377c73 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -49,6 +49,7 @@ import qualified U.Core.ABT as ABT import qualified U.Util.Monoid as Monoid import U.Util.Serialization import Prelude hiding (getChar, putChar) +import Debug.Trace (trace) putABT :: (MonadPut m, Foldable f, Functor f, Ord v) => @@ -160,6 +161,7 @@ putTermComponent :: MonadPut m => TermFormat.LocallyIndexedComponent -> m () +putTermComponent t | trace ("putTermComponent " ++ show t) False = undefined putTermComponent (TermFormat.LocallyIndexedComponent v) = putFramedArray ( \(localIds, term, typ) -> @@ -168,7 +170,8 @@ putTermComponent (TermFormat.LocallyIndexedComponent v) = v putTerm :: MonadPut m => TermFormat.Term -> m () -putTerm = putABT putSymbol putUnit 
putF +putTerm _t | trace "putTerm" False = undefined +putTerm t = putABT putSymbol putUnit putF t where putF :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () putF putChild = \case @@ -377,6 +380,8 @@ putDeclFormat = \case where -- These use a framed array for randomer access putDeclComponent :: MonadPut m => DeclFormat.LocallyIndexedComponent -> m () + putDeclComponent t | trace ("putDeclComponent " ++ show t) False = undefined + putDeclComponent (DeclFormat.LocallyIndexedComponent v) = putFramedArray (putPair putLocalIds putDeclElement) v where diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index 02d7e97059..b8ec18a40e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -22,6 +22,7 @@ type TypeLink = TypeRef type LocallyIndexedComponent = LocallyIndexedComponent' TextId ObjectId newtype LocallyIndexedComponent' t d = LocallyIndexedComponent (Vector (LocalIds' t d, Term, Type)) + deriving Show type F = Term.F' LocalTextId TermRef TypeRef TermLink TypeLink Symbol diff --git a/codebase2/codebase/U/Codebase/Decl.hs b/codebase2/codebase/U/Codebase/Decl.hs index 6f0df95cb1..b45c35e0b3 100644 --- a/codebase2/codebase/U/Codebase/Decl.hs +++ b/codebase2/codebase/U/Codebase/Decl.hs @@ -32,7 +32,7 @@ data DeclR r v = DataDeclaration bound :: [v], constructorTypes :: [TypeR r v] } - + deriving Show -- instance Hashable ConstructorType where -- tokens b = [Tag . 
fromIntegral $ fromEnum b] diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index b086f6dd70..9b6239f2c2 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -91,10 +91,10 @@ data F' text termRef typeRef termLink typeLink vt a Match a [MatchCase text typeRef a] | TermLink termLink | TypeLink typeLink - deriving (Foldable, Functor, Traversable) + deriving (Foldable, Functor, Traversable, Show) data MatchCase t r a = MatchCase (Pattern t r) (Maybe a) a - deriving (Foldable, Functor, Generic, Generic1, Traversable) + deriving (Foldable, Functor, Generic, Generic1, Traversable, Show) data Pattern t r = PUnbound @@ -111,7 +111,7 @@ data Pattern t r | PEffectBind !r !Int [Pattern t r] (Pattern t r) | PSequenceLiteral [Pattern t r] | PSequenceOp (Pattern t r) !SeqOp (Pattern t r) - deriving (Generic, Functor, Foldable, Traversable) + deriving (Generic, Functor, Foldable, Traversable, Show) data SeqOp = PCons diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index 07cafb74d9..ec6810f41a 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE LambdaCase #-} -- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html @@ -8,7 +9,7 @@ {-# LANGUAGE Rank2Types #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE ScopedTypeVariables #-} - +{-# LANGUAGE QuantifiedConstraints #-} module U.Core.ABT where @@ -38,6 +39,8 @@ data ABT f v r data Term f v a = Term { freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a) } deriving (Functor, Foldable, Traversable) +deriving instance (forall q. 
Show q => Show (f q), Show v, Show a) => Show (Term f v a) + amap :: (Functor f, Foldable f) => (a -> a') -> Term f v a -> Term f v a' amap f (Term fv a out) = Term fv (f a) $ case out of Var v -> Var v diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index d264cc7305..596788c69a 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -38,6 +38,7 @@ import System.FilePath (takeDirectory) import UnliftIO (MonadIO, liftIO) import UnliftIO.Directory (createDirectoryIfMissing, doesFileExist) import Prelude hiding (readFile, writeFile) +import Debug.Trace (traceM) -- import qualified U.Util.Monoid as Monoid @@ -186,6 +187,7 @@ putFramed put a = do -- 2. Put the length `len` -- 3. Put `a` let bs = putBytes put a + traceM $ "putFramed " ++ (show $ BS.length bs) ++ " bytes: " ++ show bs putVarInt (BS.length bs) putByteString bs diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index b815b8f9a7..8e5c4dfb52 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -246,7 +246,8 @@ makeSelfContained' code uf = do getTypeOfTerm :: (Applicative m, Var v, BuiltinAnnotation a) => Codebase m v a -> Reference -> m (Maybe (Type v a)) -getTypeOfTerm c = \case +getTypeOfTerm _c r | trace ("Codebase.getTypeOfTerm " ++ show r) False = undefined +getTypeOfTerm c r = case r of Reference.DerivedId h -> getTypeOfTermImpl c h r@Reference.Builtin{} -> pure $ fmap (const builtinAnnotation) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 381dcf270f..c85fb707f2 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -61,7 +61,7 @@ import Unison.DataDeclaration (Decl) import qualified 
Unison.DataDeclaration as Decl import Unison.Hash (Hash) import Unison.Parser (Ann) -import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, traceM) +import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, trace, traceM) import qualified Unison.PrettyTerminal as PT import Unison.Reference (Reference) import qualified Unison.Reference as Reference @@ -232,6 +232,7 @@ sqliteCodebase root = do getDeclTypeById = fmap Cv.decltype2to1 . Ops.getDeclTypeByReference getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) + getTypeOfTermImpl id | trace ("getTypeOfTermImpl " ++ show id) False = undefined getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = runDB' conn do type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) From 40571dcea86bf12ace85f5b46166c676cb3ce303 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 27 Jan 2021 16:03:55 -0500 Subject: [PATCH 083/225] MaybeT and lookupDeclElement and SQL fixes --- .../U/Codebase/Sqlite/Branch/Diff.hs | 7 ++--- .../U/Codebase/Sqlite/Branch/Format.hs | 2 ++ .../U/Codebase/Sqlite/Branch/Full.hs | 2 ++ .../U/Codebase/Sqlite/Operations.hs | 26 +++++++++++++------ .../U/Codebase/Sqlite/Queries.hs | 13 ++++++---- .../U/Codebase/Sqlite/Serialization.hs | 6 +++-- 6 files changed, 38 insertions(+), 18 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs index c7b88b61b9..09dc620016 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Diff.hs @@ -21,9 +21,9 @@ import qualified Data.Set as Set type LocalDiff = Diff' LocalTextId LocalDefnId LocalPatchObjectId LocalBranchChildId type Diff = Diff' TextId ObjectId PatchObjectId (BranchObjectId, CausalHashId) -data DefinitionOp' r = RemoveDef | AddDefWithMetadata (Set r) | AlterDefMetadata (AddRemove r) -data PatchOp' p = PatchRemove | PatchAddReplace p deriving Functor -data ChildOp' c = 
ChildRemove | ChildAddReplace c deriving Functor +data DefinitionOp' r = RemoveDef | AddDefWithMetadata (Set r) | AlterDefMetadata (AddRemove r) deriving Show +data PatchOp' p = PatchRemove | PatchAddReplace p deriving (Functor, Show) +data ChildOp' c = ChildRemove | ChildAddReplace c deriving (Functor, Show) type AddRemove a = Map a Bool type LocalDefinitionOp = DefinitionOp' (Metadata LocalTextId LocalDefnId) @@ -47,6 +47,7 @@ data Diff' t h p c = Diff patches :: Map t (PatchOp' p), children :: Map t (ChildOp' c) } + deriving Show type Metadata t h = Reference' t h diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs index 2e23fd409e..7b71a1e433 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Format.hs @@ -9,6 +9,7 @@ import U.Codebase.Sqlite.DbId (CausalHashId, BranchObjectId, ObjectId, PatchObje data BranchFormat = Full BranchLocalIds LocalBranch | Diff BranchObjectId BranchLocalIds LocalDiff + deriving Show data BranchLocalIds = LocalIds { branchTextLookup :: Vector TextId, @@ -16,3 +17,4 @@ data BranchLocalIds = LocalIds branchPatchLookup :: Vector PatchObjectId, branchChildLookup :: Vector (BranchObjectId, CausalHashId) } + deriving Show diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs index b1f0e04dd7..1cbe01e6f0 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/Full.hs @@ -25,12 +25,14 @@ data Branch' t h p c = Branch patches :: Map t p, children :: Map t c } + deriving Show type LocalMetadataSet = MetadataSetFormat' LocalTextId LocalDefnId type DbMetadataSet = MetadataSetFormat' TextId ObjectId data MetadataSetFormat' t h = Inline (Set (Reference' t h)) + deriving Show quadmap :: forall t h p c t' h' p' c'. 
(Ord t', Ord h') => (t -> t') -> (h -> h') -> (p -> p') -> (c -> c') -> Branch' t h p c -> Branch' t' h' p' c' quadmap ft fh fp fc (Branch terms types patches children) = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 0bba142ae1..7ddce31e12 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -14,7 +14,7 @@ module U.Codebase.Sqlite.Operations where import Control.Lens (Lens') import qualified Control.Lens as Lens -import Control.Monad (join, (<=<)) +import Control.Monad (MonadPlus(mzero), join, (<=<)) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad.State (MonadState, StateT, evalStateT) import Control.Monad.Trans (MonadTrans (lift)) @@ -202,7 +202,7 @@ objectExistsForHash :: DB m => H.Hash -> m Bool objectExistsForHash h = isJust <$> runMaybeT do id <- MaybeT . Q.loadHashId . H.toBase32Hex $ h - Q.maybeObjectIdForAnyHashId id + MaybeT $ Q.maybeObjectIdForAnyHashId id loadHashByObjectId :: EDB m => Db.ObjectId -> m H.Hash loadHashByObjectId = fmap H.fromBase32Hex . liftQ . Q.loadPrimaryHashByObjectId @@ -534,11 +534,12 @@ loadTermWithTypeByReference (C.Reference.Id h i) = >>= decodeTermElementWithType i >>= uncurry3 s2cTermWithType -loadTermByReference :: EDB m => C.Reference.Id -> m (C.Term Symbol) +loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) loadTermByReference id | trace ("loadTermByReference " ++ show id) False = undefined loadTermByReference (C.Reference.Id h i) = primaryHashToExistingObjectId h - >>= liftQ . Q.loadObjectById + >>= liftQ . 
Q.loadObjectWithTypeById + >>= \case (OT.TermComponent, blob) -> pure blob; _ -> mzero -- retrieve and deserialize the blob >>= decodeTermElementDiscardingType i >>= uncurry s2cTerm @@ -547,7 +548,8 @@ loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Typ loadTypeOfTermByTermReference id | trace ("loadTypeOfTermByTermReference " ++ show id) False = undefined loadTypeOfTermByTermReference (C.Reference.Id h i) = primaryHashToExistingObjectId h - >>= liftQ . Q.loadObjectById + >>= liftQ . Q.loadObjectWithTypeById + >>= \case (OT.TermComponent, blob) -> pure blob; _ -> mzero -- retrieve and deserialize the blob >>= decodeTermElementDiscardingTerm i >>= uncurry s2cTypeOfTerm @@ -714,7 +716,10 @@ loadDeclByReference id | trace ("loadDeclByReference " ++ show id) False = undef loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- - primaryHashToExistingObjectId h >>= liftQ . Q.loadObjectById >>= decodeDeclElement i + primaryHashToExistingObjectId h + >>= liftQ . 
Q.loadObjectWithTypeById + >>= \case (OT.DeclComponent, blob) -> pure blob; _ -> mzero + >>= decodeDeclElement i -- look up the text and hashes that are used by the term texts <- traverse loadTextById $ LocalIds.textLookup localIds @@ -809,10 +814,13 @@ saveRootBranch (C.Causal hc he parents me) = do chId <- liftQ (Q.saveCausalHash hc) parentCausalHashIds <- liftQ (Q.loadCausalParents chId) >>= \case [] -> do + -- no parents means hc maybe hasn't been saved previously, + -- so try to save each parent (recursively) before continuing to save hc for (Map.toList parents) $ \(causalHash, mcausal) -> do -- check if we can short circuit the parent before loading it, - -- by checking if there are causal parents for hc + -- by checking if there are causal parents associated with hc parentChId <- liftQ (Q.saveCausalHash causalHash) + -- test if the parent has been saved previously: liftQ (Q.loadCausalParents parentChId) >>= \case [] -> do c <- mcausal; snd <$> saveRootBranch c _grandParents -> pure parentChId @@ -903,7 +911,9 @@ saveRootBranch (C.Causal hc he parents me) = do saveBranchObject :: DB m => Db.BranchHashId -> BranchLocalIds -> S.Branch.Full.LocalBranch -> m Db.BranchObjectId saveBranchObject (Db.unBranchHashId -> hashId) li lBranch = do let bytes = S.putBytes S.putBranchFormat $ S.BranchFormat.Full li lBranch - Db.BranchObjectId <$> Q.saveObject hashId OT.Namespace bytes + oId <- Q.saveObject hashId OT.Namespace bytes + Q.saveHashObject hashId oId 1 -- todo: change me + pure $ Db.BranchObjectId oId done :: EDB m => (a, BranchSavingWriter) -> m (BranchLocalIds, a) done (lBranch, (textValues, defnHashes, patchObjectIds, branchCausalIds)) = do textIds <- liftQ $ traverse Q.saveText textValues diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index d14ca042aa..62f6d250b4 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -179,7 +179,7 @@ loadTextById h = queryAtom sql (Only h) >>= orError (UnknownTextId h) saveHashObject :: DB m => HashId -> ObjectId -> Int -> m () saveHashObject hId oId version = execute sql (hId, oId, version) where sql = [here| - INSERT INTO hash_object (hash_id, object_id, version) + INSERT INTO hash_object (hash_id, object_id, hash_version) VALUES (?, ?, ?) ON CONFLICT DO NOTHING |] @@ -348,10 +348,13 @@ saveWatch k r blob = execute sql (r :. Only blob) >> execute sql2 (r :. Only k) loadWatch :: DB m => WatchKind -> Reference.IdH -> m (Maybe ByteString) loadWatch k r = queryAtom sql (Only k :. r) where sql = [here| - SELECT bytes FROM watch - WHERE watch_kind_id = ? - AND hash_id = ? - AND component_index = ? + SELECT result FROM watch_result + INNER JOIN watch + ON watch_result.hash_id = watch.hash_id + AND watch_result.component_index = watch.component_index + WHERE watch.watch_kind_id = ? + AND watch.hash_id = ? + AND watch.component_index = ? |] loadWatchesByWatchKind :: DB m => WatchKind -> m [Reference.Id] diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index a78c377c73..88ba463b81 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -427,8 +427,10 @@ getDeclElement = lookupDeclElement :: MonadGet m => Reference.Pos -> m (LocalIds, DeclFormat.Decl Symbol) -lookupDeclElement = - unsafeFramedArrayLookup (getPair getLocalIds getDeclElement) . 
fromIntegral +lookupDeclElement i = + getWord8 >>= \case + 0 -> unsafeFramedArrayLookup (getPair getLocalIds getDeclElement) $ fromIntegral i + other -> unknownTag "lookupDeclElement" other putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () putBranchFormat = \case From 8800544ac2d0c7ea746b637e5a4c2fa150f9419e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 28 Jan 2021 15:22:00 -0500 Subject: [PATCH 084/225] move where hash_object is populated --- .../U/Codebase/Sqlite/Operations.hs | 1 - .../U/Codebase/Sqlite/Queries.hs | 6 +- codebase2/codebase/U/Codebase/Branch.hs | 7 +++ .../src/Unison/Codebase/SqliteCodebase.hs | 61 ++++++++++--------- 4 files changed, 43 insertions(+), 32 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 7ddce31e12..b8adfabff8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -912,7 +912,6 @@ saveRootBranch (C.Causal hc he parents me) = do saveBranchObject (Db.unBranchHashId -> hashId) li lBranch = do let bytes = S.putBytes S.putBranchFormat $ S.BranchFormat.Full li lBranch oId <- Q.saveObject hashId OT.Namespace bytes - Q.saveHashObject hashId oId 1 -- todo: change me pure $ Db.BranchObjectId oId done :: EDB m => (a, BranchSavingWriter) -> m (BranchLocalIds, a) done (lBranch, (textValues, defnHashes, patchObjectIds, branchCausalIds)) = do diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 62f6d250b4..cf46de0aa5 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -185,8 +185,10 @@ saveHashObject hId oId version = execute sql (hId, oId, version) where |] saveObject :: DB m => HashId -> ObjectType -> ByteString -> m ObjectId -saveObject h t blob = - execute sql (h, t, blob) >> 
queryOne (maybeObjectIdForPrimaryHashId h) +saveObject h t blob = do + oId <- execute sql (h, t, blob) >> queryOne (maybeObjectIdForPrimaryHashId h) + saveHashObject h oId 1 -- todo: remove this from here, and add it to other relevant places once there are v1 and v2 hashes + pure oId where sql = [here| INSERT INTO object (primary_hash_id, type_id, bytes) diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index 6bba6df79f..0df4710ebc 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -9,6 +9,7 @@ import U.Codebase.Reference (Reference) import U.Codebase.Referent (Referent) import U.Codebase.TermEdit (TermEdit) import U.Codebase.TypeEdit (TypeEdit) +import qualified Data.Map as Map newtype NameSegment = NameSegment Text deriving (Eq, Ord, Show) @@ -30,3 +31,9 @@ data Patch = Patch { termEdits :: Map Referent (Set TermEdit), typeEdits :: Map Reference (Set TypeEdit) } + +instance Show (Branch m) where + show b = "Branch { terms = " ++ show (fmap Map.keys (terms b)) ++ + ", types = " ++ show (fmap Map.keys (types b)) ++ + ", patches = " ++ show (fmap fst (patches b)) ++ + ", children = " ++ show (Map.keys (children b)) \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index c85fb707f2..fdbf43a2ee 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -245,18 +245,19 @@ sqliteCodebase root = do Cv.decl2to1 h1 getCycleLen decl2 putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () - putTerm (Reference.Id h@(Cv.hash1to2 -> h2) i n) tm tp = + putTerm (Reference.Id h@(Cv.hash1to2 -> h2) i n') tm tp = runDB conn $ unlessM - (Ops.objectExistsForHash h2) - ( withBuffer termBuffer h \(BufferEntry size comp missing waiting) -> do - let size' = Just n - pure $ - ifM - ((==) <$> size 
<*> size') - (pure ()) - (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') + (Ops.objectExistsForHash h2 >>= \b -> do traceM $ "objectExistsForHash " ++ show h2 ++ " = " ++ show b; pure b) + ( withBuffer termBuffer h \be@(BufferEntry size comp missing waiting) -> do + let size' = Just n' + -- if size was previously set, it's expected to match size'. + case size of + Just n | n /= n' -> + error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size' + _ -> pure () let comp' = Map.insert i (tm, tp) comp + -- for the component element that's been passed in, add its dependencies to missing' missingTerms' <- filterM (fmap not . Ops.objectExistsForHash . Cv.hash1to2) @@ -266,6 +267,7 @@ sqliteCodebase root = do [h | Reference.Derived h _i _n <- Set.toList $ Term.typeDependencies tm] ++ [h | Reference.Derived h _i _n <- Set.toList $ Type.dependencies tp] let missing' = missing <> Set.fromList (missingTerms' <> missingTypes') + -- notify each of the dependencies that h depends on them. traverse (addBufferDependent h termBuffer) missingTerms' traverse (addBufferDependent h declBuffer) missingTypes' putBuffer termBuffer h (BufferEntry size' comp' missing' waiting) @@ -300,20 +302,22 @@ sqliteCodebase root = do -- skip if it has already been flushed unlessM (Ops.objectExistsForHash h2) $ withBuffer buf h try where - try (BufferEntry size comp (Set.delete h -> missing) waiting) = do - missing' <- - filterM - (fmap not . Ops.objectExistsForHash . Cv.hash1to2) - (toList missing) - if null missing' && size == Just (fromIntegral (length comp)) - then do - saveComponent h2 (toList comp) - removeBuffer buf h - traverse_ tryWaiting waiting - else -- update - - putBuffer buf h $ - BufferEntry size comp (Set.fromList missing') waiting + try (BufferEntry size comp (Set.delete h -> missing) waiting) = case size of + Just size -> do + missing' <- + filterM + (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) + (toList missing) + if null missing' && size == fromIntegral (length comp) + then do + saveComponent h2 (toList comp) + removeBuffer buf h + traverse_ tryWaiting waiting + else -- update + putBuffer buf h $ + BufferEntry (Just size) comp (Set.fromList missing') waiting + Nothing -> -- it's never even been added, so there's nothing to do. + pure () tryFlushTermBuffer :: EDB m => Hash -> m () tryFlushTermBuffer h = @@ -340,12 +344,11 @@ sqliteCodebase root = do unlessM (Ops.objectExistsForHash h2) ( withBuffer declBuffer h \(BufferEntry size comp missing waiting) -> do - let size' = Just n - pure $ - ifM - ((==) <$> size <*> size') - (pure ()) - (error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size') + let size' = Just n' + case size of + Just n | n /= n' -> + error $ "targetSize for type " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size' + _ -> pure () let comp' = Map.insert i decl comp moreMissing <- filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) $ From 955cc7c7f1fa61209a1a2d2124c8c0f354f7931c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 28 Jan 2021 15:28:24 -0500 Subject: [PATCH 085/225] accidentally omitted chunk --- parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index fdbf43a2ee..5be45ff83d 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -339,7 +339,7 @@ sqliteCodebase root = do h putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () - putTypeDeclaration (Reference.Id h@(Cv.hash1to2 -> h2) i n) decl = + putTypeDeclaration (Reference.Id h@(Cv.hash1to2 -> h2) i n') decl = runDB conn $ unlessM (Ops.objectExistsForHash h2) From aad73feacc7cc3701541a76030c72ce677043ea2 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Feb 2021 20:41:28 -0500 Subject: [PATCH 086/225] codec issues with format byte on term/decl components --- .../U/Codebase/Sqlite/Operations.hs | 19 +++++++++++-------- .../U/Codebase/Sqlite/Serialization.hs | 17 +++++++++++------ .../U/Util/Serialization.hs | 2 +- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index b8adfabff8..40e78a3cf1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -369,7 +369,7 @@ diffPatch (S.Patch fullTerms fullTypes) (S.Patch refTerms refTypes) = -- * Deserialization helpers decodeComponentLengthOnly :: Err m => ByteString -> m Word64 -decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen S.lengthFramedArray +decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen (S.skip 1 >> S.lengthFramedArray) decodeTermElementWithType :: Err 
m => C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Term, S.Term.Type) decodeTermElementWithType i = getFromBytesOr (ErrTermElement i) (S.lookupTermElement i) @@ -391,6 +391,10 @@ getCycleLen h = do runMaybeT (primaryHashToExistingObjectId h) >>= maybe (throwError $ LegacyUnknownCycleLen h) pure >>= liftQ . Q.loadObjectById + -- todo: decodeComponentLengthOnly is unintentionally a hack that relies on the + -- fact the two things that have cycles (term and decl components) have the same basic + -- serialized structure: first a format byte that is always 0 for now, followed by + -- a framed array representing the component. :grimace: >>= decodeComponentLengthOnly >>= pure . fromIntegral @@ -414,7 +418,9 @@ saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> saveTermComponent h terms = do sTermElements <- traverse (uncurry c2sTerm) terms hashId <- Q.saveHashHash h - let bytes = S.putBytes S.putTermComponent (S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements) + let + li = S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements + bytes = S.putBytes S.putTermFormat $ S.Term.Term li Q.saveObject hashId OT.TermComponent bytes -- | implementation detail of c2{s,w}Term @@ -673,12 +679,9 @@ saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId saveDeclComponent h decls = do sDeclElements <- traverse (c2sDecl Q.saveText primaryHashToExistingObjectId) decls hashId <- Q.saveHashHash h - let bytes = - S.putBytes - S.putDeclFormat - ( S.Decl.Decl . S.Decl.LocallyIndexedComponent $ - Vector.fromList sDeclElements - ) + let + li = S.Decl.LocallyIndexedComponent $ Vector.fromList sDeclElements + bytes = S.putBytes S.putDeclFormat $ S.Decl.Decl li Q.saveObject hashId OT.DeclComponent bytes c2sDecl :: forall m t d. 
EDB m => (Text -> m t) -> (H.Hash -> m d) -> C.Decl Symbol -> m (LocalIds' t d, S.Decl.Decl Symbol) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 88ba463b81..76af012582 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -338,17 +338,22 @@ getTerm = getABT getSymbol getUnit getF 2 -> pure Term.PConcat tag -> unknownTag "SeqOp" tag + lookupTermElement :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term, TermFormat.Type) -lookupTermElement = - unsafeFramedArrayLookup (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) . fromIntegral +lookupTermElement i = getWord8 >>= \case + 0 -> unsafeFramedArrayLookup (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) $ fromIntegral i + tag -> unknownTag "lookupTermElement" tag + lookupTermElementDiscardingType :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term) -lookupTermElementDiscardingType = - unsafeFramedArrayLookup ((,) <$> getLocalIds <*> getFramed getTerm <* skipFramed) . fromIntegral +lookupTermElementDiscardingType i = getWord8 >>= \case + 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <*> getFramed getTerm <* skipFramed) $ fromIntegral i + tag -> unknownTag "lookupTermElementDiscardingType" tag lookupTermElementDiscardingTerm :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Type) -lookupTermElementDiscardingTerm = - unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getFramed getTType) . 
fromIntegral +lookupTermElementDiscardingTerm i = getWord8 >>= \case + 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getFramed getTType) $ fromIntegral i + tag -> unknownTag "lookupTermElementDiscardingTerm" tag getTType :: MonadGet m => m TermFormat.Type getTType = getType getReference diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 596788c69a..36f3e2ef3d 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -224,7 +224,7 @@ lookupFramedArray getA index = do Just <$> getA lengthFramedArray :: MonadGet m => m Word64 -lengthFramedArray = getVarInt +lengthFramedArray = (\offsetsLen -> offsetsLen - 1) <$> getVarInt unsafeFramedArrayLookup :: MonadGet m => m a -> Int -> m a unsafeFramedArrayLookup getA index = do From d4c5758dc323b0884d4f47cf7371bd0065aef430 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Feb 2021 20:42:16 -0500 Subject: [PATCH 087/225] load{Term,Decl}* not sufficiently MaybeT --- .../codebase-sqlite/U/Codebase/Sqlite/Operations.hs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 40e78a3cf1..354b7c111c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -532,9 +532,9 @@ c2xTerm saveText saveDefn tm tp = (Vector.fromList (Foldable.toList defnIds)) pure (ids, void tm, void <$> tp) -loadTermWithTypeByReference :: EDB m => C.Reference.Id -> m (C.Term Symbol, C.Term.Type Symbol) +loadTermWithTypeByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol, C.Term.Type Symbol) loadTermWithTypeByReference (C.Reference.Id h i) = - primaryHashToExistingObjectId h + MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . 
Q.loadObjectById -- retrieve and deserialize the blob >>= decodeTermElementWithType i @@ -543,7 +543,7 @@ loadTermWithTypeByReference (C.Reference.Id h i) = loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) loadTermByReference id | trace ("loadTermByReference " ++ show id) False = undefined loadTermByReference (C.Reference.Id h i) = - primaryHashToExistingObjectId h + MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . Q.loadObjectWithTypeById >>= \case (OT.TermComponent, blob) -> pure blob; _ -> mzero -- retrieve and deserialize the blob @@ -553,7 +553,7 @@ loadTermByReference (C.Reference.Id h i) = loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) loadTypeOfTermByTermReference id | trace ("loadTypeOfTermByTermReference " ++ show id) False = undefined loadTypeOfTermByTermReference (C.Reference.Id h i) = - primaryHashToExistingObjectId h + MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . Q.loadObjectWithTypeById >>= \case (OT.TermComponent, blob) -> pure blob; _ -> mzero -- retrieve and deserialize the blob @@ -719,7 +719,7 @@ loadDeclByReference id | trace ("loadDeclByReference " ++ show id) False = undef loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- - primaryHashToExistingObjectId h + MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . 
Q.loadObjectWithTypeById >>= \case (OT.DeclComponent, blob) -> pure blob; _ -> mzero >>= decodeDeclElement i From c457ff098e14abec14382cbb22f603f73573200c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Feb 2021 21:07:12 -0500 Subject: [PATCH 088/225] white space --- .../codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index cf46de0aa5..120633d978 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -275,11 +275,11 @@ loadCausalValueHashId id = SELECT value_hash_id FROM causal WHERE self_hash_id = ? |] --- todo: do a join here +-- todo: do a join here loadBranchObjectIdByCausalHashId :: EDB m => CausalHashId -> m (Maybe BranchObjectId) loadBranchObjectIdByCausalHashId id = queryAtom sql (Only id) where sql = [here| SELECT object_id FROM hash_object - INNER JOIN causal ON hash_id = causal.value_hash_id + INNER JOIN causal ON hash_id = causal.value_hash_id WHERE causal.self_hash_id = ? |] @@ -330,7 +330,7 @@ setNamespaceRoot id = query_ @m @(Only CausalHashId) "SELECT * FROM namespace_root" >>= \case [] -> execute insert (Only id) _ -> execute update (Only id) - where + where insert = "INSERT INTO namespace_root VALUES (?)" update = "UPDATE namespace_root SET causal_id = ?" @@ -351,8 +351,8 @@ saveWatch k r blob = execute sql (r :. Only blob) >> execute sql2 (r :. Only k) loadWatch :: DB m => WatchKind -> Reference.IdH -> m (Maybe ByteString) loadWatch k r = queryAtom sql (Only k :. r) where sql = [here| SELECT result FROM watch_result - INNER JOIN watch - ON watch_result.hash_id = watch.hash_id + INNER JOIN watch + ON watch_result.hash_id = watch.hash_id AND watch_result.component_index = watch.component_index WHERE watch.watch_kind_id = ? AND watch.hash_id = ? 
@@ -505,7 +505,7 @@ debugQuery :: Bool debugQuery = True query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] -query q r = do +query q r = do c <- ask liftIO . queryTrace "query" q r $ SQLite.query c q r From 0ebe053da6f6cb03fab8dc37103d89c7598a1c1c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Feb 2021 21:07:49 -0500 Subject: [PATCH 089/225] print more output on SQL errors --- .../U/Codebase/Sqlite/Queries.hs | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 120633d978..820cb8d336 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -514,21 +514,31 @@ query_ q = do c <- ask liftIO . queryTrace_ "query" q $ SQLite.query_ c q -queryTrace :: (Monad m, Show q, Show a) => String -> SQLite.Query -> q -> m a -> m a +queryTrace :: (MonadUnliftIO m, Show q, Show a) => String -> SQLite.Query -> q -> m a -> m a queryTrace title query input m = - if debugQuery then do - a <- m - traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a - pure a - else m - -queryTrace_ :: (Monad m, Show a) => String -> SQLite.Query -> m a -> m a + if debugQuery + then + try @_ @SQLite.SQLError m >>= \case + Right a -> do + traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a + pure a + Left e -> do + traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n(and crashed)\n" + throwIO e + else m + +queryTrace_ :: (MonadUnliftIO m, Show a) => String -> SQLite.Query -> m a -> m a queryTrace_ title query m = - if debugQuery then do - a <- m - traceM $ title ++ " " ++ show query ++ "\n output: " ++ show a - pure a - else m + if debugQuery + then + try @_ @SQLite.SQLError m >>= \case + Right a -> do + traceM $ title ++ " " ++ show query ++ "\n 
output: " ++ show a + pure a + Left e -> do + traceM $ title ++ " " ++ show query ++ "\n(and crashed)\n" + throwIO e + else m execute :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m () execute q r = do c <- ask; liftIO . queryTrace "execute" q r $ SQLite.execute c q r From bf9399214fd6cbc7dd6d1d79c42b752a2024986c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Feb 2021 21:10:43 -0500 Subject: [PATCH 090/225] fix FK bug in watch / watch_result schema --- codebase2/codebase-sqlite/sql/create.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index f26b82c879..0003672373 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -90,14 +90,14 @@ CREATE TABLE causal_old ( ); CREATE TABLE watch_result ( - hash_id INTEGER NOT NULL CONSTRAINT watch_result_fk1 REFERENCES object(id), + hash_id INTEGER NOT NULL CONSTRAINT watch_result_fk1 REFERENCES hash(id), component_index INTEGER NOT NULL, result BLOB NOT NULL, PRIMARY KEY (hash_id, component_index) ); CREATE TABLE watch ( - hash_id INTEGER NOT NULL CONSTRAINT watch_fk1 REFERENCES object(id), + hash_id INTEGER NOT NULL CONSTRAINT watch_fk1 REFERENCES hash(id), component_index INTEGER NOT NULL, watch_kind_id INTEGER NOT NULL CONSTRAINT watch_fk2 REFERENCES watch_kind_description(id), PRIMARY KEY (hash_id, component_index, watch_kind_id) From b1ac5bee71bd9409d5987f607c3f59936a3ed9f7 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Feb 2021 21:13:58 -0500 Subject: [PATCH 091/225] this symlink for haskell-language-server's sake sorry --- sql | 1 + 1 file changed, 1 insertion(+) create mode 120000 sql diff --git a/sql b/sql new file mode 120000 index 0000000000..d32fcadaae --- /dev/null +++ b/sql @@ -0,0 +1 @@ +codebase2/codebase-sqlite/sql \ No newline at end of file From 3b40ed8aab9cecbf748a2063165d78b394532e5f Mon Sep 17 00:00:00 2001 From: Arya Irani Date: 
Mon, 1 Feb 2021 21:23:39 -0500 Subject: [PATCH 092/225] some leftover cleanup relating to interactive commits --- .../U/Codebase/Sqlite/Operations.hs | 87 ++++++++++--------- .../U/Codebase/Sqlite/Queries.hs | 6 +- .../src/Unison/Codebase/SqliteCodebase.hs | 4 +- 3 files changed, 49 insertions(+), 48 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 354b7c111c..e6e09ba49e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -14,7 +14,7 @@ module U.Codebase.Sqlite.Operations where import Control.Lens (Lens') import qualified Control.Lens as Lens -import Control.Monad (MonadPlus(mzero), join, (<=<)) +import Control.Monad (MonadPlus (mzero), join, (<=<)) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad.State (MonadState, StateT, evalStateT) import Control.Monad.Trans (MonadTrans (lift)) @@ -25,6 +25,7 @@ import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) import Data.ByteString (ByteString) import Data.Bytes.Get (runGetS) +import qualified Data.Bytes.Get as Get import qualified Data.Foldable as Foldable import Data.Functor (void, (<&>)) import Data.Functor.Identity (Identity) @@ -41,6 +42,7 @@ import Data.Traversable (for) import Data.Tuple.Extra (uncurry3) import qualified Data.Vector as Vector import Data.Word (Word64) +import Debug.Trace import qualified U.Codebase.Branch as C.Branch import qualified U.Codebase.Causal as C import U.Codebase.Decl (ConstructorId) @@ -111,7 +113,6 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import Debug.Trace -- * Error handling @@ -369,7 +370,7 @@ diffPatch (S.Patch fullTerms fullTypes) (S.Patch refTerms refTypes) = -- * Deserialization helpers 
decodeComponentLengthOnly :: Err m => ByteString -> m Word64 -decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen (S.skip 1 >> S.lengthFramedArray) +decodeComponentLengthOnly = getFromBytesOr ErrFramedArrayLen (Get.skip 1 >> S.lengthFramedArray) decodeTermElementWithType :: Err m => C.Reference.Pos -> ByteString -> m (LocalIds, S.Term.Term, S.Term.Type) decodeTermElementWithType i = getFromBytesOr (ErrTermElement i) (S.lookupTermElement i) @@ -391,9 +392,9 @@ getCycleLen h = do runMaybeT (primaryHashToExistingObjectId h) >>= maybe (throwError $ LegacyUnknownCycleLen h) pure >>= liftQ . Q.loadObjectById - -- todo: decodeComponentLengthOnly is unintentionally a hack that relies on the + -- todo: decodeComponentLengthOnly is unintentionally a hack that relies on the -- fact the two things that have cycles (term and decl components) have the same basic - -- serialized structure: first a format byte that is always 0 for now, followed by + -- serialized structure: first a format byte that is always 0 for now, followed by -- a framed array representing the component. :grimace: >>= decodeComponentLengthOnly >>= pure . 
fromIntegral @@ -418,9 +419,8 @@ saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> saveTermComponent h terms = do sTermElements <- traverse (uncurry c2sTerm) terms hashId <- Q.saveHashHash h - let - li = S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements - bytes = S.putBytes S.putTermFormat $ S.Term.Term li + let li = S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements + bytes = S.putBytes S.putTermFormat $ S.Term.Term li Q.saveObject hashId OT.TermComponent bytes -- | implementation detail of c2{s,w}Term @@ -679,9 +679,8 @@ saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId saveDeclComponent h decls = do sDeclElements <- traverse (c2sDecl Q.saveText primaryHashToExistingObjectId) decls hashId <- Q.saveHashHash h - let - li = S.Decl.LocallyIndexedComponent $ Vector.fromList sDeclElements - bytes = S.putBytes S.putDeclFormat $ S.Decl.Decl li + let li = S.Decl.LocallyIndexedComponent $ Vector.fromList sDeclElements + bytes = S.putBytes S.putDeclFormat $ S.Decl.Decl li Q.saveObject hashId OT.DeclComponent bytes c2sDecl :: forall m t d. EDB m => (Text -> m t) -> (H.Hash -> m d) -> C.Decl Symbol -> m (LocalIds' t d, S.Decl.Decl Symbol) @@ -720,9 +719,9 @@ loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- MaybeT (primaryHashToMaybeObjectId h) - >>= liftQ . Q.loadObjectWithTypeById - >>= \case (OT.DeclComponent, blob) -> pure blob; _ -> mzero - >>= decodeDeclElement i + >>= liftQ . 
Q.loadObjectWithTypeById + >>= \case (OT.DeclComponent, blob) -> pure blob; _ -> mzero + >>= decodeDeclElement i -- look up the text and hashes that are used by the term texts <- traverse loadTextById $ LocalIds.textLookup localIds @@ -815,34 +814,35 @@ saveRootBranch (C.Causal hc he parents me) = do traceM $ "\nsaveRootBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) -- check if we can skip the whole thing, by checking if there are causal parents for hc chId <- liftQ (Q.saveCausalHash hc) - parentCausalHashIds <- liftQ (Q.loadCausalParents chId) >>= \case - [] -> do - -- no parents means hc maybe hasn't been saved previously, - -- so try to save each parent (recursively) before continuing to save hc - for (Map.toList parents) $ \(causalHash, mcausal) -> do - -- check if we can short circuit the parent before loading it, - -- by checking if there are causal parents associated with hc - parentChId <- liftQ (Q.saveCausalHash causalHash) - -- test if the parent has been saved previously: - liftQ (Q.loadCausalParents parentChId) >>= \case - [] -> do c <- mcausal; snd <$> saveRootBranch c - _grandParents -> pure parentChId - parentCausalHashIds -> pure parentCausalHashIds - - boId <- liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case - Just boId -> pure boId - Nothing -> do - bhId <- liftQ (Q.saveBranchHash he) - (li, lBranch) <- c2lBranch =<< me - boId <- saveBranchObject bhId li lBranch - liftQ (Q.saveCausal chId bhId) - -- save the link between child and parents - liftQ (Q.saveCausalParents chId parentCausalHashIds) - pure boId - + parentCausalHashIds <- + liftQ (Q.loadCausalParents chId) >>= \case + [] -> do + -- no parents means hc maybe hasn't been saved previously, + -- so try to save each parent (recursively) before continuing to save hc + for (Map.toList parents) $ \(causalHash, mcausal) -> do + -- check if we can short circuit the parent before loading it, + -- by checking if there are causal parents 
associated with hc + parentChId <- liftQ (Q.saveCausalHash causalHash) + -- test if the parent has been saved previously: + liftQ (Q.loadCausalParents parentChId) >>= \case + [] -> do c <- mcausal; snd <$> saveRootBranch c + _grandParents -> pure parentChId + parentCausalHashIds -> pure parentCausalHashIds + + boId <- + liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case + Just boId -> pure boId + Nothing -> do + bhId <- liftQ (Q.saveBranchHash he) + (li, lBranch) <- c2lBranch =<< me + boId <- saveBranchObject bhId li lBranch + liftQ (Q.saveCausal chId bhId) + -- save the link between child and parents + liftQ (Q.saveCausalParents chId parentCausalHashIds) + pure boId + Q.setNamespaceRoot chId pure (boId, chId) - where c2lBranch :: EDB m => C.Branch.Branch m -> m (BranchLocalIds, S.Branch.Full.LocalBranch) c2lBranch (C.Branch.Branch terms types patches children) = @@ -953,9 +953,10 @@ loadCausalByCausalHashId :: EDB m => Db.CausalHashId -> m (C.Branch.Causal m) loadCausalByCausalHashId id = do hc <- loadCausalHashById id hb <- loadValueHashByCausalHashId id - let loadNamespace = loadBranchByCausalHashId id >>= \case - Nothing -> throwError (ExpectedBranch' id) - Just b -> pure b + let loadNamespace = + loadBranchByCausalHashId id >>= \case + Nothing -> throwError (ExpectedBranch' id) + Just b -> pure b parentHashIds <- Q.loadCausalParents id loadParents <- for parentHashIds \hId -> do h <- loadCausalHashById hId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 820cb8d336..7a3279835f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -6,10 +6,10 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE QuasiQuotes #-} +{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TemplateHaskell #-} {-# LANGUAGE TupleSections #-} -{-# LANGUAGE 
ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -46,7 +46,7 @@ import qualified U.Codebase.WatchKind as WatchKind import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash -import UnliftIO (MonadUnliftIO, withRunInIO) +import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO) -- * types @@ -502,7 +502,7 @@ queryExists :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m Bool queryExists q r = not . null . map (id @SQLData) <$> queryAtoms q r debugQuery :: Bool -debugQuery = True +debugQuery = False query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] query q r = do diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 5be45ff83d..17a1ee771c 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -14,7 +14,7 @@ module Unison.Codebase.SqliteCodebase where import qualified Control.Exception import Control.Monad (filterM, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) -import Control.Monad.Extra (ifM, unlessM) +import Control.Monad.Extra (unlessM) import qualified Control.Monad.Extra as Monad import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.Trans (MonadTrans (lift)) @@ -249,7 +249,7 @@ sqliteCodebase root = do runDB conn $ unlessM (Ops.objectExistsForHash h2 >>= \b -> do traceM $ "objectExistsForHash " ++ show h2 ++ " = " ++ show b; pure b) - ( withBuffer termBuffer h \be@(BufferEntry size comp missing waiting) -> do + ( withBuffer termBuffer h \(BufferEntry size comp missing waiting) -> do let size' = Just n' -- if size was previously set, it's expected to match size'. 
case size of From c8b6e4e957eeb38a369000a1aa2f51f7e0f68221 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Feb 2021 21:56:50 -0500 Subject: [PATCH 093/225] add a bunch of debug output behind guarded by constant in each module --- .../U/Codebase/Sqlite/Operations.hs | 25 ++++--- .../U/Codebase/Sqlite/Queries.hs | 10 ++- .../U/Codebase/Sqlite/Serialization.hs | 14 ++-- .../U/Util/Serialization.hs | 7 +- parser-typechecker/src/Unison/Codebase.hs | 15 ++-- .../src/Unison/Codebase/Editor/HandleInput.hs | 2 +- .../src/Unison/Codebase/SqliteCodebase.hs | 68 +++++++++++++++---- parser-typechecker/src/Unison/FileParsers.hs | 4 ++ 8 files changed, 107 insertions(+), 38 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index e6e09ba49e..dcc3675010 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -113,10 +113,15 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set +import Control.Monad (when) -- * Error handling type Err m = MonadError Error m +debug :: Bool +debug = False + +type Err m = (MonadError Error m, HasCallStack) type EDB m = (Err m, DB m) @@ -387,7 +392,7 @@ decodeDeclElement i = getFromBytesOr (ErrDeclElement i) (S.lookupDeclElement i) -- * legacy conversion helpers getCycleLen :: EDB m => H.Hash -> m Word64 -getCycleLen id | trace ("getCycleLen " ++ show id) False = undefined +getCycleLen id | debug && trace ("getCycleLen " ++ show id) False = undefined getCycleLen h = do runMaybeT (primaryHashToExistingObjectId h) >>= maybe (throwError $ LegacyUnknownCycleLen h) pure @@ -406,7 +411,7 @@ getDeclTypeByReference r@(C.Reference.Id h pos) = >>= pure . 
C.Decl.declType componentByObjectId :: EDB m => Db.ObjectId -> m [S.Reference.Id] -componentByObjectId id | trace ("componentByObjectId " ++ show id) False = undefined +componentByObjectId id | debug && trace ("componentByObjectId " ++ show id) False = undefined componentByObjectId id = do len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. len - 1]] @@ -416,6 +421,7 @@ componentByObjectId id = do -- ** Saving & loading terms saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m Db.ObjectId +saveTermComponent h _terms | debug && trace ("Operations.saveTermComponent " ++ show h) False = undefined saveTermComponent h terms = do sTermElements <- traverse (uncurry c2sTerm) terms hashId <- Q.saveHashHash h @@ -541,7 +547,7 @@ loadTermWithTypeByReference (C.Reference.Id h i) = >>= uncurry3 s2cTermWithType loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) -loadTermByReference id | trace ("loadTermByReference " ++ show id) False = undefined +loadTermByReference id | debug && trace ("loadTermByReference " ++ show id) False = undefined loadTermByReference (C.Reference.Id h i) = MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . Q.loadObjectWithTypeById @@ -551,7 +557,7 @@ loadTermByReference (C.Reference.Id h i) = >>= uncurry s2cTerm loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) -loadTypeOfTermByTermReference id | trace ("loadTypeOfTermByTermReference " ++ show id) False = undefined +loadTypeOfTermByTermReference id | debug && trace ("loadTypeOfTermByTermReference " ++ show id) False = undefined loadTypeOfTermByTermReference (C.Reference.Id h i) = MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . 
Q.loadObjectWithTypeById @@ -676,6 +682,7 @@ w2cTerm ids tm = do -- ** Saving & loading type decls saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId +saveDeclComponent h _decls | debug && trace ("Operations.saveDeclComponent " ++ show h) False = undefined saveDeclComponent h decls = do sDeclElements <- traverse (c2sDecl Q.saveText primaryHashToExistingObjectId) decls hashId <- Q.saveHashHash h @@ -714,7 +721,7 @@ c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do pure (ids, decl) loadDeclByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) -loadDeclByReference id | trace ("loadDeclByReference " ++ show id) False = undefined +loadDeclByReference id | debug && trace ("loadDeclByReference " ++ show id) False = undefined loadDeclByReference (C.Reference.Id h i) = do -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- @@ -811,7 +818,7 @@ type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveRootBranch (C.Causal hc he parents me) = do - traceM $ "\nsaveRootBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) + when debug $ traceM $ "\nsaveRootBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) -- check if we can skip the whole thing, by checking if there are causal parents for hc chId <- liftQ (Q.saveCausalHash hc) parentCausalHashIds <- @@ -976,7 +983,7 @@ loadBranchByObjectId id = do S.BranchFormat.Diff r li d -> doDiff r [l2sDiff li d] where deserializeBranchObject :: EDB m => Db.BranchObjectId -> m S.BranchFormat - deserializeBranchObject id | trace ("deserializeBranchObject " ++ show id) False = undefined + deserializeBranchObject id | debug && trace ("deserializeBranchObject " ++ show id) False = undefined deserializeBranchObject id = (liftQ . 
Q.loadObjectById) (Db.unBranchObjectId id) >>= getFromBytesOr (ErrBranch id) S.getBranchFormat @@ -1159,7 +1166,7 @@ s2cPatch (S.Patch termEdits typeEdits) = <*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits deserializePatchObject :: EDB m => Db.PatchObjectId -> m S.PatchFormat -deserializePatchObject id | trace ("deserializePatchObject " ++ show id) False = undefined +deserializePatchObject id | debug && trace ("deserializePatchObject " ++ show id) False = undefined deserializePatchObject id = (liftQ . Q.loadObjectById) (Db.unPatchObjectId id) >>= getFromBytesOr (ErrPatch id) S.getPatchFormat @@ -1235,7 +1242,7 @@ declReferentsByPrefix b32prefix pos cid = do cids = [cid | cid <- [0 :: ConstructorId .. ctorCount - 1], test cid] pure (h, pos, len, dt, cids) getDeclCtorCount :: EDB m => S.Reference.Id -> m (C.Decl.DeclType, Word64, ConstructorId) - getDeclCtorCount id | trace ("getDeclCtorCount " ++ show id) False = undefined + getDeclCtorCount id | debug && trace ("getDeclCtorCount " ++ show id) False = undefined getDeclCtorCount (C.Reference.Id r i) = do bs <- liftQ (Q.loadObjectById r) len <- decodeComponentLengthOnly bs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 7a3279835f..3f487967c2 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -47,6 +47,7 @@ import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO) +import Control.Monad (when) -- * types @@ -84,11 +85,14 @@ type TypeHashReference = Reference' TextId HashId createSchema :: (DB m, MonadUnliftIO m) => m () createSchema = do - traceM "--- CREATING SCHEMA ---" + when debug $ traceM "--- CREATING SCHEMA ---" withImmediateTransaction . traverse_ (execute_ . 
fromString) $ List.splitOn ";" [hereFile|sql/create.sql|] <> List.splitOn ";" [hereFile|sql/create-index.sql|] +debug :: Bool +debug = False + setFlags :: DB m => m () setFlags = execute_ "PRAGMA foreign_keys = ON;" @@ -197,10 +201,10 @@ saveObject h t blob = do |] loadObjectById :: EDB m => ObjectId -> m ByteString -loadObjectById id | trace ("loadObjectById " ++ show id) False = undefined +loadObjectById id | debugQuery && trace ("loadObjectById " ++ show id) False = undefined loadObjectById oId = do result <- queryAtom sql (Only oId) >>= orError (UnknownObjectId oId) - traceM $ "loadObjectById " ++ show oId ++ " = " ++ show result + when debugQuery $ traceM $ "loadObjectById " ++ show oId ++ " = " ++ show result pure result where sql = [here| SELECT bytes FROM object WHERE id = ? diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 76af012582..ef33ba9b39 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -47,10 +47,13 @@ import qualified U.Codebase.Term as Term import qualified U.Codebase.Type as Type import qualified U.Core.ABT as ABT import qualified U.Util.Monoid as Monoid -import U.Util.Serialization +import U.Util.Serialization hiding (debug) import Prelude hiding (getChar, putChar) import Debug.Trace (trace) +debug :: Bool +debug = False + putABT :: (MonadPut m, Foldable f, Functor f, Ord v) => (v -> m ()) -> @@ -161,7 +164,7 @@ putTermComponent :: MonadPut m => TermFormat.LocallyIndexedComponent -> m () -putTermComponent t | trace ("putTermComponent " ++ show t) False = undefined +putTermComponent t | debug && trace ("putTermComponent " ++ show t) False = undefined putTermComponent (TermFormat.LocallyIndexedComponent v) = putFramedArray ( \(localIds, term, typ) -> @@ -170,7 +173,7 @@ putTermComponent (TermFormat.LocallyIndexedComponent v) = v putTerm :: MonadPut m => 
TermFormat.Term -> m () -putTerm _t | trace "putTerm" False = undefined +putTerm _t | debug && trace "putTerm" False = undefined putTerm t = putABT putSymbol putUnit putF t where putF :: MonadPut m => (a -> m ()) -> TermFormat.F a -> m () @@ -385,7 +388,7 @@ putDeclFormat = \case where -- These use a framed array for randomer access putDeclComponent :: MonadPut m => DeclFormat.LocallyIndexedComponent -> m () - putDeclComponent t | trace ("putDeclComponent " ++ show t) False = undefined + putDeclComponent t | debug && trace ("putDeclComponent " ++ show t) False = undefined putDeclComponent (DeclFormat.LocallyIndexedComponent v) = putFramedArray (putPair putLocalIds putDeclElement) v @@ -438,7 +441,8 @@ lookupDeclElement i = other -> unknownTag "lookupDeclElement" other putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () -putBranchFormat = \case +putBranchFormat b | debug && trace ("putBranchFormat " ++ show b) False = undefined +putBranchFormat b = case b of BranchFormat.Full li b -> putWord8 0 *> putBranchFull li b BranchFormat.Diff r li d -> putWord8 1 *> putBranchDiff r li d where diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 36f3e2ef3d..d1f8da281a 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -9,7 +9,7 @@ module U.Util.Serialization where import Control.Applicative (Applicative (liftA2), liftA3) -import Control.Monad (foldM, replicateM) +import Control.Monad (foldM, replicateM, when) import Data.Bits (Bits, clearBit, setBit, shiftL, shiftR, testBit, (.|.)) import Data.ByteString (ByteString, readFile, writeFile) import qualified Data.ByteString as BS @@ -52,6 +52,9 @@ data Format a = Format put :: Put a } +debug :: Bool +debug = False + getFromBytes :: Get a -> ByteString -> Maybe a getFromBytes getA bytes = case runGetS getA bytes of Left _ -> Nothing; Right a -> Just a @@ -187,7 +190,7 
@@ putFramed put a = do -- 2. Put the length `len` -- 3. Put `a` let bs = putBytes put a - traceM $ "putFramed " ++ (show $ BS.length bs) ++ " bytes: " ++ show bs + when debug $ traceM $ "putFramed " ++ (show $ BS.length bs) ++ " bytes: " ++ show bs putVarInt (BS.length bs) putByteString bs diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index 8e5c4dfb52..8bd1a3aafc 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -95,6 +95,9 @@ data GetRootBranchError | CouldntParseRootBranch String | CouldntLoadRootBranch Branch.Hash deriving Show + +debug :: Bool +debug = False data SyncFileCodebaseResult = SyncOk | UnknownDestinationRootBranch Branch.Hash | NotFastForward @@ -113,7 +116,7 @@ initializeCodebase c = do -- Feel free to refactor this to use some other type than TypecheckedUnisonFile -- if it makes sense to later. -addDefsToCodebase :: forall m v a. (Monad m, Var v) +addDefsToCodebase :: forall m v a. 
(Monad m, Var v, Show a) => Codebase m v a -> UF.TypecheckedUnisonFile v a -> m () addDefsToCodebase c uf = do traverse_ (goType Right) (UF.dataDeclarationsId' uf) @@ -121,8 +124,10 @@ addDefsToCodebase c uf = do -- put terms traverse_ goTerm (UF.hashTermsId uf) where + goTerm t | debug && trace ("Codebase.addDefsToCodebase.goTerm " ++ show t) False = undefined goTerm (r, tm, tp) = putTerm c r tm tp - goType :: (t -> Decl v a) -> (Reference.Id, t) -> m () + goType :: Show t => (t -> Decl v a) -> (Reference.Id, t) -> m () + goType _f pair | debug && trace ("Codebase.addDefsToCodebase.goType " ++ show pair) False = undefined goType f (ref, decl) = putTypeDeclaration c ref (f decl) getTypeOfConstructor :: @@ -138,7 +143,9 @@ getTypeOfConstructor _ r cid = typeLookupForDependencies :: (Monad m, Var v, BuiltinAnnotation a) => Codebase m v a -> Set Reference -> m (TL.TypeLookup v a) -typeLookupForDependencies codebase = foldM go mempty +typeLookupForDependencies codebase s = do + when debug $ traceM $ "typeLookupForDependencies " ++ show s + foldM go mempty s where go tl ref@(Reference.DerivedId id) = fmap (tl <>) $ getTypeOfTerm codebase ref >>= \case @@ -246,7 +253,7 @@ makeSelfContained' code uf = do getTypeOfTerm :: (Applicative m, Var v, BuiltinAnnotation a) => Codebase m v a -> Reference -> m (Maybe (Type v a)) -getTypeOfTerm _c r | trace ("Codebase.getTypeOfTerm " ++ show r) False = undefined +getTypeOfTerm _c r | debug && trace ("Codebase.getTypeOfTerm " ++ show r) False = undefined getTypeOfTerm c r = case r of Reference.DerivedId h -> getTypeOfTermImpl c h r@Reference.Builtin{} -> diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs index e1517a3a7c..dac9b45eb8 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs @@ -1691,7 +1691,7 @@ loop = do (Map.fromList Builtin.builtinEffectDecls) mempty 
mempty eval $ AddDefsToCodebase uf - -- these have not neceesarily been added yet + -- these have not necessarily been added yet eval $ AddDefsToCodebase IOSource.typecheckedFile' -- add the names; note, there are more names than definitions diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 17a1ee771c..69012b7fda 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE QuasiQuotes #-} @@ -82,6 +83,10 @@ import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM import qualified System.FilePath as FilePath import qualified Control.Concurrent +import qualified Data.List as List + +debug :: Bool +debug = False codebasePath :: FilePath codebasePath = ".unison" "v2" "unison.sqlite3" @@ -127,7 +132,7 @@ initCodebaseAndExit mdir = do -- initializes a new codebase here (i.e. 
`ucm -codebase dir init`) initCodebase :: FilePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) initCodebase path = do - traceM $ "initCodebase " ++ path + Monad.when debug $ traceM $ "initCodebase " ++ path prettyDir <- P.string <$> canonicalizePath path Monad.whenM (codebaseExists path) do @@ -160,7 +165,7 @@ getCodebaseDir = maybe getHomeDirectory pure -- checks if a db exists at `path` with the minimum schema codebaseExists :: CodebasePath -> IO Bool codebaseExists root = do - traceM $ "codebaseExists " ++ root + Monad.when debug $ traceM $ "codebaseExists " ++ root Control.Exception.catch @Sqlite.SQLError (sqliteCodebase root >>= \case Left _ -> pure False @@ -188,20 +193,35 @@ data BufferEntry a = BufferEntry } deriving (Eq, Show) +prettyBufferEntry :: Show a => Hash -> BufferEntry a -> String +prettyBufferEntry (h :: Hash) BufferEntry{..} = + "BufferEntry " ++ show h ++ "\n" + ++ " { beComponentTargetSize = " ++ show beComponentTargetSize ++ "\n" + ++ " , beComponent = " + ++ if Map.size beComponent < 2 then show $ Map.toList beComponent else mkString (Map.toList beComponent) (Just "\n [ ") " , " (Just "]\n") + ++ " , beMissingDependencies =" + ++ if Set.size beMissingDependencies < 2 then show $ Set.toList beMissingDependencies else mkString (Set.toList beMissingDependencies) (Just "\n [ ") " , " (Just "]\n") + ++ " , beWaitingDependents =" + ++ if Set.size beWaitingDependents < 2 then show $ Set.toList beWaitingDependents else mkString (Set.toList beWaitingDependents) (Just "\n [ ") " , " (Just "]\n") + ++ " }" + where + mkString :: (Foldable f, Show a) => f a -> Maybe String -> String -> Maybe String -> String + mkString as start middle end = fromMaybe "" start ++ List.intercalate middle (show <$> toList as) ++ fromMaybe "" end + type TermBufferEntry = BufferEntry (Term Symbol Ann, Type Symbol Ann) type DeclBufferEntry = BufferEntry (Decl Symbol Ann) unsafeGetConnection :: CodebasePath -> IO Sqlite.Connection unsafeGetConnection root = do - traceM $ 
"unsafeGetconnection " ++ root ++ " -> " ++ (root codebasePath) + Monad.when debug $ traceM $ "unsafeGetconnection " ++ root ++ " -> " ++ (root codebasePath) conn <- Sqlite.open $ root codebasePath runReaderT Q.setFlags conn pure conn sqliteCodebase :: CodebasePath -> IO (Either [(Q.SchemaType, Q.SchemaName)] (IO (), Codebase1.Codebase IO Symbol Ann)) sqliteCodebase root = do - traceM $ "sqliteCodebase " ++ root + Monad.when debug $ traceM $ "sqliteCodebase " ++ root conn <- unsafeGetConnection root runReaderT Q.checkForMissingSchema conn >>= \case [] -> do @@ -232,7 +252,7 @@ sqliteCodebase root = do getDeclTypeById = fmap Cv.decltype2to1 . Ops.getDeclTypeByReference getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) - getTypeOfTermImpl id | trace ("getTypeOfTermImpl " ++ show id) False = undefined + getTypeOfTermImpl id | debug && trace ("getTypeOfTermImpl " ++ show id) False = undefined getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = runDB' conn do type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) @@ -245,11 +265,13 @@ sqliteCodebase root = do Cv.decl2to1 h1 getCycleLen decl2 putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () + putTerm id tm tp | debug && trace (show "SqliteCodebase.putTerm " ++ show id ++ " " ++ show tm ++ " " ++ show tp) False = undefined putTerm (Reference.Id h@(Cv.hash1to2 -> h2) i n') tm tp = runDB conn $ unlessM - (Ops.objectExistsForHash h2 >>= \b -> do traceM $ "objectExistsForHash " ++ show h2 ++ " = " ++ show b; pure b) - ( withBuffer termBuffer h \(BufferEntry size comp missing waiting) -> do + (Ops.objectExistsForHash h2 >>= if debug then \b -> do traceM $ "objectExistsForHash " ++ show h2 ++ " = " ++ show b; pure b else pure) + ( withBuffer termBuffer h \be@(BufferEntry size comp missing waiting) -> do + Monad.when debug $ traceM $ "adding to BufferEntry" ++ show be let size' = Just n' -- if size was previously set, it's expected to match size'. 
case size of @@ -276,17 +298,27 @@ sqliteCodebase root = do putBuffer :: (MonadIO m, Show a) => TVar (Map Hash (BufferEntry a)) -> Hash -> BufferEntry a -> m () putBuffer tv h e = do - traceM $ "putBuffer " ++ show h ++ " " ++ show e + Monad.when debug $ traceM $ "putBuffer " ++ prettyBufferEntry h e atomically $ modifyTVar tv (Map.insert h e) - withBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> (BufferEntry a -> m b) -> m b - withBuffer tv h f = + withBuffer :: (MonadIO m, Show a) => TVar (Map Hash (BufferEntry a)) -> Hash -> (BufferEntry a -> m b) -> m b + withBuffer tv h f = do + Monad.when debug $ readTVarIO tv >>= \tv -> traceM $ "tv = " ++ show tv Map.lookup h <$> readTVarIO tv >>= \case - Just e -> f e - Nothing -> f (BufferEntry Nothing Map.empty Set.empty Set.empty) + Just e -> do + Monad.when debug $ traceM $ "SqliteCodebase.withBuffer " ++ prettyBufferEntry h e + f e + Nothing -> do + Monad.when debug $ traceM $ "SqliteCodebase.with(new)Buffer " ++ show h + f (BufferEntry Nothing Map.empty Set.empty Set.empty) + + removeBuffer :: (MonadIO m, Show a) => TVar (Map Hash (BufferEntry a)) -> Hash -> m () + removeBuffer _tv h | debug && trace ("removeBuffer " ++ show h) False = undefined + removeBuffer tv h = do + Monad.when debug $ readTVarIO tv >>= \tv -> traceM $ "before delete: " ++ show tv + atomically $ modifyTVar tv (Map.delete h) + Monad.when debug $ readTVarIO tv >>= \tv -> traceM $ "after delete: " ++ show tv - removeBuffer :: MonadIO m => TVar (Map Hash (BufferEntry a)) -> Hash -> m () - removeBuffer tv h = atomically $ modifyTVar tv (Map.delete h) addBufferDependent :: (MonadIO m, Show a) => Hash -> TVar (Map Hash (BufferEntry a)) -> Hash -> m () addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do @@ -298,6 +330,7 @@ sqliteCodebase root = do (Hash -> m ()) -> Hash -> m () + tryFlushBuffer _ _ _ h | debug && trace ("tryFlushBuffer " ++ show h) False = undefined tryFlushBuffer buf saveComponent 
tryWaiting h@(Cv.hash1to2 -> h2) = -- skip if it has already been flushed unlessM (Ops.objectExistsForHash h2) $ withBuffer buf h try @@ -308,10 +341,15 @@ sqliteCodebase root = do filterM (fmap not . Ops.objectExistsForHash . Cv.hash1to2) (toList missing) + Monad.when debug do + traceM $ "tryFlushBuffer.missing' = " ++ show missing' + traceM $ "tryFlushBuffer.size = " ++ show size + traceM $ "tryFlushBuffer.length comp = " ++ show (length comp) if null missing' && size == fromIntegral (length comp) then do saveComponent h2 (toList comp) removeBuffer buf h + Monad.when debug $ traceM $ "tryFlushBuffer.notify waiting " ++ show waiting traverse_ tryWaiting waiting else -- update putBuffer buf h $ @@ -320,6 +358,7 @@ sqliteCodebase root = do pure () tryFlushTermBuffer :: EDB m => Hash -> m () + tryFlushTermBuffer h | debug && trace ("tryFlushTermBuffer " ++ show h) False = undefined tryFlushTermBuffer h = tryFlushBuffer termBuffer @@ -331,6 +370,7 @@ sqliteCodebase root = do h tryFlushDeclBuffer :: EDB m => Hash -> m () + tryFlushDeclBuffer h | debug && trace ("tryFlushDeclBuffer " ++ show h) False = undefined tryFlushDeclBuffer h = tryFlushBuffer declBuffer diff --git a/parser-typechecker/src/Unison/FileParsers.hs b/parser-typechecker/src/Unison/FileParsers.hs index 6b36337a12..155d4d5dc9 100644 --- a/parser-typechecker/src/Unison/FileParsers.hs +++ b/parser-typechecker/src/Unison/FileParsers.hs @@ -44,6 +44,9 @@ type Type v = Type.Type v Ann type UnisonFile v = UF.UnisonFile v Ann type Result' v = Result (Seq (Note v Ann)) +debug :: Bool +debug = False + convertNotes :: Ord v => Typechecker.Notes v ann -> Seq (Note v ann) convertNotes (Typechecker.Notes bugs es is) = (CompilerBug . 
TypecheckerBug <$> bugs) <> (TypeError <$> es) <> (TypeInfo <$> Seq.fromList is') where @@ -66,6 +69,7 @@ parseAndSynthesizeFile m (Either Names0 (UF.TypecheckedUnisonFile v Ann)) parseAndSynthesizeFile ambient typeLookupf env filePath src = do + when debug $ traceM "parseAndSynthesizeFile" uf <- Result.fromParsing $ Parsers.parseFile filePath (unpack src) env let names0 = Names.currentNames (Parser.names env) (tm, tdnrMap, typeLookup) <- resolveNames typeLookupf names0 uf From adf80d4d007b6df134a014f984063b611394512b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 2 Feb 2021 18:03:32 -0500 Subject: [PATCH 094/225] start adding dependencies to index --- .../U/Codebase/Sqlite/Operations.hs | 34 +- codebase2/codebase/U/Codebase/Decl.hs | 10 +- codebase2/codebase/U/Codebase/Term.hs | 299 +++++++++++------- codebase2/codebase/U/Codebase/Type.hs | 65 ++-- codebase2/codebase/unison-codebase.cabal | 1 + codebase2/util/U/Util/Set.hs | 4 + 6 files changed, 258 insertions(+), 155 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index dcc3675010..24ed171535 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -14,8 +14,8 @@ module U.Codebase.Sqlite.Operations where import Control.Lens (Lens') import qualified Control.Lens as Lens -import Control.Monad (MonadPlus (mzero), join, (<=<)) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) +import Control.Monad (MonadPlus (mzero), join, when, (<=<)) import Control.Monad.State (MonadState, StateT, evalStateT) import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) @@ -113,7 +113,7 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import Control.Monad (when) +import 
Data.Foldable (traverse_) -- * Error handling @@ -427,7 +427,13 @@ saveTermComponent h terms = do hashId <- Q.saveHashHash h let li = S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements bytes = S.putBytes S.putTermFormat $ S.Term.Term li - Q.saveObject hashId OT.TermComponent bytes + oId <- Q.saveObject hashId OT.TermComponent bytes + -- populate dependents index + error "todo: populate dependents index" + -- populate type indexes + error "todo: populate type index" + error "todo: populate type-mentions index" + pure oId -- | implementation detail of c2{s,w}Term -- The Type is optional, because we don't store them for watch expression results. @@ -688,7 +694,23 @@ saveDeclComponent h decls = do hashId <- Q.saveHashHash h let li = S.Decl.LocallyIndexedComponent $ Vector.fromList sDeclElements bytes = S.putBytes S.putDeclFormat $ S.Decl.Decl li - Q.saveObject hashId OT.DeclComponent bytes + oId <- Q.saveObject hashId OT.DeclComponent bytes + -- populate dependents index + let dependencies :: Set (S.Reference.Reference, S.Reference.Id) = foldMap unlocalizeRefs (sDeclElements `zip` [0..]) + unlocalizeRefs :: ((LocalIds' Db.TextId Db.ObjectId, S.Decl.Decl Symbol), Word64) -> Set (S.Reference.Reference, S.Reference.Id) + unlocalizeRefs ((LocalIds tIds oIds, decl), i) = + let self = C.Reference.Id oId i + dependencies :: Set (C.Reference.Reference' LocalTextId (Maybe LocalDefnId)) = C.Decl.dependencies decl + getSRef :: C.Reference.Reference' LocalTextId (Maybe LocalDefnId) -> Maybe S.Reference.Reference + getSRef (C.ReferenceBuiltin t) = Just (C.ReferenceBuiltin (tIds Vector.! fromIntegral t)) + getSRef (C.Reference.Derived (Just h) i) = Just (C.Reference.Derived (oIds Vector.! fromIntegral h) i) + getSRef _selfCycleRef@(C.Reference.Derived Nothing _) = Nothing + in Set.mapMaybe (fmap (,self) . 
getSRef) dependencies + traverse_ (uncurry Q.addToDependentsIndex) dependencies + -- populate type indexes + error "todo: populate type index for constructors" + error "todo: populate type-mentions index for constructors" + pure oId c2sDecl :: forall m t d. EDB m => (Text -> m t) -> (H.Hash -> m d) -> C.Decl Symbol -> m (LocalIds' t d, S.Decl.Decl Symbol) c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do @@ -735,8 +757,8 @@ loadDeclByReference (C.Reference.Id h i) = do hashes <- traverse loadHashByObjectId $ LocalIds.defnLookup localIds -- substitute the text and hashes back into the term - let substText (LocalTextId w) = texts Vector.! fromIntegral w - substHash (LocalDefnId w) = hashes Vector.! fromIntegral w + let substText tIdx = texts Vector.! fromIntegral tIdx + substHash hIdx = hashes Vector.! fromIntegral hIdx substTypeRef :: S.Decl.TypeRef -> C.Decl.TypeRef substTypeRef = bimap substText (fmap substHash) pure (C.Decl.DataDeclaration dt m b (C.Type.rmap substTypeRef <$> ct)) -- lens might be nice here diff --git a/codebase2/codebase/U/Codebase/Decl.hs b/codebase2/codebase/U/Codebase/Decl.hs index b45c35e0b3..cce985a8a1 100644 --- a/codebase2/codebase/U/Codebase/Decl.hs +++ b/codebase2/codebase/U/Codebase/Decl.hs @@ -1,8 +1,10 @@ {-# LANGUAGE DeriveFoldable #-} {-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE RecordWildCards #-} module U.Codebase.Decl where +import Data.Set (Set) import Data.Text (Text) import Data.Word (Word64) import U.Codebase.Reference (Reference') @@ -21,6 +23,7 @@ data DeclType = Data | Effect type Decl v = DeclR TypeRef v type TypeRef = Reference' Text (Maybe Hash) + type Type v = TypeR TypeRef v data Modifier = Structural | Unique Text @@ -32,12 +35,13 @@ data DeclR r v = DataDeclaration bound :: [v], constructorTypes :: [TypeR r v] } - deriving Show --- instance Hashable ConstructorType where --- tokens b = [Tag . 
fromIntegral $ fromEnum b] + deriving (Show) -- * Hashing stuff +dependencies :: (Ord r, Ord v) => DeclR r v -> Set r +dependencies (DataDeclaration _ _ _ cts) = foldMap Type.dependencies cts + data V v = Bound v | Ctor Int -- toABT :: Ord v => Decl v -> ABT.Term F (V v) () diff --git a/codebase2/codebase/U/Codebase/Term.hs b/codebase2/codebase/U/Codebase/Term.hs index 9b6239f2c2..8461b340fa 100644 --- a/codebase2/codebase/U/Codebase/Term.hs +++ b/codebase2/codebase/U/Codebase/Term.hs @@ -1,43 +1,51 @@ -{-# LANGUAGE TypeApplications #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE BlockArguments #-} {-# LANGUAGE DeriveFoldable #-} {-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} {-# LANGUAGE UnicodeSyntax #-} {-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE ScopedTypeVariables #-} - module U.Codebase.Term where +import qualified Control.Monad.Writer as Writer +import qualified Data.Foldable as Foldable import Data.Int (Int64) import Data.Sequence (Seq) +import Data.Set (Set) +import qualified Data.Set as Set import Data.Text (Text) import Data.Word (Word64) import GHC.Generics (Generic, Generic1) -import U.Codebase.Reference (Reference, Reference'(ReferenceBuiltin, ReferenceDerived)) +import U.Codebase.Reference (Reference, Reference' (ReferenceBuiltin, ReferenceDerived)) import qualified U.Codebase.Reference as Reference import U.Codebase.Referent (Referent') import U.Codebase.Type (TypeR) -import U.Util.Hash (Hash) -import qualified U.Core.ABT as ABT -import qualified U.Util.Hashable as H import qualified U.Codebase.Type as Type +import qualified U.Core.ABT as ABT +import U.Util.Hash (Hash) import qualified U.Util.Hash 
as Hash -import qualified Data.Foldable as Foldable -  +import qualified U.Util.Hashable as H + type ConstructorId = Word64 type Term v = ABT.Term (F v) v () + type Type v = TypeR TypeRef v + type TermRef = Reference' Text (Maybe Hash) + type TypeRef = Reference + type TermLink = Referent' (Reference' Text (Maybe Hash)) (Reference' Text Hash) + type TypeLink = Reference -- | Base functor for terms in the Unison codebase @@ -119,60 +127,109 @@ data SeqOp | PConcat deriving (Eq, Show) -extraMap :: forall text termRef typeRef termLink typeLink vt - text' termRef' typeRef' termLink' typeLink' vt' v a - . (Ord v, Ord vt') - => (text -> text') -> (termRef -> termRef') -> (typeRef -> typeRef') - -> (termLink -> termLink') -> (typeLink -> typeLink') -> (vt -> vt') - -> ABT.Term (F' text termRef typeRef termLink typeLink vt) v a - -> ABT.Term (F' text' termRef' typeRef' termLink' typeLink' vt') v a -extraMap ftext ftermRef ftypeRef ftermLink ftypeLink fvt = go' where - go' = ABT.transform go - go :: forall x. 
F' text termRef typeRef termLink typeLink vt x -> F' text' termRef' typeRef' termLink' typeLink' vt' x - go = \case - Int i -> Int i - Nat n -> Nat n - Float d -> Float d - Boolean b -> Boolean b - Text t -> Text (ftext t) - Char c -> Char c - Ref r -> Ref (ftermRef r) - Constructor r cid -> Constructor (ftypeRef r) cid - Request r cid -> Request (ftypeRef r) cid - Handle e h -> Handle e h - App f a -> App f a - Ann a typ -> Ann a (Type.rmap ftypeRef $ ABT.vmap fvt typ) - Sequence s -> Sequence s - If c t f -> If c t f - And p q -> And p q - Or p q -> Or p q - Lam b -> Lam b - LetRec bs b -> LetRec bs b - Let a b -> Let a b - Match s cs -> Match s (goCase <$> cs) - TermLink r -> TermLink (ftermLink r) - TypeLink r -> TypeLink (ftypeLink r) - goCase :: MatchCase text typeRef x -> MatchCase text' typeRef' x - goCase (MatchCase p g b) = MatchCase (goPat p) g b - goPat = rmapPattern ftext ftypeRef +extraMap :: + forall + text + termRef + typeRef + termLink + typeLink + vt + text' + termRef' + typeRef' + termLink' + typeLink' + vt' + v + a. + (Ord v, Ord vt') => + (text -> text') -> + (termRef -> termRef') -> + (typeRef -> typeRef') -> + (termLink -> termLink') -> + (typeLink -> typeLink') -> + (vt -> vt') -> + ABT.Term (F' text termRef typeRef termLink typeLink vt) v a -> + ABT.Term (F' text' termRef' typeRef' termLink' typeLink' vt') v a +extraMap ftext ftermRef ftypeRef ftermLink ftypeLink fvt = go' + where + go' = ABT.transform go + go :: forall x. 
F' text termRef typeRef termLink typeLink vt x -> F' text' termRef' typeRef' termLink' typeLink' vt' x + go = \case + Int i -> Int i + Nat n -> Nat n + Float d -> Float d + Boolean b -> Boolean b + Text t -> Text (ftext t) + Char c -> Char c + Ref r -> Ref (ftermRef r) + Constructor r cid -> Constructor (ftypeRef r) cid + Request r cid -> Request (ftypeRef r) cid + Handle e h -> Handle e h + App f a -> App f a + Ann a typ -> Ann a (Type.rmap ftypeRef $ ABT.vmap fvt typ) + Sequence s -> Sequence s + If c t f -> If c t f + And p q -> And p q + Or p q -> Or p q + Lam b -> Lam b + LetRec bs b -> LetRec bs b + Let a b -> Let a b + Match s cs -> Match s (goCase <$> cs) + TermLink r -> TermLink (ftermLink r) + TypeLink r -> TypeLink (ftypeLink r) + goCase :: MatchCase text typeRef x -> MatchCase text' typeRef' x + goCase (MatchCase p g b) = MatchCase (goPat p) g b + goPat = rmapPattern ftext ftypeRef rmapPattern :: (t -> t') -> (r -> r') -> Pattern t r -> Pattern t' r' -rmapPattern ft fr = go where - go = \case - PUnbound -> PUnbound - PVar -> PVar - PBoolean b -> PBoolean b - PInt i -> PInt i - PNat n -> PNat n - PFloat d -> PFloat d - PText t -> PText (ft t) - PChar c -> PChar c - PConstructor r i ps -> PConstructor (fr r) i (go <$> ps) - PAs p -> PAs (go p) - PEffectPure p -> PEffectPure (go p) - PEffectBind r i ps p -> PEffectBind (fr r) i (go <$> ps) (go p) - PSequenceLiteral ps -> PSequenceLiteral (go <$> ps) - PSequenceOp p1 op p2 -> PSequenceOp (go p1) op (go p2) +rmapPattern ft fr = go + where + go = \case + PUnbound -> PUnbound + PVar -> PVar + PBoolean b -> PBoolean b + PInt i -> PInt i + PNat n -> PNat n + PFloat d -> PFloat d + PText t -> PText (ft t) + PChar c -> PChar c + PConstructor r i ps -> PConstructor (fr r) i (go <$> ps) + PAs p -> PAs (go p) + PEffectPure p -> PEffectPure (go p) + PEffectBind r i ps p -> PEffectBind (fr r) i (go <$> ps) (go p) + PSequenceLiteral ps -> PSequenceLiteral (go <$> ps) + PSequenceOp p1 op p2 -> PSequenceOp (go p1) op (go 
p2) + +dependencies :: + (Ord termRef, Ord typeRef, Ord termLink, Ord typeLink, Ord v) => + ABT.Term (F' text termRef typeRef termLink typeLink vt) v a -> + (Set termRef, Set typeRef, Set termLink, Set typeLink) +dependencies = + Writer.execWriter . ABT.visit_ \case + Ref r -> termRef r + Constructor r _ -> typeRef r + Request r _ -> typeRef r + Match _ cases -> Foldable.for_ cases \case + MatchCase pat _guard _body -> go pat + where + go = \case + PConstructor r _i args -> typeRef r *> Foldable.traverse_ go args + PAs pat -> go pat + PEffectPure pat -> go pat + PEffectBind r _i args k -> typeRef r *> Foldable.traverse_ go args *> go k + PSequenceLiteral pats -> Foldable.traverse_ go pats + PSequenceOp l _op r -> go l *> go r + _ -> pure () + TermLink r -> termLink r + TypeLink r -> typeLink r + _ -> pure () + where + termRef r = Writer.tell (Set.singleton r, mempty, mempty, mempty) + typeRef r = Writer.tell (mempty, Set.singleton r, mempty, mempty) + termLink r = Writer.tell (mempty, mempty, Set.singleton r, mempty) + typeLink r = Writer.tell (mempty, mempty, mempty, Set.singleton r) -- * Instances @@ -200,65 +257,69 @@ instance H.Hashable (Pattern Text Reference) where tokens (PChar c) = H.Tag 13 : H.tokens c instance (Eq v, Show v) => H.Hashable1 (F v) where - hash1 hashCycle hash e - = let (tag, hashed, varint) = - (H.Tag, H.Hashed, H.Nat . fromIntegral) - in - case e of - -- So long as `Reference.Derived` ctors are created using the same - -- hashing function as is used here, this case ensures that references - -- are 'transparent' wrt hash and hashing is unaffected by whether - -- expressions are linked. So for example `x = 1 + 1` and `y = x` hash - -- the same. + hash1 hashCycle hash e = + let (tag, hashed, varint) = + (H.Tag, H.Hashed, H.Nat . 
fromIntegral) + in case e of + -- So long as `Reference.Derived` ctors are created using the same + -- hashing function as is used here, this case ensures that references + -- are 'transparent' wrt hash and hashing is unaffected by whether + -- expressions are linked. So for example `x = 1 + 1` and `y = x` hash + -- the same. Ref (Reference.Derived (Just h) 0) -> H.fromBytes (Hash.toBytes h) - Ref (Reference.Derived h i) -> H.accumulate - [ tag 1 -- it's a term - , tag 1 -- it's a derived reference - , H.accumulateToken (Hash.toBytes <$> h) - , H.Nat i - ] + Ref (Reference.Derived h i) -> + H.accumulate + [ tag 1, -- it's a term + tag 1, -- it's a derived reference + H.accumulateToken (Hash.toBytes <$> h), + H.Nat i + ] -- Note: start each layer with leading `1` byte, to avoid collisions -- with types, which start each layer with leading `0`. -- See `Hashable1 Type.F` _ -> - H.accumulate - $ tag 1 -- it's a term - : case e of - Nat n -> tag 64 : H.tokens n - Int i -> tag 65 : H.tokens i - Float d -> tag 66 : H.tokens d - Boolean b -> tag 67 : H.tokens b - Text t -> tag 68 : H.tokens t - Char c -> tag 69 : H.tokens c - Ref (ReferenceBuiltin name) -> [tag 2, H.accumulateToken name] - Ref ReferenceDerived {} -> - error "handled above, but GHC can't figure this out" - App a a2 -> [tag 3, hashed (hash a), hashed (hash a2)] - Ann a t -> [tag 4, hashed (hash a), hashed (ABT.hash t)] - Sequence as -> tag 5 : varint (fromIntegral (length as)) : map + H.accumulate $ + tag 1 : -- it's a term + case e of + Nat n -> tag 64 : H.tokens n + Int i -> tag 65 : H.tokens i + Float d -> tag 66 : H.tokens d + Boolean b -> tag 67 : H.tokens b + Text t -> tag 68 : H.tokens t + Char c -> tag 69 : H.tokens c + Ref (ReferenceBuiltin name) -> [tag 2, H.accumulateToken name] + Ref ReferenceDerived {} -> + error "handled above, but GHC can't figure this out" + App a a2 -> [tag 3, hashed (hash a), hashed (hash a2)] + Ann a t -> [tag 4, hashed (hash a), hashed (ABT.hash t)] + Sequence as -> + tag 
5 : + varint (fromIntegral (length as)) : + map (hashed . hash) (Foldable.toList as) - Lam a -> [tag 6, hashed (hash a)] - -- note: we use `hashCycle` to ensure result is independent of - -- let binding order - LetRec as a -> case hashCycle as of - (hs, hash) -> tag 7 : hashed (hash a) : map hashed hs - -- here, order is significant, so don't use hashCycle - Let b a -> [tag 8, hashed $ hash b, hashed $ hash a] - If b t f -> - [tag 9, hashed $ hash b, hashed $ hash t, hashed $ hash f] - Request r n -> [tag 10, H.accumulateToken r, varint n] - Constructor r n -> [tag 12, H.accumulateToken r, varint n] - Match e branches -> - tag 13 : hashed (hash e) : concatMap h branches - where - h (MatchCase pat guard branch) = concat - [ [H.accumulateToken pat] - , Foldable.toList @Maybe (hashed . hash <$> guard) - , [hashed (hash branch)] - ] - Handle h b -> [tag 15, hashed $ hash h, hashed $ hash b] - And x y -> [tag 16, hashed $ hash x, hashed $ hash y] - Or x y -> [tag 17, hashed $ hash x, hashed $ hash y] - TermLink r -> [tag 18, H.accumulateToken r] - TypeLink r -> [tag 19, H.accumulateToken r] + Lam a -> [tag 6, hashed (hash a)] + -- note: we use `hashCycle` to ensure result is independent of + -- let binding order + LetRec as a -> case hashCycle as of + (hs, hash) -> tag 7 : hashed (hash a) : map hashed hs + -- here, order is significant, so don't use hashCycle + Let b a -> [tag 8, hashed $ hash b, hashed $ hash a] + If b t f -> + [tag 9, hashed $ hash b, hashed $ hash t, hashed $ hash f] + Request r n -> [tag 10, H.accumulateToken r, varint n] + Constructor r n -> [tag 12, H.accumulateToken r, varint n] + Match e branches -> + tag 13 : hashed (hash e) : concatMap h branches + where + h (MatchCase pat guard branch) = + concat + [ [H.accumulateToken pat], + Foldable.toList @Maybe (hashed . 
hash <$> guard), + [hashed (hash branch)] + ] + Handle h b -> [tag 15, hashed $ hash h, hashed $ hash b] + And x y -> [tag 16, hashed $ hash x, hashed $ hash y] + Or x y -> [tag 17, hashed $ hash x, hashed $ hash y] + TermLink r -> [tag 18, H.accumulateToken r] + TypeLink r -> [tag 19, H.accumulateToken r] diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index 86aa35752a..98b67e4bd8 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -1,24 +1,28 @@ +{-# LANGUAGE BlockArguments #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveFoldable #-} {-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE Rank2Types #-} {-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE BlockArguments #-} module U.Codebase.Type where -import qualified U.Core.ABT as ABT -import U.Codebase.Reference (Reference, Reference') +import qualified Control.Monad.Writer.Strict as Writer +import Data.Set (Set) +import qualified Data.Set as Set import Data.Text (Text) -import U.Util.Hash (Hash) import U.Codebase.Kind (Kind) -import Unsafe.Coerce (unsafeCoerce) +import U.Codebase.Reference (Reference, Reference') +import qualified U.Core.ABT as ABT +import U.Util.Hash (Hash) import U.Util.Hashable (Hashable, Hashable1) import qualified U.Util.Hashable as Hashable +import Unsafe.Coerce (unsafeCoerce) +import Data.Functor (($>)) -- | For standalone types, like those in Term.Ann type FT = F' Reference @@ -35,8 +39,8 @@ data F' r a | Effects [a] | Forall a | IntroOuter a -- binder like ∀, used to introduce variables that are - -- bound by outer type signatures, to support scoped type - -- variables + -- bound by outer type signatures, to support scoped type + -- variables deriving (Foldable, Functor, Eq, Ord, Show, Traversable) -- | Non-recursive type @@ -57,24 +61,31 @@ rtraverse 
g = ABT.transformM \case Ref r -> Ref <$> g r x -> pure $ unsafeCoerce x +dependencies :: (Ord v, Ord r) => ABT.Term (F' r) v a -> Set r +dependencies = Writer.execWriter . ABT.visit' f + where + f :: Ord r => F' r a -> Writer.Writer (Set r) (F' r a) + f t@(Ref r) = Writer.tell (Set.singleton r) $> t + f t = pure t + instance Hashable r => Hashable1 (F' r) where hash1 hashCycle hash e = - let - (tag, hashed) = (Hashable.Tag, Hashable.Hashed) - -- Note: start each layer with leading `0` byte, to avoid collisions with - -- terms, which start each layer with leading `1`. See `Hashable1 Term.F` - in Hashable.accumulate $ tag 0 : case e of - Ref r -> [tag 0, Hashable.accumulateToken r] - Arrow a b -> [tag 1, hashed (hash a), hashed (hash b) ] - App a b -> [tag 2, hashed (hash a), hashed (hash b) ] - Ann a k -> [tag 3, hashed (hash a), Hashable.accumulateToken k ] - -- Example: - -- a) {Remote, Abort} (() -> {Remote} ()) should hash the same as - -- b) {Abort, Remote} (() -> {Remote} ()) but should hash differently from - -- c) {Remote, Abort} (() -> {Abort} ()) - Effects es -> let - (hs, _) = hashCycle es - in tag 4 : map hashed hs - Effect e t -> [tag 5, hashed (hash e), hashed (hash t)] - Forall a -> [tag 6, hashed (hash a)] - IntroOuter a -> [tag 7, hashed (hash a)] + let (tag, hashed) = (Hashable.Tag, Hashable.Hashed) + in -- Note: start each layer with leading `0` byte, to avoid collisions with + -- terms, which start each layer with leading `1`. 
See `Hashable1 Term.F` + Hashable.accumulate $ + tag 0 : case e of + Ref r -> [tag 0, Hashable.accumulateToken r] + Arrow a b -> [tag 1, hashed (hash a), hashed (hash b)] + App a b -> [tag 2, hashed (hash a), hashed (hash b)] + Ann a k -> [tag 3, hashed (hash a), Hashable.accumulateToken k] + -- Example: + -- a) {Remote, Abort} (() -> {Remote} ()) should hash the same as + -- b) {Abort, Remote} (() -> {Remote} ()) but should hash differently from + -- c) {Remote, Abort} (() -> {Abort} ()) + Effects es -> + let (hs, _) = hashCycle es + in tag 4 : map hashed hs + Effect e t -> [tag 5, hashed (hash e), hashed (hash t)] + Forall a -> [tag 6, hashed (hash a)] + IntroOuter a -> [tag 7, hashed (hash a)] diff --git a/codebase2/codebase/unison-codebase.cabal b/codebase2/codebase/unison-codebase.cabal index bb4b99f953..45bb0355bf 100644 --- a/codebase2/codebase/unison-codebase.cabal +++ b/codebase2/codebase/unison-codebase.cabal @@ -36,6 +36,7 @@ library base, containers, lens, + mtl, text, unison-core, unison-util diff --git a/codebase2/util/U/Util/Set.hs b/codebase2/util/U/Util/Set.hs index cef2712246..788d2a2f9f 100644 --- a/codebase2/util/U/Util/Set.hs +++ b/codebase2/util/U/Util/Set.hs @@ -3,6 +3,10 @@ module U.Util.Set where import Data.Set (Set) import qualified Data.Set as Set import qualified Data.Traversable as T +import qualified Data.Maybe as Maybe traverse :: (Applicative f, Ord b) => (a -> f b) -> Set a -> f (Set b) traverse f = fmap Set.fromList . T.traverse f . Set.toList + +mapMaybe :: Ord b => (a -> Maybe b) -> Set a -> Set b +mapMaybe f = Set.fromList . Maybe.mapMaybe f . 
Set.toList \ No newline at end of file From fa100de7c93b0ef92662ffc24a9c60e7396caa5f Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 5 Feb 2021 12:09:56 -0500 Subject: [PATCH 095/225] move TermUtil/TypeUtil from Convert project to util-term --- .../util-term/U/Util/Term.hs | 20 +-- .../util-term/U/Util/Type.hs | 2 +- codebase2/util-term/unison-util-term.cabal | 29 ++++ hie.yaml | 138 ++++++++++++++++-- stack.yaml | 1 + 5 files changed, 164 insertions(+), 26 deletions(-) rename codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs => codebase2/util-term/U/Util/Term.hs (82%) rename codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs => codebase2/util-term/U/Util/Type.hs (98%) create mode 100644 codebase2/util-term/unison-util-term.cabal diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs b/codebase2/util-term/U/Util/Term.hs similarity index 82% rename from codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs rename to codebase2/util-term/U/Util/Term.hs index 60d31295bf..f3a6491415 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/TermUtil.hs +++ b/codebase2/util-term/U/Util/Term.hs @@ -3,7 +3,7 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE ViewPatterns #-} -module U.Codebase.Convert.TermUtil where +module U.Util.Term where import qualified U.Core.ABT as ABT import qualified U.Codebase.Term as Term @@ -43,12 +43,12 @@ dependencies = execWriter . 
ABT.visit_ \case -fold :: Monad m => - (text -> m ()) -> - (termRef -> m ()) -> - (typeRef -> m ()) -> - (termLink -> m ()) -> - (typeLink -> m ()) -> - ABT.Term (Term.F' text termRef typeRef termLink typeLink vt) v a -> - m () -fold = undefined +-- fold :: Monad m => +-- (text -> m ()) -> +-- (termRef -> m ()) -> +-- (typeRef -> m ()) -> +-- (termLink -> m ()) -> +-- (typeLink -> m ()) -> +-- ABT.Term (Term.F' text termRef typeRef termLink typeLink vt) v a -> +-- m () +-- fold = error "todo: U.Util.TermUtil.fold" diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs b/codebase2/util-term/U/Util/Type.hs similarity index 98% rename from codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs rename to codebase2/util-term/U/Util/Type.hs index 90efafe24e..7b6da9ece6 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/TypeUtil.hs +++ b/codebase2/util-term/U/Util/Type.hs @@ -2,7 +2,7 @@ {-# LANGUAGE TupleSections #-} {-# LANGUAGE PatternSynonyms #-} -module U.Codebase.Convert.TypeUtil where +module U.Util.Type where import U.Codebase.Type (TypeT, F'(..), TypeR) import qualified U.Core.ABT.Var as ABT diff --git a/codebase2/util-term/unison-util-term.cabal b/codebase2/util-term/unison-util-term.cabal new file mode 100644 index 0000000000..c56407e009 --- /dev/null +++ b/codebase2/util-term/unison-util-term.cabal @@ -0,0 +1,29 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-util-term +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + exposed-modules: + U.Util.Term + U.Util.Type + -- other-modules: + -- other-extensions: + build-depends: + base, + containers, + mtl, + unison-core, + unison-codebase + hs-source-dirs: . 
+ default-language: Haskell2010 diff --git a/hie.yaml b/hie.yaml index 76e1e6740a..acaffc769c 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,11 +1,5 @@ cradle: stack: - - path: "codebase-convert-1to2/app/." - component: "unison-codebase-convert-1to2:exe:uconvert12" - - - path: "codebase-convert-1to2/lib/." - component: "unison-codebase-convert-1to2:lib" - - path: "codebase1/codebase/." component: "unison-codebase1:lib" @@ -36,29 +30,143 @@ cradle: - path: "codebase2/util-serialization/." component: "unison-util-serialization:lib" - - path: "parser-typechecker/src/." + - path: "codebase2/util-term/." + component: "unison-util-term:lib" + + - path: "parser-typechecker/src" component: "unison-parser-typechecker:lib" - - path: "parser-typechecker/unison/." + - path: "parser-typechecker/unison/Main.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/unison/System/Path.hs" component: "unison-parser-typechecker:exe:unison" - - path: "parser-typechecker/prettyprintdemo/." + - path: "parser-typechecker/unison/Version.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/prettyprintdemo/Main.hs" component: "unison-parser-typechecker:exe:prettyprintdemo" - - path: "parser-typechecker/tests/." 
+ - path: "parser-typechecker/tests/Suite.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/ABT.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/ANF.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Cache.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase/Causal.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase/Path.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/ColorText.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Common.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/DataDeclaration.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/FileParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Git.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Lexer.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/IO.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/MCode.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Range.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Referent.hs" + component: 
"unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Term.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/TermParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/TermPrinter.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Type.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/TypePrinter.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Typechecker.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Typechecker/Components.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Typechecker/Context.hs" component: "unison-parser-typechecker:exe:tests" - - path: "parser-typechecker/transcripts/." 
+ - path: "parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/UnisonSources.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/UriParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Util/Bytes.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Util/PinBoard.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Util/Pretty.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Var.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/VersionParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Core/Test/Name.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/transcripts/Transcripts.hs" component: "unison-parser-typechecker:exe:transcripts" - - path: "parser-typechecker/benchmarks/runtime/." + - path: "parser-typechecker/benchmarks/runtime/Main.hs" component: "unison-parser-typechecker:bench:runtime" - - path: "unison-core/src/." + - path: "unison-core/src" component: "unison-core1:lib" - - path: "yaks/easytest/src/." + - path: "yaks/easytest/src" component: "easytest:lib" - - path: "yaks/easytest/tests/." 
+ - path: "yaks/easytest/tests/Suite.hs" + component: "easytest:exe:runtests" + + - path: "yaks/easytest/tests" component: "easytest:test:tests" diff --git a/stack.yaml b/stack.yaml index 7d0e8cba89..22a740a822 100644 --- a/stack.yaml +++ b/stack.yaml @@ -20,6 +20,7 @@ packages: - codebase2/syntax - codebase2/util - codebase2/util-serialization +- codebase2/util-term #compiler-check: match-exact resolver: nightly-2021-01-02 From c775fad2befdc2b28d50cc4f6698777e7626b3ca Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 8 Feb 2021 11:14:08 -0500 Subject: [PATCH 096/225] added dependents/type/type-mention indexes but still not working --- .../lib/U/Codebase/Convert/SyncV1V2.hs | 4 +- .../unison-codebase-convert-1to2.cabal | 3 +- .../U/Codebase/Sqlite/Operations.hs | 65 ++++++++++++++++--- .../codebase-sqlite/sql/create-index.sql | 12 ++-- .../unison-codebase-sqlite.cabal | 3 +- codebase2/codebase/U/Codebase/Type.hs | 5 ++ hie.yaml | 6 ++ 7 files changed, 78 insertions(+), 20 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index be4d33c5e1..d72bcbee13 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -45,8 +45,8 @@ import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField) -import qualified U.Codebase.Convert.TermUtil as TermUtil -import qualified U.Codebase.Convert.TypeUtil as TypeUtil +import qualified U.Util.Term as TermUtil +import qualified U.Util.Type as TypeUtil import qualified U.Codebase.Decl as V2.Decl import qualified U.Codebase.Kind as V2.Kind import qualified U.Codebase.Reference as V2.Reference diff --git a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal index 
4fa77ee7a4..214941be7e 100644 --- a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal +++ b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal @@ -62,5 +62,6 @@ library unison-codebase, unison-codebase-sqlite, unison-util, - unison-util-serialization + unison-util-serialization, + unison-util-term default-language: Haskell2010 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 24ed171535..5399bd160b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -21,18 +21,21 @@ import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Control.Monad.Writer (MonadWriter, WriterT, runWriterT) import qualified Control.Monad.Writer as Writer +import Data.Bifoldable (bifoldMap) import Data.Bifunctor (Bifunctor (bimap)) import Data.Bitraversable (Bitraversable (bitraverse)) import Data.ByteString (ByteString) import Data.Bytes.Get (runGetS) import qualified Data.Bytes.Get as Get +import Data.Foldable (traverse_, for_) import qualified Data.Foldable as Foldable import Data.Functor (void, (<&>)) import Data.Functor.Identity (Identity) import Data.Map (Map) import qualified Data.Map as Map import qualified Data.Map.Merge.Lazy as Map -import Data.Maybe (isJust) +import Data.Maybe (catMaybes, isJust) +import Data.Monoid (First (First, getFirst)) import Data.Sequence (Seq) import qualified Data.Sequence as Seq import Data.Set (Set) @@ -113,7 +116,8 @@ import qualified U.Util.Monoid as Monoid import U.Util.Serialization (Get) import qualified U.Util.Serialization as S import qualified U.Util.Set as Set -import Data.Foldable (traverse_) +import qualified U.Util.Term as TermUtil +import qualified U.Util.Type as TypeUtil -- * Error handling @@ -429,10 +433,41 @@ saveTermComponent h terms = do bytes = S.putBytes S.putTermFormat $ S.Term.Term li oId <- 
Q.saveObject hashId OT.TermComponent bytes -- populate dependents index - error "todo: populate dependents index" + let dependencies :: Set (S.Reference.Reference, S.Reference.Id) = foldMap unlocalizeRefs (sTermElements `zip` [0 ..]) + unlocalizeRefs :: ((LocalIds, S.Term.Term, S.Term.Type), C.Reference.Pos) -> Set (S.Reference.Reference, S.Reference.Id) + unlocalizeRefs ((LocalIds tIds oIds, tm, tp), i) = + let self = C.Reference.Id oId i + dependencies :: Set S.Reference = + let (tmRefs, tpRefs, tmLinks, tpLinks) = TermUtil.dependencies tm + tpRefs' = Foldable.toList $ C.Type.dependencies tp + getTermSRef :: S.Term.TermRef -> Maybe S.Reference + getTermSRef (C.ReferenceBuiltin t) = Just (C.ReferenceBuiltin (tIds Vector.! fromIntegral t)) + getTermSRef (C.Reference.Derived (Just h) i) = Just (C.Reference.Derived (oIds Vector.! fromIntegral h) i) + getTermSRef _selfCycleRef@(C.Reference.Derived Nothing _) = Nothing + getTypeSRef :: S.Term.TypeRef -> S.Reference + getTypeSRef (C.ReferenceBuiltin t) = C.ReferenceBuiltin (tIds Vector.! fromIntegral t) + getTypeSRef (C.Reference.Derived h i) = C.Reference.Derived (oIds Vector.! fromIntegral h) i + getSTypeLink = getTypeSRef + getSTermLink :: S.Term.TermLink -> Maybe S.Reference + getSTermLink = getFirst . bifoldMap (First . getTermSRef) (First . Just . 
getTypeSRef) + in Set.fromList $ + catMaybes + (fmap getTermSRef tmRefs ++ fmap getSTermLink tmLinks) + ++ fmap getTypeSRef (tpRefs ++ tpRefs') + ++ fmap getSTypeLink tpLinks + in Set.map (, self) dependencies + traverse_ (uncurry Q.addToDependentsIndex) dependencies + -- populate type indexes - error "todo: populate type index" - error "todo: populate type-mentions index" + for_ (terms `zip` [0..]) \((_tm, tp), i) -> do + let self = C.Referent.RefId (C.Reference.Id oId i) + typeForIndexing = TypeUtil.removeAllEffectVars tp + typeMentionsForIndexing = TypeUtil.toReferenceMentions typeForIndexing + saveReferentH = bitraverse Q.saveText Q.saveHashHash + typeReferenceForIndexing <- saveReferentH $ TypeUtil.toReference typeForIndexing + Q.addToTypeIndex typeReferenceForIndexing self + traverse_ (flip Q.addToTypeMentionsIndex self <=< saveReferentH) typeMentionsForIndexing + pure oId -- | implementation detail of c2{s,w}Term @@ -696,20 +731,30 @@ saveDeclComponent h decls = do bytes = S.putBytes S.putDeclFormat $ S.Decl.Decl li oId <- Q.saveObject hashId OT.DeclComponent bytes -- populate dependents index - let dependencies :: Set (S.Reference.Reference, S.Reference.Id) = foldMap unlocalizeRefs (sDeclElements `zip` [0..]) - unlocalizeRefs :: ((LocalIds' Db.TextId Db.ObjectId, S.Decl.Decl Symbol), Word64) -> Set (S.Reference.Reference, S.Reference.Id) + let dependencies :: Set (S.Reference.Reference, S.Reference.Id) = foldMap unlocalizeRefs (sDeclElements `zip` [0 ..]) + unlocalizeRefs :: ((LocalIds, S.Decl.Decl Symbol), C.Reference.Pos) -> Set (S.Reference.Reference, S.Reference.Id) unlocalizeRefs ((LocalIds tIds oIds, decl), i) = let self = C.Reference.Id oId i - dependencies :: Set (C.Reference.Reference' LocalTextId (Maybe LocalDefnId)) = C.Decl.dependencies decl + dependencies :: Set S.Decl.TypeRef = C.Decl.dependencies decl getSRef :: C.Reference.Reference' LocalTextId (Maybe LocalDefnId) -> Maybe S.Reference.Reference getSRef (C.ReferenceBuiltin t) = Just 
(C.ReferenceBuiltin (tIds Vector.! fromIntegral t)) getSRef (C.Reference.Derived (Just h) i) = Just (C.Reference.Derived (oIds Vector.! fromIntegral h) i) getSRef _selfCycleRef@(C.Reference.Derived Nothing _) = Nothing in Set.mapMaybe (fmap (,self) . getSRef) dependencies traverse_ (uncurry Q.addToDependentsIndex) dependencies + -- populate type indexes - error "todo: populate type index for constructors" - error "todo: populate type-mentions index for constructors" + for_ (zip decls [0..]) + \(C.DataDeclaration _ _ _ ctorTypes, i) -> for_ (zip ctorTypes [0..]) + \(tp, j) -> do + let self = C.Referent.ConId (C.Reference.Id oId i) j + typeForIndexing :: C.Type.TypeT Symbol = TypeUtil.removeAllEffectVars (C.Type.typeD2T h tp) + typeReferenceForIndexing = TypeUtil.toReference typeForIndexing + typeMentionsForIndexing = TypeUtil.toReferenceMentions typeForIndexing + saveReferentH = bitraverse Q.saveText Q.saveHashHash + flip Q.addToTypeIndex self =<< saveReferentH typeReferenceForIndexing + traverse_ (flip Q.addToTypeMentionsIndex self <=< saveReferentH) typeMentionsForIndexing + pure oId c2sDecl :: forall m t d. 
EDB m => (Text -> m t) -> (H.Hash -> m d) -> C.Decl Symbol -> m (LocalIds' t d, S.Decl.Decl Symbol) diff --git a/codebase2/codebase-sqlite/sql/create-index.sql b/codebase2/codebase-sqlite/sql/create-index.sql index 921bc50775..985ecebc49 100644 --- a/codebase2/codebase-sqlite/sql/create-index.sql +++ b/codebase2/codebase-sqlite/sql/create-index.sql @@ -33,7 +33,7 @@ CREATE TABLE find_type_mentions_index ( type_reference_hash_id INTEGER NULL CONSTRAINT find_type_mentions_index_fk2 REFERENCES hash(id), type_reference_component_index INTEGER NULL, term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_mentions_index_fk3 REFERENCES hash(id), - term_referent_derived_component_index INTEGER NOT NULL, + term_referent_component_index INTEGER NOT NULL, term_referent_constructor_index INTEGER NULL, CONSTRAINT find_type_mentions_index_c1 CHECK ( (type_reference_builtin IS NULL) = @@ -41,7 +41,7 @@ CREATE TABLE find_type_mentions_index ( ), CONSTRAINT find_type_mentions_index_c2 CHECK ( (type_reference_hash_id IS NULL) = - (type_reference_component_index) IS NULL + (type_reference_component_index IS NULL) ) ); CREATE INDEX find_type_mentions_index_type ON find_type_mentions_index ( @@ -57,12 +57,12 @@ CREATE TABLE dependents_index ( dependent_object_id INTEGER NOT NULL CONSTRAINT dependents_index_fk3 REFERENCES hash(id), dependent_component_index INTEGER NOT NULL, CONSTRAINT dependents_index_c1 CHECK ( - dependency_builtin IS NULL = - dependency_object_id IS NOT NULL + (dependency_builtin IS NULL) = + (dependency_object_id IS NOT NULL) ), CONSTRAINT dependents_index_c2 CHECK ( - dependency_object_id IS NULL = - dependency_component_index IS NULL + (dependency_object_id IS NULL) = + (dependency_component_index IS NULL) ) ); CREATE INDEX dependents_by_dependency ON dependents_index ( diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index 53fa9cdad7..033b7b655b 100644 --- 
a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -58,6 +58,7 @@ library unison-codebase, unison-core, unison-util, - unison-util-serialization + unison-util-serialization, + unison-util-term hs-source-dirs: . default-language: Haskell2010 diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index 98b67e4bd8..1a8a70f395 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -23,6 +23,8 @@ import U.Util.Hashable (Hashable, Hashable1) import qualified U.Util.Hashable as Hashable import Unsafe.Coerce (unsafeCoerce) import Data.Functor (($>)) +import Data.Bifunctor (Bifunctor(bimap)) +import qualified Data.Maybe as Maybe -- | For standalone types, like those in Term.Ann type FT = F' Reference @@ -61,6 +63,9 @@ rtraverse g = ABT.transformM \case Ref r -> Ref <$> g r x -> pure $ unsafeCoerce x +typeD2T :: Ord v => Hash -> TypeD v -> TypeT v +typeD2T h = rmap $ bimap id $ Maybe.fromMaybe h + dependencies :: (Ord v, Ord r) => ABT.Term (F' r) v a -> Set r dependencies = Writer.execWriter . ABT.visit' f where diff --git a/hie.yaml b/hie.yaml index acaffc769c..99a5b8290c 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,5 +1,11 @@ cradle: stack: + - path: "codebase-convert-1to2/app/Main.hs" + component: "unison-codebase-convert-1to2:exe:uconvert12" + + - path: "codebase-convert-1to2/lib" + component: "unison-codebase-convert-1to2:lib" + - path: "codebase1/codebase/." 
component: "unison-codebase1:lib" From 442881f538a498e637ecfde4ea13b24667a221b3 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 9 Feb 2021 22:01:41 -0500 Subject: [PATCH 097/225] IS/NULL + don't embed ReferenceH in watch results --- .../U/Codebase/Sqlite/Operations.hs | 11 +- .../U/Codebase/Sqlite/Queries.hs | 24 +- .../src/Unison/Builtin/Decls.hs | 443 ++++++++++-------- 3 files changed, 262 insertions(+), 216 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 5399bd160b..758b7fe868 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -76,8 +76,7 @@ import U.Codebase.Sqlite.LocalIds LocalIds' (..), LocalPatchObjectId (..), LocalTextId (..), - WatchLocalIds, - ) + ) import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Patch.Diff as S @@ -712,12 +711,12 @@ saveWatch w r t = do let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm Q.saveWatch w rs bytes -c2wTerm :: DB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) -c2wTerm tm = c2xTerm Q.saveText Q.saveHashHash tm Nothing <&> \(w, tm, _) -> (w, tm) +c2wTerm :: EDB m => C.Term Symbol -> m (LocalIds, S.Term.Term) +c2wTerm tm = c2xTerm Q.saveText primaryHashToExistingObjectId tm Nothing <&> \(w, tm, _) -> (w, tm) -w2cTerm :: EDB m => WatchLocalIds -> S.Term.Term -> m (C.Term Symbol) +w2cTerm :: EDB m => LocalIds -> S.Term.Term -> m (C.Term Symbol) w2cTerm ids tm = do - (substText, substHash) <- localIdsToLookups loadTextById loadHashByHashId ids + (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids pure $ x2cTerm substText substHash tm -- ** Saving & loading type decls diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 
3f487967c2..dd501e3b5b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -85,14 +85,10 @@ type TypeHashReference = Reference' TextId HashId createSchema :: (DB m, MonadUnliftIO m) => m () createSchema = do - when debug $ traceM "--- CREATING SCHEMA ---" withImmediateTransaction . traverse_ (execute_ . fromString) $ List.splitOn ";" [hereFile|sql/create.sql|] <> List.splitOn ";" [hereFile|sql/create-index.sql|] -debug :: Bool -debug = False - setFlags :: DB m => m () setFlags = execute_ "PRAGMA foreign_keys = ON;" @@ -389,9 +385,9 @@ getReferentsByType r = query sql r where sql = [here| term_referent_component_index, term_referent_constructor_index FROM find_type_index - WHERE type_reference_builtin = ? - AND type_reference_hash_id = ? - AND type_reference_component_index = ? + WHERE type_reference_builtin IS ? + AND type_reference_hash_id IS ? + AND type_reference_component_index IS ? |] getTypeReferenceForReference :: EDB m => Reference.Id -> m (Reference' TextId HashId) @@ -428,9 +424,9 @@ getReferentsByTypeMention r = query sql r where sql = [here| term_referent_component_index, term_referent_constructor_index FROM find_type_mentions_index - WHERE type_reference_builtin = ? - AND type_reference_hash_id = ? - AND type_reference_component_index = ? + WHERE type_reference_builtin IS ? + AND type_reference_hash_id IS ? + AND type_reference_component_index IS ? |] addToDependentsIndex :: DB m => Reference.Reference -> Reference.Id -> m () @@ -450,16 +446,16 @@ getDependentsForDependency :: DB m => Reference.Reference -> m [Reference.Id] getDependentsForDependency dependency = query sql dependency where sql = [here| SELECT dependent_object_id, dependent_component_index FROM dependents_index - WHERE dependency_builtin = ? - AND dependency_object_id = ? - AND dependency_component_index = ? + WHERE dependency_builtin IS ? + AND dependency_object_id IS ? + AND dependency_component_index IS ? 
|] getDependencyIdsForDependent :: DB m => Reference.Id -> m [Reference.Id] getDependencyIdsForDependent dependent = query sql dependent where sql = [here| SELECT dependency_object_id, dependency_component_index FROM dependents_index - WHERE dependency_builtin = NULL + WHERE dependency_builtin IS NULL AND dependent_object_id = ? AND dependen_component_index = ? |] diff --git a/parser-typechecker/src/Unison/Builtin/Decls.hs b/parser-typechecker/src/Unison/Builtin/Decls.hs index 11ffbd457c..b6f619aa4f 100644 --- a/parser-typechecker/src/Unison/Builtin/Decls.hs +++ b/parser-typechecker/src/Unison/Builtin/Decls.hs @@ -1,38 +1,41 @@ +{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE OverloadedStrings #-} module Unison.Builtin.Decls where -import Data.List ( elemIndex, find ) -import qualified Data.Map as Map -import Data.Text (Text,unpack) +import Control.Monad (join) +import Data.List (elemIndex, find) +import qualified Data.Map as Map +import Data.Text (Text, unpack) +import Debug.Trace (trace) import qualified Unison.ABT as ABT -import qualified Unison.ConstructorType as CT +import qualified Unison.ConstructorType as CT +import Unison.DataDeclaration + ( DataDeclaration (..), + Modifier (Structural, Unique), + hashDecls, + ) import qualified Unison.DataDeclaration as DD -import Unison.DataDeclaration ( DataDeclaration(..) 
- , Modifier(Structural, Unique) - , hashDecls ) -import qualified Unison.Pattern as Pattern -import Unison.Reference (Reference) -import qualified Unison.Reference as Reference -import Unison.Referent (Referent) -import qualified Unison.Referent as Referent -import Unison.Symbol (Symbol) -import Unison.Term (ConstructorId, Term, Term2) -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import Unison.Type (Type) -import qualified Unison.Var as Var -import Unison.Var (Var) +import qualified Unison.Pattern as Pattern +import Unison.Reference (Reference) +import qualified Unison.Reference as Reference +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import Unison.Symbol (Symbol) +import Unison.Term (ConstructorId, Term, Term2) +import qualified Unison.Term as Term +import Unison.Type (Type) +import qualified Unison.Type as Type +import Unison.Var (Var) +import qualified Unison.Var as Var lookupDeclRef :: Text -> Reference lookupDeclRef str - | [(_, d, _)] <- filter (\(v,_,_) -> v == Var.named str) decls - = Reference.DerivedId d - | otherwise - = error $ "lookupDeclRef: missing \"" ++ unpack str ++ "\"" - where decls = builtinDataDecls @Symbol + | [(_, d, _)] <- filter (\(v, _, _) -> v == Var.named str) decls = Reference.DerivedId d + | otherwise = error $ "lookupDeclRef: missing \"" ++ unpack str ++ "\"" + where + decls = builtinDataDecls @Symbol unitRef, pairRef, optionalRef, eitherRef :: Reference unitRef = lookupDeclRef "Unit" @@ -40,10 +43,12 @@ pairRef = lookupDeclRef "Tuple" optionalRef = lookupDeclRef "Optional" eitherRef = lookupDeclRef "Either" -testResultRef, linkRef, docRef, ioErrorRef, stdHandleRef, failureRef :: Reference +testResultRef, linkRef, docRef, isPropagatedRef, isTestRef, ioErrorRef, stdHandleRef, failureRef :: Reference testResultRef = lookupDeclRef "Test.Result" linkRef = lookupDeclRef "Link" docRef = lookupDeclRef "Doc" +isPropagatedRef = lookupDeclRef "IsPropagated" +isTestRef = 
lookupDeclRef "IsTest" ioErrorRef = lookupDeclRef "io2.IOError" stdHandleRef = lookupDeclRef "io2.StdHandle" failureRef = lookupDeclRef "io2.Failure" @@ -64,7 +69,9 @@ constructorId ref name = do (_,_,dd) <- find (\(_,r,_) -> Reference.DerivedId r == ref) (builtinDataDecls @Symbol) elemIndex name $ DD.constructorNames dd -okConstructorId, failConstructorId, docBlobId, docLinkId, docSignatureId, docSourceId, docEvaluateId, docJoinId, linkTermId, linkTypeId :: ConstructorId +isPropagatedConstructorId, isTestConstructorId, okConstructorId, failConstructorId, docBlobId, docLinkId, docSignatureId, docSourceId, docEvaluateId, docJoinId, linkTermId, linkTypeId :: ConstructorId +Just isPropagatedConstructorId = constructorId isPropagatedRef "IsPropagated.IsPropagated" +Just isTestConstructorId = constructorId isTestRef "IsTest.IsTest" Just okConstructorId = constructorId testResultRef "Test.Result.Ok" Just failConstructorId = constructorId testResultRef "Test.Result.Fail" Just docBlobId = constructorId docRef "Doc.Blob" @@ -84,176 +91,221 @@ failConstructorReferent = Referent.Con testResultRef failConstructorId CT.Data -- | builtinTypes' and those types defined herein builtinDataDecls :: Var v => [(v, Reference.Id, DataDeclaration v ())] builtinDataDecls = rs1 ++ rs - where - rs1 = case hashDecls $ Map.fromList - [ (v "Link" , link) - ] of Right a -> a; Left e -> error $ "builtinDataDecls: " <> show e - rs = case hashDecls $ Map.fromList - [ (v "Unit" , unit) - , (v "Tuple" , tuple) - , (v "Optional" , opt) - , (v "Either" , eith) - , (v "Test.Result" , tr) - , (v "Doc" , doc) - , (v "io2.FileMode" , fmode) - , (v "io2.BufferMode" , bmode) - , (v "io2.SeekMode" , smode) - , (v "SeqView" , seqview) - , (v "io2.IOError" , ioerr) - , (v "io2.StdHandle" , stdhnd) - , (v "io2.Failure" , failure) - , (v "io2.TlsFailure" , tlsFailure) - ] of Right a -> a; Left e -> error $ "builtinDataDecls: " <> show e - [(_, linkRef, _)] = rs1 - v = Var.named - var name = Type.var () (v name) - 
arr = Type.arrow' - -- see note on `hashDecls` above for why ctor must be called `Unit.Unit`. - unit = DataDeclaration Structural () [] [((), v "Unit.Unit", var "Unit")] - tuple = DataDeclaration - Structural - () - [v "a", v "b"] - [ ( () - , v "Tuple.Cons" - , Type.foralls + where + rs1 = case hashDecls $ + Map.fromList + [ (v "Link", link) + ] of + Right a -> a + Left e -> error $ "builtinDataDecls: " <> show e + rs = case hashDecls $ + Map.fromList + [ (v "Unit", unit), + (v "Tuple", tuple), + (v "Optional", opt), + (v "Either", eith), + (v "Test.Result", tr), + (v "IsPropagated", isPropagated), + (v "IsTest", isTest), + (v "Doc", doc), + (v "io2.FileMode", fmode), + (v "io2.BufferMode", bmode), + (v "io2.SeekMode", smode), + (v "SeqView", seqview), + (v "io2.IOError", ioerr), + (v "io2.StdHandle", stdhnd), + (v "io2.Failure", failure), + (v "io2.TlsFailure", tlsFailure) + ] of + Right a -> a + Left e -> error $ "builtinDataDecls: " <> show e + [(_, linkRef, _)] = rs1 + v = Var.named + var name = Type.var () (v name) + arr = Type.arrow' + -- see note on `hashDecls` above for why ctor must be called `Unit.Unit`. 
+ unit = DataDeclaration Structural () [] [((), v "Unit.Unit", var "Unit")] + tuple = + DataDeclaration + Structural + () + [v "a", v "b"] + [ ( (), + v "Tuple.Cons", + Type.foralls + () + [v "a", v "b"] + ( var "a" + `arr` (var "b" `arr` Type.apps' (var "Tuple") [var "a", var "b"]) + ) + ) + ] + opt = + DataDeclaration + Structural + () + [v "a"] + [ ( (), + v "Optional.None", + Type.foralls () [v "a"] (Type.app' (var "Optional") (var "a")) + ), + ( (), + v "Optional.Some", + Type.foralls + () + [v "a"] + (var "a" `arr` Type.app' (var "Optional") (var "a")) + ) + ] + eith = + DataDeclaration + Structural + () + [v "a", v "b"] + [ ( (), + v "Either.Left", + Type.foralls + () + [v "a", v "b"] + (var "a" `arr` Type.apps' (var "Either") [var "a", var "b"]) + ), + ( (), + v "Either.Right", + Type.foralls + () + [v "a", v "b"] + (var "b" `arr` Type.apps' (var "Either") [var "a", var "b"]) + ) + ] + isTest = + DataDeclaration + (Unique "e6dca08b40458b03ca1660cfbdaecaa7279b42d18257898b5fd1c34596aac36f") + () + [] + [((), v "IsTest.IsTest", var "IsTest")] + isPropagated = + DataDeclaration + (Unique "b28d929d0a73d2c18eac86341a3bb9399f8550c11b5f35eabb2751e6803ccc20") + () + [] + [((), v "IsPropagated.IsPropagated", var "IsPropagated")] + fmode = + DataDeclaration + (Unique "3c11ba4f0a5d8fedd427b476cdd2d7673197d11e") + () + [] + [ ((), v "io2.FileMode.Read", var "io2.FileMode"), + ((), v "io2.FileMode.Write", var "io2.FileMode"), + ((), v "io2.FileMode.Append", var "io2.FileMode"), + ((), v "io2.FileMode.ReadWrite", var "io2.FileMode") + ] + bmode = + DataDeclaration + (Unique "7dd9560d3826c21e5e6a7e08f575b61adcddf849") + () + [] + [ ((), v "io2.BufferMode.NoBuffering", var "io2.BufferMode"), + ((), v "io2.BufferMode.LineBuffering", var "io2.BufferMode"), + ((), v "io2.BufferMode.BlockBuffering", var "io2.BufferMode"), + ( (), + v "io2.BufferMode.SizedBlockBuffering", + Type.nat () `arr` var "io2.BufferMode" + ) + ] + smode = + DataDeclaration + (Unique 
"453a764f73cb4c7371d9af23b2d5ed646bf9e57c") + () + [] + [ ((), v "io2.SeekMode.AbsoluteSeek", var "io2.SeekMode"), + ((), v "io2.SeekMode.RelativeSeek", var "io2.SeekMode"), + ((), v "io2.SeekMode.SeekFromEnd", var "io2.SeekMode") + ] + ioerr = + DataDeclaration + (Unique "5915e25ac83205f7885395cc6c6c988bc5ec69a1") + () + [] + [ ((), v "io2.IOError.AlreadyExists", var "io2.IOError"), + ((), v "io2.IOError.NoSuchThing", var "io2.IOError"), + ((), v "io2.IOError.ResourceBusy", var "io2.IOError"), + ((), v "io2.IOError.ResourceExhausted", var "io2.IOError"), + ((), v "io2.IOError.EOF", var "io2.IOError"), + ((), v "io2.IOError.IllegalOperation", var "io2.IOError"), + ((), v "io2.IOError.PermissionDenied", var "io2.IOError"), + ((), v "io2.IOError.UserError", var "io2.IOError") + ] + failure = + DataDeclaration + (Unique "52ad89274a358b9c802792aa05915e25ac83205f7885395cc6c6c988bc5ec69a1") + () + [] + [ ((), v "io2.Failure.Failure", (Type.typeLink () `arr` (Type.text () `arr` var "io2.Failure"))) + ] + tlsFailure = + DataDeclaration + (Unique "df5ba835130b227ab83d02d1feff5402455a732d613b51dee32230d2f2d067c6") + () + [] + [] + stdhnd = + DataDeclaration + (Unique "67bf7a8e517cbb1e9f42bc078e35498212d3be3c") + () + [] + [ ((), v "io2.StdHandle.StdIn", var "io2.StdHandle"), + ((), v "io2.StdHandle.StdOut", var "io2.StdHandle"), + ((), v "io2.StdHandle.StdErr", var "io2.StdHandle") + ] + seqview = + DataDeclaration + Structural () [v "a", v "b"] - ( var "a" - `arr` (var "b" `arr` Type.apps' (var "Tuple") [var "a", var "b"]) - ) - ) - ] - opt = DataDeclaration - Structural - () - [v "a"] - [ ( () - , v "Optional.None" - , Type.foralls () [v "a"] (Type.app' (var "Optional") (var "a")) - ) - , ( () - , v "Optional.Some" - , Type.foralls () - [v "a"] - (var "a" `arr` Type.app' (var "Optional") (var "a")) - ) - ] - eith = DataDeclaration - Structural - () - [v "a", v "b"] - [ ( () - , v "Either.Left" - , Type.foralls () [v "a", v "b"] - (var "a" `arr` Type.apps' (var "Either") 
[var "a", var "b"]) - ) - , ( () - , v "Either.Right" - , Type.foralls () [v "a", v "b"] - (var "b" `arr` Type.apps' (var "Either") [var "a", var "b"]) - ) - ] - fmode = DataDeclaration - (Unique "3c11ba4f0a5d8fedd427b476cdd2d7673197d11e") - () - [] - [ ((), v "io2.FileMode.Read", var "io2.FileMode") - , ((), v "io2.FileMode.Write", var "io2.FileMode") - , ((), v "io2.FileMode.Append", var "io2.FileMode") - , ((), v "io2.FileMode.ReadWrite", var "io2.FileMode") - ] - bmode = DataDeclaration - (Unique "7dd9560d3826c21e5e6a7e08f575b61adcddf849") - () - [] - [ ((), v "io2.BufferMode.NoBuffering", var "io2.BufferMode") - , ((), v "io2.BufferMode.LineBuffering", var "io2.BufferMode") - , ((), v "io2.BufferMode.BlockBuffering", var "io2.BufferMode") - , ((), v "io2.BufferMode.SizedBlockBuffering" - , Type.nat () `arr` var "io2.BufferMode") - ] - smode = DataDeclaration - (Unique "453a764f73cb4c7371d9af23b2d5ed646bf9e57c") - () - [] - [ ((), v "io2.SeekMode.AbsoluteSeek", var "io2.SeekMode") - , ((), v "io2.SeekMode.RelativeSeek", var "io2.SeekMode") - , ((), v "io2.SeekMode.SeekFromEnd", var "io2.SeekMode") - ] - ioerr = DataDeclaration - (Unique "5915e25ac83205f7885395cc6c6c988bc5ec69a1") - () - [] - [ ((), v "io2.IOError.AlreadyExists", var "io2.IOError") - , ((), v "io2.IOError.NoSuchThing", var "io2.IOError") - , ((), v "io2.IOError.ResourceBusy", var "io2.IOError") - , ((), v "io2.IOError.ResourceExhausted", var "io2.IOError") - , ((), v "io2.IOError.EOF", var "io2.IOError") - , ((), v "io2.IOError.IllegalOperation", var "io2.IOError") - , ((), v "io2.IOError.PermissionDenied", var "io2.IOError") - , ((), v "io2.IOError.UserError", var "io2.IOError") - ] - failure = DataDeclaration - (Unique "52ad89274a358b9c802792aa05915e25ac83205f7885395cc6c6c988bc5ec69a1") - () - [] - [ ((), v "io2.Failure.Failure", (Type.typeLink () `arr` (Type.text () `arr` var "io2.Failure"))) - ] - tlsFailure = DataDeclaration - (Unique 
"df5ba835130b227ab83d02d1feff5402455a732d613b51dee32230d2f2d067c6") - () - [] - [] - stdhnd = DataDeclaration - (Unique "67bf7a8e517cbb1e9f42bc078e35498212d3be3c") - () - [] - [ ((), v "io2.StdHandle.StdIn", var "io2.StdHandle") - , ((), v "io2.StdHandle.StdOut", var "io2.StdHandle") - , ((), v "io2.StdHandle.StdErr", var "io2.StdHandle") - ] - seqview = DataDeclaration - Structural - () - [v "a", v "b"] - [ ( () - , v "SeqView.VEmpty" - , Type.foralls () [v "a", v "b"] - (Type.apps' (var "SeqView") [var "a", var "b"]) - ) - , ( () - , v "SeqView.VElem" - , let sv = Type.apps' (var "SeqView") [var "a", var "b"] - in Type.foralls () [v "a", v "b"] - (var "a" `arr` (var "b" `arr` sv)) - ) - ] - tr = DataDeclaration - (Unique "70621e539cd802b2ad53105697800930411a3ebc") - () - [] - [ ((), v "Test.Result.Fail", Type.text () `arr` var "Test.Result") - , ((), v "Test.Result.Ok" , Type.text () `arr` var "Test.Result") - ] - doc = DataDeclaration - (Unique "c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004") - () - [] - [ ((), v "Doc.Blob", Type.text () `arr` var "Doc") - , ((), v "Doc.Link", Type.refId () linkRef `arr` var "Doc") - , ((), v "Doc.Signature", Type.termLink () `arr` var "Doc") - , ((), v "Doc.Source", Type.refId () linkRef `arr` var "Doc") - , ((), v "Doc.Evaluate", Type.termLink () `arr` var "Doc") - , ((), v "Doc.Join", Type.app () (Type.vector()) (var "Doc") `arr` var "Doc") - ] - link = DataDeclaration - (Unique "a5803524366ead2d7f3780871d48771e8142a3b48802f34a96120e230939c46bd5e182fcbe1fa64e9bff9bf741f3c04") - () - [] - [ ((), v "Link.Term", Type.termLink () `arr` var "Link") - , ((), v "Link.Type", Type.typeLink () `arr` var "Link") - ] + [ ( (), + v "SeqView.VEmpty", + Type.foralls + () + [v "a", v "b"] + (Type.apps' (var "SeqView") [var "a", var "b"]) + ), + ( (), + v "SeqView.VElem", + let sv = Type.apps' (var "SeqView") [var "a", var "b"] + in Type.foralls + () + [v "a", v "b"] + (var "a" `arr` (var "b" 
`arr` sv)) + ) + ] + tr = + DataDeclaration + (Unique "70621e539cd802b2ad53105697800930411a3ebc") + () + [] + [ ((), v "Test.Result.Fail", Type.text () `arr` var "Test.Result"), + ((), v "Test.Result.Ok", Type.text () `arr` var "Test.Result") + ] + doc = + DataDeclaration + (Unique "c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004") + () + [] + [ ((), v "Doc.Blob", Type.text () `arr` var "Doc"), + ((), v "Doc.Link", Type.refId () linkRef `arr` var "Doc"), + ((), v "Doc.Signature", Type.termLink () `arr` var "Doc"), + ((), v "Doc.Source", Type.refId () linkRef `arr` var "Doc"), + ((), v "Doc.Evaluate", Type.termLink () `arr` var "Doc"), + ((), v "Doc.Join", Type.app () (Type.vector ()) (var "Doc") `arr` var "Doc") + ] + link = + DataDeclaration + (Unique "a5803524366ead2d7f3780871d48771e8142a3b48802f34a96120e230939c46bd5e182fcbe1fa64e9bff9bf741f3c04") + () + [] + [ ((), v "Link.Term", Type.termLink () `arr` var "Link"), + ((), v "Link.Type", Type.typeLink () `arr` var "Link") + ] builtinEffectDecls :: [(v, Reference.Id, DD.EffectDeclaration v ())] builtinEffectDecls = [] @@ -349,4 +401,3 @@ unUnitRef,unPairRef,unOptionalRef:: Reference -> Bool unUnitRef = (== unitRef) unPairRef = (== pairRef) unOptionalRef = (== optionalRef) - From 6d396e765269819707e42abe0152c001e83119a8 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 10 Feb 2021 21:22:53 -0500 Subject: [PATCH 098/225] add isPropagated/isTest during codebase init --- parser-typechecker/src/Unison/Builtin.hs | 4 +- .../src/Unison/Builtin/Terms.hs | 34 +++++++ parser-typechecker/src/Unison/Codebase.hs | 14 ++- .../src/Unison/Codebase/Editor/HandleInput.hs | 7 +- .../unison-parser-typechecker.cabal | 1 + unison-src/transcripts/isPropagated-exists.md | 40 +++++++++ .../transcripts/isPropagated-exists.output.md | 90 +++++++++++++++++++ 7 files changed, 178 insertions(+), 12 deletions(-) create mode 100644 parser-typechecker/src/Unison/Builtin/Terms.hs create mode 
100644 unison-src/transcripts/isPropagated-exists.md create mode 100644 unison-src/transcripts/isPropagated-exists.output.md diff --git a/parser-typechecker/src/Unison/Builtin.hs b/parser-typechecker/src/Unison/Builtin.hs index ca22715d3b..9f26889f88 100644 --- a/parser-typechecker/src/Unison/Builtin.hs +++ b/parser-typechecker/src/Unison/Builtin.hs @@ -31,6 +31,7 @@ import qualified Text.Regex.TDFA as RE import qualified Unison.ConstructorType as CT import Unison.Codebase.CodeLookup ( CodeLookup(..) ) import qualified Unison.Builtin.Decls as DD +import qualified Unison.Builtin.Terms as TD import qualified Unison.DataDeclaration as DD import Unison.Parser ( Ann(..) ) import qualified Unison.Reference as R @@ -59,7 +60,8 @@ names0 = Names3.names0 terms types where Rel.fromList [ (Name.fromVar vc, Referent.Con (R.DerivedId r) cid ct) | (ct, (_,(r,decl))) <- ((CT.Data,) <$> builtinDataDecls @Symbol) <> ((CT.Effect,) . (second . second) DD.toDataDecl <$> builtinEffectDecls) - , ((_,vc,_), cid) <- DD.constructors' decl `zip` [0..]] + , ((_,vc,_), cid) <- DD.constructors' decl `zip` [0..]] <> + Rel.fromList [ (Name.fromVar v, Referent.Ref (R.DerivedId i)) | (v,i) <- Map.toList $ TD.builtinTermsRef @Symbol Intrinsic] types = Rel.fromList builtinTypes <> Rel.fromList [ (Name.fromVar v, R.DerivedId r) | (v,(r,_)) <- builtinDataDecls @Symbol ] <> diff --git a/parser-typechecker/src/Unison/Builtin/Terms.hs b/parser-typechecker/src/Unison/Builtin/Terms.hs new file mode 100644 index 0000000000..79d4b8b9b3 --- /dev/null +++ b/parser-typechecker/src/Unison/Builtin/Terms.hs @@ -0,0 +1,34 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE TypeApplications #-} + +module Unison.Builtin.Terms where + +import Data.Map (Map) +import qualified Data.Map as Map +import Data.Text (Text) +import qualified Unison.Builtin.Decls as Decls +import qualified Unison.Reference as Reference +import Unison.Term (Term) +import qualified Unison.Term as Term +import Unison.Type (Type) +import qualified 
Unison.Type as Type +import Unison.Var (Var) +import qualified Unison.Var as Var + +builtinTermsSrc :: Var v => a -> [(v, Term v a, Type v a)] +builtinTermsSrc a = + [ ( v "metadata.isPropagated", + Term.constructor a Decls.isPropagatedRef Decls.isPropagatedConstructorId, + Type.ref a Decls.isPropagatedRef + ), + ( v "metadata.isTest", + Term.constructor a Decls.isTestRef Decls.isTestConstructorId, + Type.ref a Decls.isTestRef + ) + ] + +v :: Var v => Text -> v +v = Var.named + +builtinTermsRef :: Var v => a -> Map v Reference.Id +builtinTermsRef a = fmap fst . Term.hashComponents . Map.fromList . fmap (\(v, tm, _tp) -> (v, tm)) $ builtinTermsSrc a diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index 8bd1a3aafc..e00d851f7b 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -11,13 +11,13 @@ import qualified Data.Map as Map import qualified Data.Set as Set import qualified Unison.ABT as ABT import qualified Unison.Builtin as Builtin +import qualified Unison.Builtin.Terms as Builtin import Unison.Codebase.Branch ( Branch ) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.CodeLookup as CL import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.SyncMode ( SyncMode ) import qualified Unison.DataDeclaration as DD -import qualified Unison.Names2 as Names import Unison.Reference ( Reference ) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent @@ -31,7 +31,6 @@ import qualified Unison.Util.Relation as Rel import qualified Unison.Util.Set as Set import qualified Unison.Var as Var import Unison.Var ( Var ) -import qualified Unison.Runtime.IOSource as IOSource import Unison.Symbol ( Symbol ) import Unison.DataDeclaration (Decl) import Unison.Term (Term) @@ -101,16 +100,13 @@ debug = False data SyncFileCodebaseResult = SyncOk | UnknownDestinationRootBranch Branch.Hash | 
NotFastForward -bootstrapNames :: Names.Names0 -bootstrapNames = - Builtin.names0 <> UF.typecheckedToNames0 IOSource.typecheckedFile - -- | Write all of the builtins types into the codebase and create empty namespace initializeCodebase :: forall m. Monad m => Codebase m Symbol Parser.Ann -> m () initializeCodebase c = do - let uf = (UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) - (Map.fromList Builtin.builtinEffectDecls) - mempty mempty) + let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) + (Map.fromList Builtin.builtinEffectDecls) + [Builtin.builtinTermsSrc Parser.Intrinsic] + mempty addDefsToCodebase c uf putRootBranch c (Branch.one Branch.empty0) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs index dac9b45eb8..d3cbe4f7a4 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs @@ -118,6 +118,7 @@ import Unison.LabeledDependency (LabeledDependency) import Unison.Term (Term) import Unison.Type (Type) import qualified Unison.Builtin as Builtin +import qualified Unison.Builtin.Terms as Builtin import Unison.NameSegment (NameSegment(..)) import qualified Unison.NameSegment as NameSegment import Unison.Codebase.ShortBranchHash (ShortBranchHash) @@ -1675,7 +1676,8 @@ loop = do -- added again. let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) (Map.fromList Builtin.builtinEffectDecls) - mempty mempty + [Builtin.builtinTermsSrc Intrinsic] + mempty eval $ AddDefsToCodebase uf -- add the names; note, there are more names than definitions -- due to builtin terms; so we don't just reuse `uf` above. @@ -1689,7 +1691,8 @@ loop = do -- added again. 
let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) (Map.fromList Builtin.builtinEffectDecls) - mempty mempty + [Builtin.builtinTermsSrc Intrinsic] + mempty eval $ AddDefsToCodebase uf -- these have not necessarily been added yet eval $ AddDefsToCodebase IOSource.typecheckedFile' diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 195b98eb01..e402db0898 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -55,6 +55,7 @@ library exposed-modules: Unison.Builtin Unison.Builtin.Decls + Unison.Builtin.Terms Unison.Codecs Unison.Codebase Unison.Codebase.Branch diff --git a/unison-src/transcripts/isPropagated-exists.md b/unison-src/transcripts/isPropagated-exists.md new file mode 100644 index 0000000000..3480aec185 --- /dev/null +++ b/unison-src/transcripts/isPropagated-exists.md @@ -0,0 +1,40 @@ +This transcript tests that UCM can always access the definition of +`IsPropagated`, which is used internally. + +```ucm:hide +.> alias.term ##Nat.+ + +.> alias.type ##Nat Nat +``` + +y depends on x +```unison +x = 3 +y = x + 1 +``` + +```ucm +.> add +``` + +```unison +x = 4 +``` + +The `update` of `x` causes a propagated update of `y`, and UCM links the +`isPropagated` metadata to such resulting terms: + +```ucm +.> update +.> links y +.> view 1 +``` + +Well, it's hard to tell from those hashes, but those are right. We can confirm +by running `builtins.merge` to have UCM add names for them. + +```ucm +.> builtins.merge +.> links y +.> view 1 +``` + diff --git a/unison-src/transcripts/isPropagated-exists.output.md b/unison-src/transcripts/isPropagated-exists.output.md new file mode 100644 index 0000000000..dcfc03edc0 --- /dev/null +++ b/unison-src/transcripts/isPropagated-exists.output.md @@ -0,0 +1,90 @@ +This transcript tests that UCM can always access the definition of +`IsPropagated`, which is used internally. 
+ +y depends on x +```unison +x = 3 +y = x + 1 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + x : Nat + y : Nat + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + x : Nat + y : Nat + +``` +```unison +x = 4 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + x : Nat + +``` +The `update` of `x` causes a propagated update of `y`, and UCM links the +`isPropagated` metadata to such resulting terms: + +```ucm +.> update + + ⍟ I've updated these names to your new definition: + + x : Nat + +.> links y + + 1. #uqdd5t2fgn : #ffb7g9cull + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. + +.> view 1 + + #uqdd5t2fgn : #ffb7g9cull + #uqdd5t2fgn = #ffb7g9cull#0 + +``` +Well, it's hard to tell from those hashes, but those are right. We can confirm +by running `builtins.merge` to have UCM add names for them. + +```ucm +.> builtins.merge + + Done. + +.> links y + + 1. builtin.metadata.isPropagated : IsPropagated + + Tip: Try using `display 1` to display the first result or + `view 1` to view its source. 
+ +.> view 1 + + builtin.metadata.isPropagated : IsPropagated + builtin.metadata.isPropagated = IsPropagated + +``` From d26893a87687fc56369574d4b8e03ca8359c20ec Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 12 Feb 2021 13:39:46 -0500 Subject: [PATCH 099/225] white space --- .../src/Unison/Codebase/SqliteCodebase.hs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 69012b7fda..fc7d8300eb 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -194,7 +194,7 @@ data BufferEntry a = BufferEntry deriving (Eq, Show) prettyBufferEntry :: Show a => Hash -> BufferEntry a -> String -prettyBufferEntry (h :: Hash) BufferEntry{..} = +prettyBufferEntry (h :: Hash) BufferEntry{..} = "BufferEntry " ++ show h ++ "\n" ++ " { beComponentTargetSize = " ++ show beComponentTargetSize ++ "\n" ++ " , beComponent = " @@ -204,7 +204,7 @@ prettyBufferEntry (h :: Hash) BufferEntry{..} = ++ " , beWaitingDependents =" ++ if Set.size beWaitingDependents < 2 then show $ Set.toList beWaitingDependents else mkString (Set.toList beWaitingDependents) (Just "\n [ ") " , " (Just "]\n") ++ " }" - where + where mkString :: (Foldable f, Show a) => f a -> Maybe String -> String -> Maybe String -> String mkString as start middle end = fromMaybe "" start ++ List.intercalate middle (show <$> toList as) ++ fromMaybe "" end @@ -279,7 +279,7 @@ sqliteCodebase root = do error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size' _ -> pure () let comp' = Map.insert i (tm, tp) comp - -- for the component element that's been passed in, add its dependencies to missing' + -- for the component element that's been passed in, add its dependencies to missing' missingTerms' <- filterM (fmap not . Ops.objectExistsForHash . 
Cv.hash1to2) @@ -341,7 +341,7 @@ sqliteCodebase root = do filterM (fmap not . Ops.objectExistsForHash . Cv.hash1to2) (toList missing) - Monad.when debug do + Monad.when debug do traceM $ "tryFlushBuffer.missing' = " ++ show missing' traceM $ "tryFlushBuffer.size = " ++ show size traceM $ "tryFlushBuffer.length comp = " ++ show (length comp) @@ -427,7 +427,7 @@ sqliteCodebase root = do $ Branch.transform (lift . lift) branch1 rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) - rootBranchUpdates = pure (cleanup, newRootsDiscovered) + rootBranchUpdates = pure (cleanup, newRootsDiscovered) where newRootsDiscovered = do Control.Concurrent.threadDelay maxBound -- hold off on returning From 696755c1968adf5a150d560656985651da9e5dc2 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 12 Feb 2021 13:40:25 -0500 Subject: [PATCH 100/225] ignore unrecognized Watch Kinds --- parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index fc7d8300eb..ac8132df88 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -463,18 +463,22 @@ sqliteCodebase root = do >>= traverse (Cv.referenceid2to1 getCycleLen) -- getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) - getWatch k r@(Reference.Id h _i _n) = + getWatch k r@(Reference.Id h _i _n) | elem k standardWatchKinds = runDB' conn $ Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) >>= Cv.term2to1 h getCycleLen getDeclType + getWatch _unknownKind _ = pure Nothing + + standardWatchKinds = [UF.RegularWatch, UF.TestWatch] putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () - putWatch k r@(Reference.Id h _i _n) tm = + putWatch k r@(Reference.Id h _i _n) tm | elem k standardWatchKinds = runDB conn $ Ops.saveWatch 
(Cv.watchKind1to2 k) (Cv.referenceid1to2 r) (Cv.term1to2 h tm) + putWatch _unknownKind _ _ = pure () getReflog :: IO [Reflog.Entry] getReflog = From 11a9d60515eefde0ec77a6aa88cf043d5034d414 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 12 Feb 2021 15:27:37 -0500 Subject: [PATCH 101/225] term1to2 was encoding Term.Request as Constructor --- codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs | 2 +- .../src/Unison/Codebase/SqliteCodebase/Conversions.hs | 2 +- unison-core/src/Unison/Term.hs | 5 ++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs index d72bcbee13..a1b1185a60 100644 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs @@ -917,7 +917,7 @@ convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do V1.Term.Constructor r i -> V2.Term.Constructor (lookupType r) (fromIntegral i) V1.Term.Request r i -> - V2.Term.Constructor (lookupType r) (fromIntegral i) + V2.Term.Request (lookupType r) (fromIntegral i) V1.Term.Handle b h -> V2.Term.Handle (goTerm b) (goTerm h) V1.Term.App f a -> V2.Term.App (goTerm f) (goTerm a) V1.Term.Ann e t -> V2.Term.Ann (goTerm e) (buildTermType2H lookup t) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 453e90cec4..2c9fd7c116 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -105,7 +105,7 @@ term1to2 h = V1.Term.Char c -> V2.Term.Char c V1.Term.Ref r -> V2.Term.Ref (rreference1to2 h r) V1.Term.Constructor r i -> V2.Term.Constructor (reference1to2 r) (fromIntegral i) - V1.Term.Request r i -> V2.Term.Constructor (reference1to2 r) (fromIntegral i) + V1.Term.Request r i -> V2.Term.Request (reference1to2 
r) (fromIntegral i) V1.Term.Handle b h -> V2.Term.Handle b h V1.Term.App f a -> V2.Term.App f a V1.Term.Ann e t -> V2.Term.Ann e (ttype1to2 t) diff --git a/unison-core/src/Unison/Term.hs b/unison-core/src/Unison/Term.hs index 9486b2abc6..36191a8276 100644 --- a/unison-core/src/Unison/Term.hs +++ b/unison-core/src/Unison/Term.hs @@ -1097,7 +1097,6 @@ instance (ABT.Var vt, Eq at, Eq a) => Eq (F vt at p a) where instance (Show v, Show a) => Show (F v a0 p a) where showsPrec = go where - showConstructor r n = shows r <> s "#" <> shows n go _ (Int n ) = (if n >= 0 then s "+" else s "") <> shows n go _ (Nat n ) = shows n go _ (Float n ) = shows n @@ -1122,13 +1121,13 @@ instance (Show v, Show a) => Show (F v a0 p a) where go _ (Handle b body) = showParen True (s "handle " <> shows b <> s " in " <> shows body) - go _ (Constructor r n ) = showConstructor r n + go _ (Constructor r n ) = s "Con" <> shows r <> s "#" <> shows n go _ (Match scrutinee cases) = showParen True (s "case " <> shows scrutinee <> s " of " <> shows cases) go _ (Text s ) = shows s go _ (Char c ) = shows c - go _ (Request r n) = showConstructor r n + go _ (Request r n) = s "Req" <> shows r <> s "#" <> shows n go p (If c t f) = showParen (p > 0) $ s "if " From b7c868b28222dfc5c5aeadd81b9b6672034213c8 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 12 Feb 2021 15:28:03 -0500 Subject: [PATCH 102/225] white space --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs | 6 +++--- .../src/Unison/Codebase/SqliteCodebase/Conversions.hs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 758b7fe868..c06da98467 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -741,10 +741,10 @@ saveDeclComponent h decls = do getSRef _selfCycleRef@(C.Reference.Derived Nothing _) = Nothing in 
Set.mapMaybe (fmap (,self) . getSRef) dependencies traverse_ (uncurry Q.addToDependentsIndex) dependencies - + -- populate type indexes - for_ (zip decls [0..]) - \(C.DataDeclaration _ _ _ ctorTypes, i) -> for_ (zip ctorTypes [0..]) + for_ (zip decls [0..]) + \(C.DataDeclaration _ _ _ ctorTypes, i) -> for_ (zip ctorTypes [0..]) \(tp, j) -> do let self = C.Referent.ConId (C.Reference.Id oId i) j typeForIndexing :: C.Type.TypeT Symbol = TypeUtil.removeAllEffectVars (C.Type.typeD2T h tp) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 2c9fd7c116..dc8c262e7b 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -463,7 +463,7 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 Map.fromList [ (referent1to2 r, pure md) | r <- toList . Relation.lookupRan ns $ V1.Star3.d1 s - , let + , let mdrefs1to2 (typeR1, valR1) = (reference1to2 valR1, reference1to2 typeR1) md = V2.Branch.MdValues . Map.fromList . map mdrefs1to2 . toList . Relation.lookupDom r $ V1.Star3.d3 s ] @@ -478,7 +478,7 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 Map.fromList [ (reference1to2 r, pure md) | r <- toList . Relation.lookupRan ns $ V1.Star3.d1 s - , let + , let mdrefs1to2 (typeR1, valR1) = (reference1to2 valR1, reference1to2 typeR1) md = V2.Branch.MdValues . Map.fromList . map mdrefs1to2 . toList . Relation.lookupDom r $ V1.Star3.d3 s ] @@ -522,7 +522,7 @@ patch2to1 lookupSize (V2.Branch.Patch v2termedits v2typeedits) = do patch1to2 :: V1.Patch -> V2.Branch.Patch patch1to2 (V1.Patch v1termedits v1typeedits) = V2.Branch.Patch v2termedits v2typeedits - where + where v2termedits = Map.bimap (V2.Referent.Ref . 
reference1to2) (Set.map termedit1to2) $ Relation.domain v1termedits v2typeedits = Map.bimap reference1to2 (Set.map typeedit1to2) $ Relation.domain v1typeedits termedit1to2 :: V1.TermEdit.TermEdit -> V2.TermEdit.TermEdit From c05acce10bd93ab19e056788f8ff9b473ce71b16 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 22 Feb 2021 11:52:47 -0500 Subject: [PATCH 103/225] initial sync framework --- .../lib/U/Codebase/Sync/V2V2.hs | 123 ++++++++++++++++++ .../unison-codebase-sync-2to2.cabal | 43 ++++++ codebase2/core/unison-core.cabal | 2 + codebase2/util-term/U/Util/Term.hs | 12 -- stack.yaml | 2 +- 5 files changed, 169 insertions(+), 13 deletions(-) create mode 100644 codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs create mode 100644 codebase-convert-2to2/unison-codebase-sync-2to2.cabal diff --git a/codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs b/codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs new file mode 100644 index 0000000000..49d523a2fc --- /dev/null +++ b/codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs @@ -0,0 +1,123 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module U.Codebase.Sync.V2V2 where + +-- localSyncFile +-- (srcPath :: CodebasePath) +-- (destPath :: CodebasePath) +-- (root :: Maybe ShortBranchHash) +-- (path :: UnisonPath) +-- = error "todo" + +-- localSyncSql +-- (srcDb :: Connection) +-- (destDb :: Connection) +-- (root :: Maybe ShortBranchHash) +-- (path :: UnisonPath) +-- = error "todo" + +-- data Reference t h = Builtin t | Derived h Pos +-- -- |The 0-based index in a definition component/cycle +-- newtype Pos = Pos { unPos :: Word64 } +-- data RefId h = RefId h Pos +-- data TermRef t h = TermRef (Reference t h) | TermCon (Reference t h) ConstructorId +-- newtype ConstructorId = ConstructorId { unConstructorId :: Word64 } +-- data TermRefId h = TermRefId (RefId h) | TermConId (RefId h) ConstructorId + +import Data.Foldable (traverse_) +import 
Control.Monad.Extra (ifM) +import Data.Foldable (Foldable(foldl')) +data TypeDependency y = YType y +data TermDependency e y = ETerm e | EType y +data PatchDependency e y = PTerm e | PType y +data BranchDependency b e y p = Branch b | BTerm e | BType y | BPatch p + +data Sync m b e y p = Sync + { rootBranch :: m b + , termMissingDependencies :: e -> m [TermDependency e y] + , typeMissingDependencies :: y -> m [TypeDependency y] + , patchMissingDependencies :: p -> m [PatchDependency e y] + , branchMissingDependencies :: b -> m [BranchDependency b e y p] + -- returns True if it does some real work, False if it skips / short circuits + -- It should be expected that these functions can be called multiple times + -- for the same arguments. + , syncTerm :: e -> m Bool + , syncType :: y -> m Bool + , syncPatch :: p -> m Bool + , syncBranch :: b -> m Bool + } + +-- | Progress callbacks. +-- There's no notion of "work remaining" captured here, because that would require +-- this algorithm to keep dependencies in memory, which may be intractable. +-- An implementation, however, can use the `need*` callbacks to track this in `m`. +data Progress m b e y p = Progress + { needBranch :: b -> m () + , needTerm :: e -> m () + , needType :: y -> m () + , needPatch :: p -> m () + , doneBranch :: b -> m () + , doneTerm :: e -> m () + , doneType :: y -> m () + , donePatch :: p -> m () + , allDone :: m () + } + +sync :: forall m b e y p. 
Monad m => Sync m b e y p -> Progress m b e y p -> m () +sync Sync{..} Progress{..} = do b <- rootBranch; go ([], [], [], [b]) where + go :: ([y],[e],[p],[b]) -> m () + go (y : ys, es, ps, bs) = + typeMissingDependencies y >>= \case + [] -> ifM (syncType y) (doneType y) (go (ys, es, ps, bs)) + ydeps -> do + let ys' = [y | YType y <- ydeps] + traverse_ needType ys' + go (ys' ++ y : ys, es, ps, bs) + + go ([], (e : es), ps, bs) = + termMissingDependencies e >>= \case + [] -> ifM (syncTerm e) (doneTerm e) (go ([], es, ps, bs)) + edeps -> do + let (ys', es') = foldl' partitionTermDeps mempty edeps + traverse_ needType ys' + traverse_ needTerm es' + go (ys', es' ++ e : es, ps, bs) + + go ([], [], (p : ps), bs) = + patchMissingDependencies p >>= \case + [] -> ifM (syncPatch p) (donePatch p) (go ([], [], ps, bs)) + pdeps -> do + let (ys', es') = foldl' partitionPatchDeps mempty pdeps + traverse_ needType ys' + traverse_ needTerm es' + go (ys', es', p : ps, bs) + + go ([], [], [], (b : bs)) = error "todo" + branchMissingDependencies b >>= \case + [] -> ifM (syncBranch b) (doneBranch b) (go ([], [], [], bs)) + bdeps -> do + let (ys', es', ps', bs') = foldl' partitionBranchDeps mempty bdeps + traverse_ needType ys' + traverse_ needTerm es' + traverse_ needPatch ps' + traverse_ needBranch bs' + go (ys', es', ps', bs' ++ b : bs) + + go ([], [], [], []) = allDone + + partitionTermDeps (ys, es) = \case + EType y -> (y : ys, es) + ETerm e -> (ys, e : es) + + partitionPatchDeps (ys, es) = \case + PType y -> (y : ys, es) + PTerm e -> (ys, e : es) + + partitionBranchDeps (ys, es, ps, bs) = \case + BType y -> (y : ys, es, ps, bs) + BTerm e -> (ys, e : es, ps, bs) + BPatch p -> (ys, es, p :ps, bs) + Branch b -> (ys, es, ps, b : bs) diff --git a/codebase-convert-2to2/unison-codebase-sync-2to2.cabal b/codebase-convert-2to2/unison-codebase-sync-2to2.cabal new file mode 100644 index 0000000000..15d960213f --- /dev/null +++ b/codebase-convert-2to2/unison-codebase-sync-2to2.cabal @@ -0,0 
+1,43 @@ +cabal-version: 2.2 +-- Initial package description 'unison-codebase2-core.cabal' generated by +-- 'cabal init'. For further documentation, see +-- http://haskell.org/cabal/users-guide/ + +name: unison-codebase-sync-2to2 +version: 0.1.0.0 +-- synopsis: +-- description: +homepage: https://github.com/unisonweb/unison +-- bug-reports: +license: MIT +copyright: Unison Computing, PBC +category: Development + +library + hs-source-dirs: lib + exposed-modules: + U.Codebase.Sync.V2V2 + -- other-modules: + -- other-extensions: + build-depends: + base, + -- bytes, + bytestring, + containers, + extra, + here, + lens, + mtl, + safe, + text, + sqlite-simple, + unliftio, + vector, + unison-core, + -- unison-codebase1, + -- unison-codebase, + -- unison-codebase-sqlite, + -- unison-util, + -- unison-util-serialization, + -- unison-util-term + default-language: Haskell2010 diff --git a/codebase2/core/unison-core.cabal b/codebase2/core/unison-core.cabal index b15a478987..3a4e1dfd5b 100644 --- a/codebase2/core/unison-core.cabal +++ b/codebase2/core/unison-core.cabal @@ -28,6 +28,8 @@ library containers, text, vector, + -- prelude-extras is deprecated in favor of base + prelude-extras, unison-util hs-source-dirs: . default-language: Haskell2010 diff --git a/codebase2/util-term/U/Util/Term.hs b/codebase2/util-term/U/Util/Term.hs index f3a6491415..bf77f5bc04 100644 --- a/codebase2/util-term/U/Util/Term.hs +++ b/codebase2/util-term/U/Util/Term.hs @@ -40,15 +40,3 @@ dependencies = execWriter . 
ABT.visit_ \case typeRef r = tell (mempty, pure r, mempty, mempty) termLink r = tell (mempty, mempty, pure r, mempty) typeLink r = tell (mempty, mempty, mempty, pure r) - - - --- fold :: Monad m => --- (text -> m ()) -> --- (termRef -> m ()) -> --- (typeRef -> m ()) -> --- (termLink -> m ()) -> --- (typeLink -> m ()) -> --- ABT.Term (Term.F' text termRef typeRef termLink typeLink vt) v a -> --- m () --- fold = error "todo: U.Util.TermUtil.fold" diff --git a/stack.yaml b/stack.yaml index d7f2f01ad2..66afb2c5c9 100644 --- a/stack.yaml +++ b/stack.yaml @@ -9,7 +9,7 @@ packages: - parser-typechecker - unison-core -# - codebase-convert-1to2 +- codebase-convert-1to2 - codebase1/codebase - codebase2/codebase - codebase2/codebase-sqlite From e73811326ce675b7832b6e37c5bb930c5e24ce2b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 24 Feb 2021 17:53:27 -0500 Subject: [PATCH 104/225] move ... stuff --- .../lib/U/Codebase/Sync/V2V2.hs | 123 ---- codebase1/codebase/Unison/Codebase/V1/ABT.hs | 625 +----------------- .../Unison/Codebase/V1/Serialization/V1.hs | 4 +- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 4 + .../U/Codebase/Sqlite/Queries.hs | 34 +- .../U/Codebase/Sqlite/Sync22.hs | 130 ++++ codebase2/codebase-sqlite/sql/create.sql | 13 +- .../unison-codebase-sqlite.cabal | 2 + codebase2/codebase-sync/U/Codebase/Sync.hs | 52 ++ .../codebase-sync/unison-codebase-sync.cabal | 24 +- .../Unison => codebase2/util/U}/Util/Cache.hs | 9 +- codebase2/util/unison-util.cabal | 2 + hie.yaml | 7 +- .../src/Unison/Builtin/Decls.hs | 2 - .../src/Unison/Codebase/Branch.hs | 2 +- .../src/Unison/Codebase/Causal.hs | 2 +- .../src/Unison/Codebase/FileCodebase.hs | 2 +- parser-typechecker/tests/Unison/Test/Cache.hs | 2 +- .../unison-parser-typechecker.cabal | 4 +- 19 files changed, 250 insertions(+), 793 deletions(-) delete mode 100644 codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs create mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs create mode 100644 
codebase2/codebase-sync/U/Codebase/Sync.hs rename codebase-convert-2to2/unison-codebase-sync-2to2.cabal => codebase2/codebase-sync/unison-codebase-sync.cabal (58%) rename {parser-typechecker/src/Unison => codebase2/util/U}/Util/Cache.hs (92%) diff --git a/codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs b/codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs deleted file mode 100644 index 49d523a2fc..0000000000 --- a/codebase-convert-2to2/lib/U/Codebase/Sync/V2V2.hs +++ /dev/null @@ -1,123 +0,0 @@ -{-# LANGUAGE BlockArguments #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} - -module U.Codebase.Sync.V2V2 where - --- localSyncFile --- (srcPath :: CodebasePath) --- (destPath :: CodebasePath) --- (root :: Maybe ShortBranchHash) --- (path :: UnisonPath) --- = error "todo" - --- localSyncSql --- (srcDb :: Connection) --- (destDb :: Connection) --- (root :: Maybe ShortBranchHash) --- (path :: UnisonPath) --- = error "todo" - --- data Reference t h = Builtin t | Derived h Pos --- -- |The 0-based index in a definition component/cycle --- newtype Pos = Pos { unPos :: Word64 } --- data RefId h = RefId h Pos --- data TermRef t h = TermRef (Reference t h) | TermCon (Reference t h) ConstructorId --- newtype ConstructorId = ConstructorId { unConstructorId :: Word64 } --- data TermRefId h = TermRefId (RefId h) | TermConId (RefId h) ConstructorId - -import Data.Foldable (traverse_) -import Control.Monad.Extra (ifM) -import Data.Foldable (Foldable(foldl')) -data TypeDependency y = YType y -data TermDependency e y = ETerm e | EType y -data PatchDependency e y = PTerm e | PType y -data BranchDependency b e y p = Branch b | BTerm e | BType y | BPatch p - -data Sync m b e y p = Sync - { rootBranch :: m b - , termMissingDependencies :: e -> m [TermDependency e y] - , typeMissingDependencies :: y -> m [TypeDependency y] - , patchMissingDependencies :: p -> m [PatchDependency e y] - , branchMissingDependencies :: b -> m 
[BranchDependency b e y p] - -- returns True if it does some real work, False if it skips / short circuits - -- It should be expected that these functions can be called multiple times - -- for the same arguments. - , syncTerm :: e -> m Bool - , syncType :: y -> m Bool - , syncPatch :: p -> m Bool - , syncBranch :: b -> m Bool - } - --- | Progress callbacks. --- There's no notion of "work remaining" captured here, because that would require --- this algorithm to keep dependencies in memory, which may be intractable. --- An implementation, however, can use the `need*` callbacks to track this in `m`. -data Progress m b e y p = Progress - { needBranch :: b -> m () - , needTerm :: e -> m () - , needType :: y -> m () - , needPatch :: p -> m () - , doneBranch :: b -> m () - , doneTerm :: e -> m () - , doneType :: y -> m () - , donePatch :: p -> m () - , allDone :: m () - } - -sync :: forall m b e y p. Monad m => Sync m b e y p -> Progress m b e y p -> m () -sync Sync{..} Progress{..} = do b <- rootBranch; go ([], [], [], [b]) where - go :: ([y],[e],[p],[b]) -> m () - go (y : ys, es, ps, bs) = - typeMissingDependencies y >>= \case - [] -> ifM (syncType y) (doneType y) (go (ys, es, ps, bs)) - ydeps -> do - let ys' = [y | YType y <- ydeps] - traverse_ needType ys' - go (ys' ++ y : ys, es, ps, bs) - - go ([], (e : es), ps, bs) = - termMissingDependencies e >>= \case - [] -> ifM (syncTerm e) (doneTerm e) (go ([], es, ps, bs)) - edeps -> do - let (ys', es') = foldl' partitionTermDeps mempty edeps - traverse_ needType ys' - traverse_ needTerm es' - go (ys', es' ++ e : es, ps, bs) - - go ([], [], (p : ps), bs) = - patchMissingDependencies p >>= \case - [] -> ifM (syncPatch p) (donePatch p) (go ([], [], ps, bs)) - pdeps -> do - let (ys', es') = foldl' partitionPatchDeps mempty pdeps - traverse_ needType ys' - traverse_ needTerm es' - go (ys', es', p : ps, bs) - - go ([], [], [], (b : bs)) = error "todo" - branchMissingDependencies b >>= \case - [] -> ifM (syncBranch b) (doneBranch 
b) (go ([], [], [], bs)) - bdeps -> do - let (ys', es', ps', bs') = foldl' partitionBranchDeps mempty bdeps - traverse_ needType ys' - traverse_ needTerm es' - traverse_ needPatch ps' - traverse_ needBranch bs' - go (ys', es', ps', bs' ++ b : bs) - - go ([], [], [], []) = allDone - - partitionTermDeps (ys, es) = \case - EType y -> (y : ys, es) - ETerm e -> (ys, e : es) - - partitionPatchDeps (ys, es) = \case - PType y -> (y : ys, es) - PTerm e -> (ys, e : es) - - partitionBranchDeps (ys, es, ps, bs) = \case - BType y -> (y : ys, es, ps, bs) - BTerm e -> (ys, e : es, ps, bs) - BPatch p -> (ys, es, p :ps, bs) - Branch b -> (ys, es, ps, b : bs) diff --git a/codebase1/codebase/Unison/Codebase/V1/ABT.hs b/codebase1/codebase/Unison/Codebase/V1/ABT.hs index deaffaa51a..c8b7d39016 100644 --- a/codebase1/codebase/Unison/Codebase/V1/ABT.hs +++ b/codebase1/codebase/Unison/Codebase/V1/ABT.hs @@ -37,118 +37,6 @@ data Term f v a = Term {freeVars :: Set v, annotation :: a, out :: ABT f v (Term class Ord v => Var v where freshIn :: Set v -> v -> v --- data V v = Free v | Bound v deriving (Eq,Ord,Show,Functor) - --- unvar :: V v -> v --- unvar (Free v) = v --- unvar (Bound v) = v - --- instance Var v => Var (V v) where --- freshIn s v = freshIn (Set.map unvar s) <$> v - --- newtype Path s t a b m = Path { focus :: s -> Maybe (a, b -> Maybe t, m) } - --- here :: Monoid m => Path s t s t m --- here = Path $ \s -> Just (s, Just, mempty) - --- instance Semigroup (Path s t a b m) where --- (<>) = mappend - --- instance Monoid (Path s t a b m) where --- mempty = Path (const Nothing) --- mappend (Path p1) (Path p2) = Path p3 where --- p3 s = p1 s <|> p2 s - --- type Path' f g m = forall a v . 
Var v => Path (Term f v a) (Term f (V v) a) (Term g v a) (Term g (V v) a) m - --- compose :: Monoid m => Path s t a b m -> Path a b a' b' m -> Path s t a' b' m --- compose (Path p1) (Path p2) = Path p3 where --- p3 s = do --- (get1,set1,m1) <- p1 s --- (get2,set2,m2) <- p2 get1 --- pure (get2, set2 >=> set1, m1 `mappend` m2) - --- at :: Path s t a b m -> s -> Maybe a --- at p s = (\(a,_,_) -> a) <$> focus p s - --- modify' :: Path s t a b m -> (m -> a -> b) -> s -> Maybe t --- modify' p f s = focus p s >>= \(get,set,m) -> set (f m get) - --- wrap :: (Functor f, Foldable f, Var v) => v -> Term f (V v) a -> (V v, Term f (V v) a) --- wrap v t = --- if Set.member (Free v) (freeVars t) --- then let v' = fresh t (Bound v) in (v', rename (Bound v) v' t) --- else (Bound v, t) - --- wrap' :: (Functor f, Foldable f, Var v) --- => v -> Term f (V v) a -> (V v -> Term f (V v) a -> c) -> c --- wrap' v t f = uncurry f (wrap v t) - --- -- | Return the list of all variables bound by this ABT --- bound' :: Foldable f => Term f v a -> [v] --- bound' t = case out t of --- Abs v t -> v : bound' t --- Cycle t -> bound' t --- Tm f -> Foldable.toList f >>= bound' --- _ -> [] - --- annotateBound' :: (Ord v, Functor f, Foldable f) => Term f v a0 -> Term f v [v] --- annotateBound' t = snd <$> annotateBound'' t - --- -- Annotate the tree with the set of bound variables at each node. 
--- annotateBound :: (Ord v, Foldable f, Functor f) => Term f v a -> Term f v (a, Set v) --- annotateBound = go Set.empty where --- go bound t = let a = (annotation t, bound) in case out t of --- Var v -> annotatedVar a v --- Cycle body -> cycle' a (go bound body) --- Abs x body -> abs' a x (go (Set.insert x bound) body) --- Tm body -> tm' a (go bound <$> body) - --- annotateBound'' :: (Ord v, Functor f, Foldable f) => Term f v a -> Term f v (a, [v]) --- annotateBound'' = go [] where --- go env t = let a = (annotation t, env) in case out t of --- Abs v body -> abs' a v (go (v : env) body) --- Cycle body -> cycle' a (go env body) --- Tm f -> tm' a (go env <$> f) --- Var v -> annotatedVar a v - --- -- | Return the set of all variables bound by this ABT --- bound :: (Ord v, Foldable f) => Term f v a -> Set v --- bound t = Set.fromList (bound' t) - --- -- | `True` if the term has no free variables, `False` otherwise --- isClosed :: Term f v a -> Bool --- isClosed t = Set.null (freeVars t) - --- -- | `True` if `v` is a member of the set of free variables of `t` --- isFreeIn :: Ord v => v -> Term f v a -> Bool --- isFreeIn v t = Set.member v (freeVars t) - --- -- | Replace the annotation with the given argument. --- annotate :: a -> Term f v a -> Term f v a --- annotate a (Term fvs _ out) = Term fvs a out - --- vmap :: (Functor f, Foldable f, Ord v2) => (v -> v2) -> Term f v a -> Term f v2 a --- vmap f (Term _ a out) = case out of --- Var v -> annotatedVar a (f v) --- Tm fa -> tm' a (fmap (vmap f) fa) --- Cycle r -> cycle' a (vmap f r) --- Abs v body -> abs' a (f v) (vmap f body) - --- amap :: (Functor f, Foldable f, Ord v) => (a -> a2) -> Term f v a -> Term f v a2 --- amap = amap' . 
const - --- amap' :: (Functor f, Foldable f, Ord v) => (Term f v a -> a -> a2) -> Term f v a -> Term f v a2 --- amap' f t@(Term _ a out) = case out of --- Var v -> annotatedVar (f t a) v --- Tm fa -> tm' (f t a) (fmap (amap' f) fa) --- Cycle r -> cycle' (f t a) (amap' f r) --- Abs v body -> abs' (f t a) v (amap' f body) - --- -- | Modifies the annotations in this tree --- instance Functor f => Functor (Term f v) where --- fmap f (Term fvs a sub) = Term fvs (f a) (fmap (fmap f) sub) - extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a extraMap p (Term fvs a sub) = Term fvs a (go p sub) where go :: Functor g => (forall k . f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) @@ -158,254 +46,18 @@ extraMap p (Term fvs a sub) = Term fvs a (go p sub) where Abs v r -> Abs v (extraMap p r) Tm x -> Tm (fmap (extraMap p) (p x)) --- pattern Var' v <- Term _ _ (Var v) --- pattern Cycle' vs t <- Term _ _ (Cycle (AbsN' vs t)) --- -- pattern Abs' v body <- Term _ _ (Abs v body) --- pattern Abs' subst <- (unabs1 -> Just subst) --- pattern AbsN' vs body <- (unabs -> (vs, body)) --- pattern Tm' f <- Term _ _ (Tm f) --- pattern CycleA' a avs t <- Term _ a (Cycle (AbsNA' avs t)) --- pattern AbsNA' avs body <- (unabsA -> (avs, body)) --- pattern Abs1NA' avs body <- (unabs1A -> Just (avs, body)) - --- unabsA :: Term f v a -> ([(a,v)], Term f v a) --- unabsA (Term _ a (Abs hd body)) = --- let (tl, body') = unabsA body in ((a,hd) : tl, body') --- unabsA t = ([], t) - --- unabs1A :: Term f v a -> Maybe ([(a,v)], Term f v a) --- unabs1A t = case unabsA t of --- ([], _) -> Nothing --- x -> Just x - --- var :: v -> Term f v () --- var = annotatedVar () - -annotatedVar :: a -> v -> Term f v a -annotatedVar a v = Term (Set.singleton v) a (Var v) +var :: a -> v -> Term f v a +var a v = Term (Set.singleton v) a (Var v) abs :: Ord v => a -> v -> Term f v a -> Term f v a abs a v body = Term (Set.delete v (freeVars body)) a (Abs v body) --- absr :: (Functor f, 
Foldable f, Var v) => v -> Term f (V v) () -> Term f (V v) () --- absr = absr' () - --- -- | Rebuild an `abs`, renaming `v` to avoid capturing any `Free v` in `body`. --- absr' :: (Functor f, Foldable f, Var v) => a -> v -> Term f (V v) a -> Term f (V v) a --- absr' a v body = wrap' v body $ \v body -> abs' a v body - --- absChain :: Ord v => [v] -> Term f v () -> Term f v () --- absChain vs t = foldr abs t vs - --- absCycle :: Ord v => [v] -> Term f v () -> Term f v () --- absCycle vs t = cycle $ absChain vs t - --- absChain' :: Ord v => [(a, v)] -> Term f v a -> Term f v a --- absChain' vs t = foldr (\(a,v) t -> abs' a v t) t vs - tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) cycle :: a -> Term f v a -> Term f v a cycle a t = Term (freeVars t) a (Cycle t) --- cycler' :: (Functor f, Foldable f, Var v) => a -> [v] -> Term f (V v) a -> Term f (V v) a --- cycler' a vs t = cycle' a $ foldr (absr' a) t vs - --- cycler :: (Functor f, Foldable f, Var v) => [v] -> Term f (V v) () -> Term f (V v) () --- cycler = cycler' () - --- into :: (Foldable f, Ord v) => ABT f v (Term f v ()) -> Term f v () --- into = into' () - --- into' :: (Foldable f, Ord v) => a -> ABT f v (Term f v a) -> Term f v a --- into' a abt = case abt of --- Var x -> annotatedVar a x --- Cycle t -> cycle' a t --- Abs v r -> abs' a v r --- Tm t -> tm' a t - --- -- | renames `old` to `new` in the given term, ignoring subtrees that bind `old` --- rename :: (Foldable f, Functor f, Var v) => v -> v -> Term f v a -> Term f v a --- rename old new t0@(Term fvs ann t) = --- if Set.notMember old fvs then t0 --- else case t of --- Var v -> if v == old then annotatedVar ann new else t0 --- Cycle body -> cycle' ann (rename old new body) --- Abs v body -> --- -- v shadows old, so skip this subtree --- if v == old then abs' ann v body - --- -- the rename would capture new, freshen this Abs --- -- to make that no longer true, then 
proceed with --- -- renaming `old` to `new` --- else if v == new then --- let v' = freshIn (Set.fromList [new,old] <> freeVars body) v --- in abs' ann v' (rename old new (rename v v' body)) - --- -- nothing special, just rename inside body of Abs --- else abs' ann v (rename old new body) --- Tm v -> tm' ann (fmap (rename old new) v) - --- changeVars :: (Foldable f, Functor f, Var v) => Map v v -> Term f v a -> Term f v a --- changeVars m t = case out t of --- Abs v body -> case Map.lookup v m of --- Nothing -> abs' (annotation t) v (changeVars m body) --- Just v' -> abs' (annotation t) v' (changeVars m body) --- Cycle body -> cycle' (annotation t) (changeVars m body) --- Var v -> case Map.lookup v m of --- Nothing -> t --- Just v -> annotatedVar (annotation t) v --- Tm v -> tm' (annotation t) (changeVars m <$> v) - --- -- | Produce a variable which is free in both terms --- freshInBoth :: Var v => Term f v a -> Term f v a -> v -> v --- freshInBoth t1 t2 = freshIn $ Set.union (freeVars t1) (freeVars t2) - --- fresh :: Var v => Term f v a -> v -> v --- fresh t = freshIn (freeVars t) - --- freshEverywhere :: (Foldable f, Var v) => Term f v a -> v -> v --- freshEverywhere t = freshIn . Set.fromList $ allVars t - --- allVars :: Foldable f => Term f v a -> [v] --- allVars t = case out t of --- Var v -> [v] --- Cycle body -> allVars body --- Abs v body -> v : allVars body --- Tm v -> Foldable.toList v >>= allVars - --- freshes :: Var v => Term f v a -> [v] -> [v] --- freshes = freshes' . freeVars - --- freshes' :: Var v => Set v -> [v] -> [v] --- freshes' used vs = evalState (traverse freshenS vs) used - --- -- | Freshens the given variable wrt. the set of used variables --- -- tracked by state. Adds the result to the set of used variables. --- freshenS :: (Var v, MonadState (Set v) m) => v -> m v --- freshenS = freshenS' id - --- -- | A more general version of `freshenS` that uses a lens --- -- to focus on used variables inside state. 
--- freshenS' :: (Var v, MonadState s m) => Lens' s (Set v) -> v -> m v --- freshenS' uvLens v = do --- usedVars <- use uvLens --- let v' = freshIn usedVars v --- uvLens .= Set.insert v' usedVars --- pure v' - --- -- | `subst v e body` substitutes `e` for `v` in `body`, avoiding capture by --- -- renaming abstractions in `body` --- subst --- :: (Foldable f, Functor f, Var v) --- => v --- -> Term f v a --- -> Term f v a --- -> Term f v a --- subst v r = subst' (const r) v (freeVars r) - --- -- Slightly generalized version of `subst`, the replacement action is handled --- -- by the function `replace`, which is given the annotation `a` at the point --- -- of replacement. `r` should be the set of free variables contained in the --- -- term returned by `replace`. See `substInheritAnnotation` for an example usage. --- subst' :: (Foldable f, Functor f, Var v) => (a -> Term f v a) -> v -> Set v -> Term f v a -> Term f v a --- subst' replace v r t2@(Term fvs ann body) --- | Set.notMember v fvs = t2 -- subtrees not containing the var can be skipped --- | otherwise = case body of --- Var v' | v == v' -> replace ann -- var match; perform replacement --- | otherwise -> t2 -- var did not match one being substituted; ignore --- Cycle body -> cycle' ann (subst' replace v r body) --- Abs x _ | x == v -> t2 -- x shadows v; ignore subtree --- Abs x e -> abs' ann x' e' --- where x' = freshIn (fvs `Set.union` r) x --- -- rename x to something that cannot be captured by `r` --- e' = if x /= x' then subst' replace v r (rename x x' e) --- else subst' replace v r e --- Tm body -> tm' ann (fmap (subst' replace v r) body) - --- -- Like `subst`, but the annotation of the replacement is inherited from --- -- the previous annotation at each replacement point. 
--- substInheritAnnotation :: (Foldable f, Functor f, Var v) --- => v -> Term f v b -> Term f v a -> Term f v a --- substInheritAnnotation v r = --- subst' (\ann -> const ann <$> r) v (freeVars r) - --- substsInheritAnnotation --- :: (Foldable f, Functor f, Var v) --- => [(v, Term f v b)] --- -> Term f v a --- -> Term f v a --- substsInheritAnnotation replacements body = --- foldr (uncurry substInheritAnnotation) body (reverse replacements) - --- -- | `substs [(t1,v1), (t2,v2), ...] body` performs multiple simultaneous --- -- substitutions, avoiding capture --- substs --- :: (Foldable f, Functor f, Var v) --- => [(v, Term f v a)] --- -> Term f v a --- -> Term f v a --- substs replacements body = foldr (uncurry subst) body (reverse replacements) - --- -- Count the number times the given variable appears free in the term --- occurrences :: (Foldable f, Var v) => v -> Term f v a -> Int --- occurrences v t | not (v `isFreeIn` t) = 0 --- occurrences v t = case out t of --- Var v2 -> if v == v2 then 1 else 0 --- Cycle t -> occurrences v t --- Abs v2 t -> if v == v2 then 0 else occurrences v t --- Tm t -> foldl' (\s t -> s + occurrences v t) 0 $ Foldable.toList t - --- rebuildUp :: (Ord v, Foldable f, Functor f) --- => (f (Term f v a) -> f (Term f v a)) --- -> Term f v a --- -> Term f v a --- rebuildUp f (Term _ ann body) = case body of --- Var v -> annotatedVar ann v --- Cycle body -> cycle' ann (rebuildUp f body) --- Abs x e -> abs' ann x (rebuildUp f e) --- Tm body -> tm' ann (f $ fmap (rebuildUp f) body) - --- rebuildUp' :: (Ord v, Foldable f, Functor f) --- => (Term f v a -> Term f v a) --- -> Term f v a --- -> Term f v a --- rebuildUp' f (Term _ ann body) = case body of --- Var v -> f (annotatedVar ann v) --- Cycle body -> f $ cycle' ann (rebuildUp' f body) --- Abs x e -> f $ abs' ann x (rebuildUp' f e) --- Tm body -> f $ tm' ann (fmap (rebuildUp' f) body) - --- freeVarOccurrences :: (Traversable f, Ord v) => Set v -> Term f v a -> [(v, a)] --- freeVarOccurrences 
except t = --- [ (v, a) | (v,a) <- go $ annotateBound t, not (Set.member v except) ] --- where --- go e = case out e of --- Var v -> if Set.member v (snd $ annotation e) --- then [] --- else [(v, fst $ annotation e)] --- Cycle body -> go body --- Abs _ body -> go body --- Tm body -> foldMap go body - --- foreachSubterm --- :: (Traversable f, Applicative g, Ord v) --- => (Term f v a -> g b) --- -> Term f v a --- -> g [b] --- foreachSubterm f e = case out e of --- Var _ -> pure <$> f e --- Cycle body -> (:) <$> f e <*> foreachSubterm f body --- Abs _ body -> (:) <$> f e <*> foreachSubterm f body --- Tm body -> --- (:) --- <$> f e --- <*> (join . Foldable.toList <$> traverse (foreachSubterm f) body) - --- subterms :: (Ord v, Traversable f) => Term f v a -> [Term f v a] --- subterms t = runIdentity $ foreachSubterm pure t - -- | `visit f t` applies an effectful function to each subtree of -- `t` and sequences the results. When `f` returns `Nothing`, `visit` -- descends into the children of the current subtree. When `f` returns @@ -434,276 +86,3 @@ visit' f t = case out t of Cycle body -> cycle (annotation t) <$> visit' f body Abs x e -> abs (annotation t) x <$> visit' f e Tm body -> f body >>= (fmap (tm (annotation t)) . traverse (visit' f)) - --- -- | `visit` specialized to the `Identity` effect. --- visitPure :: (Traversable f, Ord v) --- => (Term f v a -> Maybe (Term f v a)) -> Term f v a -> Term f v a --- visitPure f = runIdentity . visit (fmap pure . f) - --- rewriteDown :: (Traversable f, Ord v) --- => (Term f v a -> Term f v a) --- -> Term f v a --- -> Term f v a --- rewriteDown f t = let t' = f t in case out t' of --- Var _ -> t' --- Cycle body -> cycle' (annotation t) (rewriteDown f body) --- Abs x e -> abs' (annotation t) x (rewriteDown f e) --- Tm body -> tm' (annotation t) (rewriteDown f `fmap` body) - --- data Subst f v a = --- Subst { freshen :: forall m v' . 
Monad m => (v -> m v') -> m v' --- , bind :: Term f v a -> Term f v a --- , bindInheritAnnotation :: forall b . Term f v b -> Term f v a --- , variable :: v } - --- unabs1 :: (Foldable f, Functor f, Var v) => Term f v a -> Maybe (Subst f v a) --- unabs1 (Term _ _ (Abs v body)) = Just (Subst freshen bind bindInheritAnnotation v) where --- freshen f = f v --- bind x = subst v x body --- bindInheritAnnotation x = substInheritAnnotation v x body --- unabs1 _ = Nothing - --- unabs :: Term f v a -> ([v], Term f v a) --- unabs (Term _ _ (Abs hd body)) = --- let (tl, body') = unabs body in (hd : tl, body') --- unabs t = ([], t) - --- reabs :: Ord v => [v] -> Term f v () -> Term f v () --- reabs vs t = foldr abs t vs - --- transform :: (Ord v, Foldable g, Functor f) --- => (forall a. f a -> g a) -> Term f v a -> Term g v a --- transform f tm = case out tm of --- Var v -> annotatedVar (annotation tm) v --- Abs v body -> abs' (annotation tm) v (transform f body) --- Tm subterms -> --- let subterms' = fmap (transform f) subterms --- in tm' (annotation tm) (f subterms') --- Cycle body -> cycle' (annotation tm) (transform f body) - --- -- Rebuild the tree annotations upward, starting from the leaves, --- -- using the Monoid to choose the annotation at intermediate nodes --- reannotateUp :: (Ord v, Foldable f, Functor f, Monoid b) --- => (Term f v a -> b) --- -> Term f v a --- -> Term f v (a, b) --- reannotateUp g t = case out t of --- Var v -> annotatedVar (annotation t, g t) v --- Cycle body -> --- let body' = reannotateUp g body --- in cycle' (annotation t, snd (annotation body')) body' --- Abs v body -> --- let body' = reannotateUp g body --- in abs' (annotation t, snd (annotation body')) v body' --- Tm body -> --- let --- body' = reannotateUp g <$> body --- ann = g t <> foldMap (snd . annotation) body' --- in tm' (annotation t, ann) body' - --- -- Find all subterms that match a predicate. Prune the search for speed. 
--- -- (Some patterns of pruning can cut the complexity of the search.) --- data FindAction x = Found x | Prune | Continue deriving Show --- find :: (Ord v, Foldable f, Functor f) --- => (Term f v a -> FindAction x) --- -> Term f v a --- -> [x] --- find p t = case p t of --- Found x -> x : go --- Prune -> [] --- Continue -> go --- where go = case out t of --- Var _ -> [] --- Cycle body -> Unison.Codebase.V1.ABT.find p body --- Abs _ body -> Unison.Codebase.V1.ABT.find p body --- Tm body -> Foldable.concat (Unison.Codebase.V1.ABT.find p <$> body) - --- find' :: (Ord v, Foldable f, Functor f) --- => (Term f v a -> Bool) --- -> Term f v a --- -> [Term f v a] --- find' p = Unison.Codebase.V1.ABT.find (\t -> if p t then Found t else Continue) - --- instance (Foldable f, Functor f, Eq1 f, Var v) => Eq (Term f v a) where --- -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable --- t1 == t2 = go (out t1) (out t2) where --- go (Var v) (Var v2) | v == v2 = True --- go (Cycle t1) (Cycle t2) = t1 == t2 --- go (Abs v1 body1) (Abs v2 body2) = --- if v1 == v2 then body1 == body2 --- else let v3 = freshInBoth body1 body2 v1 --- in rename v1 v3 body1 == rename v2 v3 body2 --- go (Tm f1) (Tm f2) = f1 ==# f2 --- go _ _ = False - --- instance (Foldable f, Functor f, Ord1 f, Var v) => Ord (Term f v a) where --- -- alpha equivalence, works by renaming any aligned Abs ctors to use a common fresh variable --- t1 `compare` t2 = go (out t1) (out t2) where --- go (Var v) (Var v2) = v `compare` v2 --- go (Cycle t1) (Cycle t2) = t1 `compare` t2 --- go (Abs v1 body1) (Abs v2 body2) = --- if v1 == v2 then body1 `compare` body2 --- else let v3 = freshInBoth body1 body2 v1 --- in rename v1 v3 body1 `compare` rename v2 v3 body2 --- go (Tm f1) (Tm f2) = compare1 f1 f2 --- go t1 t2 = tag t1 `compare` tag t2 --- tag (Var _) = 0 :: Word --- tag (Tm _) = 1 --- tag (Abs _ _) = 2 --- tag (Cycle _) = 3 - --- components :: Var v => [(v, Term f v a)] -> [[(v, Term f v 
a)]] --- components = Components.components freeVars - --- -- Converts to strongly connected components while preserving the --- -- order of definitions. Satisfies `join (orderedComponents bs) == bs`. --- orderedComponents' :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] --- orderedComponents' tms = go [] Set.empty tms --- where --- go [] _ [] = [] --- go [] deps (hd:rem) = go [hd] (deps <> freeVars (snd hd)) rem --- go cur deps rem = case findIndex isDep rem of --- Nothing -> reverse cur : let (hd,tl) = splitAt 1 rem --- in go hd (depsFor hd) tl --- Just i -> go (reverse newMembers ++ cur) deps' (drop (i+1) rem) --- where deps' = deps <> depsFor newMembers --- newMembers = take (i+1) rem --- where --- depsFor = foldMap (freeVars . snd) --- isDep (v, _) = Set.member v deps - --- -- Like `orderedComponents'`, but further break up cycles and move --- -- cyclic subcycles before other components in the same cycle. --- -- Tweak suggested by @aryairani. --- -- --- -- Example: given `[[x],[ping,r,s,pong]]`, where `ping` and `pong` --- -- are mutually recursive but `r` and `s` are uninvolved, this produces: --- -- `[[x], [ping,pong], [r], [s]]`. --- orderedComponents :: Var v => [(v, Term f v a)] -> [[(v, Term f v a)]] --- orderedComponents bs0 = tweak =<< orderedComponents' bs0 where --- tweak :: Var v => [(v,Term f v a)] -> [[(v,Term f v a)]] --- tweak bs@(_:_:_) = case takeWhile isCyclic (components bs) of --- [] -> [bs] --- cycles -> cycles <> orderedComponents rest --- where --- rest = [ (v,b) | (v,b) <- bs, Set.notMember v cycleVars ] --- cycleVars = Set.fromList (fst <$> join cycles) --- tweak bs = [bs] -- any cycle with < 2 bindings is left alone --- isCyclic [(v,b)] = Set.member v (freeVars b) --- isCyclic bs = length bs > 1 - --- -- todo: --- -- Hash a strongly connected component and sort its definitions into a canonical order. 
--- hashComponent :: --- (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) --- => Map.Map v (Term f v a) -> (h, [(v, Term f v a)]) --- hashComponent byName = let --- ts = Map.toList byName --- embeds = [ (v, void (transform Embed t)) | (v,t) <- ts ] --- vs = fst <$> ts --- -- make closed terms for each element of the component --- -- [ let x = ..., y = ..., in x --- -- , let x = ..., y = ..., in y ] --- -- so that we can then hash them (closed terms can be hashed) --- -- so that we can sort them by hash. this is the "canonical, name-agnostic" --- -- hash that yields the canonical ordering of the component. --- tms = [ (v, absCycle vs (tm $ Component (snd <$> embeds) (var v))) | v <- vs ] --- hashed = [ ((v,t), hash t) | (v,t) <- tms ] --- sortedHashed = sortOn snd hashed --- overallHash = Hashable.accumulate (Hashable.Hashed . snd <$> sortedHashed) --- in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) - - --- -- Group the definitions into strongly connected components and hash --- -- each component. Substitute the hash of each component into subsequent --- -- components (using the `termFromHash` function). Requires that the --- -- overall component has no free variables. 
--- hashComponents --- :: (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Var v, Ord h, Accumulate h) --- => (h -> Word64 -> Word64 -> Term f v ()) --- -> Map.Map v (Term f v a) --- -> [(h, [(v, Term f v a)])] --- hashComponents termFromHash termsByName = let --- bound = Set.fromList (Map.keys termsByName) --- escapedVars = Set.unions (freeVars <$> Map.elems termsByName) `Set.difference` bound --- sccs = components (Map.toList termsByName) --- go _ [] = [] --- go prevHashes (component : rest) = let --- sub = substsInheritAnnotation (Map.toList prevHashes) --- (h, sortedComponent) = hashComponent $ Map.fromList [ (v, sub t) | (v, t) <- component ] --- size = fromIntegral (length sortedComponent) --- curHashes = Map.fromList [ (v, termFromHash h i size) | ((v, _),i) <- sortedComponent `zip` [0..]] --- newHashes = prevHashes `Map.union` curHashes --- newHashesL = Map.toList newHashes --- sortedComponent' = [ (v, substsInheritAnnotation newHashesL t) | (v, t) <- sortedComponent ] --- in (h, sortedComponent') : go newHashes rest --- in if Set.null escapedVars then go Map.empty sccs --- else error $ "can't hashComponents if bindings have free variables:\n " --- ++ show (map show (Set.toList escapedVars)) --- ++ "\n " ++ show (map show (Map.keys termsByName)) - --- -- Implementation detail of hashComponent --- data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) - --- instance (Hashable1 f, Functor f) => Hashable1 (Component f) where --- hash1 hashCycle hash c = case c of --- Component as a -> let --- (hs, hash) = hashCycle as --- toks = Hashable.Hashed <$> hs --- in Hashable.accumulate $ (Hashable.Tag 1 : toks) ++ [Hashable.Hashed (hash a)] --- Embed fa -> Hashable.hash1 hashCycle hash fa - --- -- | We ignore annotations in the `Term`, as these should never affect the --- -- meaning of the term. --- hash :: forall f v a h . 
(Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) --- => Term f v a -> h --- hash = hash' [] where --- hash' :: [Either [v] v] -> Term f v a -> h --- hash' env (Term _ _ t) = case t of --- Var v -> maybe die hashInt ind --- where lookup (Left cycle) = v `elem` cycle --- lookup (Right v') = v == v' --- ind = findIndex lookup env --- hashInt :: Int -> h --- hashInt i = Hashable.accumulate [Hashable.Nat $ fromIntegral i] --- die = error $ "unknown var in environment: " ++ show v --- ++ " environment = " ++ show env --- Cycle (AbsN' vs t) -> hash' (Left vs : env) t --- Cycle t -> hash' env t --- Abs v t -> hash' (Right v : env) t --- Tm t -> Hashable.hash1 (hashCycle env) (hash' env) t - --- hashCycle :: [Either [v] v] -> [Term f v a] -> ([h], Term f v a -> h) --- hashCycle env@(Left cycle : envTl) ts | length cycle == length ts = --- let --- permute p xs = case Vector.fromList xs of xs -> map (xs !) p --- hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] ts) --- pt = fst <$> sortOn snd hashed --- (p,ts') = unzip pt --- in case map Right (permute p cycle) ++ envTl of --- env -> (map (hash' env) ts', hash' env) --- hashCycle env ts = (map (hash' env) ts, hash' env) - --- -- | Use the `hash` function to efficiently remove duplicates from the list, preserving order. --- distinct :: forall f v h a proxy . (Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) --- => proxy h --- -> [Term f v a] -> [Term f v a] --- distinct _ ts = fst <$> sortOn snd m --- where m = Map.elems (Map.fromList (hashes `zip` (ts `zip` [0 :: Int .. 1]))) --- hashes = map hash ts :: [h] - --- -- | Use the `hash` function to remove elements from `t1s` that exist in `t2s`, preserving order. --- subtract :: forall f v h a proxy . 
(Functor f, Hashable1 f, Eq v, Show v, Var v, Ord h, Accumulate h) --- => proxy h --- -> [Term f v a] -> [Term f v a] -> [Term f v a] --- subtract _ t1s t2s = --- let skips = Set.fromList (map hash t2s :: [h]) --- in filter (\t -> Set.notMember (hash t) skips) t1s - --- instance (Show1 f, Show v) => Show (Term f v a) where --- -- annotations not shown --- showsPrec p (Term _ _ out) = case out of --- Var v -> \x -> "Var " ++ show v ++ x --- Cycle body -> ("Cycle " ++) . showsPrec p body --- Abs v body -> showParen True $ (show v ++) . showString ". " . showsPrec p body --- Tm f -> showsPrec1 p f diff --git a/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs b/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs index f14304255b..c676c7877c 100644 --- a/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs +++ b/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs @@ -168,8 +168,8 @@ getABT getVar getA getF = getList getVar >>= go [] 0 -> do tag <- getWord8 case tag of - 0 -> ABT.annotatedVar a . (env !!) <$> getLength - 1 -> ABT.annotatedVar a . (fvs !!) <$> getLength + 0 -> ABT.var a . (env !!) <$> getLength + 1 -> ABT.var a . (fvs !!) <$> getLength _ -> unknownTag "getABT.Var" tag 1 -> ABT.tm a <$> getF (go env fvs) 2 -> do diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index c189a9c19c..0bdd609519 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -40,6 +40,10 @@ newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId +-- |For generational garbage-collection; 0 is the oldest generation. 
+newtype Generation = Generation { unGeneration :: Word64 } deriving (Eq, Ord, Show) + deriving (Enum, FromField, ToField) via Word64 + instance Show PatchObjectId where show h = "PatchObjectId (" ++ show (unPatchObjectId h) ++ ")" diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index dd501e3b5b..56e12475b0 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -25,7 +25,7 @@ import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Data.ByteString (ByteString) import Data.Foldable (traverse_) import qualified Data.List.Extra as List -import Data.Maybe (fromJust) +import Data.Maybe (fromJust, isJust) import Data.String (fromString) import Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) @@ -37,7 +37,7 @@ import Debug.Trace (trace, traceM) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') import qualified U.Codebase.Referent as C.Referent -import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, HashId (..), ObjectId (..), TextId) +import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, HashId (..), ObjectId (..), TextId, Generation(..)) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent @@ -48,6 +48,7 @@ import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO) import Control.Monad (when) +import Data.Functor ((<&>)) -- * types @@ -263,18 +264,37 @@ updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| -- |Maybe we would generalize this to something other than NamespaceHash if we -- end up wanting to store other kinds of 
Causals here too. saveCausal :: DB m => CausalHashId -> BranchHashId -> m () -saveCausal self value = execute sql (self, value) where sql = [here| - INSERT INTO causal (self_hash_id, value_hash_id) - VALUES (?, ?) +saveCausal self value = execute sql (self, value, Generation 0) where sql = [here| + INSERT INTO causal (self_hash_id, value_hash_id, gc_generation) + VALUES (?, ?, ?) ON CONFLICT DO NOTHING |] +-- maybe: look at whether parent causal is "committed"; if so, then increment; +-- otherwise, don't. +getNurseryGeneration :: DB m => m Generation +getNurseryGeneration = query_ sql <&> \case + [] -> Generation 0 + [fromOnly -> g] -> Generation g + (fmap fromOnly -> gs) -> + error $ "How did I get multiple values out of a MAX()? " ++ show gs + where sql = [here| + SELECT MAX(gc_generation) FROM causal; + |] + loadCausalValueHashId :: EDB m => CausalHashId -> m BranchHashId -loadCausalValueHashId id = - queryAtom sql (Only id) >>= orError (UnknownCausalHashId id) where sql = [here| +loadCausalValueHashId chId@(CausalHashId id) = + loadMaybeCausalValueHashId (id) >>= orError (UnknownCausalHashId chId) + +loadMaybeCausalValueHashId :: DB m => HashId -> m (Maybe BranchHashId) +loadMaybeCausalValueHashId id = + queryAtom sql (Only id) where sql = [here| SELECT value_hash_id FROM causal WHERE self_hash_id = ? |] +isCausalHash :: DB m => HashId -> m Bool +isCausalHash = fmap isJust . 
loadMaybeCausalValueHashId + -- todo: do a join here loadBranchObjectIdByCausalHashId id = queryAtom sql (Only id) where sql = [here| diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs new file mode 100644 index 0000000000..fd8c62e3d0 --- /dev/null +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -0,0 +1,130 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} + +module U.Codebase.Sqlite.Sync22 where + +import Control.Monad (filterM, (<=<)) +import Control.Monad.Except (ExceptT, MonadError) +import Control.Monad.Extra (ifM) +import Control.Monad.RWS (MonadIO, MonadReader (reader)) +import Control.Monad.Reader (ReaderT) +import Control.Monad.Trans.Except (withExceptT) +import Data.Functor ((<&>)) +import Data.Word (Word64) +import Database.SQLite.Simple (Connection) +import U.Codebase.Sqlite.DbId +import qualified U.Codebase.Sqlite.Queries as Q +import U.Codebase.Sync +import qualified U.Codebase.Sync as Sync +import U.Util.Cache (Cache) +import qualified U.Util.Cache as Cache +import Data.Foldable (toList) + +data Entity = O ObjectId | C CausalHashId + +data Error = SrcDB Q.Integrity | DestDB Q.Integrity + +data Env = Env + { srcDB :: Connection, + destDB :: Connection + } + +-- data Mappings + +-- We load an object from the source; it has a bunch of dependencies. +-- Some of these dependencies exist at the destination, some don't. +-- For the ones that do, look up their ids, and update the thingie as you write it +-- For the ones that don't, copy them (then you will know their ids), and update the thingie. +-- If you want, you can try to cache that lookup. 
+ +-- sync22 :: +-- ( MonadIO m, +-- MonadError Error m, +-- MonadReader TwoConnections m +-- ) => +-- Sync m Entity +-- sync22 = Sync roots trySync +-- where +-- roots = runSrc $ fmap (\h -> [C h]) Q.loadNamespaceRoot + +trySync :: + (MonadIO m, MonadError Error m, MonadReader Env m) => + Cache m TextId TextId -> + Cache m HashId HashId -> + Cache m CausalHashId CausalHashId -> + Cache m ObjectId ObjectId -> + Generation -> + Entity -> + m (TrySyncResult Entity) +trySync tCache hCache cCache oCache gc = \case + -- for causals, we need to get the value_hash_id of the thingo + -- - maybe enqueue their parents + -- - enqueue the self_ and value_ hashes + -- - enqueue the namespace object, if present + C chId -> do + chId' <- syncCausalHash chId + -- we're going to assume that if the dest has this in its + -- causal table, then it's safe to short-circuit + ifM + (runDest $ Q.isCausalHash chId') + (pure Sync.PreviouslyDone) + ( do + missingParents <- (fmap . fmap) C $ + runSrc (Q.loadCausalParents chId) + >>= filterM (\p -> syncCausalHash p >>= runDest . Q.isCausalHash) + -- optionally get branch object id for this causal + mayBoId <- (fmap . fmap) O $ + runSrc (Q.loadCausalValueHashId chId) + >>= runSrc . Q.maybeObjectIdForAnyHashId . unBranchHashId + -- the parents should probably be real like CausalIds, that "statically" + -- know they're in that Causal table; not just reuse the hash id. + -- that would make the missingParents real dependencies instead of weakrefs + pure (Sync.Done (toList mayBoId ++ toList missingParents)) + ) + + -- objects are the hairiest. 
obviously, if they + -- exist, we're done; otherwise we do some fancy stuff + O oId -> error "todo" + -- O oId -> Cache.lookup oCache oId >>= \case + -- Just{} -> pure Sync.PreviouslyDone + -- Nothing -> do + + -- error "todo" + -- pure Sync.Done + where + syncTextLiteral = Cache.apply tCache \tId -> do + t <- runSrc $ Q.loadTextById tId + runDest $ Q.saveText t + + syncHashLiteral = Cache.apply hCache \hId -> do + b32hex <- runSrc $ Q.loadHashById hId + runDest $ Q.saveHash b32hex + + syncCausalHash = syncHashLiteral . unCausalHashId + syncBranchHashId = fmap BranchHashId . syncHashLiteral . unBranchHashId + + syncObject = error "todo" + +-- syncCausal chId = do +-- value + +-- Q: Do we want to cache corresponding ID mappings? +-- A: Yes, but not yet + +runSrc :: + (MonadError Error m, MonadReader Env m) => + ReaderT Connection (ExceptT Q.Integrity m) a -> + m a +runSrc = error "todo" -- withExceptT SrcDB . (reader fst >>=) + +runDest :: + (MonadError Error m, MonadReader Env m) => + ReaderT Connection (ExceptT Q.Integrity m) a -> + m a +runDest = error "todo" -- withExceptT SrcDB . 
(reader fst >>=) + +-- syncs coming from git: +-- - pull a specified remote causal (Maybe CausalHash) into the local database +-- - and then maybe do some stuff +-- syncs coming from \ No newline at end of file diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 0003672373..b44972c9eb 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -57,11 +57,22 @@ CREATE INDEX object_type_id ON object(type_id); CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), - gc_generation INTEGER NOT NULL DEFAULT 0 + gc_generation INTEGER NOT NULL ); CREATE INDEX causal_value_hash_id ON causal(value_hash_id); CREATE INDEX causal_gc_generation ON causal(gc_generation); +-- proposed: +-- CREATE TABLE causal ( +-- causal_id INTEGER PRIMARY KEY NOT NULL, +-- self_hash_id INTEGER NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), +-- value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), +-- gc_generation INTEGER NOT NULL DEFAULT 0 +-- ); +-- CREATE INDEX causal_self_hash_id ON causal(self_hash_id); +-- CREATE INDEX causal_value_hash_id ON causal(value_hash_id); +-- CREATE INDEX causal_gc_generation ON causal(gc_generation); + -- valueHash : Hash = hash(value) -- db.saveValue(valueHash, value) diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index 033b7b655b..884aafaeb5 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -35,6 +35,7 @@ library U.Codebase.Sqlite.Referent U.Codebase.Sqlite.Serialization U.Codebase.Sqlite.Symbol + U.Codebase.Sqlite.Sync22 U.Codebase.Sqlite.SyncEntity U.Codebase.Sqlite.Term.Format U.Codebase.Sqlite.Types @@ -56,6 +57,7 @@ library unliftio, vector, 
unison-codebase, + unison-codebase-sync, unison-core, unison-util, unison-util-serialization, diff --git a/codebase2/codebase-sync/U/Codebase/Sync.hs b/codebase2/codebase-sync/U/Codebase/Sync.hs new file mode 100644 index 0000000000..98d8895228 --- /dev/null +++ b/codebase2/codebase-sync/U/Codebase/Sync.hs @@ -0,0 +1,52 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module U.Codebase.Sync where + +-- localSyncFile +-- (srcPath :: CodebasePath) +-- (destPath :: CodebasePath) +-- (root :: Maybe ShortBranchHash) +-- (path :: UnisonPath) +-- = error "todo" + +-- localSyncSql +-- (srcDb :: Connection) +-- (destDb :: Connection) +-- (root :: Maybe ShortBranchHash) +-- (path :: UnisonPath) +-- = error "todo" + +-- data Reference t h = Builtin t | Derived h Pos +-- -- |The 0-based index in a definition component/cycle +-- newtype Pos = Pos { unPos :: Word64 } +-- data RefId h = RefId h Pos +-- data TermRef t h = TermRef (Reference t h) | TermCon (Reference t h) ConstructorId +-- newtype ConstructorId = ConstructorId { unConstructorId :: Word64 } +-- data TermRefId h = TermRefId (RefId h) | TermConId (RefId h) ConstructorId + +import Data.Foldable (traverse_) + +data TrySyncResult h = Missing [h] | Done [h] | PreviouslyDone + +data Sync m h = Sync + { roots :: m [h], + trySync :: h -> m (TrySyncResult h) + } + +data Progress m h = Progress + { need :: h -> m (), + done :: h -> m (), + allDone :: m () + } + +sync :: forall m h. 
Monad m => Sync m h -> Progress m h -> m () +sync Sync{..} Progress{..} = do roots >>= go where + go :: [h] -> m () + go (h : hs) = trySync h >>= \case + Missing deps -> traverse_ need deps >> go (deps ++ h : hs) + Done externalDeps -> done h >> go (externalDeps ++ hs) + PreviouslyDone -> go hs + go [] = allDone \ No newline at end of file diff --git a/codebase-convert-2to2/unison-codebase-sync-2to2.cabal b/codebase2/codebase-sync/unison-codebase-sync.cabal similarity index 58% rename from codebase-convert-2to2/unison-codebase-sync-2to2.cabal rename to codebase2/codebase-sync/unison-codebase-sync.cabal index 15d960213f..a9381c0886 100644 --- a/codebase-convert-2to2/unison-codebase-sync-2to2.cabal +++ b/codebase2/codebase-sync/unison-codebase-sync.cabal @@ -3,7 +3,7 @@ cabal-version: 2.2 -- 'cabal init'. For further documentation, see -- http://haskell.org/cabal/users-guide/ -name: unison-codebase-sync-2to2 +name: unison-codebase-sync version: 0.1.0.0 -- synopsis: -- description: @@ -14,30 +14,12 @@ copyright: Unison Computing, PBC category: Development library - hs-source-dirs: lib + hs-source-dirs: . 
exposed-modules: - U.Codebase.Sync.V2V2 + U.Codebase.Sync -- other-modules: -- other-extensions: build-depends: base, - -- bytes, - bytestring, - containers, extra, - here, - lens, - mtl, - safe, - text, - sqlite-simple, - unliftio, - vector, - unison-core, - -- unison-codebase1, - -- unison-codebase, - -- unison-codebase-sqlite, - -- unison-util, - -- unison-util-serialization, - -- unison-util-term default-language: Haskell2010 diff --git a/parser-typechecker/src/Unison/Util/Cache.hs b/codebase2/util/U/Util/Cache.hs similarity index 92% rename from parser-typechecker/src/Unison/Util/Cache.hs rename to codebase2/util/U/Util/Cache.hs index 499d75f806..22fba7dab3 100644 --- a/parser-typechecker/src/Unison/Util/Cache.hs +++ b/codebase2/util/U/Util/Cache.hs @@ -1,9 +1,12 @@ -module Unison.Util.Cache where +{-# LANGUAGE LambdaCase #-} +module U.Util.Cache where import Prelude hiding (lookup) -import Unison.Prelude -import UnliftIO (newTVarIO, modifyTVar', writeTVar, atomically, readTVar, readTVarIO) +import UnliftIO (MonadIO, newTVarIO, modifyTVar', writeTVar, atomically, readTVar, readTVarIO) import qualified Data.Map as Map +import Data.Functor (($>)) +import Control.Monad (when) +import Data.Foldable (for_) data Cache m k v = Cache { lookup :: k -> m (Maybe v) diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal index a8958cbf8d..39d41f99b3 100644 --- a/codebase2/util/unison-util.cabal +++ b/codebase2/util/unison-util.cabal @@ -16,6 +16,7 @@ category: Development library exposed-modules: U.Util.Base32Hex + U.Util.Cache U.Util.Components U.Util.Hash U.Util.Hashable @@ -34,6 +35,7 @@ library lens, memory, text, + unliftio, sandi hs-source-dirs: . 
default-language: Haskell2010 diff --git a/hie.yaml b/hie.yaml index 99a5b8290c..737b0d21a5 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,10 +1,7 @@ cradle: stack: - - path: "codebase-convert-1to2/app/Main.hs" - component: "unison-codebase-convert-1to2:exe:uconvert12" - - - path: "codebase-convert-1to2/lib" - component: "unison-codebase-convert-1to2:lib" + - path: "codebase-convert-2to2/lib" + component: "unison-codebase-sync-2to2:lib" - path: "codebase1/codebase/." component: "unison-codebase1:lib" diff --git a/parser-typechecker/src/Unison/Builtin/Decls.hs b/parser-typechecker/src/Unison/Builtin/Decls.hs index fb5edbdedd..f097b0a152 100644 --- a/parser-typechecker/src/Unison/Builtin/Decls.hs +++ b/parser-typechecker/src/Unison/Builtin/Decls.hs @@ -4,11 +4,9 @@ module Unison.Builtin.Decls where -import Control.Monad (join) import Data.List (elemIndex, find) import qualified Data.Map as Map import Data.Text (Text, unpack) -import Debug.Trace (trace) import qualified Unison.ABT as ABT import qualified Unison.ConstructorType as CT import Unison.DataDeclaration diff --git a/parser-typechecker/src/Unison/Codebase/Branch.hs b/parser-typechecker/src/Unison/Codebase/Branch.hs index fb96565a0e..a8641c26ff 100644 --- a/parser-typechecker/src/Unison/Codebase/Branch.hs +++ b/parser-typechecker/src/Unison/Codebase/Branch.hs @@ -144,7 +144,7 @@ import Unison.Referent ( Referent ) import qualified Unison.Referent as Referent import qualified Unison.Reference as Reference -import qualified Unison.Util.Cache as Cache +import qualified U.Util.Cache as Cache import qualified Unison.Util.Relation as R import Unison.Util.Relation ( Relation ) import qualified Unison.Util.Relation4 as R4 diff --git a/parser-typechecker/src/Unison/Codebase/Causal.hs b/parser-typechecker/src/Unison/Codebase/Causal.hs index 46164d168f..c859e639e7 100644 --- a/parser-typechecker/src/Unison/Codebase/Causal.hs +++ b/parser-typechecker/src/Unison/Codebase/Causal.hs @@ -14,7 +14,7 @@ import qualified 
Data.Sequence as Seq import Unison.Hash ( Hash ) import qualified Unison.Hashable as Hashable import Unison.Hashable ( Hashable ) -import qualified Unison.Util.Cache as Cache +import qualified U.Util.Cache as Cache import qualified Data.Map as Map import qualified Data.Set as Set diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index 55b4558deb..41e972af53 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -56,7 +56,7 @@ import qualified Unison.Referent as Referent import qualified Unison.Util.TQueue as TQueue import Unison.Var ( Var ) import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Cache as Cache +import qualified U.Util.Cache as Cache import qualified Unison.Util.Pretty as P import qualified Unison.PrettyTerminal as PT import Unison.Symbol ( Symbol ) diff --git a/parser-typechecker/tests/Unison/Test/Cache.hs b/parser-typechecker/tests/Unison/Test/Cache.hs index fafd6459f8..9d5afb3cea 100644 --- a/parser-typechecker/tests/Unison/Test/Cache.hs +++ b/parser-typechecker/tests/Unison/Test/Cache.hs @@ -4,7 +4,7 @@ import EasyTest import Control.Monad import Control.Concurrent.STM import Control.Concurrent.Async -import qualified Unison.Util.Cache as Cache +import qualified U.Util.Cache as Cache test :: Test () test = scope "util.cache" $ tests [ diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index d55e1e1922..835c7d2ada 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -157,7 +157,6 @@ library Unison.UnisonFile Unison.Util.AnnotatedText Unison.Util.Bytes - Unison.Util.Cache Unison.Util.ColorText Unison.Util.EnumContainers Unison.Util.Exception @@ -373,7 +372,8 @@ executable tests text, transformers, unison-core1, - unison-parser-typechecker + 
unison-parser-typechecker, + unison-util executable transcripts import: unison-common From 296953dc97e25ebbd276ba635aef5c0bfe7020f2 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 25 Feb 2021 21:23:29 -0500 Subject: [PATCH 105/225] sync causal entries --- .../U/Codebase/Sqlite/Queries.hs | 25 +++- .../U/Codebase/Sqlite/Sync22.hs | 108 +++++++++++++----- codebase2/codebase-sync/U/Codebase/Sync.hs | 4 +- 3 files changed, 104 insertions(+), 33 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 56e12475b0..7c71ab4301 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -16,15 +16,18 @@ module U.Codebase.Sqlite.Queries where -import Control.Monad (filterM) import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) +import Control.Monad (filterM, when) import Control.Monad.IO.Class (MonadIO) import Control.Monad.Reader (MonadReader (ask)) import Control.Monad.Trans (MonadIO (liftIO)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) import Data.ByteString (ByteString) import Data.Foldable (traverse_) +import Data.Functor ((<&>)) import qualified Data.List.Extra as List +import Data.List.NonEmpty (NonEmpty) +import qualified Data.List.NonEmpty as Nel import Data.Maybe (fromJust, isJust) import Data.String (fromString) import Data.String.Here.Uninterpolated (here, hereFile) @@ -37,7 +40,7 @@ import Debug.Trace (trace, traceM) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') import qualified U.Codebase.Referent as C.Referent -import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, HashId (..), ObjectId (..), TextId, Generation(..)) +import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, Generation 
(..), HashId (..), ObjectId (..), TextId) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent @@ -47,8 +50,6 @@ import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO) -import Control.Monad (when) -import Data.Functor ((<&>)) -- * types @@ -256,6 +257,15 @@ objectExistsWithHash h = queryExists sql (Only h) where WHERE base32 = ? |] +hashIdsForObject :: DB m => ObjectId -> m (NonEmpty HashId) +hashIdsForObject oId = do + primaryHashId <- queryOne $ queryAtom sql1 (Only oId) + hashIds <- queryAtoms sql2 (Only oId) + pure $ primaryHashId Nel.:| filter (/= primaryHashId) hashIds + where + sql1 = "SELECT primary_hash_id FROM object WHERE id = ?" + sql2 = "SELECT hash_id FROM hash_object WHERE object_id = ?" + updateObjectBlob :: DB m => ObjectId -> ByteString -> m () updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| UPDATE object SET bytes = ? WHERE id = ? @@ -506,29 +516,36 @@ namespaceHashIdByBase32Prefix prefix = queryAtoms sql (Only $ prefix <> "%") whe -- * helper functions +-- | composite input, atomic List output queryAtoms :: (DB f, ToRow q, FromField b, Show q, Show b) => SQLite.Query -> q -> f [b] queryAtoms q r = map fromOnly <$> query q r +-- | composite input, composite Maybe output queryMaybe :: (DB f, ToRow q, FromRow b, Show q, Show b) => SQLite.Query -> q -> f (Maybe b) queryMaybe q r = headMay <$> query q r +-- | composite input, atomic Maybe output queryAtom :: (DB f, ToRow q, FromField b, Show q, Show b) => SQLite.Query -> q -> f (Maybe b) queryAtom q r = fmap fromOnly <$> queryMaybe q r +-- | Just output queryOne :: Functor f => f (Maybe b) -> f b queryOne = fmap fromJust +-- | composite input, Boolean output queryExists :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m Bool queryExists q r = not . null . 
map (id @SQLData) <$> queryAtoms q r debugQuery :: Bool debugQuery = False +-- | composite input, composite List output query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] query q r = do c <- ask liftIO . queryTrace "query" q r $ SQLite.query c q r +-- | no input, composite List output query_ :: (DB m, FromRow r, Show r) => SQLite.Query -> m [r] query_ q = do c <- ask diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index fd8c62e3d0..fd0db10e95 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -1,16 +1,21 @@ {-# LANGUAGE BlockArguments #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} module U.Codebase.Sqlite.Sync22 where -import Control.Monad (filterM, (<=<)) -import Control.Monad.Except (ExceptT, MonadError) +import Control.Monad (filterM, join, (<=<)) +import Control.Monad.Except (ExceptT, MonadError (throwError)) import Control.Monad.Extra (ifM) import Control.Monad.RWS (MonadIO, MonadReader (reader)) import Control.Monad.Reader (ReaderT) import Control.Monad.Trans.Except (withExceptT) +import Data.Foldable (toList) import Data.Functor ((<&>)) +import Data.List.Extra (nubOrd) +import Data.Maybe (catMaybes, fromJust, isJust) import Data.Word (Word64) import Database.SQLite.Simple (Connection) import U.Codebase.Sqlite.DbId @@ -19,11 +24,15 @@ import U.Codebase.Sync import qualified U.Codebase.Sync as Sync import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache -import Data.Foldable (toList) data Entity = O ObjectId | C CausalHashId -data Error = SrcDB Q.Integrity | DestDB Q.Integrity +data Error + = SrcDB Q.Integrity + | DestDB Q.Integrity + | -- | hashes corresponding to a single object in source codebase + -- correspond to multiple objects in destination codebase + 
HashObjectCorrespondence ObjectId [HashId] [ObjectId] data Env = Env { srcDB :: Connection, @@ -49,15 +58,15 @@ data Env = Env -- roots = runSrc $ fmap (\h -> [C h]) Q.loadNamespaceRoot trySync :: + forall m. (MonadIO m, MonadError Error m, MonadReader Env m) => Cache m TextId TextId -> Cache m HashId HashId -> - Cache m CausalHashId CausalHashId -> Cache m ObjectId ObjectId -> Generation -> Entity -> m (TrySyncResult Entity) -trySync tCache hCache cCache oCache gc = \case +trySync tCache hCache oCache gc = \case -- for causals, we need to get the value_hash_id of the thingo -- - maybe enqueue their parents -- - enqueue the self_ and value_ hashes @@ -67,44 +76,89 @@ trySync tCache hCache cCache oCache gc = \case -- we're going to assume that if the dest has this in its -- causal table, then it's safe to short-circuit ifM - (runDest $ Q.isCausalHash chId') + (runDest $ Q.isCausalHash $ unCausalHashId chId') (pure Sync.PreviouslyDone) ( do - missingParents <- (fmap . fmap) C $ - runSrc (Q.loadCausalParents chId) - >>= filterM (\p -> syncCausalHash p >>= runDest . Q.isCausalHash) - -- optionally get branch object id for this causal - mayBoId <- (fmap . fmap) O $ - runSrc (Q.loadCausalValueHashId chId) - >>= runSrc . Q.maybeObjectIdForAnyHashId . unBranchHashId - -- the parents should probably be real like CausalIds, that "statically" - -- know they're in that Causal table; not just reuse the hash id. - -- that would make the missingParents real dependencies instead of weakrefs - pure (Sync.Done (toList mayBoId ++ toList missingParents)) + bhId <- runSrc $ Q.loadCausalValueHashId chId + bhId' <- syncBranchHashId bhId + + mayBoId <- + runSrc . 
Q.maybeObjectIdForAnyHashId $ + unBranchHashId bhId + mayBoId' <- join <$> traverse (isSyncedObject) mayBoId + + findMissingParents chId >>= \case + [] -> + -- if branch object is present at src and dest, + -- or absent from both src and dest + -- then we are done + if isJust mayBoId == isJust mayBoId' + then do + runDest $ Q.saveCausal chId' bhId' + pure Sync.Done + else -- else it's present at src but not at dest., + -- so request it be copied, and revisit later + pure $ Missing [O $ fromJust mayBoId] + missingParents -> + -- if branch object is present at src and dest, + -- or absent from both src and dest + -- but there are parents missing, + -- then request the parents be copied, and revisit later + if isJust mayBoId == isJust mayBoId' + then pure $ Missing missingParents + else -- otherwise request the branch and the parents be copied + + pure $ Missing $ (O $ fromJust mayBoId) : missingParents ) -- objects are the hairiest. obviously, if they -- exist, we're done; otherwise we do some fancy stuff - O oId -> error "todo" - -- O oId -> Cache.lookup oCache oId >>= \case - -- Just{} -> pure Sync.PreviouslyDone - -- Nothing -> do - - -- error "todo" - -- pure Sync.Done + O oId -> do + error "todo: look for corresponding object using primary or any secondary hashes" + error "todo: copy non-primary hashes" + error "todo" + -- O oId -> Cache.lookup oCache oId >>= \case + -- Just{} -> pure Sync.PreviouslyDone + -- Nothing -> do + + -- error "todo" + -- pure Sync.Done where + syncTextLiteral :: TextId -> m TextId syncTextLiteral = Cache.apply tCache \tId -> do t <- runSrc $ Q.loadTextById tId runDest $ Q.saveText t + syncHashLiteral :: HashId -> m HashId syncHashLiteral = Cache.apply hCache \hId -> do b32hex <- runSrc $ Q.loadHashById hId runDest $ Q.saveHash b32hex - syncCausalHash = syncHashLiteral . unCausalHashId + syncCausalHash :: CausalHashId -> m CausalHashId + syncCausalHash = fmap CausalHashId . syncHashLiteral . 
unCausalHashId + + syncBranchHashId :: BranchHashId -> m BranchHashId syncBranchHashId = fmap BranchHashId . syncHashLiteral . unBranchHashId - syncObject = error "todo" + findMissingParents chId = do + runSrc (Q.loadCausalParents chId) + >>= filterM isMissing + <&> fmap C + where + isMissing p = + syncCausalHash p + >>= runDest . Q.isCausalHash . unCausalHashId + + isSyncedObject :: ObjectId -> m (Maybe ObjectId) + isSyncedObject = Cache.applyDefined oCache \oId -> do + hIds <- toList <$> runSrc (Q.hashIdsForObject oId) + ( nubOrd . catMaybes + <$> traverse (runDest . Q.maybeObjectIdForAnyHashId) hIds + ) + >>= \case + [oId'] -> pure $ Just oId' + [] -> pure $ Nothing + oIds' -> throwError (HashObjectCorrespondence oId hIds oIds') -- syncCausal chId = do -- value diff --git a/codebase2/codebase-sync/U/Codebase/Sync.hs b/codebase2/codebase-sync/U/Codebase/Sync.hs index 98d8895228..7cbf837832 100644 --- a/codebase2/codebase-sync/U/Codebase/Sync.hs +++ b/codebase2/codebase-sync/U/Codebase/Sync.hs @@ -29,7 +29,7 @@ module U.Codebase.Sync where import Data.Foldable (traverse_) -data TrySyncResult h = Missing [h] | Done [h] | PreviouslyDone +data TrySyncResult h = Missing [h] | Done | PreviouslyDone data Sync m h = Sync { roots :: m [h], @@ -47,6 +47,6 @@ sync Sync{..} Progress{..} = do roots >>= go where go :: [h] -> m () go (h : hs) = trySync h >>= \case Missing deps -> traverse_ need deps >> go (deps ++ h : hs) - Done externalDeps -> done h >> go (externalDeps ++ hs) + Done -> done h >> go hs PreviouslyDone -> go hs go [] = allDone \ No newline at end of file From 1a41e549463814ced1004fb39aac342295b2b731 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 27 Feb 2021 18:17:08 -0500 Subject: [PATCH 106/225] filling in object sync --- .../U/Codebase/Sqlite/Queries.hs | 11 +++++ .../U/Codebase/Sqlite/Sync22.hs | 40 +++++++++++++------ 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 7c71ab4301..9edbbad4f3 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -214,6 +214,12 @@ loadObjectWithTypeById oId = queryMaybe sql (Only oId) >>= orError (UnknownObjec SELECT type_id, bytes FROM object WHERE id = ? |] +loadObjectWithHashIdAndTypeById :: EDB m => ObjectId -> m (HashId, ObjectType, ByteString) +loadObjectWithHashIdAndTypeById oId = queryMaybe sql (Only oId) >>= orError (UnknownObjectId oId) + where sql = [here| + SELECT primary_hash_id, type_id, bytes FROM object WHERE id = ? + |] + -- |Not all hashes have corresponding objects; e.g., hashes of term types expectObjectIdForPrimaryHashId :: EDB m => HashId -> m ObjectId expectObjectIdForPrimaryHashId h = @@ -266,6 +272,11 @@ hashIdsForObject oId = do sql1 = "SELECT primary_hash_id FROM object WHERE id = ?" sql2 = "SELECT hash_id FROM hash_object WHERE object_id = ?" +hashIdWithVersionForObject :: DB m => ObjectId -> m [(HashId, Int)] +hashIdWithVersionForObject = query sql . Only where sql = [here| + SELECT hash_id, hash_version FROM hash_object WHERE object_id = ? +|] + updateObjectBlob :: DB m => ObjectId -> ByteString -> m () updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| UPDATE object SET bytes = ? WHERE id = ? 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index fd0db10e95..abc4149cf8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -12,13 +12,14 @@ import Control.Monad.Extra (ifM) import Control.Monad.RWS (MonadIO, MonadReader (reader)) import Control.Monad.Reader (ReaderT) import Control.Monad.Trans.Except (withExceptT) -import Data.Foldable (toList) +import Data.Foldable (toList, traverse_) import Data.Functor ((<&>)) import Data.List.Extra (nubOrd) import Data.Maybe (catMaybes, fromJust, isJust) import Data.Word (Word64) import Database.SQLite.Simple (Connection) import U.Codebase.Sqlite.DbId +import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Queries as Q import U.Codebase.Sync import qualified U.Codebase.Sync as Sync @@ -107,22 +108,25 @@ trySync tCache hCache oCache gc = \case if isJust mayBoId == isJust mayBoId' then pure $ Missing missingParents else -- otherwise request the branch and the parents be copied - pure $ Missing $ (O $ fromJust mayBoId) : missingParents ) - -- objects are the hairiest. 
obviously, if they -- exist, we're done; otherwise we do some fancy stuff - O oId -> do - error "todo: look for corresponding object using primary or any secondary hashes" - error "todo: copy non-primary hashes" - error "todo" - -- O oId -> Cache.lookup oCache oId >>= \case - -- Just{} -> pure Sync.PreviouslyDone - -- Nothing -> do - - -- error "todo" - -- pure Sync.Done + O oId -> + isSyncedObject oId >>= \case + Just {} -> pure Sync.PreviouslyDone + Nothing -> do + (hId, objType, bytes) <- runSrc $ Q.loadObjectWithHashIdAndTypeById oId + hId' <- syncHashLiteral hId + bytes' <- case objType of + OT.TermComponent -> error "todo" + OT.DeclComponent -> error "todo" + OT.Namespace -> error "todo" + OT.Patch -> error "todo" + oId' <- runDest $ Q.saveObject hId' objType bytes' + syncSecondaryHashes oId oId' + Cache.insert oCache oId oId' + pure Sync.Done where syncTextLiteral :: TextId -> m TextId syncTextLiteral = Cache.apply tCache \tId -> do @@ -140,6 +144,7 @@ trySync tCache hCache oCache gc = \case syncBranchHashId :: BranchHashId -> m BranchHashId syncBranchHashId = fmap BranchHashId . syncHashLiteral . unBranchHashId + findMissingParents :: CausalHashId -> m [Entity] findMissingParents chId = do runSrc (Q.loadCausalParents chId) >>= filterM isMissing @@ -149,6 +154,13 @@ trySync tCache hCache oCache gc = \case syncCausalHash p >>= runDest . Q.isCausalHash . unCausalHashId + syncSecondaryHashes oId oId' = + runSrc (Q.hashIdWithVersionForObject oId) >>= traverse_ (go oId') + where + go oId' (hId, hashVersion) = do + hId' <- syncHashLiteral hId + runDest $ Q.saveHashObject hId' oId' hashVersion + isSyncedObject :: ObjectId -> m (Maybe ObjectId) isSyncedObject = Cache.applyDefined oCache \oId -> do hIds <- toList <$> runSrc (Q.hashIdsForObject oId) @@ -178,6 +190,8 @@ runDest :: m a runDest = error "todo" -- withExceptT SrcDB . 
(reader fst >>=) +-- applyDefined + -- syncs coming from git: -- - pull a specified remote causal (Maybe CausalHash) into the local database -- - and then maybe do some stuff From f0d053f174964b3446c27bc546951ead3fa850f5 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Mar 2021 12:27:11 -0500 Subject: [PATCH 107/225] basic sync of term components --- .../U/Codebase/Sqlite/Serialization.hs | 71 +++++++++----- .../U/Codebase/Sqlite/Sync22.hs | 94 +++++++++++++++++-- .../U/Util/Serialization.hs | 9 +- 3 files changed, 137 insertions(+), 37 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index ef33ba9b39..e85b1904cb 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -11,7 +11,7 @@ module U.Codebase.Sqlite.Serialization where import Data.Bits (Bits) import qualified Data.ByteString as BS import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) -import Data.Bytes.Put (MonadPut, putWord8) +import Data.Bytes.Put (MonadPut (putByteString), putWord8) import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE, serialize) import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) @@ -32,7 +32,7 @@ import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as BranchFull -import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, unBranchObjectId, unPatchObjectId) +import U.Codebase.Sqlite.DbId (BranchObjectId, HashId, ObjectId, PatchObjectId, TextId, unBranchObjectId, unPatchObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId, WatchLocalIds) import qualified 
U.Codebase.Sqlite.Patch.Diff as PatchDiff @@ -341,22 +341,23 @@ getTerm = getABT getSymbol getUnit getF 2 -> pure Term.PConcat tag -> unknownTag "SeqOp" tag - lookupTermElement :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term, TermFormat.Type) -lookupTermElement i = getWord8 >>= \case - 0 -> unsafeFramedArrayLookup (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) $ fromIntegral i - tag -> unknownTag "lookupTermElement" tag - +lookupTermElement i = + getWord8 >>= \case + 0 -> unsafeFramedArrayLookup (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) $ fromIntegral i + tag -> unknownTag "lookupTermElement" tag lookupTermElementDiscardingType :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term) -lookupTermElementDiscardingType i = getWord8 >>= \case - 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <*> getFramed getTerm <* skipFramed) $ fromIntegral i - tag -> unknownTag "lookupTermElementDiscardingType" tag +lookupTermElementDiscardingType i = + getWord8 >>= \case + 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <*> getFramed getTerm <* skipFramed) $ fromIntegral i + tag -> unknownTag "lookupTermElementDiscardingType" tag lookupTermElementDiscardingTerm :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Type) -lookupTermElementDiscardingTerm i = getWord8 >>= \case - 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getFramed getTType) $ fromIntegral i - tag -> unknownTag "lookupTermElementDiscardingTerm" tag +lookupTermElementDiscardingTerm i = + getWord8 >>= \case + 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getFramed getTType) $ fromIntegral i + tag -> unknownTag "lookupTermElementDiscardingTerm" tag getTType :: MonadGet m => m TermFormat.Type getTType = getType getReference @@ -389,7 +390,6 @@ putDeclFormat = \case -- These use a framed array for randomer access putDeclComponent :: MonadPut m => DeclFormat.LocallyIndexedComponent -> m () putDeclComponent t 
| debug && trace ("putDeclComponent " ++ show t) False = undefined - putDeclComponent (DeclFormat.LocallyIndexedComponent v) = putFramedArray (putPair putLocalIds putDeclElement) v where @@ -520,10 +520,11 @@ getPatchFormat = 2 -> pure TermEdit.Different x -> unknownTag "getTyping" x getTypeEdit :: MonadGet m => m TypeEdit.LocalTypeEdit - getTypeEdit = getWord8 >>= \case - 0 -> pure TypeEdit.Deprecate - 1 -> TypeEdit.Replace <$> getReference - x -> unknownTag "getTypeEdit" x + getTypeEdit = + getWord8 >>= \case + 0 -> pure TypeEdit.Deprecate + 1 -> TypeEdit.Replace <$> getReference + x -> unknownTag "getTypeEdit" x getPatchLocalIds :: MonadGet m => m PatchFormat.PatchLocalIds getPatchLocalIds = @@ -662,15 +663,39 @@ watchLocalIdsToLocalDeps :: WatchLocalIds -> SE.SyncEntitySeq watchLocalIdsToLocalDeps (LocalIds ts hs) = SE.SyncEntity (vec2seq ts) mempty (vec2seq hs) mempty --- the same implementation currently works for term component and type component -getComponentSyncEntities :: MonadGet m => m SE.SyncEntitySeq -getComponentSyncEntities = do +decomposeTermComponent :: MonadGet m => m [(LocalIds, BS.ByteString, BS.ByteString)] +decomposeTermComponent = decomposeComponent do + ids <- getLocalIds + termBytes <- getFramedByteString + typeBytes <- getFramedByteString + pure [(ids, termBytes, typeBytes)] + +decomposeDeclComponent :: MonadGet m => m [(LocalIds, BS.ByteString)] +decomposeDeclComponent = decomposeComponent do + ids <- getLocalIds + declBytes <- getFramedByteString + pure [(ids, declBytes)] + +recomposeTermComponent :: MonadPut m => [(LocalIds, BS.ByteString, BS.ByteString)] -> m () +recomposeTermComponent = + putFramedArray \(localIds, termBytes, typeBytes) -> do + putLocalIds localIds + putFramed putByteString termBytes + putFramed putByteString typeBytes + +decomposeComponent :: (MonadGet m, Monoid a) => Get a -> m a +decomposeComponent split = do offsets <- getList (getVarInt @_ @Int) componentBytes <- getByteString (last offsets) let get1 (start, 
end) = do let bytes = BS.drop start $ BS.take end componentBytes - either fail pure $ runGetS getLocalIds bytes - Monoid.foldMapM (fmap localIdsToLocalDeps . get1) (zip offsets (tail offsets)) + either fail pure $ runGetS split bytes + Monoid.foldMapM get1 (zip offsets (tail offsets)) + +-- the same implementation currently works for term component and type component +getComponentSyncEntities :: MonadGet m => m SE.SyncEntitySeq +getComponentSyncEntities = + decomposeComponent $ fmap localIdsToLocalDeps getLocalIds getPatchSyncEntities :: MonadGet m => m SE.SyncEntitySeq getPatchSyncEntities = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index abc4149cf8..16e99d1329 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -3,34 +3,54 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ViewPatterns #-} module U.Codebase.Sqlite.Sync22 where -import Control.Monad (filterM, join, (<=<)) +import qualified Control.Lens as Lens +import Control.Monad (filterM, foldM, join, (<=<)) import Control.Monad.Except (ExceptT, MonadError (throwError)) import Control.Monad.Extra (ifM) import Control.Monad.RWS (MonadIO, MonadReader (reader)) import Control.Monad.Reader (ReaderT) -import Control.Monad.Trans.Except (withExceptT) +import Control.Monad.Trans (lift) +import Control.Monad.Trans.Except (throwE, withExceptT) +import qualified Control.Monad.Writer as Writer +import Data.ByteString (ByteString) +import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) +import Data.Bytes.Put (putWord8, runPutS) import Data.Foldable (toList, traverse_) import Data.Functor ((<&>)) import Data.List.Extra (nubOrd) import Data.Maybe (catMaybes, fromJust, isJust) +import Data.Sequence (Seq) +import qualified Data.Sequence as Seq +import Data.Traversable 
(for) import Data.Word (Word64) import Database.SQLite.Simple (Connection) import U.Codebase.Sqlite.DbId +import qualified U.Codebase.Sqlite.LocalIds as L import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Queries as Q +import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sync import qualified U.Codebase.Sync as Sync import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache +import qualified U.Util.Serialization as S data Entity = O ObjectId | C CausalHashId +data DbTag = SrcDb | DestDb + +data DecodeError = ErrTermComponent | ErrDeclComponent + +type ErrString = String + data Error - = SrcDB Q.Integrity - | DestDB Q.Integrity + = DbIntegrity Q.Integrity + | DecodeError DbTag DecodeError ByteString ErrString | -- | hashes corresponding to a single object in source codebase -- correspond to multiple objects in destination codebase HashObjectCorrespondence ObjectId [HashId] [ObjectId] @@ -118,16 +138,70 @@ trySync tCache hCache oCache gc = \case Nothing -> do (hId, objType, bytes) <- runSrc $ Q.loadObjectWithHashIdAndTypeById oId hId' <- syncHashLiteral hId - bytes' <- case objType of - OT.TermComponent -> error "todo" + result <- case objType of + OT.TermComponent -> do + -- (fmt, termComponent) <- + -- either (throwError . DecodeError SrcDb ErrTermComponent bytes) pure -- 🤪 + -- . flip runGetS bytes + -- $ (,) <$> getWord8 <*> S.decomposeTermComponent + (fmt, unzip3 -> (localIds, termBytes, typeBytes)) <- + case flip runGetS bytes do + tag <- getWord8 + component <- S.decomposeTermComponent + pure (tag, component) of + Right x -> pure x + Left s -> throwError $ DecodeError SrcDb ErrTermComponent bytes s + -- termComponent' <- + -- S.decomposeTermComponent >>= traverse . 
Lens.mapMOf Lens._1 do + foldM foldLocalIds (Right mempty) localIds >>= \case + Left missingDeps -> pure $ Left missingDeps + Right (toList -> localIds') -> do + let bytes' = + runPutS $ + putWord8 fmt + >> S.recomposeTermComponent (zip3 localIds' termBytes typeBytes) + oId' <- runDest $ Q.saveObject hId' objType bytes' + error "todo: optionally copy watch cache entry" + error "todo: sync dependency index rows" + error "todo: sync type/mentions index rows" + error "todo" + pure $ Right oId' OT.DeclComponent -> error "todo" OT.Namespace -> error "todo" OT.Patch -> error "todo" - oId' <- runDest $ Q.saveObject hId' objType bytes' - syncSecondaryHashes oId oId' - Cache.insert oCache oId oId' - pure Sync.Done + case result of + Left deps -> pure . Sync.Missing $ toList deps + Right oId' -> do + syncSecondaryHashes oId oId' + Cache.insert oCache oId oId' + pure Sync.Done where + foldLocalIds :: Either (Seq Entity) (Seq L.LocalIds) -> L.LocalIds -> m (Either (Seq Entity) (Seq L.LocalIds)) + foldLocalIds (Left missing) (L.LocalIds _tIds oIds) = + syncLocalObjectIds oIds <&> \case + Left missing2 -> Left (missing <> missing2) + Right _oIds' -> Left missing + foldLocalIds (Right localIdss') (L.LocalIds tIds oIds) = + syncLocalObjectIds oIds >>= \case + Left missing -> pure $ Left missing + Right oIds' -> do + tIds' <- traverse syncTextLiteral tIds + pure $ Right (localIdss' Seq.|> L.LocalIds tIds' oIds') + + -- I want to collect all the failures, rather than short-circuiting after the first + syncLocalObjectIds :: Traversable t => t ObjectId -> m (Either (Seq Entity) (t ObjectId)) + syncLocalObjectIds oIds = do + (mayOIds', missing) <- Writer.runWriterT do + for oIds \oId -> + lift (isSyncedObject oId) >>= \case + Just oId' -> pure oId' + Nothing -> do + Writer.tell . Seq.singleton $ O oId + pure $ error "Arya didn't think this would get eval'ed." 
+ if null missing + then pure $ Right mayOIds' + else pure $ Left missing + syncTextLiteral :: TextId -> m TextId syncTextLiteral = Cache.apply tCache \tId -> do t <- runSrc $ Q.loadTextById tId diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index d1f8da281a..13ebfe580b 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -178,11 +178,12 @@ addToExistingMap getA getB map = do getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) getMap getA getB = addToExistingMap getA getB mempty +getFramedByteString :: MonadGet m => m ByteString +getFramedByteString = getVarInt >>= getByteString + getFramed :: MonadGet m => Get a -> m a -getFramed get = do - size <- getVarInt - bytes <- getByteString size - either fail pure $ runGetS get bytes +getFramed get = + getFramedByteString >>= either fail pure . runGetS get putFramed :: MonadPut m => Put a -> a -> m () putFramed put a = do From f1e2403d0f24708e132b1e9e2f2adbd5bd0c4686 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Mar 2021 23:15:47 -0500 Subject: [PATCH 108/225] sync term dependency index --- .../U/Codebase/Sqlite/Queries.hs | 8 ++++ .../U/Codebase/Sqlite/Reference.hs | 29 +++++++++------ .../U/Codebase/Sqlite/Sync22.hs | 37 +++++++++++++++---- 3 files changed, 55 insertions(+), 19 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 9edbbad4f3..0717c817c8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -492,6 +492,14 @@ getDependentsForDependency dependency = query sql dependency where sql = [here| AND dependency_component_index IS ? 
|] +getDependenciesForDependent :: DB m => Reference.Id -> m [Reference.Reference] +getDependenciesForDependent dependent = query sql dependent where sql = [here| + SELECT dependency_builtin, dependency_object_id, dependency_component_index + FROM dependents_index + WHERE dependent_object_id IS ? + AND dependent_component_index IS ? +|] + getDependencyIdsForDependent :: DB m => Reference.Id -> m [Reference.Id] getDependencyIdsForDependent dependent = query sql dependent where sql = [here| SELECT dependency_object_id, dependency_component_index diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs index 185d122a88..31b8e5d024 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Reference.hs @@ -4,14 +4,14 @@ module U.Codebase.Sqlite.Reference where +import Control.Applicative (liftA3) import Database.SQLite.Simple (Only (..), SQLData (..), ToRow (toRow)) import Database.SQLite.Simple.FromField (FromField) -import Database.SQLite.Simple.FromRow (FromRow (fromRow), field) +import Database.SQLite.Simple.FromRow (FromRow (fromRow), RowParser, field) import Database.SQLite.Simple.ToField (ToField) import U.Codebase.Reference (Id' (Id), Reference' (ReferenceBuiltin, ReferenceDerived)) import U.Codebase.Sqlite.DbId (HashId, ObjectId, TextId) import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) -import Control.Applicative (liftA3) type Reference = Reference' TextId ObjectId @@ -35,15 +35,22 @@ instance ToRow (Reference' TextId HashId) where ReferenceDerived (Id h i) -> SQLNull : toRow (Only h) ++ toRow (Only i) instance FromRow (Reference' TextId HashId) where - fromRow = liftA3 mkRef field field field - where - mkRef (Just t) Nothing Nothing = - ReferenceBuiltin t - mkRef Nothing (Just hashId) (Just componentIdx) = - ReferenceDerived (Id hashId componentIdx) - mkRef t h i = - error $ "invalid find_type_index type 
reference: " ++ str - where str = "(" ++ show t ++ ", " ++ show h ++ ", " ++ show i ++ ")" + fromRow = referenceFromRow' + +instance FromRow (Reference' TextId ObjectId) where + fromRow = referenceFromRow' + +referenceFromRow' :: (FromField t, FromField h, Show t, Show h) => RowParser (Reference' t h) +referenceFromRow' = liftA3 mkRef field field field + where + mkRef (Just t) Nothing Nothing = + ReferenceBuiltin t + mkRef Nothing (Just h) (Just componentIdx) = + ReferenceDerived (Id h componentIdx) + mkRef t h i = + error $ "invalid find_type_index type reference: " ++ str + where + str = "(" ++ show t ++ ", " ++ show h ++ ", " ++ show i ++ ")" instance ToRow (Reference' TextId ObjectId) where toRow = \case diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 16e99d1329..1f1a02da52 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -20,10 +20,10 @@ import qualified Control.Monad.Writer as Writer import Data.ByteString (ByteString) import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) import Data.Bytes.Put (putWord8, runPutS) -import Data.Foldable (toList, traverse_) +import Data.Foldable (toList, traverse_, for_) import Data.Functor ((<&>)) import Data.List.Extra (nubOrd) -import Data.Maybe (catMaybes, fromJust, isJust) +import Data.Maybe (catMaybes, fromJust, isJust, fromMaybe) import Data.Sequence (Seq) import qualified Data.Sequence as Seq import Data.Traversable (for) @@ -39,6 +39,9 @@ import qualified U.Codebase.Sync as Sync import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache import qualified U.Util.Serialization as S +import qualified U.Codebase.Sqlite.Reference as Sqlite.Reference +import qualified U.Codebase.Reference as Reference +import qualified U.Codebase.Sqlite.Reference as Sqlite data Entity = O ObjectId | C CausalHashId @@ -151,8 +154,6 @@ trySync tCache hCache oCache 
gc = \case pure (tag, component) of Right x -> pure x Left s -> throwError $ DecodeError SrcDb ErrTermComponent bytes s - -- termComponent' <- - -- S.decomposeTermComponent >>= traverse . Lens.mapMOf Lens._1 do foldM foldLocalIds (Right mempty) localIds >>= \case Left missingDeps -> pure $ Left missingDeps Right (toList -> localIds') -> do @@ -161,10 +162,19 @@ trySync tCache hCache oCache gc = \case putWord8 fmt >> S.recomposeTermComponent (zip3 localIds' termBytes typeBytes) oId' <- runDest $ Q.saveObject hId' objType bytes' - error "todo: optionally copy watch cache entry" - error "todo: sync dependency index rows" - error "todo: sync type/mentions index rows" - error "todo" + -- "todo: optionally copy watch cache entry" + + -- sync dependency index + for_ [0 .. length localIds - 1] \(fromIntegral -> idx) -> do + indexDependencies <- runSrc $ Q.getDependenciesForDependent (Reference.Id oId idx) + let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") + indexDependencies' <- traverse (fmap fromJust' . isSyncedObjectReference) indexDependencies + runDest $ traverse_ (flip Q.addToDependentsIndex (Reference.Id oId' idx)) indexDependencies' + + -- sync type index rows + error "todo: sync type index rows" + -- sync type mentions index rows + error "todo: sync type mentions index rows" pure $ Right oId' OT.DeclComponent -> error "todo" OT.Namespace -> error "todo" @@ -212,6 +222,17 @@ trySync tCache hCache oCache gc = \case b32hex <- runSrc $ Q.loadHashById hId runDest $ Q.saveHash b32hex + isSyncedObjectReference :: Sqlite.Reference -> m (Maybe Sqlite.Reference) + isSyncedObjectReference = \case + Reference.ReferenceBuiltin t -> + Just . 
Reference.ReferenceBuiltin <$> syncTextLiteral t + Reference.ReferenceBuiltin id -> + Reference.ReferenceBuiltin <$> isSyncedObjectReferenceId id + + isSyncedObjectReferenceId :: Sqlite.Reference.Id -> m (Maybe Sqlite.Reference.Id) + isSyncedObjectReferenceId (Reference.Id oId idx) = + isSyncedObject oId <&> fmap (\oId' -> Reference.Id oId' idx) + syncCausalHash :: CausalHashId -> m CausalHashId syncCausalHash = fmap CausalHashId . syncHashLiteral . unCausalHashId From 624668f2be3b27683f6295cbfecb566fe585c97f Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 1 Mar 2021 23:16:19 -0500 Subject: [PATCH 109/225] fix apparently wrong fk constraint --- codebase2/codebase-sqlite/sql/create-index.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/codebase2/codebase-sqlite/sql/create-index.sql b/codebase2/codebase-sqlite/sql/create-index.sql index 985ecebc49..86743a0da1 100644 --- a/codebase2/codebase-sqlite/sql/create-index.sql +++ b/codebase2/codebase-sqlite/sql/create-index.sql @@ -5,7 +5,7 @@ CREATE TABLE find_type_index ( type_reference_builtin INTEGER NULL CONSTRAINT find_type_index_fk1 REFERENCES text(id), type_reference_hash_id INTEGER NULL CONSTRAINT find_type_index_fk2 REFERENCES hash(id), type_reference_component_index INTEGER NULL, - term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_index_fk3 REFERENCES hash(id), + term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_index_fk3 REFERENCES object(id), term_referent_component_index INTEGER NOT NULL, term_referent_constructor_index INTEGER NULL, CONSTRAINT find_type_index_c1 UNIQUE ( @@ -32,7 +32,7 @@ CREATE TABLE find_type_mentions_index ( type_reference_builtin INTEGER NULL CONSTRAINT find_type_mentions_index_fk1 REFERENCES text(id), type_reference_hash_id INTEGER NULL CONSTRAINT find_type_mentions_index_fk2 REFERENCES hash(id), type_reference_component_index INTEGER NULL, - term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_mentions_index_fk3 
REFERENCES hash(id), + term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_mentions_index_fk3 REFERENCES object(id), term_referent_component_index INTEGER NOT NULL, term_referent_constructor_index INTEGER NULL, CONSTRAINT find_type_mentions_index_c1 CHECK ( @@ -52,9 +52,9 @@ CREATE INDEX find_type_mentions_index_type ON find_type_mentions_index ( CREATE TABLE dependents_index ( dependency_builtin INTEGER NULL CONSTRAINT dependents_index_fk1 REFERENCES text(id), - dependency_object_id INTEGER NULL CONSTRAINT dependents_index_fk2 REFERENCES hash(id), + dependency_object_id INTEGER NULL CONSTRAINT dependents_index_fk2 REFERENCES object(id), dependency_component_index INTEGER NULL, - dependent_object_id INTEGER NOT NULL CONSTRAINT dependents_index_fk3 REFERENCES hash(id), + dependent_object_id INTEGER NOT NULL CONSTRAINT dependents_index_fk3 REFERENCES object(id), dependent_component_index INTEGER NOT NULL, CONSTRAINT dependents_index_c1 CHECK ( (dependency_builtin IS NULL) = From c99bb47693172360fbc42faa9e856947609fc26c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 2 Mar 2021 15:49:40 -0500 Subject: [PATCH 110/225] sync watch results --- .../U/Codebase/Sqlite/Operations.hs | 27 ++++--- .../U/Codebase/Sqlite/Queries.hs | 12 +++ .../U/Codebase/Sqlite/Serialization.hs | 20 +++-- .../U/Codebase/Sqlite/Sync22.hs | 81 ++++++++++++------- codebase2/codebase-sqlite/sql/create.sql | 12 --- .../U/Util/Serialization.hs | 5 ++ 6 files changed, 96 insertions(+), 61 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index c06da98467..73e0a7fe95 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -27,7 +27,7 @@ import Data.Bitraversable (Bitraversable (bitraverse)) import Data.ByteString (ByteString) import Data.Bytes.Get (runGetS) import qualified Data.Bytes.Get as Get -import 
Data.Foldable (traverse_, for_) +import Data.Foldable (for_, traverse_) import qualified Data.Foldable as Foldable import Data.Functor (void, (<&>)) import Data.Functor.Identity (Identity) @@ -76,7 +76,8 @@ import U.Codebase.Sqlite.LocalIds LocalIds' (..), LocalPatchObjectId (..), LocalTextId (..), - ) + WatchLocalIds, + ) import qualified U.Codebase.Sqlite.LocalIds as LocalIds import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Patch.Diff as S @@ -454,11 +455,11 @@ saveTermComponent h terms = do (fmap getTermSRef tmRefs ++ fmap getSTermLink tmLinks) ++ fmap getTypeSRef (tpRefs ++ tpRefs') ++ fmap getSTypeLink tpLinks - in Set.map (, self) dependencies + in Set.map (,self) dependencies traverse_ (uncurry Q.addToDependentsIndex) dependencies -- populate type indexes - for_ (terms `zip` [0..]) \((_tm, tp), i) -> do + for_ (terms `zip` [0 ..]) \((_tm, tp), i) -> do let self = C.Referent.RefId (C.Reference.Id oId i) typeForIndexing = TypeUtil.removeAllEffectVars tp typeMentionsForIndexing = TypeUtil.toReferenceMentions typeForIndexing @@ -701,8 +702,8 @@ loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) loadWatch k r = C.Reference.idH Q.saveHashHash r >>= MaybeT . 
Q.loadWatch k - >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getLocalIds S.getTerm) - >>= uncurry s2cTerm + >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getWatchLocalIds S.getTerm) + >>= uncurry w2cTerm saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () saveWatch w r t = do @@ -711,12 +712,12 @@ saveWatch w r t = do let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm Q.saveWatch w rs bytes -c2wTerm :: EDB m => C.Term Symbol -> m (LocalIds, S.Term.Term) -c2wTerm tm = c2xTerm Q.saveText primaryHashToExistingObjectId tm Nothing <&> \(w, tm, _) -> (w, tm) +c2wTerm :: EDB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) +c2wTerm tm = c2xTerm Q.saveText Q.saveHashHash tm Nothing <&> \(w, tm, _) -> (w, tm) -w2cTerm :: EDB m => LocalIds -> S.Term.Term -> m (C.Term Symbol) +w2cTerm :: EDB m => WatchLocalIds -> S.Term.Term -> m (C.Term Symbol) w2cTerm ids tm = do - (substText, substHash) <- localIdsToLookups loadTextById loadHashByObjectId ids + (substText, substHash) <- localIdsToLookups loadTextById loadHashByHashId ids pure $ x2cTerm substText substHash tm -- ** Saving & loading type decls @@ -743,8 +744,10 @@ saveDeclComponent h decls = do traverse_ (uncurry Q.addToDependentsIndex) dependencies -- populate type indexes - for_ (zip decls [0..]) - \(C.DataDeclaration _ _ _ ctorTypes, i) -> for_ (zip ctorTypes [0..]) + for_ + (zip decls [0 ..]) + \(C.DataDeclaration _ _ _ ctorTypes, i) -> for_ + (zip ctorTypes [0 ..]) \(tp, j) -> do let self = C.Referent.ConId (C.Reference.Id oId i) j typeForIndexing :: C.Type.TypeT Symbol = TypeUtil.removeAllEffectVars (C.Type.typeD2T h tp) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 0717c817c8..1fc9a2fee1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -470,6 +470,18 @@ getReferentsByTypeMention r = query sql r where sql = 
[here| AND type_reference_component_index IS ? |] +getTypeMentionsByReferent :: DB m => Referent.Id -> m [TypeHashReference] +getTypeMentionsByReferent r = query sql r where sql = [here| + SELECT + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index + FROM find_type_mentions_index + WHERE term_referent_object_id IS ? + AND term_referent_component_index IS ? + AND term_referent_constructor_index IS ? +|] + addToDependentsIndex :: DB m => Reference.Reference -> Reference.Id -> m () addToDependentsIndex dependency dependent = execute sql (dependency :. dependent) where sql = [here| diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index e85b1904cb..7169a316e0 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -32,7 +32,7 @@ import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as BranchFull -import U.Codebase.Sqlite.DbId (BranchObjectId, HashId, ObjectId, PatchObjectId, TextId, unBranchObjectId, unPatchObjectId) +import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, unBranchObjectId, unPatchObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId, WatchLocalIds) import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff @@ -139,10 +139,10 @@ putLocalIds LocalIds {..} = do putFoldable putVarInt defnLookup getLocalIds :: MonadGet m => m LocalIds -getLocalIds = - LocalIds - <$> getVector getVarInt - <*> getVector getVarInt +getLocalIds = LocalIds <$> getVector getVarInt <*> getVector getVarInt + +getWatchLocalIds :: MonadGet m => m WatchLocalIds +getWatchLocalIds = LocalIds <$> getVector getVarInt <*> 
getVector getVarInt putUnit :: Applicative m => () -> m () putUnit _ = pure () @@ -680,8 +680,8 @@ recomposeTermComponent :: MonadPut m => [(LocalIds, BS.ByteString, BS.ByteString recomposeTermComponent = putFramedArray \(localIds, termBytes, typeBytes) -> do putLocalIds localIds - putFramed putByteString termBytes - putFramed putByteString typeBytes + putFramedByteString termBytes + putFramedByteString typeBytes decomposeComponent :: (MonadGet m, Monoid a) => Get a -> m a decomposeComponent split = do @@ -692,6 +692,12 @@ decomposeComponent split = do either fail pure $ runGetS split bytes Monoid.foldMapM get1 (zip offsets (tail offsets)) +decomposeWatchResult :: MonadGet m => m (WatchLocalIds, BS.ByteString) +decomposeWatchResult = (,) <$> getWatchLocalIds <*> getFramedByteString + +recomposeWatchResult :: MonadPut m => (WatchLocalIds, BS.ByteString) -> m () +recomposeWatchResult (wli, bs) = putLocalIds wli >> putFramedByteString bs + -- the same implementation currently works for term component and type component getComponentSyncEntities :: MonadGet m => m SE.SyncEntitySeq getComponentSyncEntities = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 1f1a02da52..3b80c75e42 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -8,46 +8,45 @@ module U.Codebase.Sqlite.Sync22 where -import qualified Control.Lens as Lens -import Control.Monad (filterM, foldM, join, (<=<)) +import Control.Monad (filterM, foldM, join) import Control.Monad.Except (ExceptT, MonadError (throwError)) import Control.Monad.Extra (ifM) -import Control.Monad.RWS (MonadIO, MonadReader (reader)) +import Control.Monad.RWS (MonadIO, MonadReader) import Control.Monad.Reader (ReaderT) import Control.Monad.Trans (lift) -import Control.Monad.Trans.Except (throwE, withExceptT) import qualified Control.Monad.Writer as Writer +import Data.Bitraversable 
(bitraverse) import Data.ByteString (ByteString) -import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) +import Data.Bytes.Get (getWord8, runGetS) import Data.Bytes.Put (putWord8, runPutS) -import Data.Foldable (toList, traverse_, for_) +import Data.Foldable (for_, toList, traverse_) import Data.Functor ((<&>)) import Data.List.Extra (nubOrd) -import Data.Maybe (catMaybes, fromJust, isJust, fromMaybe) +import Data.Maybe (catMaybes, fromJust, fromMaybe, isJust) import Data.Sequence (Seq) import qualified Data.Sequence as Seq import Data.Traversable (for) -import Data.Word (Word64) import Database.SQLite.Simple (Connection) +import qualified U.Codebase.Reference as Reference +import qualified U.Codebase.Referent as Referent import U.Codebase.Sqlite.DbId import qualified U.Codebase.Sqlite.LocalIds as L import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Queries as Q +import qualified U.Codebase.Sqlite.Reference as Sqlite +import qualified U.Codebase.Sqlite.Reference as Sqlite.Reference import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sync import qualified U.Codebase.Sync as Sync +import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache -import qualified U.Util.Serialization as S -import qualified U.Codebase.Sqlite.Reference as Sqlite.Reference -import qualified U.Codebase.Reference as Reference -import qualified U.Codebase.Sqlite.Reference as Sqlite data Entity = O ObjectId | C CausalHashId data DbTag = SrcDb | DestDb -data DecodeError = ErrTermComponent | ErrDeclComponent +data DecodeError = ErrTermComponent | ErrDeclComponent | ErrWatchResult type ErrString = String @@ -143,10 +142,7 @@ trySync tCache hCache oCache gc = \case hId' <- syncHashLiteral hId result <- case objType of OT.TermComponent -> do - -- (fmt, termComponent) <- - -- either (throwError . DecodeError SrcDb ErrTermComponent bytes) pure -- 🤪 - -- . 
flip runGetS bytes - -- $ (,) <$> getWord8 <*> S.decomposeTermComponent + -- split up the localIds (parsed), term, and type blobs (fmt, unzip3 -> (localIds, termBytes, typeBytes)) <- case flip runGetS bytes do tag <- getWord8 @@ -154,27 +150,49 @@ trySync tCache hCache oCache gc = \case pure (tag, component) of Right x -> pure x Left s -> throwError $ DecodeError SrcDb ErrTermComponent bytes s + -- iterate through the local ids looking for missing deps; + -- then either enqueue the missing deps, or proceed to move the object foldM foldLocalIds (Right mempty) localIds >>= \case Left missingDeps -> pure $ Left missingDeps Right (toList -> localIds') -> do + -- reassemble and save the reindexed term let bytes' = runPutS $ putWord8 fmt >> S.recomposeTermComponent (zip3 localIds' termBytes typeBytes) oId' <- runDest $ Q.saveObject hId' objType bytes' - -- "todo: optionally copy watch cache entry" - - -- sync dependency index + -- copy reference-specific stuff for_ [0 .. length localIds - 1] \(fromIntegral -> idx) -> do - indexDependencies <- runSrc $ Q.getDependenciesForDependent (Reference.Id oId idx) + let ref = Reference.Id oId idx + ref' = Reference.Id oId' idx + -- sync watch results + for_ [WK.RegularWatch, WK.TestWatch] \wk -> do + let refH = Reference.Id hId idx + refH' = Reference.Id hId' idx + runSrc (Q.loadWatch wk refH) >>= traverse_ \blob -> do + (L.LocalIds tIds hIds, termBytes) <- + case runGetS S.decomposeWatchResult blob of + Right x -> pure x + Left s -> throwError $ DecodeError SrcDb ErrWatchResult blob s + tIds' <- traverse syncTextLiteral tIds + hIds' <- traverse syncHashLiteral hIds + let blob' = runPutS (S.recomposeWatchResult (L.LocalIds tIds' hIds', termBytes)) + runDest (Q.saveWatch wk refH' blob') + -- sync dependencies index let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") - indexDependencies' <- traverse (fmap fromJust' . 
isSyncedObjectReference) indexDependencies - runDest $ traverse_ (flip Q.addToDependentsIndex (Reference.Id oId' idx)) indexDependencies' - - -- sync type index rows - error "todo: sync type index rows" - -- sync type mentions index rows - error "todo: sync type mentions index rows" + runSrc (Q.getDependenciesForDependent ref) + >>= traverse (fmap fromJust' . isSyncedObjectReference) + >>= runDest . traverse_ (flip Q.addToDependentsIndex ref') + -- sync type index + let reft = Referent.RefId ref + reft' = Referent.RefId ref' + runSrc (Q.getTypeReferenceForReference ref) + >>= syncHashReference + >>= runDest . flip Q.addToTypeIndex reft' + -- sync type mentions index + runSrc (Q.getTypeMentionsByReferent reft) + >>= traverse syncHashReference + >>= runDest . traverse (flip Q.addToTypeMentionsIndex reft') pure $ Right oId' OT.DeclComponent -> error "todo" OT.Namespace -> error "todo" @@ -226,13 +244,16 @@ trySync tCache hCache oCache gc = \case isSyncedObjectReference = \case Reference.ReferenceBuiltin t -> Just . Reference.ReferenceBuiltin <$> syncTextLiteral t - Reference.ReferenceBuiltin id -> - Reference.ReferenceBuiltin <$> isSyncedObjectReferenceId id + Reference.ReferenceDerived id -> + fmap Reference.ReferenceDerived <$> isSyncedObjectReferenceId id isSyncedObjectReferenceId :: Sqlite.Reference.Id -> m (Maybe Sqlite.Reference.Id) isSyncedObjectReferenceId (Reference.Id oId idx) = isSyncedObject oId <&> fmap (\oId' -> Reference.Id oId' idx) + syncHashReference :: Sqlite.ReferenceH -> m Sqlite.ReferenceH + syncHashReference = bitraverse syncTextLiteral syncHashLiteral + syncCausalHash :: CausalHashId -> m CausalHashId syncCausalHash = fmap CausalHashId . syncHashLiteral . 
unCausalHashId diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index b44972c9eb..d0e45dd60f 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -62,18 +62,6 @@ CREATE TABLE causal ( CREATE INDEX causal_value_hash_id ON causal(value_hash_id); CREATE INDEX causal_gc_generation ON causal(gc_generation); --- proposed: --- CREATE TABLE causal ( --- causal_id INTEGER PRIMARY KEY NOT NULL, --- self_hash_id INTEGER NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), --- value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), --- gc_generation INTEGER NOT NULL DEFAULT 0 --- ); --- CREATE INDEX causal_self_hash_id ON causal(self_hash_id); --- CREATE INDEX causal_value_hash_id ON causal(value_hash_id); --- CREATE INDEX causal_gc_generation ON causal(gc_generation); - - -- valueHash : Hash = hash(value) -- db.saveValue(valueHash, value) -- causalHash : Hash = hash(new Causal(valueHash, parentCausalHashes)) diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 13ebfe580b..d32afd12c0 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -185,6 +185,11 @@ getFramed :: MonadGet m => Get a -> m a getFramed get = getFramedByteString >>= either fail pure . runGetS get +putFramedByteString :: MonadPut m => ByteString -> m () +putFramedByteString bs = do + putVarInt (BS.length bs) + putByteString bs + putFramed :: MonadPut m => Put a -> a -> m () putFramed put a = do -- 1. 
figure out the length `len` of serialized `a` From 8486493d579810b3173b89c05f8de1fe47ea64d1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 2 Mar 2021 20:59:19 -0500 Subject: [PATCH 111/225] sync decl components --- .../U/Codebase/Sqlite/Queries.hs | 48 +++++++++---- .../U/Codebase/Sqlite/Serialization.hs | 7 +- .../U/Codebase/Sqlite/Sync22.hs | 70 +++++++++++++++---- 3 files changed, 95 insertions(+), 30 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 1fc9a2fee1..c8bd581a6a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -39,7 +39,6 @@ import Database.SQLite.Simple.ToField (ToField (..)) import Debug.Trace (trace, traceM) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') -import qualified U.Codebase.Referent as C.Referent import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, Generation (..), HashId (..), ObjectId (..), TextId) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference @@ -431,8 +430,8 @@ getReferentsByType r = query sql r where sql = [here| AND type_reference_component_index IS ? |] -getTypeReferenceForReference :: EDB m => Reference.Id -> m (Reference' TextId HashId) -getTypeReferenceForReference (C.Referent.RefId -> r) = +getTypeReferenceForReferent :: EDB m => Referent.Id -> m (Reference' TextId HashId) +getTypeReferenceForReferent r = queryMaybe sql r >>= orError (NoTypeIndexForTerm r) where sql = [here| SELECT @@ -445,6 +444,21 @@ getTypeReferenceForReference (C.Referent.RefId -> r) = AND term_referent_constructor_index = ? 
|] +-- todo: error if no results +getTypeReferencesForComponent :: EDB m => ObjectId -> m [(Reference' TextId HashId, Referent.Id)] +getTypeReferencesForComponent oId = + query sql (Only oId) <&> map fixupTypeIndexRow where sql = [here| + SELECT + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index, + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index + FROM find_type_index + WHERE term_referent_object_id = ? + |] + addToTypeMentionsIndex :: DB m => Reference' TextId HashId -> Referent.Id -> m () addToTypeMentionsIndex tp tm = execute sql (tp :. tm) where sql = [here| INSERT INTO find_type_mentions_index ( @@ -470,17 +484,23 @@ getReferentsByTypeMention r = query sql r where sql = [here| AND type_reference_component_index IS ? |] -getTypeMentionsByReferent :: DB m => Referent.Id -> m [TypeHashReference] -getTypeMentionsByReferent r = query sql r where sql = [here| - SELECT - type_reference_builtin, - type_reference_hash_id, - type_reference_component_index - FROM find_type_mentions_index - WHERE term_referent_object_id IS ? - AND term_referent_component_index IS ? - AND term_referent_constructor_index IS ? -|] +-- todo: error if no results +getTypeMentionsReferencesForComponent :: EDB m => ObjectId -> m [(Reference' TextId HashId, Referent.Id)] +getTypeMentionsReferencesForComponent r = + query sql (Only r) <&> map fixupTypeIndexRow where sql = [here| + SELECT + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index, + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index + FROM find_type_mentions_index + WHERE term_referent_object_id IS ? + |] + +fixupTypeIndexRow :: Reference' TextId HashId :. Referent.Id -> (Reference' TextId HashId, Referent.Id) +fixupTypeIndexRow (rh :. 
ri) = (rh, ri) addToDependentsIndex :: DB m => Reference.Reference -> Reference.Id -> m () addToDependentsIndex dependency dependent = execute sql (dependency :. dependent) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 7169a316e0..da3409112b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -11,7 +11,7 @@ module U.Codebase.Sqlite.Serialization where import Data.Bits (Bits) import qualified Data.ByteString as BS import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) -import Data.Bytes.Put (MonadPut (putByteString), putWord8) +import Data.Bytes.Put (MonadPut, putWord8) import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE, serialize) import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) @@ -683,6 +683,11 @@ recomposeTermComponent = putFramedByteString termBytes putFramedByteString typeBytes +recomposeDeclComponent :: MonadPut m => [(LocalIds, BS.ByteString)] -> m () +recomposeDeclComponent = putFramedArray \(localIds, declBytes) -> do + putLocalIds localIds + putFramedByteString declBytes + decomposeComponent :: (MonadGet m, Monoid a) => Get a -> m a decomposeComponent split = do offsets <- getList (getVarInt @_ @Int) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 3b80c75e42..1de491b812 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -15,6 +15,7 @@ import Control.Monad.RWS (MonadIO, MonadReader) import Control.Monad.Reader (ReaderT) import Control.Monad.Trans (lift) import qualified Control.Monad.Writer as Writer +import Data.Bifunctor (bimap) import Data.Bitraversable (bitraverse) import Data.ByteString (ByteString) import Data.Bytes.Get (getWord8, runGetS) 
@@ -28,13 +29,13 @@ import qualified Data.Sequence as Seq import Data.Traversable (for) import Database.SQLite.Simple (Connection) import qualified U.Codebase.Reference as Reference -import qualified U.Codebase.Referent as Referent import U.Codebase.Sqlite.DbId import qualified U.Codebase.Sqlite.LocalIds as L import qualified U.Codebase.Sqlite.ObjectType as OT import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Reference as Sqlite import qualified U.Codebase.Sqlite.Reference as Sqlite.Reference +import qualified U.Codebase.Sqlite.Referent as Sqlite.Referent import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sync import qualified U.Codebase.Sync as Sync @@ -52,7 +53,7 @@ type ErrString = String data Error = DbIntegrity Q.Integrity - | DecodeError DbTag DecodeError ByteString ErrString + | DecodeError DecodeError ByteString ErrString | -- | hashes corresponding to a single object in source codebase -- correspond to multiple objects in destination codebase HashObjectCorrespondence ObjectId [HashId] [ObjectId] @@ -149,7 +150,7 @@ trySync tCache hCache oCache gc = \case component <- S.decomposeTermComponent pure (tag, component) of Right x -> pure x - Left s -> throwError $ DecodeError SrcDb ErrTermComponent bytes s + Left s -> throwError $ DecodeError ErrTermComponent bytes s -- iterate through the local ids looking for missing deps; -- then either enqueue the missing deps, or proceed to move the object foldM foldLocalIds (Right mempty) localIds >>= \case @@ -163,8 +164,6 @@ trySync tCache hCache oCache gc = \case oId' <- runDest $ Q.saveObject hId' objType bytes' -- copy reference-specific stuff for_ [0 .. 
length localIds - 1] \(fromIntegral -> idx) -> do - let ref = Reference.Id oId idx - ref' = Reference.Id oId' idx -- sync watch results for_ [WK.RegularWatch, WK.TestWatch] \wk -> do let refH = Reference.Id hId idx @@ -173,28 +172,65 @@ trySync tCache hCache oCache gc = \case (L.LocalIds tIds hIds, termBytes) <- case runGetS S.decomposeWatchResult blob of Right x -> pure x - Left s -> throwError $ DecodeError SrcDb ErrWatchResult blob s + Left s -> throwError $ DecodeError ErrWatchResult blob s tIds' <- traverse syncTextLiteral tIds hIds' <- traverse syncHashLiteral hIds let blob' = runPutS (S.recomposeWatchResult (L.LocalIds tIds' hIds', termBytes)) runDest (Q.saveWatch wk refH' blob') -- sync dependencies index + let ref = Reference.Id oId idx + ref' = Reference.Id oId' idx + let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") + runSrc (Q.getDependenciesForDependent ref) + >>= traverse (fmap fromJust' . isSyncedObjectReference) + >>= runDest . traverse_ (flip Q.addToDependentsIndex ref') + -- sync type index + runSrc (Q.getTypeReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . uncurry Q.addToTypeIndex) + -- sync type mentions index + runSrc (Q.getTypeMentionsReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . 
uncurry Q.addToTypeMentionsIndex) + pure $ Right oId' + OT.DeclComponent -> do + -- split up the localIds (parsed), decl blobs + (fmt, unzip -> (localIds, declBytes)) <- + case flip runGetS bytes do + tag <- getWord8 + component <- S.decomposeDeclComponent + pure (tag, component) of + Right x -> pure x + Left s -> throwError $ DecodeError ErrDeclComponent bytes s + -- iterate through the local ids looking for missing deps; + -- then either enqueue the missing deps, or proceed to move the object + foldM foldLocalIds (Right mempty) localIds >>= \case + Left missingDeps -> pure $ Left missingDeps + Right (toList -> localIds') -> do + -- reassemble and save the reindexed term + let bytes' = + runPutS $ + putWord8 fmt + >> S.recomposeDeclComponent (zip localIds' declBytes) + oId' <- runDest $ Q.saveObject hId' objType bytes' + -- copy per-element-of-the-component stuff + for_ [0 .. length localIds - 1] \(fromIntegral -> idx) -> do + -- sync dependencies index + let ref = Reference.Id oId idx + ref' = Reference.Id oId' idx let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") runSrc (Q.getDependenciesForDependent ref) >>= traverse (fmap fromJust' . isSyncedObjectReference) >>= runDest . traverse_ (flip Q.addToDependentsIndex ref') -- sync type index - let reft = Referent.RefId ref - reft' = Referent.RefId ref' - runSrc (Q.getTypeReferenceForReference ref) - >>= syncHashReference - >>= runDest . flip Q.addToTypeIndex reft' + runSrc (Q.getTypeReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . uncurry Q.addToTypeIndex) -- sync type mentions index - runSrc (Q.getTypeMentionsByReferent reft) - >>= traverse syncHashReference - >>= runDest . traverse (flip Q.addToTypeMentionsIndex reft') + runSrc (Q.getTypeMentionsReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . 
uncurry Q.addToTypeMentionsIndex) pure $ Right oId' - OT.DeclComponent -> error "todo" OT.Namespace -> error "todo" OT.Patch -> error "todo" case result of @@ -230,6 +266,10 @@ trySync tCache hCache oCache gc = \case then pure $ Right mayOIds' else pure $ Left missing + syncTypeIndexRow oId' = bitraverse syncHashReference (pure . rewriteTypeIndexReferent oId') + rewriteTypeIndexReferent :: ObjectId -> Sqlite.Referent.Id -> Sqlite.Referent.Id + rewriteTypeIndexReferent oId' = bimap (const oId') (const oId') + syncTextLiteral :: TextId -> m TextId syncTextLiteral = Cache.apply tCache \tId -> do t <- runSrc $ Q.loadTextById tId From 1c4aaf01763b02d0738f7031eb90158bc58408d3 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 2 Mar 2021 23:57:56 -0500 Subject: [PATCH 112/225] sync patch objects --- .../U/Codebase/Sqlite/Operations.hs | 6 +- .../U/Codebase/Sqlite/Serialization.hs | 6 +- .../U/Codebase/Sqlite/Sync22.hs | 77 ++++++++++++++++++- 3 files changed, 79 insertions(+), 10 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 73e0a7fe95..aac89d9e09 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -702,14 +702,14 @@ loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) loadWatch k r = C.Reference.idH Q.saveHashHash r >>= MaybeT . 
Q.loadWatch k - >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getWatchLocalIds S.getTerm) + >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getWatchLocalIds (S.getFramed S.getTerm)) >>= uncurry w2cTerm saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () saveWatch w r t = do rs <- C.Reference.idH Q.saveHashHash r wterm <- c2wTerm t - let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm + let bytes = S.putBytes (S.putPair S.putLocalIds (S.putFramed S.putTerm)) wterm Q.saveWatch w rs bytes c2wTerm :: EDB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) @@ -1253,7 +1253,7 @@ termsHavingType cTypeRef = do Just set -> Set.fromList set typeReferenceForTerm :: EDB m => S.Reference.Id -> m S.ReferenceH -typeReferenceForTerm = liftQ . Q.getTypeReferenceForReference +typeReferenceForTerm = liftQ . Q.getTypeReferenceForReferent . C.Referent.RefId termsMentioningType :: EDB m => C.Reference -> m (Set C.Referent.Id) termsMentioningType cTypeRef = do diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index da3409112b..60ba6413fa 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -22,6 +22,7 @@ import qualified Data.Set as Set import Data.Vector (Vector) import qualified Data.Vector as Vector import Data.Word (Word64) +import Debug.Trace (trace) import qualified U.Codebase.Decl as Decl import U.Codebase.Kind (Kind) import qualified U.Codebase.Kind as Kind @@ -49,7 +50,6 @@ import qualified U.Core.ABT as ABT import qualified U.Util.Monoid as Monoid import U.Util.Serialization hiding (debug) import Prelude hiding (getChar, putChar) -import Debug.Trace (trace) debug :: Bool debug = False @@ -490,8 +490,8 @@ putPatchFormat = \case getPatchFormat :: MonadGet m => m PatchFormat.PatchFormat getPatchFormat = getWord8 >>= \case - 0 -> PatchFormat.Full <$> getPatchLocalIds <*> 
getPatchFull - 1 -> PatchFormat.Diff <$> getVarInt <*> getPatchLocalIds <*> getPatchDiff + 0 -> PatchFormat.Full <$> getPatchLocalIds <*> getFramed getPatchFull + 1 -> PatchFormat.Diff <$> getVarInt <*> getPatchLocalIds <*> getFramed getPatchDiff x -> unknownTag "getPatchFormat" x where getPatchFull :: MonadGet m => m PatchFull.LocalPatch diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 1de491b812..eb31055b25 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -9,15 +9,17 @@ module U.Codebase.Sqlite.Sync22 where import Control.Monad (filterM, foldM, join) -import Control.Monad.Except (ExceptT, MonadError (throwError)) +import Control.Monad.Except (ExceptT, MonadError (throwError), runExceptT) import Control.Monad.Extra (ifM) import Control.Monad.RWS (MonadIO, MonadReader) import Control.Monad.Reader (ReaderT) import Control.Monad.Trans (lift) +import Control.Monad.Trans.Except (ExceptT (ExceptT)) import qualified Control.Monad.Writer as Writer import Data.Bifunctor (bimap) import Data.Bitraversable (bitraverse) import Data.ByteString (ByteString) +import qualified Data.ByteString as BS import Data.Bytes.Get (getWord8, runGetS) import Data.Bytes.Put (putWord8, runPutS) import Data.Foldable (for_, toList, traverse_) @@ -27,11 +29,13 @@ import Data.Maybe (catMaybes, fromJust, fromMaybe, isJust) import Data.Sequence (Seq) import qualified Data.Sequence as Seq import Data.Traversable (for) +import Data.Word (Word8) import Database.SQLite.Simple (Connection) import qualified U.Codebase.Reference as Reference import U.Codebase.Sqlite.DbId import qualified U.Codebase.Sqlite.LocalIds as L import qualified U.Codebase.Sqlite.ObjectType as OT +import qualified U.Codebase.Sqlite.Patch.Format as PL import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Reference as Sqlite import qualified 
U.Codebase.Sqlite.Reference as Sqlite.Reference @@ -42,12 +46,21 @@ import qualified U.Codebase.Sync as Sync import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache +import qualified U.Util.Serialization as S data Entity = O ObjectId | C CausalHashId data DbTag = SrcDb | DestDb -data DecodeError = ErrTermComponent | ErrDeclComponent | ErrWatchResult +data DecodeError + = ErrTermComponent + | ErrDeclComponent + | -- | ErrTermFormat + -- | ErrDeclFormat + ErrBranchFormat + | ErrPatchFormat + | ErrPatchBody Word8 + | ErrWatchResult type ErrString = String @@ -144,6 +157,10 @@ trySync tCache hCache oCache gc = \case result <- case objType of OT.TermComponent -> do -- split up the localIds (parsed), term, and type blobs + -- note: this whole business with `fmt` is pretty weird, and will need to be + -- revisited when there are more formats. + -- (or maybe i'll learn something by implementing sync for patches and namespaces, + -- which have two formats already) (fmt, unzip3 -> (localIds, termBytes, typeBytes)) <- case flip runGetS bytes do tag <- getWord8 @@ -231,8 +248,52 @@ trySync tCache hCache oCache gc = \case >>= traverse (syncTypeIndexRow oId') >>= traverse_ (runDest . 
uncurry Q.addToTypeMentionsIndex) pure $ Right oId' - OT.Namespace -> error "todo" - OT.Patch -> error "todo" + OT.Namespace -> case BS.uncons bytes of + Just (0, bytes) -> error "todo" + Just (1, bytes) -> error "todo" + Just (tag, _) -> throwError $ DecodeError ErrBranchFormat bytes ("unrecognized branch format tag: " ++ show tag) + Nothing -> throwError $ DecodeError ErrBranchFormat bytes "zero-byte branch object" + OT.Patch -> case BS.uncons bytes of + Just (fmt@0, bytes) -> do + (ids, blob) <- case flip runGetS bytes do + ids <- S.getPatchLocalIds + blob <- S.getFramedByteString + pure (ids, blob) of + Right x -> pure x + Left s -> throwError $ DecodeError (ErrPatchBody 0) bytes s + syncPatchLocalIds ids >>= \case + Left missingDeps -> pure $ Left missingDeps + Right ids' -> do + let bytes' = runPutS do + putWord8 fmt + S.putPatchLocalIds ids' + S.putFramedByteString blob + oId' <- runDest $ Q.saveObject hId' objType bytes' + pure $ Right oId' + Just (fmt@1, bytes) -> do + (poId, ids, blob) <- case flip runGetS bytes do + poId <- S.getVarInt + ids <- S.getPatchLocalIds + blob <- S.getFramedByteString + pure (poId, ids, blob) of + Right x -> pure x + Left s -> throwError $ DecodeError (ErrPatchBody 0) bytes s + mayPoId' <- isSyncedObject poId + eitherIds' <- syncPatchLocalIds ids + case (mayPoId', eitherIds') of + (Nothing, Left missingDeps) -> pure $ Left (O poId Seq.<| missingDeps) + (Nothing, Right {}) -> pure $ Left (Seq.singleton (O poId)) + (Just {}, Left missingDeps) -> pure $ Left missingDeps + (Just poId', Right ids') -> do + let bytes' = runPutS do + putWord8 fmt + S.putVarInt poId' + S.putPatchLocalIds ids' + S.putFramedByteString blob + oId' <- runDest $ Q.saveObject hId' objType bytes' + pure $ Right oId' + Just (tag, _) -> throwError $ DecodeError ErrBranchFormat bytes ("unrecognized patch format tag: " ++ show tag) + Nothing -> throwError $ DecodeError ErrPatchFormat bytes "zero-byte patch object" case result of Left deps -> pure . 
Sync.Missing $ toList deps Right oId' -> do @@ -266,6 +327,14 @@ trySync tCache hCache oCache gc = \case then pure $ Right mayOIds' else pure $ Left missing + + syncPatchLocalIds :: PL.PatchLocalIds -> m (Either (Seq Entity) PL.PatchLocalIds) + syncPatchLocalIds (PL.LocalIds tIds hIds oIds) = runExceptT do + oIds' <- ExceptT $ syncLocalObjectIds oIds + tIds' <- lift $ traverse syncTextLiteral tIds + hIds' <- lift $ traverse syncHashLiteral hIds + pure $ PL.LocalIds tIds' hIds' oIds' + syncTypeIndexRow oId' = bitraverse syncHashReference (pure . rewriteTypeIndexReferent oId') rewriteTypeIndexReferent :: ObjectId -> Sqlite.Referent.Id -> Sqlite.Referent.Id rewriteTypeIndexReferent oId' = bimap (const oId') (const oId') From 6bdd8f6fb9ddd570f45637b178174093c940d1aa Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 3 Mar 2021 10:58:57 -0500 Subject: [PATCH 113/225] update {get,put}{Branch,Patch}Format to frame the bodies might revert later --- .../U/Codebase/Sqlite/Serialization.hs | 65 ++++++++++++------- 1 file changed, 40 insertions(+), 25 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 60ba6413fa..ddf71f03bc 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -443,25 +443,30 @@ lookupDeclElement i = putBranchFormat :: MonadPut m => BranchFormat.BranchFormat -> m () putBranchFormat b | debug && trace ("putBranchFormat " ++ show b) False = undefined putBranchFormat b = case b of - BranchFormat.Full li b -> putWord8 0 *> putBranchFull li b - BranchFormat.Diff r li d -> putWord8 1 *> putBranchDiff r li d + BranchFormat.Full li b -> do + putWord8 0 + putBranchLocalIds li + putFramed putBranchFull b + BranchFormat.Diff r li d -> do + putWord8 1 + putVarInt r + putBranchLocalIds li + putFramed putBranchDiff d where putBranchLocalIds (BranchFormat.LocalIds ts os ps cs) = do 
putFoldable putVarInt ts putFoldable putVarInt os putFoldable putVarInt ps putFoldable (putPair putVarInt putVarInt) cs - putBranchFull li (BranchFull.Branch terms types patches children) = do - putBranchLocalIds li + putBranchFull (BranchFull.Branch terms types patches children) = do putMap putVarInt (putMap putReferent putMetadataSetFormat) terms putMap putVarInt (putMap putReference putMetadataSetFormat) types putMap putVarInt putVarInt patches putMap putVarInt putVarInt children - putMetadataSetFormat = \case - BranchFull.Inline s -> putWord8 0 *> putFoldable putReference s - putBranchDiff ref li (BranchDiff.Diff terms types patches children) = do - putVarInt ref - putBranchLocalIds li + where + putMetadataSetFormat (BranchFull.Inline s) = + putWord8 0 *> putFoldable putReference s + putBranchDiff (BranchDiff.Diff terms types patches children) = do putMap putVarInt (putMap putReferent putDiffOp) terms putMap putVarInt (putMap putReference putDiffOp) types putMap putVarInt putPatchOp patches @@ -484,8 +489,15 @@ putBranchFormat b = case b of putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () putPatchFormat = \case - PatchFormat.Full ids p -> putWord8 0 *> putPatchFull ids p - PatchFormat.Diff r ids p -> putWord8 1 *> putPatchDiff r ids p + PatchFormat.Full ids p -> do + putWord8 0 + putPatchLocalIds ids + putFramed putPatchFull p + PatchFormat.Diff r ids p -> do + putWord8 1 + putVarInt r + putPatchLocalIds ids + putFramed putPatchDiff p getPatchFormat :: MonadGet m => m PatchFormat.PatchFormat getPatchFormat = @@ -533,16 +545,13 @@ getPatchLocalIds = <*> getVector getVarInt <*> getVector getVarInt -putPatchFull :: MonadPut m => PatchFormat.PatchLocalIds -> PatchFull.LocalPatch -> m () -putPatchFull ids (PatchFull.Patch termEdits typeEdits) = do - putPatchLocalIds ids +putPatchFull :: MonadPut m => PatchFull.LocalPatch -> m () +putPatchFull (PatchFull.Patch termEdits typeEdits) = do putMap putReferent (putFoldable putTermEdit) termEdits putMap 
putReference (putFoldable putTypeEdit) typeEdits -putPatchDiff :: MonadPut m => PatchObjectId -> PatchFormat.PatchLocalIds -> PatchDiff.LocalPatchDiff -> m () -putPatchDiff r ids (PatchDiff.PatchDiff atm atp rtm rtp) = do - putVarInt r - putPatchLocalIds ids +putPatchDiff :: MonadPut m => PatchDiff.LocalPatchDiff -> m () +putPatchDiff (PatchDiff.PatchDiff atm atp rtm rtp) = do putMap putReferent (putFoldable putTermEdit) atm putMap putReference (putFoldable putTypeEdit) atp putMap putReferent (putFoldable putTermEdit) rtm @@ -573,32 +582,36 @@ getBranchFormat = 1 -> getBranchDiff x -> unknownTag "getBranchFormat" x where + getBranchFull :: MonadGet m => m BranchFormat.BranchFormat getBranchFull = - BranchFormat.Full <$> getBranchLocalIds <*> getLocalBranch + BranchFormat.Full <$> getBranchLocalIds <*> getFramed getLocalBranch where + getLocalBranch :: MonadGet m => m BranchFull.LocalBranch getLocalBranch = BranchFull.Branch <$> getMap getVarInt (getMap getReferent getMetadataSetFormat) <*> getMap getVarInt (getMap getReference getMetadataSetFormat) <*> getMap getVarInt getVarInt <*> getMap getVarInt getVarInt - - getMetadataSetFormat = - getWord8 >>= \case - 0 -> BranchFull.Inline <$> getSet getReference - x -> unknownTag "getMetadataSetFormat" x + getMetadataSetFormat :: MonadGet m => m BranchFull.LocalMetadataSet + getMetadataSetFormat = + getWord8 >>= \case + 0 -> BranchFull.Inline <$> getSet getReference + x -> unknownTag "getMetadataSetFormat" x getBranchDiff = BranchFormat.Diff <$> getVarInt <*> getBranchLocalIds - <*> getLocalBranchDiff + <*> getFramed getLocalBranchDiff where + getLocalBranchDiff :: MonadGet m => m BranchDiff.LocalDiff getLocalBranchDiff = BranchDiff.Diff <$> getMap getVarInt (getMap getReferent getDiffOp) <*> getMap getVarInt (getMap getReference getDiffOp) <*> getMap getVarInt getPatchOp <*> getMap getVarInt getChildOp + getDiffOp :: MonadGet m => m BranchDiff.LocalDefinitionOp getDiffOp = getWord8 >>= \case 0 -> pure BranchDiff.RemoveDef 
@@ -609,11 +622,13 @@ getBranchFormat = adds <- getMap get (pure True) -- and removes: addToExistingMap get (pure False) adds + getPatchOp :: MonadGet m => m BranchDiff.LocalPatchOp getPatchOp = getWord8 >>= \case 0 -> pure BranchDiff.PatchRemove 1 -> BranchDiff.PatchAddReplace <$> getVarInt x -> unknownTag "getPatchOp" x + getChildOp :: MonadGet m => m BranchDiff.LocalChildOp getChildOp = getWord8 >>= \case 0 -> pure BranchDiff.ChildRemove From c8d0e1951b5daa3c51e7f2ed6895f5b6badbecac Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 3 Mar 2021 12:04:29 -0500 Subject: [PATCH 114/225] undo some framing --- .../U/Codebase/Sqlite/Serialization.hs | 71 +++++++------------ .../U/Codebase/Sqlite/Sync22.hs | 20 +++--- .../U/Util/Serialization.hs | 5 +- 3 files changed, 41 insertions(+), 55 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index ddf71f03bc..39096371ae 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -11,7 +11,7 @@ module U.Codebase.Sqlite.Serialization where import Data.Bits (Bits) import qualified Data.ByteString as BS import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) -import Data.Bytes.Put (MonadPut, putWord8) +import Data.Bytes.Put (MonadPut, putWord8, putByteString) import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE, serialize) import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) @@ -168,7 +168,7 @@ putTermComponent t | debug && trace ("putTermComponent " ++ show t) False = unde putTermComponent (TermFormat.LocallyIndexedComponent v) = putFramedArray ( \(localIds, term, typ) -> - putLocalIds localIds >> putFramed putTerm term >> putFramed putTType typ + putLocalIds localIds >> putFramed putTerm term >> putTType typ ) v @@ -263,7 +263,7 @@ putTerm t = putABT putSymbol putUnit putF t 
getTermComponent :: MonadGet m => m TermFormat.LocallyIndexedComponent getTermComponent = TermFormat.LocallyIndexedComponent - <$> getFramedArray (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) + <$> getFramedArray (getTuple3 getLocalIds (getFramed getTerm) getTType) getTerm :: MonadGet m => m TermFormat.Term getTerm = getABT getSymbol getUnit getF @@ -344,19 +344,19 @@ getTerm = getABT getSymbol getUnit getF lookupTermElement :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term, TermFormat.Type) lookupTermElement i = getWord8 >>= \case - 0 -> unsafeFramedArrayLookup (getTuple3 getLocalIds (getFramed getTerm) (getFramed getTType)) $ fromIntegral i + 0 -> unsafeFramedArrayLookup (getTuple3 getLocalIds (getFramed getTerm) getTType) $ fromIntegral i tag -> unknownTag "lookupTermElement" tag lookupTermElementDiscardingType :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Term) lookupTermElementDiscardingType i = getWord8 >>= \case - 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <*> getFramed getTerm <* skipFramed) $ fromIntegral i + 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <*> getFramed getTerm) $ fromIntegral i tag -> unknownTag "lookupTermElementDiscardingType" tag lookupTermElementDiscardingTerm :: MonadGet m => Reference.Pos -> m (LocalIds, TermFormat.Type) lookupTermElementDiscardingTerm i = getWord8 >>= \case - 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getFramed getTType) $ fromIntegral i + 0 -> unsafeFramedArrayLookup ((,) <$> getLocalIds <* skipFramed <*> getTType) $ fromIntegral i tag -> unknownTag "lookupTermElementDiscardingTerm" tag getTType :: MonadGet m => m TermFormat.Type @@ -446,12 +446,12 @@ putBranchFormat b = case b of BranchFormat.Full li b -> do putWord8 0 putBranchLocalIds li - putFramed putBranchFull b + putBranchFull b BranchFormat.Diff r li d -> do putWord8 1 putVarInt r putBranchLocalIds li - putFramed putBranchDiff d + putBranchDiff d where putBranchLocalIds 
(BranchFormat.LocalIds ts os ps cs) = do putFoldable putVarInt ts @@ -492,18 +492,18 @@ putPatchFormat = \case PatchFormat.Full ids p -> do putWord8 0 putPatchLocalIds ids - putFramed putPatchFull p + putPatchFull p PatchFormat.Diff r ids p -> do putWord8 1 putVarInt r putPatchLocalIds ids - putFramed putPatchDiff p + putPatchDiff p getPatchFormat :: MonadGet m => m PatchFormat.PatchFormat getPatchFormat = getWord8 >>= \case - 0 -> PatchFormat.Full <$> getPatchLocalIds <*> getFramed getPatchFull - 1 -> PatchFormat.Diff <$> getVarInt <*> getPatchLocalIds <*> getFramed getPatchDiff + 0 -> PatchFormat.Full <$> getPatchLocalIds <*> getPatchFull + 1 -> PatchFormat.Diff <$> getVarInt <*> getPatchLocalIds <*> getPatchDiff x -> unknownTag "getPatchFormat" x where getPatchFull :: MonadGet m => m PatchFull.LocalPatch @@ -584,7 +584,7 @@ getBranchFormat = where getBranchFull :: MonadGet m => m BranchFormat.BranchFormat getBranchFull = - BranchFormat.Full <$> getBranchLocalIds <*> getFramed getLocalBranch + BranchFormat.Full <$> getBranchLocalIds <*> getLocalBranch where getLocalBranch :: MonadGet m => m BranchFull.LocalBranch getLocalBranch = @@ -602,7 +602,7 @@ getBranchFormat = BranchFormat.Diff <$> getVarInt <*> getBranchLocalIds - <*> getFramed getLocalBranchDiff + <*> getLocalBranchDiff where getLocalBranchDiff :: MonadGet m => m BranchDiff.LocalDiff getLocalBranchDiff = @@ -678,50 +678,31 @@ watchLocalIdsToLocalDeps :: WatchLocalIds -> SE.SyncEntitySeq watchLocalIdsToLocalDeps (LocalIds ts hs) = SE.SyncEntity (vec2seq ts) mempty (vec2seq hs) mempty -decomposeTermComponent :: MonadGet m => m [(LocalIds, BS.ByteString, BS.ByteString)] -decomposeTermComponent = decomposeComponent do - ids <- getLocalIds - termBytes <- getFramedByteString - typeBytes <- getFramedByteString - pure [(ids, termBytes, typeBytes)] - -decomposeDeclComponent :: MonadGet m => m [(LocalIds, BS.ByteString)] -decomposeDeclComponent = decomposeComponent do - ids <- getLocalIds - declBytes <- 
getFramedByteString - pure [(ids, declBytes)] - -recomposeTermComponent :: MonadPut m => [(LocalIds, BS.ByteString, BS.ByteString)] -> m () -recomposeTermComponent = - putFramedArray \(localIds, termBytes, typeBytes) -> do - putLocalIds localIds - putFramedByteString termBytes - putFramedByteString typeBytes - -recomposeDeclComponent :: MonadPut m => [(LocalIds, BS.ByteString)] -> m () -recomposeDeclComponent = putFramedArray \(localIds, declBytes) -> do - putLocalIds localIds - putFramedByteString declBytes - -decomposeComponent :: (MonadGet m, Monoid a) => Get a -> m a -decomposeComponent split = do +decomposeComponent :: MonadGet m => m [(LocalIds, BS.ByteString)] +decomposeComponent = do offsets <- getList (getVarInt @_ @Int) componentBytes <- getByteString (last offsets) let get1 (start, end) = do let bytes = BS.drop start $ BS.take end componentBytes - either fail pure $ runGetS split bytes + either fail (pure . pure) $ runGetS split bytes + split = (,) <$> getLocalIds <*> getRemainingByteString Monoid.foldMapM get1 (zip offsets (tail offsets)) +recomposeComponent :: MonadPut m => [(LocalIds, BS.ByteString)] -> m () +recomposeComponent = putFramedArray \(localIds, bytes) -> do + putLocalIds localIds + putByteString bytes + decomposeWatchResult :: MonadGet m => m (WatchLocalIds, BS.ByteString) -decomposeWatchResult = (,) <$> getWatchLocalIds <*> getFramedByteString +decomposeWatchResult = (,) <$> getWatchLocalIds <*> getRemainingByteString recomposeWatchResult :: MonadPut m => (WatchLocalIds, BS.ByteString) -> m () -recomposeWatchResult (wli, bs) = putLocalIds wli >> putFramedByteString bs +recomposeWatchResult (wli, bs) = putLocalIds wli >> putByteString bs -- the same implementation currently works for term component and type component getComponentSyncEntities :: MonadGet m => m SE.SyncEntitySeq getComponentSyncEntities = - decomposeComponent $ fmap localIdsToLocalDeps getLocalIds + foldMap (localIdsToLocalDeps . 
fst) <$> decomposeComponent getPatchSyncEntities :: MonadGet m => m SE.SyncEntitySeq getPatchSyncEntities = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index eb31055b25..1cb2609f97 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -47,6 +47,8 @@ import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache import qualified U.Util.Serialization as S +import qualified Data.Bytes.Get as Get +import qualified Data.Bytes.Put as Put data Entity = O ObjectId | C CausalHashId @@ -161,10 +163,10 @@ trySync tCache hCache oCache gc = \case -- revisited when there are more formats. -- (or maybe i'll learn something by implementing sync for patches and namespaces, -- which have two formats already) - (fmt, unzip3 -> (localIds, termBytes, typeBytes)) <- + (fmt, unzip -> (localIds, bytes)) <- case flip runGetS bytes do tag <- getWord8 - component <- S.decomposeTermComponent + component <- S.decomposeComponent pure (tag, component) of Right x -> pure x Left s -> throwError $ DecodeError ErrTermComponent bytes s @@ -177,7 +179,7 @@ trySync tCache hCache oCache gc = \case let bytes' = runPutS $ putWord8 fmt - >> S.recomposeTermComponent (zip3 localIds' termBytes typeBytes) + >> S.recomposeComponent (zip localIds' bytes) oId' <- runDest $ Q.saveObject hId' objType bytes' -- copy reference-specific stuff for_ [0 .. 
length localIds - 1] \(fromIntegral -> idx) -> do @@ -215,7 +217,7 @@ trySync tCache hCache oCache gc = \case (fmt, unzip -> (localIds, declBytes)) <- case flip runGetS bytes do tag <- getWord8 - component <- S.decomposeDeclComponent + component <- S.decomposeComponent pure (tag, component) of Right x -> pure x Left s -> throwError $ DecodeError ErrDeclComponent bytes s @@ -228,7 +230,7 @@ trySync tCache hCache oCache gc = \case let bytes' = runPutS $ putWord8 fmt - >> S.recomposeDeclComponent (zip localIds' declBytes) + >> S.recomposeComponent (zip localIds' declBytes) oId' <- runDest $ Q.saveObject hId' objType bytes' -- copy per-element-of-the-component stuff for_ [0 .. length localIds - 1] \(fromIntegral -> idx) -> do @@ -257,7 +259,7 @@ trySync tCache hCache oCache gc = \case Just (fmt@0, bytes) -> do (ids, blob) <- case flip runGetS bytes do ids <- S.getPatchLocalIds - blob <- S.getFramedByteString + blob <- S.getRemainingByteString pure (ids, blob) of Right x -> pure x Left s -> throwError $ DecodeError (ErrPatchBody 0) bytes s @@ -267,14 +269,14 @@ trySync tCache hCache oCache gc = \case let bytes' = runPutS do putWord8 fmt S.putPatchLocalIds ids' - S.putFramedByteString blob + Put.putByteString blob oId' <- runDest $ Q.saveObject hId' objType bytes' pure $ Right oId' Just (fmt@1, bytes) -> do (poId, ids, blob) <- case flip runGetS bytes do poId <- S.getVarInt ids <- S.getPatchLocalIds - blob <- S.getFramedByteString + blob <- S.getRemainingByteString pure (poId, ids, blob) of Right x -> pure x Left s -> throwError $ DecodeError (ErrPatchBody 0) bytes s @@ -289,7 +291,7 @@ trySync tCache hCache oCache gc = \case putWord8 fmt S.putVarInt poId' S.putPatchLocalIds ids' - S.putFramedByteString blob + Put.putByteString blob oId' <- runDest $ Q.saveObject hId' objType bytes' pure $ Right oId' Just (tag, _) -> throwError $ DecodeError ErrBranchFormat bytes ("unrecognized patch format tag: " ++ show tag) diff --git 
a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index d32afd12c0..436b8dbec7 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -15,7 +15,7 @@ import Data.ByteString (ByteString, readFile, writeFile) import qualified Data.ByteString as BS import Data.ByteString.Short (ShortByteString) import qualified Data.ByteString.Short as BSS -import Data.Bytes.Get (MonadGet, getByteString, getBytes, getWord8, runGetS, skip) +import Data.Bytes.Get (MonadGet, getByteString, getBytes, getWord8, runGetS, skip, remaining) import Data.Bytes.Put (MonadPut, putByteString, putWord8, runPutS) import Data.Bytes.VarInt (VarInt (VarInt)) import Data.Foldable (Foldable (toList), traverse_) @@ -181,6 +181,9 @@ getMap getA getB = addToExistingMap getA getB mempty getFramedByteString :: MonadGet m => m ByteString getFramedByteString = getVarInt >>= getByteString +getRemainingByteString :: MonadGet m => m ByteString +getRemainingByteString = fromIntegral <$> remaining >>= getByteString + getFramed :: MonadGet m => Get a -> m a getFramed get = getFramedByteString >>= either fail pure . 
runGetS get From 2f7b0ebff035bb2aee96c64c2c22b9e8c8b4a6e5 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 3 Mar 2021 16:18:54 -0500 Subject: [PATCH 115/225] unframe watchterm / use ValidateT --- .../U/Codebase/Sqlite/Operations.hs | 4 +- .../U/Codebase/Sqlite/Serialization.hs | 42 ++- .../U/Codebase/Sqlite/Sync22.hs | 296 ++++++++---------- .../unison-codebase-sqlite.cabal | 1 + 4 files changed, 177 insertions(+), 166 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index aac89d9e09..80ba2240c7 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -702,14 +702,14 @@ loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) loadWatch k r = C.Reference.idH Q.saveHashHash r >>= MaybeT . Q.loadWatch k - >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getWatchLocalIds (S.getFramed S.getTerm)) + >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getWatchLocalIds S.getTerm) >>= uncurry w2cTerm saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () saveWatch w r t = do rs <- C.Reference.idH Q.saveHashHash r wterm <- c2wTerm t - let bytes = S.putBytes (S.putPair S.putLocalIds (S.putFramed S.putTerm)) wterm + let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm Q.saveWatch w rs bytes c2wTerm :: EDB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 39096371ae..7b6fa2c437 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -11,7 +11,7 @@ module U.Codebase.Sqlite.Serialization where import Data.Bits (Bits) import qualified Data.ByteString as BS import Data.Bytes.Get (MonadGet, getByteString, getWord8, runGetS) -import 
Data.Bytes.Put (MonadPut, putWord8, putByteString) +import Data.Bytes.Put (MonadPut, putByteString, putWord8) import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE, serialize) import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) @@ -31,12 +31,14 @@ import qualified U.Codebase.Reference as Reference import U.Codebase.Referent (Referent') import qualified U.Codebase.Referent as Referent import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff +import U.Codebase.Sqlite.Branch.Format (BranchLocalIds) import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as BranchFull -import U.Codebase.Sqlite.DbId (BranchObjectId, PatchObjectId, unBranchObjectId, unPatchObjectId) +import U.Codebase.Sqlite.DbId (BranchObjectId, ObjectId, PatchObjectId, unBranchObjectId, unPatchObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId, WatchLocalIds) import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff +import U.Codebase.Sqlite.Patch.Format (PatchLocalIds) import qualified U.Codebase.Sqlite.Patch.Format as PatchFormat import qualified U.Codebase.Sqlite.Patch.Full as PatchFull import qualified U.Codebase.Sqlite.Patch.TermEdit as TermEdit @@ -453,11 +455,6 @@ putBranchFormat b = case b of putBranchLocalIds li putBranchDiff d where - putBranchLocalIds (BranchFormat.LocalIds ts os ps cs) = do - putFoldable putVarInt ts - putFoldable putVarInt os - putFoldable putVarInt ps - putFoldable (putPair putVarInt putVarInt) cs putBranchFull (BranchFull.Branch terms types patches children) = do putMap putVarInt (putMap putReferent putMetadataSetFormat) terms putMap putVarInt (putMap putReference putMetadataSetFormat) types @@ -487,6 +484,13 @@ putBranchFormat b = case b of BranchDiff.ChildRemove -> putWord8 0 BranchDiff.ChildAddReplace b -> putWord8 1 *> putVarInt b +putBranchLocalIds :: MonadPut m 
=> BranchFormat.BranchLocalIds -> m () +putBranchLocalIds (BranchFormat.LocalIds ts os ps cs) = do + putFoldable putVarInt ts + putFoldable putVarInt os + putFoldable putVarInt ps + putFoldable (putPair putVarInt putVarInt) cs + putPatchFormat :: MonadPut m => PatchFormat.PatchFormat -> m () putPatchFormat = \case PatchFormat.Full ids p -> do @@ -699,6 +703,30 @@ decomposeWatchResult = (,) <$> getWatchLocalIds <*> getRemainingByteString recomposeWatchResult :: MonadPut m => (WatchLocalIds, BS.ByteString) -> m () recomposeWatchResult (wli, bs) = putLocalIds wli >> putByteString bs +decomposePatchFull :: MonadGet m => m (PatchLocalIds, BS.ByteString) +decomposePatchFull = (,) <$> getPatchLocalIds <*> getRemainingByteString + +decomposePatchDiff :: MonadGet m => m (ObjectId, PatchLocalIds, BS.ByteString) +decomposePatchDiff = (,,) <$> getVarInt <*> getPatchLocalIds <*> getRemainingByteString + +decomposeBranchFull :: MonadGet m => m (BranchLocalIds, BS.ByteString) +decomposeBranchFull = (,) <$> getBranchLocalIds <*> getRemainingByteString + +decomposeBranchDiff :: MonadGet m => m (ObjectId, BranchLocalIds, BS.ByteString) +decomposeBranchDiff = (,,) <$> getVarInt <*> getBranchLocalIds <*> getRemainingByteString + +recomposePatchFull :: MonadPut m => PatchLocalIds -> BS.ByteString -> m () +recomposePatchFull li bs = putPatchLocalIds li *> putByteString bs + +recomposePatchDiff :: MonadPut m => ObjectId -> PatchLocalIds -> BS.ByteString -> m () +recomposePatchDiff id li bs = putVarInt id *> putPatchLocalIds li *> putByteString bs + +recomposeBranchFull :: MonadPut m => BranchLocalIds -> BS.ByteString -> m () +recomposeBranchFull li bs = putBranchLocalIds li *> putByteString bs + +recomposeBranchDiff :: MonadPut m => ObjectId -> BranchLocalIds -> BS.ByteString -> m () +recomposeBranchDiff id li bs = putVarInt id *> putBranchLocalIds li *> putByteString bs + -- the same implementation currently works for term component and type component getComponentSyncEntities :: 
MonadGet m => m SE.SyncEntitySeq getComponentSyncEntities = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 1cb2609f97..5f40ddee1b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} @@ -8,14 +9,13 @@ module U.Codebase.Sqlite.Sync22 where -import Control.Monad (filterM, foldM, join) -import Control.Monad.Except (ExceptT, MonadError (throwError), runExceptT) +import Control.Monad (filterM, join) +import Control.Monad.Except (ExceptT, MonadError (throwError)) import Control.Monad.Extra (ifM) import Control.Monad.RWS (MonadIO, MonadReader) -import Control.Monad.Reader (ReaderT) -import Control.Monad.Trans (lift) -import Control.Monad.Trans.Except (ExceptT (ExceptT)) -import qualified Control.Monad.Writer as Writer +import Control.Monad.Reader (ReaderT, MonadReader (reader)) +import Control.Monad.Validate (runValidateT, ValidateT) +import qualified Control.Monad.Validate as Validate import Data.Bifunctor (bimap) import Data.Bitraversable (bitraverse) import Data.ByteString (ByteString) @@ -26,12 +26,12 @@ import Data.Foldable (for_, toList, traverse_) import Data.Functor ((<&>)) import Data.List.Extra (nubOrd) import Data.Maybe (catMaybes, fromJust, fromMaybe, isJust) -import Data.Sequence (Seq) -import qualified Data.Sequence as Seq -import Data.Traversable (for) +import Data.Set (Set) +import qualified Data.Set as Set import Data.Word (Word8) import Database.SQLite.Simple (Connection) import qualified U.Codebase.Reference as Reference +import qualified U.Codebase.Sqlite.Branch.Format as BL import U.Codebase.Sqlite.DbId import qualified U.Codebase.Sqlite.LocalIds as L import qualified U.Codebase.Sqlite.ObjectType as OT @@ -46,11 +46,9 @@ import qualified U.Codebase.Sync as 
Sync import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache -import qualified U.Util.Serialization as S -import qualified Data.Bytes.Get as Get -import qualified Data.Bytes.Put as Put +import Control.Monad.Trans (lift) -data Entity = O ObjectId | C CausalHashId +data Entity = O ObjectId | C CausalHashId deriving (Eq, Ord, Show) data DbTag = SrcDb | DestDb @@ -61,6 +59,7 @@ data DecodeError -- | ErrDeclFormat ErrBranchFormat | ErrPatchFormat + | ErrBranchBody Word8 | ErrPatchBody Word8 | ErrWatchResult @@ -156,7 +155,7 @@ trySync tCache hCache oCache gc = \case Nothing -> do (hId, objType, bytes) <- runSrc $ Q.loadObjectWithHashIdAndTypeById oId hId' <- syncHashLiteral hId - result <- case objType of + result <- runValidateT @(Set Entity) @m @ObjectId case objType of OT.TermComponent -> do -- split up the localIds (parsed), term, and type blobs -- note: this whole business with `fmt` is pretty weird, and will need to be @@ -164,7 +163,7 @@ trySync tCache hCache oCache gc = \case -- (or maybe i'll learn something by implementing sync for patches and namespaces, -- which have two formats already) (fmt, unzip -> (localIds, bytes)) <- - case flip runGetS bytes do + lift case flip runGetS bytes do tag <- getWord8 component <- S.decomposeComponent pure (tag, component) of @@ -172,46 +171,42 @@ trySync tCache hCache oCache gc = \case Left s -> throwError $ DecodeError ErrTermComponent bytes s -- iterate through the local ids looking for missing deps; -- then either enqueue the missing deps, or proceed to move the object - foldM foldLocalIds (Right mempty) localIds >>= \case - Left missingDeps -> pure $ Left missingDeps - Right (toList -> localIds') -> do - -- reassemble and save the reindexed term - let bytes' = - runPutS $ - putWord8 fmt - >> S.recomposeComponent (zip localIds' bytes) - oId' <- runDest $ Q.saveObject hId' objType bytes' - -- copy reference-specific stuff - for_ [0 .. 
length localIds - 1] \(fromIntegral -> idx) -> do - -- sync watch results - for_ [WK.RegularWatch, WK.TestWatch] \wk -> do - let refH = Reference.Id hId idx - refH' = Reference.Id hId' idx - runSrc (Q.loadWatch wk refH) >>= traverse_ \blob -> do - (L.LocalIds tIds hIds, termBytes) <- - case runGetS S.decomposeWatchResult blob of - Right x -> pure x - Left s -> throwError $ DecodeError ErrWatchResult blob s - tIds' <- traverse syncTextLiteral tIds - hIds' <- traverse syncHashLiteral hIds - let blob' = runPutS (S.recomposeWatchResult (L.LocalIds tIds' hIds', termBytes)) - runDest (Q.saveWatch wk refH' blob') - -- sync dependencies index - let ref = Reference.Id oId idx - ref' = Reference.Id oId' idx - let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") - runSrc (Q.getDependenciesForDependent ref) - >>= traverse (fmap fromJust' . isSyncedObjectReference) - >>= runDest . traverse_ (flip Q.addToDependentsIndex ref') - -- sync type index - runSrc (Q.getTypeReferencesForComponent oId) - >>= traverse (syncTypeIndexRow oId') - >>= traverse_ (runDest . uncurry Q.addToTypeIndex) - -- sync type mentions index - runSrc (Q.getTypeMentionsReferencesForComponent oId) - >>= traverse (syncTypeIndexRow oId') - >>= traverse_ (runDest . uncurry Q.addToTypeMentionsIndex) - pure $ Right oId' + localIds' <- traverse syncLocalIds localIds + -- reassemble and save the reindexed term + let bytes' = runPutS $ + putWord8 fmt >> S.recomposeComponent (zip localIds' bytes) + oId' <- runDest $ Q.saveObject hId' objType bytes' + -- copy reference-specific stuff + lift $ for_ [0 .. 
length localIds - 1] \(fromIntegral -> idx) -> do + -- sync watch results + for_ [WK.RegularWatch, WK.TestWatch] \wk -> do + let refH = Reference.Id hId idx + refH' = Reference.Id hId' idx + runSrc (Q.loadWatch wk refH) >>= traverse_ \blob -> do + (L.LocalIds tIds hIds, termBytes) <- + case runGetS S.decomposeWatchResult blob of + Right x -> pure x + Left s -> throwError $ DecodeError ErrWatchResult blob s + tIds' <- traverse syncTextLiteral tIds + hIds' <- traverse syncHashLiteral hIds + let blob' = runPutS (S.recomposeWatchResult (L.LocalIds tIds' hIds', termBytes)) + runDest (Q.saveWatch wk refH' blob') + -- sync dependencies index + let ref = Reference.Id oId idx + ref' = Reference.Id oId' idx + let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") + runSrc (Q.getDependenciesForDependent ref) + >>= traverse (fmap fromJust' . isSyncedObjectReference) + >>= runDest . traverse_ (flip Q.addToDependentsIndex ref') + -- sync type index + runSrc (Q.getTypeReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . uncurry Q.addToTypeIndex) + -- sync type mentions index + runSrc (Q.getTypeMentionsReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . 
uncurry Q.addToTypeMentionsIndex) + pure oId' OT.DeclComponent -> do -- split up the localIds (parsed), decl blobs (fmt, unzip -> (localIds, declBytes)) <- @@ -223,77 +218,75 @@ trySync tCache hCache oCache gc = \case Left s -> throwError $ DecodeError ErrDeclComponent bytes s -- iterate through the local ids looking for missing deps; -- then either enqueue the missing deps, or proceed to move the object - foldM foldLocalIds (Right mempty) localIds >>= \case - Left missingDeps -> pure $ Left missingDeps - Right (toList -> localIds') -> do - -- reassemble and save the reindexed term - let bytes' = - runPutS $ - putWord8 fmt - >> S.recomposeComponent (zip localIds' declBytes) - oId' <- runDest $ Q.saveObject hId' objType bytes' - -- copy per-element-of-the-component stuff - for_ [0 .. length localIds - 1] \(fromIntegral -> idx) -> do - -- sync dependencies index - let ref = Reference.Id oId idx - ref' = Reference.Id oId' idx - let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") - runSrc (Q.getDependenciesForDependent ref) - >>= traverse (fmap fromJust' . isSyncedObjectReference) - >>= runDest . traverse_ (flip Q.addToDependentsIndex ref') - -- sync type index - runSrc (Q.getTypeReferencesForComponent oId) - >>= traverse (syncTypeIndexRow oId') - >>= traverse_ (runDest . uncurry Q.addToTypeIndex) - -- sync type mentions index - runSrc (Q.getTypeMentionsReferencesForComponent oId) - >>= traverse (syncTypeIndexRow oId') - >>= traverse_ (runDest . uncurry Q.addToTypeMentionsIndex) - pure $ Right oId' + localIds' <- traverse syncLocalIds localIds + -- reassemble and save the reindexed term + let bytes' = + runPutS $ + putWord8 fmt + >> S.recomposeComponent (zip localIds' declBytes) + oId' <- runDest $ Q.saveObject hId' objType bytes' + -- copy per-element-of-the-component stuff + lift $ for_ [0 .. 
length localIds - 1] \(fromIntegral -> idx) -> do + -- sync dependencies index + let ref = Reference.Id oId idx + ref' = Reference.Id oId' idx + let fromJust' = fromMaybe (error "missing objects should've been caught by `foldLocalIds` above") + runSrc (Q.getDependenciesForDependent ref) + >>= traverse (fmap fromJust' . isSyncedObjectReference) + >>= runDest . traverse_ (flip Q.addToDependentsIndex ref') + -- sync type index + runSrc (Q.getTypeReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . uncurry Q.addToTypeIndex) + -- sync type mentions index + runSrc (Q.getTypeMentionsReferencesForComponent oId) + >>= traverse (syncTypeIndexRow oId') + >>= traverse_ (runDest . uncurry Q.addToTypeMentionsIndex) + pure oId' OT.Namespace -> case BS.uncons bytes of - Just (0, bytes) -> error "todo" - Just (1, bytes) -> error "todo" + -- full branch case + Just (fmt@0, bytes) -> do + (ids, body) <- case flip runGetS bytes S.decomposeBranchFull of + Right x -> pure x + Left s -> throwError $ DecodeError (ErrBranchBody fmt) bytes s + ids' <- syncBranchLocalIds ids + let bytes' = runPutS $ putWord8 0 *> S.recomposeBranchFull ids' body + oId' <- runDest $ Q.saveObject hId' objType bytes' + pure oId' + -- branch diff case + Just (fmt@1, bytes) -> do + (boId, ids, body) <- case flip runGetS bytes S.decomposeBranchDiff of + Right x -> pure x + Left s -> throwError $ DecodeError (ErrBranchBody fmt) bytes s + boId' <- syncLocalObjectId boId + ids' <- syncBranchLocalIds ids + let bytes' = runPutS $ putWord8 1 *> S.recomposeBranchDiff boId' ids' body + oId' <- runDest $ Q.saveObject hId' objType bytes' + pure oId' + -- unrecognized tag case Just (tag, _) -> throwError $ DecodeError ErrBranchFormat bytes ("unrecognized branch format tag: " ++ show tag) Nothing -> throwError $ DecodeError ErrBranchFormat bytes "zero-byte branch object" OT.Patch -> case BS.uncons bytes of + -- full branch case Just (fmt@0, bytes) -> do - (ids, blob) <- case flip 
runGetS bytes do - ids <- S.getPatchLocalIds - blob <- S.getRemainingByteString - pure (ids, blob) of + (ids, body) <- case flip runGetS bytes S.decomposePatchFull of Right x -> pure x - Left s -> throwError $ DecodeError (ErrPatchBody 0) bytes s - syncPatchLocalIds ids >>= \case - Left missingDeps -> pure $ Left missingDeps - Right ids' -> do - let bytes' = runPutS do - putWord8 fmt - S.putPatchLocalIds ids' - Put.putByteString blob - oId' <- runDest $ Q.saveObject hId' objType bytes' - pure $ Right oId' + Left s -> throwError $ DecodeError (ErrPatchBody fmt) bytes s + ids' <- syncPatchLocalIds ids + let bytes' = runPutS $ putWord8 0 *> S.recomposePatchFull ids' body + oId' <- runDest $ Q.saveObject hId' objType bytes' + pure oId' + -- branch diff case Just (fmt@1, bytes) -> do - (poId, ids, blob) <- case flip runGetS bytes do - poId <- S.getVarInt - ids <- S.getPatchLocalIds - blob <- S.getRemainingByteString - pure (poId, ids, blob) of + (poId, ids, body) <- case flip runGetS bytes S.decomposePatchDiff of Right x -> pure x - Left s -> throwError $ DecodeError (ErrPatchBody 0) bytes s - mayPoId' <- isSyncedObject poId - eitherIds' <- syncPatchLocalIds ids - case (mayPoId', eitherIds') of - (Nothing, Left missingDeps) -> pure $ Left (O poId Seq.<| missingDeps) - (Nothing, Right {}) -> pure $ Left (Seq.singleton (O poId)) - (Just {}, Left missingDeps) -> pure $ Left missingDeps - (Just poId', Right ids') -> do - let bytes' = runPutS do - putWord8 fmt - S.putVarInt poId' - S.putPatchLocalIds ids' - Put.putByteString blob - oId' <- runDest $ Q.saveObject hId' objType bytes' - pure $ Right oId' + Left s -> throwError $ DecodeError (ErrPatchBody fmt) bytes s + poId' <- syncLocalObjectId poId + ids' <- syncPatchLocalIds ids + let bytes' = runPutS $ putWord8 1 *> S.recomposePatchDiff poId' ids' body + oId' <- runDest $ Q.saveObject hId' objType bytes' + pure oId' + -- error gases Just (tag, _) -> throwError $ DecodeError ErrBranchFormat bytes ("unrecognized patch format 
tag: " ++ show tag) Nothing -> throwError $ DecodeError ErrPatchFormat bytes "zero-byte patch object" case result of @@ -303,40 +296,36 @@ trySync tCache hCache oCache gc = \case Cache.insert oCache oId oId' pure Sync.Done where - foldLocalIds :: Either (Seq Entity) (Seq L.LocalIds) -> L.LocalIds -> m (Either (Seq Entity) (Seq L.LocalIds)) - foldLocalIds (Left missing) (L.LocalIds _tIds oIds) = - syncLocalObjectIds oIds <&> \case - Left missing2 -> Left (missing <> missing2) - Right _oIds' -> Left missing - foldLocalIds (Right localIdss') (L.LocalIds tIds oIds) = - syncLocalObjectIds oIds >>= \case - Left missing -> pure $ Left missing - Right oIds' -> do - tIds' <- traverse syncTextLiteral tIds - pure $ Right (localIdss' Seq.|> L.LocalIds tIds' oIds') - - -- I want to collect all the failures, rather than short-circuiting after the first - syncLocalObjectIds :: Traversable t => t ObjectId -> m (Either (Seq Entity) (t ObjectId)) - syncLocalObjectIds oIds = do - (mayOIds', missing) <- Writer.runWriterT do - for oIds \oId -> - lift (isSyncedObject oId) >>= \case - Just oId' -> pure oId' - Nothing -> do - Writer.tell . Seq.singleton $ O oId - pure $ error "Arya didn't think this would get eval'ed." - if null missing - then pure $ Right mayOIds' - else pure $ Left missing - - - syncPatchLocalIds :: PL.PatchLocalIds -> m (Either (Seq Entity) PL.PatchLocalIds) - syncPatchLocalIds (PL.LocalIds tIds hIds oIds) = runExceptT do - oIds' <- ExceptT $ syncLocalObjectIds oIds + syncLocalObjectId :: ObjectId -> ValidateT (Set Entity) m ObjectId + syncLocalObjectId oId = + lift (isSyncedObject oId) >>= \case + Just oId' -> pure oId' + Nothing -> Validate.refute . Set.singleton $ O oId + + syncBranchObjectId :: BranchObjectId -> ValidateT (Set Entity) m BranchObjectId + syncBranchObjectId = fmap BranchObjectId . syncLocalObjectId . 
unBranchObjectId + + syncLocalIds :: L.LocalIds -> ValidateT (Set Entity) m L.LocalIds + syncLocalIds (L.LocalIds tIds oIds) = do + oIds' <- traverse syncLocalObjectId oIds + tIds' <- lift $ traverse syncTextLiteral tIds + pure $ L.LocalIds tIds' oIds' + + syncPatchLocalIds :: PL.PatchLocalIds -> ValidateT (Set Entity) m PL.PatchLocalIds + syncPatchLocalIds (PL.LocalIds tIds hIds oIds) = do + oIds' <- traverse syncLocalObjectId oIds tIds' <- lift $ traverse syncTextLiteral tIds hIds' <- lift $ traverse syncHashLiteral hIds pure $ PL.LocalIds tIds' hIds' oIds' + syncBranchLocalIds :: BL.BranchLocalIds -> ValidateT (Set Entity) m BL.BranchLocalIds + syncBranchLocalIds (BL.LocalIds tIds oIds poIds chboIds) = do + oIds' <- traverse syncLocalObjectId oIds + poIds' <- traverse (fmap PatchObjectId . syncLocalObjectId . unPatchObjectId) poIds + chboIds' <- traverse (bitraverse syncBranchObjectId (lift . syncCausalHash)) chboIds + tIds' <- lift $ traverse syncTextLiteral tIds + pure $ BL.LocalIds tIds' oIds' poIds' chboIds' + syncTypeIndexRow oId' = bitraverse syncHashReference (pure . rewriteTypeIndexReferent oId') rewriteTypeIndexReferent :: ObjectId -> Sqlite.Referent.Id -> Sqlite.Referent.Id rewriteTypeIndexReferent oId' = bimap (const oId') (const oId') @@ -399,17 +388,12 @@ trySync tCache hCache oCache gc = \case [] -> pure $ Nothing oIds' -> throwError (HashObjectCorrespondence oId hIds oIds') --- syncCausal chId = do --- value - --- Q: Do we want to cache corresponding ID mappings? --- A: Yes, but not yet - runSrc :: (MonadError Error m, MonadReader Env m) => ReaderT Connection (ExceptT Q.Integrity m) a -> m a -runSrc = error "todo" -- withExceptT SrcDB . (reader fst >>=) +runSrc = error "todo" -- withExceptT SrcDB do + runDest :: (MonadError Error m, MonadReader Env m) => @@ -417,8 +401,6 @@ runDest :: m a runDest = error "todo" -- withExceptT SrcDB . 
(reader fst >>=) --- applyDefined - -- syncs coming from git: -- - pull a specified remote causal (Maybe CausalHash) into the local database -- - and then maybe do some stuff diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index 884aafaeb5..6e53604d71 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -50,6 +50,7 @@ library extra, here, lens, + monad-validate, mtl, sqlite-simple, text, From 3f2efac3dd75ef5c9f0b7759cfd0dbf371244bfd Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 3 Mar 2021 16:33:24 -0500 Subject: [PATCH 116/225] typo --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 5f40ddee1b..5f0c02fe71 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -286,7 +286,7 @@ trySync tCache hCache oCache gc = \case let bytes' = runPutS $ putWord8 1 *> S.recomposePatchDiff poId' ids' body oId' <- runDest $ Q.saveObject hId' objType bytes' pure oId' - -- error gases + -- error cases Just (tag, _) -> throwError $ DecodeError ErrBranchFormat bytes ("unrecognized patch format tag: " ++ show tag) Nothing -> throwError $ DecodeError ErrPatchFormat bytes "zero-byte patch object" case result of From 9cc4e0f4d7ae1437c5044bc8f523d8c488132883 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 3 Mar 2021 16:35:04 -0500 Subject: [PATCH 117/225] runDB --- .../U/Codebase/Sqlite/Sync22.hs | 40 ++++++++++--------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 5f0c02fe71..fcad8d57bc 100644 --- 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -11,10 +11,12 @@ module U.Codebase.Sqlite.Sync22 where import Control.Monad (filterM, join) import Control.Monad.Except (ExceptT, MonadError (throwError)) +import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM) -import Control.Monad.RWS (MonadIO, MonadReader) -import Control.Monad.Reader (ReaderT, MonadReader (reader)) -import Control.Monad.Validate (runValidateT, ValidateT) +import Control.Monad.RWS (MonadIO, MonadReader, lift) +import Control.Monad.Reader (ReaderT) +import qualified Control.Monad.Reader as Reader +import Control.Monad.Validate (ValidateT, runValidateT) import qualified Control.Monad.Validate as Validate import Data.Bifunctor (bimap) import Data.Bitraversable (bitraverse) @@ -46,7 +48,6 @@ import qualified U.Codebase.Sync as Sync import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache -import Control.Monad.Trans (lift) data Entity = O ObjectId | C CausalHashId deriving (Eq, Ord, Show) @@ -173,8 +174,9 @@ trySync tCache hCache oCache gc = \case -- then either enqueue the missing deps, or proceed to move the object localIds' <- traverse syncLocalIds localIds -- reassemble and save the reindexed term - let bytes' = runPutS $ - putWord8 fmt >> S.recomposeComponent (zip localIds' bytes) + let bytes' = + runPutS $ + putWord8 fmt >> S.recomposeComponent (zip localIds' bytes) oId' <- runDest $ Q.saveObject hId' objType bytes' -- copy reference-specific stuff lift $ for_ [0 .. 
length localIds - 1] \(fromIntegral -> idx) -> do @@ -388,18 +390,20 @@ trySync tCache hCache oCache gc = \case [] -> pure $ Nothing oIds' -> throwError (HashObjectCorrespondence oId hIds oIds') -runSrc :: - (MonadError Error m, MonadReader Env m) => - ReaderT Connection (ExceptT Q.Integrity m) a -> - m a -runSrc = error "todo" -- withExceptT SrcDB do - - -runDest :: - (MonadError Error m, MonadReader Env m) => - ReaderT Connection (ExceptT Q.Integrity m) a -> - m a -runDest = error "todo" -- withExceptT SrcDB . (reader fst >>=) +runSrc, + runDest :: + (MonadError Error m, MonadReader Env m) => + ReaderT Connection (ExceptT Q.Integrity m) a -> + m a +runSrc ma = Reader.reader srcDB >>= flip runDB ma +runDest ma = Reader.reader destDB >>= flip runDB ma + +runDB :: + MonadError Error m => Connection -> ReaderT Connection (ExceptT Q.Integrity m) a -> m a +runDB conn action = + Except.runExceptT (Reader.runReaderT action conn) >>= \case + Left e -> throwError (DbIntegrity e) + Right a -> pure a -- syncs coming from git: -- - pull a specified remote causal (Maybe CausalHash) into the local database From 28f48be943b59e7066e1da2a873444ad2e039d7a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 9 Mar 2021 11:48:58 -0500 Subject: [PATCH 118/225] split out git-specific vs filecodebase-specific git code also MonadIO'ed stuff --- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 3 +- .../U/Codebase/Sqlite/Queries.hs | 1 + .../U/Codebase/Sqlite/Sync22.hs | 41 +++-- .../src/Unison/Codebase/Editor/Git.hs | 146 ++---------------- .../Unison/Codebase/Editor/HandleCommand.hs | 2 +- .../src/Unison/Codebase/FileCodebase/Git.hs | 136 ++++++++++++++++ .../src/Unison/Codebase/GitError.hs | 1 + .../src/Unison/Codebase/SqliteCodebase.hs | 102 ++++++------ .../src/Unison/Codebase/SqliteCodebase/Git.hs | 138 +++++++++++++++++ .../Codebase/SqliteCodebase/SyncEphemeral.hs | 142 +++++++++++++++++ .../unison-parser-typechecker.cabal | 4 + unison-core/unison-core1.cabal | 3 +- 12 files changed, 
511 insertions(+), 208 deletions(-) create mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs create mode 100644 parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs create mode 100644 parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 0bdd609519..4ccfbc6b82 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -41,7 +41,8 @@ newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId -- |For generational garbage-collection; 0 is the oldest generation. -newtype Generation = Generation { unGeneration :: Word64 } deriving (Eq, Ord, Show) +newtype Generation = Generation { unGeneration :: Word64 } + deriving (Eq, Ord, Show) deriving (Enum, FromField, ToField) via Word64 instance Show PatchObjectId where diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index c8bd581a6a..bdf5ea6317 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -14,6 +14,7 @@ {-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-orphans #-} +{-# LANGUAGE TypeOperators #-} module U.Codebase.Sqlite.Queries where import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index fcad8d57bc..7f7f5bf4b9 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -43,7 +43,7 @@ import qualified U.Codebase.Sqlite.Reference as Sqlite import qualified U.Codebase.Sqlite.Reference as Sqlite.Reference 
import qualified U.Codebase.Sqlite.Referent as Sqlite.Referent import qualified U.Codebase.Sqlite.Serialization as S -import U.Codebase.Sync +import U.Codebase.Sync (Sync (Sync), TrySyncResult (Missing)) import qualified U.Codebase.Sync as Sync import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) @@ -75,26 +75,27 @@ data Error data Env = Env { srcDB :: Connection, - destDB :: Connection + destDB :: Connection, + -- | there are three caches of this size + idCacheSize :: Word } -- data Mappings - --- We load an object from the source; it has a bunch of dependencies. --- Some of these dependencies exist at the defination, some don't. --- For the ones that do, look up their ids, and update the thingie as you write it --- For the ones that don't, copy them (then you will know their ids), and update the thingie. --- If you want, you can try to cache that lookup. - --- sync22 :: --- ( MonadIO m, --- MonadError Error m, --- MonadReader TwoConnections m --- ) => --- Sync m Entity --- sync22 = Sync roots trySync --- where --- roots = runSrc $ fmap (\h -> [C h]) Q.loadNamespaceRoot +sync22 :: + ( MonadIO m, + MonadError Error m, + MonadReader Env m + ) => + m (Sync m Entity) +sync22 = do + size <- Reader.reader idCacheSize + tCache <- Cache.semispaceCache size + hCache <- Cache.semispaceCache size + oCache <- Cache.semispaceCache size + gc <- runSrc $ Q.getNurseryGeneration + pure $ Sync roots (trySync tCache hCache oCache (succ gc)) + where + roots = runSrc $ fmap (\h -> [C h]) Q.loadNamespaceRoot trySync :: forall m. 
@@ -364,9 +365,7 @@ trySync tCache hCache oCache gc = \case findMissingParents :: CausalHashId -> m [Entity] findMissingParents chId = do - runSrc (Q.loadCausalParents chId) - >>= filterM isMissing - <&> fmap C + runSrc (Q.loadCausalParents chId) >>= filterM isMissing <&> fmap C where isMissing p = syncCausalHash p diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs index 2e66e2122d..bf2246ae64 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs @@ -1,43 +1,23 @@ {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ViewPatterns #-} -module Unison.Codebase.Editor.Git - ( importRemoteBranch - , pushGitRootBranch - , viewRemoteBranch - ) where +module Unison.Codebase.Editor.Git where import Unison.Prelude -import Control.Monad.Except ( MonadError - , throwError - , ExceptT - ) -import Control.Monad.Extra ((||^)) import qualified Control.Exception -import qualified Data.Text as Text -import Shellmet ( ($?), ($|), ($^)) -import System.FilePath ( () ) -import qualified Unison.Codebase.GitError as GitError -import Unison.Codebase.GitError (GitError) -import qualified Unison.Codebase as Codebase -import Unison.Codebase (Codebase, CodebasePath) -import Unison.Codebase.Editor.RemoteRepo ( RemoteRepo(GitRepo) - , RemoteNamespace - , printRepo - ) -import Unison.Codebase.FileCodebase as FC -import Unison.Codebase.Branch ( Branch - , headHash - ) -import qualified Unison.Codebase.Path as Path -import Unison.Codebase.SyncMode ( SyncMode ) -import qualified Unison.Util.Exception as Ex -import Unison.Util.Timing (time) -import qualified Unison.Codebase.Branch as Branch +import Control.Monad.Except (MonadError, throwError) +import qualified Data.Text as Text +import Shellmet (($?), ($^), ($|)) +import System.FilePath (()) +import Unison.Codebase (CodebasePath) +import Unison.Codebase.Editor.RemoteRepo (RemoteRepo (GitRepo)) +import 
Unison.Codebase.FileCodebase.Common (encodeFileName) +import Unison.Codebase.GitError (GitError) +import qualified Unison.Codebase.GitError as GitError +import qualified Unison.Util.Exception as Ex +import UnliftIO.Directory (XdgDirectory (XdgCache), doesDirectoryExist, findExecutable, getXdgDirectory, removeDirectoryRecursive) import UnliftIO.IO (hFlush, stdout) -import UnliftIO.Directory (getXdgDirectory, XdgDirectory(XdgCache), doesDirectoryExist, findExecutable, removeDirectoryRecursive) -import Unison.Codebase.FileCodebase.Common (encodeFileName, updateCausalHead, branchHeadDir) tempGitDir :: MonadIO m => Text -> m FilePath tempGitDir url = @@ -117,56 +97,6 @@ pullBranch repo@(GitRepo uri Nothing) = do Left e -> throwError (GitError.SomeOtherError (show e)) Right _ -> pure () --- | Sync elements as needed from a remote codebase into the local one. --- If `sbh` is supplied, we try to load the specified branch hash; --- otherwise we try to load the root branch. -importRemoteBranch - :: forall m v a - . MonadIO m - => Codebase m v a - -> Branch.Cache m - -> RemoteNamespace - -> SyncMode - -> ExceptT GitError m (Branch m) -importRemoteBranch codebase cache ns mode = do - (branch, cacheDir) <- viewRemoteBranch' cache ns - withStatus "Importing downloaded files into local codebase..." $ - time "SyncFromDirectory" $ - lift $ Codebase.syncFromDirectory codebase cacheDir mode branch - pure branch - --- | Pull a git branch and view it from the cache, without syncing into the --- local codebase. -viewRemoteBranch :: forall m. MonadIO m - => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m) -viewRemoteBranch cache = fmap fst . viewRemoteBranch' cache - -viewRemoteBranch' :: forall m. 
MonadIO m - => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) -viewRemoteBranch' cache (repo, sbh, path) = do - -- set up the cache dir - remotePath <- time "Git fetch" $ pullBranch repo - -- try to load the requested branch from it - branch <- time "Git fetch (sbh)" $ case sbh of - -- load the root branch - Nothing -> lift (FC.getRootBranch cache remotePath) >>= \case - Left Codebase.NoRootBranch -> pure Branch.empty - Left (Codebase.CouldntLoadRootBranch h) -> - throwError $ GitError.CouldntLoadRootBranch repo h - Left (Codebase.CouldntParseRootBranch s) -> - throwError $ GitError.CouldntParseRootBranch repo s - Right b -> pure b - -- load from a specific `ShortBranchHash` - Just sbh -> do - branchCompletions <- lift $ FC.branchHashesByPrefix remotePath sbh - case toList branchCompletions of - [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - [h] -> (lift $ FC.branchFromFiles cache remotePath h) >>= \case - Just b -> pure b - Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions - pure (Branch.getAt' path branch, remotePath) - -- | See if `git` is on the system path. checkForGit :: MonadIO m => MonadError GitError m => m () checkForGit = do @@ -195,55 +125,3 @@ gitIn localPath args = liftIO $ "git" $^ (setupGitDir localPath <> args) gitTextIn :: MonadIO m => FilePath -> [Text] -> m Text gitTextIn localPath args = liftIO $ "git" $| setupGitDir localPath <> args - --- Given a branch that is "after" the existing root of a given git repo, --- stage and push the branch (as the new root) + dependencies to the repo. 
-pushGitRootBranch - :: MonadIO m - => Codebase m v a - -> Branch.Cache m - -> Branch m - -> RemoteRepo - -> SyncMode - -> ExceptT GitError m () -pushGitRootBranch codebase cache branch repo syncMode = do - -- Pull the remote repo into a staging directory - (remoteRoot, remotePath) <- viewRemoteBranch' cache (repo, Nothing, Path.empty) - ifM (pure (remoteRoot == Branch.empty) - ||^ lift (remoteRoot `Branch.before` branch)) - -- ours is newer 👍, meaning this is a fast-forward push, - -- so sync branch to staging area - (stageAndPush remotePath) - (throwError $ GitError.PushDestinationHasNewStuff repo) - where - stageAndPush remotePath = do - let repoString = Text.unpack $ printRepo repo - withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ - lift (Codebase.syncToDirectory codebase remotePath syncMode branch) - updateCausalHead (branchHeadDir remotePath) (Branch._history branch) - -- push staging area to remote - withStatus ("Uploading to " ++ repoString ++ " ...") $ - unlessM - (push remotePath repo - `withIOError` (throwError . GitError.PushException repo . show)) - (throwError $ GitError.PushNoOp repo) - -- Commit our changes - push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO - push remotePath (GitRepo url gitbranch) = do - -- has anything changed? - status <- gitTextIn remotePath ["status", "--short"] - if Text.null status then - pure False - else do - gitIn remotePath ["add", "--all", "."] - gitIn remotePath - ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] - -- Push our changes to the repo - case gitbranch of - Nothing -> gitIn remotePath ["push", "--quiet", url] - Just gitbranch -> error $ - "Pushing to a specific branch isn't fully implemented or tested yet.\n" - ++ "InputPatterns.parseUri was expected to have prevented you " - ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
- -- gitIn remotePath ["push", "--quiet", url, gitbranch] - pure True diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs index 4a797577eb..c5480f013a 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs @@ -27,7 +27,7 @@ import Unison.Codebase ( Codebase ) import qualified Unison.Codebase as Codebase import Unison.Codebase.Branch ( Branch ) import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Editor.Git as Git +import qualified Unison.Codebase.FileCodebase.Git as Git import Unison.Parser ( Ann ) import qualified Unison.Parser as Parser import qualified Unison.Parsers as Parsers diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs new file mode 100644 index 0000000000..3ee4433fe2 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs @@ -0,0 +1,136 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.FileCodebase.Git + ( importRemoteBranch + , pushGitRootBranch + , viewRemoteBranch + ) where + +import Unison.Prelude +import Unison.Codebase.Editor.Git + +import Control.Monad.Except ( throwError + , ExceptT + ) +import Control.Monad.Extra ((||^)) +import qualified Data.Text as Text +import qualified Unison.Codebase.GitError as GitError +import Unison.Codebase.GitError (GitError) +import qualified Unison.Codebase as Codebase +import Unison.Codebase (Codebase, CodebasePath) +import Unison.Codebase.Editor.RemoteRepo ( RemoteRepo(GitRepo) + , RemoteNamespace + , printRepo + ) +import Unison.Codebase.FileCodebase as FC +import Unison.Codebase.Branch ( Branch + , headHash + ) +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.SyncMode ( SyncMode ) +import Unison.Util.Timing (time) +import qualified 
Unison.Codebase.Branch as Branch +import Unison.Codebase.FileCodebase.Common (updateCausalHead, branchHeadDir) + +-- | Sync elements as needed from a remote codebase into the local one. +-- If `sbh` is supplied, we try to load the specified branch hash; +-- otherwise we try to load the root branch. +importRemoteBranch + :: forall m v a + . MonadIO m + => Codebase m v a + -> Branch.Cache m + -> RemoteNamespace + -> SyncMode + -> ExceptT GitError m (Branch m) +importRemoteBranch codebase cache ns mode = do + (branch, cacheDir) <- viewRemoteBranch' cache ns + withStatus "Importing downloaded files into local codebase..." $ + time "SyncFromDirectory" $ + lift $ Codebase.syncFromDirectory codebase cacheDir mode branch + pure branch + +-- | Pull a git branch and view it from the cache, without syncing into the +-- local codebase. +viewRemoteBranch :: forall m. MonadIO m + => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m) +viewRemoteBranch cache = fmap fst . viewRemoteBranch' cache + +viewRemoteBranch' :: forall m. 
MonadIO m + => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) +viewRemoteBranch' cache (repo, sbh, path) = do + -- set up the cache dir + remotePath <- time "Git fetch" $ pullBranch repo + -- try to load the requested branch from it + branch <- time "Git fetch (sbh)" $ case sbh of + -- load the root branch + Nothing -> lift (FC.getRootBranch cache remotePath) >>= \case + Left Codebase.NoRootBranch -> pure Branch.empty + Left (Codebase.CouldntLoadRootBranch h) -> + throwError $ GitError.CouldntLoadRootBranch repo h + Left (Codebase.CouldntParseRootBranch s) -> + throwError $ GitError.CouldntParseRootBranch repo s + Right b -> pure b + -- load from a specific `ShortBranchHash` + Just sbh -> do + branchCompletions <- lift $ FC.branchHashesByPrefix remotePath sbh + case toList branchCompletions of + [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + [h] -> (lift $ FC.branchFromFiles cache remotePath h) >>= \case + Just b -> pure b + Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions + pure (Branch.getAt' path branch, remotePath) + +-- Given a branch that is "after" the existing root of a given git repo, +-- stage and push the branch (as the new root) + dependencies to the repo. 
+pushGitRootBranch + :: MonadIO m + => Codebase m v a + -> Branch.Cache m + -> Branch m + -> RemoteRepo + -> SyncMode + -> ExceptT GitError m () +pushGitRootBranch codebase cache branch repo syncMode = do + -- Pull the remote repo into a staging directory + (remoteRoot, remotePath) <- viewRemoteBranch' cache (repo, Nothing, Path.empty) + ifM (pure (remoteRoot == Branch.empty) + ||^ lift (remoteRoot `Branch.before` branch)) + -- ours is newer 👍, meaning this is a fast-forward push, + -- so sync branch to staging area + (stageAndPush remotePath) + (throwError $ GitError.PushDestinationHasNewStuff repo) + where + stageAndPush remotePath = do + let repoString = Text.unpack $ printRepo repo + withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ + lift (Codebase.syncToDirectory codebase remotePath syncMode branch) + updateCausalHead (branchHeadDir remotePath) (Branch._history branch) + -- push staging area to remote + withStatus ("Uploading to " ++ repoString ++ " ...") $ + unlessM + (push remotePath repo + `withIOError` (throwError . GitError.PushException repo . show)) + (throwError $ GitError.PushNoOp repo) + -- Commit our changes + push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO + push remotePath (GitRepo url gitbranch) = do + -- has anything changed? + status <- gitTextIn remotePath ["status", "--short"] + if Text.null status then + pure False + else do + gitIn remotePath ["add", "--all", "."] + gitIn remotePath + ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] + -- Push our changes to the repo + case gitbranch of + Nothing -> gitIn remotePath ["push", "--quiet", url] + Just gitbranch -> error $ + "Pushing to a specific branch isn't fully implemented or tested yet.\n" + ++ "InputPatterns.parseUri was expected to have prevented you " + ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
+ -- gitIn remotePath ["push", "--quiet", url, gitbranch] + pure True diff --git a/parser-typechecker/src/Unison/Codebase/GitError.hs b/parser-typechecker/src/Unison/Codebase/GitError.hs index 082a13b188..9798f819fb 100644 --- a/parser-typechecker/src/Unison/Codebase/GitError.hs +++ b/parser-typechecker/src/Unison/Codebase/GitError.hs @@ -19,5 +19,6 @@ data GitError = NoGit | RemoteNamespaceHashAmbiguous RemoteRepo ShortBranchHash (Set Branch.Hash) | CouldntLoadRootBranch RemoteRepo Branch.Hash | CouldntParseRootBranch RemoteRepo String + | CouldntOpenCodebase RemoteRepo CodebasePath | SomeOtherError String deriving Show diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index ac8132df88..82ee20c7d4 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -35,7 +35,7 @@ import qualified Data.Text.IO as TextIO import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite -import System.Directory (canonicalizePath) +import UnliftIO.Directory (canonicalizePath) import qualified System.Exit as SysExit import System.FilePath (()) import U.Codebase.HashTags (CausalHash (unCausalHash)) @@ -77,7 +77,7 @@ import Unison.Type (Type) import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import qualified Unison.Util.Pretty as P -import UnliftIO (MonadIO, catchIO) +import UnliftIO (MonadIO, catchIO, liftIO) import UnliftIO.Directory (createDirectoryIfMissing, getHomeDirectory) import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM @@ -92,14 +92,14 @@ codebasePath :: FilePath codebasePath = ".unison" "v2" "unison.sqlite3" -- get the codebase in dir, or in the home directory if not provided. 
-getCodebaseOrExit :: Maybe FilePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) +getCodebaseOrExit :: MonadIO m => Maybe FilePath -> m (m (), Codebase1.Codebase m Symbol Ann) getCodebaseOrExit mdir = do dir <- getCodebaseDir mdir progName <- SysEnv.getProgName prettyDir <- P.string <$> canonicalizePath dir let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir sqliteCodebase dir >>= \case - Left _missingSchema -> do + Left _missingSchema -> liftIO do PT.putPrettyLn' errMsg SysExit.exitFailure Right c -> pure c @@ -122,33 +122,33 @@ getNoCodebaseErrorMsg executable prettyDir mdir = secondLine ] -initCodebaseAndExit :: Maybe FilePath -> IO () +initCodebaseAndExit :: MonadIO m => Maybe FilePath -> m () initCodebaseAndExit mdir = do dir <- getCodebaseDir mdir (closeCodebase, _codebase) <- initCodebase dir closeCodebase - SysExit.exitSuccess + liftIO SysExit.exitSuccess -- initializes a new codebase here (i.e. `ucm -codebase dir init`) -initCodebase :: FilePath -> IO (IO (), Codebase1.Codebase IO Symbol Ann) +initCodebase :: MonadIO m => FilePath -> m (m (), Codebase1.Codebase m Symbol Ann) initCodebase path = do Monad.when debug $ traceM $ "initCodebase " ++ path prettyDir <- P.string <$> canonicalizePath path - Monad.whenM (codebaseExists path) do + liftIO $ Monad.whenM (codebaseExists path) do PT.putPrettyLn' . P.wrap $ "It looks like " <> prettyDir <> " already exists." SysExit.exitFailure - PT.putPrettyLn' + liftIO $ PT.putPrettyLn' . 
P.wrap $ "Initializing a new codebase in: " <> prettyDir -- run sql create scripts createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) - Control.Exception.bracket + liftIO $ Control.Exception.bracket (unsafeGetConnection path) Sqlite.close (runReaderT Q.createSchema) @@ -159,12 +159,12 @@ initCodebase path = do Codebase1.initializeCodebase theCodebase pure (closeCodebase, theCodebase) -getCodebaseDir :: Maybe FilePath -> IO FilePath +getCodebaseDir :: MonadIO m => Maybe FilePath -> m FilePath getCodebaseDir = maybe getHomeDirectory pure -- checks if a db exists at `path` with the minimum schema -codebaseExists :: CodebasePath -> IO Bool -codebaseExists root = do +codebaseExists :: MonadIO m => CodebasePath -> m Bool +codebaseExists root = liftIO do Monad.when debug $ traceM $ "codebaseExists " ++ root Control.Exception.catch @Sqlite.SQLError (sqliteCodebase root >>= \case @@ -212,14 +212,14 @@ type TermBufferEntry = BufferEntry (Term Symbol Ann, Type Symbol Ann) type DeclBufferEntry = BufferEntry (Decl Symbol Ann) -unsafeGetConnection :: CodebasePath -> IO Sqlite.Connection +unsafeGetConnection :: MonadIO m => CodebasePath -> m Sqlite.Connection unsafeGetConnection root = do Monad.when debug $ traceM $ "unsafeGetconnection " ++ root ++ " -> " ++ (root codebasePath) - conn <- Sqlite.open $ root codebasePath + conn <- liftIO . 
Sqlite.open $ root codebasePath runReaderT Q.setFlags conn pure conn -sqliteCodebase :: CodebasePath -> IO (Either [(Q.SchemaType, Q.SchemaName)] (IO (), Codebase1.Codebase IO Symbol Ann)) +sqliteCodebase :: MonadIO m => CodebasePath -> m (Either [(Q.SchemaType, Q.SchemaName)] (m (), Codebase1.Codebase m Symbol Ann)) sqliteCodebase root = do Monad.when debug $ traceM $ "sqliteCodebase " ++ root conn <- unsafeGetConnection root @@ -227,7 +227,7 @@ sqliteCodebase root = do [] -> do termBuffer :: TVar (Map Hash TermBufferEntry) <- newTVarIO Map.empty declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty - let getTerm :: Reference.Id -> IO (Maybe (Term Symbol Ann)) + let getTerm :: MonadIO m => Reference.Id -> m (Maybe (Term Symbol Ann)) getTerm (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = runDB' conn do term2 <- Ops.loadTermByReference (C.Reference.Id h2 i) @@ -251,20 +251,20 @@ sqliteCodebase root = do getDeclTypeById :: EDB m => C.Reference.Id -> m CT.ConstructorType getDeclTypeById = fmap Cv.decltype2to1 . 
Ops.getDeclTypeByReference - getTypeOfTermImpl :: Reference.Id -> IO (Maybe (Type Symbol Ann)) + getTypeOfTermImpl :: MonadIO m => Reference.Id -> m (Maybe (Type Symbol Ann)) getTypeOfTermImpl id | debug && trace ("getTypeOfTermImpl " ++ show id) False = undefined getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = runDB' conn do type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) Cv.ttype2to1 getCycleLen type2 - getTypeDeclaration :: Reference.Id -> IO (Maybe (Decl Symbol Ann)) + getTypeDeclaration :: MonadIO m => Reference.Id -> m (Maybe (Decl Symbol Ann)) getTypeDeclaration (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = runDB' conn do decl2 <- Ops.loadDeclByReference (C.Reference.Id h2 i) Cv.decl2to1 h1 getCycleLen decl2 - putTerm :: Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> IO () + putTerm :: MonadIO m => Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> m () putTerm id tm tp | debug && trace (show "SqliteCodebase.putTerm " ++ show id ++ " " ++ show tm ++ " " ++ show tp) False = undefined putTerm (Reference.Id h@(Cv.hash1to2 -> h2) i n') tm tp = runDB conn $ @@ -378,7 +378,7 @@ sqliteCodebase root = do (\h -> tryFlushTermBuffer h >> tryFlushDeclBuffer h) h - putTypeDeclaration :: Reference.Id -> Decl Symbol Ann -> IO () + putTypeDeclaration :: MonadIO m => Reference.Id -> Decl Symbol Ann -> m () putTypeDeclaration (Reference.Id h@(Cv.hash1to2 -> h2) i n') decl = runDB conn $ unlessM @@ -399,7 +399,7 @@ sqliteCodebase root = do tryFlushDeclBuffer h ) - getRootBranch :: IO (Either Codebase1.GetRootBranchError (Branch IO)) + getRootBranch :: MonadIO m => m (Either Codebase1.GetRootBranchError (Branch m)) getRootBranch = fmap (Either.mapLeft err) . runExceptT @@ -418,7 +418,7 @@ sqliteCodebase root = do Codebase1.CouldntLoadRootBranch $ Cv.causalHash2to1 ch e -> error $ show e - putRootBranch :: Branch IO -> IO () + putRootBranch :: MonadIO m => Branch m -> m () putRootBranch branch1 = runDB conn . 
void @@ -426,8 +426,8 @@ sqliteCodebase root = do . Cv.causalbranch1to2 $ Branch.transform (lift . lift) branch1 - rootBranchUpdates :: IO (IO (), IO (Set Branch.Hash)) - rootBranchUpdates = pure (cleanup, newRootsDiscovered) + rootBranchUpdates :: MonadIO m => m (m (), m (Set Branch.Hash)) + rootBranchUpdates = pure (cleanup, liftIO newRootsDiscovered) where newRootsDiscovered = do Control.Concurrent.threadDelay maxBound -- hold off on returning @@ -436,7 +436,7 @@ sqliteCodebase root = do -- if this blows up on cromulent hashes, then switch from `hashToHashId` -- to one that returns Maybe. - getBranchForHash :: Branch.Hash -> IO (Maybe (Branch IO)) + getBranchForHash :: MonadIO m => Branch.Hash -> m (Maybe (Branch m)) getBranchForHash h = runDB conn do Ops.loadCausalBranchByCausalHash (Cv.branchHash1to2 h) >>= \case Just b -> @@ -444,25 +444,25 @@ sqliteCodebase root = do =<< Cv.causalbranch2to1 getCycleLen getDeclType b Nothing -> pure Nothing - dependentsImpl :: Reference -> IO (Set Reference.Id) + dependentsImpl :: MonadIO m => Reference -> m (Set Reference.Id) dependentsImpl r = runDB conn $ Set.traverse (Cv.referenceid2to1 getCycleLen) =<< Ops.dependents (Cv.reference1to2 r) - syncFromDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () + syncFromDirectory :: MonadIO m => Codebase1.CodebasePath -> SyncMode -> Branch m -> m () syncFromDirectory = error "todo" - syncToDirectory :: Codebase1.CodebasePath -> SyncMode -> Branch IO -> IO () + syncToDirectory :: MonadIO m => Codebase1.CodebasePath -> SyncMode -> Branch m -> m () syncToDirectory = error "todo" - watches :: UF.WatchKind -> IO [Reference.Id] + watches :: MonadIO m => UF.WatchKind -> m [Reference.Id] watches w = runDB conn $ Ops.listWatches (Cv.watchKind1to2 w) >>= traverse (Cv.referenceid2to1 getCycleLen) - -- getWatch :: UF.WatchKind -> Reference.Id -> IO (Maybe (Term Symbol Ann)) + getWatch :: MonadIO m => UF.WatchKind -> Reference.Id -> m (Maybe (Term Symbol Ann)) getWatch k 
r@(Reference.Id h _i _n) | elem k standardWatchKinds = runDB' conn $ Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) @@ -471,7 +471,7 @@ sqliteCodebase root = do standardWatchKinds = [UF.RegularWatch, UF.TestWatch] - putWatch :: UF.WatchKind -> Reference.Id -> Term Symbol Ann -> IO () + putWatch :: MonadIO m => UF.WatchKind -> Reference.Id -> Term Symbol Ann -> m () putWatch k r@(Reference.Id h _i _n) tm | elem k standardWatchKinds = runDB conn $ Ops.saveWatch @@ -480,8 +480,8 @@ sqliteCodebase root = do (Cv.term1to2 h tm) putWatch _unknownKind _ _ = pure () - getReflog :: IO [Reflog.Entry] - getReflog = + getReflog :: MonadIO m => m [Reflog.Entry] + getReflog = liftIO $ ( do contents <- TextIO.readFile (reflogPath root) let lines = Text.lines contents @@ -496,35 +496,35 @@ sqliteCodebase root = do "I couldn't understand this line in " ++ reflogPath root ++ "\n\n" ++ Text.unpack t - appendReflog :: Text -> Branch IO -> Branch IO -> IO () + appendReflog :: MonadIO m => Text -> Branch m -> Branch m -> m () appendReflog reason old new = let t = Reflog.toText $ Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason - in TextIO.appendFile (reflogPath root) (t <> "\n") + in liftIO $ TextIO.appendFile (reflogPath root) (t <> "\n") reflogPath :: CodebasePath -> FilePath reflogPath root = root "reflog" - termsOfTypeImpl :: Reference -> IO (Set Referent.Id) + termsOfTypeImpl :: MonadIO m => Reference -> m (Set Referent.Id) termsOfTypeImpl r = runDB conn $ Ops.termsHavingType (Cv.reference1to2 r) >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) - termsMentioningTypeImpl :: Reference -> IO (Set Referent.Id) + termsMentioningTypeImpl :: MonadIO m => Reference -> m (Set Referent.Id) termsMentioningTypeImpl r = runDB conn $ Ops.termsMentioningType (Cv.reference1to2 r) >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) - hashLength :: IO Int + hashLength :: Applicative m => m Int hashLength = pure 10 - branchHashLength :: IO Int + 
branchHashLength :: Applicative m => m Int branchHashLength = pure 10 - defnReferencesByPrefix :: OT.ObjectType -> ShortHash -> IO (Set Reference.Id) + defnReferencesByPrefix :: MonadIO m => OT.ObjectType -> ShortHash -> m (Set Reference.Id) defnReferencesByPrefix _ (ShortHash.Builtin _) = pure mempty defnReferencesByPrefix ot (ShortHash.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) _cid) = Monoid.fromMaybe <$> runDB' conn do @@ -535,13 +535,13 @@ sqliteCodebase root = do Set.fromList <$> traverse (Cv.referenceid2to1 getCycleLen) (Set.toList refs) - termReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + termReferencesByPrefix :: MonadIO m => ShortHash -> m (Set Reference.Id) termReferencesByPrefix = defnReferencesByPrefix OT.TermComponent - declReferencesByPrefix :: ShortHash -> IO (Set Reference.Id) + declReferencesByPrefix :: MonadIO m => ShortHash -> m (Set Reference.Id) declReferencesByPrefix = defnReferencesByPrefix OT.DeclComponent - referentsByPrefix :: ShortHash -> IO (Set Referent.Id) + referentsByPrefix :: MonadIO m => ShortHash -> m (Set Referent.Id) referentsByPrefix SH.Builtin {} = pure mempty referentsByPrefix (SH.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) cid) = runDB conn do termReferents <- @@ -555,7 +555,7 @@ sqliteCodebase root = do ] pure . Set.fromList $ termReferents <> declReferents - branchHashesByPrefix :: ShortBranchHash -> IO (Set Branch.Hash) + branchHashesByPrefix :: MonadIO m => ShortBranchHash -> m (Set Branch.Hash) branchHashesByPrefix sh = runDB conn do -- given that a Branch is shallow, it's really `CausalHash` that you'd -- refer to to specify a full namespace w/ history. @@ -570,12 +570,14 @@ sqliteCodebase root = do -- primarily with commit hashes. -- Arya leaning towards doing the same for Unison. 
- let finalizer = do - Sqlite.close conn + let + finalizer :: MonadIO m => m () + finalizer = do + liftIO $ Sqlite.close conn decls <- readTVarIO declBuffer terms <- readTVarIO termBuffer let printBuffer header b = - if b /= mempty + liftIO if b /= mempty then putStrLn header >> putStrLn "" >> print b else pure () printBuffer "Decls:" decls @@ -612,10 +614,10 @@ sqliteCodebase root = do ) missingSchema -> pure . Left $ missingSchema -runDB' :: Connection -> MaybeT (ReaderT Connection (ExceptT Ops.Error IO)) a -> IO (Maybe a) +runDB' :: MonadIO m => Connection -> MaybeT (ReaderT Connection (ExceptT Ops.Error m)) a -> m (Maybe a) runDB' conn = runDB conn . runMaybeT -runDB :: Connection -> ReaderT Connection (ExceptT Ops.Error IO) a -> IO a +runDB :: MonadIO m => Connection -> ReaderT Connection (ExceptT Ops.Error m) a -> m a runDB conn = (runExceptT >=> err) . flip runReaderT conn where err = \case Left err -> error $ show err; Right a -> pure a diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs new file mode 100644 index 0000000000..e0b46742f2 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs @@ -0,0 +1,138 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.SqliteCodebase.Git + ( importRemoteBranch + , pushGitRootBranch + , viewRemoteBranch + ) where + +import Unison.Prelude +import Unison.Codebase.Editor.Git + +import Control.Monad.Except ( throwError + , ExceptT + ) +import Control.Monad.Extra ((||^)) +import qualified Data.Text as Text +import qualified Data.Validation as Validation +import qualified Unison.Codebase.GitError as GitError +import Unison.Codebase.GitError (GitError) +import qualified Unison.Codebase as Codebase +import Unison.Codebase (Codebase, CodebasePath) +import Unison.Codebase.Editor.RemoteRepo ( RemoteRepo(GitRepo) + , RemoteNamespace + , printRepo + ) +import qualified 
Unison.Codebase.SqliteCodebase as FC +import Unison.Codebase.Branch ( Branch + , headHash + ) +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.SyncMode ( SyncMode ) +import Unison.Util.Timing (time) +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.FileCodebase.Common (updateCausalHead, branchHeadDir) + +-- | Sync elements as needed from a remote codebase into the local one. +-- If `sbh` is supplied, we try to load the specified branch hash; +-- otherwise we try to load the root branch. +importRemoteBranch + :: forall m v a + . MonadIO m + => Codebase m v a + -> RemoteNamespace + -> SyncMode + -> ExceptT GitError m (Branch m) +importRemoteBranch codebase ns mode = do + (branch, cacheDir) <- viewRemoteBranch' ns + withStatus "Importing downloaded files into local codebase..." $ + time "SyncFromDirectory" $ + lift $ Codebase.syncFromDirectory codebase cacheDir mode branch + pure branch + +-- | Pull a git branch and view it from the cache, without syncing into the +-- local codebase. +viewRemoteBranch :: forall m. MonadIO m + => RemoteNamespace -> ExceptT GitError m (Branch m) +viewRemoteBranch = fmap fst . viewRemoteBranch' + +viewRemoteBranch' :: forall m. MonadIO m + => RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) +viewRemoteBranch' (repo, sbh, path) = do + -- set up the cache dir + remotePath <- time "Git fetch" $ pullBranch repo + (closeCodebase, codebase) <- lift (FC.sqliteCodebase remotePath) >>= + Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . 
fmap pure + -- try to load the requested branch from it + branch <- time "Git fetch (sbh)" $ case sbh of + -- load the root branch + Nothing -> lift (Codebase.getRootBranch codebase) >>= \case + Left Codebase.NoRootBranch -> pure Branch.empty + Left (Codebase.CouldntLoadRootBranch h) -> + throwError $ GitError.CouldntLoadRootBranch repo h + Left (Codebase.CouldntParseRootBranch s) -> + throwError $ GitError.CouldntParseRootBranch repo s + Right b -> pure b + -- load from a specific `ShortBranchHash` + Just sbh -> do + branchCompletions <- lift $ Codebase.branchHashesByPrefix codebase sbh + case toList branchCompletions of + [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + [h] -> (lift $ Codebase.getBranchForHash codebase h) >>= \case + Just b -> pure b + Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions + lift closeCodebase + pure (Branch.getAt' path branch, remotePath) + +-- Given a branch that is "after" the existing root of a given git repo, +-- stage and push the branch (as the new root) + dependencies to the repo. 
+pushGitRootBranch + :: MonadIO m + => Codebase m v a + -> Branch m + -> RemoteRepo + -> SyncMode + -> ExceptT GitError m () +pushGitRootBranch codebase branch repo syncMode = do + -- Pull the remote repo into a staging directory + (remoteRoot, remotePath) <- viewRemoteBranch' (repo, Nothing, Path.empty) + ifM (pure (remoteRoot == Branch.empty) + ||^ lift (remoteRoot `Branch.before` branch)) + -- ours is newer 👍, meaning this is a fast-forward push, + -- so sync branch to staging area + (stageAndPush remotePath) + (throwError $ GitError.PushDestinationHasNewStuff repo) + where + stageAndPush remotePath = do + let repoString = Text.unpack $ printRepo repo + withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ + lift (Codebase.syncToDirectory codebase remotePath syncMode branch) + updateCausalHead (branchHeadDir remotePath) (Branch._history branch) + -- push staging area to remote + withStatus ("Uploading to " ++ repoString ++ " ...") $ + unlessM + (push remotePath repo + `withIOError` (throwError . GitError.PushException repo . show)) + (throwError $ GitError.PushNoOp repo) + -- Commit our changes + push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO + push remotePath (GitRepo url gitbranch) = do + -- has anything changed? + status <- gitTextIn remotePath ["status", "--short"] + if Text.null status then + pure False + else do + gitIn remotePath ["add", "--all", "."] + gitIn remotePath + ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] + -- Push our changes to the repo + case gitbranch of + Nothing -> gitIn remotePath ["push", "--quiet", url] + Just gitbranch -> error $ + "Pushing to a specific branch isn't fully implemented or tested yet.\n" + ++ "InputPatterns.parseUri was expected to have prevented you " + ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
+ -- gitIn remotePath ["push", "--quiet", url, gitbranch] + pure True diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs new file mode 100644 index 0000000000..1df25cc78a --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs @@ -0,0 +1,142 @@ +module Unison.Codebase.SqliteCodebase.SyncEphemeral where + +-- syncToDirectory :: forall m v a +-- . MonadIO m +-- => Var v +-- => S.Format v +-- -> S.Format a +-- -> CodebasePath +-- -> CodebasePath +-- -> SyncMode +-- -> Branch m +-- -> m () +-- syncToDirectory fmtV fmtA = do +-- sync <- Sync22.sync22 +-- where +-- -- Use State and Lens to do some specified thing at most once, to create a file. +-- ifNeedsSyncing :: forall m s h. (MonadIO m, MonadState s m, Ord h) +-- => h +-- -> CodebasePath +-- -> (CodebasePath -> h -> FilePath) -- done if this filepath exists +-- -> SimpleLens s (Set h) -- lens to track if `h` is already done +-- -> (h -> m ()) -- do! +-- -> m () -- don't +-- -> m () +-- ifNeedsSyncing h destPath getFilename l doSync dontSync = +-- ifM (use (l . to (Set.member h))) dontSync $ do +-- l %= Set.insert h +-- if mode == SyncMode.Complete then doSync h +-- else ifM (doesFileExist (getFilename destPath h)) dontSync (doSync h) +-- processBranches :: forall m +-- . MonadIO m +-- => MonadState SyncedEntities m +-- => MonadWriter (BD.Dependencies, Set Error) m +-- => [(Branch.Hash, Maybe (m (Branch m)))] +-- -> m () +-- processBranches [] = pure () +-- -- for each branch, +-- processBranches ((h, mmb) : rest) = +-- let tellError = Writer.tell . (mempty,) . Set.singleton +-- tellDependencies = Writer.tell . 
(,mempty) in +-- -- if hash exists at the destination, skip it, mark it done +-- ifNeedsSyncing h destPath branchPath syncedBranches +-- (\h -> +-- -- else if hash exists at the source, enqueue its dependencies, copy it, mark it done +-- ifM (doesFileExist (branchPath srcPath h)) +-- (do +-- (branches, deps) <- BD.fromRawCausal <$> +-- (deserializeRawBranchDependencies tellError srcPath h) +-- copyFileWithParents (branchPath srcPath h) (branchPath destPath h) +-- tellDependencies deps +-- processBranches (branches ++ rest)) +-- -- else if it's in memory, enqueue its dependencies, write it, mark it done +-- case mmb of +-- Just mb -> do +-- b <- mb +-- let (branches, deps) = BD.fromBranch b +-- let causalRaw = Branch.toCausalRaw b +-- serializeRawBranch destPath h causalRaw +-- tellDependencies deps +-- processBranches (branches ++ rest) +-- -- else -- error? +-- Nothing -> do +-- tellError (MissingBranch h) +-- processBranches rest +-- ) +-- (processBranches rest) + +-- -- syncToDirectory' (S.get fmtV) (S.get fmtA) + +-- -- data Error = Error () +-- -- = MissingBranch Branch.Hash +-- -- | MissingPatch Branch.EditHash +-- -- | MissingTerm Reference.Id +-- -- | MissingTypeOfTerm Reference.Id +-- -- | MissingDecl Reference.Id +-- -- | InvalidBranch Branch.Hash +-- -- | InvalidTerm Reference.Id +-- -- | InvalidTypeOfTerm Reference.Id +-- -- | InvalidDecl Reference.Id +-- -- deriving (Eq, Ord, Show) + +-- syncToDirectory' :: forall m v a +-- . MonadIO m +-- => Var v +-- => S.Get v +-- -> S.Get a +-- -> CodebasePath +-- -> CodebasePath +-- -> SyncMode +-- -> Branch m +-- -> m () +-- syncToDirectory' getV getA srcPath destPath mode newRoot = +-- let warnMissingEntities = False in +-- flip evalStateT mempty $ do -- MonadState s m +-- (deps, errors) <- time "Sync Branches" $ execWriterT $ +-- processBranches [(Branch.headHash newRoot +-- ,Just . pure . Branch.transform (lift . 
lift) $ newRoot)] +-- errors' <- time "Sync Definitions" $ +-- execWriterT $ processDependencies (BD.to' deps) +-- time "Write indices" $ do +-- lift . writeDependentsIndex =<< use dependentsIndex +-- lift . writeTypeIndex =<< use typeIndex +-- lift . writeTypeMentionsIndex =<< use typeMentionsIndex +-- when (warnMissingEntities) $ for_ (errors <> errors') traceShowM + +-- processBranches :: forall m +-- . MonadIO m +-- => MonadState SyncedEntities m +-- => MonadWriter (BD.Dependencies, Set Error) m +-- => [(Branch.Hash, Maybe (m (Branch m)))] +-- -> m () +-- processBranches [] = pure () +-- -- for each branch, +-- processBranches ((h, mmb) : rest) = +-- let tellError = Writer.tell . (mempty,) . Set.singleton +-- tellDependencies = Writer.tell . (,mempty) in +-- -- if hash exists at the destination, skip it, mark it done +-- ifNeedsSyncing h destPath branchPath syncedBranches +-- (\h -> +-- -- else if hash exists at the source, enqueue its dependencies, copy it, mark it done +-- ifM (doesFileExist (branchPath srcPath h)) +-- (do +-- (branches, deps) <- BD.fromRawCausal <$> +-- (deserializeRawBranchDependencies tellError srcPath h) +-- copyFileWithParents (branchPath srcPath h) (branchPath destPath h) +-- tellDependencies deps +-- processBranches (branches ++ rest)) +-- -- else if it's in memory, enqueue its dependencies, write it, mark it done +-- case mmb of +-- Just mb -> do +-- b <- mb +-- let (branches, deps) = BD.fromBranch b +-- let causalRaw = Branch.toCausalRaw b +-- serializeRawBranch destPath h causalRaw +-- tellDependencies deps +-- processBranches (branches ++ rest) +-- -- else -- error? 
+-- Nothing -> do +-- tellError (MissingBranch h) +-- processBranches rest +-- ) +-- (processBranches rest) \ No newline at end of file diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 835c7d2ada..ddfdabdef1 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -84,6 +84,7 @@ library Unison.Codebase.Editor.VersionParser Unison.Codebase.FileCodebase Unison.Codebase.FileCodebase.Common + Unison.Codebase.FileCodebase.Git Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex Unison.Codebase.GitError Unison.Codebase.Metadata @@ -99,6 +100,8 @@ library Unison.Codebase.ShortBranchHash Unison.Codebase.SqliteCodebase Unison.Codebase.SqliteCodebase.Conversions + Unison.Codebase.SqliteCodebase.Git + Unison.Codebase.SqliteCodebase.SyncEphemeral Unison.Codebase.SyncMode Unison.Codebase.TermEdit Unison.Codebase.TranscriptParser @@ -247,6 +250,7 @@ library unison-core1, unliftio, util, + validation, vector, unicode-show, x509, diff --git a/unison-core/unison-core1.cabal b/unison-core/unison-core1.cabal index 43401a3d71..0253e39a9c 100644 --- a/unison-core/unison-core1.cabal +++ b/unison-core/unison-core1.cabal @@ -106,7 +106,8 @@ library text, transformers, util, - vector + vector, + unison-core ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures From c77d7d6457b3ea1d85c7579be98ead30f1de899a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 9 Mar 2021 11:50:13 -0500 Subject: [PATCH 119/225] stack.yaml --- stack.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.yaml b/stack.yaml index 66afb2c5c9..0fed186e33 100644 --- a/stack.yaml +++ b/stack.yaml @@ -13,6 +13,7 @@ packages: - codebase1/codebase - codebase2/codebase - codebase2/codebase-sqlite +- codebase2/codebase-sync - codebase2/core - codebase2/language - codebase2/runtime @@ -47,6 +48,7 @@ extra-deps: - binary-0.8.8.0 - 
parsec-3.1.14.0 - Cabal-3.2.1.0 +- monad-validate-1.2.0.0@sha256:9850f408431098b28806dd464b6825a88a0b56c84f380d7fe0454c1df9d6f881,3505 ghc-options: # All packages From ca16a803bec17c253c3c8f12a4b678e40234bae2 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 10 Mar 2021 13:57:47 -0500 Subject: [PATCH 120/225] render CouldntOpenCodebase git error --- parser-typechecker/src/Unison/CommandLine/OutputMessages.hs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs index 1315a00bd2..fc43f32ab1 100644 --- a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs +++ b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs @@ -662,6 +662,9 @@ notifyUser dir o = case o of TodoOutput names todo -> pure (todoOutput names todo) GitError input e -> pure $ case e of + CouldntOpenCodebase repo localPath -> P.wrap $ "I couldn't open the repository at" + <> prettyRepoBranch repo <> "in the cache directory at" + <> P.backticked' (P.string localPath) "." 
CouldntParseRootBranch repo s -> P.wrap $ "I couldn't parse the string" <> P.red (P.string s) <> "into a namespace hash, when opening the repository at" <> P.group (prettyRepoBranch repo <> ".") From a9143347eb67bd08cf24c54b5a786c2c1572106b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 10 Mar 2021 14:33:56 -0500 Subject: [PATCH 121/225] push crash on error debugging stuff behind a constant --- .../U/Codebase/Sqlite/Operations.hs | 13 ++++++++++--- .../codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 14 +++++++++++--- questions.md | 4 ++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index ee8d968a69..d63db7bc71 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -15,7 +15,8 @@ module U.Codebase.Sqlite.Operations where import Control.Lens (Lens') import qualified Control.Lens as Lens import Control.Monad (MonadPlus (mzero), join, when, (<=<)) -import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) +import Control.Monad.Except (ExceptT, MonadError, runExceptT) +import qualified Control.Monad.Except as Except import Control.Monad.State (MonadState, StateT, evalStateT) import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) @@ -122,8 +123,12 @@ import qualified U.Util.Type as TypeUtil -- * Error handling -debug :: Bool +throwError :: Err m => Error -> m a +throwError = if crashOnError then error . 
show else Except.throwError + +debug, crashOnError :: Bool debug = False +crashOnError = False type Err m = (MonadError Error m, HasCallStack) @@ -988,11 +993,13 @@ saveRootBranch (C.Causal hc he parents me) = do lookupChild = lookup_ Lens._4 Lens._4 LocalBranchChildId startState = mempty @BranchSavingState saveBranchObject :: DB m => Db.BranchHashId -> BranchLocalIds -> S.Branch.Full.LocalBranch -> m Db.BranchObjectId + saveBranchObject id li lBranch | debug && trace ("saveBranchObject\n\tid = " ++ show id ++ "\n\tli = " ++ show li ++ "\n\tlBranch = " ++ show lBranch) False = undefined saveBranchObject (Db.unBranchHashId -> hashId) li lBranch = do let bytes = S.putBytes S.putBranchFormat $ S.BranchFormat.Full li lBranch oId <- Q.saveObject hashId OT.Namespace bytes pure $ Db.BranchObjectId oId - done :: EDB m => (a, BranchSavingWriter) -> m (BranchLocalIds, a) + done :: (EDB m, Show a) => (a, BranchSavingWriter) -> m (BranchLocalIds, a) + done (lBranch, written) | debug && trace ("saveRootBranch.done\n\tlBranch = " ++ show lBranch ++ "\n\twritten = " ++ show written) False = undefined done (lBranch, (textValues, defnHashes, patchObjectIds, branchCausalIds)) = do textIds <- liftQ $ traverse Q.saveText textValues defnObjectIds <- traverse primaryHashToExistingObjectId defnHashes diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index bdf5ea6317..8dd2e5045c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -17,8 +17,9 @@ {-# LANGUAGE TypeOperators #-} module U.Codebase.Sqlite.Queries where -import Control.Monad.Except (ExceptT, MonadError, runExceptT, throwError) import Control.Monad (filterM, when) +import Control.Monad.Except (ExceptT, MonadError, runExceptT) +import qualified Control.Monad.Except as Except import Control.Monad.IO.Class (MonadIO) import Control.Monad.Reader (MonadReader (ask)) import 
Control.Monad.Trans (MonadIO (liftIO)) @@ -38,6 +39,7 @@ import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) import Debug.Trace (trace, traceM) +import GHC.Stack (HasCallStack) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, Generation (..), HashId (..), ObjectId (..), TextId) @@ -55,7 +57,13 @@ import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO) type DB m = (MonadIO m, MonadReader Connection m) -type EDB m = (DB m, MonadError Integrity m) +type EDB m = (DB m, MonadError Integrity m, HasCallStack) + +crashOnError :: Bool +crashOnError = True + +throwError :: EDB m => Integrity -> m c +throwError = if crashOnError then error . show else Except.throwError data Integrity = UnknownHashId HashId @@ -78,7 +86,7 @@ noExcept a = Right a -> pure a Left e -> error $ "unexpected error: " ++ show e -orError :: MonadError e m => e -> Maybe b -> m b +orError :: MonadError Integrity m => Integrity -> Maybe b -> m b orError e = maybe (throwError e) pure type TypeHashReference = Reference' TextId HashId diff --git a/questions.md b/questions.md index 91beca0191..517078492b 100644 --- a/questions.md +++ b/questions.md @@ -1,5 +1,6 @@ next steps: +- [ ] add format tag to watch cache expressions? - [x] fix up `Operations.loadBranchByCausalHash`; currently it's getting a single namespace, but we need to somewhere get the causal history. - [x] load a causal, allowing a missing value (C.Branch.Spine) - [x] load a causal and require its value (C.Branch.Causal) @@ -15,6 +16,9 @@ next steps: - [ ] `SqliteCodebase.syncToDirectory` - [ ] Managing external edit events? - [ ] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? 
+ + +what even are these: - [ ] Implement relational metadata - [ ] do the tag thing to make sure that causal hashes comes from a unique token string compared to other stuff in the codebase. (maybe `accumulate` should take a tag as its first argument, forcing us to audit all the call sites) From bc46711c6baf7cdbc5a9d2bc1ad3592956f927ae Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 11 Mar 2021 13:02:08 -0500 Subject: [PATCH 122/225] syncToDirectory --- .../U/Codebase/Sqlite/Operations.hs | 18 +- .../U/Codebase/Sqlite/Queries.hs | 6 +- .../U/Codebase/Sqlite/Sync22.hs | 6 +- codebase2/codebase-sqlite/sql/create.sql | 5 - codebase2/codebase-sync/U/Codebase/Sync.hs | 30 +- hie.yaml | 6 +- parser-typechecker/src/Unison/Codebase.hs | 2 + .../src/Unison/Codebase/FileCodebase.hs | 4 + .../Unison/Codebase/FileCodebase/Common.hs | 9 +- .../src/Unison/Codebase/SqliteCodebase.hs | 155 +++++++---- .../Codebase/SqliteCodebase/SyncEphemeral.hs | 256 ++++++++---------- .../unison-parser-typechecker.cabal | 1 + 12 files changed, 269 insertions(+), 229 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index d63db7bc71..e783a6e04f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -891,8 +891,15 @@ type BranchSavingConstraint m = (MonadState BranchSavingState m, MonadWriter Bra type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter m) saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) -saveRootBranch (C.Causal hc he parents me) = do - when debug $ traceM $ "\nsaveRootBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) +saveRootBranch c = do + when debug $ traceM "saveRootBranch" + (boId, chId) <- saveBranch c + Q.setNamespaceRoot chId + pure (boId, chId) + +saveBranch :: EDB m => 
C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) +saveBranch (C.Causal hc he parents me) = do + when debug $ traceM $ "\nsaveBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) -- check if we can skip the whole thing, by checking if there are causal parents for hc chId <- liftQ (Q.saveCausalHash hc) parentCausalHashIds <- @@ -906,7 +913,7 @@ saveRootBranch (C.Causal hc he parents me) = do parentChId <- liftQ (Q.saveCausalHash causalHash) -- test if the parent has been saved previously: liftQ (Q.loadCausalParents parentChId) >>= \case - [] -> do c <- mcausal; snd <$> saveRootBranch c + [] -> do c <- mcausal; snd <$> saveBranch c _grandParents -> pure parentChId parentCausalHashIds -> pure parentCausalHashIds @@ -922,7 +929,6 @@ saveRootBranch (C.Causal hc he parents me) = do liftQ (Q.saveCausalParents chId parentCausalHashIds) pure boId - Q.setNamespaceRoot chId pure (boId, chId) where c2lBranch :: EDB m => C.Branch.Branch m -> m (BranchLocalIds, S.Branch.Full.LocalBranch) @@ -950,7 +956,7 @@ saveRootBranch (C.Causal hc he parents me) = do Nothing -> savePatch h =<< (lift . lift) mp lookupPatch patchOID saveChild :: EDB m => C.Branch.Causal m -> BranchSavingMonad m LocalBranchChildId - saveChild c = (lift . lift) (saveRootBranch c) >>= lookupChild + saveChild c = (lift . 
lift) (saveBranch c) >>= lookupChild lookupText :: ( MonadState s m, MonadWriter w m, @@ -999,7 +1005,7 @@ saveRootBranch (C.Causal hc he parents me) = do oId <- Q.saveObject hashId OT.Namespace bytes pure $ Db.BranchObjectId oId done :: (EDB m, Show a) => (a, BranchSavingWriter) -> m (BranchLocalIds, a) - done (lBranch, written) | debug && trace ("saveRootBranch.done\n\tlBranch = " ++ show lBranch ++ "\n\twritten = " ++ show written) False = undefined + done (lBranch, written) | debug && trace ("saveBranch.done\n\tlBranch = " ++ show lBranch ++ "\n\twritten = " ++ show written) False = undefined done (lBranch, (textValues, defnHashes, patchObjectIds, branchCausalIds)) = do textIds <- liftQ $ traverse Q.saveText textValues defnObjectIds <- traverse primaryHashToExistingObjectId defnHashes diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 8dd2e5045c..a8b811e138 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -57,12 +57,14 @@ import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO) type DB m = (MonadIO m, MonadReader Connection m) -type EDB m = (DB m, MonadError Integrity m, HasCallStack) +type EDB m = (DB m, Err m) + +type Err m = (MonadError Integrity m, HasCallStack) crashOnError :: Bool crashOnError = True -throwError :: EDB m => Integrity -> m c +throwError :: Err m => Integrity -> m c throwError = if crashOnError then error . 
show else Except.throwError data Integrity diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index ef041cdb76..e9537292fc 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -63,6 +63,7 @@ data DecodeError | ErrBranchBody Word8 | ErrPatchBody Word8 | ErrWatchResult + deriving (Show) type ErrString = String @@ -72,6 +73,7 @@ data Error | -- | hashes corresponding to a single object in source codebase -- correspond to multiple objects in destination codebase HashObjectCorrespondence ObjectId [HashId] [ObjectId] + deriving (Show) data Env = Env { srcDB :: Connection, @@ -93,9 +95,7 @@ sync22 = do hCache <- Cache.semispaceCache size oCache <- Cache.semispaceCache size gc <- runSrc $ Q.getNurseryGeneration - pure $ Sync roots (trySync tCache hCache oCache (succ gc)) - where - roots = runSrc $ fmap (\h -> [C h]) Q.loadNamespaceRoot + pure $ Sync (trySync tCache hCache oCache (succ gc)) trySync :: forall m. 
diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index d0e45dd60f..04bd5da104 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -62,11 +62,6 @@ CREATE TABLE causal ( CREATE INDEX causal_value_hash_id ON causal(value_hash_id); CREATE INDEX causal_gc_generation ON causal(gc_generation); --- valueHash : Hash = hash(value) --- db.saveValue(valueHash, value) --- causalHash : Hash = hash(new Causal(valueHash, parentCausalHashes)) --- db.saveCausal(selfHash = causalHash, valueHash, parentCausalHashes) - CREATE TABLE namespace_root ( -- a dummy pk because -- id INTEGER PRIMARY KEY NOT NULL, diff --git a/codebase2/codebase-sync/U/Codebase/Sync.hs b/codebase2/codebase-sync/U/Codebase/Sync.hs index 7cbf837832..7435250fb5 100644 --- a/codebase2/codebase-sync/U/Codebase/Sync.hs +++ b/codebase2/codebase-sync/U/Codebase/Sync.hs @@ -3,6 +3,7 @@ {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE RankNTypes #-} module U.Codebase.Sync where -- localSyncFile @@ -31,10 +32,10 @@ import Data.Foldable (traverse_) data TrySyncResult h = Missing [h] | Done | PreviouslyDone -data Sync m h = Sync - { roots :: m [h], - trySync :: h -> m (TrySyncResult h) - } +data Sync m h = Sync { trySync :: h -> m (TrySyncResult h) } + +transformSync :: (forall a. m a -> n a) -> Sync m h -> Sync n h +transformSync f (Sync t) = Sync (f . t) data Progress m h = Progress { need :: h -> m (), @@ -42,11 +43,16 @@ data Progress m h = Progress allDone :: m () } -sync :: forall m h. Monad m => Sync m h -> Progress m h -> m () -sync Sync{..} Progress{..} = do roots >>= go where - go :: [h] -> m () - go (h : hs) = trySync h >>= \case - Missing deps -> traverse_ need deps >> go (deps ++ h : hs) - Done -> done h >> go hs - PreviouslyDone -> go hs - go [] = allDone \ No newline at end of file +transformProgress :: (forall a. 
m a -> n a) -> Progress m h -> Progress n h +transformProgress f (Progress a b c) = Progress (f . a) (f . b) (f c) + +sync :: forall m h. Monad m => Sync m h -> Progress m h -> [h] -> m () +sync Sync {..} Progress {..} roots = go roots + where + go :: [h] -> m () + go (h : hs) = + trySync h >>= \case + Missing deps -> traverse_ need deps >> go (deps ++ h : hs) + Done -> done h >> go hs + PreviouslyDone -> go hs + go [] = allDone \ No newline at end of file diff --git a/hie.yaml b/hie.yaml index 737b0d21a5..e81696108e 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,8 +1,5 @@ cradle: stack: - - path: "codebase-convert-2to2/lib" - component: "unison-codebase-sync-2to2:lib" - - path: "codebase1/codebase/." component: "unison-codebase1:lib" @@ -12,6 +9,9 @@ cradle: - path: "codebase2/codebase-sqlite/." component: "unison-codebase-sqlite:lib" + - path: "codebase2/codebase-sync/." + component: "unison-codebase-sync:lib" + - path: "codebase2/core/." component: "unison-core:lib" diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index 3f1398f10d..bb132921de 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -59,6 +59,8 @@ data Codebase m v a = , putRootBranch :: Branch m -> m () , rootBranchUpdates :: m (m (), m (Set Branch.Hash)) , getBranchForHash :: Branch.Hash -> m (Maybe (Branch m)) + , putBranch :: Branch m -> m () + , branchExists :: Branch.Hash -> m Bool , dependentsImpl :: Reference -> m (Set Reference.Id) -- This copies all the dependencies of `b` from the specified diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index 41e972af53..c7d13ebdd1 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -85,8 +85,10 @@ import Unison.Codebase.FileCodebase.Common , putDecl , putTerm , putRootBranch + , putBranch , putWatch --- 
+ , hashExists , branchFromFiles , branchHashesByPrefix , termReferencesByPrefix @@ -186,6 +188,8 @@ codebase1' syncToDirectory branchCache fmtV@(S.Format getV putV) fmtA@(S.Format (putRootBranch path) (branchHeadUpdates path) (branchFromFiles branchCache path) + (putBranch path) + (hashExists path) dependents (flip (syncToDirectory fmtV fmtA) path) (syncToDirectory fmtV fmtA path) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs index 426047a2e5..feb8486b15 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs @@ -43,6 +43,7 @@ module Unison.Codebase.FileCodebase.Common , deserializeEdits , serializeRawBranch , branchFromFiles + , putBranch , branchHashesByPrefix , termReferencesByPrefix , termReferentsByPrefix @@ -331,14 +332,18 @@ getRootBranch cache root = time "FileCodebase.Common.getRootBranch" $ Just (Branch.Hash -> h) -> branchFromFiles cache root h <&> maybeToEither (Codebase.CouldntLoadRootBranch h) --- |only syncs branches and edits -- no dependencies putRootBranch :: MonadIO m => CodebasePath -> Branch m -> m () putRootBranch root b = do + putBranch root b + updateCausalHead (branchHeadDir root) (Branch._history b) + +-- |only syncs branches and edits -- no dependencies +putBranch :: MonadIO m => CodebasePath -> Branch m -> m () +putBranch root b = Branch.sync (hashExists root) (serializeRawBranch root) (serializeEdits root) b - updateCausalHead (branchHeadDir root) (Branch._history b) hashExists :: MonadIO m => CodebasePath -> Branch.Hash -> m Bool hashExists root h = doesFileExist (branchPath root h) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 82ee20c7d4..f27d64b86e 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ 
b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -1,10 +1,10 @@ -{-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE QuasiQuotes #-} +{-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ViewPatterns #-} module Unison.Codebase.SqliteCodebase where @@ -12,6 +12,7 @@ module Unison.Codebase.SqliteCodebase where -- import qualified U.Codebase.Sqlite.Operations' as Ops +import qualified Control.Concurrent import qualified Control.Exception import Control.Monad (filterM, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) @@ -24,6 +25,7 @@ import Data.Bifunctor (Bifunctor (first), second) import qualified Data.Either.Combinators as Either import Data.Foldable (Foldable (toList), traverse_) import Data.Functor (void) +import qualified Data.List as List import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) @@ -35,9 +37,9 @@ import qualified Data.Text.IO as TextIO import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite -import UnliftIO.Directory (canonicalizePath) import qualified System.Exit as SysExit import System.FilePath (()) +import qualified System.FilePath as FilePath import U.Codebase.HashTags (CausalHash (unCausalHash)) import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Sqlite.ObjectType as OT @@ -78,12 +80,9 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import qualified Unison.Util.Pretty as P import UnliftIO (MonadIO, catchIO, liftIO) -import UnliftIO.Directory (createDirectoryIfMissing, getHomeDirectory) +import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, getHomeDirectory) import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM -import qualified System.FilePath as FilePath -import qualified Control.Concurrent 
-import qualified Data.List as List debug :: Bool debug = False @@ -141,21 +140,24 @@ initCodebase path = do $ "It looks like " <> prettyDir <> " already exists." SysExit.exitFailure - liftIO $ PT.putPrettyLn' - . P.wrap - $ "Initializing a new codebase in: " - <> prettyDir + liftIO $ + PT.putPrettyLn' + . P.wrap + $ "Initializing a new codebase in: " + <> prettyDir -- run sql create scripts createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) - liftIO $ Control.Exception.bracket - (unsafeGetConnection path) - Sqlite.close - (runReaderT Q.createSchema) - - (closeCodebase, theCodebase) <- sqliteCodebase path >>= \case - Right x -> pure x - Left x -> error $ show x ++ " :) " + liftIO $ + Control.Exception.bracket + (unsafeGetConnection path) + Sqlite.close + (runReaderT Q.createSchema) + + (closeCodebase, theCodebase) <- + sqliteCodebase path >>= \case + Right x -> pure x + Left x -> error $ show x ++ " :) " Codebase1.initializeCodebase theCodebase pure (closeCodebase, theCodebase) @@ -167,9 +169,10 @@ codebaseExists :: MonadIO m => CodebasePath -> m Bool codebaseExists root = liftIO do Monad.when debug $ traceM $ "codebaseExists " ++ root Control.Exception.catch @Sqlite.SQLError - (sqliteCodebase root >>= \case - Left _ -> pure False - Right (close, _codebase) -> close >> pure True) + ( sqliteCodebase root >>= \case + Left _ -> pure False + Right (close, _codebase) -> close >> pure True + ) (const $ pure False) -- and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) @@ -194,16 +197,27 @@ data BufferEntry a = BufferEntry deriving (Eq, Show) prettyBufferEntry :: Show a => Hash -> BufferEntry a -> String -prettyBufferEntry (h :: Hash) BufferEntry{..} = +prettyBufferEntry (h :: Hash) BufferEntry {..} = "BufferEntry " ++ show h ++ "\n" - ++ " { beComponentTargetSize = " ++ show beComponentTargetSize ++ "\n" + ++ " { beComponentTargetSize = " + ++ show beComponentTargetSize + ++ "\n" ++ " , beComponent = " - ++ if Map.size beComponent < 
2 then show $ Map.toList beComponent else mkString (Map.toList beComponent) (Just "\n [ ") " , " (Just "]\n") - ++ " , beMissingDependencies =" - ++ if Set.size beMissingDependencies < 2 then show $ Set.toList beMissingDependencies else mkString (Set.toList beMissingDependencies) (Just "\n [ ") " , " (Just "]\n") - ++ " , beWaitingDependents =" - ++ if Set.size beWaitingDependents < 2 then show $ Set.toList beWaitingDependents else mkString (Set.toList beWaitingDependents) (Just "\n [ ") " , " (Just "]\n") - ++ " }" + ++ if Map.size beComponent < 2 + then show $ Map.toList beComponent + else + mkString (Map.toList beComponent) (Just "\n [ ") " , " (Just "]\n") + ++ " , beMissingDependencies =" + ++ if Set.size beMissingDependencies < 2 + then show $ Set.toList beMissingDependencies + else + mkString (Set.toList beMissingDependencies) (Just "\n [ ") " , " (Just "]\n") + ++ " , beWaitingDependents =" + ++ if Set.size beWaitingDependents < 2 + then show $ Set.toList beWaitingDependents + else + mkString (Set.toList beWaitingDependents) (Just "\n [ ") " , " (Just "]\n") + ++ " }" where mkString :: (Foldable f, Show a) => f a -> Maybe String -> String -> Maybe String -> String mkString as start middle end = fromMaybe "" start ++ List.intercalate middle (show <$> toList as) ++ fromMaybe "" end @@ -275,8 +289,9 @@ sqliteCodebase root = do let size' = Just n' -- if size was previously set, it's expected to match size'. 
case size of - Just n | n /= n' -> - error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size' + Just n + | n /= n' -> + error $ "targetSize for term " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size' _ -> pure () let comp' = Map.insert i (tm, tp) comp -- for the component element that's been passed in, add its dependencies to missing' @@ -319,7 +334,6 @@ sqliteCodebase root = do atomically $ modifyTVar tv (Map.delete h) Monad.when debug $ readTVarIO tv >>= \tv -> traceM $ "after delete: " ++ show tv - addBufferDependent :: (MonadIO m, Show a) => Hash -> TVar (Map Hash (BufferEntry a)) -> Hash -> m () addBufferDependent dependent tv dependency = withBuffer tv dependency \be -> do putBuffer tv dependency be {beWaitingDependents = Set.insert dependent $ beWaitingDependents be} @@ -352,9 +366,11 @@ sqliteCodebase root = do Monad.when debug $ traceM $ "tryFlushBuffer.notify waiting " ++ show waiting traverse_ tryWaiting waiting else -- update + putBuffer buf h $ BufferEntry (Just size) comp (Set.fromList missing') waiting - Nothing -> -- it's never even been added, so there's nothing to do. + Nothing -> + -- it's never even been added, so there's nothing to do. pure () tryFlushTermBuffer :: EDB m => Hash -> m () @@ -386,8 +402,9 @@ sqliteCodebase root = do ( withBuffer declBuffer h \(BufferEntry size comp missing waiting) -> do let size' = Just n' case size of - Just n | n /= n' -> - error $ "targetSize for type " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size' + Just n + | n /= n' -> + error $ "targetSize for type " ++ show h ++ " was " ++ show size ++ ", but now " ++ show size' _ -> pure () let comp' = Map.insert i decl comp moreMissing <- @@ -444,6 +461,21 @@ sqliteCodebase root = do =<< Cv.causalbranch2to1 getCycleLen getDeclType b Nothing -> pure Nothing + putBranch :: MonadIO m => Branch m -> m () + putBranch branch1 = + runDB conn + . void + . Ops.saveBranch + . 
Cv.causalbranch1to2 + $ Branch.transform (lift . lift) branch1 + + isCausalHash :: MonadIO m => Branch.Hash -> m Bool + isCausalHash (Causal.RawHash h) = + runDB conn $ + Q.loadHashIdByHash (Cv.hash1to2 h) >>= \case + Nothing -> pure False + Just hId -> Q.isCausalHash hId + dependentsImpl :: MonadIO m => Reference -> m (Set Reference.Id) dependentsImpl r = runDB conn $ @@ -463,32 +495,35 @@ sqliteCodebase root = do >>= traverse (Cv.referenceid2to1 getCycleLen) getWatch :: MonadIO m => UF.WatchKind -> Reference.Id -> m (Maybe (Term Symbol Ann)) - getWatch k r@(Reference.Id h _i _n) | elem k standardWatchKinds = - runDB' conn $ - Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) - >>= Cv.term2to1 h getCycleLen getDeclType + getWatch k r@(Reference.Id h _i _n) + | elem k standardWatchKinds = + runDB' conn $ + Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) + >>= Cv.term2to1 h getCycleLen getDeclType getWatch _unknownKind _ = pure Nothing standardWatchKinds = [UF.RegularWatch, UF.TestWatch] putWatch :: MonadIO m => UF.WatchKind -> Reference.Id -> Term Symbol Ann -> m () - putWatch k r@(Reference.Id h _i _n) tm | elem k standardWatchKinds = - runDB conn $ - Ops.saveWatch - (Cv.watchKind1to2 k) - (Cv.referenceid1to2 r) - (Cv.term1to2 h tm) + putWatch k r@(Reference.Id h _i _n) tm + | elem k standardWatchKinds = + runDB conn $ + Ops.saveWatch + (Cv.watchKind1to2 k) + (Cv.referenceid1to2 r) + (Cv.term1to2 h tm) putWatch _unknownKind _ _ = pure () getReflog :: MonadIO m => m [Reflog.Entry] - getReflog = liftIO $ - ( do - contents <- TextIO.readFile (reflogPath root) - let lines = Text.lines contents - let entries = parseEntry <$> lines - pure entries - ) - `catchIO` const (pure []) + getReflog = + liftIO $ + ( do + contents <- TextIO.readFile (reflogPath root) + let lines = Text.lines contents + let entries = parseEntry <$> lines + pure entries + ) + `catchIO` const (pure []) where parseEntry t = fromMaybe (err t) (Reflog.fromText t) err t = @@ -570,16 
+605,16 @@ sqliteCodebase root = do -- primarily with commit hashes. -- Arya leaning towards doing the same for Unison. - let - finalizer :: MonadIO m => m () + let finalizer :: MonadIO m => m () finalizer = do liftIO $ Sqlite.close conn decls <- readTVarIO declBuffer terms <- readTVarIO termBuffer let printBuffer header b = - liftIO if b /= mempty - then putStrLn header >> putStrLn "" >> print b - else pure () + liftIO + if b /= mempty + then putStrLn header >> putStrLn "" >> print b + else pure () printBuffer "Decls:" decls printBuffer "Terms:" terms @@ -595,6 +630,8 @@ sqliteCodebase root = do putRootBranch rootBranchUpdates getBranchForHash + putBranch + isCausalHash dependentsImpl syncFromDirectory syncToDirectory diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs index 1df25cc78a..fca53720b6 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs @@ -1,142 +1,124 @@ -module Unison.Codebase.SqliteCodebase.SyncEphemeral where +{-# LANGUAGE ScopedTypeVariables #-} --- syncToDirectory :: forall m v a --- . MonadIO m --- => Var v --- => S.Format v --- -> S.Format a --- -> CodebasePath --- -> CodebasePath --- -> SyncMode --- -> Branch m --- -> m () --- syncToDirectory fmtV fmtA = do --- sync <- Sync22.sync22 --- where --- -- Use State and Lens to do some specified thing at most once, to create a file. --- ifNeedsSyncing :: forall m s h. (MonadIO m, MonadState s m, Ord h) --- => h --- -> CodebasePath --- -> (CodebasePath -> h -> FilePath) -- done if this filepath exists --- -> SimpleLens s (Set h) -- lens to track if `h` is already done --- -> (h -> m ()) -- do! --- -> m () -- don't --- -> m () --- ifNeedsSyncing h destPath getFilename l doSync dontSync = --- ifM (use (l . 
to (Set.member h))) dontSync $ do --- l %= Set.insert h --- if mode == SyncMode.Complete then doSync h --- else ifM (doesFileExist (getFilename destPath h)) dontSync (doSync h) --- processBranches :: forall m --- . MonadIO m --- => MonadState SyncedEntities m --- => MonadWriter (BD.Dependencies, Set Error) m --- => [(Branch.Hash, Maybe (m (Branch m)))] --- -> m () --- processBranches [] = pure () --- -- for each branch, --- processBranches ((h, mmb) : rest) = --- let tellError = Writer.tell . (mempty,) . Set.singleton --- tellDependencies = Writer.tell . (,mempty) in --- -- if hash exists at the destination, skip it, mark it done --- ifNeedsSyncing h destPath branchPath syncedBranches --- (\h -> --- -- else if hash exists at the source, enqueue its dependencies, copy it, mark it done --- ifM (doesFileExist (branchPath srcPath h)) --- (do --- (branches, deps) <- BD.fromRawCausal <$> --- (deserializeRawBranchDependencies tellError srcPath h) --- copyFileWithParents (branchPath srcPath h) (branchPath destPath h) --- tellDependencies deps --- processBranches (branches ++ rest)) --- -- else if it's in memory, enqueue its dependencies, write it, mark it done --- case mmb of --- Just mb -> do --- b <- mb --- let (branches, deps) = BD.fromBranch b --- let causalRaw = Branch.toCausalRaw b --- serializeRawBranch destPath h causalRaw --- tellDependencies deps --- processBranches (branches ++ rest) --- -- else -- error? 
--- Nothing -> do --- tellError (MissingBranch h) --- processBranches rest --- ) --- (processBranches rest) +module Unison.Codebase.SqliteCodebase.SyncEphemeral where --- -- syncToDirectory' (S.get fmtV) (S.get fmtA) +import Control.Monad.Except (ExceptT, throwError) +import qualified Control.Monad.Except as Except +import Control.Monad.Extra (ifM) +import Control.Monad.Reader (ReaderT, runReaderT) +import Control.Monad.Trans (lift) +import Control.Monad.Trans.Except (runExceptT, withExceptT) +import qualified Data.Either.Extra as Either +import qualified Data.Map as Map +import Data.Set (Set) +import U.Codebase.HashTags (CausalHash (CausalHash)) +import qualified U.Codebase.Sqlite.Queries as Q +import qualified U.Codebase.Sqlite.Sync22 as Sync22 +import qualified U.Codebase.Sync as Sync +import Unison.Codebase (Codebase, CodebasePath) +import qualified Unison.Codebase as Codebase +import Unison.Codebase.Branch (Branch (Branch), Branch0) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.SqliteCodebase as SqliteCodebase +import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv +import Unison.Codebase.SyncMode (SyncMode) +import Unison.Hash (Hash) +import Unison.Prelude (MonadIO) +import qualified Data.Validation as Validation --- -- data Error = Error () --- -- = MissingBranch Branch.Hash --- -- | MissingPatch Branch.EditHash --- -- | MissingTerm Reference.Id --- -- | MissingTypeOfTerm Reference.Id --- -- | MissingDecl Reference.Id --- -- | InvalidBranch Branch.Hash --- -- | InvalidTerm Reference.Id --- -- | InvalidTypeOfTerm Reference.Id --- -- | InvalidDecl Reference.Id --- -- deriving (Eq, Ord, Show) +data Dependencies = Dependencies + { definitions :: Set Hash, + branches :: Set Hash + } --- syncToDirectory' :: forall m v a --- . 
MonadIO m --- => Var v --- => S.Get v --- -> S.Get a --- -> CodebasePath --- -> CodebasePath --- -> SyncMode --- -> Branch m --- -> m () --- syncToDirectory' getV getA srcPath destPath mode newRoot = --- let warnMissingEntities = False in --- flip evalStateT mempty $ do -- MonadState s m --- (deps, errors) <- time "Sync Branches" $ execWriterT $ --- processBranches [(Branch.headHash newRoot --- ,Just . pure . Branch.transform (lift . lift) $ newRoot)] --- errors' <- time "Sync Definitions" $ --- execWriterT $ processDependencies (BD.to' deps) --- time "Write indices" $ do --- lift . writeDependentsIndex =<< use dependentsIndex --- lift . writeTypeIndex =<< use typeIndex --- lift . writeTypeMentionsIndex =<< use typeMentionsIndex --- when (warnMissingEntities) $ for_ (errors <> errors') traceShowM +data Error + = Sync22Error Sync22.Error + | SrcMissingSchema [(Q.SchemaType, Q.SchemaName)] + | DestMissingSchema [(Q.SchemaType, Q.SchemaName)] + | DisappearingBranch CausalHash + deriving (Show) --- processBranches :: forall m --- . MonadIO m --- => MonadState SyncedEntities m --- => MonadWriter (BD.Dependencies, Set Error) m --- => [(Branch.Hash, Maybe (m (Branch m)))] --- -> m () --- processBranches [] = pure () --- -- for each branch, --- processBranches ((h, mmb) : rest) = --- let tellError = Writer.tell . (mempty,) . Set.singleton --- tellDependencies = Writer.tell . 
(,mempty) in --- -- if hash exists at the destination, skip it, mark it done --- ifNeedsSyncing h destPath branchPath syncedBranches --- (\h -> --- -- else if hash exists at the source, enqueue its dependencies, copy it, mark it done --- ifM (doesFileExist (branchPath srcPath h)) --- (do --- (branches, deps) <- BD.fromRawCausal <$> --- (deserializeRawBranchDependencies tellError srcPath h) --- copyFileWithParents (branchPath srcPath h) (branchPath destPath h) --- tellDependencies deps --- processBranches (branches ++ rest)) --- -- else if it's in memory, enqueue its dependencies, write it, mark it done --- case mmb of --- Just mb -> do --- b <- mb --- let (branches, deps) = BD.fromBranch b --- let causalRaw = Branch.toCausalRaw b --- serializeRawBranch destPath h causalRaw --- tellDependencies deps --- processBranches (branches ++ rest) --- -- else -- error? --- Nothing -> do --- tellError (MissingBranch h) --- processBranches rest --- ) --- (processBranches rest) \ No newline at end of file +-- does destPath need to be a codebase? +syncToDirectory :: + forall m. + MonadIO m => + CodebasePath -> + CodebasePath -> + SyncMode -> + Branch m -> + Sync.Progress m Sync22.Entity -> + m () +syncToDirectory srcPath destPath _mode newRoot progress = do + result <- runExceptT do + syncEnv@(Sync22.Env srcConn _ _) <- + Sync22.Env + <$> SqliteCodebase.unsafeGetConnection srcPath + <*> SqliteCodebase.unsafeGetConnection destPath + <*> pure (16 * 1024 * 1024) + (closeSrc, src) <- + lift (SqliteCodebase.sqliteCodebase srcPath) + >>= Except.liftEither . Either.mapLeft SrcMissingSchema + (closeDest, dest) <- + lift (SqliteCodebase.sqliteCodebase destPath) + >>= Except.liftEither . 
Either.mapLeft DestMissingSchema + -- we want to use sync22 wherever possible + -- so for each branch, we'll check if it exists in the destination branch + -- or if it exists in the source branch, then we can sync22 it + -- oh god but we have to figure out the dbid + -- if it doesn't exist in the dest or source branch, + -- then just use putBranch to the dest + let branchDeps :: forall m. Applicative m => Branch0 m -> [(Branch.Hash, m (Branch m))] + branchDeps = + map (\b -> (Branch.headHash b, pure b)) + . Map.elems + . Branch._children + causalDeps :: forall m. Applicative m => Branch m -> [(Branch.Hash, m (Branch m))] + causalDeps (Branch c) = case c of + Causal.One _h b -> branchDeps b + Causal.Cons _h b tail -> processTails [tail] b + Causal.Merge _h b tails -> processTails (Map.toList tails) b + where + processTails tails b = + let tails' = fmap (\(ht, mt) -> (ht, fmap Branch mt)) tails + deps = branchDeps b + in tails' ++ deps + let se :: forall m a. Functor m => (ExceptT Sync22.Error m a -> ExceptT Error m a) + se = withExceptT Sync22Error + let r :: forall m a. (ReaderT Sync22.Env m a -> m a) + r = flip runReaderT syncEnv + processBranches :: + forall m v a. + MonadIO m => + Sync.Sync (ReaderT Sync22.Env (ExceptT Sync22.Error m)) Sync22.Entity -> + Sync.Progress (ReaderT Sync22.Env (ExceptT Sync22.Error m)) Sync22.Entity -> + Codebase m v a -> + Codebase m v a -> + [(Branch.Hash, m (Branch m))] -> + ExceptT Error m () + processBranches _ _ _ _ [] = pure () + processBranches sync progress src dest ((h, mb) : rest) = do + ifM @(ExceptT Error m) + (lift $ Codebase.branchExists dest h) + (processBranches sync progress src dest rest) + ( ifM + (lift $ Codebase.branchExists src h) + ( let h2 = CausalHash . Cv.hash1to2 $ Causal.unRawHash h + in lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) + >>= \case + Nothing -> throwError $ DisappearingBranch h2 + Just chId -> se . 
r $ Sync.sync sync progress [Sync22.C chId] + ) + ( lift mb >>= \b -> do + let deps = causalDeps b + if (null deps) + then lift $ Codebase.putBranch dest b + else processBranches @m sync progress src dest (deps ++ (h, mb) : rest) + ) + ) + sync <- se . r $ Sync22.sync22 + let progress' = Sync.transformProgress (lift . lift) progress + processBranches sync progress' src dest [(Branch.headHash newRoot, pure newRoot)] + lift closeSrc + lift closeDest + pure $ Validation.valueOr (error . show) result \ No newline at end of file diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index da8c405498..9c296d975b 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -280,6 +280,7 @@ library unison-core, unison-codebase, unison-codebase-sqlite, + unison-codebase-sync, unison-util, unison-util-serialization From d4b1d0c2a8b8ad88112f3401e4296f81c40104b0 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 15 Mar 2021 00:48:15 -0400 Subject: [PATCH 123/225] syncToDirectory & initial test --- .../src/Unison/Codebase/SqliteCodebase.hs | 154 +++++++++++++++++- .../Codebase/SqliteCodebase/SyncEphemeral.hs | 104 +----------- parser-typechecker/tests/Suite.hs | 2 + .../tests/Unison/Test/GitSimple.hs | 135 +++++++++++++++ .../unison-parser-typechecker.cabal | 1 + 5 files changed, 288 insertions(+), 108 deletions(-) create mode 100644 parser-typechecker/tests/Unison/Test/GitSimple.hs diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index f27d64b86e..40c5c7890e 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -16,9 +16,12 @@ import qualified Control.Concurrent import qualified Control.Exception import Control.Monad (filterM, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) -import 
Control.Monad.Extra (unlessM) +import qualified Control.Monad.Except as Except +import Control.Monad.Extra (ifM, unlessM) import qualified Control.Monad.Extra as Monad import Control.Monad.Reader (ReaderT (runReaderT)) +import Control.Monad.State (MonadState) +import qualified Control.Monad.State as State import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT) import Data.Bifunctor (Bifunctor (first), second) @@ -34,30 +37,34 @@ import Data.String (IsString (fromString)) import Data.Text (Text) import qualified Data.Text as Text import qualified Data.Text.IO as TextIO +import qualified Data.Validation as Validation import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite import qualified System.Exit as SysExit import System.FilePath (()) import qualified System.FilePath as FilePath -import U.Codebase.HashTags (CausalHash (unCausalHash)) +import U.Codebase.HashTags (CausalHash (CausalHash, unCausalHash)) import qualified U.Codebase.Reference as C.Reference import qualified U.Codebase.Sqlite.ObjectType as OT import U.Codebase.Sqlite.Operations (EDB) import qualified U.Codebase.Sqlite.Operations as Ops import qualified U.Codebase.Sqlite.Queries as Q +import qualified U.Codebase.Sqlite.Sync22 as Sync22 +import qualified U.Codebase.Sync as Sync import qualified U.Util.Hash as H2 import qualified U.Util.Monoid as Monoid import qualified U.Util.Set as Set import qualified Unison.Builtin as Builtins import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 -import Unison.Codebase.Branch (Branch) +import Unison.Codebase.Branch (Branch (..), Branch0) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Causal as Causal import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv +import qualified 
Unison.Codebase.SqliteCodebase.SyncEphemeral as SyncEphemeral import Unison.Codebase.SyncMode (SyncMode) import qualified Unison.ConstructorType as CT import Unison.DataDeclaration (Decl) @@ -483,10 +490,16 @@ sqliteCodebase root = do =<< Ops.dependents (Cv.reference1to2 r) syncFromDirectory :: MonadIO m => Codebase1.CodebasePath -> SyncMode -> Branch m -> m () - syncFromDirectory = error "todo" + syncFromDirectory srcRoot syncMode b = + flip State.evalStateT emptySyncProgressState $ + syncToDirectory' syncProgress srcRoot root syncMode $ + Branch.transform lift b syncToDirectory :: MonadIO m => Codebase1.CodebasePath -> SyncMode -> Branch m -> m () - syncToDirectory = error "todo" + syncToDirectory destRoot syncMode b = + flip State.evalStateT emptySyncProgressState $ + syncToDirectory' syncProgress root destRoot syncMode $ + Branch.transform lift b watches :: MonadIO m => UF.WatchKind -> m [Reference.Id] watches w = @@ -598,6 +611,90 @@ sqliteCodebase root = do cs <- Ops.causalHashesByPrefix (Cv.sbh1to2 sh) pure $ Set.map (Causal.RawHash . Cv.hash2to1 . unCausalHash) cs + -- does destPath need to be a codebase? + syncToDirectory' :: + forall m. + MonadIO m => + Sync.Progress m Sync22.Entity -> + CodebasePath -> + CodebasePath -> + SyncMode -> + Branch m -> + m () + syncToDirectory' progress srcPath destPath _mode newRoot = do + result <- runExceptT do + syncEnv@(Sync22.Env srcConn _ _) <- + Sync22.Env + <$> unsafeGetConnection srcPath + <*> unsafeGetConnection destPath + <*> pure (16 * 1024 * 1024) + (closeSrc, src) <- + lift (sqliteCodebase srcPath) + >>= Except.liftEither . Either.mapLeft SyncEphemeral.SrcMissingSchema + (closeDest, dest) <- + lift (sqliteCodebase destPath) + >>= Except.liftEither . 
Either.mapLeft SyncEphemeral.DestMissingSchema + -- we want to use sync22 wherever possible + -- so for each branch, we'll check if it exists in the destination branch + -- or if it exists in the source branch, then we can sync22 it + -- oh god but we have to figure out the dbid + -- if it doesn't exist in the dest or source branch, + -- then just use putBranch to the dest + let branchDeps :: forall m. Applicative m => Branch0 m -> [(Branch.Hash, m (Branch m))] + branchDeps = + map (\b -> (Branch.headHash b, pure b)) + . Map.elems + . Branch._children + causalDeps :: forall m. Applicative m => Branch m -> [(Branch.Hash, m (Branch m))] + causalDeps (Branch.Branch c) = case c of + Causal.One _h b -> branchDeps b + Causal.Cons _h b tail -> processTails [tail] b + Causal.Merge _h b tails -> processTails (Map.toList tails) b + where + processTails tails b = + let tails' = fmap (\(ht, mt) -> (ht, fmap Branch mt)) tails + deps = branchDeps b + in tails' ++ deps + let se :: forall m a. Functor m => (ExceptT Sync22.Error m a -> ExceptT SyncEphemeral.Error m a) + se = Except.withExceptT SyncEphemeral.Sync22Error + let r :: forall m a. (ReaderT Sync22.Env m a -> m a) + r = flip runReaderT syncEnv + processBranches :: + forall m v a. + MonadIO m => + Sync.Sync (ReaderT Sync22.Env (ExceptT Sync22.Error m)) Sync22.Entity -> + Sync.Progress (ReaderT Sync22.Env (ExceptT Sync22.Error m)) Sync22.Entity -> + Codebase1.Codebase m v a -> + Codebase1.Codebase m v a -> + [(Branch.Hash, m (Branch m))] -> + ExceptT SyncEphemeral.Error m () + processBranches _ _ _ _ [] = pure () + processBranches sync progress src dest ((h, mb) : rest) = do + ifM @(ExceptT SyncEphemeral.Error m) + (lift $ Codebase1.branchExists dest h) + (processBranches sync progress src dest rest) + ( ifM + (lift $ Codebase1.branchExists src h) + ( let h2 = CausalHash . 
Cv.hash1to2 $ Causal.unRawHash h + in lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) + >>= \case + Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch h2 + Just chId -> se . r $ Sync.sync sync progress [Sync22.C chId] + ) + ( lift mb >>= \b -> do + let deps = causalDeps b + if (null deps) + then lift $ Codebase1.putBranch dest b + else processBranches @m sync progress src dest (deps ++ (h, mb) : rest) + ) + ) + sync <- se . r $ Sync22.sync22 + let progress' = Sync.transformProgress (lift . lift) progress + processBranches sync progress' src dest [(Branch.headHash newRoot, pure newRoot)] + lift closeSrc + lift closeDest + pure $ Validation.valueOr (error . show) result + -- Do we want to include causal hashes here or just namespace hashes? -- Could we expose just one or the other of them to the user? -- Git uses commit hashes and tree hashes (analogous to causal hashes @@ -658,3 +755,50 @@ runDB :: MonadIO m => Connection -> ReaderT Connection (ExceptT Ops.Error m) a - runDB conn = (runExceptT >=> err) . 
flip runReaderT conn where err = \case Left err -> error $ show err; Right a -> pure a + +data SyncProgressState = SyncProgressState + { needEntities :: Maybe (Set Sync22.Entity), + doneEntities :: Either Int (Set Sync22.Entity) + } + +emptySyncProgressState :: SyncProgressState +emptySyncProgressState = SyncProgressState (Just mempty) (Right mempty) + +syncProgress :: MonadState SyncProgressState m => MonadIO m => Sync.Progress m Sync22.Entity +syncProgress = Sync.Progress need done allDone + where + maxTrackedHashCount = 1024 * 1024 + need, done :: (MonadState SyncProgressState m, MonadIO m) => Sync22.Entity -> m () + need h = do + State.get >>= \case + SyncProgressState Nothing Left {} -> pure () + SyncProgressState (Just need) (Right done) -> + if Set.size need + Set.size done > maxTrackedHashCount + then State.put $ SyncProgressState Nothing (Left $ Set.size done) + else + if Set.member h done + then pure () + else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) + SyncProgressState _ _ -> undefined + State.get >>= liftIO . putStrLn . renderState + + done h = do + State.get >>= \case + SyncProgressState Nothing (Left count) -> + State.put $ SyncProgressState Nothing (Left (count + 1)) + SyncProgressState (Just need) (Right done) -> + State.put $ SyncProgressState (Just $ Set.delete h need) (Right $ Set.insert h done) + SyncProgressState _ _ -> undefined + State.get >>= liftIO . putStrLn . renderState + + allDone = liftIO $ putStrLn "\rSync complete." 
+ + renderState = \case + SyncProgressState Nothing (Left doneCount) -> + "Synced " ++ show doneCount ++ " entities" + SyncProgressState (Just need) (Right done) -> + "Synced " ++ show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need) + SyncProgressState Nothing Right {} -> + "invalid SyncProgressState Nothing Right{}" + SyncProgressState Just {} Left {} -> + "invalid SyncProgressState Just{} Left{}" \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs index fca53720b6..11b08ab98d 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs @@ -2,30 +2,11 @@ module Unison.Codebase.SqliteCodebase.SyncEphemeral where -import Control.Monad.Except (ExceptT, throwError) -import qualified Control.Monad.Except as Except -import Control.Monad.Extra (ifM) -import Control.Monad.Reader (ReaderT, runReaderT) -import Control.Monad.Trans (lift) -import Control.Monad.Trans.Except (runExceptT, withExceptT) -import qualified Data.Either.Extra as Either -import qualified Data.Map as Map import Data.Set (Set) -import U.Codebase.HashTags (CausalHash (CausalHash)) +import U.Codebase.HashTags (CausalHash) import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Sync22 as Sync22 -import qualified U.Codebase.Sync as Sync -import Unison.Codebase (Codebase, CodebasePath) -import qualified Unison.Codebase as Codebase -import Unison.Codebase.Branch (Branch (Branch), Branch0) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Codebase.SqliteCodebase as SqliteCodebase -import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv -import Unison.Codebase.SyncMode (SyncMode) import Unison.Hash (Hash) -import Unison.Prelude (MonadIO) -import qualified 
Data.Validation as Validation data Dependencies = Dependencies { definitions :: Set Hash, @@ -39,86 +20,3 @@ data Error | DisappearingBranch CausalHash deriving (Show) --- does destPath need to be a codebase? -syncToDirectory :: - forall m. - MonadIO m => - CodebasePath -> - CodebasePath -> - SyncMode -> - Branch m -> - Sync.Progress m Sync22.Entity -> - m () -syncToDirectory srcPath destPath _mode newRoot progress = do - result <- runExceptT do - syncEnv@(Sync22.Env srcConn _ _) <- - Sync22.Env - <$> SqliteCodebase.unsafeGetConnection srcPath - <*> SqliteCodebase.unsafeGetConnection destPath - <*> pure (16 * 1024 * 1024) - (closeSrc, src) <- - lift (SqliteCodebase.sqliteCodebase srcPath) - >>= Except.liftEither . Either.mapLeft SrcMissingSchema - (closeDest, dest) <- - lift (SqliteCodebase.sqliteCodebase destPath) - >>= Except.liftEither . Either.mapLeft DestMissingSchema - -- we want to use sync22 wherever possible - -- so for each branch, we'll check if it exists in the destination branch - -- or if it exists in the source branch, then we can sync22 it - -- oh god but we have to figure out the dbid - -- if it doesn't exist in the dest or source branch, - -- then just use putBranch to the dest - let branchDeps :: forall m. Applicative m => Branch0 m -> [(Branch.Hash, m (Branch m))] - branchDeps = - map (\b -> (Branch.headHash b, pure b)) - . Map.elems - . Branch._children - causalDeps :: forall m. Applicative m => Branch m -> [(Branch.Hash, m (Branch m))] - causalDeps (Branch c) = case c of - Causal.One _h b -> branchDeps b - Causal.Cons _h b tail -> processTails [tail] b - Causal.Merge _h b tails -> processTails (Map.toList tails) b - where - processTails tails b = - let tails' = fmap (\(ht, mt) -> (ht, fmap Branch mt)) tails - deps = branchDeps b - in tails' ++ deps - let se :: forall m a. Functor m => (ExceptT Sync22.Error m a -> ExceptT Error m a) - se = withExceptT Sync22Error - let r :: forall m a. 
(ReaderT Sync22.Env m a -> m a) - r = flip runReaderT syncEnv - processBranches :: - forall m v a. - MonadIO m => - Sync.Sync (ReaderT Sync22.Env (ExceptT Sync22.Error m)) Sync22.Entity -> - Sync.Progress (ReaderT Sync22.Env (ExceptT Sync22.Error m)) Sync22.Entity -> - Codebase m v a -> - Codebase m v a -> - [(Branch.Hash, m (Branch m))] -> - ExceptT Error m () - processBranches _ _ _ _ [] = pure () - processBranches sync progress src dest ((h, mb) : rest) = do - ifM @(ExceptT Error m) - (lift $ Codebase.branchExists dest h) - (processBranches sync progress src dest rest) - ( ifM - (lift $ Codebase.branchExists src h) - ( let h2 = CausalHash . Cv.hash1to2 $ Causal.unRawHash h - in lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) - >>= \case - Nothing -> throwError $ DisappearingBranch h2 - Just chId -> se . r $ Sync.sync sync progress [Sync22.C chId] - ) - ( lift mb >>= \b -> do - let deps = causalDeps b - if (null deps) - then lift $ Codebase.putBranch dest b - else processBranches @m sync progress src dest (deps ++ (h, mb) : rest) - ) - ) - sync <- se . r $ Sync22.sync22 - let progress' = Sync.transformProgress (lift . lift) progress - processBranches sync progress' src dest [(Branch.headHash newRoot, pure newRoot)] - lift closeSrc - lift closeDest - pure $ Validation.valueOr (error . 
show) result \ No newline at end of file diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs index 4f3835fb2e..e62823ec67 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -37,6 +37,7 @@ import qualified Unison.Test.ANF as ANF import qualified Unison.Test.MCode as MCode import qualified Unison.Test.VersionParser as VersionParser import qualified Unison.Test.Git as Git +import qualified Unison.Test.GitSimple as GitSimple test :: Bool -> Test () test rt = tests @@ -67,6 +68,7 @@ test rt = tests , UriParser.test , Context.test , Git.test + , GitSimple.test , TestIO.test , Name.test , VersionParser.test diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs new file mode 100644 index 0000000000..86b709b386 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -0,0 +1,135 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE QuasiQuotes #-} + +module Unison.Test.GitSimple where + +import Data.String.Here (iTrim) +import qualified Data.Text as Text +import EasyTest +import Shellmet () +import System.Directory (removeDirectoryRecursive) +import System.FilePath (()) +import qualified System.IO.Temp as Temp +import Unison.Codebase (Codebase, CodebasePath) +import qualified Unison.Codebase.SqliteCodebase as FC +import qualified Unison.Codebase.TranscriptParser as TR +import Unison.Parser (Ann) +import Unison.Prelude +import Unison.Symbol (Symbol) +import Data.Typeable (Typeable) + +test :: Test () +test = scope "git-simple" . 
tests $ [testPull] + +-- [ testPull +-- , testPush +-- , syncComplete +-- , syncTestResults +-- ] + +traceTranscriptOutput :: Bool +traceTranscriptOutput = False + +authorTranscript :: (Semigroup a1, IsString a1, Show a2, Typeable a2, Typeable a1) => a2 -> a1 +authorTranscript repo = + [iTrim| +```ucm:hide +.builtin> alias.type ##Nat Nat +.builtin> alias.term ##Nat.+ Nat.+ +``` +```unison +unique type outside.A = A Nat +unique type outside.B = B Nat Nat +outside.c = 3 +outside.d = 4 + +unique type inside.X = X outside.A +inside.y = c + c +``` +```ucm +.myLib> debug.file +.myLib> add +.myLib> push ${repo} +``` +|] + +userTranscript :: (Semigroup a1, IsString a1, Show a2, Typeable a2, Typeable a1) => a2 -> a1 +userTranscript repo = + [iTrim| +```ucm:hide +.builtin> alias.type ##Nat Nat +.builtin> alias.term ##Nat.+ Nat.+ +``` +```ucm +.yourLib> pull ${repo}:.inside +``` +```unison +> y + #msp7bv40rv + 1 +``` +|] + +-- goal of this test is to make sure that pull doesn't grab a ton of unneeded +-- dependencies +testPull :: Test () +testPull = scope "pull" $ do + -- let's push a broader set of stuff, pull a narrower one (to a fresh codebase) + -- and verify that we have the definitions we expected and don't have some of + -- the ones we didn't expect. 
+ + -- put all our junk into here + tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" + + -- initialize author and user codebases + authorCodebase <- io $ snd <$> initCodebase tmp "author" + (_userDir, userCodebase) <- io $ initCodebase tmp "user" + + -- initialize git repo + let repo = tmp "repo.git" + io $ "git" ["init", "--bare", Text.pack repo] + + -- run author/push transcript + authorOutput <- runTranscript tmp authorCodebase (authorTranscript repo) + + -- -- check out the resulting repo so we can inspect it + -- io $ "git" ["clone", Text.pack repo, Text.pack $ tmp "repo" ] + + -- run user/pull transcript + userOutput <- runTranscript tmp userCodebase (userTranscript repo) + + io $ + writeFile + "unison-src/transcripts/GitSimple.hs.output.md" + (authorOutput <> "\n-------\n" <> userOutput) + + -- -- inspect user codebase + -- scope "user-should-have" $ + -- for userShouldHave $ \path -> + -- scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect + -- scope "user-should-not-have" $ -- this definitely won't pass with current implementation + -- for userShouldNotHave $ \path -> + -- scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect . not + + -- if we haven't crashed, clean up! + io $ removeDirectoryRecursive tmp + +-- initialize a fresh codebase +initCodebaseDir :: FilePath -> String -> IO CodebasePath +initCodebaseDir tmpDir name = fst <$> initCodebase tmpDir name + +initCodebase :: FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) +initCodebase tmpDir name = do + let codebaseDir = tmpDir name + c <- FC.initCodebase codebaseDir + pure (codebaseDir, c) + +-- run a transcript on an existing codebase +runTranscript :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> String -> m String +runTranscript tmpDir c transcript = do + let configFile = tmpDir ".unisonConfig" + -- transcript runner wants a "current directory" for I guess writing scratch files? 
+ let cwd = tmpDir "cwd" + let err err = error $ "Parse error: \n" <> show err + + -- parse and run the transcript + flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> + liftIO . fmap Text.unpack $ TR.run Nothing cwd configFile stanzas c diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 9c296d975b..3825eea148 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -353,6 +353,7 @@ executable tests Unison.Test.DataDeclaration Unison.Test.FileParser Unison.Test.Git + Unison.Test.GitSimple Unison.Test.Lexer Unison.Test.IO Unison.Test.MCode From 4e37ebdfdc6ffea41e79d884122bf80517ed166d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 16 Mar 2021 13:48:36 -0400 Subject: [PATCH 124/225] update a FileCodebase -> SqliteCodebase dependency, and more remove stray(?) FC.updateCausalHead call set root namespace after syncToDirectory --- .../U/Codebase/Sqlite/Operations.hs | 7 ++-- .../U/Codebase/Sqlite/Queries.hs | 6 ++-- codebase2/codebase-sync/U/Codebase/Sync.hs | 1 + .../Unison/Codebase/Editor/HandleCommand.hs | 8 ++--- .../src/Unison/Codebase/SqliteCodebase.hs | 32 +++++++++++++------ .../Codebase/SqliteCodebase/Conversions.hs | 3 ++ .../src/Unison/Codebase/SqliteCodebase/Git.hs | 2 -- .../tests/Unison/Test/GitSimple.hs | 22 +++++++------ 8 files changed, 51 insertions(+), 30 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index e783a6e04f..019372c2b8 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -120,6 +120,7 @@ import qualified U.Util.Serialization as S import qualified U.Util.Set as Set import qualified U.Util.Term as TermUtil import qualified U.Util.Type as TypeUtil +import Control.Monad.Extra (ifM) -- * Error 
handling @@ -912,9 +913,9 @@ saveBranch (C.Causal hc he parents me) = do -- by checking if there are causal parents associated with hc parentChId <- liftQ (Q.saveCausalHash causalHash) -- test if the parent has been saved previously: - liftQ (Q.loadCausalParents parentChId) >>= \case - [] -> do c <- mcausal; snd <$> saveBranch c - _grandParents -> pure parentChId + ifM (liftQ . Q.isCausalHash $ Db.unCausalHashId parentChId) + (pure parentChId) + (do mcausal >>= fmap snd . saveBranch) parentCausalHashIds -> pure parentCausalHashIds boId <- diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index a8b811e138..e93d76a15c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -61,6 +61,9 @@ type EDB m = (DB m, Err m) type Err m = (MonadError Integrity m, HasCallStack) +debugQuery :: Bool +debugQuery = False + crashOnError :: Bool crashOnError = True @@ -598,9 +601,6 @@ queryOne = fmap fromJust queryExists :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m Bool queryExists q r = not . null . 
map (id @SQLData) <$> queryAtoms q r -debugQuery :: Bool -debugQuery = False - -- | composite input, composite List output query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] query q r = do diff --git a/codebase2/codebase-sync/U/Codebase/Sync.hs b/codebase2/codebase-sync/U/Codebase/Sync.hs index 7435250fb5..b5d43abaae 100644 --- a/codebase2/codebase-sync/U/Codebase/Sync.hs +++ b/codebase2/codebase-sync/U/Codebase/Sync.hs @@ -31,6 +31,7 @@ module U.Codebase.Sync where import Data.Foldable (traverse_) data TrySyncResult h = Missing [h] | Done | PreviouslyDone + deriving Show data Sync m h = Sync { trySync :: h -> m (TrySyncResult h) } diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs index df02d19daf..de033ecf2b 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs @@ -26,7 +26,7 @@ import Unison.Codebase ( Codebase ) import qualified Unison.Codebase as Codebase import Unison.Codebase.Branch ( Branch ) import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.FileCodebase.Git as Git +import qualified Unison.Codebase.SqliteCodebase.Git as Git import Unison.Parser ( Ann ) import qualified Unison.Parser as Parser import qualified Unison.Parsers as Parsers @@ -120,11 +120,11 @@ commandLine config awaitInput setBranchRef rt notifyUser notifyNumbered loadSour setBranchRef branch Codebase.putRootBranch codebase branch ViewRemoteBranch ns -> - lift $ runExceptT $ Git.viewRemoteBranch undefined ns + lift $ runExceptT $ Git.viewRemoteBranch ns ImportRemoteBranch ns syncMode -> - lift $ runExceptT $ Git.importRemoteBranch codebase undefined ns syncMode + lift $ runExceptT $ Git.importRemoteBranch codebase ns syncMode SyncRemoteRootBranch repo branch syncMode -> - lift $ runExceptT $ Git.pushGitRootBranch codebase undefined branch repo syncMode 
+ lift $ runExceptT $ Git.pushGitRootBranch codebase branch repo syncMode LoadTerm r -> lift $ Codebase.getTerm codebase r LoadType r -> lift $ Codebase.getTypeDeclaration codebase r LoadTypeOfTerm r -> lift $ Codebase.getTypeOfTerm codebase r diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 40c5c7890e..bd880c66a0 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -8,10 +8,6 @@ module Unison.Codebase.SqliteCodebase where --- initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) - --- import qualified U.Codebase.Sqlite.Operations' as Ops - import qualified Control.Concurrent import qualified Control.Exception import Control.Monad (filterM, (>=>)) @@ -87,7 +83,7 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import qualified Unison.Util.Pretty as P import UnliftIO (MonadIO, catchIO, liftIO) -import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, getHomeDirectory) +import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesFileExist, getHomeDirectory) import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM @@ -135,6 +131,15 @@ initCodebaseAndExit mdir = do closeCodebase liftIO SysExit.exitSuccess +initSchemaIfNotExist :: MonadIO m => FilePath -> m () +initSchemaIfNotExist path = liftIO $ + unlessM (doesFileExist $ path FilePath.takeDirectory codebasePath) do + createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) + Control.Exception.bracket + (unsafeGetConnection path) + Sqlite.close + (runReaderT Q.createSchema) + -- initializes a new codebase here (i.e. 
`ucm -codebase dir init`) initCodebase :: MonadIO m => FilePath -> m (m (), Codebase1.Codebase m Symbol Ann) initCodebase path = do @@ -623,7 +628,8 @@ sqliteCodebase root = do m () syncToDirectory' progress srcPath destPath _mode newRoot = do result <- runExceptT do - syncEnv@(Sync22.Env srcConn _ _) <- + initSchemaIfNotExist destPath + syncEnv@(Sync22.Env srcConn destConn _) <- Sync22.Env <$> unsafeGetConnection srcPath <*> unsafeGetConnection destPath @@ -690,7 +696,15 @@ sqliteCodebase root = do ) sync <- se . r $ Sync22.sync22 let progress' = Sync.transformProgress (lift . lift) progress - processBranches sync progress' src dest [(Branch.headHash newRoot, pure newRoot)] + newRootHash = Branch.headHash newRoot + newRootHash2 = Cv.causalHash1to2 newRootHash + processBranches sync progress' src dest [(newRootHash, pure newRoot)] + -- set the root namespace + flip runReaderT destConn $ do + chId <- (Q.loadCausalHashIdByCausalHash newRootHash2) >>= \case + Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch newRootHash2 + Just chId -> pure chId + Q.setNamespaceRoot chId lift closeSrc lift closeDest pure $ Validation.valueOr (error . show) result @@ -780,7 +794,7 @@ syncProgress = Sync.Progress need done allDone then pure () else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) SyncProgressState _ _ -> undefined - State.get >>= liftIO . putStrLn . renderState + State.get >>= liftIO . putStr . ("\r"<>) . 
renderState done h = do State.get >>= \case @@ -797,7 +811,7 @@ syncProgress = Sync.Progress need done allDone SyncProgressState Nothing (Left doneCount) -> "Synced " ++ show doneCount ++ " entities" SyncProgressState (Just need) (Right done) -> - "Synced " ++ show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need) + "Synced " ++ show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need) ++ " entities" SyncProgressState Nothing Right {} -> "invalid SyncProgressState Nothing Right{}" SyncProgressState Just {} Left {} -> diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index f2fd562eaa..e5f8cc7364 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -349,6 +349,9 @@ hash2to1 (V2.Hash.Hash sbs) = V1.Hash (SBS.fromShort sbs) causalHash2to1 :: V2.CausalHash -> V1.Causal.RawHash V1.Branch.Raw causalHash2to1 = V1.Causal.RawHash . hash2to1 . V2.unCausalHash +causalHash1to2 :: V1.Causal.RawHash V1.Branch.Raw -> V2.CausalHash +causalHash1to2 = V2.CausalHash . hash1to2 . 
V1.Causal.unRawHash + ttype2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> V2.Term.Type V2.Symbol -> m (V1.Type.Type V1.Symbol Ann) ttype2to1 lookupSize = type2to1' (reference2to1 lookupSize) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs index e0b46742f2..06ac018a82 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs @@ -32,7 +32,6 @@ import qualified Unison.Codebase.Path as Path import Unison.Codebase.SyncMode ( SyncMode ) import Unison.Util.Timing (time) import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.FileCodebase.Common (updateCausalHead, branchHeadDir) -- | Sync elements as needed from a remote codebase into the local one. -- If `sbh` is supplied, we try to load the specified branch hash; @@ -109,7 +108,6 @@ pushGitRootBranch codebase branch repo syncMode = do let repoString = Text.unpack $ printRepo repo withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ lift (Codebase.syncToDirectory codebase remotePath syncMode branch) - updateCausalHead (branchHeadDir remotePath) (Branch._history branch) -- push staging area to remote withStatus ("Uploading to " ++ repoString ++ " ...") $ unlessM diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 86b709b386..20ebdaa500 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -3,8 +3,10 @@ module Unison.Test.GitSimple where +import Control.Lens (view, _1) import Data.String.Here (iTrim) import qualified Data.Text as Text +import Data.Typeable (Typeable) import EasyTest import Shellmet () import System.Directory (removeDirectoryRecursive) @@ -16,7 +18,6 @@ import qualified Unison.Codebase.TranscriptParser as TR import Unison.Parser (Ann) import Unison.Prelude import 
Unison.Symbol (Symbol) -import Data.Typeable (Typeable) test :: Test () test = scope "git-simple" . tests $ [testPull] @@ -80,8 +81,8 @@ testPull = scope "pull" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" -- initialize author and user codebases - authorCodebase <- io $ snd <$> initCodebase tmp "author" - (_userDir, userCodebase) <- io $ initCodebase tmp "user" + (_authorDir, closeAuthor, authorCodebase) <- io $ initCodebase tmp "author" + (_userDir, closeUser, userCodebase) <- io $ initCodebase tmp "user" -- initialize git repo let repo = tmp "repo.git" @@ -96,7 +97,10 @@ testPull = scope "pull" $ do -- run user/pull transcript userOutput <- runTranscript tmp userCodebase (userTranscript repo) - io $ + io do + closeAuthor + closeUser + writeFile "unison-src/transcripts/GitSimple.hs.output.md" (authorOutput <> "\n-------\n" <> userOutput) @@ -110,17 +114,17 @@ testPull = scope "pull" $ do -- scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect . not -- if we haven't crashed, clean up! 
- io $ removeDirectoryRecursive tmp + removeDirectoryRecursive tmp -- initialize a fresh codebase initCodebaseDir :: FilePath -> String -> IO CodebasePath -initCodebaseDir tmpDir name = fst <$> initCodebase tmpDir name +initCodebaseDir tmpDir name = view _1 <$> initCodebase tmpDir name -initCodebase :: FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) +initCodebase :: FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) initCodebase tmpDir name = do let codebaseDir = tmpDir name - c <- FC.initCodebase codebaseDir - pure (codebaseDir, c) + (close, c) <- FC.initCodebase codebaseDir + pure (codebaseDir, close, c) -- run a transcript on an existing codebase runTranscript :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> String -> m String From dbd3da0d8bd44e54ac83440cdf28e1e334f44551 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 17 Mar 2021 12:00:12 -0400 Subject: [PATCH 125/225] wip: treat empty git repo as empty codebase skip schema init if file exists fill in missing dependency stuff from SqliteCodebase.syncToDirectory'.processBranches --- .../{ => FileCodebase}/Branch/Dependencies.hs | 9 +- .../FileCodebase/SlimCopyRegenerateIndex.hs | 2 +- .../src/Unison/Codebase/Serialization/V1.hs | 2 +- .../src/Unison/Codebase/SqliteCodebase.hs | 73 ++++++++-------- .../SqliteCodebase/Branch/Dependencies.hs | 86 +++++++++++++++++++ .../src/Unison/Codebase/SqliteCodebase/Git.hs | 50 ++++++----- .../unison-parser-typechecker.cabal | 3 +- 7 files changed, 160 insertions(+), 65 deletions(-) rename parser-typechecker/src/Unison/Codebase/{ => FileCodebase}/Branch/Dependencies.hs (94%) create mode 100644 parser-typechecker/src/Unison/Codebase/SqliteCodebase/Branch/Dependencies.hs diff --git a/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Branch/Dependencies.hs similarity index 94% rename from parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs rename to 
parser-typechecker/src/Unison/Codebase/FileCodebase/Branch/Dependencies.hs index d54e2ace49..94e837cc90 100644 --- a/parser-typechecker/src/Unison/Codebase/Branch/Dependencies.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/Branch/Dependencies.hs @@ -2,7 +2,7 @@ {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE RecordWildCards #-} -module Unison.Codebase.Branch.Dependencies where +module Unison.Codebase.FileCodebase.Branch.Dependencies where import Data.Set (Set) import Data.Foldable (toList) @@ -39,7 +39,12 @@ data Dependencies' = Dependencies' { patches' :: [EditHash] , terms' :: [Reference.Id] , decls' :: [Reference.Id] - } deriving Show + } + deriving Show + deriving Generic + deriving Semigroup via GenericSemigroup Dependencies' + deriving Monoid via GenericMonoid Dependencies' + to' :: Dependencies -> Dependencies' to' Dependencies{..} = Dependencies' (toList patches) (toList terms) (toList decls) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs index 1fec405f9f..3ab155f5c4 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/SlimCopyRegenerateIndex.hs @@ -21,7 +21,7 @@ import Unison.Codebase ( CodebasePath ) import qualified Unison.Codebase.Causal as Causal import Unison.Codebase.Branch ( Branch(..) 
) import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Branch.Dependencies as BD +import qualified Unison.Codebase.FileCodebase.Branch.Dependencies as BD import qualified Unison.Codebase.Patch as Patch import qualified Unison.Codebase.Serialization as S import qualified Unison.Codebase.Serialization.V1 as V1 diff --git a/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs b/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs index e7875524c1..ecb7b0754a 100644 --- a/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs +++ b/parser-typechecker/src/Unison/Codebase/Serialization/V1.hs @@ -29,7 +29,7 @@ import qualified Data.Map as Map import Data.List ( elemIndex ) import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Branch.Dependencies as BD +import qualified Unison.Codebase.FileCodebase.Branch.Dependencies as BD import Unison.Codebase.Causal ( Raw(..) , RawHash(..) , unRawHash diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index bd880c66a0..cf6f8d1f59 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -54,8 +54,9 @@ import qualified U.Util.Set as Set import qualified Unison.Builtin as Builtins import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 -import Unison.Codebase.Branch (Branch (..), Branch0) +import Unison.Codebase.Branch (Branch (..)) import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.SqliteCodebase.Branch.Dependencies as BD import qualified Unison.Codebase.Causal as Causal import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) @@ -83,7 +84,7 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import qualified Unison.Util.Pretty as P import UnliftIO (MonadIO, catchIO, 
liftIO) -import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesFileExist, getHomeDirectory) +import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, doesFileExist, getHomeDirectory) import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM @@ -132,9 +133,10 @@ initCodebaseAndExit mdir = do liftIO SysExit.exitSuccess initSchemaIfNotExist :: MonadIO m => FilePath -> m () -initSchemaIfNotExist path = liftIO $ - unlessM (doesFileExist $ path FilePath.takeDirectory codebasePath) do +initSchemaIfNotExist path = liftIO do + unlessM (doesDirectoryExist $ path FilePath.takeDirectory codebasePath) $ createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) + unlessM (doesFileExist $ path codebasePath) $ Control.Exception.bracket (unsafeGetConnection path) Sqlite.close @@ -160,11 +162,7 @@ initCodebase path = do -- run sql create scripts createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) - liftIO $ - Control.Exception.bracket - (unsafeGetConnection path) - Sqlite.close - (runReaderT Q.createSchema) + initSchemaIfNotExist path (closeCodebase, theCodebase) <- sqliteCodebase path >>= \case @@ -646,21 +644,6 @@ sqliteCodebase root = do -- oh god but we have to figure out the dbid -- if it doesn't exist in the dest or source branch, -- then just use putBranch to the dest - let branchDeps :: forall m. Applicative m => Branch0 m -> [(Branch.Hash, m (Branch m))] - branchDeps = - map (\b -> (Branch.headHash b, pure b)) - . Map.elems - . Branch._children - causalDeps :: forall m. Applicative m => Branch m -> [(Branch.Hash, m (Branch m))] - causalDeps (Branch.Branch c) = case c of - Causal.One _h b -> branchDeps b - Causal.Cons _h b tail -> processTails [tail] b - Causal.Merge _h b tails -> processTails (Map.toList tails) b - where - processTails tails b = - let tails' = fmap (\(ht, mt) -> (ht, fmap Branch mt)) tails - deps = branchDeps b - in tails' ++ deps let se :: forall m a. 
Functor m => (ExceptT Sync22.Error m a -> ExceptT SyncEphemeral.Error m a) se = Except.withExceptT SyncEphemeral.Sync22Error let r :: forall m a. (ReaderT Sync22.Env m a -> m a) @@ -672,38 +655,50 @@ sqliteCodebase root = do Sync.Progress (ReaderT Sync22.Env (ExceptT Sync22.Error m)) Sync22.Entity -> Codebase1.Codebase m v a -> Codebase1.Codebase m v a -> - [(Branch.Hash, m (Branch m))] -> + [Entity m] -> ExceptT SyncEphemeral.Error m () processBranches _ _ _ _ [] = pure () - processBranches sync progress src dest ((h, mb) : rest) = do + processBranches sync progress src dest (B h mb : rest) = do ifM @(ExceptT SyncEphemeral.Error m) (lift $ Codebase1.branchExists dest h) (processBranches sync progress src dest rest) ( ifM (lift $ Codebase1.branchExists src h) ( let h2 = CausalHash . Cv.hash1to2 $ Causal.unRawHash h - in lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) - >>= \case - Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch h2 - Just chId -> se . r $ Sync.sync sync progress [Sync22.C chId] + in do + lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) + >>= \case + Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch h2 + Just chId -> se . 
r $ Sync.sync sync progress [Sync22.C chId] + processBranches sync progress src dest rest ) ( lift mb >>= \b -> do - let deps = causalDeps b - if (null deps) + let (branchDeps, BD.to' -> BD.Dependencies' es ts ds) = BD.fromBranch b + if null branchDeps && null es && null ts && null ds then lift $ Codebase1.putBranch dest b - else processBranches @m sync progress src dest (deps ++ (h, mb) : rest) + else let + bs = map (uncurry B) branchDeps + os = map O $ es <> ts <> ds + in processBranches @m sync progress src dest (os ++ bs ++ B h mb : rest) ) ) + processBranches sync progress src dest (O h : rest) = do + (runExceptT $ flip runReaderT srcConn (Q.expectHashIdByHash (Cv.hash1to2 h) >>= Q.expectObjectIdForAnyHashId)) >>= \case + Left e -> error $ show e + Right oId -> do + se . r $ Sync.sync sync progress [Sync22.O oId] + processBranches sync progress src dest rest sync <- se . r $ Sync22.sync22 let progress' = Sync.transformProgress (lift . lift) progress newRootHash = Branch.headHash newRoot newRootHash2 = Cv.causalHash1to2 newRootHash - processBranches sync progress' src dest [(newRootHash, pure newRoot)] + processBranches sync progress' src dest [B newRootHash (pure newRoot)] -- set the root namespace flip runReaderT destConn $ do - chId <- (Q.loadCausalHashIdByCausalHash newRootHash2) >>= \case - Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch newRootHash2 - Just chId -> pure chId + chId <- + (Q.loadCausalHashIdByCausalHash newRootHash2) >>= \case + Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch newRootHash2 + Just chId -> pure chId Q.setNamespaceRoot chId lift closeSrc lift closeDest @@ -770,6 +765,10 @@ runDB conn = (runExceptT >=> err) . 
flip runReaderT conn where err = \case Left err -> error $ show err; Right a -> pure a +data Entity m + = B Branch.Hash (m (Branch m)) + | O Hash + data SyncProgressState = SyncProgressState { needEntities :: Maybe (Set Sync22.Entity), doneEntities :: Either Int (Set Sync22.Entity) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Branch/Dependencies.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Branch/Dependencies.hs new file mode 100644 index 0000000000..df4dc0cef3 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Branch/Dependencies.hs @@ -0,0 +1,86 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Codebase.SqliteCodebase.Branch.Dependencies where + +import Data.Foldable (toList) +import Data.Map (Map) +import qualified Data.Map as Map +import Data.Monoid.Generic (GenericMonoid (..), GenericSemigroup (..)) +import Data.Set (Set) +import qualified Data.Set as Set +import GHC.Generics (Generic) +import Unison.Codebase.Branch (Branch (Branch), Branch0, EditHash) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Causal as Causal +import Unison.Codebase.Patch (Patch) +import Unison.NameSegment (NameSegment) +import Unison.Reference (Reference, pattern Derived) +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import qualified Unison.Util.Relation as R +import qualified Unison.Util.Star3 as Star3 +import Unison.Hash (Hash) + +type Branches m = [(Branch.Hash, m (Branch m))] + +data Dependencies = Dependencies + { patches :: Set EditHash + , terms :: Set Hash + , decls :: Set Hash + } + deriving Show + deriving Generic + deriving Semigroup via GenericSemigroup Dependencies + deriving Monoid via GenericMonoid Dependencies + +data Dependencies' = Dependencies' + { patches' :: [EditHash] + , terms' :: [Hash] + , decls' :: [Hash] + } + deriving (Eq, Show) + 
deriving Generic + deriving Semigroup via GenericSemigroup Dependencies' + deriving Monoid via GenericMonoid Dependencies' + + +to' :: Dependencies -> Dependencies' +to' Dependencies{..} = Dependencies' (toList patches) (toList terms) (toList decls) + +fromBranch :: Applicative m => Branch m -> (Branches m, Dependencies) +fromBranch (Branch c) = case c of + Causal.One _hh e -> fromBranch0 e + Causal.Cons _hh e (h, m) -> fromBranch0 e <> fromTails (Map.singleton h m) + Causal.Merge _hh e tails -> fromBranch0 e <> fromTails tails + where + fromTails m = ([(h, Branch <$> mc) | (h, mc) <- Map.toList m], mempty) + +fromBranch0 :: Applicative m => Branch0 m -> (Branches m, Dependencies) +fromBranch0 b = + ( fromChildren (Branch._children b) + , fromTermsStar (Branch._terms b) + <> fromTypesStar (Branch._types b) + <> fromEdits (Branch._edits b) ) + where + fromChildren :: Applicative m => Map NameSegment (Branch m) -> Branches m + fromChildren m = [ (Branch.headHash b, pure b) | b <- toList m ] + references :: Branch.Star r NameSegment -> [r] + references = toList . R.dom . Star3.d1 + mdValues :: Branch.Star r NameSegment -> [Reference] + mdValues = fmap snd . toList . R.ran . Star3.d3 + fromTermsStar :: Branch.Star Referent NameSegment -> Dependencies + fromTermsStar s = Dependencies mempty terms decls where + terms = Set.fromList $ + [ h | Referent.Ref (Derived h _ _) <- references s] ++ + [ h | (Derived h _ _) <- mdValues s] + decls = Set.fromList $ + [ h | Referent.Con (Derived h _i _n) _ _ <- references s ] + fromTypesStar :: Branch.Star Reference NameSegment -> Dependencies + fromTypesStar s = Dependencies mempty terms decls where + terms = Set.fromList [ h | (Derived h _ _) <- mdValues s ] + decls = Set.fromList [ h | (Derived h _ _) <- references s ] + fromEdits :: Map NameSegment (EditHash, m Patch) -> Dependencies + fromEdits m = Dependencies (Set.fromList . 
fmap fst $ toList m) mempty mempty diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs index 06ac018a82..1f57d23502 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs @@ -61,29 +61,33 @@ viewRemoteBranch' :: forall m. MonadIO m viewRemoteBranch' (repo, sbh, path) = do -- set up the cache dir remotePath <- time "Git fetch" $ pullBranch repo - (closeCodebase, codebase) <- lift (FC.sqliteCodebase remotePath) >>= - Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . fmap pure - -- try to load the requested branch from it - branch <- time "Git fetch (sbh)" $ case sbh of - -- load the root branch - Nothing -> lift (Codebase.getRootBranch codebase) >>= \case - Left Codebase.NoRootBranch -> pure Branch.empty - Left (Codebase.CouldntLoadRootBranch h) -> - throwError $ GitError.CouldntLoadRootBranch repo h - Left (Codebase.CouldntParseRootBranch s) -> - throwError $ GitError.CouldntParseRootBranch repo s - Right b -> pure b - -- load from a specific `ShortBranchHash` - Just sbh -> do - branchCompletions <- lift $ Codebase.branchHashesByPrefix codebase sbh - case toList branchCompletions of - [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - [h] -> (lift $ Codebase.getBranchForHash codebase h) >>= \case - Just b -> pure b - Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions - lift closeCodebase - pure (Branch.getAt' path branch, remotePath) + ifM (FC.codebaseExists remotePath) + (do + (closeCodebase, codebase) <- lift (FC.sqliteCodebase remotePath) >>= + Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . 
fmap pure + -- try to load the requested branch from it + branch <- time "Git fetch (sbh)" $ case sbh of + -- load the root branch + Nothing -> lift (Codebase.getRootBranch codebase) >>= \case + Left Codebase.NoRootBranch -> pure Branch.empty + Left (Codebase.CouldntLoadRootBranch h) -> + throwError $ GitError.CouldntLoadRootBranch repo h + Left (Codebase.CouldntParseRootBranch s) -> + throwError $ GitError.CouldntParseRootBranch repo s + Right b -> pure b + -- load from a specific `ShortBranchHash` + Just sbh -> do + branchCompletions <- lift $ Codebase.branchHashesByPrefix codebase sbh + case toList branchCompletions of + [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + [h] -> (lift $ Codebase.getBranchForHash codebase h) >>= \case + Just b -> pure b + Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions + lift closeCodebase + pure (Branch.getAt' path branch, remotePath)) + -- else there's no initialized codebase at this repo; we pretend there's an empty one. + (pure (Branch.empty, remotePath)) -- Given a branch that is "after" the existing root of a given git repo, -- stage and push the branch (as the new root) + dependencies to the repo. 
diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 3825eea148..5302de4591 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -59,7 +59,6 @@ library Unison.Codecs Unison.Codebase Unison.Codebase.Branch - Unison.Codebase.Branch.Dependencies Unison.Codebase.BranchDiff Unison.Codebase.BranchUtil Unison.Codebase.Causal @@ -82,6 +81,7 @@ library Unison.Codebase.Editor.UriParser Unison.Codebase.Editor.VersionParser Unison.Codebase.FileCodebase + Unison.Codebase.FileCodebase.Branch.Dependencies Unison.Codebase.FileCodebase.Common Unison.Codebase.FileCodebase.Git Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex @@ -97,6 +97,7 @@ library Unison.Codebase.Serialization.V1 Unison.Codebase.ShortBranchHash Unison.Codebase.SqliteCodebase + Unison.Codebase.SqliteCodebase.Branch.Dependencies Unison.Codebase.SqliteCodebase.Conversions Unison.Codebase.SqliteCodebase.Git Unison.Codebase.SqliteCodebase.SyncEphemeral From d422299998fb4a2b701a996e1ab46e2bee7b1b5d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 17 Mar 2021 13:46:44 -0400 Subject: [PATCH 126/225] remember to sync causal_parent table --- .../codebase-sqlite/U/Codebase/Sqlite/Sync22.hs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index e9537292fc..60710ae7bb 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -127,19 +127,21 @@ trySync tCache hCache oCache _gc = \case unBranchHashId bhId mayBoId' <- join <$> traverse (isSyncedObject) mayBoId - findMissingParents chId >>= \case - [] -> + findParents chId >>= \case + Right parents -> -- if branch object is present at src and dest, -- or absent from both src and dest -- then we are done if isJust mayBoId 
== isJust mayBoId' then do runDest $ Q.saveCausal chId' bhId' + parents' <- traverse syncCausalHash parents + runDest $ Q.saveCausalParents chId' parents' pure Sync.Done else -- else it's present at src but not at dest., -- so request it be copied, and revisit later pure $ Missing [O $ fromJust mayBoId] - missingParents -> + Left missingParents -> -- if branch object is present at src and dest, -- or absent from both src and dest -- but there are parents missing, @@ -363,9 +365,12 @@ trySync tCache hCache oCache _gc = \case syncBranchHashId :: BranchHashId -> m BranchHashId syncBranchHashId = fmap BranchHashId . syncHashLiteral . unBranchHashId - findMissingParents :: CausalHashId -> m [Entity] - findMissingParents chId = do - runSrc (Q.loadCausalParents chId) >>= filterM isMissing <&> fmap C + -- returns Left if parents are missing + findParents :: CausalHashId -> m (Either [Entity] [CausalHashId]) + findParents chId = do + srcParents <- runSrc (Q.loadCausalParents chId) + missingSrcParents <- map C <$> filterM isMissing srcParents + pure if null missingSrcParents then Right srcParents else Left missingSrcParents where isMissing p = syncCausalHash p From 6dd1e4bedd84b1597278bb7ecf755f2026e5e7fe Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 17 Mar 2021 13:47:37 -0400 Subject: [PATCH 127/225] sync progress formatting --- .../src/Unison/Codebase/SqliteCodebase.hs | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index cf6f8d1f59..ebdbf1adb4 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -793,7 +793,7 @@ syncProgress = Sync.Progress need done allDone then pure () else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) SyncProgressState _ _ -> undefined - State.get >>= liftIO . putStr . ("\r"<>) . 
renderState + State.get >>= liftIO . putStrLn . (\s -> "Synced " <> s <> " entities.") . renderState done h = do State.get >>= \case @@ -802,16 +802,14 @@ syncProgress = Sync.Progress need done allDone SyncProgressState (Just need) (Right done) -> State.put $ SyncProgressState (Just $ Set.delete h need) (Right $ Set.insert h done) SyncProgressState _ _ -> undefined - State.get >>= liftIO . putStrLn . renderState + State.get >>= liftIO . putStrLn . (\s -> "Synced " <> s <> " entities.") . renderState + + allDone = + liftIO . putStrLn . (\s -> "Done syncing " <> s <> " entities.") . renderState =<< State.get - allDone = liftIO $ putStrLn "\rSync complete." renderState = \case - SyncProgressState Nothing (Left doneCount) -> - "Synced " ++ show doneCount ++ " entities" - SyncProgressState (Just need) (Right done) -> - "Synced " ++ show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need) ++ " entities" - SyncProgressState Nothing Right {} -> - "invalid SyncProgressState Nothing Right{}" - SyncProgressState Just {} Left {} -> - "invalid SyncProgressState Just{} Left{}" \ No newline at end of file + SyncProgressState Nothing (Left doneCount) -> show doneCount + SyncProgressState (Just need) (Right done) -> show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need) + SyncProgressState Nothing Right {} -> "(invalid SyncProgressState Nothing Right{})" + SyncProgressState Just {} Left {} -> "(invalid SyncProgressState Just{} Left{})" \ No newline at end of file From 244d4636d6c69a30051cf7ea00d684f22b78b989 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 17 Mar 2021 21:47:05 -0400 Subject: [PATCH 128/225] something is missing if it is `not` present --- .../U/Codebase/Sqlite/Queries.hs | 16 +++++--- .../U/Codebase/Sqlite/Sync22.hs | 13 ++++--- .../src/Unison/Codebase/SqliteCodebase.hs | 38 ++++++++----------- 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index e93d76a15c..fef334219c 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -64,8 +64,11 @@ type Err m = (MonadError Integrity m, HasCallStack) debugQuery :: Bool debugQuery = False +alwaysTraceOnCrash :: Bool +alwaysTraceOnCrash = True + crashOnError :: Bool -crashOnError = True +crashOnError = False throwError :: Err m => Integrity -> m c throwError = if crashOnError then error . show else Except.throwError @@ -264,7 +267,7 @@ loadPrimaryHashByObjectId oId = queryAtom sql (Only oId) >>= orError (UnknownObj objectAndPrimaryHashByAnyHash :: EDB m => Base32Hex -> m (Maybe (Base32Hex, ObjectId)) objectAndPrimaryHashByAnyHash h = runMaybeT do hashId <- MaybeT $ loadHashId h -- hash may not exist - oId <- MaybeT $ maybeObjectIdForAnyHashId hashId -- hash may not correspond to object + oId <- MaybeT $ maybeObjectIdForAnyHashId hashId -- hash may not correspond to any object base32 <- loadPrimaryHashByObjectId oId pure (base32, oId) @@ -615,11 +618,11 @@ query_ q = do queryTrace :: (MonadUnliftIO m, Show q, Show a) => String -> SQLite.Query -> q -> m a -> m a queryTrace title query input m = - if debugQuery + if debugQuery || alwaysTraceOnCrash then try @_ @SQLite.SQLError m >>= \case Right a -> do - traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a + when debugQuery . 
traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a pure a Left e -> do traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n(and crashed)\n" @@ -628,11 +631,11 @@ queryTrace title query input m = queryTrace_ :: (MonadUnliftIO m, Show a) => String -> SQLite.Query -> m a -> m a queryTrace_ title query m = - if debugQuery + if debugQuery || alwaysTraceOnCrash then try @_ @SQLite.SQLError m >>= \case Right a -> do - traceM $ title ++ " " ++ show query ++ "\n output: " ++ show a + when debugQuery . traceM $ title ++ " " ++ show query ++ "\n output: " ++ show a pure a Left e -> do traceM $ title ++ " " ++ show query ++ "\n(and crashed)\n" @@ -668,3 +671,4 @@ instance ToField WatchKind where toField = \case WatchKind.RegularWatch -> SQLite.SQLInteger 0 WatchKind.TestWatch -> SQLite.SQLInteger 1 + diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 60710ae7bb..095529e260 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -72,7 +72,8 @@ data Error | DecodeError DecodeError ByteString ErrString | -- | hashes corresponding to a single object in source codebase -- correspond to multiple objects in destination codebase - HashObjectCorrespondence ObjectId [HashId] [ObjectId] + HashObjectCorrespondence ObjectId [HashId] [HashId] [ObjectId] + | SourceDbNotExist deriving (Show) data Env = Env @@ -374,7 +375,7 @@ trySync tCache hCache oCache _gc = \case where isMissing p = syncCausalHash p - >>= runDest . Q.isCausalHash . unCausalHashId + >>= runDest . fmap not . Q.isCausalHash . 
unCausalHashId syncSecondaryHashes oId oId' = runSrc (Q.hashIdWithVersionForObject oId) >>= traverse_ (go oId') @@ -386,13 +387,15 @@ trySync tCache hCache oCache _gc = \case isSyncedObject :: ObjectId -> m (Maybe ObjectId) isSyncedObject = Cache.applyDefined oCache \oId -> do hIds <- toList <$> runSrc (Q.hashIdsForObject oId) + hIds' <- traverse syncHashLiteral hIds ( nubOrd . catMaybes - <$> traverse (runDest . Q.maybeObjectIdForAnyHashId) hIds + <$> traverse (runDest . Q.maybeObjectIdForAnyHashId) hIds' ) >>= \case - [oId'] -> pure $ Just oId' + [oId'] -> do + pure $ Just oId' [] -> pure $ Nothing - oIds' -> throwError (HashObjectCorrespondence oId hIds oIds') + oIds' -> throwError (HashObjectCorrespondence oId hIds hIds' oIds') runSrc, runDest :: diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index ebdbf1adb4..707231b7a8 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -56,10 +56,10 @@ import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase1 import Unison.Codebase.Branch (Branch (..)) import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.SqliteCodebase.Branch.Dependencies as BD import qualified Unison.Codebase.Causal as Causal import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import qualified Unison.Codebase.SqliteCodebase.Branch.Dependencies as BD import qualified Unison.Codebase.SqliteCodebase.Conversions as Cv import qualified Unison.Codebase.SqliteCodebase.SyncEphemeral as SyncEphemeral import Unison.Codebase.SyncMode (SyncMode) @@ -656,43 +656,38 @@ sqliteCodebase root = do Codebase1.Codebase m v a -> Codebase1.Codebase m v a -> [Entity m] -> - ExceptT SyncEphemeral.Error m () + ExceptT Sync22.Error m () processBranches _ _ _ _ [] = pure () processBranches sync progress src 
dest (B h mb : rest) = do - ifM @(ExceptT SyncEphemeral.Error m) + ifM @(ExceptT Sync22.Error m) (lift $ Codebase1.branchExists dest h) (processBranches sync progress src dest rest) - ( ifM - (lift $ Codebase1.branchExists src h) - ( let h2 = CausalHash . Cv.hash1to2 $ Causal.unRawHash h - in do - lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) - >>= \case - Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch h2 - Just chId -> se . r $ Sync.sync sync progress [Sync22.C chId] - processBranches sync progress src dest rest - ) - ( lift mb >>= \b -> do + ( do + let h2 = CausalHash . Cv.hash1to2 $ Causal.unRawHash h + lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) >>= \case + Just chId -> do + r $ Sync.sync sync progress [Sync22.C chId] + processBranches sync progress src dest rest + Nothing -> lift mb >>= \b -> do let (branchDeps, BD.to' -> BD.Dependencies' es ts ds) = BD.fromBranch b if null branchDeps && null es && null ts && null ds then lift $ Codebase1.putBranch dest b - else let - bs = map (uncurry B) branchDeps - os = map O $ es <> ts <> ds - in processBranches @m sync progress src dest (os ++ bs ++ B h mb : rest) - ) + else + let bs = map (uncurry B) branchDeps + os = map O (es <> ts <> ds) + in processBranches @m sync progress src dest (os ++ bs ++ B h mb : rest) ) processBranches sync progress src dest (O h : rest) = do (runExceptT $ flip runReaderT srcConn (Q.expectHashIdByHash (Cv.hash1to2 h) >>= Q.expectObjectIdForAnyHashId)) >>= \case Left e -> error $ show e Right oId -> do - se . r $ Sync.sync sync progress [Sync22.O oId] + r $ Sync.sync sync progress [Sync22.O oId] processBranches sync progress src dest rest sync <- se . r $ Sync22.sync22 let progress' = Sync.transformProgress (lift . 
lift) progress newRootHash = Branch.headHash newRoot newRootHash2 = Cv.causalHash1to2 newRootHash - processBranches sync progress' src dest [B newRootHash (pure newRoot)] + se $ processBranches sync progress' src dest [B newRootHash (pure newRoot)] -- set the root namespace flip runReaderT destConn $ do chId <- @@ -807,7 +802,6 @@ syncProgress = Sync.Progress need done allDone allDone = liftIO . putStrLn . (\s -> "Done syncing " <> s <> " entities.") . renderState =<< State.get - renderState = \case SyncProgressState Nothing (Left doneCount) -> show doneCount SyncProgressState (Just need) (Right done) -> show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need) From 8571d75892f4ca4210321f695c50be8ede7ac1d4 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 18 Mar 2021 14:43:04 -0400 Subject: [PATCH 129/225] namespace sync wasn't syncing children right --- .../U/Codebase/Sqlite/Operations.hs | 2 +- .../U/Codebase/Sqlite/Sync22.hs | 121 ++++++------ codebase2/codebase-sqlite/sql/create.sql | 5 +- parser-typechecker/tests/Suite.hs | 3 +- .../tests/Unison/Test/GitSimple.hs | 179 ++++++++++++------ questions.md | 10 +- .../transcripts/GitSimple.one-term.output.md | 71 +++++++ .../transcripts/GitSimple.one-term2.output.md | 71 +++++++ .../transcripts/GitSimple.one-type.output.md | 72 +++++++ 9 files changed, 411 insertions(+), 123 deletions(-) create mode 100644 unison-src/transcripts/GitSimple.one-term.output.md create mode 100644 unison-src/transcripts/GitSimple.one-term2.output.md create mode 100644 unison-src/transcripts/GitSimple.one-type.output.md diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 019372c2b8..9dee69727a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -893,7 +893,7 @@ type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter saveRootBranch :: 
EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveRootBranch c = do - when debug $ traceM "saveRootBranch" + when debug $ traceM $ "saveRootBranch " ++ show (C.causalHash c) (boId, chId) <- saveBranch c Q.setNamespaceRoot chId pure (boId, chId) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 095529e260..07772bb03d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -9,7 +9,6 @@ module U.Codebase.Sqlite.Sync22 where -import Control.Monad (filterM, join) import Control.Monad.Except (ExceptT, MonadError (throwError)) import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM) @@ -27,7 +26,7 @@ import Data.Bytes.Put (putWord8, runPutS) import Data.Foldable (for_, toList, traverse_) import Data.Functor ((<&>)) import Data.List.Extra (nubOrd) -import Data.Maybe (catMaybes, fromJust, fromMaybe, isJust) +import Data.Maybe (catMaybes, fromMaybe) import Data.Set (Set) import qualified Data.Set as Set import Data.Word (Word8) @@ -43,7 +42,7 @@ import qualified U.Codebase.Sqlite.Reference as Sqlite import qualified U.Codebase.Sqlite.Reference as Sqlite.Reference import qualified U.Codebase.Sqlite.Referent as Sqlite.Referent import qualified U.Codebase.Sqlite.Serialization as S -import U.Codebase.Sync (Sync (Sync), TrySyncResult (Missing)) +import U.Codebase.Sync (Sync (Sync), TrySyncResult) import qualified U.Codebase.Sync as Sync import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) @@ -95,8 +94,9 @@ sync22 = do tCache <- Cache.semispaceCache size hCache <- Cache.semispaceCache size oCache <- Cache.semispaceCache size + cCache <- Cache.semispaceCache size gc <- runSrc $ Q.getNurseryGeneration - pure $ Sync (trySync tCache hCache oCache (succ gc)) + pure $ Sync (trySync tCache hCache oCache cCache (succ gc)) trySync :: forall m. 
@@ -104,54 +104,51 @@ trySync :: Cache m TextId TextId -> Cache m HashId HashId -> Cache m ObjectId ObjectId -> + Cache m CausalHashId CausalHashId -> Generation -> Entity -> m (TrySyncResult Entity) -trySync tCache hCache oCache _gc = \case +trySync t h o c _gc e = do + -- traceM $ "trySync " ++ show e ++ "..." + result <- trySync' t h o c _gc e + -- traceM $ "trySync " ++ show e ++ " = " ++ show result + pure result + +trySync' :: + forall m. + (MonadIO m, MonadError Error m, MonadReader Env m) => + Cache m TextId TextId -> + Cache m HashId HashId -> + Cache m ObjectId ObjectId -> + Cache m CausalHashId CausalHashId -> + Generation -> + Entity -> + m (TrySyncResult Entity) +trySync' tCache hCache oCache cCache _gc e = case e of -- for causals, we need to get the value_hash_id of the thingo -- - maybe enqueue their parents -- - enqueue the self_ and value_ hashes -- - enqueue the namespace object, if present - C chId -> do - chId' <- syncCausalHash chId - -- we're going to assume that if the dest has this in its - -- causal table, then it's safe to short-circuit - ifM - (runDest $ Q.isCausalHash $ unCausalHashId chId') - (pure Sync.PreviouslyDone) - ( do + C chId -> + isSyncedCausal chId >>= \case + Just {} -> pure Sync.PreviouslyDone + Nothing -> do + result <- runValidateT @(Set Entity) @m @() do bhId <- runSrc $ Q.loadCausalValueHashId chId - bhId' <- syncBranchHashId bhId - - mayBoId <- - runSrc . 
Q.maybeObjectIdForAnyHashId $ - unBranchHashId bhId - mayBoId' <- join <$> traverse (isSyncedObject) mayBoId - - findParents chId >>= \case - Right parents -> - -- if branch object is present at src and dest, - -- or absent from both src and dest - -- then we are done - if isJust mayBoId == isJust mayBoId' - then do - runDest $ Q.saveCausal chId' bhId' - parents' <- traverse syncCausalHash parents - runDest $ Q.saveCausalParents chId' parents' - pure Sync.Done - else -- else it's present at src but not at dest., - -- so request it be copied, and revisit later - pure $ Missing [O $ fromJust mayBoId] - Left missingParents -> - -- if branch object is present at src and dest, - -- or absent from both src and dest - -- but there are parents missing, - -- then request the parents be copied, and revisit later - if isJust mayBoId == isJust mayBoId' - then pure $ Missing missingParents - else -- otherwise request the branch and the parents be copied - pure $ Missing $ (O $ fromJust mayBoId) : missingParents - ) + mayBoId <- runSrc . Q.maybeObjectIdForAnyHashId $ unBranchHashId bhId + traverse_ syncLocalObjectId mayBoId + + parents' :: [CausalHashId] <- findParents' chId + bhId' <- lift $ syncBranchHashId bhId + chId' <- lift $ syncCausalHashId chId + runDest do + Q.saveCausal chId' bhId' + Q.saveCausalParents chId' parents' + + case result of + Left deps -> pure . Sync.Missing $ toList deps + Right () -> pure Sync.Done + -- objects are the hairiest. obviously, if they -- exist, we're done; otherwise we do some fancy stuff O oId -> @@ -311,6 +308,12 @@ trySync tCache hCache oCache _gc = \case syncBranchObjectId :: BranchObjectId -> ValidateT (Set Entity) m BranchObjectId syncBranchObjectId = fmap BranchObjectId . syncLocalObjectId . unBranchObjectId + syncCausal :: CausalHashId -> ValidateT (Set Entity) m CausalHashId + syncCausal chId = + lift (isSyncedCausal chId) >>= \case + Just chId' -> pure chId' + Nothing -> Validate.refute . 
Set.singleton $ C chId + syncLocalIds :: L.LocalIds -> ValidateT (Set Entity) m L.LocalIds syncLocalIds (L.LocalIds tIds oIds) = do oIds' <- traverse syncLocalObjectId oIds @@ -328,7 +331,7 @@ trySync tCache hCache oCache _gc = \case syncBranchLocalIds (BL.LocalIds tIds oIds poIds chboIds) = do oIds' <- traverse syncLocalObjectId oIds poIds' <- traverse (fmap PatchObjectId . syncLocalObjectId . unPatchObjectId) poIds - chboIds' <- traverse (bitraverse syncBranchObjectId (lift . syncCausalHash)) chboIds + chboIds' <- traverse (bitraverse syncBranchObjectId syncCausal) chboIds tIds' <- lift $ traverse syncTextLiteral tIds pure $ BL.LocalIds tIds' oIds' poIds' chboIds' @@ -360,22 +363,17 @@ trySync tCache hCache oCache _gc = \case syncHashReference :: Sqlite.ReferenceH -> m Sqlite.ReferenceH syncHashReference = bitraverse syncTextLiteral syncHashLiteral - syncCausalHash :: CausalHashId -> m CausalHashId - syncCausalHash = fmap CausalHashId . syncHashLiteral . unCausalHashId + syncCausalHashId :: CausalHashId -> m CausalHashId + syncCausalHashId = fmap CausalHashId . syncHashLiteral . unCausalHashId syncBranchHashId :: BranchHashId -> m BranchHashId syncBranchHashId = fmap BranchHashId . syncHashLiteral . unBranchHashId - -- returns Left if parents are missing - findParents :: CausalHashId -> m (Either [Entity] [CausalHashId]) - findParents chId = do - srcParents <- runSrc (Q.loadCausalParents chId) - missingSrcParents <- map C <$> filterM isMissing srcParents - pure if null missingSrcParents then Right srcParents else Left missingSrcParents - where - isMissing p = - syncCausalHash p - >>= runDest . fmap not . Q.isCausalHash . 
unCausalHashId + findParents' :: CausalHashId -> ValidateT (Set Entity) m [CausalHashId] + findParents' chId = do + srcParents <- runSrc $ Q.loadCausalParents chId + traverse syncCausal srcParents + syncSecondaryHashes oId oId' = runSrc (Q.hashIdWithVersionForObject oId) >>= traverse_ (go oId') @@ -397,6 +395,15 @@ trySync tCache hCache oCache _gc = \case [] -> pure $ Nothing oIds' -> throwError (HashObjectCorrespondence oId hIds hIds' oIds') + isSyncedCausal :: CausalHashId -> m (Maybe CausalHashId) + isSyncedCausal = Cache.applyDefined cCache \chId -> do + let hId = unCausalHashId chId + hId' <- syncHashLiteral hId + ifM + (runDest $ Q.isCausalHash hId') + (pure . Just $ CausalHashId hId') + (pure Nothing) + runSrc, runDest :: (MonadError Error m, MonadReader Env m) => diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 04bd5da104..f625546d85 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -56,7 +56,7 @@ CREATE INDEX object_type_id ON object(type_id); -- to not lose their identities. 
CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), - value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), + value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id), gc_generation INTEGER NOT NULL ); CREATE INDEX causal_value_hash_id ON causal(value_hash_id); @@ -69,10 +69,9 @@ CREATE TABLE namespace_root ( ); CREATE TABLE causal_parent ( - id INTEGER PRIMARY KEY NOT NULL, causal_id INTEGER NOT NULL CONSTRAINT causal_parent_fk1 REFERENCES causal(self_hash_id), parent_id INTEGER NOT NULL CONSTRAINT causal_parent_fk2 REFERENCES causal(self_hash_id), - UNIQUE(causal_id, parent_id) + UNIQUE (causal_id, parent_id) ); CREATE INDEX causal_parent_causal_id ON causal_parent(causal_id); CREATE INDEX causal_parent_parent_id ON causal_parent(parent_id); diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs index e62823ec67..3a984f1b40 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -1,3 +1,4 @@ +{-# OPTIONS_GHC -Wno-unused-imports #-} {-# OPTIONS_GHC -fno-warn-incomplete-patterns #-} module Main where @@ -67,7 +68,7 @@ test rt = tests , Typechecker.test , UriParser.test , Context.test - , Git.test + -- , Git.test , GitSimple.test , TestIO.test , Name.test diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 20ebdaa500..bf1c7247ed 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -19,66 +19,135 @@ import Unison.Parser (Ann) import Unison.Prelude import Unison.Symbol (Symbol) -test :: Test () -test = scope "git-simple" . 
tests $ [testPull] - --- [ testPull --- , testPush --- , syncComplete --- , syncTestResults --- ] - traceTranscriptOutput :: Bool traceTranscriptOutput = False -authorTranscript :: (Semigroup a1, IsString a1, Show a2, Typeable a2, Typeable a1) => a2 -> a1 -authorTranscript repo = - [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ +test :: Test () +test = scope "git-simple" . tests $ [ + + pushPullTest "one-term" +-- simplest-author + (\repo -> [iTrim| +```unison +c = 3 +``` +```ucm +.> debug.file +.> add +.> push ${repo} +``` +|]) +-- simplest-user + (\repo -> [iTrim| +```ucm +.> pull ${repo} +.> alias.term ##Nat.+ + ``` ```unison -unique type outside.A = A Nat -unique type outside.B = B Nat Nat -outside.c = 3 -outside.d = 4 - -unique type inside.X = X outside.A -inside.y = c + c +> #msp7bv40rv + 1 +``` +|]) + , + pushPullTest "one-term2" +-- simplest-author + (\repo -> [iTrim| +```unison +c = 3 ``` ```ucm -.myLib> debug.file +.> debug.file .myLib> add .myLib> push ${repo} ``` -|] - -userTranscript :: (Semigroup a1, IsString a1, Show a2, Typeable a2, Typeable a1) => a2 -> a1 -userTranscript repo = - [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ +|]) +-- simplest-user + (\repo -> [iTrim| +```ucm +.yourLib> pull ${repo} +``` +```unison +> c +``` +|]) + , + pushPullTest "one-type" +-- simplest-author + (\repo -> [iTrim| +```unison +type Foo = Foo ``` ```ucm -.yourLib> pull ${repo}:.inside +.myLib> debug.file +.myLib> add +.myLib> push ${repo} +``` +|]) +-- simplest-user + (\repo -> [iTrim| +```ucm +.yourLib> pull ${repo} ``` ```unison -> y + #msp7bv40rv + 1 +> Foo.Foo ``` -|] - --- goal of this test is to make sure that pull doesn't grab a ton of unneeded --- dependencies -testPull :: Test () -testPull = scope "pull" $ do - -- let's push a broader set of stuff, pull a narrower one (to a fresh codebase) - -- and verify that we have the definitions we expected and don't have some of - -- 
the ones we didn't expect. - +|]) +-- , + +-- pushPullTest "regular" +-- (\repo -> [iTrim| +-- ```ucm:hide +-- .builtin> alias.type ##Nat Nat +-- .builtin> alias.term ##Nat.+ Nat.+ +-- ``` +-- ```unison +-- unique type outside.A = A Nat +-- unique type outside.B = B Nat Nat +-- outside.c = 3 +-- outside.d = 4 + +-- unique type inside.X = X outside.A +-- inside.y = c + c +-- ``` +-- ```ucm +-- .myLib> debug.file +-- .myLib> add +-- .myLib> push ${repo} +-- ```|]) + +-- (\repo -> [iTrim| +-- ```ucm:hide +-- .builtin> alias.type ##Nat Nat +-- .builtin> alias.term ##Nat.+ Nat.+ +-- ``` +-- ```ucm +-- .yourLib> pull ${repo}:.inside +-- ``` +-- ```unison +-- > y + #msp7bv40rv + 1 +-- ``` +-- |]) + + ] + + +-- type inside.X#skinr6rvg7 +-- type outside.A#l2fmn9sdbk +-- type outside.B#nsgsq4ot5u +-- inside.y#omqnfettvj +-- outside.c#msp7bv40rv +-- outside.d#52addbrohu +-- .myLib> #6l0nd3i15e +-- .myLib.inside> #5regvciils +-- .myLib.inside.X> #kvcjrmgki6 +-- .myLib.outside> #uq1mkkhlf1 +-- .myLib.outside.A> #0e3g041m56 +-- .myLib.outside.B> #j57m94daqi + + +pushPullTest :: String -> (FilePath -> String) -> (FilePath -> String) -> Test () +pushPullTest name authorScript userScript = scope name $ do -- put all our junk into here - tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" + tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory ("git-simple-" ++ name) -- initialize author and user codebases (_authorDir, closeAuthor, authorCodebase) <- io $ initCodebase tmp "author" @@ -89,32 +158,26 @@ testPull = scope "pull" $ do io $ "git" ["init", "--bare", Text.pack repo] -- run author/push transcript - authorOutput <- runTranscript tmp authorCodebase (authorTranscript repo) + authorOutput <- runTranscript tmp authorCodebase (authorScript repo) - -- -- check out the resulting repo so we can inspect it - -- io $ "git" ["clone", Text.pack repo, Text.pack $ tmp "repo" ] + -- check out the resulting repo so we can 
inspect it + io $ "git" ["clone", Text.pack repo, Text.pack $ tmp "repo" ] -- run user/pull transcript - userOutput <- runTranscript tmp userCodebase (userTranscript repo) + userOutput <- runTranscript tmp userCodebase (userScript repo) io do closeAuthor closeUser writeFile - "unison-src/transcripts/GitSimple.hs.output.md" + ("unison-src""transcripts"("GitSimple." ++ name ++ ".output.md")) (authorOutput <> "\n-------\n" <> userOutput) - -- -- inspect user codebase - -- scope "user-should-have" $ - -- for userShouldHave $ \path -> - -- scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect - -- scope "user-should-not-have" $ -- this definitely won't pass with current implementation - -- for userShouldNotHave $ \path -> - -- scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect . not - - -- if we haven't crashed, clean up! + -- if we haven't crashed, clean up! + removeDirectoryRecursive repo removeDirectoryRecursive tmp + ok -- initialize a fresh codebase initCodebaseDir :: FilePath -> String -> IO CodebasePath diff --git a/questions.md b/questions.md index 517078492b..671e3d1dad 100644 --- a/questions.md +++ b/questions.md @@ -11,11 +11,15 @@ next steps: - [x] Writing a branch - [x] `SqliteCodebase.Conversions.causalbranch1to2` - [x] `SqliteCodebase.putRootBranch` -- [ ] Syncing a remote codebase - - [ ] `SqliteCodebase.syncFromDirectory` - - [ ] `SqliteCodebase.syncToDirectory` +- [x] Syncing a remote codebase + - [x] `SqliteCodebase.syncFromDirectory` + - [x] `SqliteCodebase.syncToDirectory` + - [ ] do I need to initialize a sqlite codebase in the destination? - [ ] Managing external edit events? - [ ] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? 
+ - https://www.sqlite.org/pragma.html#pragma_data_version + - https://user-images.githubusercontent.com/538571/111105100-8f0b4a80-8528-11eb-95f6-12bb906f315e.png +- [ ] consider using `causal` table to detect if a causal exists, instead of causal_parent? what even are these: diff --git a/unison-src/transcripts/GitSimple.one-term.output.md b/unison-src/transcripts/GitSimple.one-term.output.md new file mode 100644 index 0000000000..e4f06af5eb --- /dev/null +++ b/unison-src/transcripts/GitSimple.one-term.output.md @@ -0,0 +1,71 @@ +```unison +c = 3 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + c : ##Nat + +``` +```ucm +.> debug.file + + c#msp7bv40rv + +.> add + + ⍟ I've added these definitions: + + c : ##Nat + +.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-9778218d89a0fc21/repo.git + + Done. + +``` + +------- +```ucm +.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-9778218d89a0fc21/repo.git + + Here's what's changed in the current namespace after the + merge: + + Added definitions: + + 1. c : ##Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.> alias.term ##Nat.+ + + + Done. + +``` +```unison +> #msp7bv40rv + 1 +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
+ + 1 | > #msp7bv40rv + 1 + ⧩ + 4 + +``` diff --git a/unison-src/transcripts/GitSimple.one-term2.output.md b/unison-src/transcripts/GitSimple.one-term2.output.md new file mode 100644 index 0000000000..d608d66e77 --- /dev/null +++ b/unison-src/transcripts/GitSimple.one-term2.output.md @@ -0,0 +1,71 @@ +```unison +c = 3 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + c : ##Nat + +``` +```ucm +.> debug.file + + c#msp7bv40rv + + ☝️ The namespace .myLib is empty. + +.myLib> add + + ⍟ I've added these definitions: + + c : ##Nat + +.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-6e1967b10dc504d9/repo.git + + Done. + +``` + +------- +```ucm + ☝️ The namespace .yourLib is empty. + +.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-6e1967b10dc504d9/repo.git + + Here's what's changed in the current namespace after the + merge: + + Added definitions: + + 1. c : ##Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +```unison +> c +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | > c + ⧩ + 3 + +``` diff --git a/unison-src/transcripts/GitSimple.one-type.output.md b/unison-src/transcripts/GitSimple.one-type.output.md new file mode 100644 index 0000000000..c13084a8d0 --- /dev/null +++ b/unison-src/transcripts/GitSimple.one-type.output.md @@ -0,0 +1,72 @@ +```unison +type Foo = Foo +``` + +```ucm + + I found and typechecked these definitions in scratch.u. 
If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + type Foo + +``` +```ucm + ☝️ The namespace .myLib is empty. + +.myLib> debug.file + + type Foo#568rsi7o3g + +.myLib> add + + ⍟ I've added these definitions: + + type Foo + +.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-cc52d9c301e40738/repo.git + + Done. + +``` + +------- +```ucm + ☝️ The namespace .yourLib is empty. + +.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-cc52d9c301e40738/repo.git + + Here's what's changed in the current namespace after the + merge: + + Added definitions: + + 1. type Foo + 2. Foo.Foo : () + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +```unison +> Foo.Foo +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
+ + 1 | > Foo.Foo + ⧩ + () + +``` From 9af62780f120019cd3290dcd22860dc6453c2e19 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 18 Mar 2021 21:50:20 -0400 Subject: [PATCH 130/225] debug guards & unused import --- .../U/Codebase/Sqlite/Sync22.hs | 20 ++++++++++-- .../src/Unison/Codebase/SqliteCodebase.hs | 32 ++++++++++++------- .../tests/Unison/Test/GitSimple.hs | 6 ++-- .../transcripts/GitSimple.one-term.output.md | 4 +-- 4 files changed, 43 insertions(+), 19 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 07772bb03d..0afb13059a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -9,6 +9,7 @@ module U.Codebase.Sqlite.Sync22 where +import Control.Monad (when) import Control.Monad.Except (ExceptT, MonadError (throwError)) import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM) @@ -31,6 +32,7 @@ import Data.Set (Set) import qualified Data.Set as Set import Data.Word (Word8) import Database.SQLite.Simple (Connection) +import Debug.Trace (traceM) import qualified U.Codebase.Reference as Reference import qualified U.Codebase.Sqlite.Branch.Format as BL import U.Codebase.Sqlite.DbId @@ -82,6 +84,9 @@ data Env = Env idCacheSize :: Word } +debug :: Bool +debug = False + -- data Mappings sync22 :: ( MonadIO m, @@ -173,7 +178,9 @@ trySync' tCache hCache oCache cCache _gc e = case e of Left s -> throwError $ DecodeError ErrTermComponent bytes s -- iterate through the local ids looking for missing deps; -- then either enqueue the missing deps, or proceed to move the object + when debug $ traceM $ "LocalIds for Source " ++ show oId ++ ": " ++ show localIds localIds' <- traverse syncLocalIds localIds + when debug $ traceM $ "LocalIds for Dest: " ++ show localIds' -- reassemble and save the reindexed term let bytes' = runPutS $ @@ -192,6 +199,8 @@ trySync' tCache hCache oCache 
cCache _gc e = case e of Left s -> throwError $ DecodeError ErrWatchResult blob s tIds' <- traverse syncTextLiteral tIds hIds' <- traverse syncHashLiteral hIds + when debug $ traceM $ "LocalIds for Source watch result " ++ show refH ++ ": " ++ show (tIds, hIds) + when debug $ traceM $ "LocalIds for Dest watch result " ++ show refH' ++ ": " ++ show (tIds', hIds') let blob' = runPutS (S.recomposeWatchResult (L.LocalIds tIds' hIds', termBytes)) runDest (Q.saveWatch wk refH' blob') -- sync dependencies index @@ -296,6 +305,7 @@ trySync' tCache hCache oCache cCache _gc e = case e of Left deps -> pure . Sync.Missing $ toList deps Right oId' -> do syncSecondaryHashes oId oId' + when debug $ traceM $ "Source " ++ show (hId, oId) ++ " becomes Dest " ++ show (hId', oId') Cache.insert oCache oId oId' pure Sync.Done where @@ -342,12 +352,16 @@ trySync' tCache hCache oCache cCache _gc e = case e of syncTextLiteral :: TextId -> m TextId syncTextLiteral = Cache.apply tCache \tId -> do t <- runSrc $ Q.loadTextById tId - runDest $ Q.saveText t + tId' <- runDest $ Q.saveText t + when debug $ traceM $ "Source " ++ show tId ++ " is Dest " ++ show tId' ++ " (" ++ show t ++ ")" + pure tId' syncHashLiteral :: HashId -> m HashId syncHashLiteral = Cache.apply hCache \hId -> do b32hex <- runSrc $ Q.loadHashById hId - runDest $ Q.saveHash b32hex + hId' <- runDest $ Q.saveHash b32hex + when debug $ traceM $ "Source " ++ show hId ++ " is Dest " ++ show hId' ++ " (" ++ show b32hex ++ ")" + pure hId' isSyncedObjectReference :: Sqlite.Reference -> m (Maybe Sqlite.Reference) isSyncedObjectReference = \case @@ -374,7 +388,6 @@ trySync' tCache hCache oCache cCache _gc e = case e of srcParents <- runSrc $ Q.loadCausalParents chId traverse syncCausal srcParents - syncSecondaryHashes oId oId' = runSrc (Q.hashIdWithVersionForObject oId) >>= traverse_ (go oId') where @@ -391,6 +404,7 @@ trySync' tCache hCache oCache cCache _gc e = case e of ) >>= \case [oId'] -> do + when debug $ traceM $ "Source " ++ 
show oId ++ " is Dest " ++ show oId' pure $ Just oId' [] -> pure $ Nothing oIds' -> throwError (HashObjectCorrespondence oId hIds hIds' oIds') diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 707231b7a8..71fa7640d6 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -10,7 +10,7 @@ module Unison.Codebase.SqliteCodebase where import qualified Control.Concurrent import qualified Control.Exception -import Control.Monad (filterM, (>=>)) +import Control.Monad (filterM, when, (>=>)) import Control.Monad.Except (ExceptT, runExceptT) import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM, unlessM) @@ -88,8 +88,9 @@ import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirec import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM -debug :: Bool +debug, debugProcessBranches :: Bool debug = False +debugProcessBranches = False codebasePath :: FilePath codebasePath = ".unison" "v2" "unison.sqlite3" @@ -659,25 +660,34 @@ sqliteCodebase root = do ExceptT Sync22.Error m () processBranches _ _ _ _ [] = pure () processBranches sync progress src dest (B h mb : rest) = do + when debugProcessBranches $ traceM $ "processBranches B " ++ take 10 (show h) ifM @(ExceptT Sync22.Error m) (lift $ Codebase1.branchExists dest h) - (processBranches sync progress src dest rest) ( do + when debugProcessBranches $ traceM " already exists in dest db" + processBranches sync progress src dest rest + ) + ( do + when debugProcessBranches $ traceM " doesn't exist in dest db" let h2 = CausalHash . 
Cv.hash1to2 $ Causal.unRawHash h lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) >>= \case Just chId -> do + when debugProcessBranches $ traceM $ " exists in source db, so delegating to direct sync" r $ Sync.sync sync progress [Sync22.C chId] processBranches sync progress src dest rest - Nothing -> lift mb >>= \b -> do - let (branchDeps, BD.to' -> BD.Dependencies' es ts ds) = BD.fromBranch b - if null branchDeps && null es && null ts && null ds - then lift $ Codebase1.putBranch dest b - else - let bs = map (uncurry B) branchDeps - os = map O (es <> ts <> ds) - in processBranches @m sync progress src dest (os ++ bs ++ B h mb : rest) + Nothing -> + lift mb >>= \b -> do + when debugProcessBranches $ traceM $ " doesn't exist in either db, so delegating to Codebase.putBranch" + let (branchDeps, BD.to' -> BD.Dependencies' es ts ds) = BD.fromBranch b + if null branchDeps && null es && null ts && null ds + then lift $ Codebase1.putBranch dest b + else + let bs = map (uncurry B) branchDeps + os = map O (es <> ts <> ds) + in processBranches @m sync progress src dest (os ++ bs ++ B h mb : rest) ) processBranches sync progress src dest (O h : rest) = do + when debugProcessBranches $ traceM $ "processBranches O " ++ take 10 (show h) (runExceptT $ flip runReaderT srcConn (Q.expectHashIdByHash (Cv.hash1to2 h) >>= Q.expectObjectIdForAnyHashId)) >>= \case Left e -> error $ show e Right oId -> do diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index bf1c7247ed..e445b9839d 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -19,8 +19,8 @@ import Unison.Parser (Ann) import Unison.Prelude import Unison.Symbol (Symbol) -traceTranscriptOutput :: Bool -traceTranscriptOutput = False +writeTranscriptOutput :: Bool +writeTranscriptOutput = False test :: Test () test = scope "git-simple" . 
tests $ [ @@ -170,7 +170,7 @@ pushPullTest name authorScript userScript = scope name $ do closeAuthor closeUser - writeFile + when writeTranscriptOutput $ writeFile ("unison-src""transcripts"("GitSimple." ++ name ++ ".output.md")) (authorOutput <> "\n-------\n" <> userOutput) diff --git a/unison-src/transcripts/GitSimple.one-term.output.md b/unison-src/transcripts/GitSimple.one-term.output.md index e4f06af5eb..a7606f1fcc 100644 --- a/unison-src/transcripts/GitSimple.one-term.output.md +++ b/unison-src/transcripts/GitSimple.one-term.output.md @@ -24,7 +24,7 @@ c = 3 c : ##Nat -.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-9778218d89a0fc21/repo.git +.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-6e0da114c2313a44/repo.git Done. @@ -32,7 +32,7 @@ c = 3 ------- ```ucm -.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-9778218d89a0fc21/repo.git +.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-6e0da114c2313a44/repo.git Here's what's changed in the current namespace after the merge: From 2b77981d70489e1824bc5815765810ac96403b44 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 19 Mar 2021 03:46:30 -0400 Subject: [PATCH 131/225] wip v1 > v2 conversion --- .../src/Unison/Codebase/Conversion/Sync12.hs | 272 ++++++++++++++++++ .../Conversion/Sync12BranchDependencies.hs | 88 ++++++ .../src/Unison/Codebase/SqliteCodebase.hs | 14 +- .../tests/Unison/Test/GitSimple.hs | 1 - .../unison-parser-typechecker.cabal | 3 + 5 files changed, 374 insertions(+), 4 deletions(-) create mode 100644 parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs create mode 100644 parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs new file mode 100644 index 0000000000..17fd4bc56d --- 
/dev/null +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -0,0 +1,272 @@ +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TemplateHaskell #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ViewPatterns #-} + +module Unison.Codebase.Conversion.Sync12 where + +import Control.Lens +import Control.Monad.Except (MonadError, runExceptT) +import qualified Control.Monad.Except as Except +import Control.Monad.RWS (MonadRWS) +import Control.Monad.Reader +import qualified Control.Monad.Reader as Reader +import Control.Monad.State (MonadState) +import qualified Control.Monad.State as State +import Control.Monad.Trans.Except (ExceptT) +import Control.Monad.Validate (MonadValidate, ValidateT, runValidateT) +import qualified Control.Monad.Validate as Validate +import Control.Monad.Writer +import Data.Bifoldable (bitraverse_) +import Data.Foldable (traverse_) +import qualified Data.Foldable as Foldable +import Data.Map (Map) +import qualified Data.Map as Map +import Data.Sequence (Seq) +import qualified Data.Set as Set +import Data.Traversable (for) +import Database.SQLite.Simple (Connection) +import U.Codebase.Sqlite.DbId (Generation) +import qualified U.Codebase.Sqlite.Queries as Q +import U.Codebase.Sync (Sync (Sync), TrySyncResult) +import qualified U.Codebase.Sync as Sync +import Unison.Codebase (Codebase) +import qualified Unison.Codebase as Codebase +import Unison.Codebase.Branch (Branch) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Conversion.Sync12BranchDependencies as BD +import Unison.DataDeclaration (DataDeclaration, Decl) +import qualified Unison.DataDeclaration as DD +import Unison.Hash (Hash) +import qualified Unison.LabeledDependency as LD +import Unison.Prelude (Set, Word64, ifM, (<&>)) +import qualified Unison.Reference as Reference +import Unison.Symbol (Symbol) +import Unison.Term (Term) +import qualified Unison.Term as Term +import Unison.Type (Type) +import qualified Unison.Type as Type + +data 
Env m = Env + { srcCodebase :: Codebase m Symbol (), + destCodebase :: Codebase m Symbol (), + destConnection :: Connection + } + +data Entity m + = C Branch.Hash (m (Branch m)) + | T Hash Reference.Size + | D Hash Reference.Size + | P Branch.EditHash + +data Entity' + = C' Branch.Hash + | T' Hash + | D' Hash + | P' Branch.EditHash + deriving (Eq, Ord, Show) + +toEntity' :: Entity m -> Entity' +toEntity' = \case + C h _ -> C' h + T h _ -> T' h + D h _ -> D' h + P h -> P' h + +instance Eq (Entity m) where x == y = toEntity' x == toEntity' y + +instance Ord (Entity m) where compare x y = compare (toEntity' x) (toEntity' y) + +data BranchStatus + = BranchOk + | BranchReplaced Branch.Hash + +data TermStatus + = TermOk + | TermMissing + | TermMissingType + | TermMissingDependencies + +data DeclStatus + = DeclOk + | DeclMissing + | DeclMissingDependencies + +data PatchStatus + = PatchOk + | PatchMissing + | PatchMissingDependencies + +data Status = Status + { _branchStatus :: Map Branch.Hash BranchStatus, + _termStatus :: Map Hash TermStatus, + _declStatus :: Map Hash DeclStatus, + _patchStatus :: Map Branch.EditHash PatchStatus + } + +makeLenses ''Status + +instance Show (Entity m) where + show = \case + C h _ -> "C " ++ show h + T h len -> "T " ++ show h ++ " " ++ show len + D h len -> "D " ++ show h ++ " " ++ show len + P h -> "P " ++ show h + +sync12 :: + (MonadIO m, MonadRWS (Env m) () Status m) => + m (Sync m (Entity m)) +sync12 = do + gc <- runDest' $ Q.getNurseryGeneration + pure $ Sync (trySync (succ gc)) + +trySync :: + forall m. + MonadRWS (Env m) () Status m => + Generation -> + Entity m -> + m (TrySyncResult (Entity m)) +trySync _gc e = do + Env src dest _ <- Reader.ask + case e of + C h mb -> + isSyncedCausal h >>= \case + True -> pure Sync.PreviouslyDone + False -> do + result <- runValidateT @(Set (Entity m)) @m @() do + b <- lift mb + (h', b') <- repair b + setBranchStatus h h' + lift $ Codebase.putBranch dest b' + case result of + Left deps -> pure . 
Sync.Missing $ Foldable.toList deps + Right () -> pure Sync.Done + T h n -> + getTermStatus h >>= \case + Just {} -> pure Sync.PreviouslyDone + Nothing -> do + result <- runValidateT do + runExceptT (checkTermComponent h n) >>= \case + Left status -> setTermStatus h status + Right component -> do + Foldable.for_ (zip component [0 ..]) \((term, typ), i) -> + lift $ Codebase.putTerm dest (Reference.Id h i n) term typ + setTermStatus h TermOk + case result of + Left deps -> pure . Sync.Missing $ Foldable.toList deps + Right () -> pure Sync.Done + D h n -> + getDeclStatus h >>= \case + Just {} -> pure Sync.PreviouslyDone + Nothing -> do + result <- runValidateT do + runExceptT (checkDeclComponent h n) >>= \case + Left status -> setDeclStatus h status + Right component -> do + Foldable.for_ (zip component [0 ..]) \(decl, i) -> + lift $ Codebase.putTypeDeclaration dest (Reference.Id h i n) decl + setDeclStatus h DeclOk + case result of + Left deps -> pure . Sync.Missing $ Foldable.toList deps + Right () -> pure Sync.Done + P h -> + getPatchStatus h >>= \case + Just {} -> pure Sync.PreviouslyDone + Nothing -> do + result <- runValidateT do + runExceptT (checkPatch h) >>= \case + Left status -> setPatchStatus h status + Right patch -> do + lift $ Codebase.putPatch dest h patch + setPatchStatus h patchOk + case result of + Left deps -> pure . Sync.Missing $ foldable.toList deps + Right () -> pure Sync.Done + where + isSyncedCausal :: Branch.Hash -> m Bool + isSyncedCausal = undefined + getTermStatus h = use (termStatus . at h) + getDeclStatus h = use (declStatus . at h) + setTermStatus h s = termStatus . at h .= Just s + setDeclStatus h s = declStatus . at h .= Just s + setBranchStatus :: forall m. MonadState Status m => Branch.Hash -> Branch.Hash -> m () + setBranchStatus h h' = + if h == h' + then branchStatus . at h .= Just BranchOk + else branchStatus . at h .= Just (BranchReplaced h') + + checkTermComponent :: + forall m. 
+ (MonadState Status m, MonadReader (Env m) m) => + Hash -> + Reference.Size -> + ExceptT TermStatus (ValidateT (Set (Entity m)) m) [(Term Symbol (), Type Symbol ())] + checkTermComponent h n = do + Env src _ _ <- Reader.ask + for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do + term <- lift . lift $ Codebase.getTerm src r + typ <- lift . lift $ Codebase.getTypeOfTermImpl src r + case (term, typ) of + (Just term, Just typ) -> do + let termDeps = Term.labeledDependencies term + typeDeps = Type.dependencies typ + let checkDecl = \case + Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ n') -> + getDeclStatus h' >>= \case + Just DeclOk -> pure () + Just _ -> Except.throwError TermMissingDependencies + Nothing -> Validate.dispute . Set.singleton $ D h' n' + checkTerm = \case + Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ n') -> + getTermStatus h' >>= \case + Just TermOk -> pure () + Just _ -> Except.throwError TermMissingDependencies + Nothing -> Validate.dispute . Set.singleton $ T h' n' + traverse_ (bitraverse_ checkDecl checkTerm . LD.toReference) termDeps + traverse_ checkDecl typeDeps + pure (term, typ) + (Nothing, _) -> Except.throwError TermMissing + (_, Nothing) -> Except.throwError TermMissingType + + checkDeclComponent :: + forall m. + (MonadState Status m, MonadReader (Env m) m) => + Hash -> + Reference.Size -> + ExceptT DeclStatus (ValidateT (Set (Entity m)) m) [Decl Symbol ()] + checkDeclComponent h n = do + Env src _ _ <- Reader.ask + for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do + decl <- lift . lift $ Codebase.getTypeDeclaration src r + case decl of + Just decl -> do + let deps = DD.declDependencies decl + checkDecl = \case + Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ n') -> + getDeclStatus h' >>= \case + Just DeclOk -> pure () + Just _ -> Except.throwError DeclMissingDependencies + Nothing -> Validate.dispute . 
Set.singleton $ D h' n' + traverse_ checkDecl deps + pure decl + Nothing -> Except.throwError DeclMissing + + +repair :: Branch m -> ValidateT (Set (Entity m)) m (Branch.Hash, Branch m) +repair = error "not implemented" + +runSrc, runDest :: MonadReader (Env m) m => (Codebase m Symbol () -> a) -> m a +runSrc = (Reader.reader srcCodebase <&>) +runDest = (Reader.reader destCodebase <&>) + +runDest' :: + (MonadReader (Env m) m) => + ReaderT Connection m a -> + m a +runDest' ma = Reader.reader destConnection >>= flip runDB ma + +runDB :: Connection -> ReaderT Connection m a -> m a +runDB conn action = Reader.runReaderT action conn diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs new file mode 100644 index 0000000000..6e1a3acb1c --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs @@ -0,0 +1,88 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RecordWildCards #-} + +module Unison.Codebase.Conversion.Sync12BranchDependencies where + +import Data.Foldable (toList) +import Data.Map (Map) +import qualified Data.Map as Map +import Data.Monoid.Generic (GenericMonoid (..), GenericSemigroup (..)) +import Data.Set (Set) +import qualified Data.Set as Set +import GHC.Generics (Generic) +import Unison.Codebase.Branch (Branch (Branch), Branch0, EditHash) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Causal as Causal +import Unison.Codebase.Patch (Patch) +import Unison.NameSegment (NameSegment) +import Unison.Reference (Reference, pattern Derived) +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent +import qualified Unison.Util.Relation as R +import qualified Unison.Util.Star3 as Star3 +import Unison.Hash (Hash) +import qualified Unison.Reference as Reference + +type Branches m = 
[(Branch.Hash, m (Branch m))] + +data Dependencies = Dependencies + { patches :: Set EditHash + , terms :: Map Hash Reference.Size + , decls :: Map Hash Reference.Size + } + deriving Show + deriving Generic + deriving Semigroup via GenericSemigroup Dependencies + deriving Monoid via GenericMonoid Dependencies + +data Dependencies' = Dependencies' + { patches' :: [EditHash] + , terms' :: [(Hash, Reference.Size)] + , decls' :: [(Hash, Reference.Size)] + } + deriving (Eq, Show) + deriving Generic + deriving Semigroup via GenericSemigroup Dependencies' + deriving Monoid via GenericMonoid Dependencies' + + +to' :: Dependencies -> Dependencies' +to' Dependencies{..} = + Dependencies' (toList patches) (Map.toList terms) (Map.toList decls) + +fromBranch :: Applicative m => Branch m -> (Branches m, Dependencies) +fromBranch (Branch c) = case c of + Causal.One _hh e -> fromBranch0 e + Causal.Cons _hh e (h, m) -> fromBranch0 e <> fromTails (Map.singleton h m) + Causal.Merge _hh e tails -> fromBranch0 e <> fromTails tails + where + fromTails m = ([(h, Branch <$> mc) | (h, mc) <- Map.toList m], mempty) + +fromBranch0 :: Applicative m => Branch0 m -> (Branches m, Dependencies) +fromBranch0 b = + ( fromChildren (Branch._children b) + , fromTermsStar (Branch._terms b) + <> fromTypesStar (Branch._types b) + <> fromEdits (Branch._edits b) ) + where + fromChildren :: Applicative m => Map NameSegment (Branch m) -> Branches m + fromChildren m = [ (Branch.headHash b, pure b) | b <- toList m ] + references :: Branch.Star r NameSegment -> [r] + references = toList . R.dom . Star3.d1 + mdValues :: Branch.Star r NameSegment -> [Reference] + mdValues = fmap snd . toList . R.ran . 
Star3.d3 + fromTermsStar :: Branch.Star Referent NameSegment -> Dependencies + fromTermsStar s = Dependencies mempty terms decls where + terms = Map.fromList $ + [ (h, n) | Referent.Ref (Derived h _ n) <- references s] ++ + [ (h, n) | (Derived h _ n) <- mdValues s] + decls = Map.fromList $ + [ (h, n) | Referent.Con (Derived h _i n) _ _ <- references s ] + fromTypesStar :: Branch.Star Reference NameSegment -> Dependencies + fromTypesStar s = Dependencies mempty terms decls where + terms = Map.fromList [ (h, n) | (Derived h _ n) <- mdValues s ] + decls = Map.fromList [ (h, n) | (Derived h _ n) <- references s ] + fromEdits :: Map NameSegment (EditHash, m Patch) -> Dependencies + fromEdits m = Dependencies (Set.fromList . fmap fst $ toList m) mempty mempty diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 71fa7640d6..375afc3aec 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -786,8 +786,15 @@ syncProgress :: MonadState SyncProgressState m => MonadIO m => Sync.Progress m S syncProgress = Sync.Progress need done allDone where maxTrackedHashCount = 1024 * 1024 + size :: SyncProgressState -> Int + size = \case + SyncProgressState Nothing (Left i) -> i + SyncProgressState (Just need) (Right done) -> Set.size need + Set.size done + SyncProgressState _ _ -> undefined + need, done :: (MonadState SyncProgressState m, MonadIO m) => Sync22.Entity -> m () need h = do + Monad.whenM (fmap (>0) $ State.gets size) $ liftIO $ putStr "\n" State.get >>= \case SyncProgressState Nothing Left {} -> pure () SyncProgressState (Just need) (Right done) -> @@ -798,19 +805,20 @@ syncProgress = Sync.Progress need done allDone then pure () else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) SyncProgressState _ _ -> undefined - State.get >>= liftIO . putStrLn . (\s -> "Synced " <> s <> " entities.") . 
renderState + State.get >>= liftIO . putStr . (\s -> "\rSynced " <> s <> " entities.") . renderState done h = do + Monad.whenM (fmap (>0) $ State.gets size) $ liftIO $ putStr "\n" State.get >>= \case SyncProgressState Nothing (Left count) -> State.put $ SyncProgressState Nothing (Left (count + 1)) SyncProgressState (Just need) (Right done) -> State.put $ SyncProgressState (Just $ Set.delete h need) (Right $ Set.insert h done) SyncProgressState _ _ -> undefined - State.get >>= liftIO . putStrLn . (\s -> "Synced " <> s <> " entities.") . renderState + State.get >>= liftIO . putStr . (\s -> "\rSynced " <> s <> " entities.") . renderState allDone = - liftIO . putStrLn . (\s -> "Done syncing " <> s <> " entities.") . renderState =<< State.get + State.get >>= liftIO . putStrLn . (\s -> "\rDone syncing " <> s <> " entities.") . renderState renderState = \case SyncProgressState Nothing (Left doneCount) -> show doneCount diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index e445b9839d..562dce4e6b 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -6,7 +6,6 @@ module Unison.Test.GitSimple where import Control.Lens (view, _1) import Data.String.Here (iTrim) import qualified Data.Text as Text -import Data.Typeable (Typeable) import EasyTest import Shellmet () import System.Directory (removeDirectoryRecursive) diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 5302de4591..182d53e0aa 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -64,6 +64,8 @@ library Unison.Codebase.Causal Unison.Codebase.Classes Unison.Codebase.CodeLookup + Unison.Codebase.Conversion.Sync12 + Unison.Codebase.Conversion.Sync12BranchDependencies Unison.Codebase.Editor.AuthorInfo Unison.Codebase.Editor.Command 
Unison.Codebase.Editor.DisplayObject @@ -233,6 +235,7 @@ library memory, mmorph, monad-loops, + monad-validate, mtl, murmur-hash, mutable-containers, From eebd3b8b6a1cbd99e0aa21dc52e913088fb29c6d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 19 Mar 2021 14:36:32 -0400 Subject: [PATCH 132/225] wip v1 > v2 conversion --- codebase2/util/U/Util/Relation.hs | 8 ++ parser-typechecker/src/Unison/Codebase.hs | 5 ++ .../src/Unison/Codebase/Conversion/Sync12.hs | 75 ++++++++++++++++--- .../src/Unison/Codebase/FileCodebase.hs | 5 +- .../Unison/Codebase/FileCodebase/Common.hs | 14 +++- .../src/Unison/Codebase/SqliteCodebase.hs | 26 ++++++- .../Codebase/SqliteCodebase/Conversions.hs | 3 + unison-core/src/Unison/Reference.hs | 1 + unison-core/src/Unison/Util/Relation.hs | 10 ++- 9 files changed, 128 insertions(+), 19 deletions(-) diff --git a/codebase2/util/U/Util/Relation.hs b/codebase2/util/U/Util/Relation.hs index 0ef86b1122..57b00207c4 100644 --- a/codebase2/util/U/Util/Relation.hs +++ b/codebase2/util/U/Util/Relation.hs @@ -12,6 +12,7 @@ import Data.Set (Set) import Data.Map (Map) import Data.Maybe (fromMaybe, isJust) import Data.Foldable (Foldable(foldl')) +import qualified Control.Monad as Monad -- | -- This implementation avoids using @"Set (a,b)"@ because @@ -234,6 +235,9 @@ filterRan f r = r |> S.filter f (ran r) filter :: (Ord a, Ord b) => ((a, b) -> Bool) -> Relation a b -> Relation a b filter f = fromList . List.filter f . toList +filterM :: (Applicative m, Ord a, Ord b) => ((a, b) -> m Bool) -> Relation a b -> m (Relation a b) +filterM f = fmap fromList . Monad.filterM f . toList + -- | Restricts the relation to domain elements having multiple range elements filterManyDom :: (Ord a, Ord b) => Relation a b -> Relation a b filterManyDom r = filterDom (`manyDom` r) r @@ -477,6 +481,10 @@ bimap :: (Ord a, Ord b, Ord c, Ord d) => (a -> c) -> (b -> d) -> Relation a b -> Relation c d bimap f g = fromList . fmap (\(a,b) -> (f a, g b)) . 
toList +bitraverse :: (Applicative f, Ord a, Ord b, Ord c, Ord d) + => (a -> f c) -> (b -> f d) -> Relation a b -> f (Relation c d) +bitraverse f g = fmap fromList . traverse (\(a,b) -> (,) <$> f a <*> g b) . toList + instance (Ord a, Ord b) => Monoid (Relation a b) where mempty = empty mappend = (<>) diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index bb132921de..a42832226a 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -37,6 +37,7 @@ import Unison.Term (Term) import Unison.Type (Type) import Unison.Codebase.ShortBranchHash (ShortBranchHash) import Unison.ShortHash (ShortHash) +import Unison.Codebase.Patch (Patch) type DataDeclaration v a = DD.DataDeclaration v a type EffectDeclaration v a = DD.EffectDeclaration v a @@ -62,6 +63,10 @@ data Codebase m v a = , putBranch :: Branch m -> m () , branchExists :: Branch.Hash -> m Bool + , getPatch :: Branch.EditHash -> m (Maybe Patch) + , putPatch :: Branch.EditHash -> Patch -> m () + , patchExists :: Branch.EditHash -> m Bool + , dependentsImpl :: Reference -> m (Set Reference.Id) -- This copies all the dependencies of `b` from the specified -- FileCodebase into this Codebase, and sets our root branch to `b` diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 17fd4bc56d..14a374b77b 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -35,9 +35,11 @@ import qualified Unison.Codebase as Codebase import Unison.Codebase.Branch (Branch) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Conversion.Sync12BranchDependencies as BD +import Unison.Codebase.Patch (Patch (..)) import Unison.DataDeclaration (DataDeclaration, Decl) import qualified Unison.DataDeclaration as DD import Unison.Hash (Hash) +import 
qualified Unison.Hashable as H import qualified Unison.LabeledDependency as LD import Unison.Prelude (Set, Word64, ifM, (<&>)) import qualified Unison.Reference as Reference @@ -46,6 +48,9 @@ import Unison.Term (Term) import qualified Unison.Term as Term import Unison.Type (Type) import qualified Unison.Type as Type +import qualified Unison.Util.Relation as Relation +import qualified Unison.Codebase.TermEdit as TermEdit +import qualified Unison.Codebase.TypeEdit as TypeEdit data Env m = Env { srcCodebase :: Codebase m Symbol (), @@ -79,6 +84,7 @@ instance Ord (Entity m) where compare x y = compare (toEntity' x) (toEntity' y) data BranchStatus = BranchOk + | BranchMissing | BranchReplaced Branch.Hash data TermStatus @@ -95,7 +101,7 @@ data DeclStatus data PatchStatus = PatchOk | PatchMissing - | PatchMissingDependencies + | PatchReplaced Branch.EditHash data Status = Status { _branchStatus :: Map Branch.Hash BranchStatus, @@ -135,7 +141,7 @@ trySync _gc e = do False -> do result <- runValidateT @(Set (Entity m)) @m @() do b <- lift mb - (h', b') <- repair b + (h', b') <- repairBranch b setBranchStatus h h' lift $ Codebase.putBranch dest b' case result of @@ -176,19 +182,21 @@ trySync _gc e = do result <- runValidateT do runExceptT (checkPatch h) >>= \case Left status -> setPatchStatus h status - Right patch -> do + Right (h', patch) -> do lift $ Codebase.putPatch dest h patch - setPatchStatus h patchOk + setPatchStatus h PatchOk case result of - Left deps -> pure . Sync.Missing $ foldable.toList deps + Left deps -> pure . Sync.Missing $ Foldable.toList deps Right () -> pure Sync.Done where isSyncedCausal :: Branch.Hash -> m Bool isSyncedCausal = undefined getTermStatus h = use (termStatus . at h) getDeclStatus h = use (declStatus . at h) + getPatchStatus h = use (patchStatus . at h) setTermStatus h s = termStatus . at h .= Just s setDeclStatus h s = declStatus . at h .= Just s + setPatchStatus h s = patchStatus . at h .= Just s setBranchStatus :: forall m. 
MonadState Status m => Branch.Hash -> Branch.Hash -> m () setBranchStatus h h' = if h == h' @@ -200,13 +208,15 @@ trySync _gc e = do (MonadState Status m, MonadReader (Env m) m) => Hash -> Reference.Size -> - ExceptT TermStatus (ValidateT (Set (Entity m)) m) [(Term Symbol (), Type Symbol ())] + EVT TermStatus m [(Term Symbol (), Type Symbol ())] checkTermComponent h n = do Env src _ _ <- Reader.ask for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do term <- lift . lift $ Codebase.getTerm src r typ <- lift . lift $ Codebase.getTypeOfTermImpl src r case (term, typ) of + (Nothing, _) -> Except.throwError TermMissing + (_, Nothing) -> Except.throwError TermMissingType (Just term, Just typ) -> do let termDeps = Term.labeledDependencies term typeDeps = Type.dependencies typ @@ -227,20 +237,19 @@ trySync _gc e = do traverse_ (bitraverse_ checkDecl checkTerm . LD.toReference) termDeps traverse_ checkDecl typeDeps pure (term, typ) - (Nothing, _) -> Except.throwError TermMissing - (_, Nothing) -> Except.throwError TermMissingType checkDeclComponent :: forall m. (MonadState Status m, MonadReader (Env m) m) => Hash -> Reference.Size -> - ExceptT DeclStatus (ValidateT (Set (Entity m)) m) [Decl Symbol ()] + EVT DeclStatus m [Decl Symbol ()] checkDeclComponent h n = do Env src _ _ <- Reader.ask for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do decl <- lift . lift $ Codebase.getTypeDeclaration src r case decl of + Nothing -> Except.throwError DeclMissing Just decl -> do let deps = DD.declDependencies decl checkDecl = \case @@ -252,11 +261,53 @@ trySync _gc e = do Nothing -> Validate.dispute . Set.singleton $ D h' n' traverse_ checkDecl deps pure decl - Nothing -> Except.throwError DeclMissing + checkPatch :: + forall m. + (MonadState Status m, MonadReader (Env m) m) => + Branch.EditHash -> + EVT PatchStatus m (Branch.EditHash, Patch) + checkPatch h = do + Env src _ _ <- Reader.ask + (lift . 
lift $ Codebase.getPatch src h) >>= \case + Nothing -> Except.throwError PatchMissing + Just patch -> do + (h', patch) <- lift $ repairPatch patch + if h == h' + then setPatchStatus h PatchOk + else setPatchStatus h (PatchReplaced h') + pure (h', patch) + + + repairBranch :: Branch m -> ValidateT (Set (Entity m)) m (Branch.Hash, Branch m) + repairBranch = error "not implemented" + + repairPatch ::forall m. MonadState Status m => + Patch -> ValidateT (Set (Entity m)) m (Branch.EditHash, Patch) + repairPatch (Patch termEdits typeEdits) = do + termEdits' <- Relation.filterM (uncurry filterTermEdit) termEdits + typeEdits' <- Relation.filterM (uncurry filterTypeEdit) typeEdits + let patch = Patch termEdits' typeEdits' + pure (H.accumulate' patch, patch) + where + filterTermEdit _ = \case + TermEdit.Deprecate -> pure True + TermEdit.Replace (Reference.Builtin _) _ -> pure True + TermEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) _ -> + getTermStatus h >>= \case + Nothing -> Validate.refute (Set.singleton $ T h n) + Just TermOk -> pure True + Just _ -> pure False + filterTypeEdit _ = \case + TypeEdit.Deprecate -> pure True + TypeEdit.Replace (Reference.Builtin _) -> pure True + TypeEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) -> + getDeclStatus h >>= \case + Nothing -> Validate.refute (Set.singleton $ D h n) + Just DeclOk -> pure True + Just _ -> pure False -repair :: Branch m -> ValidateT (Set (Entity m)) m (Branch.Hash, Branch m) -repair = error "not implemented" +type EVT e m = ExceptT e (ValidateT (Set (Entity m)) m) runSrc, runDest :: MonadReader (Env m) m => (Codebase m Symbol () -> a) -> m a runSrc = (Reader.reader srcCodebase <&>) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index c7d13ebdd1..267d1ef5c3 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -96,7 +96,7 @@ import 
Unison.Codebase.FileCodebase.Common , typeReferencesByPrefix --- , failWith - , listDirectory + , listDirectory, getPatch, serializeEdits, patchExists ) import qualified Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex as Sync @@ -190,6 +190,9 @@ codebase1' syncToDirectory branchCache fmtV@(S.Format getV putV) fmtA@(S.Format (branchFromFiles branchCache path) (putBranch path) (hashExists path) + (getPatch path) + (\h p -> serializeEdits path h (pure p)) + (patchExists path) dependents (flip (syncToDirectory fmtV fmtA) path) (syncToDirectory fmtV fmtA path) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs index feb8486b15..1007db27a0 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs @@ -44,6 +44,8 @@ module Unison.Codebase.FileCodebase.Common , serializeRawBranch , branchFromFiles , putBranch + , getPatch + , patchExists , branchHashesByPrefix , termReferencesByPrefix , termReferentsByPrefix @@ -310,6 +312,13 @@ deserializeEdits root h = Left err -> failWith $ InvalidEditsFile file err Right edits -> pure edits +getPatch :: MonadIO m => CodebasePath -> Branch.EditHash -> m (Maybe Patch) +getPatch root h = + let file = editsPath root h + in S.getFromFile' V1.getEdits file >>= \case + Left _err -> pure Nothing + Right edits -> pure (Just edits) + getRootBranch :: forall m. 
MonadIO m => Branch.Cache m -> CodebasePath -> m (Either Codebase.GetRootBranchError (Branch m)) getRootBranch cache root = time "FileCodebase.Common.getRootBranch" $ @@ -353,10 +362,13 @@ serializeRawBranch serializeRawBranch root h = S.putWithParentDirs (V1.putRawCausal V1.putRawBranch) (branchPath root h) +patchExists :: MonadIO m => CodebasePath -> Branch.EditHash -> m Bool +patchExists root h = doesFileExist (editsPath root h) + serializeEdits :: MonadIO m => CodebasePath -> Branch.EditHash -> m Patch -> m () serializeEdits root h medits = - unlessM (doesFileExist (editsPath root h)) $ do + unlessM (patchExists root h) $ do edits <- medits S.putWithParentDirs V1.putEdits (editsPath root h) edits diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 375afc3aec..64379e3aa0 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -19,7 +19,7 @@ import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.State (MonadState) import qualified Control.Monad.State as State import Control.Monad.Trans (MonadTrans (lift)) -import Control.Monad.Trans.Maybe (MaybeT) +import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) import Data.Bifunctor (Bifunctor (first), second) import qualified Data.Either.Combinators as Either import Data.Foldable (Foldable (toList), traverse_) @@ -57,6 +57,7 @@ import qualified Unison.Codebase as Codebase1 import Unison.Codebase.Branch (Branch (..)) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Causal as Causal +import Unison.Codebase.Patch (Patch) import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) import qualified Unison.Codebase.SqliteCodebase.Branch.Dependencies as BD @@ -68,7 +69,7 @@ import Unison.DataDeclaration (Decl) import qualified Unison.DataDeclaration as Decl import Unison.Hash 
(Hash) import Unison.Parser (Ann) -import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, trace, traceM) +import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, trace, traceM, isJust) import qualified Unison.PrettyTerminal as PT import Unison.Reference (Reference) import qualified Unison.Reference as Reference @@ -487,6 +488,20 @@ sqliteCodebase root = do Nothing -> pure False Just hId -> Q.isCausalHash hId + getPatch :: MonadIO m => Branch.EditHash -> m (Maybe Patch) + getPatch h = runDB conn . runMaybeT $ + MaybeT (Ops.primaryHashToMaybePatchObjectId (Cv.patchHash1to2 h)) + >>= Ops.loadPatchById + >>= Cv.patch2to1 getCycleLen + + putPatch :: MonadIO m => Branch.EditHash -> Patch -> m () + putPatch h p = runDB conn . void $ + Ops.savePatch (Cv.patchHash1to2 h) (Cv.patch1to2 p) + + patchExists :: MonadIO m => Branch.EditHash -> m Bool + patchExists h = runDB conn . fmap isJust $ + Ops.primaryHashToMaybePatchObjectId (Cv.patchHash1to2 h) + dependentsImpl :: MonadIO m => Reference -> m (Set Reference.Id) dependentsImpl r = runDB conn $ @@ -743,6 +758,9 @@ sqliteCodebase root = do getBranchForHash putBranch isCausalHash + getPatch + putPatch + patchExists dependentsImpl syncFromDirectory syncToDirectory @@ -794,7 +812,7 @@ syncProgress = Sync.Progress need done allDone need, done :: (MonadState SyncProgressState m, MonadIO m) => Sync22.Entity -> m () need h = do - Monad.whenM (fmap (>0) $ State.gets size) $ liftIO $ putStr "\n" + Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" State.get >>= \case SyncProgressState Nothing Left {} -> pure () SyncProgressState (Just need) (Right done) -> @@ -808,7 +826,7 @@ syncProgress = Sync.Progress need done allDone State.get >>= liftIO . putStr . (\s -> "\rSynced " <> s <> " entities.") . 
renderState done h = do - Monad.whenM (fmap (>0) $ State.gets size) $ liftIO $ putStr "\n" + Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" State.get >>= \case SyncProgressState Nothing (Left count) -> State.put $ SyncProgressState Nothing (Left (count + 1)) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index e5f8cc7364..cb5e8c972e 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -296,6 +296,9 @@ hash1to2 (V1.Hash bs) = V2.Hash.Hash (SBS.toShort bs) branchHash1to2 :: V1.Branch.Hash -> V2.CausalHash branchHash1to2 = V2.CausalHash . hash1to2 . V1.Causal.unRawHash +patchHash1to2 :: V1.Branch.EditHash -> V2.PatchHash +patchHash1to2 = V2.PatchHash . hash1to2 + reference2to1 :: Applicative m => (Hash -> m V1.Reference.Size) -> V2.Reference -> m V1.Reference reference2to1 lookupSize = \case V2.ReferenceBuiltin t -> pure $ V1.Reference.Builtin t diff --git a/unison-core/src/Unison/Reference.hs b/unison-core/src/Unison/Reference.hs index 34e090849b..7255d771ba 100644 --- a/unison-core/src/Unison/Reference.hs +++ b/unison-core/src/Unison/Reference.hs @@ -54,6 +54,7 @@ data Reference -- Using an ugly name so no one tempted to use this | DerivedId Id deriving (Eq,Ord,Generic) +pattern Derived :: H.Hash -> Pos -> Size -> Reference pattern Derived h i n = DerivedId (Id h i n) -- A good idea, but causes a weird problem with view patterns in PatternP.hs in ghc 8.4.3 diff --git a/unison-core/src/Unison/Util/Relation.hs b/unison-core/src/Unison/Util/Relation.hs index 48d2a3c91c..32786947fb 100644 --- a/unison-core/src/Unison/Util/Relation.hs +++ b/unison-core/src/Unison/Util/Relation.hs @@ -4,12 +4,13 @@ module Unison.Util.Relation where import Unison.Prelude hiding (empty, toList) import Prelude hiding ( null, map, filter ) -import Data.Bifunctor ( 
first, second ) +import Data.Bifunctor ( first, second, Bifunctor ) import qualified Data.List as List import qualified Data.Map as M import qualified Data.Set as S import qualified Data.Map as Map import qualified Unison.Hashable as H +import qualified Control.Monad as Monad -- | -- This implementation avoids using @"Set (a,b)"@ because @@ -232,6 +233,9 @@ filterRan f r = r |> S.filter f (ran r) filter :: (Ord a, Ord b) => ((a, b) -> Bool) -> Relation a b -> Relation a b filter f = fromList . List.filter f . toList +filterM :: (Applicative m, Ord a, Ord b) => ((a, b) -> m Bool) -> Relation a b -> m (Relation a b) +filterM f = fmap fromList . Monad.filterM f . toList + -- | Restricts the relation to domain elements having multiple range elements filterManyDom :: (Ord a, Ord b) => Relation a b -> Relation a b filterManyDom r = filterDom (`manyDom` r) r @@ -477,6 +481,10 @@ bimap :: (Ord a, Ord b, Ord c, Ord d) => (a -> c) -> (b -> d) -> Relation a b -> Relation c d bimap f g = fromList . fmap (\(a,b) -> (f a, g b)) . toList +bitraverse :: (Applicative f, Ord a, Ord b, Ord c, Ord d) + => (a -> f c) -> (b -> f d) -> Relation a b -> f (Relation c d) +bitraverse f g = fmap fromList . traverse (\(a,b) -> (,) <$> f a <*> g b) . 
toList + instance (Ord a, Ord b) => Monoid (Relation a b) where mempty = empty mappend = (<>) From a496c111075ef6ad9d01e29a60d90f9a2f0e2e50 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 19 Mar 2021 14:37:05 -0400 Subject: [PATCH 133/225] read gc generation from right codebase --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 0afb13059a..754218dc06 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -100,7 +100,7 @@ sync22 = do hCache <- Cache.semispaceCache size oCache <- Cache.semispaceCache size cCache <- Cache.semispaceCache size - gc <- runSrc $ Q.getNurseryGeneration + gc <- runDest $ Q.getNurseryGeneration pure $ Sync (trySync tCache hCache oCache cCache (succ gc)) trySync :: @@ -129,7 +129,7 @@ trySync' :: Generation -> Entity -> m (TrySyncResult Entity) -trySync' tCache hCache oCache cCache _gc e = case e of +trySync' tCache hCache oCache cCache _gc = \case -- for causals, we need to get the value_hash_id of the thingo -- - maybe enqueue their parents -- - enqueue the self_ and value_ hashes From 9378ac958a42f32a11f462ae19a61aa68e9db899 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 20 Mar 2021 00:16:43 -0400 Subject: [PATCH 134/225] wip v1 > v2 conversion --- codebase2/codebase-sync/U/Codebase/Sync.hs | 28 +- .../src/Unison/Codebase/Causal.hs | 6 +- .../src/Unison/Codebase/Conversion/Sync12.hs | 269 +++++++++++------- .../src/Unison/Codebase/SqliteCodebase.hs | 76 +++-- .../unison-parser-typechecker.cabal | 1 + 5 files changed, 227 insertions(+), 153 deletions(-) diff --git a/codebase2/codebase-sync/U/Codebase/Sync.hs b/codebase2/codebase-sync/U/Codebase/Sync.hs index b5d43abaae..c0b829e27b 100644 --- a/codebase2/codebase-sync/U/Codebase/Sync.hs +++ 
b/codebase2/codebase-sync/U/Codebase/Sync.hs @@ -6,31 +6,9 @@ {-# LANGUAGE RankNTypes #-} module U.Codebase.Sync where --- localSyncFile --- (srcPath :: CodebasePath) --- (destPath :: CodebasePath) --- (root :: Maybe ShortBranchHash) --- (path :: UnisonPath) --- = error "todo" - --- localSyncSql --- (srcDb :: Connection) --- (destDb :: Connection) --- (root :: Maybe ShortBranchHash) --- (path :: UnisonPath) --- = error "todo" - --- data Reference t h = Builtin t | Derived h Pos --- -- |The 0-based index in a definition component/cycle --- newtype Pos = Pos { unPos :: Word64 } --- data RefId h = RefId h Pos --- data TermRef t h = TermRef (Reference t h) | TermCon (Reference t h) ConstructorId --- newtype ConstructorId = ConstructorId { unConstructorId :: Word64 } --- data TermRefId h = TermRefId (RefId h) | TermConId (RefId h) ConstructorId - import Data.Foldable (traverse_) -data TrySyncResult h = Missing [h] | Done | PreviouslyDone +data TrySyncResult h = Missing [h] | Done | PreviouslyDone | NonFatalError deriving Show data Sync m h = Sync { trySync :: h -> m (TrySyncResult h) } @@ -41,11 +19,12 @@ transformSync f (Sync t) = Sync (f . t) data Progress m h = Progress { need :: h -> m (), done :: h -> m (), + error :: h -> m (), allDone :: m () } transformProgress :: (forall a. m a -> n a) -> Progress m h -> Progress n h -transformProgress f (Progress a b c) = Progress (f . a) (f . b) (f c) +transformProgress f (Progress a b c d) = Progress (f . a) (f . b) (f . c) (f d) sync :: forall m h. 
Monad m => Sync m h -> Progress m h -> [h] -> m () sync Sync {..} Progress {..} roots = go roots @@ -56,4 +35,5 @@ sync Sync {..} Progress {..} roots = go roots Missing deps -> traverse_ need deps >> go (deps ++ h : hs) Done -> done h >> go hs PreviouslyDone -> go hs + NonFatalError -> error h >> go hs go [] = allDone \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/Causal.hs b/parser-typechecker/src/Unison/Codebase/Causal.hs index 672990a606..8e7e33238f 100644 --- a/parser-typechecker/src/Unison/Codebase/Causal.hs +++ b/parser-typechecker/src/Unison/Codebase/Causal.hs @@ -295,8 +295,10 @@ one :: Hashable e => e -> Causal m h e one e = One (RawHash $ hash e) e cons :: (Applicative m, Hashable e) => e -> Causal m h e -> Causal m h e -cons e tl = - Cons (RawHash $ hash [hash e, unRawHash . currentHash $ tl]) e (currentHash tl, pure tl) +cons e tl = cons' e (currentHash tl) (pure tl) + +cons' :: Hashable e => e -> RawHash h -> m (Causal m h e) -> Causal m h e +cons' e ht mt = Cons (RawHash $ hash [hash e, unRawHash ht]) e (ht, mt) consDistinct :: (Applicative m, Eq e, Hashable e) => e -> Causal m h e -> Causal m h e consDistinct e tl = diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 14a374b77b..ff3affdb86 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -1,6 +1,9 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TemplateHaskell #-} {-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeOperators #-} {-# LANGUAGE ViewPatterns #-} module Unison.Codebase.Conversion.Sync12 where @@ -17,6 +20,7 @@ import Control.Monad.Trans.Except (ExceptT) import Control.Monad.Validate (MonadValidate, ValidateT, runValidateT) import qualified Control.Monad.Validate as Validate import Control.Monad.Writer +import 
Control.Natural (type (~>)) import Data.Bifoldable (bitraverse_) import Data.Foldable (traverse_) import qualified Data.Foldable as Foldable @@ -32,10 +36,13 @@ import U.Codebase.Sync (Sync (Sync), TrySyncResult) import qualified U.Codebase.Sync as Sync import Unison.Codebase (Codebase) import qualified Unison.Codebase as Codebase -import Unison.Codebase.Branch (Branch) +import Unison.Codebase.Branch (Branch, UnwrappedBranch) import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Causal as Causal import qualified Unison.Codebase.Conversion.Sync12BranchDependencies as BD import Unison.Codebase.Patch (Patch (..)) +import qualified Unison.Codebase.TermEdit as TermEdit +import qualified Unison.Codebase.TypeEdit as TypeEdit import Unison.DataDeclaration (DataDeclaration, Decl) import qualified Unison.DataDeclaration as DD import Unison.Hash (Hash) @@ -49,8 +56,6 @@ import qualified Unison.Term as Term import Unison.Type (Type) import qualified Unison.Type as Type import qualified Unison.Util.Relation as Relation -import qualified Unison.Codebase.TermEdit as TermEdit -import qualified Unison.Codebase.TypeEdit as TypeEdit data Env m = Env { srcCodebase :: Codebase m Symbol (), @@ -59,7 +64,7 @@ data Env m = Env } data Entity m - = C Branch.Hash (m (Branch m)) + = C Branch.Hash (m (UnwrappedBranch m)) | T Hash Reference.Size | D Hash Reference.Size | P Branch.EditHash @@ -78,14 +83,41 @@ toEntity' = \case D h _ -> D' h P h -> P' h -instance Eq (Entity m) where x == y = toEntity' x == toEntity' y +instance Eq (Entity m) where + x == y = toEntity' x == toEntity' y -instance Ord (Entity m) where compare x y = compare (toEntity' x) (toEntity' y) +instance Ord (Entity m) where + x `compare` y = toEntity' x `compare` toEntity' y -data BranchStatus +data BranchStatus m = BranchOk - | BranchMissing - | BranchReplaced Branch.Hash + | BranchReplaced Branch.Hash (UnwrappedBranch m) + +data BranchStatus' + = BranchOk' + | BranchReplaced' Branch.Hash + 
deriving (Eq, Ord) + +toBranchStatus' :: BranchStatus m -> BranchStatus' +toBranchStatus' = \case + BranchOk -> BranchOk' + BranchReplaced h _ -> BranchReplaced' h + +instance Eq (BranchStatus m) where + x == y = toBranchStatus' x == toBranchStatus' y + +instance Ord (BranchStatus m) where + x `compare` y = toBranchStatus' x `compare` toBranchStatus' y + +type V m n = MonadValidate (Set (Entity m)) n + +type E e n = MonadError e n + +type S m n = MonadState (Status m) n + +type R m n = MonadReader (Env m) n + +type RS m n = (R m n, S m n) data TermStatus = TermOk @@ -103,8 +135,8 @@ data PatchStatus | PatchMissing | PatchReplaced Branch.EditHash -data Status = Status - { _branchStatus :: Map Branch.Hash BranchStatus, +data Status m = Status + { _branchStatus :: Map Branch.Hash (BranchStatus m), _termStatus :: Map Hash TermStatus, _declStatus :: Map Hash DeclStatus, _patchStatus :: Map Branch.EditHash PatchStatus @@ -120,100 +152,106 @@ instance Show (Entity m) where P h -> "P " ++ show h sync12 :: - (MonadIO m, MonadRWS (Env m) () Status m) => - m (Sync m (Entity m)) -sync12 = do + forall m n. + (MonadIO n, RS m n, Applicative m) => + (m ~> n) -> + n (Sync n (Entity m)) +sync12 t = do gc <- runDest' $ Q.getNurseryGeneration - pure $ Sync (trySync (succ gc)) + pure $ Sync (trySync t (succ gc)) trySync :: - forall m. - MonadRWS (Env m) () Status m => + forall m n. + (R m n, S m n, Applicative m) => + (m ~> n) -> Generation -> Entity m -> - m (TrySyncResult (Entity m)) -trySync _gc e = do + n (TrySyncResult (Entity m)) +trySync t _gc e = do Env src dest _ <- Reader.ask case e of - C h mb -> + C h mc -> isSyncedCausal h >>= \case True -> pure Sync.PreviouslyDone False -> do - result <- runValidateT @(Set (Entity m)) @m @() do - b <- lift mb - (h', b') <- repairBranch b - setBranchStatus h h' - lift $ Codebase.putBranch dest b' - case result of + c <- t mc + runValidateT @_ @n (repairBranch c) >>= \case Left deps -> pure . 
Sync.Missing $ Foldable.toList deps - Right () -> pure Sync.Done + Right c' -> do + let h' = Causal.currentHash c' + if h == h' + then setBranchStatus @m @n h BranchOk + else setBranchStatus h (BranchReplaced h' c') + t $ Codebase.putBranch dest (Branch.Branch c') + pure Sync.Done T h n -> - getTermStatus h >>= \case + getTermStatus @n @m h >>= \case Just {} -> pure Sync.PreviouslyDone Nothing -> do - result <- runValidateT do - runExceptT (checkTermComponent h n) >>= \case - Left status -> setTermStatus h status - Right component -> do - Foldable.for_ (zip component [0 ..]) \((term, typ), i) -> - lift $ Codebase.putTerm dest (Reference.Id h i n) term typ - setTermStatus h TermOk - case result of - Left deps -> pure . Sync.Missing $ Foldable.toList deps - Right () -> pure Sync.Done + runExceptT (runValidateT (checkTermComponent (lift . lift . t) h n)) >>= \case + Left status -> do + setTermStatus h status + pure Sync.NonFatalError + Right (Left deps) -> + pure . Sync.Missing $ Foldable.toList deps + Right (Right component) -> do + Foldable.for_ (zip component [0 ..]) \((term, typ), i) -> + t $ Codebase.putTerm dest (Reference.Id h i n) term typ + setTermStatus h TermOk + pure Sync.Done D h n -> - getDeclStatus h >>= \case + getDeclStatus @n @m h >>= \case Just {} -> pure Sync.PreviouslyDone - Nothing -> do - result <- runValidateT do - runExceptT (checkDeclComponent h n) >>= \case - Left status -> setDeclStatus h status - Right component -> do - Foldable.for_ (zip component [0 ..]) \(decl, i) -> - lift $ Codebase.putTypeDeclaration dest (Reference.Id h i n) decl - setDeclStatus h DeclOk - case result of - Left deps -> pure . Sync.Missing $ Foldable.toList deps - Right () -> pure Sync.Done + Nothing -> + runExceptT (runValidateT (checkDeclComponent (lift . lift . t) h n)) >>= \case + Left status -> do + setDeclStatus h status + pure Sync.NonFatalError + Right (Left deps) -> + pure . 
Sync.Missing $ Foldable.toList deps + Right (Right component) -> do + Foldable.for_ (zip component [0 ..]) \(decl, i) -> + t $ Codebase.putTypeDeclaration dest (Reference.Id h i n) decl + setDeclStatus h DeclOk + pure Sync.Done P h -> getPatchStatus h >>= \case Just {} -> pure Sync.PreviouslyDone - Nothing -> do - result <- runValidateT do - runExceptT (checkPatch h) >>= \case - Left status -> setPatchStatus h status - Right (h', patch) -> do - lift $ Codebase.putPatch dest h patch - setPatchStatus h PatchOk - case result of - Left deps -> pure . Sync.Missing $ Foldable.toList deps - Right () -> pure Sync.Done + Nothing -> + runExceptT (runValidateT (checkPatch (lift . lift . t) h)) >>= \case + Left status -> setPatchStatus h status >> pure Sync.NonFatalError + Right (Left deps) -> pure . Sync.Missing $ Foldable.toList deps + Right (Right (h', patch')) -> do + t $ Codebase.putPatch dest h' patch' + setPatchStatus h PatchOk + pure Sync.Done where - isSyncedCausal :: Branch.Hash -> m Bool + isSyncedCausal :: forall n. Branch.Hash -> n Bool isSyncedCausal = undefined + getBranchStatus :: forall n m. S m n => Branch.Hash -> n (Maybe (BranchStatus m)) + getBranchStatus h = use (branchStatus . at h) + getTermStatus :: forall n m. S m n => Hash -> n (Maybe TermStatus) getTermStatus h = use (termStatus . at h) + getDeclStatus :: forall n m. S m n => Hash -> n (Maybe DeclStatus) getDeclStatus h = use (declStatus . at h) getPatchStatus h = use (patchStatus . at h) setTermStatus h s = termStatus . at h .= Just s setDeclStatus h s = declStatus . at h .= Just s setPatchStatus h s = patchStatus . at h .= Just s - setBranchStatus :: forall m. MonadState Status m => Branch.Hash -> Branch.Hash -> m () - setBranchStatus h h' = - if h == h' - then branchStatus . at h .= Just BranchOk - else branchStatus . at h .= Just (BranchReplaced h') - + setBranchStatus :: forall m n. S m n => Branch.Hash -> BranchStatus m -> n () + setBranchStatus h s = branchStatus . 
at h .= Just s checkTermComponent :: - forall m. - (MonadState Status m, MonadReader (Env m) m) => + forall m n. + (RS m n, V m n, E TermStatus n) => + (m ~> n) -> Hash -> Reference.Size -> - EVT TermStatus m [(Term Symbol (), Type Symbol ())] - checkTermComponent h n = do + n [(Term Symbol (), Type Symbol ())] + checkTermComponent t h n = do Env src _ _ <- Reader.ask for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do - term <- lift . lift $ Codebase.getTerm src r - typ <- lift . lift $ Codebase.getTypeOfTermImpl src r + term <- t $ Codebase.getTerm src r + typ <- t $ Codebase.getTypeOfTermImpl src r case (term, typ) of (Nothing, _) -> Except.throwError TermMissing (_, Nothing) -> Except.throwError TermMissingType @@ -237,17 +275,17 @@ trySync _gc e = do traverse_ (bitraverse_ checkDecl checkTerm . LD.toReference) termDeps traverse_ checkDecl typeDeps pure (term, typ) - checkDeclComponent :: - forall m. - (MonadState Status m, MonadReader (Env m) m) => + forall m n. + (RS m n, E DeclStatus n, V m n) => + (m ~> n) -> Hash -> Reference.Size -> - EVT DeclStatus m [Decl Symbol ()] - checkDeclComponent h n = do + n [Decl Symbol ()] + checkDeclComponent t h n = do Env src _ _ <- Reader.ask for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do - decl <- lift . lift $ Codebase.getTypeDeclaration src r + decl <- t $ Codebase.getTypeDeclaration src r case decl of Nothing -> Except.throwError DeclMissing Just decl -> do @@ -261,29 +299,69 @@ trySync _gc e = do Nothing -> Validate.dispute . Set.singleton $ D h' n' traverse_ checkDecl deps pure decl - checkPatch :: - forall m. - (MonadState Status m, MonadReader (Env m) m) => + forall m n. + (RS m n, E PatchStatus n, V m n) => + (m ~> n) -> Branch.EditHash -> - EVT PatchStatus m (Branch.EditHash, Patch) - checkPatch h = do + n (Branch.EditHash, Patch) + checkPatch t h = do Env src _ _ <- Reader.ask - (lift . 
lift $ Codebase.getPatch src h) >>= \case + t (Codebase.getPatch src h) >>= \case Nothing -> Except.throwError PatchMissing Just patch -> do - (h', patch) <- lift $ repairPatch patch + (h', patch) <- repairPatch patch if h == h' then setPatchStatus h PatchOk else setPatchStatus h (PatchReplaced h') pure (h', patch) + repairBranch :: + forall m n. + (S m n, V m n, Applicative m) => + UnwrappedBranch m -> + n (UnwrappedBranch m) + repairBranch = \case + Causal.One _h e -> do + e' <- repairBranch0 e + pure $ Causal.one e' + Causal.Cons _h e (ht, mt) -> do + getBranchStatus @n @m ht >>= \case + Nothing -> Validate.refute . Set.singleton $ C ht mt + Just tailStatus -> do + e' <- repairBranch0 e + pure case tailStatus of + BranchOk -> Causal.cons' e' ht mt + BranchReplaced _ht' t' -> Causal.consDistinct e' t' + Causal.Merge _h e (Map.toList -> tails) -> do + tails' <- + Map.fromList <$> for tails \(ht, mt) -> + getBranchStatus @n @m ht >>= \case + Nothing -> Validate.refute . Set.singleton $ C ht mt + Just tailStatus -> + pure case tailStatus of + BranchOk -> (ht, mt) + BranchReplaced ht' t' -> (ht', pure t') + e' <- repairBranch0 e + let h' = Causal.RawHash $ Causal.hash (e', Map.keys tails') + pure $ Causal.Merge h' e' tails' + repairBranch0 :: + forall m n. + (S m n, V m n) => + Branch.Branch0 m -> + n (Branch.Branch0 m) + repairBranch0 b = do + terms' <- error "filterTermStar" (view Branch.terms b) + types' <- error "filterTermStar" (view Branch.types b) + children' <- error "filterChildren" (view Branch.children b) + edits' <- error "filterEdits" (view Branch.edits b) + pure @n $ Branch.branch0 terms' types' children' edits' - repairBranch :: Branch m -> ValidateT (Set (Entity m)) m (Branch.Hash, Branch m) - repairBranch = error "not implemented" - - repairPatch ::forall m. MonadState Status m => - Patch -> ValidateT (Set (Entity m)) m (Branch.EditHash, Patch) + repairPatch :: + forall m n. 
+ (MonadState (Status m) n, MonadValidate (Set (Entity m)) n) => + Patch -> + n (Branch.EditHash, Patch) repairPatch (Patch termEdits typeEdits) = do termEdits' <- Relation.filterM (uncurry filterTermEdit) termEdits typeEdits' <- Relation.filterM (uncurry filterTypeEdit) typeEdits @@ -307,16 +385,11 @@ trySync _gc e = do Just DeclOk -> pure True Just _ -> pure False -type EVT e m = ExceptT e (ValidateT (Set (Entity m)) m) - -runSrc, runDest :: MonadReader (Env m) m => (Codebase m Symbol () -> a) -> m a +runSrc, runDest :: R m n => (Codebase m Symbol () -> a) -> n a runSrc = (Reader.reader srcCodebase <&>) runDest = (Reader.reader destCodebase <&>) -runDest' :: - (MonadReader (Env m) m) => - ReaderT Connection m a -> - m a +runDest' :: R m n => ReaderT Connection n a -> n a runDest' ma = Reader.reader destConnection >>= flip runDB ma runDB :: Connection -> ReaderT Connection m a -> m a diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 64379e3aa0..0a38e77105 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -20,7 +20,7 @@ import Control.Monad.State (MonadState) import qualified Control.Monad.State as State import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) -import Data.Bifunctor (Bifunctor (first), second) +import Data.Bifunctor (Bifunctor (first, bimap), second) import qualified Data.Either.Combinators as Either import Data.Foldable (Foldable (toList), traverse_) import Data.Functor (void) @@ -794,52 +794,70 @@ data Entity m data SyncProgressState = SyncProgressState { needEntities :: Maybe (Set Sync22.Entity), - doneEntities :: Either Int (Set Sync22.Entity) + doneEntities :: Either Int (Set Sync22.Entity), + warnEntities :: Either Int (Set Sync22.Entity) } emptySyncProgressState :: SyncProgressState -emptySyncProgressState = SyncProgressState (Just 
mempty) (Right mempty) +emptySyncProgressState = SyncProgressState (Just mempty) (Right mempty) (Right mempty) syncProgress :: MonadState SyncProgressState m => MonadIO m => Sync.Progress m Sync22.Entity -syncProgress = Sync.Progress need done allDone +syncProgress = Sync.Progress need done warn allDone where maxTrackedHashCount = 1024 * 1024 size :: SyncProgressState -> Int size = \case - SyncProgressState Nothing (Left i) -> i - SyncProgressState (Just need) (Right done) -> Set.size need + Set.size done - SyncProgressState _ _ -> undefined + SyncProgressState Nothing (Left i) (Left j) -> i + j + SyncProgressState (Just need) (Right done) (Right warn) -> Set.size need + Set.size done + Set.size warn + SyncProgressState _ _ _ -> undefined - need, done :: (MonadState SyncProgressState m, MonadIO m) => Sync22.Entity -> m () + need, done, warn :: (MonadState SyncProgressState m, MonadIO m) => Sync22.Entity -> m () need h = do Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" State.get >>= \case - SyncProgressState Nothing Left {} -> pure () - SyncProgressState (Just need) (Right done) -> - if Set.size need + Set.size done > maxTrackedHashCount - then State.put $ SyncProgressState Nothing (Left $ Set.size done) + SyncProgressState Nothing Left {} Left {} -> pure () + SyncProgressState (Just need) (Right done) (Right warn) -> + if Set.size need + Set.size done + Set.size warn > maxTrackedHashCount + then State.put $ SyncProgressState Nothing (Left $ Set.size done) (Left $ Set.size warn) else - if Set.member h done + if Set.member h done || Set.member h warn then pure () - else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) - SyncProgressState _ _ -> undefined - State.get >>= liftIO . putStr . (\s -> "\rSynced " <> s <> " entities.") . renderState + else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) (Right warn) + SyncProgressState _ _ _ -> undefined + State.get >>= liftIO . putStr . 
renderState ("Synced ") done h = do Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" State.get >>= \case - SyncProgressState Nothing (Left count) -> - State.put $ SyncProgressState Nothing (Left (count + 1)) - SyncProgressState (Just need) (Right done) -> - State.put $ SyncProgressState (Just $ Set.delete h need) (Right $ Set.insert h done) - SyncProgressState _ _ -> undefined - State.get >>= liftIO . putStr . (\s -> "\rSynced " <> s <> " entities.") . renderState + SyncProgressState Nothing (Left done) warn -> + State.put $ SyncProgressState Nothing (Left (done + 1)) warn + SyncProgressState (Just need) (Right done) warn -> + State.put $ SyncProgressState (Just $ Set.delete h need) (Right $ Set.insert h done) warn + SyncProgressState _ _ _ -> undefined + State.get >>= liftIO . putStr . renderState ("Synced ") + + warn h = do + Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" + State.get >>= \case + SyncProgressState Nothing done (Left warn) -> + State.put $ SyncProgressState Nothing done (Left $ warn + 1) + SyncProgressState (Just need) done (Right warn) -> + State.put $ SyncProgressState (Just $ Set.delete h need) done (Right $ Set.insert h warn) + SyncProgressState _ _ _ -> undefined + State.get >>= liftIO . putStr . renderState ("Synced ") allDone = - State.get >>= liftIO . putStrLn . (\s -> "\rDone syncing " <> s <> " entities.") . renderState - - renderState = \case - SyncProgressState Nothing (Left doneCount) -> show doneCount - SyncProgressState (Just need) (Right done) -> show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need) - SyncProgressState Nothing Right {} -> "(invalid SyncProgressState Nothing Right{})" - SyncProgressState Just {} Left {} -> "(invalid SyncProgressState Just{} Left{})" \ No newline at end of file + State.get >>= liftIO . putStr . 
renderState ("Done syncing ") + + renderState prefix = \case + SyncProgressState Nothing (Left done) (Left warn) -> + "\r" ++ prefix ++ show done ++ " entities" ++ if warn > 0 then " with " ++ show warn ++ " warnings." else "." + SyncProgressState (Just need) (Right done) (Right warn) -> + "\r" ++ prefix ++ show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need + Set.size warn) + ++ " entities" + ++ if Set.size warn > 0 + then " with " ++ show warn ++ " warnings." + else "." + SyncProgressState need done warn -> "invalid SyncProgressState " ++ + show (fmap v need, bimap id v done, bimap id v warn) + where v = const () diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 182d53e0aa..f79349c9ac 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -239,6 +239,7 @@ library mtl, murmur-hash, mutable-containers, + natural-transformation, network, network-simple, nonempty-containers, From 7ade3699e8c79430f1d1815b0df3f63b6e903778 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 20 Mar 2021 01:49:01 -0400 Subject: [PATCH 135/225] optimize imports --- .../src/Unison/Codebase/Conversion/Sync12.hs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index ff3affdb86..9cb97df480 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -5,28 +5,24 @@ {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE ApplicativeDo #-} module Unison.Codebase.Conversion.Sync12 where import Control.Lens import Control.Monad.Except (MonadError, runExceptT) import qualified Control.Monad.Except as Except -import Control.Monad.RWS (MonadRWS) import 
Control.Monad.Reader import qualified Control.Monad.Reader as Reader import Control.Monad.State (MonadState) -import qualified Control.Monad.State as State -import Control.Monad.Trans.Except (ExceptT) -import Control.Monad.Validate (MonadValidate, ValidateT, runValidateT) +import Control.Monad.Validate (MonadValidate, runValidateT) import qualified Control.Monad.Validate as Validate -import Control.Monad.Writer import Control.Natural (type (~>)) import Data.Bifoldable (bitraverse_) import Data.Foldable (traverse_) import qualified Data.Foldable as Foldable import Data.Map (Map) import qualified Data.Map as Map -import Data.Sequence (Seq) import qualified Data.Set as Set import Data.Traversable (for) import Database.SQLite.Simple (Connection) @@ -36,19 +32,18 @@ import U.Codebase.Sync (Sync (Sync), TrySyncResult) import qualified U.Codebase.Sync as Sync import Unison.Codebase (Codebase) import qualified Unison.Codebase as Codebase -import Unison.Codebase.Branch (Branch, UnwrappedBranch) +import Unison.Codebase.Branch (UnwrappedBranch) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Codebase.Conversion.Sync12BranchDependencies as BD import Unison.Codebase.Patch (Patch (..)) import qualified Unison.Codebase.TermEdit as TermEdit import qualified Unison.Codebase.TypeEdit as TypeEdit -import Unison.DataDeclaration (DataDeclaration, Decl) +import Unison.DataDeclaration (Decl) import qualified Unison.DataDeclaration as DD import Unison.Hash (Hash) import qualified Unison.Hashable as H import qualified Unison.LabeledDependency as LD -import Unison.Prelude (Set, Word64, ifM, (<&>)) +import Unison.Prelude (Set) import qualified Unison.Reference as Reference import Unison.Symbol (Symbol) import Unison.Term (Term) From 20d69578194b245461088392487078a8abf226df Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sun, 21 Mar 2021 21:35:38 -0400 Subject: [PATCH 136/225] more debug output --- 
.../codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index fef334219c..9eb03e7bbd 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -38,7 +38,7 @@ import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (. import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) -import Debug.Trace (trace, traceM) +import Debug.Trace (trace, traceM, traceShowM) import GHC.Stack (HasCallStack) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') @@ -62,7 +62,7 @@ type EDB m = (DB m, Err m) type Err m = (MonadError Integrity m, HasCallStack) debugQuery :: Bool -debugQuery = False +debugQuery = True alwaysTraceOnCrash :: Bool alwaysTraceOnCrash = True @@ -608,18 +608,20 @@ queryExists q r = not . null . map (id @SQLData) <$> queryAtoms q r query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] query q r = do c <- ask - liftIO . queryTrace "query" q r $ SQLite.query c q r + liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " query") q r $ SQLite.query c q r -- | no input, composite List output query_ :: (DB m, FromRow r, Show r) => SQLite.Query -> m [r] query_ q = do c <- ask - liftIO . queryTrace_ "query" q $ SQLite.query_ c q + liftIO . queryTrace_ (show (SQLite.connectionHandle c) ++ " query") q $ SQLite.query_ c q queryTrace :: (MonadUnliftIO m, Show q, Show a) => String -> SQLite.Query -> q -> m a -> m a queryTrace title query input m = if debugQuery || alwaysTraceOnCrash then + do + traceShowM query try @_ @SQLite.SQLError m >>= \case Right a -> do when debugQuery . 
traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a @@ -643,13 +645,13 @@ queryTrace_ title query m = else m execute :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m () -execute q r = do c <- ask; liftIO . queryTrace "execute" q r $ SQLite.execute c q r +execute q r = do c <- ask; liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " " ++ "execute") q r $ SQLite.execute c q r execute_ :: DB m => SQLite.Query -> m () -execute_ q = do c <- ask; liftIO . queryTrace "execute_" q "" $ SQLite.execute_ c q +execute_ q = do c <- ask; liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " " ++ "execute_") q "" $ SQLite.execute_ c q executeMany :: (DB m, ToRow q, Show q) => SQLite.Query -> [q] -> m () -executeMany q r = do c <- ask; liftIO . queryTrace "executeMany" q r $ SQLite.executeMany c q r +executeMany q r = do c <- ask; liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " " ++ "executeMany") q r $ SQLite.executeMany c q r -- | transaction that blocks withImmediateTransaction :: (DB m, MonadUnliftIO m) => m a -> m a From 6035defa4f08c992ed781862083783d7aedc53e9 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 22 Mar 2021 12:47:46 -0400 Subject: [PATCH 137/225] fix Q.getNurseryGeneration for empty codebase --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 9eb03e7bbd..cdba60ab62 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -30,7 +30,7 @@ import Data.Functor ((<&>)) import qualified Data.List.Extra as List import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as Nel -import Data.Maybe (fromJust, isJust) +import Data.Maybe (fromJust, isJust, fromMaybe) import Data.String (fromString) import 
Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) @@ -51,7 +51,7 @@ import qualified U.Codebase.WatchKind as WatchKind import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash -import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO) +import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO, tryAny) -- * types @@ -312,7 +312,7 @@ saveCausal self value = execute sql (self, value, Generation 0) where sql = [her getNurseryGeneration :: DB m => m Generation getNurseryGeneration = query_ sql <&> \case [] -> Generation 0 - [fromOnly -> g] -> Generation g + [fromOnly -> g] -> Generation $ fromMaybe 0 g (fmap fromOnly -> gs) -> error $ "How did I get multiple values out of a MAX()? " ++ show gs where sql = [here| @@ -635,7 +635,7 @@ queryTrace_ :: (MonadUnliftIO m, Show a) => String -> SQLite.Query -> m a -> m a queryTrace_ title query m = if debugQuery || alwaysTraceOnCrash then - try @_ @SQLite.SQLError m >>= \case + tryAny @_ m >>= \case Right a -> do when debugQuery . 
traceM $ title ++ " " ++ show query ++ "\n output: " ++ show a pure a From 0409a0ad60d6b5a94015e282d5aa2683bab2e4b2 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 22 Mar 2021 12:47:59 -0400 Subject: [PATCH 138/225] less debug info --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index cdba60ab62..982f5262d1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -62,7 +62,7 @@ type EDB m = (DB m, Err m) type Err m = (MonadError Integrity m, HasCallStack) debugQuery :: Bool -debugQuery = True +debugQuery = False alwaysTraceOnCrash :: Bool alwaysTraceOnCrash = True From 281a657166162425fc54f129a6f6f0644956d84e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 22 Mar 2021 16:03:30 -0400 Subject: [PATCH 139/225] wip v1 > v2 conversion --- codebase2/util/U/Util/Relation.hs | 3 + .../src/Unison/Codebase/Conversion/Sync12.hs | 407 +++++++++++------- unison-core/src/Unison/Util/Relation.hs | 3 + 3 files changed, 258 insertions(+), 155 deletions(-) diff --git a/codebase2/util/U/Util/Relation.hs b/codebase2/util/U/Util/Relation.hs index 57b00207c4..9ea5d04983 100644 --- a/codebase2/util/U/Util/Relation.hs +++ b/codebase2/util/U/Util/Relation.hs @@ -238,6 +238,9 @@ filter f = fromList . List.filter f . toList filterM :: (Applicative m, Ord a, Ord b) => ((a, b) -> m Bool) -> Relation a b -> m (Relation a b) filterM f = fmap fromList . Monad.filterM f . toList +filterDomM :: (Applicative m, Ord a, Ord b) => (a -> m Bool) -> Relation a b -> m (Relation a b) +filterDomM f = fmap fromList . Monad.filterM (f . fst) . 
toList + -- | Restricts the relation to domain elements having multiple range elements filterManyDom :: (Ord a, Ord b) => Relation a b -> Relation a b filterManyDom r = filterDom (`manyDom` r) r diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 9cb97df480..fa0a6400a2 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -5,7 +6,6 @@ {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE ApplicativeDo #-} module Unison.Codebase.Conversion.Sync12 where @@ -23,6 +23,7 @@ import Data.Foldable (traverse_) import qualified Data.Foldable as Foldable import Data.Map (Map) import qualified Data.Map as Map +import Data.Maybe (catMaybes) import qualified Data.Set as Set import Data.Traversable (for) import Database.SQLite.Simple (Connection) @@ -35,6 +36,7 @@ import qualified Unison.Codebase as Codebase import Unison.Codebase.Branch (UnwrappedBranch) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.Metadata as Metadata import Unison.Codebase.Patch (Patch (..)) import qualified Unison.Codebase.TermEdit as TermEdit import qualified Unison.Codebase.TypeEdit as TypeEdit @@ -43,14 +45,19 @@ import qualified Unison.DataDeclaration as DD import Unison.Hash (Hash) import qualified Unison.Hashable as H import qualified Unison.LabeledDependency as LD +import Unison.NameSegment (NameSegment (NameSegment)) import Unison.Prelude (Set) import qualified Unison.Reference as Reference +import Unison.Referent (Referent) +import qualified Unison.Referent as Referent import Unison.Symbol (Symbol) import Unison.Term (Term) import qualified 
Unison.Term as Term import Unison.Type (Type) import qualified Unison.Type as Type +import Unison.Util.Relation (Relation) import qualified Unison.Util.Relation as Relation +import Unison.Util.Star3 (Star3 (Star3)) data Env m = Env { srcCodebase :: Codebase m Symbol (), @@ -220,165 +227,248 @@ trySync t _gc e = do t $ Codebase.putPatch dest h' patch' setPatchStatus h PatchOk pure Sync.Done - where - isSyncedCausal :: forall n. Branch.Hash -> n Bool - isSyncedCausal = undefined - getBranchStatus :: forall n m. S m n => Branch.Hash -> n (Maybe (BranchStatus m)) - getBranchStatus h = use (branchStatus . at h) - getTermStatus :: forall n m. S m n => Hash -> n (Maybe TermStatus) - getTermStatus h = use (termStatus . at h) - getDeclStatus :: forall n m. S m n => Hash -> n (Maybe DeclStatus) - getDeclStatus h = use (declStatus . at h) - getPatchStatus h = use (patchStatus . at h) - setTermStatus h s = termStatus . at h .= Just s - setDeclStatus h s = declStatus . at h .= Just s - setPatchStatus h s = patchStatus . at h .= Just s - setBranchStatus :: forall m n. S m n => Branch.Hash -> BranchStatus m -> n () - setBranchStatus h s = branchStatus . at h .= Just s - checkTermComponent :: - forall m n. - (RS m n, V m n, E TermStatus n) => - (m ~> n) -> - Hash -> - Reference.Size -> - n [(Term Symbol (), Type Symbol ())] - checkTermComponent t h n = do - Env src _ _ <- Reader.ask - for [Reference.Id h i n | i <- [0 .. 
n -1]] \r -> do - term <- t $ Codebase.getTerm src r - typ <- t $ Codebase.getTypeOfTermImpl src r - case (term, typ) of - (Nothing, _) -> Except.throwError TermMissing - (_, Nothing) -> Except.throwError TermMissingType - (Just term, Just typ) -> do - let termDeps = Term.labeledDependencies term - typeDeps = Type.dependencies typ - let checkDecl = \case - Reference.Builtin {} -> pure () - Reference.DerivedId (Reference.Id h' _ n') -> - getDeclStatus h' >>= \case - Just DeclOk -> pure () - Just _ -> Except.throwError TermMissingDependencies - Nothing -> Validate.dispute . Set.singleton $ D h' n' - checkTerm = \case - Reference.Builtin {} -> pure () - Reference.DerivedId (Reference.Id h' _ n') -> - getTermStatus h' >>= \case - Just TermOk -> pure () - Just _ -> Except.throwError TermMissingDependencies - Nothing -> Validate.dispute . Set.singleton $ T h' n' - traverse_ (bitraverse_ checkDecl checkTerm . LD.toReference) termDeps - traverse_ checkDecl typeDeps - pure (term, typ) - checkDeclComponent :: - forall m n. - (RS m n, E DeclStatus n, V m n) => - (m ~> n) -> - Hash -> - Reference.Size -> - n [Decl Symbol ()] - checkDeclComponent t h n = do - Env src _ _ <- Reader.ask - for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do - decl <- t $ Codebase.getTypeDeclaration src r - case decl of - Nothing -> Except.throwError DeclMissing - Just decl -> do - let deps = DD.declDependencies decl - checkDecl = \case - Reference.Builtin {} -> pure () - Reference.DerivedId (Reference.Id h' _ n') -> - getDeclStatus h' >>= \case - Just DeclOk -> pure () - Just _ -> Except.throwError DeclMissingDependencies - Nothing -> Validate.dispute . Set.singleton $ D h' n' - traverse_ checkDecl deps - pure decl - checkPatch :: - forall m n. 
- (RS m n, E PatchStatus n, V m n) => - (m ~> n) -> - Branch.EditHash -> - n (Branch.EditHash, Patch) - checkPatch t h = do - Env src _ _ <- Reader.ask - t (Codebase.getPatch src h) >>= \case - Nothing -> Except.throwError PatchMissing - Just patch -> do - (h', patch) <- repairPatch patch - if h == h' - then setPatchStatus h PatchOk - else setPatchStatus h (PatchReplaced h') - pure (h', patch) - repairBranch :: - forall m n. - (S m n, V m n, Applicative m) => - UnwrappedBranch m -> - n (UnwrappedBranch m) - repairBranch = \case - Causal.One _h e -> do + +isSyncedCausal :: forall n. Branch.Hash -> n Bool +isSyncedCausal = undefined + +getBranchStatus :: forall n m. S m n => Branch.Hash -> n (Maybe (BranchStatus m)) +getBranchStatus h = use (branchStatus . at h) + +getTermStatus :: forall n m. S m n => Hash -> n (Maybe TermStatus) +getTermStatus h = use (termStatus . at h) + +getDeclStatus :: forall n m. S m n => Hash -> n (Maybe DeclStatus) +getDeclStatus h = use (declStatus . at h) + +getPatchStatus h = use (patchStatus . at h) + +setTermStatus h s = termStatus . at h .= Just s + +setDeclStatus h s = declStatus . at h .= Just s + +setPatchStatus h s = patchStatus . at h .= Just s + +setBranchStatus :: forall m n. S m n => Branch.Hash -> BranchStatus m -> n () +setBranchStatus h s = branchStatus . at h .= Just s + +checkTermComponent :: + forall m n. + (RS m n, V m n, E TermStatus n) => + (m ~> n) -> + Hash -> + Reference.Size -> + n [(Term Symbol (), Type Symbol ())] +checkTermComponent t h n = do + Env src _ _ <- Reader.ask + for [Reference.Id h i n | i <- [0 .. 
n -1]] \r -> do + term <- t $ Codebase.getTerm src r + typ <- t $ Codebase.getTypeOfTermImpl src r + case (term, typ) of + (Nothing, _) -> Except.throwError TermMissing + (_, Nothing) -> Except.throwError TermMissingType + (Just term, Just typ) -> do + let termDeps = Term.labeledDependencies term + typeDeps = Type.dependencies typ + let checkDecl = \case + Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ n') -> + getDeclStatus h' >>= \case + Just DeclOk -> pure () + Just _ -> Except.throwError TermMissingDependencies + Nothing -> Validate.dispute . Set.singleton $ D h' n' + checkTerm = \case + Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ n') -> + getTermStatus h' >>= \case + Just TermOk -> pure () + Just _ -> Except.throwError TermMissingDependencies + Nothing -> Validate.dispute . Set.singleton $ T h' n' + traverse_ (bitraverse_ checkDecl checkTerm . LD.toReference) termDeps + traverse_ checkDecl typeDeps + pure (term, typ) + +checkDeclComponent :: + forall m n. + (RS m n, E DeclStatus n, V m n) => + (m ~> n) -> + Hash -> + Reference.Size -> + n [Decl Symbol ()] +checkDeclComponent t h n = do + Env src _ _ <- Reader.ask + for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do + decl <- t $ Codebase.getTypeDeclaration src r + case decl of + Nothing -> Except.throwError DeclMissing + Just decl -> do + let deps = DD.declDependencies decl + checkDecl = \case + Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ n') -> + getDeclStatus h' >>= \case + Just DeclOk -> pure () + Just _ -> Except.throwError DeclMissingDependencies + Nothing -> Validate.dispute . Set.singleton $ D h' n' + traverse_ checkDecl deps + pure decl + +checkPatch :: + forall m n. 
+ (RS m n, E PatchStatus n, V m n) => + (m ~> n) -> + Branch.EditHash -> + n (Branch.EditHash, Patch) +checkPatch t h = do + Env src _ _ <- Reader.ask + t (Codebase.getPatch src h) >>= \case + Nothing -> Except.throwError PatchMissing + Just patch -> do + (h', patch) <- repairPatch patch + if h == h' + then setPatchStatus h PatchOk + else setPatchStatus h (PatchReplaced h') + pure (h', patch) + +repairBranch :: + forall m n. + (S m n, V m n, Applicative m) => + UnwrappedBranch m -> + n (UnwrappedBranch m) +repairBranch = \case + Causal.One _h e -> do + e' <- repairBranch0 e + pure $ Causal.one e' + Causal.Cons _h e (ht, mt) -> do + getBranchStatus @n @m ht >>= \case + Nothing -> Validate.refute . Set.singleton $ C ht mt + Just tailStatus -> do e' <- repairBranch0 e - pure $ Causal.one e' - Causal.Cons _h e (ht, mt) -> do + pure case tailStatus of + BranchOk -> Causal.cons' e' ht mt + BranchReplaced _ht' t' -> Causal.consDistinct e' t' + Causal.Merge _h e (Map.toList -> tails) -> do + tails' <- + Map.fromList <$> for tails \(ht, mt) -> getBranchStatus @n @m ht >>= \case Nothing -> Validate.refute . Set.singleton $ C ht mt - Just tailStatus -> do - e' <- repairBranch0 e + Just tailStatus -> pure case tailStatus of - BranchOk -> Causal.cons' e' ht mt - BranchReplaced _ht' t' -> Causal.consDistinct e' t' - Causal.Merge _h e (Map.toList -> tails) -> do - tails' <- - Map.fromList <$> for tails \(ht, mt) -> - getBranchStatus @n @m ht >>= \case - Nothing -> Validate.refute . Set.singleton $ C ht mt - Just tailStatus -> - pure case tailStatus of - BranchOk -> (ht, mt) - BranchReplaced ht' t' -> (ht', pure t') - e' <- repairBranch0 e - let h' = Causal.RawHash $ Causal.hash (e', Map.keys tails') - pure $ Causal.Merge h' e' tails' - - repairBranch0 :: - forall m n. 
- (S m n, V m n) => - Branch.Branch0 m -> - n (Branch.Branch0 m) - repairBranch0 b = do - terms' <- error "filterTermStar" (view Branch.terms b) - types' <- error "filterTermStar" (view Branch.types b) - children' <- error "filterChildren" (view Branch.children b) - edits' <- error "filterEdits" (view Branch.edits b) - pure @n $ Branch.branch0 terms' types' children' edits' - - repairPatch :: - forall m n. - (MonadState (Status m) n, MonadValidate (Set (Entity m)) n) => - Patch -> - n (Branch.EditHash, Patch) - repairPatch (Patch termEdits typeEdits) = do - termEdits' <- Relation.filterM (uncurry filterTermEdit) termEdits - typeEdits' <- Relation.filterM (uncurry filterTypeEdit) typeEdits - let patch = Patch termEdits' typeEdits' - pure (H.accumulate' patch, patch) + BranchOk -> (ht, mt) + BranchReplaced ht' t' -> (ht', pure t') + e' <- repairBranch0 e + let h' = Causal.RawHash $ Causal.hash (e', Map.keys tails') + pure $ Causal.Merge h' e' tails' + +repairBranch0 :: + forall m n. + (S m n, V m n, Applicative m) => + Branch.Branch0 m -> + n (Branch.Branch0 m) +repairBranch0 b = do + terms' <- filterBranchTermStar (view Branch.terms b) + types' <- filterBranchTypeStar (view Branch.types b) + children' <- filterBranchChildren (view Branch.children b) + edits' <- filterBranchEdits (view Branch.edits b) + pure @n $ Branch.branch0 terms' types' children' edits' + +repairPatch :: + forall m n. 
+ (MonadState (Status m) n, MonadValidate (Set (Entity m)) n) => + Patch -> + n (Branch.EditHash, Patch) +repairPatch (Patch termEdits typeEdits) = do + termEdits' <- Relation.filterM (uncurry filterTermEdit) termEdits + typeEdits' <- Relation.filterM (uncurry filterTypeEdit) typeEdits + let patch = Patch termEdits' typeEdits' + pure (H.accumulate' patch, patch) + where + filterTermEdit _ = \case + TermEdit.Deprecate -> pure True + TermEdit.Replace (Reference.Builtin _) _ -> pure True + TermEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) _ -> + getTermStatus h >>= \case + Nothing -> Validate.refute . Set.singleton $ T h n + Just TermOk -> pure True + Just _ -> pure False + filterTypeEdit _ = \case + TypeEdit.Deprecate -> pure True + TypeEdit.Replace (Reference.Builtin _) -> pure True + TypeEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) -> + getDeclStatus h >>= \case + Nothing -> Validate.refute . Set.singleton $ D h n + Just DeclOk -> pure True + Just _ -> pure False + +filterBranchTermStar :: (S m n, V m n) => Metadata.Star Referent NameSegment -> n (Metadata.Star Referent NameSegment) +filterBranchTermStar (Star3 _refs names _mdType mdTypeValues) = do + names' <- filterTermNames names + mdTypeValues' <- filterMetadata mdTypeValues + let refs' = Relation.dom names' + let mdType' = error "Can I get away with not populating the mdType column?" + pure $ Star3 refs' names' mdType' mdTypeValues' + +filterBranchTypeStar :: (S m n, V m n) => Metadata.Star Reference.Reference NameSegment -> n (Metadata.Star Reference.Reference NameSegment) +filterBranchTypeStar (Star3 _refs names _mdType mdTypeValues) = do + names' <- filterTypeNames names + let refs' = Relation.dom names' + mdTypeValues' <- filterMetadata mdTypeValues + let mdType' = error "Can I get away with not populating the mdType column?" 
+ pure $ Star3 refs' names mdType' mdTypeValues' + +filterMetadata :: Relation r (Metadata.Type, Metadata.Value) -> n (Relation r (Metadata.Type, Metadata.Value)) +filterMetadata = error "not implemented" + +filterTermNames :: (S m n, V m n) => Relation Referent NameSegment -> n (Relation Referent NameSegment) +filterTermNames = Relation.filterDomM validateTermReferent + +validateTermReferent :: (S m n, V m n) => Referent -> n Bool +validateTermReferent = \case + Referent.Ref r -> validateTermReference r + Referent.Con r _ _ -> validateTypeReference r + +validateTermReference :: (S m n, V m n) => Reference.Reference -> n Bool +validateTermReference = \case + Reference.Builtin {} -> pure True + Reference.DerivedId (Reference.Id h _i n) -> + getTermStatus h >>= \case + Nothing -> Validate.refute . Set.singleton $ T h n + Just TermOk -> pure True + Just _ -> pure False + +validateTypeReference :: (S m n, V m n) => Reference.Reference -> n Bool +validateTypeReference = \case + Reference.Builtin t -> pure True + Reference.DerivedId (Reference.Id h _i n) -> + getTermStatus h >>= \case + Nothing -> Validate.refute . Set.singleton $ T h n + Just TermOk -> pure True + Just _ -> pure False + +filterTypeNames :: (S m n, V m n) => Relation Reference.Reference NameSegment -> n (Relation Reference.Reference NameSegment) +filterTypeNames = Relation.filterDomM validateTypeReference + +filterBranchChildren :: (S m n, V m n, Applicative m) => Map NameSegment (Branch.Branch m) -> n (Map NameSegment (Branch.Branch m)) +filterBranchChildren = fmap Map.fromList . traverse go . Map.toList + where + go orig@(ns, Branch.Branch c) = + getBranchStatus (Causal.currentHash c) >>= \case + Nothing -> Validate.refute . 
Set.singleton $ C (Causal.currentHash c) (pure c) + Just BranchOk -> pure orig + Just (BranchReplaced _h c) -> pure (ns, Branch.Branch c) + +-- | if a dependency is missing, then remove the entry +filterBranchEdits :: (S m n, V m n) => Map NameSegment (Branch.EditHash, m Patch) -> n (Map NameSegment (Branch.EditHash, m Patch)) +filterBranchEdits = fmap (Map.fromList . catMaybes) . traverse go . Map.toList + where + go (ns, (h, _)) = + getPatchStatus h >>= \case + Nothing -> Validate.refute . Set.singleton $ P h + Just PatchOk -> rebuild h + Just PatchMissing -> pure Nothing + Just (PatchReplaced h) -> rebuild h where - filterTermEdit _ = \case - TermEdit.Deprecate -> pure True - TermEdit.Replace (Reference.Builtin _) _ -> pure True - TermEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) _ -> - getTermStatus h >>= \case - Nothing -> Validate.refute (Set.singleton $ T h n) - Just TermOk -> pure True - Just _ -> pure False - filterTypeEdit _ = \case - TypeEdit.Deprecate -> pure True - TypeEdit.Replace (Reference.Builtin _) -> pure True - TypeEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) -> - getDeclStatus h >>= \case - Nothing -> Validate.refute (Set.singleton $ D h n) - Just DeclOk -> pure True - Just _ -> pure False + rebuild h = pure $ Just (ns, (h, err h)) + err h = error $ "expected to short-circuit already-synced patch " ++ show h runSrc, runDest :: R m n => (Codebase m Symbol () -> a) -> n a runSrc = (Reader.reader srcCodebase <&>) @@ -389,3 +479,10 @@ runDest' ma = Reader.reader destConnection >>= flip runDB ma runDB :: Connection -> ReaderT Connection m a -> m a runDB conn action = Reader.runReaderT action conn + +-- each entity has to check to see +-- a) if it exists (if not, mark as missing in Status) +-- b) if any of its dependencies have not yet been synced +-- (if so, note as validation) +-- c) if any of its dependencies are missing from the source codebase +-- (if so, then filter them if possible, otherwise give this entity an error 
Status) \ No newline at end of file diff --git a/unison-core/src/Unison/Util/Relation.hs b/unison-core/src/Unison/Util/Relation.hs index 32786947fb..42638a0e86 100644 --- a/unison-core/src/Unison/Util/Relation.hs +++ b/unison-core/src/Unison/Util/Relation.hs @@ -230,6 +230,9 @@ filterDom f r = S.filter f (dom r) <| r filterRan :: (Ord a, Ord b) => (b -> Bool) -> Relation a b -> Relation a b filterRan f r = r |> S.filter f (ran r) +filterDomM :: (Applicative m, Ord a, Ord b) => (a -> m Bool) -> Relation a b -> m (Relation a b) +filterDomM f = fmap fromList . Monad.filterM (f . fst) . toList + filter :: (Ord a, Ord b) => ((a, b) -> Bool) -> Relation a b -> Relation a b filter f = fromList . List.filter f . toList From 6aae7f39d0e7251cd759c719dd0eca30373ed923 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 22 Mar 2021 16:08:48 -0400 Subject: [PATCH 140/225] formatting? --- .../src/Unison/Codebase/Conversion/Sync12.hs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index fa0a6400a2..bca4f1f027 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -187,7 +187,7 @@ trySync t _gc e = do t $ Codebase.putBranch dest (Branch.Branch c') pure Sync.Done T h n -> - getTermStatus @n @m h >>= \case + getTermStatus h >>= \case Just {} -> pure Sync.PreviouslyDone Nothing -> do runExceptT (runValidateT (checkTermComponent (lift . lift . t) h n)) >>= \case @@ -202,7 +202,7 @@ trySync t _gc e = do setTermStatus h TermOk pure Sync.Done D h n -> - getDeclStatus @n @m h >>= \case + getDeclStatus h >>= \case Just {} -> pure Sync.PreviouslyDone Nothing -> runExceptT (runValidateT (checkDeclComponent (lift . lift . t) h n)) >>= \case @@ -231,21 +231,25 @@ trySync t _gc e = do isSyncedCausal :: forall n. 
Branch.Hash -> n Bool isSyncedCausal = undefined -getBranchStatus :: forall n m. S m n => Branch.Hash -> n (Maybe (BranchStatus m)) +getBranchStatus :: S m n => Branch.Hash -> n (Maybe (BranchStatus m)) getBranchStatus h = use (branchStatus . at h) -getTermStatus :: forall n m. S m n => Hash -> n (Maybe TermStatus) +getTermStatus :: S m n => Hash -> n (Maybe TermStatus) getTermStatus h = use (termStatus . at h) -getDeclStatus :: forall n m. S m n => Hash -> n (Maybe DeclStatus) +getDeclStatus :: S m n => Hash -> n (Maybe DeclStatus) getDeclStatus h = use (declStatus . at h) +getPatchStatus :: S m n => Hash -> n (Maybe PatchStatus) getPatchStatus h = use (patchStatus . at h) +setTermStatus :: S m n => Hash -> TermStatus -> n () setTermStatus h s = termStatus . at h .= Just s +setDeclStatus :: S m n => Hash -> DeclStatus -> n () setDeclStatus h s = declStatus . at h .= Just s +setPatchStatus :: S m n => Hash -> PatchStatus -> n () setPatchStatus h s = patchStatus . at h .= Just s setBranchStatus :: forall m n. S m n => Branch.Hash -> BranchStatus m -> n () @@ -339,7 +343,7 @@ repairBranch = \case e' <- repairBranch0 e pure $ Causal.one e' Causal.Cons _h e (ht, mt) -> do - getBranchStatus @n @m ht >>= \case + getBranchStatus ht >>= \case Nothing -> Validate.refute . Set.singleton $ C ht mt Just tailStatus -> do e' <- repairBranch0 e @@ -349,7 +353,7 @@ repairBranch = \case Causal.Merge _h e (Map.toList -> tails) -> do tails' <- Map.fromList <$> for tails \(ht, mt) -> - getBranchStatus @n @m ht >>= \case + getBranchStatus ht >>= \case Nothing -> Validate.refute . Set.singleton $ C ht mt Just tailStatus -> pure case tailStatus of From 1225e1ecc55df4439f107ec7d86dc8298245e097 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 22 Mar 2021 16:30:37 -0400 Subject: [PATCH 141/225] finished? 
--- codebase2/util/U/Util/Relation.hs | 3 +++ .../src/Unison/Codebase/Conversion/Sync12.hs | 25 +++++++++---------- unison-core/src/Unison/Util/Relation.hs | 5 +++- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/codebase2/util/U/Util/Relation.hs b/codebase2/util/U/Util/Relation.hs index 9ea5d04983..d164b3dd99 100644 --- a/codebase2/util/U/Util/Relation.hs +++ b/codebase2/util/U/Util/Relation.hs @@ -241,6 +241,9 @@ filterM f = fmap fromList . Monad.filterM f . toList filterDomM :: (Applicative m, Ord a, Ord b) => (a -> m Bool) -> Relation a b -> m (Relation a b) filterDomM f = fmap fromList . Monad.filterM (f . fst) . toList +filterRanM :: (Applicative m, Ord a, Ord b) => (b -> m Bool) -> Relation a b -> m (Relation a b) +filterRanM f = fmap fromList . Monad.filterM (f . snd) . toList + -- | Restricts the relation to domain elements having multiple range elements filterManyDom :: (Ord a, Ord b) => Relation a b -> Relation a b filterManyDom r = filterDom (`manyDom` r) r diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index bca4f1f027..acbcf980a1 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -11,6 +11,7 @@ module Unison.Codebase.Conversion.Sync12 where import Control.Lens import Control.Monad.Except (MonadError, runExceptT) +import Control.Monad.Extra ((&&^)) import qualified Control.Monad.Except as Except import Control.Monad.Reader import qualified Control.Monad.Reader as Reader @@ -45,7 +46,7 @@ import qualified Unison.DataDeclaration as DD import Unison.Hash (Hash) import qualified Unison.Hashable as H import qualified Unison.LabeledDependency as LD -import Unison.NameSegment (NameSegment (NameSegment)) +import Unison.NameSegment (NameSegment) import Unison.Prelude (Set) import qualified Unison.Reference as Reference import Unison.Referent (Referent) @@ -170,10 
+171,10 @@ trySync :: Entity m -> n (TrySyncResult (Entity m)) trySync t _gc e = do - Env src dest _ <- Reader.ask + Env _ dest _ <- Reader.ask case e of C h mc -> - isSyncedCausal h >>= \case + (t $ Codebase.branchExists dest h) >>= \case True -> pure Sync.PreviouslyDone False -> do c <- t mc @@ -228,9 +229,6 @@ trySync t _gc e = do setPatchStatus h PatchOk pure Sync.Done -isSyncedCausal :: forall n. Branch.Hash -> n Bool -isSyncedCausal = undefined - getBranchStatus :: S m n => Branch.Hash -> n (Maybe (BranchStatus m)) getBranchStatus h = use (branchStatus . at h) @@ -404,23 +402,24 @@ repairPatch (Patch termEdits typeEdits) = do Just _ -> pure False filterBranchTermStar :: (S m n, V m n) => Metadata.Star Referent NameSegment -> n (Metadata.Star Referent NameSegment) -filterBranchTermStar (Star3 _refs names _mdType mdTypeValues) = do +filterBranchTermStar (Star3 _refs names _mdType md) = do names' <- filterTermNames names - mdTypeValues' <- filterMetadata mdTypeValues let refs' = Relation.dom names' let mdType' = error "Can I get away with not populating the mdType column?" + mdTypeValues' <- filterMetadata $ Relation.restrictDom refs' md pure $ Star3 refs' names' mdType' mdTypeValues' filterBranchTypeStar :: (S m n, V m n) => Metadata.Star Reference.Reference NameSegment -> n (Metadata.Star Reference.Reference NameSegment) -filterBranchTypeStar (Star3 _refs names _mdType mdTypeValues) = do +filterBranchTypeStar (Star3 _refs names _mdType md) = do names' <- filterTypeNames names let refs' = Relation.dom names' - mdTypeValues' <- filterMetadata mdTypeValues let mdType' = error "Can I get away with not populating the mdType column?" 
+ mdTypeValues' <- filterMetadata $ Relation.restrictDom refs' md pure $ Star3 refs' names mdType' mdTypeValues' -filterMetadata :: Relation r (Metadata.Type, Metadata.Value) -> n (Relation r (Metadata.Type, Metadata.Value)) -filterMetadata = error "not implemented" +filterMetadata :: (S m n, V m n, Ord r) => Relation r (Metadata.Type, Metadata.Value) -> n (Relation r (Metadata.Type, Metadata.Value)) +filterMetadata = Relation.filterRanM \(t, v) -> + validateTypeReference t &&^ validateTermReference v filterTermNames :: (S m n, V m n) => Relation Referent NameSegment -> n (Relation Referent NameSegment) filterTermNames = Relation.filterDomM validateTermReferent @@ -441,7 +440,7 @@ validateTermReference = \case validateTypeReference :: (S m n, V m n) => Reference.Reference -> n Bool validateTypeReference = \case - Reference.Builtin t -> pure True + Reference.Builtin {} -> pure True Reference.DerivedId (Reference.Id h _i n) -> getTermStatus h >>= \case Nothing -> Validate.refute . Set.singleton $ T h n diff --git a/unison-core/src/Unison/Util/Relation.hs b/unison-core/src/Unison/Util/Relation.hs index 42638a0e86..0d295b0448 100644 --- a/unison-core/src/Unison/Util/Relation.hs +++ b/unison-core/src/Unison/Util/Relation.hs @@ -4,7 +4,7 @@ module Unison.Util.Relation where import Unison.Prelude hiding (empty, toList) import Prelude hiding ( null, map, filter ) -import Data.Bifunctor ( first, second, Bifunctor ) +import Data.Bifunctor ( first, second ) import qualified Data.List as List import qualified Data.Map as M import qualified Data.Set as S @@ -233,6 +233,9 @@ filterRan f r = r |> S.filter f (ran r) filterDomM :: (Applicative m, Ord a, Ord b) => (a -> m Bool) -> Relation a b -> m (Relation a b) filterDomM f = fmap fromList . Monad.filterM (f . fst) . toList +filterRanM :: (Applicative m, Ord a, Ord b) => (b -> m Bool) -> Relation a b -> m (Relation a b) +filterRanM f = fmap fromList . Monad.filterM (f . snd) . 
toList + filter :: (Ord a, Ord b) => ((a, b) -> Bool) -> Relation a b -> Relation a b filter f = fromList . List.filter f . toList From b9020aa2c89b4d3d67b0a92ef1c3179c14087fc1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 24 Mar 2021 16:57:57 -0400 Subject: [PATCH 142/225] move Codebase init and Git commands into interfaces: * Git cache escaping no longer depends on FileCodebase.common (but is untested) * Git.{view,import,pushGit}RemoteBranch moved into Codebase interface * minimized the exports from {File,Sqlite}Codebase * introduce Codebase.openCodebase which tries to open an existing codebase but _doesn't_ bomb on failure * Git.hs test is specific to FileCodebase, but GitSimple.hs tests both * IO test currently uses FileCodebase only, but could be switched with some refactoring. --- codebase2/util/U/Util/Cache.hs | 6 +- parser-typechecker/src/Unison/Codebase.hs | 129 ++++++-- .../src/Unison/Codebase/Editor/Git.hs | 22 +- .../Unison/Codebase/Editor/HandleCommand.hs | 7 +- .../src/Unison/Codebase/FileCodebase.hs | 285 ++++++++++++------ .../Unison/Codebase/FileCodebase/Common.hs | 4 +- .../src/Unison/Codebase/FileCodebase/Git.hs | 136 --------- .../src/Unison/Codebase/GitError.hs | 3 +- .../src/Unison/Codebase/SqliteCodebase.hs | 125 +++++++- .../src/Unison/Codebase/SqliteCodebase/Git.hs | 140 --------- parser-typechecker/tests/Unison/Test/Git.hs | 41 +-- .../tests/Unison/Test/GitSimple.hs | 82 +++-- parser-typechecker/tests/Unison/Test/IO.hs | 17 +- .../unison-parser-typechecker.cabal | 2 - parser-typechecker/unison/Main.hs | 126 ++++---- 15 files changed, 596 insertions(+), 529 deletions(-) delete mode 100644 parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs delete mode 100644 parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs diff --git a/codebase2/util/U/Util/Cache.hs b/codebase2/util/U/Util/Cache.hs index 22fba7dab3..7bac9b5735 100644 --- a/codebase2/util/U/Util/Cache.hs +++ b/codebase2/util/U/Util/Cache.hs @@ -27,8 +27,8 @@ cache = 
do pure $ Cache lookup insert -nullCache :: (MonadIO m, Ord k) => m (Cache m k v) -nullCache = pure $ Cache (const (pure Nothing)) (\_ _ -> pure ()) +nullCache :: Applicative m => Cache m k v +nullCache = Cache (const (pure Nothing)) (\_ _ -> pure ()) -- Create a cache of bounded size. Once the cache -- reaches a size of `maxSize`, older unused entries @@ -36,7 +36,7 @@ nullCache = pure $ Cache (const (pure Nothing)) (\_ _ -> pure ()) -- where cache hits require updating LRU info, -- cache hits here are read-only and contention free. semispaceCache :: (MonadIO m, Ord k) => Word -> m (Cache m k v) -semispaceCache 0 = nullCache +semispaceCache 0 = pure nullCache semispaceCache maxSize = do -- Analogous to semispace GC, keep 2 maps: gen0 and gen1 -- `insert k v` is done in gen0 diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index a42832226a..f3280bc7e1 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -1,50 +1,79 @@ -{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE OverloadedStrings #-} module Unison.Codebase where +import Control.Lens ((%=), _1, _2) +import Control.Monad.Except (ExceptT (ExceptT), runExceptT) +import Control.Monad.State (State, evalState, get) +import Data.Bifunctor (bimap) +import qualified Data.Map as Map +import qualified Data.Set as Set +import qualified Unison.ABT as ABT +import qualified Unison.Builtin as Builtin +import qualified Unison.Builtin.Terms as Builtin +import Unison.Codebase.Branch (Branch) +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.CodeLookup as CL +import Unison.Codebase.Editor.Git (withStatus) +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, RemoteRepo) +import Unison.Codebase.GitError (GitError) +import Unison.Codebase.Patch (Patch) +import qualified Unison.Codebase.Reflog as Reflog +import Unison.Codebase.ShortBranchHash (ShortBranchHash) +import Unison.Codebase.SyncMode 
(SyncMode) +import Unison.DataDeclaration (Decl) +import qualified Unison.DataDeclaration as DD +import qualified Unison.Parser as Parser import Unison.Prelude - -import Control.Lens ( _1, _2, (%=) ) -import Control.Monad.State ( State, evalState, get ) -import Data.Bifunctor ( bimap ) -import qualified Data.Map as Map -import qualified Data.Set as Set -import qualified Unison.ABT as ABT -import qualified Unison.Builtin as Builtin -import qualified Unison.Builtin.Terms as Builtin -import Unison.Codebase.Branch ( Branch ) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.CodeLookup as CL -import qualified Unison.Codebase.Reflog as Reflog -import Unison.Codebase.SyncMode ( SyncMode ) -import qualified Unison.DataDeclaration as DD -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference +import Unison.Reference (Reference) +import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent -import qualified Unison.Term as Term -import qualified Unison.Type as Type -import Unison.Typechecker.TypeLookup (TypeLookup(TypeLookup)) -import qualified Unison.Typechecker.TypeLookup as TL -import qualified Unison.Parser as Parser -import qualified Unison.UnisonFile as UF -import qualified Unison.Util.Relation as Rel -import qualified Unison.Util.Set as Set -import qualified Unison.Var as Var -import Unison.Var ( Var ) -import Unison.Symbol ( Symbol ) -import Unison.DataDeclaration (Decl) +import Unison.ShortHash (ShortHash) +import Unison.Symbol (Symbol) import Unison.Term (Term) +import qualified Unison.Term as Term import Unison.Type (Type) -import Unison.Codebase.ShortBranchHash (ShortBranchHash) -import Unison.ShortHash (ShortHash) -import Unison.Codebase.Patch (Patch) +import qualified Unison.Type as Type +import Unison.Typechecker.TypeLookup (TypeLookup (TypeLookup)) +import qualified Unison.Typechecker.TypeLookup as TL +import qualified Unison.UnisonFile as UF +import qualified 
Unison.Util.Relation as Rel +import qualified Unison.Util.Set as Set +import Unison.Util.Timing (time) +import Unison.Var (Var) +import qualified Unison.Var as Var +import UnliftIO.Directory (getHomeDirectory) type DataDeclaration v a = DD.DataDeclaration v a + type EffectDeclaration v a = DD.EffectDeclaration v a -- | this FileCodebase detail lives here, because the interface depends on it 🙃 type CodebasePath = FilePath +type SyncToDir m = + CodebasePath -> -- dest codebase + SyncMode -> + Branch m -> -- branch to sync to dest codebase + m () + +-- | just collecting the abstraction we're currently using, not what we maybe should be using. -AI +data Init m v a = Init + { + openCodebase :: CodebasePath -> m (Either String (m (), Codebase m v a)), + -- | load an existing codebase or exit. + -- seems like Maybe might be more useful idk + getCodebaseOrExit :: Maybe CodebasePath -> m (m (), Codebase m v a), + -- | initialize a codebase where none exists, or exit + -- seems again like the return type type could be more useful + initCodebase :: CodebasePath -> m (m (), Codebase m v a), + -- | try to init a codebase where none exists and then exit regardless + initCodebaseAndExit :: Maybe CodebasePath -> m (), + -- | given a codebase root, and given that the codebase root may have other junk in it, + -- give the path to the "actual" files; e.g. what a forked transcript should clone + codebasePath :: CodebasePath -> CodebasePath + } + -- | Abstract interface to a user's codebase. -- -- One implementation is 'Unison.Codebase.FileCodebase' which uses the filesystem. 
@@ -74,6 +103,8 @@ data Codebase m v a = -- This copies all the dependencies of `b` from the this Codebase -- into the specified FileCodebase, and sets its _head to `b` , syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m () + , viewRemoteBranch' :: RemoteNamespace -> m (Either GitError (Branch m, CodebasePath)) + , pushGitRootBranch :: Branch m -> RemoteRepo -> SyncMode -> m (Either GitError ()) -- Watch expressions are part of the codebase, the `Reference.Id` is -- the hash of the source of the watch expression, and the `Term v a` @@ -99,6 +130,7 @@ data Codebase m v a = , branchHashesByPrefix :: ShortBranchHash -> m (Set Branch.Hash) } + data GetRootBranchError = NoRootBranch | CouldntParseRootBranch String @@ -110,6 +142,9 @@ debug = False data SyncFileCodebaseResult = SyncOk | UnknownDestinationRootBranch Branch.Hash | NotFastForward +getCodebaseDir :: MonadIO m => Maybe FilePath -> m FilePath +getCodebaseDir = maybe getHomeDirectory pure + -- | Write all of the builtins types into the codebase and create empty namespace initializeCodebase :: forall m. Monad m => Codebase m Symbol Parser.Ann -> m () initializeCodebase c = do @@ -306,3 +341,31 @@ class BuiltinAnnotation a where instance BuiltinAnnotation Parser.Ann where builtinAnnotation = Parser.Intrinsic + +-- * Git stuff + +-- | Sync elements as needed from a remote codebase into the local one. +-- If `sbh` is supplied, we try to load the specified branch hash; +-- otherwise we try to load the root branch. +importRemoteBranch :: + forall m v a. + MonadIO m => + Codebase m v a -> + RemoteNamespace -> + SyncMode -> + m (Either GitError (Branch m)) +importRemoteBranch codebase ns mode = runExceptT do + (branch, cacheDir) <- ExceptT $ viewRemoteBranch' codebase ns + withStatus "Importing downloaded files into local codebase..." 
$ + time "SyncFromDirectory" $ + lift $ syncFromDirectory codebase cacheDir mode branch + pure branch + +-- | Pull a git branch and view it from the cache, without syncing into the +-- local codebase. +viewRemoteBranch :: + MonadIO m => + Codebase m v a -> + RemoteNamespace -> + m (Either GitError (Branch m)) +viewRemoteBranch cache = runExceptT . fmap fst . ExceptT . viewRemoteBranch' cache \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs index bf2246ae64..8950d9845b 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs @@ -10,14 +10,32 @@ import Control.Monad.Except (MonadError, throwError) import qualified Data.Text as Text import Shellmet (($?), ($^), ($|)) import System.FilePath (()) -import Unison.Codebase (CodebasePath) import Unison.Codebase.Editor.RemoteRepo (RemoteRepo (GitRepo)) -import Unison.Codebase.FileCodebase.Common (encodeFileName) import Unison.Codebase.GitError (GitError) import qualified Unison.Codebase.GitError as GitError import qualified Unison.Util.Exception as Ex import UnliftIO.Directory (XdgDirectory (XdgCache), doesDirectoryExist, findExecutable, getXdgDirectory, removeDirectoryRecursive) import UnliftIO.IO (hFlush, stdout) +import qualified Data.ByteString.Base16 as ByteString +import qualified Data.Char as Char + +type CodebasePath = FilePath + +-- https://superuser.com/questions/358855/what-characters-are-safe-in-cross-platform-file-names-for-linux-windows-and-os +encodeFileName :: String -> FilePath +encodeFileName = let + go ('$' : rem) = "$$" <> go rem + go (c : rem) | elem @[] c "/\\:*?\"<>|" || not (Char.isPrint c && Char.isAscii c) + = "$x" <> encodeHex [c] <> "$" <> go rem + | otherwise = c : go rem + go [] = [] + encodeHex :: String -> String + encodeHex = Text.unpack . Text.toUpper . ByteString.encodeBase16 . + encodeUtf8 . Text.pack + in \case + "." 
-> "$dot$" + ".." -> "$dotdot$" + t -> go t tempGitDir :: MonadIO m => Text -> m FilePath tempGitDir url = diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs index de033ecf2b..bcfa872787 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleCommand.hs @@ -26,7 +26,6 @@ import Unison.Codebase ( Codebase ) import qualified Unison.Codebase as Codebase import Unison.Codebase.Branch ( Branch ) import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.SqliteCodebase.Git as Git import Unison.Parser ( Ann ) import qualified Unison.Parser as Parser import qualified Unison.Parsers as Parsers @@ -120,11 +119,11 @@ commandLine config awaitInput setBranchRef rt notifyUser notifyNumbered loadSour setBranchRef branch Codebase.putRootBranch codebase branch ViewRemoteBranch ns -> - lift $ runExceptT $ Git.viewRemoteBranch ns + lift $ Codebase.viewRemoteBranch codebase ns ImportRemoteBranch ns syncMode -> - lift $ runExceptT $ Git.importRemoteBranch codebase ns syncMode + lift $ Codebase.importRemoteBranch codebase ns syncMode SyncRemoteRootBranch repo branch syncMode -> - lift $ runExceptT $ Git.pushGitRootBranch codebase branch repo syncMode + lift $ Codebase.pushGitRootBranch codebase branch repo syncMode LoadTerm r -> lift $ Codebase.getTerm codebase r LoadType r -> lift $ Codebase.getTypeDeclaration codebase r LoadTypeOfTerm r -> lift $ Codebase.getTypeOfTerm codebase r diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index 267d1ef5c3..ac64131044 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -1,109 +1,109 @@ {-# LANGUAGE RankNTypes #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE 
ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} -module Unison.Codebase.FileCodebase -( getRootBranch -- used by Git module -, branchHashesByPrefix -- used by Git module -, branchFromFiles -- used by Git module -, codebase1 -- used by Main -, codebase1' -- used by Test/Git -, codebaseExists -- used by Main -, initCodebaseAndExit -, initCodebase -, getCodebaseOrExit -, getCodebaseDir -) where - -import Unison.Prelude -import UnliftIO ( MonadUnliftIO ) -import UnliftIO.Exception ( catchIO ) -import UnliftIO.Concurrent ( forkIO - , killThread - ) -import UnliftIO.STM ( atomically ) -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Data.Text.IO as TextIO -import UnliftIO.Directory ( createDirectoryIfMissing - , doesDirectoryExist - ) -import System.FilePath ( takeFileName - ) -import System.Directory ( getHomeDirectory - , canonicalizePath - ) -import System.Environment ( getProgName ) -import System.Exit ( exitFailure, exitSuccess ) -import qualified Unison.Codebase as Codebase -import Unison.Codebase ( Codebase(Codebase) - , BuiltinAnnotation - , CodebasePath - ) -import Unison.Codebase.Branch ( Branch ) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Reflog as Reflog -import qualified Unison.Codebase.Serialization as S -import qualified Unison.Codebase.Serialization.V1 - as V1 -import qualified Unison.Codebase.Watch as Watch -import Unison.Parser (Ann() ) -import Unison.Reference ( Reference ) -import qualified Unison.Reference as Reference -import qualified Unison.Referent as Referent -import qualified Unison.Util.TQueue as TQueue -import Unison.Var ( Var ) -import qualified Unison.UnisonFile as UF -import qualified U.Util.Cache as Cache -import qualified Unison.Util.Pretty as P -import qualified Unison.PrettyTerminal as PT -import Unison.Symbol ( Symbol ) -import qualified Unison.Codebase.FileCodebase.Common as Common +module 
Unison.Codebase.FileCodebase + ( + codebase1', -- used by Test/Git + Unison.Codebase.FileCodebase.init, + ) +where +import Control.Monad.Except (ExceptT, runExceptT, throwError) +import Control.Monad.Extra ((||^)) +import qualified Data.Set as Set +import qualified Data.Text as Text +import qualified Data.Text.IO as TextIO +import System.Directory (canonicalizePath) +import System.Environment (getProgName) +import System.Exit (exitFailure, exitSuccess) +import System.FilePath (takeFileName, ()) +import qualified U.Util.Cache as Cache +import Unison.Codebase (BuiltinAnnotation, Codebase (Codebase), CodebasePath) +import qualified Unison.Codebase as Codebase +import Unison.Codebase.Branch (Branch, headHash) +import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.Editor.Git (gitIn, gitTextIn, pullBranch, withIOError, withStatus) +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, RemoteRepo (GitRepo), printRepo) import Unison.Codebase.FileCodebase.Common - ( Err(CantParseBranchHead) - , codebaseExists - --- - , branchHeadDir - , dependentsDir - , reflogPath - , typeIndexDir - , typeMentionsIndexDir - , watchesDir - --- - , componentIdFromString - , hashFromFilePath - , referentIdFromString - , decodeFileName - , formatAnn - , getRootBranch - , getDecl - , getTerm - , getTypeOfTerm - , getWatch - , putDecl - , putTerm - , putRootBranch - , putBranch - , putWatch - --- - , hashExists - , branchFromFiles - , branchHashesByPrefix - , termReferencesByPrefix - , termReferentsByPrefix - , typeReferencesByPrefix - --- - , failWith - , listDirectory, getPatch, serializeEdits, patchExists + ( Err (CantParseBranchHead), + branchFromFiles, + branchHashesByPrefix, + branchHeadDir, + codebaseExists, + componentIdFromString, + decodeFileName, + dependentsDir, + failWith, + formatAnn, + getDecl, + getPatch, + getRootBranch, + getTerm, + getTypeOfTerm, + getWatch, + hashExists, + hashFromFilePath, + listDirectory, + patchExists, + putBranch, + putDecl, + 
putRootBranch, + putTerm, + putWatch, + referentIdFromString, + reflogPath, + serializeEdits, + termReferencesByPrefix, + termReferentsByPrefix, + typeIndexDir, + typeMentionsIndexDir, + typeReferencesByPrefix, + updateCausalHead, + watchesDir, ) - +import qualified Unison.Codebase.FileCodebase.Common as Common import qualified Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex as Sync +import Unison.Codebase.GitError (GitError) +import qualified Unison.Codebase.GitError as GitError +import qualified Unison.Codebase.Path as Path +import qualified Unison.Codebase.Reflog as Reflog +import qualified Unison.Codebase.Serialization as S +import qualified Unison.Codebase.Serialization.V1 as V1 +import Unison.Codebase.SyncMode (SyncMode) +import qualified Unison.Codebase.Watch as Watch +import Unison.Parser (Ann ()) +import Unison.Prelude +import qualified Unison.PrettyTerminal as PT +import Unison.Reference (Reference) +import qualified Unison.Reference as Reference +import qualified Unison.Referent as Referent +import Unison.Symbol (Symbol) +import qualified Unison.UnisonFile as UF +import qualified Unison.Util.Pretty as P +import qualified Unison.Util.TQueue as TQueue +import Unison.Util.Timing (time) +import Unison.Var (Var) +import UnliftIO (MonadUnliftIO) +import UnliftIO.Concurrent (forkIO, killThread) +import UnliftIO.Directory (createDirectoryIfMissing, doesDirectoryExist) +import UnliftIO.Exception (catchIO) +import UnliftIO.STM (atomically) + +init :: Codebase.Init IO Symbol Ann +init = Codebase.Init + getCodebaseOrError + (fmap (pure (),) . getCodebaseOrExit Cache.nullCache) + (fmap (pure (),) . 
initCodebase Cache.nullCache) + initCodebaseAndExit + ( Common.codebasePath) initCodebaseAndExit :: Maybe FilePath -> IO () initCodebaseAndExit mdir = do - dir <- getCodebaseDir mdir + dir <- Codebase.getCodebaseDir mdir cache <- Cache.cache _ <- initCodebase cache dir exitSuccess @@ -131,7 +131,7 @@ initCodebase cache path = do -- get the codebase in dir, or in the home directory if not provided. getCodebaseOrExit :: Branch.Cache IO -> Maybe FilePath -> IO (Codebase IO Symbol Ann) getCodebaseOrExit cache mdir = do - dir <- getCodebaseDir mdir + dir <- Codebase.getCodebaseDir mdir progName <- getProgName prettyDir <- P.string <$> canonicalizePath dir let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir @@ -141,6 +141,15 @@ getCodebaseOrExit cache mdir = do exitFailure theCodebase +-- get the codebase in dir +getCodebaseOrError :: forall m. MonadUnliftIO m => CodebasePath -> m (Either String (m (), Codebase m Symbol Ann)) +getCodebaseOrError dir = do + prettyDir <- liftIO $ P.string <$> canonicalizePath dir + let theCodebase = codebase1 @m @Symbol @Ann Cache.nullCache V1.formatSymbol formatAnn dir + ifM (codebaseExists dir) + (Right . (pure (),) <$> theCodebase) + (pure . Left . P.render @String 80 $ "No FileCodebase structure found at " <> prettyDir) + getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s getNoCodebaseErrorMsg executable prettyDir mdir = let secondLine = @@ -155,9 +164,6 @@ getNoCodebaseErrorMsg executable prettyDir mdir = [ "No codebase exists in " <> prettyDir <> "." , secondLine ] -getCodebaseDir :: Maybe FilePath -> IO FilePath -getCodebaseDir = maybe getHomeDirectory pure - -- builds a `Codebase IO v a`, given serializers for `v` and `a` codebase1 :: forall m v a @@ -196,6 +202,9 @@ codebase1' syncToDirectory branchCache fmtV@(S.Format getV putV) fmtA@(S.Format dependents (flip (syncToDirectory fmtV fmtA) path) (syncToDirectory fmtV fmtA path) + (runExceptT . 
viewRemoteBranch' Cache.nullCache) + (\b r m -> runExceptT $ + pushGitRootBranch (syncToDirectory fmtV fmtA path) Cache.nullCache b r m) watches (getWatch getV getA path) (putWatch putV putA path) @@ -287,3 +296,83 @@ branchHeadUpdates root = do ( cancelWatch >> killThread watcher1 , Set.fromList <$> Watch.collectUntilPause branchHeadChanges 400000 ) + +-- * Git stuff + +viewRemoteBranch' :: forall m. MonadIO m + => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) +viewRemoteBranch' cache (repo, sbh, path) = do + -- set up the cache dir + remotePath <- time "Git fetch" $ pullBranch repo + -- try to load the requested branch from it + branch <- time "Git fetch (sbh)" $ case sbh of + -- load the root branch + Nothing -> lift (getRootBranch cache remotePath) >>= \case + Left Codebase.NoRootBranch -> pure Branch.empty + Left (Codebase.CouldntLoadRootBranch h) -> + throwError $ GitError.CouldntLoadRootBranch repo h + Left (Codebase.CouldntParseRootBranch s) -> + throwError $ GitError.CouldntParseRootBranch repo s + Right b -> pure b + -- load from a specific `ShortBranchHash` + Just sbh -> do + branchCompletions <- lift $ branchHashesByPrefix remotePath sbh + case toList branchCompletions of + [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + [h] -> (lift $ branchFromFiles cache remotePath h) >>= \case + Just b -> pure b + Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions + pure (Branch.getAt' path branch, remotePath) + +-- Given a branch that is "after" the existing root of a given git repo, +-- stage and push the branch (as the new root) + dependencies to the repo. 
+pushGitRootBranch + :: MonadIO m + => Codebase.SyncToDir m + -> Branch.Cache m + -> Branch m + -> RemoteRepo + -> SyncMode + -> ExceptT GitError m () +pushGitRootBranch syncToDirectory cache branch repo syncMode = do + -- Pull the remote repo into a staging directory + (remoteRoot, remotePath) <- viewRemoteBranch' cache (repo, Nothing, Path.empty) + ifM (pure (remoteRoot == Branch.empty) + ||^ lift (remoteRoot `Branch.before` branch)) + -- ours is newer 👍, meaning this is a fast-forward push, + -- so sync branch to staging area + (stageAndPush remotePath) + (throwError $ GitError.PushDestinationHasNewStuff repo) + where + stageAndPush remotePath = do + let repoString = Text.unpack $ printRepo repo + withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ + lift (syncToDirectory remotePath syncMode branch) + updateCausalHead (branchHeadDir remotePath) (Branch._history branch) + -- push staging area to remote + withStatus ("Uploading to " ++ repoString ++ " ...") $ + unlessM + (push remotePath repo + `withIOError` (throwError . GitError.PushException repo . show)) + (throwError $ GitError.PushNoOp repo) + -- Commit our changes + push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO + push remotePath (GitRepo url gitbranch) = do + -- has anything changed? + status <- gitTextIn remotePath ["status", "--short"] + if Text.null status then + pure False + else do + gitIn remotePath ["add", "--all", "."] + gitIn remotePath + ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] + -- Push our changes to the repo + case gitbranch of + Nothing -> gitIn remotePath ["push", "--quiet", url] + Just gitbranch -> error $ + "Pushing to a specific branch isn't fully implemented or tested yet.\n" + ++ "InputPatterns.parseUri was expected to have prevented you " + ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
+ -- gitIn remotePath ["push", "--quiet", url, gitbranch] + pure True diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs index 1007db27a0..71ebc4b8d6 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase/Common.hs @@ -144,14 +144,14 @@ codebasePath = ".unison" "v1" formatAnn :: S.Format Ann formatAnn = S.Format (pure External) (\_ -> pure ()) --- Write Branch and its dependents to the dest codebase, and set it as the root. +-- Write Branch and its dependents to the dest codebase type SyncToDir m v a = S.Format v -> S.Format a -> CodebasePath -- src codebase -> CodebasePath -- dest codebase -> SyncMode - -> Branch m -- new dest root branch + -> Branch m -- branch to sync to dest codebase -> m () termsDir, typesDir, branchesDir, branchHeadDir, editsDir diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs deleted file mode 100644 index 3ee4433fe2..0000000000 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase/Git.hs +++ /dev/null @@ -1,136 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.FileCodebase.Git - ( importRemoteBranch - , pushGitRootBranch - , viewRemoteBranch - ) where - -import Unison.Prelude -import Unison.Codebase.Editor.Git - -import Control.Monad.Except ( throwError - , ExceptT - ) -import Control.Monad.Extra ((||^)) -import qualified Data.Text as Text -import qualified Unison.Codebase.GitError as GitError -import Unison.Codebase.GitError (GitError) -import qualified Unison.Codebase as Codebase -import Unison.Codebase (Codebase, CodebasePath) -import Unison.Codebase.Editor.RemoteRepo ( RemoteRepo(GitRepo) - , RemoteNamespace - , printRepo - ) -import Unison.Codebase.FileCodebase as FC -import Unison.Codebase.Branch ( Branch - , headHash - ) -import 
qualified Unison.Codebase.Path as Path -import Unison.Codebase.SyncMode ( SyncMode ) -import Unison.Util.Timing (time) -import qualified Unison.Codebase.Branch as Branch -import Unison.Codebase.FileCodebase.Common (updateCausalHead, branchHeadDir) - --- | Sync elements as needed from a remote codebase into the local one. --- If `sbh` is supplied, we try to load the specified branch hash; --- otherwise we try to load the root branch. -importRemoteBranch - :: forall m v a - . MonadIO m - => Codebase m v a - -> Branch.Cache m - -> RemoteNamespace - -> SyncMode - -> ExceptT GitError m (Branch m) -importRemoteBranch codebase cache ns mode = do - (branch, cacheDir) <- viewRemoteBranch' cache ns - withStatus "Importing downloaded files into local codebase..." $ - time "SyncFromDirectory" $ - lift $ Codebase.syncFromDirectory codebase cacheDir mode branch - pure branch - --- | Pull a git branch and view it from the cache, without syncing into the --- local codebase. -viewRemoteBranch :: forall m. MonadIO m - => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m) -viewRemoteBranch cache = fmap fst . viewRemoteBranch' cache - -viewRemoteBranch' :: forall m. 
MonadIO m - => Branch.Cache m -> RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) -viewRemoteBranch' cache (repo, sbh, path) = do - -- set up the cache dir - remotePath <- time "Git fetch" $ pullBranch repo - -- try to load the requested branch from it - branch <- time "Git fetch (sbh)" $ case sbh of - -- load the root branch - Nothing -> lift (FC.getRootBranch cache remotePath) >>= \case - Left Codebase.NoRootBranch -> pure Branch.empty - Left (Codebase.CouldntLoadRootBranch h) -> - throwError $ GitError.CouldntLoadRootBranch repo h - Left (Codebase.CouldntParseRootBranch s) -> - throwError $ GitError.CouldntParseRootBranch repo s - Right b -> pure b - -- load from a specific `ShortBranchHash` - Just sbh -> do - branchCompletions <- lift $ FC.branchHashesByPrefix remotePath sbh - case toList branchCompletions of - [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - [h] -> (lift $ FC.branchFromFiles cache remotePath h) >>= \case - Just b -> pure b - Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions - pure (Branch.getAt' path branch, remotePath) - --- Given a branch that is "after" the existing root of a given git repo, --- stage and push the branch (as the new root) + dependencies to the repo. 
-pushGitRootBranch - :: MonadIO m - => Codebase m v a - -> Branch.Cache m - -> Branch m - -> RemoteRepo - -> SyncMode - -> ExceptT GitError m () -pushGitRootBranch codebase cache branch repo syncMode = do - -- Pull the remote repo into a staging directory - (remoteRoot, remotePath) <- viewRemoteBranch' cache (repo, Nothing, Path.empty) - ifM (pure (remoteRoot == Branch.empty) - ||^ lift (remoteRoot `Branch.before` branch)) - -- ours is newer 👍, meaning this is a fast-forward push, - -- so sync branch to staging area - (stageAndPush remotePath) - (throwError $ GitError.PushDestinationHasNewStuff repo) - where - stageAndPush remotePath = do - let repoString = Text.unpack $ printRepo repo - withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ - lift (Codebase.syncToDirectory codebase remotePath syncMode branch) - updateCausalHead (branchHeadDir remotePath) (Branch._history branch) - -- push staging area to remote - withStatus ("Uploading to " ++ repoString ++ " ...") $ - unlessM - (push remotePath repo - `withIOError` (throwError . GitError.PushException repo . show)) - (throwError $ GitError.PushNoOp repo) - -- Commit our changes - push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO - push remotePath (GitRepo url gitbranch) = do - -- has anything changed? - status <- gitTextIn remotePath ["status", "--short"] - if Text.null status then - pure False - else do - gitIn remotePath ["add", "--all", "."] - gitIn remotePath - ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] - -- Push our changes to the repo - case gitbranch of - Nothing -> gitIn remotePath ["push", "--quiet", url] - Just gitbranch -> error $ - "Pushing to a specific branch isn't fully implemented or tested yet.\n" - ++ "InputPatterns.parseUri was expected to have prevented you " - ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
- -- gitIn remotePath ["push", "--quiet", url, gitbranch] - pure True diff --git a/parser-typechecker/src/Unison/Codebase/GitError.hs b/parser-typechecker/src/Unison/Codebase/GitError.hs index 9798f819fb..02593ca213 100644 --- a/parser-typechecker/src/Unison/Codebase/GitError.hs +++ b/parser-typechecker/src/Unison/Codebase/GitError.hs @@ -2,11 +2,12 @@ module Unison.Codebase.GitError where import Unison.Prelude -import Unison.Codebase (CodebasePath) import Unison.Codebase.ShortBranchHash (ShortBranchHash) import qualified Unison.Codebase.Branch as Branch import Unison.Codebase.Editor.RemoteRepo (RemoteRepo) +type CodebasePath = FilePath + data GitError = NoGit | UnrecognizableCacheDir Text CodebasePath | UnrecognizableCheckoutDir Text CodebasePath diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 0a38e77105..d0daa2a06a 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -6,14 +6,14 @@ {-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} -module Unison.Codebase.SqliteCodebase where +module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init) where import qualified Control.Concurrent import qualified Control.Exception import Control.Monad (filterM, when, (>=>)) -import Control.Monad.Except (ExceptT, runExceptT) +import Control.Monad.Except (ExceptT, runExceptT,MonadError (throwError)) import qualified Control.Monad.Except as Except -import Control.Monad.Extra (ifM, unlessM) +import Control.Monad.Extra (ifM, unlessM, (||^)) import qualified Control.Monad.Extra as Monad import Control.Monad.Reader (ReaderT (runReaderT)) import Control.Monad.State (MonadState) @@ -85,21 +85,34 @@ import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF import qualified Unison.Util.Pretty as P import UnliftIO (MonadIO, catchIO, liftIO) -import UnliftIO.Directory 
(canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, doesFileExist, getHomeDirectory) +import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, doesFileExist) import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, RemoteRepo (GitRepo), printRepo) +import Unison.Codebase.GitError (GitError) +import qualified Unison.Codebase.GitError as GitError +import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.Editor.Git (withIOError, withStatus, gitTextIn, gitIn, pullBranch) +import Unison.Util.Timing (time) +import GHC.Stack (HasCallStack) +import qualified Unison.Util.ColorText as ColorText +import qualified Data.Bifunctor as Bifunctor debug, debugProcessBranches :: Bool debug = False debugProcessBranches = False +init :: HasCallStack => MonadIO m => Codebase1.Init m Symbol Ann +init = Codebase1.Init getCodebaseOrError getCodebaseOrExit initCodebase initCodebaseAndExit ( codebasePath) + codebasePath :: FilePath codebasePath = ".unison" "v2" "unison.sqlite3" -- get the codebase in dir, or in the home directory if not provided. getCodebaseOrExit :: MonadIO m => Maybe FilePath -> m (m (), Codebase1.Codebase m Symbol Ann) getCodebaseOrExit mdir = do - dir <- getCodebaseDir mdir + dir <- Codebase.getCodebaseDir mdir progName <- SysEnv.getProgName prettyDir <- P.string <$> canonicalizePath dir let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir @@ -109,6 +122,17 @@ getCodebaseOrExit mdir = do SysExit.exitFailure Right c -> pure c +-- get the codebase in dir +getCodebaseOrError :: forall m. MonadIO m => CodebasePath -> m (Either String (m (), Codebase.Codebase m Symbol Ann)) +getCodebaseOrError dir = do + prettyDir <- liftIO $ P.string <$> canonicalizePath dir + let prettyError :: [(Q.SchemaType, Q.SchemaName)] -> String + prettyError schema = + ColorText.toANSI . 
P.render 80 . (("Missing SqliteCodebase structure in " <> prettyDir <> ".") <>) + . P.column2Header "Schema Type" "Name" + $ map (Bifunctor.bimap P.string P.string) schema + fmap (Either.mapLeft prettyError) (sqliteCodebase dir) + getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s getNoCodebaseErrorMsg executable prettyDir mdir = let secondLine = @@ -129,7 +153,7 @@ getNoCodebaseErrorMsg executable prettyDir mdir = initCodebaseAndExit :: MonadIO m => Maybe FilePath -> m () initCodebaseAndExit mdir = do - dir <- getCodebaseDir mdir + dir <- Codebase.getCodebaseDir mdir (closeCodebase, _codebase) <- initCodebase dir closeCodebase liftIO SysExit.exitSuccess @@ -173,9 +197,6 @@ initCodebase path = do Codebase1.initializeCodebase theCodebase pure (closeCodebase, theCodebase) -getCodebaseDir :: MonadIO m => Maybe FilePath -> m FilePath -getCodebaseDir = maybe getHomeDirectory pure - -- checks if a db exists at `path` with the minimum schema codebaseExists :: MonadIO m => CodebasePath -> m Bool codebaseExists root = liftIO do @@ -764,6 +785,8 @@ sqliteCodebase root = do dependentsImpl syncFromDirectory syncToDirectory + viewRemoteBranch' + (pushGitRootBranch syncToDirectory) watches getWatch putWatch @@ -861,3 +884,87 @@ syncProgress = Sync.Progress need done warn allDone SyncProgressState need done warn -> "invalid SyncProgressState " ++ show (fmap v need, bimap id v done, bimap id v warn) where v = const () + + +viewRemoteBranch' :: forall m. MonadIO m + => RemoteNamespace -> m (Either GitError (Branch m, CodebasePath)) +viewRemoteBranch' (repo, sbh, path) = runExceptT do + -- set up the cache dir + remotePath <- time "Git fetch" $ pullBranch repo + ifM (codebaseExists remotePath) + (do + (closeCodebase, codebase) <- lift (sqliteCodebase remotePath) >>= + Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . 
fmap pure + -- try to load the requested branch from it + branch <- time "Git fetch (sbh)" $ case sbh of + -- load the root branch + Nothing -> lift (Codebase.getRootBranch codebase) >>= \case + Left Codebase.NoRootBranch -> pure Branch.empty + Left (Codebase.CouldntLoadRootBranch h) -> + throwError $ GitError.CouldntLoadRootBranch repo h + Left (Codebase.CouldntParseRootBranch s) -> + throwError $ GitError.CouldntParseRootBranch repo s + Right b -> pure b + -- load from a specific `ShortBranchHash` + Just sbh -> do + branchCompletions <- lift $ Codebase.branchHashesByPrefix codebase sbh + case toList branchCompletions of + [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + [h] -> (lift $ Codebase.getBranchForHash codebase h) >>= \case + Just b -> pure b + Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions + lift closeCodebase + pure (Branch.getAt' path branch, remotePath)) + -- else there's no initialized codebase at this repo; we pretend there's an empty one. + (pure (Branch.empty, remotePath)) + +-- Given a branch that is "after" the existing root of a given git repo, +-- stage and push the branch (as the new root) + dependencies to the repo. 
+pushGitRootBranch + :: MonadIO m + => Codebase.SyncToDir m + -> Branch m + -> RemoteRepo + -> SyncMode + -> m (Either GitError ()) +pushGitRootBranch syncToDirectory branch repo syncMode = runExceptT do + -- Pull the remote repo into a staging directory + (remoteRoot, remotePath) <- Except.ExceptT $ viewRemoteBranch' (repo, Nothing, Path.empty) + ifM (pure (remoteRoot == Branch.empty) + ||^ lift (remoteRoot `Branch.before` branch)) + -- ours is newer 👍, meaning this is a fast-forward push, + -- so sync branch to staging area + (stageAndPush remotePath) + (throwError $ GitError.PushDestinationHasNewStuff repo) + where + stageAndPush remotePath = do + let repoString = Text.unpack $ printRepo repo + withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ + lift (syncToDirectory remotePath syncMode branch) + -- push staging area to remote + withStatus ("Uploading to " ++ repoString ++ " ...") $ + unlessM + (push remotePath repo + `withIOError` (throwError . GitError.PushException repo . show)) + (throwError $ GitError.PushNoOp repo) + -- Commit our changes + push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO + push remotePath (GitRepo url gitbranch) = do + -- has anything changed? + status <- gitTextIn remotePath ["status", "--short"] + if Text.null status then + pure False + else do + gitIn remotePath ["add", "--all", "."] + gitIn remotePath + ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ Branch.headHash branch)] + -- Push our changes to the repo + case gitbranch of + Nothing -> gitIn remotePath ["push", "--quiet", url] + Just gitbranch -> error $ + "Pushing to a specific branch isn't fully implemented or tested yet.\n" + ++ "InputPatterns.parseUri was expected to have prevented you " + ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
+ -- gitIn remotePath ["push", "--quiet", url, gitbranch] + pure True diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs deleted file mode 100644 index 1f57d23502..0000000000 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Git.hs +++ /dev/null @@ -1,140 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.SqliteCodebase.Git - ( importRemoteBranch - , pushGitRootBranch - , viewRemoteBranch - ) where - -import Unison.Prelude -import Unison.Codebase.Editor.Git - -import Control.Monad.Except ( throwError - , ExceptT - ) -import Control.Monad.Extra ((||^)) -import qualified Data.Text as Text -import qualified Data.Validation as Validation -import qualified Unison.Codebase.GitError as GitError -import Unison.Codebase.GitError (GitError) -import qualified Unison.Codebase as Codebase -import Unison.Codebase (Codebase, CodebasePath) -import Unison.Codebase.Editor.RemoteRepo ( RemoteRepo(GitRepo) - , RemoteNamespace - , printRepo - ) -import qualified Unison.Codebase.SqliteCodebase as FC -import Unison.Codebase.Branch ( Branch - , headHash - ) -import qualified Unison.Codebase.Path as Path -import Unison.Codebase.SyncMode ( SyncMode ) -import Unison.Util.Timing (time) -import qualified Unison.Codebase.Branch as Branch - --- | Sync elements as needed from a remote codebase into the local one. --- If `sbh` is supplied, we try to load the specified branch hash; --- otherwise we try to load the root branch. -importRemoteBranch - :: forall m v a - . MonadIO m - => Codebase m v a - -> RemoteNamespace - -> SyncMode - -> ExceptT GitError m (Branch m) -importRemoteBranch codebase ns mode = do - (branch, cacheDir) <- viewRemoteBranch' ns - withStatus "Importing downloaded files into local codebase..." 
$ - time "SyncFromDirectory" $ - lift $ Codebase.syncFromDirectory codebase cacheDir mode branch - pure branch - --- | Pull a git branch and view it from the cache, without syncing into the --- local codebase. -viewRemoteBranch :: forall m. MonadIO m - => RemoteNamespace -> ExceptT GitError m (Branch m) -viewRemoteBranch = fmap fst . viewRemoteBranch' - -viewRemoteBranch' :: forall m. MonadIO m - => RemoteNamespace -> ExceptT GitError m (Branch m, CodebasePath) -viewRemoteBranch' (repo, sbh, path) = do - -- set up the cache dir - remotePath <- time "Git fetch" $ pullBranch repo - ifM (FC.codebaseExists remotePath) - (do - (closeCodebase, codebase) <- lift (FC.sqliteCodebase remotePath) >>= - Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . fmap pure - -- try to load the requested branch from it - branch <- time "Git fetch (sbh)" $ case sbh of - -- load the root branch - Nothing -> lift (Codebase.getRootBranch codebase) >>= \case - Left Codebase.NoRootBranch -> pure Branch.empty - Left (Codebase.CouldntLoadRootBranch h) -> - throwError $ GitError.CouldntLoadRootBranch repo h - Left (Codebase.CouldntParseRootBranch s) -> - throwError $ GitError.CouldntParseRootBranch repo s - Right b -> pure b - -- load from a specific `ShortBranchHash` - Just sbh -> do - branchCompletions <- lift $ Codebase.branchHashesByPrefix codebase sbh - case toList branchCompletions of - [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - [h] -> (lift $ Codebase.getBranchForHash codebase h) >>= \case - Just b -> pure b - Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions - lift closeCodebase - pure (Branch.getAt' path branch, remotePath)) - -- else there's no initialized codebase at this repo; we pretend there's an empty one. 
- (pure (Branch.empty, remotePath)) - --- Given a branch that is "after" the existing root of a given git repo, --- stage and push the branch (as the new root) + dependencies to the repo. -pushGitRootBranch - :: MonadIO m - => Codebase m v a - -> Branch m - -> RemoteRepo - -> SyncMode - -> ExceptT GitError m () -pushGitRootBranch codebase branch repo syncMode = do - -- Pull the remote repo into a staging directory - (remoteRoot, remotePath) <- viewRemoteBranch' (repo, Nothing, Path.empty) - ifM (pure (remoteRoot == Branch.empty) - ||^ lift (remoteRoot `Branch.before` branch)) - -- ours is newer 👍, meaning this is a fast-forward push, - -- so sync branch to staging area - (stageAndPush remotePath) - (throwError $ GitError.PushDestinationHasNewStuff repo) - where - stageAndPush remotePath = do - let repoString = Text.unpack $ printRepo repo - withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ - lift (Codebase.syncToDirectory codebase remotePath syncMode branch) - -- push staging area to remote - withStatus ("Uploading to " ++ repoString ++ " ...") $ - unlessM - (push remotePath repo - `withIOError` (throwError . GitError.PushException repo . show)) - (throwError $ GitError.PushNoOp repo) - -- Commit our changes - push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO - push remotePath (GitRepo url gitbranch) = do - -- has anything changed? - status <- gitTextIn remotePath ["status", "--short"] - if Text.null status then - pure False - else do - gitIn remotePath ["add", "--all", "."] - gitIn remotePath - ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ headHash branch)] - -- Push our changes to the repo - case gitbranch of - Nothing -> gitIn remotePath ["push", "--quiet", url] - Just gitbranch -> error $ - "Pushing to a specific branch isn't fully implemented or tested yet.\n" - ++ "InputPatterns.parseUri was expected to have prevented you " - ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
- -- gitIn remotePath ["push", "--quiet", url, gitbranch] - pure True diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs index ddb1e671e2..4d3e65079c 100644 --- a/parser-typechecker/tests/Unison/Test/Git.hs +++ b/parser-typechecker/tests/Unison/Test/Git.hs @@ -28,9 +28,10 @@ import Unison.Codebase.FileCodebase.Common (SyncToDir, formatAnn) import Unison.Parser (Ann) import Unison.Symbol (Symbol) import Unison.Var (Var) +import qualified U.Util.Cache as Cache test :: Test () -test = scope "git" . tests $ +test = scope "git-fc" . tests $ [ testPull , testPush , syncComplete @@ -52,7 +53,7 @@ syncComplete = scope "syncComplete" $ do observe title expectation files = scope title . for_ files $ \path -> scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expectation - codebase <- io $ snd <$> initCodebase tmp "codebase" + (_, cleanup, codebase) <- io $ initCodebase tmp "codebase" runTranscript_ tmp codebase [iTrim| ```ucm:hide @@ -96,7 +97,9 @@ pushComplete.b.c.y = x + 1 observe "complete" expect files -- if we haven't crashed, clean up! - io $ removeDirectoryRecursive tmp + io do + cleanup + removeDirectoryRecursive tmp where files = @@ -111,7 +114,7 @@ syncTestResults = scope "syncTestResults" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "syncTestResults" targetDir <- io $ Temp.createTempDirectory tmp "target" - codebase <- io $ snd <$> initCodebase tmp "codebase" + (_, cleanup, codebase) <- io $ initCodebase tmp "codebase" runTranscript_ tmp codebase [iTrim| ```ucm @@ -145,7 +148,9 @@ test> tests.x = [Ok "Great!"] scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expect -- if we haven't crashed, clean up! 
- io $ removeDirectoryRecursive tmp + io do + cleanup + removeDirectoryRecursive tmp where targetShouldHave = [ ".unison/v1/paths/0bnfrk7cu44q0vvaj7a0osl90huv6nj01nkukplcsbgn3i09h6ggbthhrorm01gpqc088673nom2i491fh9rtbqcc6oud6iqq6oam88.ub" @@ -166,8 +171,8 @@ testPull = scope "pull" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" -- initialize author and user codebases - authorCodebase <- io $ snd <$> initCodebase tmp "author" - (userDir, userCodebase) <- io $ initCodebase tmp "user" + (_authorDir, cleanupAuthor, authorCodebase) <- io $ initCodebase tmp "author" + (userDir, cleanupUser, userCodebase) <- io $ initCodebase tmp "user" -- initialize git repo let repo = tmp "repo.git" @@ -222,7 +227,10 @@ inside.y = c + c scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect . not -- if we haven't crashed, clean up! - io $ removeDirectoryRecursive tmp + io $ do + cleanupAuthor + cleanupUser + removeDirectoryRecursive tmp where gitShouldHave = userShouldHave ++ userShouldNotHave @@ -284,14 +292,11 @@ inside.y = c + c ] -- initialize a fresh codebase -initCodebaseDir :: FilePath -> String -> IO CodebasePath -initCodebaseDir tmpDir name = fst <$> initCodebase tmpDir name - -initCodebase :: FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) +initCodebase :: FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) initCodebase tmpDir name = do let codebaseDir = tmpDir name - c <- FC.initCodebase undefined codebaseDir - pure (codebaseDir, c) + (cleanup, c) <- Codebase.initCodebase FC.init codebaseDir + pure (codebaseDir, cleanup, c) -- run a transcript on an existing codebase runTranscript_ :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> String -> m () @@ -315,7 +320,7 @@ testPush = scope "push" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-push" -- initialize a fresh codebase named "c" - (codebasePath, c) <- io $ initCodebase tmp "c" 
+ (codebasePath, cleanup, c) <- io $ initCodebase tmp "c" -- Run the "setup transcript" to do the adds and updates; everything short of -- pushing. @@ -328,7 +333,7 @@ testPush = scope "push" $ do io $ "git" ["init", "--bare", Text.pack repoGit] -- push one way! - codebase <- io $ FC.codebase1' impl (error "todo") V1.formatSymbol formatAnn codebasePath + codebase <- io $ FC.codebase1' impl Cache.nullCache V1.formatSymbol formatAnn codebasePath runTranscript_ tmp codebase (pushTranscript repoGit) -- check out the resulting repo so we can inspect it @@ -344,7 +349,9 @@ testPush = scope "push" $ do io (fmap not . doesFileExist $ tmp implName path) >>= expect -- if we haven't crashed, clean up! - io $ removeDirectoryRecursive tmp + io do + cleanup + removeDirectoryRecursive tmp where setupTranscript = [iTrim| diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 562dce4e6b..de691f2a7c 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -3,7 +3,6 @@ module Unison.Test.GitSimple where -import Control.Lens (view, _1) import Data.String.Here (iTrim) import qualified Data.Text as Text import EasyTest @@ -12,19 +11,22 @@ import System.Directory (removeDirectoryRecursive) import System.FilePath (()) import qualified System.IO.Temp as Temp import Unison.Codebase (Codebase, CodebasePath) -import qualified Unison.Codebase.SqliteCodebase as FC +import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.TranscriptParser as TR import Unison.Parser (Ann) import Unison.Prelude import Unison.Symbol (Symbol) +import qualified Unison.Parser as Parser +import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.SqliteCodebase as SC writeTranscriptOutput :: Bool -writeTranscriptOutput = False +writeTranscriptOutput = True test :: Test () -test = scope "git-simple" . 
tests $ [ - - pushPullTest "one-term" +test = scope "git-simple" . tests $ flip map [(FC.init, "fc")]--, (SC.init, "fc")] + \(cbInit, name) -> scope name $ tests [ + pushPullTest cbInit "one-term" -- simplest-author (\repo -> [iTrim| ```unison @@ -47,7 +49,7 @@ c = 3 ``` |]) , - pushPullTest "one-term2" + pushPullTest cbInit "one-term2" -- simplest-author (\repo -> [iTrim| ```unison @@ -69,7 +71,7 @@ c = 3 ``` |]) , - pushPullTest "one-type" + pushPullTest cbInit "one-type" -- simplest-author (\repo -> [iTrim| ```unison @@ -90,6 +92,53 @@ type Foo = Foo > Foo.Foo ``` |]) + , + pushPullTest cbInit "patching" + (\repo -> [iTrim| +```ucm +.myLib> alias.term ##Nat.+ + +``` +```unison +improveNat x = x + 3 +``` +```ucm +.myLib> add +.myLib> ls +.myLib> move.namespace .myLib .workaround1552.myLib.v1 +.workaround1552.myLib> ls +.workaround1552.myLib> fork v1 v2 +.workaround1552.myLib.v2> +``` +```unison +improveNat x = x + 100 +``` +```ucm +.workaround1552.myLib.v2> update +.workaround1552.myLib> push ${repo} +``` + |]) + (\repo -> [iTrim| +```ucm +.myApp> pull ${repo}:.v1 external.yourLib +.myApp> alias.term ##Nat.* * +```` +```unison +> greatApp = improveNat 5 * improveNat 6 +``` +```ucm +.myApp> add +.myApp> pull ${repo}:.v2 external.yourLib +``` +```unison +> greatApp = improveNat 5 * improveNat 6 +``` +```ucm +.myApp> patch external.yourLib.patch +``` +```unison +> greatApp = improveNat 5 * improveNat 6 +``` + |]) -- , -- pushPullTest "regular" @@ -143,14 +192,14 @@ type Foo = Foo -- .myLib.outside.B> #j57m94daqi -pushPullTest :: String -> (FilePath -> String) -> (FilePath -> String) -> Test () -pushPullTest name authorScript userScript = scope name $ do +pushPullTest :: Codebase.Init IO Symbol Parser.Ann -> String -> (FilePath -> String) -> (FilePath -> String) -> Test () +pushPullTest cbInit name authorScript userScript = scope name $ do -- put all our junk into here tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory ("git-simple-" ++ 
name) -- initialize author and user codebases - (_authorDir, closeAuthor, authorCodebase) <- io $ initCodebase tmp "author" - (_userDir, closeUser, userCodebase) <- io $ initCodebase tmp "user" + (_authorDir, closeAuthor, authorCodebase) <- io $ initCodebase cbInit tmp "author" + (_userDir, closeUser, userCodebase) <- io $ initCodebase cbInit tmp "user" -- initialize git repo let repo = tmp "repo.git" @@ -179,13 +228,10 @@ pushPullTest name authorScript userScript = scope name $ do ok -- initialize a fresh codebase -initCodebaseDir :: FilePath -> String -> IO CodebasePath -initCodebaseDir tmpDir name = view _1 <$> initCodebase tmpDir name - -initCodebase :: FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) -initCodebase tmpDir name = do +initCodebase :: Monad m => Codebase.Init m v a -> FilePath -> String -> m (CodebasePath, m (), Codebase m v a) +initCodebase cbInit tmpDir name = do let codebaseDir = tmpDir name - (close, c) <- FC.initCodebase codebaseDir + (close, c) <- Codebase.initCodebase cbInit codebaseDir pure (codebaseDir, close, c) -- run a transcript on an existing codebase diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs index 65d829f595..286a68a9d4 100644 --- a/parser-typechecker/tests/Unison/Test/IO.hs +++ b/parser-typechecker/tests/Unison/Test/IO.hs @@ -18,6 +18,8 @@ import qualified Unison.Codebase.SqliteCodebase as SqliteCodebase import qualified Unison.Codebase.TranscriptParser as TR import Unison.Parser (Ann) import Unison.Symbol (Symbol) +import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.FileCodebase as FileCodebase -- * IO Tests @@ -78,10 +80,10 @@ main = 'let -- * Utilities -initCodebase :: FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) -initCodebase tmpDir name = do +initCodebase :: Codebase.Init IO Symbol Ann -> FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) +initCodebase cbInit tmpDir name = do 
let codebaseDir = tmpDir name - (finalize, c) <- SqliteCodebase.initCodebase codebaseDir + (finalize, c) <- Codebase.initCodebase cbInit codebaseDir pure (codebaseDir, finalize, c) -- run a transcript on an existing codebase @@ -105,8 +107,9 @@ runTranscript_ newRt tmpDir c transcript = do withScopeAndTempDir :: String -> (FilePath -> Codebase IO Symbol Ann -> Test ()) -> Test () withScopeAndTempDir name body = scope name $ do - tmp <- io (Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory name) - (_, closeCodebase, codebase) <- io $ initCodebase tmp "user" + tmp <- liftIO $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory name + (_, closeCodebase, codebase) <- liftIO $ initCodebase FileCodebase.init tmp "user" body tmp codebase - io $ closeCodebase - io $ removeDirectoryRecursive tmp + liftIO do + closeCodebase + removeDirectoryRecursive tmp diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index f79349c9ac..de3956ce8d 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -85,7 +85,6 @@ library Unison.Codebase.FileCodebase Unison.Codebase.FileCodebase.Branch.Dependencies Unison.Codebase.FileCodebase.Common - Unison.Codebase.FileCodebase.Git Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex Unison.Codebase.GitError Unison.Codebase.Metadata @@ -101,7 +100,6 @@ library Unison.Codebase.SqliteCodebase Unison.Codebase.SqliteCodebase.Branch.Dependencies Unison.Codebase.SqliteCodebase.Conversions - Unison.Codebase.SqliteCodebase.Git Unison.Codebase.SqliteCodebase.SyncEphemeral Unison.Codebase.SyncMode Unison.Codebase.TermEdit diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 205d8be254..086113140f 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -1,47 +1,52 @@ -{-# Language OverloadedStrings #-} -{-# Language 
PartialTypeSignatures #-} {-# LANGUAGE CPP #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-partial-type-signatures #-} module Main where -import Unison.Prelude -import Control.Concurrent ( mkWeakThreadId, myThreadId ) -import Control.Error.Safe (rightMay) -import Control.Exception ( throwTo, AsyncException(UserInterrupt) ) -import Data.ByteString.Char8 ( unpack ) -import Data.Configurator.Types ( Config ) -import qualified Network.URI.Encode as URI -import System.Directory ( getCurrentDirectory - , removeDirectoryRecursive - ) -import System.Environment ( getArgs, getProgName ) -import System.Mem.Weak ( deRefWeak ) -import qualified Unison.Codebase.Editor.VersionParser as VP -import Unison.Codebase.Execute ( execute ) -import qualified Unison.Codebase.SqliteCodebase as SqliteCodebase -import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) -import Unison.Codebase.Runtime ( Runtime ) -import Unison.CommandLine ( watchConfig ) -import qualified Unison.CommandLine.Main as CommandLine -import qualified Unison.Runtime.Rt1IO as Rt1 -import qualified Unison.Runtime.Interface as RTI -import Unison.Symbol ( Symbol ) -import qualified Unison.Codebase.Path as Path -import qualified Unison.Server.CodebaseServer as Server -import qualified Version -import qualified Unison.Codebase.TranscriptParser as TR -import qualified System.Path as Path -import qualified System.FilePath as FP -import qualified System.IO.Temp as Temp +import Control.Concurrent (mkWeakThreadId, myThreadId) +import Control.Error.Safe (rightMay) +import Control.Exception (AsyncException (UserInterrupt), throwTo) +import Data.ByteString.Char8 (unpack) +import qualified Data.Configurator as Config +import Data.Configurator.Types (Config) +import qualified Data.Text as Text +import qualified Network.URI.Encode as URI +import System.Directory + ( getCurrentDirectory, + removeDirectoryRecursive, + ) +import System.Environment 
(getArgs, getProgName) import qualified System.Exit as Exit +import qualified System.FilePath as FP import System.IO.Error (catchIOError) +import qualified System.IO.Temp as Temp +import System.Mem.Weak (deRefWeak) +import qualified System.Path as Path +import Text.Megaparsec (runParser) +import qualified Unison.Codebase as Codebase import qualified Unison.Codebase.Editor.Input as Input -import qualified Unison.Util.Pretty as P +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) +import qualified Unison.Codebase.Editor.VersionParser as VP +import Unison.Codebase.Execute (execute) +import qualified Unison.Codebase.FileCodebase as FileCodebaseX +import qualified Unison.Codebase.Path as Path +import Unison.Codebase.Runtime (Runtime) +import qualified Unison.Codebase.SqliteCodebase as SqliteCodebaseX +import qualified Unison.Codebase.TranscriptParser as TR +import Unison.CommandLine (watchConfig) +import qualified Unison.CommandLine.Main as CommandLine +import Unison.Parser (Ann) +import Unison.Prelude import qualified Unison.PrettyTerminal as PT -import qualified Data.Text as Text -import qualified Data.Configurator as Config -import Text.Megaparsec (runParser) +import qualified Unison.Runtime.Interface as RTI +import qualified Unison.Runtime.Rt1IO as Rt1 +import qualified Unison.Server.CodebaseServer as Server +import Unison.Symbol (Symbol) +import qualified Unison.Util.Pretty as P +import qualified Version #if defined(mingw32_HOST_OS) import qualified GHC.ConsoleHandler as WinSig @@ -140,9 +145,14 @@ main = do let (mcodepath, restargs0) = case args of "-codebase" : codepath : restargs -> (Just codepath, restargs) _ -> (Nothing, args) - (mNewRun, restargs) = case restargs0 of + (mNewRun, restargs1) = case restargs0 of "--new-runtime" : rest -> (Just True, rest) _ -> (Nothing, restargs0) + (fromMaybe False -> mNewCodebase, restargs) = case restargs1 of + "--new-codebase" : rest -> (Just True, rest) + "--old-codebase" : rest -> (Just False, rest) + _ -> 
(Nothing, restargs1) + cbInit = if mNewCodebase then SqliteCodebaseX.init else FileCodebaseX.init currentDir <- getCurrentDirectory configFilePath <- getConfigFilePath mcodepath config <- @@ -150,7 +160,7 @@ main = do Exit.die "Your .unisonConfig could not be loaded. Check that it's correct!" case restargs of [] -> do - (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath + (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath Server.start theCodebase $ \token port -> do PT.putPrettyLn . P.string $ "I've started a codebase API server at " PT.putPrettyLn . P.string $ "http://127.0.0.1:" @@ -160,9 +170,9 @@ main = do [version] | isFlag "version" version -> putStrLn $ progName ++ " version: " ++ Version.gitDescribe [help] | isFlag "help" help -> PT.putPrettyLn (usage progName) - ["init"] -> SqliteCodebase.initCodebaseAndExit mcodepath + ["init"] -> Codebase.initCodebaseAndExit cbInit mcodepath "run" : [mainName] -> do - (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath + (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath runtime <- join . getStartRuntime mNewRun $ fst config execute theCodebase runtime mainName closeCodebase @@ -171,7 +181,7 @@ main = do case e of Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I couldn't find that file or it is for some reason unreadable." Right contents -> do - (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath + (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath let fileEvent = Input.UnisonFileChanged (Text.pack file) contents launch currentDir mNewRun config theCodebase [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] closeCodebase @@ -180,7 +190,7 @@ main = do case e of Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I had trouble reading this input." 
Right contents -> do - (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit mcodepath + (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath let fileEvent = Input.UnisonFileChanged (Text.pack "") contents launch currentDir mNewRun config theCodebase @@ -188,41 +198,42 @@ main = do closeCodebase "transcript" : args' -> case args' of - "-save-codebase" : transcripts -> runTranscripts mNewRun False True mcodepath transcripts - _ -> runTranscripts mNewRun False False mcodepath args' + "-save-codebase" : transcripts -> runTranscripts mNewRun cbInit False True mcodepath transcripts + _ -> runTranscripts mNewRun cbInit False False mcodepath args' "transcript.fork" : args' -> case args' of - "-save-codebase" : transcripts -> runTranscripts mNewRun True True mcodepath transcripts - _ -> runTranscripts mNewRun True False mcodepath args' + "-save-codebase" : transcripts -> runTranscripts mNewRun cbInit True True mcodepath transcripts + _ -> runTranscripts mNewRun cbInit True False mcodepath args' _ -> do PT.putPrettyLn (usage progName) Exit.exitWith (Exit.ExitFailure 1) -prepareTranscriptDir :: Bool -> Maybe FilePath -> IO FilePath -prepareTranscriptDir inFork mcodepath = do +prepareTranscriptDir :: Codebase.Init IO v a -> Bool -> Maybe FilePath -> IO FilePath +prepareTranscriptDir cbInit inFork mcodepath = do tmp <- Temp.getCanonicalTemporaryDirectory >>= (`Temp.createTempDirectory` "transcript") unless inFork $ do PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." 
- _ <- SqliteCodebase.initCodebase tmp + _ <- Codebase.initCodebase cbInit tmp pure() - when inFork $ SqliteCodebase.getCodebaseOrExit mcodepath >> do - path <- SqliteCodebase.getCodebaseDir mcodepath + when inFork $ Codebase.getCodebaseOrExit cbInit mcodepath >> do + path <- Codebase.getCodebaseDir mcodepath PT.putPrettyLn $ P.lines [ P.wrap "Transcript will be run on a copy of the codebase at: ", "", P.indentN 2 (P.string path) ] - Path.copyDir (path FP. SqliteCodebase.codebasePath) (tmp FP. SqliteCodebase.codebasePath) + Path.copyDir (Codebase.codebasePath cbInit path) (Codebase.codebasePath cbInit tmp) pure tmp runTranscripts' :: Maybe Bool + -> Codebase.Init IO Symbol Ann -> Maybe FilePath -> FilePath -> [String] -> IO Bool -runTranscripts' mNewRun mcodepath transcriptDir args = do +runTranscripts' mNewRun cbInit mcodepath transcriptDir args = do currentDir <- getCurrentDirectory case args of args@(_:_) -> do @@ -237,7 +248,7 @@ runTranscripts' mNewRun mcodepath transcriptDir args = do P.indentN 2 $ P.string err]) Right stanzas -> do configFilePath <- getConfigFilePath mcodepath - (closeCodebase, theCodebase) <- SqliteCodebase.getCodebaseOrExit $ Just transcriptDir + (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit $ Just transcriptDir mdOut <- TR.run mNewRun transcriptDir configFilePath stanzas theCodebase closeCodebase let out = currentDir FP. 
@@ -256,16 +267,17 @@ runTranscripts' mNewRun mcodepath transcriptDir args = do runTranscripts :: Maybe Bool + -> Codebase.Init IO Symbol Ann -> Bool -> Bool -> Maybe FilePath -> [String] -> IO () -runTranscripts mNewRun inFork keepTemp mcodepath args = do +runTranscripts mNewRun cbInit inFork keepTemp mcodepath args = do progName <- getProgName - transcriptDir <- prepareTranscriptDir inFork mcodepath + transcriptDir <- prepareTranscriptDir cbInit inFork mcodepath completed <- - runTranscripts' mNewRun (Just transcriptDir) transcriptDir args + runTranscripts' mNewRun cbInit (Just transcriptDir) transcriptDir args when completed $ do unless keepTemp $ removeDirectoryRecursive transcriptDir when keepTemp $ PT.putPrettyLn $ @@ -316,7 +328,7 @@ isFlag :: String -> String -> Bool isFlag f arg = arg == f || arg == "-" ++ f || arg == "--" ++ f getConfigFilePath :: Maybe FilePath -> IO FilePath -getConfigFilePath mcodepath = (FP. ".unisonConfig") <$> SqliteCodebase.getCodebaseDir mcodepath +getConfigFilePath mcodepath = (FP. 
".unisonConfig") <$> Codebase.getCodebaseDir mcodepath defaultBaseLib :: Maybe RemoteNamespace defaultBaseLib = rightMay $ From 6b0751d48fd60e183f615173a0197e8767425af4 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 25 Mar 2021 11:22:03 -0400 Subject: [PATCH 143/225] tie the knot on calling Sync12 --- .../U/Codebase/Sqlite/Queries.hs | 1 - codebase2/codebase-sync/U/Codebase/Sync.hs | 1 + .../src/Unison/Codebase/Conversion/Sync12.hs | 229 ++++++++++++------ .../src/Unison/Codebase/SqliteCodebase.hs | 2 +- .../tests/Unison/Test/GitSimple.hs | 2 +- parser-typechecker/tests/Unison/Test/IO.hs | 1 - parser-typechecker/transcripts/Transcripts.hs | 14 +- .../unison-parser-typechecker.cabal | 3 + parser-typechecker/unison/Main.hs | 64 +++-- 9 files changed, 224 insertions(+), 93 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 982f5262d1..c3a0e38202 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -621,7 +621,6 @@ queryTrace title query input m = if debugQuery || alwaysTraceOnCrash then do - traceShowM query try @_ @SQLite.SQLError m >>= \case Right a -> do when debugQuery . 
traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a diff --git a/codebase2/codebase-sync/U/Codebase/Sync.hs b/codebase2/codebase-sync/U/Codebase/Sync.hs index c0b829e27b..c1c8c7c442 100644 --- a/codebase2/codebase-sync/U/Codebase/Sync.hs +++ b/codebase2/codebase-sync/U/Codebase/Sync.hs @@ -4,6 +4,7 @@ {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE RankNTypes #-} +{-# LANGUAGE TypeOperators #-} module U.Codebase.Sync where import Data.Foldable (traverse_) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index acbcf980a1..02ae46e538 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -1,5 +1,7 @@ {-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE ExistentialQuantification #-} +{-# LANGUAGE PartialTypeSignatures #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TemplateHaskell #-} @@ -10,18 +12,21 @@ module Unison.Codebase.Conversion.Sync12 where import Control.Lens +import qualified Control.Lens as Lens import Control.Monad.Except (MonadError, runExceptT) -import Control.Monad.Extra ((&&^)) import qualified Control.Monad.Except as Except +import Control.Monad.Extra ((&&^)) import Control.Monad.Reader import qualified Control.Monad.Reader as Reader import Control.Monad.State (MonadState) +import qualified Control.Monad.State as State import Control.Monad.Validate (MonadValidate, runValidateT) import qualified Control.Monad.Validate as Validate import Control.Natural (type (~>)) import Data.Bifoldable (bitraverse_) import Data.Foldable (traverse_) import qualified Data.Foldable as Foldable +import qualified Data.List as List import Data.Map (Map) import qualified Data.Map as Map import Data.Maybe (catMaybes) @@ -32,6 +37,7 @@ import U.Codebase.Sqlite.DbId (Generation) import qualified 
U.Codebase.Sqlite.Queries as Q import U.Codebase.Sync (Sync (Sync), TrySyncResult) import qualified U.Codebase.Sync as Sync +import qualified U.Util.Monoid as Monoid import Unison.Codebase (Codebase) import qualified Unison.Codebase as Codebase import Unison.Codebase.Branch (UnwrappedBranch) @@ -60,9 +66,9 @@ import Unison.Util.Relation (Relation) import qualified Unison.Util.Relation as Relation import Unison.Util.Star3 (Star3 (Star3)) -data Env m = Env - { srcCodebase :: Codebase m Symbol (), - destCodebase :: Codebase m Symbol (), +data Env m a = Env + { srcCodebase :: Codebase m Symbol a, + destCodebase :: Codebase m Symbol a, destConnection :: Connection } @@ -72,55 +78,19 @@ data Entity m | D Hash Reference.Size | P Branch.EditHash -data Entity' - = C' Branch.Hash - | T' Hash - | D' Hash - | P' Branch.EditHash - deriving (Eq, Ord, Show) - -toEntity' :: Entity m -> Entity' -toEntity' = \case - C h _ -> C' h - T h _ -> T' h - D h _ -> D' h - P h -> P' h - -instance Eq (Entity m) where - x == y = toEntity' x == toEntity' y - -instance Ord (Entity m) where - x `compare` y = toEntity' x `compare` toEntity' y - -data BranchStatus m - = BranchOk - | BranchReplaced Branch.Hash (UnwrappedBranch m) - -data BranchStatus' - = BranchOk' - | BranchReplaced' Branch.Hash - deriving (Eq, Ord) - -toBranchStatus' :: BranchStatus m -> BranchStatus' -toBranchStatus' = \case - BranchOk -> BranchOk' - BranchReplaced h _ -> BranchReplaced' h - -instance Eq (BranchStatus m) where - x == y = toBranchStatus' x == toBranchStatus' y - -instance Ord (BranchStatus m) where - x `compare` y = toBranchStatus' x `compare` toBranchStatus' y - type V m n = MonadValidate (Set (Entity m)) n type E e n = MonadError e n type S m n = MonadState (Status m) n -type R m n = MonadReader (Env m) n +type R m n a = MonadReader (Env m a) n + +type RS m n a = (R m n a, S m n) -type RS m n = (R m n, S m n) +data BranchStatus m + = BranchOk + | BranchReplaced Branch.Hash (UnwrappedBranch m) data TermStatus = 
TermOk @@ -145,27 +115,22 @@ data Status m = Status _patchStatus :: Map Branch.EditHash PatchStatus } -makeLenses ''Status +emptyStatus :: Status m +emptyStatus = Status mempty mempty mempty mempty -instance Show (Entity m) where - show = \case - C h _ -> "C " ++ show h - T h len -> "T " ++ show h ++ " " ++ show len - D h len -> "D " ++ show h ++ " " ++ show len - P h -> "P " ++ show h +makeLenses ''Status sync12 :: - forall m n. - (MonadIO n, RS m n, Applicative m) => + (MonadIO f, MonadReader (Env p x) f, RS m n a, Applicative m) => (m ~> n) -> - n (Sync n (Entity m)) + f (Sync n (Entity m)) sync12 t = do - gc <- runDest' $ Q.getNurseryGeneration + gc <- runDest' Q.getNurseryGeneration pure $ Sync (trySync t (succ gc)) trySync :: - forall m n. - (R m n, S m n, Applicative m) => + forall m n a. + (R m n a, S m n, Applicative m) => (m ~> n) -> Generation -> Entity m -> @@ -254,12 +219,12 @@ setBranchStatus :: forall m n. S m n => Branch.Hash -> BranchStatus m -> n () setBranchStatus h s = branchStatus . at h .= Just s checkTermComponent :: - forall m n. - (RS m n, V m n, E TermStatus n) => + forall m n a. + (RS m n a, V m n, E TermStatus n) => (m ~> n) -> Hash -> Reference.Size -> - n [(Term Symbol (), Type Symbol ())] + n [(Term Symbol a, Type Symbol a)] checkTermComponent t h n = do Env src _ _ <- Reader.ask for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do @@ -290,12 +255,12 @@ checkTermComponent t h n = do pure (term, typ) checkDeclComponent :: - forall m n. - (RS m n, E DeclStatus n, V m n) => + forall m n a. + (RS m n a, E DeclStatus n, V m n) => (m ~> n) -> Hash -> Reference.Size -> - n [Decl Symbol ()] + n [Decl Symbol a] checkDeclComponent t h n = do Env src _ _ <- Reader.ask for [Reference.Id h i n | i <- [0 .. n -1]] \r -> do @@ -315,8 +280,8 @@ checkDeclComponent t h n = do pure decl checkPatch :: - forall m n. - (RS m n, E PatchStatus n, V m n) => + forall m n a. 
+ (RS m n a, E PatchStatus n, V m n) => (m ~> n) -> Branch.EditHash -> n (Branch.EditHash, Patch) @@ -419,7 +384,7 @@ filterBranchTypeStar (Star3 _refs names _mdType md) = do filterMetadata :: (S m n, V m n, Ord r) => Relation r (Metadata.Type, Metadata.Value) -> n (Relation r (Metadata.Type, Metadata.Value)) filterMetadata = Relation.filterRanM \(t, v) -> - validateTypeReference t &&^ validateTermReference v + validateTypeReference t &&^ validateTermReference v filterTermNames :: (S m n, V m n) => Relation Referent NameSegment -> n (Relation Referent NameSegment) filterTermNames = Relation.filterDomM validateTermReferent @@ -473,11 +438,11 @@ filterBranchEdits = fmap (Map.fromList . catMaybes) . traverse go . Map.toList rebuild h = pure $ Just (ns, (h, err h)) err h = error $ "expected to short-circuit already-synced patch " ++ show h -runSrc, runDest :: R m n => (Codebase m Symbol () -> a) -> n a +runSrc, runDest :: R m n a => (Codebase m Symbol a -> x) -> n x runSrc = (Reader.reader srcCodebase <&>) runDest = (Reader.reader destCodebase <&>) -runDest' :: R m n => ReaderT Connection n a -> n a +runDest' :: R m n x => ReaderT Connection n a -> n a runDest' ma = Reader.reader destConnection >>= flip runDB ma runDB :: Connection -> ReaderT Connection m a -> m a @@ -488,4 +453,126 @@ runDB conn action = Reader.runReaderT action conn -- b) if any of its dependencies have not yet been synced -- (if so, note as validation) -- c) if any of its dependencies are missing from the source codebase --- (if so, then filter them if possible, otherwise give this entity an error Status) \ No newline at end of file +-- (if so, then filter them if possible, otherwise give this entity an error Status) + +data DoneCount = DoneCount + { _doneBranches :: Int, + _doneTerms :: Int, + _doneDecls :: Int, + _donePatches :: Int + } + +data ErrorCount = ErrorCount + { _errorBranches :: Int, + _errorTerms :: Int, + _errorDecls :: Int, + _errorPatches :: Int + } + +emptyDoneCount :: DoneCount 
+emptyDoneCount = DoneCount 0 0 0 0 + +emptyErrorCount :: ErrorCount +emptyErrorCount = ErrorCount 0 0 0 0 + +makeLenses ''DoneCount +makeLenses ''ErrorCount + +type ProgressState m = (DoneCount, ErrorCount, Status m) + +simpleProgress :: MonadState (ProgressState m) n => MonadIO n => Sync.Progress n (Entity m) +simpleProgress = Sync.Progress need done error allDone + where + -- ignore need + need _ = pure () + done e = do + case e of + C {} -> _1 . doneBranches += 1 + T {} -> _1 . doneTerms += 1 + D {} -> _1 . doneDecls += 1 + P {} -> _1 . donePatches += 1 + printProgress + + error e = do + case e of + C {} -> _2 . errorBranches += 1 + T {} -> _2 . errorTerms += 1 + D {} -> _2 . errorDecls += 1 + P {} -> _2 . errorPatches += 1 + printProgress + + allDone :: MonadState (DoneCount, ErrorCount, Status m) n => MonadIO n => n () + allDone = do + Status branches terms decls patches <- Lens.use Lens._3 + liftIO $ putStr "Finished." + Foldable.for_ (Map.toList decls) \(h, s) -> case s of + DeclOk -> pure () + DeclMissing -> liftIO . putStrLn $ "I couldn't find the decl " ++ show h ++ ", so I filtered it out of the sync." + DeclMissingDependencies -> liftIO . putStrLn $ "One or more dependencies of decl " ++ show h ++ " were missing, so I filtered it out of the sync." + Foldable.for_ (Map.toList terms) \(h, s) -> case s of + TermOk -> pure () + TermMissing -> liftIO . putStrLn $ "I couldn't find the term " ++ show h ++ "so I filtered it out of the sync." + TermMissingType -> liftIO . putStrLn $ "The type of term " ++ show h ++ " was missing, so I filtered it out of the sync." + TermMissingDependencies -> liftIO . putStrLn $ "One or more dependencies of term " ++ show h ++ " were missing, so I filtered it out of the sync." + Foldable.for_ (Map.toList patches) \(h, s) -> case s of + PatchOk -> pure () + PatchMissing -> liftIO . putStrLn $ "I couldn't find the patch " ++ show h ++ ", so I filtered it out of the sync." + PatchReplaced h' -> liftIO . 
putStrLn $ "I replaced the patch " ++ show h ++ " with the filtered version " ++ show h' ++ "." + Foldable.for_ (Map.toList branches) \(h, s) -> case s of + BranchOk -> pure () + BranchReplaced h' _ -> liftIO . putStrLn $ "I replaced the branch " ++ show h ++ " with the filtered version " ++ show h' ++ "." + + printProgress :: MonadState (ProgressState m) n => MonadIO n => n () + printProgress = do + (DoneCount b t d p, ErrorCount b' t' d' p', _) <- State.get + let ways :: [Maybe String] = + [ Monoid.whenM (b > 0 || b' > 0) (Just $ show b ++ " branches" ++ Monoid.whenM (b' > 0) (" (" ++ show b' ++ "errors)")), + Monoid.whenM (t > 0 || t' > 0) (Just $ show t ++ " branches" ++ Monoid.whenM (t' > 0) (" (" ++ show t' ++ "errors)")), + Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " branches" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ "errors)")), + Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " branches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ "errors)")) + ] + liftIO . putStr $ "\rSynced " ++ List.intercalate "," (catMaybes ways) + + +instance Show (Entity m) where + show = \case + C h _ -> "C " ++ show h + T h len -> "T " ++ show h ++ " " ++ show len + D h len -> "D " ++ show h ++ " " ++ show len + P h -> "P " ++ show h + +data Entity' + = C' Branch.Hash + | T' Hash + | D' Hash + | P' Branch.EditHash + deriving (Eq, Ord, Show) + +toEntity' :: Entity m -> Entity' +toEntity' = \case + C h _ -> C' h + T h _ -> T' h + D h _ -> D' h + P h -> P' h + +instance Eq (Entity m) where + x == y = toEntity' x == toEntity' y + +instance Ord (Entity m) where + x `compare` y = toEntity' x `compare` toEntity' y + +data BranchStatus' + = BranchOk' + | BranchReplaced' Branch.Hash + deriving (Eq, Ord) + +toBranchStatus' :: BranchStatus m -> BranchStatus' +toBranchStatus' = \case + BranchOk -> BranchOk' + BranchReplaced h _ -> BranchReplaced' h + +instance Eq (BranchStatus m) where + x == y = toBranchStatus' x == toBranchStatus' y + +instance Ord (BranchStatus m) where + x 
`compare` y = toBranchStatus' x `compare` toBranchStatus' y diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index d0daa2a06a..257fed1214 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -6,7 +6,7 @@ {-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} -module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init) where +module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init, unsafeGetConnection) where import qualified Control.Concurrent import qualified Control.Exception diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index de691f2a7c..f6dd6745bb 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -24,7 +24,7 @@ writeTranscriptOutput :: Bool writeTranscriptOutput = True test :: Test () -test = scope "git-simple" . tests $ flip map [(FC.init, "fc")]--, (SC.init, "fc")] +test = scope "git-simple" . 
tests $ flip map [(FC.init, "fc"), (SC.init, "fc")] \(cbInit, name) -> scope name $ tests [ pushPullTest cbInit "one-term" -- simplest-author diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs index 286a68a9d4..222bbd5e6b 100644 --- a/parser-typechecker/tests/Unison/Test/IO.hs +++ b/parser-typechecker/tests/Unison/Test/IO.hs @@ -14,7 +14,6 @@ import System.FilePath (()) import System.Directory (removeDirectoryRecursive) import Unison.Codebase (Codebase, CodebasePath) -import qualified Unison.Codebase.SqliteCodebase as SqliteCodebase import qualified Unison.Codebase.TranscriptParser as TR import Unison.Parser (Ann) import Unison.Symbol (Symbol) diff --git a/parser-typechecker/transcripts/Transcripts.hs b/parser-typechecker/transcripts/Transcripts.hs index 13dfc1f81a..4d9a3e645c 100644 --- a/parser-typechecker/transcripts/Transcripts.hs +++ b/parser-typechecker/transcripts/Transcripts.hs @@ -20,13 +20,19 @@ import Data.List type TestBuilder = FilePath -> FilePath -> [String] -> String -> Test () +data Codebase = CodebaseV1 | CodebaseV2 deriving (Eq, Ord, Show, Enum, Bounded) + testBuilder :: FilePath -> FilePath -> [String] -> String -> Test () -testBuilder ucm dir prelude transcript = scope transcript $ do - io $ fromString ucm args +testBuilder ucm dir prelude transcript = scope transcript . tests $ + flip map [minBound .. maxBound] \cb -> cbScope cb do + io $ fromString ucm (args cb) ok where files = fmap (pack . 
(dir )) (prelude ++ [transcript]) - args = ["transcript"] ++ files + cbScope CodebaseV1 = scope "FC" + cbScope CodebaseV2 = scope "SC" + args CodebaseV1 = "--old-codebase" : "transcript" : files + args CodebaseV2 = "--new-codebase" : "transcript" : files testBuilderNewRuntime :: FilePath -> FilePath -> [String] -> String -> Test () testBuilderNewRuntime ucm dir prelude transcript = scope transcript $ do @@ -66,7 +72,7 @@ buildTests testBuilder dir = do , "Searching for transcripts to run in: " ++ dir ] files <- io $ listDirectory dir - let + let -- Any files that start with _ are treated as prelude (prelude, transcripts) = partition ((isPrefixOf "_") . snd . splitFileName) diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index de3956ce8d..31d4e36f21 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -311,7 +311,9 @@ executable unison directory, errors, filepath, + lens, megaparsec, + mtl, safe, shellmet, template-haskell, @@ -319,6 +321,7 @@ executable unison text, unison-core1, unison-parser-typechecker, + unison-codebase-sync, uri-encode if !os(windows) build-depends: diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 086113140f..ffaada4fbd 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -2,22 +2,32 @@ {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PartialTypeSignatures #-} {-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ApplicativeDo #-} {-# OPTIONS_GHC -Wno-partial-type-signatures #-} module Main where +#if defined(mingw32_HOST_OS) +import qualified GHC.ConsoleHandler as WinSig +#else +import qualified System.Posix.Signals as Sig +#endif + import Control.Concurrent (mkWeakThreadId, myThreadId) import Control.Error.Safe (rightMay) import Control.Exception (AsyncException (UserInterrupt), throwTo) +import Control.Lens 
(Lens', (&)) +import qualified Control.Lens as Lens +import qualified Control.Monad.Reader as Reader +import Control.Monad.State (StateT (StateT, runStateT)) +import qualified Control.Monad.State as State import Data.ByteString.Char8 (unpack) import qualified Data.Configurator as Config import Data.Configurator.Types (Config) import qualified Data.Text as Text import qualified Network.URI.Encode as URI -import System.Directory - ( getCurrentDirectory, - removeDirectoryRecursive, - ) +import System.Directory (getCurrentDirectory, removeDirectoryRecursive) import System.Environment (getArgs, getProgName) import qualified System.Exit as Exit import qualified System.FilePath as FP @@ -26,15 +36,19 @@ import qualified System.IO.Temp as Temp import System.Mem.Weak (deRefWeak) import qualified System.Path as Path import Text.Megaparsec (runParser) +import qualified U.Codebase.Sync as Sync import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Branch as Branch +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.Conversion.Sync12 as Sync12 import qualified Unison.Codebase.Editor.Input as Input import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) import qualified Unison.Codebase.Editor.VersionParser as VP import Unison.Codebase.Execute (execute) -import qualified Unison.Codebase.FileCodebase as FileCodebaseX +import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.Path as Path import Unison.Codebase.Runtime (Runtime) -import qualified Unison.Codebase.SqliteCodebase as SqliteCodebaseX +import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.Codebase.TranscriptParser as TR import Unison.CommandLine (watchConfig) import qualified Unison.CommandLine.Main as CommandLine @@ -48,12 +62,6 @@ import Unison.Symbol (Symbol) import qualified Unison.Util.Pretty as P import qualified Version -#if defined(mingw32_HOST_OS) -import qualified GHC.ConsoleHandler as WinSig -#else 
-import qualified System.Posix.Signals as Sig -#endif - usage :: String -> P.Pretty P.ColorText usage executableStr = P.callout "🌻" $ P.lines [ P.bold "Usage instructions for the Unison Codebase Manager", @@ -148,11 +156,11 @@ main = do (mNewRun, restargs1) = case restargs0 of "--new-runtime" : rest -> (Just True, rest) _ -> (Nothing, restargs0) - (fromMaybe False -> mNewCodebase, restargs) = case restargs1 of + (fromMaybe False -> newCodebase, restargs) = case restargs1 of "--new-codebase" : rest -> (Just True, rest) "--old-codebase" : rest -> (Just False, rest) _ -> (Nothing, restargs1) - cbInit = if mNewCodebase then SqliteCodebaseX.init else FileCodebaseX.init + cbInit = if newCodebase then SC.init else FC.init currentDir <- getCurrentDirectory configFilePath <- getConfigFilePath mcodepath config <- @@ -204,10 +212,38 @@ main = do case args' of "-save-codebase" : transcripts -> runTranscripts mNewRun cbInit True True mcodepath transcripts _ -> runTranscripts mNewRun cbInit True False mcodepath args' + ["upgrade-codebase"] -> upgradeCodebase mcodepath _ -> do PT.putPrettyLn (usage progName) Exit.exitWith (Exit.ExitFailure 1) +upgradeCodebase :: Maybe Codebase.CodebasePath -> IO () +upgradeCodebase mcodepath = do + (cleanupSrc, srcCB) <- Codebase.getCodebaseOrExit FC.init mcodepath -- FC.init mcodepath + (cleanupDest, destCB) <- Codebase.getCodebaseOrExit SC.init mcodepath -- FC.init mcodepath + destDB <- SC.unsafeGetConnection =<< Codebase.getCodebaseDir mcodepath + let env = Sync12.Env srcCB destCB destDB + let initialState :: Sync12.ProgressState _ = + (Sync12.emptyDoneCount, Sync12.emptyErrorCount, Sync12.emptyStatus) + rootEntity <- + Codebase.getRootBranch srcCB >>= \case + Left e -> error $ "Error loading source codebase root branch: " ++ show e + Right (Branch.Branch c) -> pure $ Sync12.C (Causal.currentHash c) (pure c) + let progress = Sync12.simpleProgress @IO + flip Reader.runReaderT env . 
flip State.evalStateT initialState $ do + sync <- Sync12.sync12 (lift . lift) + Sync.sync @_ @(Sync12.Entity _) + (Sync.transformSync (lensStateT Lens._3) sync) + progress + [rootEntity] + cleanupSrc + cleanupDest + where + lensStateT :: Monad m => Lens' s2 s1 -> StateT s1 m a -> StateT s2 m a + lensStateT l m = StateT \s2 -> do + (a, s1') <- runStateT m (s2 Lens.^. l) + pure (a, s2 & l Lens..~ s1') + prepareTranscriptDir :: Codebase.Init IO v a -> Bool -> Maybe FilePath -> IO FilePath prepareTranscriptDir cbInit inFork mcodepath = do tmp <- Temp.getCanonicalTemporaryDirectory >>= (`Temp.createTempDirectory` "transcript") From a20e7eaba10259ef93eac8729332026a4ef064dd Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 30 Mar 2021 17:16:29 -0700 Subject: [PATCH 144/225] refactor Codebase.Init interface --- .../U/Codebase/Sqlite/Queries.hs | 2 +- codebase2/util/U/Util/Cache.hs | 5 + parser-typechecker/src/Unison/Codebase.hs | 23 +- .../src/Unison/Codebase/FileCodebase.hs | 83 +---- .../src/Unison/Codebase/Init.hs | 98 ++++++ .../src/Unison/Codebase/SqliteCodebase.hs | 331 +++++++++--------- parser-typechecker/tests/Unison/Test/Git.hs | 3 +- .../tests/Unison/Test/GitSimple.hs | 5 +- parser-typechecker/tests/Unison/Test/IO.hs | 3 +- .../unison-parser-typechecker.cabal | 1 + parser-typechecker/unison/Main.hs | 5 +- 11 files changed, 297 insertions(+), 262 deletions(-) create mode 100644 parser-typechecker/src/Unison/Codebase/Init.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index c3a0e38202..fc092770e5 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -38,7 +38,7 @@ import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (. 
import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField) import Database.SQLite.Simple.ToField (ToField (..)) -import Debug.Trace (trace, traceM, traceShowM) +import Debug.Trace (trace, traceM) import GHC.Stack (HasCallStack) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') diff --git a/codebase2/util/U/Util/Cache.hs b/codebase2/util/U/Util/Cache.hs index 7bac9b5735..70dcdde575 100644 --- a/codebase2/util/U/Util/Cache.hs +++ b/codebase2/util/U/Util/Cache.hs @@ -1,4 +1,6 @@ +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RecordWildCards #-} module U.Util.Cache where import Prelude hiding (lookup) @@ -13,6 +15,9 @@ data Cache m k v = , insert :: k -> v -> m () } +transform :: (forall a. m a -> n a) -> Cache m k v -> Cache n k v +transform f Cache {..} = Cache (f . lookup) ((f .) . insert) + -- Create a cache of unbounded size. cache :: (MonadIO m, Ord k) => m (Cache m k v) cache = do diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index f3280bc7e1..de6a6a7f15 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -57,23 +57,6 @@ type SyncToDir m = Branch m -> -- branch to sync to dest codebase m () --- | just collecting the abstraction we're currently using, not what we maybe should be using. -AI -data Init m v a = Init - { - openCodebase :: CodebasePath -> m (Either String (m (), Codebase m v a)), - -- | load an existing codebase or exit. 
- -- seems like Maybe might be more useful idk - getCodebaseOrExit :: Maybe CodebasePath -> m (m (), Codebase m v a), - -- | initialize a codebase where none exists, or exit - -- seems again like the return type type could be more useful - initCodebase :: CodebasePath -> m (m (), Codebase m v a), - -- | try to init a codebase where none exists and then exit regardless - initCodebaseAndExit :: Maybe CodebasePath -> m (), - -- | given a codebase root, and given that the codebase root may have other junk in it, - -- give the path to the "actual" files; e.g. what a forked transcript should clone - codebasePath :: CodebasePath -> CodebasePath - } - -- | Abstract interface to a user's codebase. -- -- One implementation is 'Unison.Codebase.FileCodebase' which uses the filesystem. @@ -145,9 +128,9 @@ data SyncFileCodebaseResult = SyncOk | UnknownDestinationRootBranch Branch.Hash getCodebaseDir :: MonadIO m => Maybe FilePath -> m FilePath getCodebaseDir = maybe getHomeDirectory pure --- | Write all of the builtins types into the codebase and create empty namespace -initializeCodebase :: forall m. Monad m => Codebase m Symbol Parser.Ann -> m () -initializeCodebase c = do +-- | Write all of UCM's dependencies (builtins types and an empty namespace) into the codebase +installUcmDependencies :: forall m. 
Monad m => Codebase m Symbol Parser.Ann -> m () +installUcmDependencies c = do let uf = (UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls) (Map.fromList Builtin.builtinEffectDecls) [Builtin.builtinTermsSrc Parser.Intrinsic] diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index ac64131044..9c33c6ca4b 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -18,12 +18,11 @@ import qualified Data.Set as Set import qualified Data.Text as Text import qualified Data.Text.IO as TextIO import System.Directory (canonicalizePath) -import System.Environment (getProgName) -import System.Exit (exitFailure, exitSuccess) import System.FilePath (takeFileName, ()) import qualified U.Util.Cache as Cache import Unison.Codebase (BuiltinAnnotation, Codebase (Codebase), CodebasePath) import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Init as Codebase import Unison.Codebase.Branch (Branch, headHash) import qualified Unison.Codebase.Branch as Branch import Unison.Codebase.Editor.Git (gitIn, gitTextIn, pullBranch, withIOError, withStatus) @@ -77,7 +76,6 @@ import Unison.Codebase.SyncMode (SyncMode) import qualified Unison.Codebase.Watch as Watch import Unison.Parser (Ann ()) import Unison.Prelude -import qualified Unison.PrettyTerminal as PT import Unison.Reference (Reference) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent @@ -93,76 +91,33 @@ import UnliftIO.Directory (createDirectoryIfMissing, doesDirectoryExist) import UnliftIO.Exception (catchIO) import UnliftIO.STM (atomically) -init :: Codebase.Init IO Symbol Ann +init :: MonadUnliftIO m => Codebase.Init m Symbol Ann init = Codebase.Init - getCodebaseOrError - (fmap (pure (),) . getCodebaseOrExit Cache.nullCache) - (fmap (pure (),) . 
initCodebase Cache.nullCache) - initCodebaseAndExit + openCodebase + createCodebase ( Common.codebasePath) -initCodebaseAndExit :: Maybe FilePath -> IO () -initCodebaseAndExit mdir = do - dir <- Codebase.getCodebaseDir mdir - cache <- Cache.cache - _ <- initCodebase cache dir - exitSuccess - --- initializes a new codebase here (i.e. `ucm -codebase dir init`) -initCodebase :: Branch.Cache IO -> FilePath -> IO (Codebase IO Symbol Ann) -initCodebase cache path = do - theCodebase <- codebase1 cache V1.formatSymbol Common.formatAnn path - prettyDir <- P.string <$> canonicalizePath path - - whenM (codebaseExists path) $ - do PT.putPrettyLn' - . P.wrap - $ "It looks like there's already a codebase in: " - <> prettyDir - exitFailure - - PT.putPrettyLn' - . P.wrap - $ "Initializing a new codebase in: " - <> prettyDir - Codebase.initializeCodebase theCodebase - pure theCodebase - --- get the codebase in dir, or in the home directory if not provided. -getCodebaseOrExit :: Branch.Cache IO -> Maybe FilePath -> IO (Codebase IO Symbol Ann) -getCodebaseOrExit cache mdir = do - dir <- Codebase.getCodebaseDir mdir - progName <- getProgName - prettyDir <- P.string <$> canonicalizePath dir - let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir - let theCodebase = codebase1 cache V1.formatSymbol formatAnn dir - unlessM (codebaseExists dir) $ do - PT.putPrettyLn' errMsg - exitFailure - theCodebase - -- get the codebase in dir -getCodebaseOrError :: forall m. MonadUnliftIO m => CodebasePath -> m (Either String (m (), Codebase m Symbol Ann)) -getCodebaseOrError dir = do +openCodebase :: forall m. MonadUnliftIO m => CodebasePath -> m (Either Codebase.Pretty (m (), Codebase m Symbol Ann)) +openCodebase dir = do prettyDir <- liftIO $ P.string <$> canonicalizePath dir let theCodebase = codebase1 @m @Symbol @Ann Cache.nullCache V1.formatSymbol formatAnn dir ifM (codebaseExists dir) (Right . (pure (),) <$> theCodebase) - (pure . Left . 
P.render @String 80 $ "No FileCodebase structure found at " <> prettyDir) + (pure . Left $ "No FileCodebase structure found at " <> prettyDir) -getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s -getNoCodebaseErrorMsg executable prettyDir mdir = - let secondLine = - case mdir of - Just dir -> "Run `" <> executable <> " -codebase " <> fromString dir - <> " init` to create one, then try again!" - Nothing -> "Run `" <> executable <> " init` to create one there," - <> " then try again;" - <> " or `" <> executable <> " -codebase ` to load a codebase from someplace else!" - in - P.lines - [ "No codebase exists in " <> prettyDir <> "." - , secondLine ] +createCodebase :: + forall m. + MonadUnliftIO m => + CodebasePath -> + m (Either Codebase.CreateCodebaseError (m (), Codebase m Symbol Ann)) +createCodebase dir = ifM + (codebaseExists dir) + (pure $ Left Codebase.CreateCodebaseAlreadyExists) + (do + codebase <- codebase1 @m @Symbol @Ann Cache.nullCache V1.formatSymbol formatAnn dir + Codebase.installUcmDependencies codebase + pure $ Right (pure (), codebase)) -- builds a `Codebase IO v a`, given serializers for `v` and `a` codebase1 diff --git a/parser-typechecker/src/Unison/Codebase/Init.hs b/parser-typechecker/src/Unison/Codebase/Init.hs new file mode 100644 index 0000000000..cdbdb9fca1 --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Init.hs @@ -0,0 +1,98 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Codebase.Init where + +import qualified Data.Text as Text +import System.Exit (exitFailure) +import Unison.Codebase (Codebase, CodebasePath) +import qualified Unison.Codebase as Codebase +import Unison.Parser (Ann) +import Unison.Prelude +import qualified Unison.PrettyTerminal as PT +import Unison.Symbol (Symbol) +import qualified Unison.Util.Pretty as P +import UnliftIO.Directory (canonicalizePath) +import UnliftIO.Environment (getProgName) + +type Pretty = P.Pretty P.ColorText + +data CreateCodebaseError + 
= CreateCodebaseAlreadyExists + | CreateCodebaseOther Pretty + +data Init m v a = Init + { -- | open an existing codebase + openCodebase :: CodebasePath -> m (Either Pretty (m (), Codebase m v a)), + -- | create a new codebase + createCodebase' :: CodebasePath -> m (Either CreateCodebaseError (m (), Codebase m v a)), + -- | given a codebase root, and given that the codebase root may have other junk in it, + -- give the path to the "actual" files; e.g. what a forked transcript should clone + codebasePath :: CodebasePath -> CodebasePath + } + +-- * compatibility stuff + +-- | load an existing codebase or exit. +getCodebaseOrExit :: MonadIO m => Init m v a -> Maybe CodebasePath -> m (m (), Codebase m v a) +getCodebaseOrExit init mdir = do + dir <- Codebase.getCodebaseDir mdir + openCodebase init dir >>= \case + Left _e -> liftIO do + progName <- getProgName + prettyDir <- P.string <$> canonicalizePath dir + PT.putPrettyLn' $ getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir + exitFailure + Right x -> pure x + where + getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s + getNoCodebaseErrorMsg executable prettyDir mdir = + let secondLine = + case mdir of + Just dir -> + "Run `" <> executable <> " -codebase " <> fromString dir + <> " init` to create one, then try again!" + Nothing -> + "Run `" <> executable <> " init` to create one there," + <> " then try again;" + <> " or `" + <> executable + <> " -codebase ` to load a codebase from someplace else!" 
+ in P.lines + [ "No codebase exists in " <> prettyDir <> ".", + secondLine + ] + +-- previously: initCodebaseOrExit :: CodebasePath -> m (m (), Codebase m v a) +-- previously: FileCodebase.initCodebase :: CodebasePath -> m (m (), Codebase m v a) +openNewUcmCodebaseOrExit :: MonadIO m => Init m Symbol Ann -> CodebasePath -> m (m (), Codebase m Symbol Ann) +openNewUcmCodebaseOrExit cbInit path = do + prettyDir <- P.string <$> canonicalizePath path + createCodebase' cbInit path >>= \case + Left CreateCodebaseAlreadyExists -> liftIO do + PT.putPrettyLn' + . P.wrap + $ "It looks like there's already a codebase in: " + <> prettyDir + exitFailure + Left (CreateCodebaseOther message) -> liftIO do + PT.putPrettyLn' $ + P.wrap ("I ran into an error when creating the codebase in: " <> prettyDir) + <> P.newline + <> P.newline + <> "The error was:" + <> P.newline + <> P.indentN 2 message + exitFailure + Right x@(_, codebase) -> do + liftIO $ + PT.putPrettyLn' + . P.wrap + $ "Initializing a new codebase in: " + <> prettyDir + Codebase.installUcmDependencies codebase + pure x + +-- | try to init a codebase where none exists and then exit regardless (i.e. 
`ucm -codebase dir init`) +initCodebaseAndExit :: MonadIO m => Init m Symbol Ann -> Maybe CodebasePath -> m () +initCodebaseAndExit i mdir = + void $ openNewUcmCodebaseOrExit i =<< Codebase.getCodebaseDir mdir diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 257fed1214..1773a419d9 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -11,7 +11,7 @@ module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init, unsa import qualified Control.Concurrent import qualified Control.Exception import Control.Monad (filterM, when, (>=>)) -import Control.Monad.Except (ExceptT, runExceptT,MonadError (throwError)) +import Control.Monad.Except (ExceptT, MonadError (throwError), runExceptT) import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM, unlessM, (||^)) import qualified Control.Monad.Extra as Monad @@ -20,7 +20,8 @@ import Control.Monad.State (MonadState) import qualified Control.Monad.State as State import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) -import Data.Bifunctor (Bifunctor (first, bimap), second) +import Data.Bifunctor (Bifunctor (bimap, first), second) +import qualified Data.Bifunctor as Bifunctor import qualified Data.Either.Combinators as Either import Data.Foldable (Foldable (toList), traverse_) import Data.Functor (void) @@ -29,7 +30,6 @@ import Data.Map (Map) import qualified Data.Map as Map import Data.Set (Set) import qualified Data.Set as Set -import Data.String (IsString (fromString)) import Data.Text (Text) import qualified Data.Text as Text import qualified Data.Text.IO as TextIO @@ -37,7 +37,7 @@ import qualified Data.Validation as Validation import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite -import qualified System.Exit as SysExit 
+import GHC.Stack (HasCallStack) import System.FilePath (()) import qualified System.FilePath as FilePath import U.Codebase.HashTags (CausalHash (CausalHash, unCausalHash)) @@ -52,12 +52,19 @@ import qualified U.Util.Hash as H2 import qualified U.Util.Monoid as Monoid import qualified U.Util.Set as Set import qualified Unison.Builtin as Builtins -import Unison.Codebase (CodebasePath) +import Unison.Codebase (Codebase, CodebasePath) import qualified Unison.Codebase as Codebase1 import Unison.Codebase.Branch (Branch (..)) import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Causal as Causal +import Unison.Codebase.Editor.Git (gitIn, gitTextIn, pullBranch, withIOError, withStatus) +import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, RemoteRepo (GitRepo), printRepo) +import Unison.Codebase.GitError (GitError) +import qualified Unison.Codebase.GitError as GitError +import qualified Unison.Codebase.Init as Codebase +import qualified Unison.Codebase.Init as Codebase1 import Unison.Codebase.Patch (Patch) +import qualified Unison.Codebase.Path as Path import qualified Unison.Codebase.Reflog as Reflog import Unison.Codebase.ShortBranchHash (ShortBranchHash) import qualified Unison.Codebase.SqliteCodebase.Branch.Dependencies as BD @@ -69,8 +76,7 @@ import Unison.DataDeclaration (Decl) import qualified Unison.DataDeclaration as Decl import Unison.Hash (Hash) import Unison.Parser (Ann) -import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, trace, traceM, isJust) -import qualified Unison.PrettyTerminal as PT +import Unison.Prelude (MaybeT (runMaybeT), fromMaybe, isJust, trace, traceM) import Unison.Reference (Reference) import qualified Unison.Reference as Reference import qualified Unison.Referent as Referent @@ -83,47 +89,65 @@ import qualified Unison.Term as Term import Unison.Type (Type) import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF +import qualified Unison.Util.ColorText as ColorText import qualified 
Unison.Util.Pretty as P +import Unison.Util.Timing (time) import UnliftIO (MonadIO, catchIO, liftIO) import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, doesFileExist) -import qualified UnliftIO.Environment as SysEnv import UnliftIO.STM -import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, RemoteRepo (GitRepo), printRepo) -import Unison.Codebase.GitError (GitError) -import qualified Unison.Codebase.GitError as GitError -import qualified Unison.Codebase as Codebase -import qualified Unison.Codebase.Path as Path -import Unison.Codebase.Editor.Git (withIOError, withStatus, gitTextIn, gitIn, pullBranch) -import Unison.Util.Timing (time) -import GHC.Stack (HasCallStack) -import qualified Unison.Util.ColorText as ColorText -import qualified Data.Bifunctor as Bifunctor debug, debugProcessBranches :: Bool debug = False debugProcessBranches = False -init :: HasCallStack => MonadIO m => Codebase1.Init m Symbol Ann -init = Codebase1.Init getCodebaseOrError getCodebaseOrExit initCodebase initCodebaseAndExit ( codebasePath) +init :: HasCallStack => MonadIO m => Codebase.Init m Symbol Ann +init = Codebase.Init getCodebaseOrError createCodebaseOrError ( codebasePath) + +createCodebaseOrError :: + MonadIO m => + CodebasePath -> + m (Either Codebase1.CreateCodebaseError (m (), Codebase m Symbol Ann)) +createCodebaseOrError dir = do + prettyDir <- P.string <$> canonicalizePath dir + let convertError = \case + CreateCodebaseAlreadyExists -> Codebase1.CreateCodebaseAlreadyExists + CreateCodebaseMissingSchema schema -> + let prettyError :: [(Q.SchemaType, Q.SchemaName)] -> Codebase1.Pretty + prettyError schema = + (("Missing SqliteCodebase structure in " <> prettyDir <> ".") <>) + . 
P.column2Header "Schema Type" "Name" + $ map (Bifunctor.bimap P.string P.string) schema + in Codebase1.CreateCodebaseOther $ prettyError schema + Either.mapLeft convertError <$> createCodebaseOrError' dir + where + +data CreateCodebaseError + = CreateCodebaseAlreadyExists + | CreateCodebaseMissingSchema [(Q.SchemaType, Q.SchemaName)] + deriving (Show) + +createCodebaseOrError' :: + MonadIO m => + CodebasePath -> + m (Either CreateCodebaseError (m (), Codebase m Symbol Ann)) +createCodebaseOrError' path = do + ifM + (doesFileExist $ path codebasePath) + (pure $ Left CreateCodebaseAlreadyExists) + do + createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) + liftIO $ + Control.Exception.bracket + (unsafeGetConnection path) + Sqlite.close + (runReaderT Q.createSchema) + fmap (Either.mapLeft CreateCodebaseMissingSchema) (sqliteCodebase path) codebasePath :: FilePath codebasePath = ".unison" "v2" "unison.sqlite3" --- get the codebase in dir, or in the home directory if not provided. -getCodebaseOrExit :: MonadIO m => Maybe FilePath -> m (m (), Codebase1.Codebase m Symbol Ann) -getCodebaseOrExit mdir = do - dir <- Codebase.getCodebaseDir mdir - progName <- SysEnv.getProgName - prettyDir <- P.string <$> canonicalizePath dir - let errMsg = getNoCodebaseErrorMsg ((P.text . Text.pack) progName) prettyDir mdir - sqliteCodebase dir >>= \case - Left _missingSchema -> liftIO do - PT.putPrettyLn' errMsg - SysExit.exitFailure - Right c -> pure c - -- get the codebase in dir -getCodebaseOrError :: forall m. MonadIO m => CodebasePath -> m (Either String (m (), Codebase.Codebase m Symbol Ann)) +getCodebaseOrError :: forall m. MonadIO m => CodebasePath -> m (Either Codebase1.Pretty (m (), Codebase m Symbol Ann)) getCodebaseOrError dir = do prettyDir <- liftIO $ P.string <$> canonicalizePath dir let prettyError :: [(Q.SchemaType, Q.SchemaName)] -> String @@ -131,32 +155,7 @@ getCodebaseOrError dir = do ColorText.toANSI . P.render 80 . 
(("Missing SqliteCodebase structure in " <> prettyDir <> ".") <>) . P.column2Header "Schema Type" "Name" $ map (Bifunctor.bimap P.string P.string) schema - fmap (Either.mapLeft prettyError) (sqliteCodebase dir) - -getNoCodebaseErrorMsg :: IsString s => P.Pretty s -> P.Pretty s -> Maybe FilePath -> P.Pretty s -getNoCodebaseErrorMsg executable prettyDir mdir = - let secondLine = - case mdir of - Just dir -> - "Run `" <> executable <> " -codebase " <> fromString dir - <> " init` to create one, then try again!" - Nothing -> - "Run `" <> executable <> " init` to create one there," - <> " then try again;" - <> " or `" - <> executable - <> " -codebase ` to load a codebase from someplace else!" - in P.lines - [ "No codebase exists in " <> prettyDir <> ".", - secondLine - ] - -initCodebaseAndExit :: MonadIO m => Maybe FilePath -> m () -initCodebaseAndExit mdir = do - dir <- Codebase.getCodebaseDir mdir - (closeCodebase, _codebase) <- initCodebase dir - closeCodebase - liftIO SysExit.exitSuccess + fmap (Either.mapLeft $ P.string . prettyError) (sqliteCodebase dir) initSchemaIfNotExist :: MonadIO m => FilePath -> m () initSchemaIfNotExist path = liftIO do @@ -168,35 +167,6 @@ initSchemaIfNotExist path = liftIO do Sqlite.close (runReaderT Q.createSchema) --- initializes a new codebase here (i.e. `ucm -codebase dir init`) -initCodebase :: MonadIO m => FilePath -> m (m (), Codebase1.Codebase m Symbol Ann) -initCodebase path = do - Monad.when debug $ traceM $ "initCodebase " ++ path - prettyDir <- P.string <$> canonicalizePath path - - liftIO $ Monad.whenM (codebaseExists path) do - PT.putPrettyLn' - . P.wrap - $ "It looks like " <> prettyDir <> " already exists." - SysExit.exitFailure - - liftIO $ - PT.putPrettyLn' - . 
P.wrap - $ "Initializing a new codebase in: " - <> prettyDir - - -- run sql create scripts - createDirectoryIfMissing True (path FilePath.takeDirectory codebasePath) - initSchemaIfNotExist path - - (closeCodebase, theCodebase) <- - sqliteCodebase path >>= \case - Right x -> pure x - Left x -> error $ show x ++ " :) " - Codebase1.initializeCodebase theCodebase - pure (closeCodebase, theCodebase) - -- checks if a db exists at `path` with the minimum schema codebaseExists :: MonadIO m => CodebasePath -> m Bool codebaseExists root = liftIO do @@ -266,7 +236,7 @@ unsafeGetConnection root = do runReaderT Q.setFlags conn pure conn -sqliteCodebase :: MonadIO m => CodebasePath -> m (Either [(Q.SchemaType, Q.SchemaName)] (m (), Codebase1.Codebase m Symbol Ann)) +sqliteCodebase :: MonadIO m => CodebasePath -> m (Either [(Q.SchemaType, Q.SchemaName)] (m (), Codebase m Symbol Ann)) sqliteCodebase root = do Monad.when debug $ traceM $ "sqliteCodebase " ++ root conn <- unsafeGetConnection root @@ -510,18 +480,21 @@ sqliteCodebase root = do Just hId -> Q.isCausalHash hId getPatch :: MonadIO m => Branch.EditHash -> m (Maybe Patch) - getPatch h = runDB conn . runMaybeT $ - MaybeT (Ops.primaryHashToMaybePatchObjectId (Cv.patchHash1to2 h)) - >>= Ops.loadPatchById - >>= Cv.patch2to1 getCycleLen + getPatch h = + runDB conn . runMaybeT $ + MaybeT (Ops.primaryHashToMaybePatchObjectId (Cv.patchHash1to2 h)) + >>= Ops.loadPatchById + >>= Cv.patch2to1 getCycleLen putPatch :: MonadIO m => Branch.EditHash -> Patch -> m () - putPatch h p = runDB conn . void $ - Ops.savePatch (Cv.patchHash1to2 h) (Cv.patch1to2 p) + putPatch h p = + runDB conn . void $ + Ops.savePatch (Cv.patchHash1to2 h) (Cv.patch1to2 p) patchExists :: MonadIO m => Branch.EditHash -> m Bool - patchExists h = runDB conn . fmap isJust $ - Ops.primaryHashToMaybePatchObjectId (Cv.patchHash1to2 h) + patchExists h = + runDB conn . 
fmap isJust $ + Ops.primaryHashToMaybePatchObjectId (Cv.patchHash1to2 h) dependentsImpl :: MonadIO m => Reference -> m (Set Reference.Id) dependentsImpl r = @@ -816,9 +789,9 @@ data Entity m | O Hash data SyncProgressState = SyncProgressState - { needEntities :: Maybe (Set Sync22.Entity), - doneEntities :: Either Int (Set Sync22.Entity), - warnEntities :: Either Int (Set Sync22.Entity) + { _needEntities :: Maybe (Set Sync22.Entity), + _doneEntities :: Either Int (Set Sync22.Entity), + _warnEntities :: Either Int (Set Sync22.Entity) } emptySyncProgressState :: SyncProgressState @@ -881,90 +854,106 @@ syncProgress = Sync.Progress need done warn allDone ++ if Set.size warn > 0 then " with " ++ show warn ++ " warnings." else "." - SyncProgressState need done warn -> "invalid SyncProgressState " ++ - show (fmap v need, bimap id v done, bimap id v warn) - where v = const () - - -viewRemoteBranch' :: forall m. MonadIO m - => RemoteNamespace -> m (Either GitError (Branch m, CodebasePath)) + SyncProgressState need done warn -> + "invalid SyncProgressState " + ++ show (fmap v need, bimap id v done, bimap id v warn) + where + v = const () + +viewRemoteBranch' :: + forall m. + MonadIO m => + RemoteNamespace -> + m (Either GitError (Branch m, CodebasePath)) viewRemoteBranch' (repo, sbh, path) = runExceptT do -- set up the cache dir remotePath <- time "Git fetch" $ pullBranch repo - ifM (codebaseExists remotePath) - (do - (closeCodebase, codebase) <- lift (sqliteCodebase remotePath) >>= - Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . 
fmap pure - -- try to load the requested branch from it - branch <- time "Git fetch (sbh)" $ case sbh of - -- load the root branch - Nothing -> lift (Codebase.getRootBranch codebase) >>= \case - Left Codebase.NoRootBranch -> pure Branch.empty - Left (Codebase.CouldntLoadRootBranch h) -> - throwError $ GitError.CouldntLoadRootBranch repo h - Left (Codebase.CouldntParseRootBranch s) -> - throwError $ GitError.CouldntParseRootBranch repo s - Right b -> pure b - -- load from a specific `ShortBranchHash` - Just sbh -> do - branchCompletions <- lift $ Codebase.branchHashesByPrefix codebase sbh - case toList branchCompletions of - [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - [h] -> (lift $ Codebase.getBranchForHash codebase h) >>= \case - Just b -> pure b - Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh - _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions - lift closeCodebase - pure (Branch.getAt' path branch, remotePath)) + ifM + (codebaseExists remotePath) + ( do + (closeCodebase, codebase) <- + lift (sqliteCodebase remotePath) + >>= Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . 
fmap pure + -- try to load the requested branch from it + branch <- time "Git fetch (sbh)" $ case sbh of + -- load the root branch + Nothing -> + lift (Codebase1.getRootBranch codebase) >>= \case + Left Codebase1.NoRootBranch -> pure Branch.empty + Left (Codebase1.CouldntLoadRootBranch h) -> + throwError $ GitError.CouldntLoadRootBranch repo h + Left (Codebase1.CouldntParseRootBranch s) -> + throwError $ GitError.CouldntParseRootBranch repo s + Right b -> pure b + -- load from a specific `ShortBranchHash` + Just sbh -> do + branchCompletions <- lift $ Codebase1.branchHashesByPrefix codebase sbh + case toList branchCompletions of + [] -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + [h] -> + (lift $ Codebase1.getBranchForHash codebase h) >>= \case + Just b -> pure b + Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh + _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions + lift closeCodebase + pure (Branch.getAt' path branch, remotePath) + ) -- else there's no initialized codebase at this repo; we pretend there's an empty one. (pure (Branch.empty, remotePath)) -- Given a branch that is "after" the existing root of a given git repo, -- stage and push the branch (as the new root) + dependencies to the repo. 
-pushGitRootBranch - :: MonadIO m - => Codebase.SyncToDir m - -> Branch m - -> RemoteRepo - -> SyncMode - -> m (Either GitError ()) +pushGitRootBranch :: + MonadIO m => + Codebase1.SyncToDir m -> + Branch m -> + RemoteRepo -> + SyncMode -> + m (Either GitError ()) pushGitRootBranch syncToDirectory branch repo syncMode = runExceptT do -- Pull the remote repo into a staging directory (remoteRoot, remotePath) <- Except.ExceptT $ viewRemoteBranch' (repo, Nothing, Path.empty) - ifM (pure (remoteRoot == Branch.empty) - ||^ lift (remoteRoot `Branch.before` branch)) + ifM + ( pure (remoteRoot == Branch.empty) + ||^ lift (remoteRoot `Branch.before` branch) + ) -- ours is newer 👍, meaning this is a fast-forward push, -- so sync branch to staging area (stageAndPush remotePath) (throwError $ GitError.PushDestinationHasNewStuff repo) where - stageAndPush remotePath = do - let repoString = Text.unpack $ printRepo repo - withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ - lift (syncToDirectory remotePath syncMode branch) - -- push staging area to remote - withStatus ("Uploading to " ++ repoString ++ " ...") $ - unlessM - (push remotePath repo - `withIOError` (throwError . GitError.PushException repo . show)) - (throwError $ GitError.PushNoOp repo) - -- Commit our changes - push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO - push remotePath (GitRepo url gitbranch) = do - -- has anything changed? 
- status <- gitTextIn remotePath ["status", "--short"] - if Text.null status then - pure False - else do - gitIn remotePath ["add", "--all", "."] - gitIn remotePath - ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ Branch.headHash branch)] - -- Push our changes to the repo - case gitbranch of - Nothing -> gitIn remotePath ["push", "--quiet", url] - Just gitbranch -> error $ - "Pushing to a specific branch isn't fully implemented or tested yet.\n" - ++ "InputPatterns.parseUri was expected to have prevented you " - ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" + stageAndPush remotePath = do + let repoString = Text.unpack $ printRepo repo + withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ + lift (syncToDirectory remotePath syncMode branch) + -- push staging area to remote + withStatus ("Uploading to " ++ repoString ++ " ...") $ + unlessM + ( push remotePath repo + `withIOError` (throwError . GitError.PushException repo . show) + ) + (throwError $ GitError.PushNoOp repo) + -- Commit our changes + push :: CodebasePath -> RemoteRepo -> IO Bool -- withIOError needs IO + push remotePath (GitRepo url gitbranch) = do + -- has anything changed? + status <- gitTextIn remotePath ["status", "--short"] + if Text.null status + then pure False + else do + gitIn remotePath ["add", "--all", "."] + gitIn + remotePath + ["commit", "-q", "-m", "Sync branch " <> Text.pack (show $ Branch.headHash branch)] + -- Push our changes to the repo + case gitbranch of + Nothing -> gitIn remotePath ["push", "--quiet", url] + Just gitbranch -> + error $ + "Pushing to a specific branch isn't fully implemented or tested yet.\n" + ++ "InputPatterns.parseUri was expected to have prevented you " + ++ "from supplying the git treeish `" + ++ Text.unpack gitbranch + ++ "`!" 
-- gitIn remotePath ["push", "--quiet", url, gitbranch] - pure True + pure True diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs index 4d3e65079c..682e8a9704 100644 --- a/parser-typechecker/tests/Unison/Test/Git.hs +++ b/parser-typechecker/tests/Unison/Test/Git.hs @@ -17,6 +17,7 @@ import System.Directory (doesFileExist, removeDirectoryRecursive, removeFile) import Unison.Codebase (BuiltinAnnotation, Codebase, CodebasePath) import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.Serialization.V1 as V1 @@ -295,7 +296,7 @@ inside.y = c + c initCodebase :: FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) initCodebase tmpDir name = do let codebaseDir = tmpDir name - (cleanup, c) <- Codebase.initCodebase FC.init codebaseDir + (cleanup, c) <- Codebase.openNewUcmCodebaseOrExit FC.init codebaseDir pure (codebaseDir, cleanup, c) -- run a transcript on an existing codebase diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index f6dd6745bb..53acf0f0d0 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -18,6 +18,7 @@ import Unison.Prelude import Unison.Symbol (Symbol) import qualified Unison.Parser as Parser import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.SqliteCodebase as SC writeTranscriptOutput :: Bool @@ -228,10 +229,10 @@ pushPullTest cbInit name authorScript userScript = scope name $ do ok -- initialize a fresh codebase -initCodebase :: Monad m => Codebase.Init m v a -> FilePath -> String -> m (CodebasePath, m (), Codebase m v a) +initCodebase :: MonadIO m => Codebase.Init m Symbol Ann -> FilePath -> String -> m 
(CodebasePath, m (), Codebase m Symbol Ann) initCodebase cbInit tmpDir name = do let codebaseDir = tmpDir name - (close, c) <- Codebase.initCodebase cbInit codebaseDir + (close, c) <- Codebase.openNewUcmCodebaseOrExit cbInit codebaseDir pure (codebaseDir, close, c) -- run a transcript on an existing codebase diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs index 222bbd5e6b..6c6bdc2f17 100644 --- a/parser-typechecker/tests/Unison/Test/IO.hs +++ b/parser-typechecker/tests/Unison/Test/IO.hs @@ -18,6 +18,7 @@ import qualified Unison.Codebase.TranscriptParser as TR import Unison.Parser (Ann) import Unison.Symbol (Symbol) import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.FileCodebase as FileCodebase -- * IO Tests @@ -82,7 +83,7 @@ main = 'let initCodebase :: Codebase.Init IO Symbol Ann -> FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) initCodebase cbInit tmpDir name = do let codebaseDir = tmpDir name - (finalize, c) <- Codebase.initCodebase cbInit codebaseDir + (finalize, c) <- Codebase.openNewUcmCodebaseOrExit cbInit codebaseDir pure (codebaseDir, finalize, c) -- run a transcript on an existing codebase diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 31d4e36f21..36784a2f01 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -87,6 +87,7 @@ library Unison.Codebase.FileCodebase.Common Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex Unison.Codebase.GitError + Unison.Codebase.Init Unison.Codebase.Metadata Unison.Codebase.NameEdit Unison.Codebase.Path diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index ffaada4fbd..973b5a75e2 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -38,6 +38,7 @@ import 
qualified System.Path as Path import Text.Megaparsec (runParser) import qualified U.Codebase.Sync as Sync import qualified Unison.Codebase as Codebase +import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.Branch as Branch import qualified Unison.Codebase.Causal as Causal import qualified Unison.Codebase.Conversion.Sync12 as Sync12 @@ -244,12 +245,12 @@ upgradeCodebase mcodepath = do (a, s1') <- runStateT m (s2 Lens.^. l) pure (a, s2 & l Lens..~ s1') -prepareTranscriptDir :: Codebase.Init IO v a -> Bool -> Maybe FilePath -> IO FilePath +prepareTranscriptDir :: Codebase.Init IO Symbol Ann -> Bool -> Maybe FilePath -> IO FilePath prepareTranscriptDir cbInit inFork mcodepath = do tmp <- Temp.getCanonicalTemporaryDirectory >>= (`Temp.createTempDirectory` "transcript") unless inFork $ do PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." - _ <- Codebase.initCodebase cbInit tmp + _ <- Codebase.openNewUcmCodebaseOrExit cbInit tmp pure() when inFork $ Codebase.getCodebaseOrExit cbInit mcodepath >> do From f72a3ce30903dfc73790fc0a946dbc42afbb650d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 31 Mar 2021 22:48:05 -0700 Subject: [PATCH 145/225] ucm test helpers --- .../src/Unison/Codebase/Init.hs | 35 ++++++----- .../tests/Unison/Test/Codebase/Sync12.hs | 10 +++ .../tests/Unison/Test/GitSimple.hs | 1 - parser-typechecker/tests/Unison/Test/Ucm.hs | 63 +++++++++++++++++++ .../unison-parser-typechecker.cabal | 3 + 5 files changed, 95 insertions(+), 17 deletions(-) create mode 100644 parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs create mode 100644 parser-typechecker/tests/Unison/Test/Ucm.hs diff --git a/parser-typechecker/src/Unison/Codebase/Init.hs b/parser-typechecker/src/Unison/Codebase/Init.hs index cdbdb9fca1..3b2599ac29 100644 --- a/parser-typechecker/src/Unison/Codebase/Init.hs +++ b/parser-typechecker/src/Unison/Codebase/Init.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE 
OverloadedStrings #-} module Unison.Codebase.Init where @@ -30,6 +31,22 @@ data Init m v a = Init codebasePath :: CodebasePath -> CodebasePath } +createCodebase :: MonadIO m => Init m v a -> CodebasePath -> m (Either Pretty (m (), Codebase m v a)) +createCodebase cbInit path = do + prettyDir <- P.string <$> canonicalizePath path + createCodebase' cbInit path <&> mapLeft \case + CreateCodebaseAlreadyExists -> + P.wrap $ + "It looks like there's already a codebase in: " + <> prettyDir + CreateCodebaseOther message -> + P.wrap ("I ran into an error when creating the codebase in: " <> prettyDir) + <> P.newline + <> P.newline + <> "The error was:" + <> P.newline + <> P.indentN 2 message + -- * compatibility stuff -- | load an existing codebase or exit. @@ -67,22 +84,8 @@ getCodebaseOrExit init mdir = do openNewUcmCodebaseOrExit :: MonadIO m => Init m Symbol Ann -> CodebasePath -> m (m (), Codebase m Symbol Ann) openNewUcmCodebaseOrExit cbInit path = do prettyDir <- P.string <$> canonicalizePath path - createCodebase' cbInit path >>= \case - Left CreateCodebaseAlreadyExists -> liftIO do - PT.putPrettyLn' - . 
P.wrap - $ "It looks like there's already a codebase in: " - <> prettyDir - exitFailure - Left (CreateCodebaseOther message) -> liftIO do - PT.putPrettyLn' $ - P.wrap ("I ran into an error when creating the codebase in: " <> prettyDir) - <> P.newline - <> P.newline - <> "The error was:" - <> P.newline - <> P.indentN 2 message - exitFailure + createCodebase cbInit path >>= \case + Left error -> liftIO $ PT.putPrettyLn' error >> exitFailure Right x@(_, codebase) -> do liftIO $ PT.putPrettyLn' diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs new file mode 100644 index 0000000000..af649d81f3 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs @@ -0,0 +1,10 @@ +module Unison.Test.Codebase.Sync12 where + +import Control.Error (minimumMay) +import Control.Lens (view, _1) +import qualified Data.Char as Char +import Data.Maybe (fromMaybe) +import Unison.Test.Ucm + + + diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 53acf0f0d0..0ef04b8c15 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -17,7 +17,6 @@ import Unison.Parser (Ann) import Unison.Prelude import Unison.Symbol (Symbol) import qualified Unison.Parser as Parser -import qualified Unison.Codebase as Codebase import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.SqliteCodebase as SC diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs new file mode 100644 index 0000000000..1b40b8c0ab --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -0,0 +1,63 @@ +{-# LANGUAGE DerivingVia #-} +module Unison.Test.Ucm where + +import Unison.Prelude +import Control.Monad.Writer (WriterT) +import qualified Data.Text as Text +import qualified Unison.Codebase.TranscriptParser as TR +import System.FilePath 
(()) +import Control.Error (minimumMay) +import Control.Lens (view, _1) +import qualified Unison.Util.Pretty as P +import qualified Unison.Codebase.Init as Codebase.Init +import qualified Unison.Codebase.SqliteCodebase as SC +import qualified Unison.Codebase.FileCodebase as FC +import qualified Data.Sequence as Seq +import qualified Control.Monad.Writer as Writer + +data Runtime = Runtime1 | Runtime2 +data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 +newtype CodebaseName = CodebaseName { unCodebaseName :: String } +type Args = Map String String +newtype Transcript = Transcript { unTranscript :: Text } deriving IsString via Text +type TranscriptOutput = String +type Cleanup = Seq (IO ()) + +runTranscript :: FilePath -> CodebaseName -> CodebaseFormat -> Runtime -> Args -> (Args -> Transcript) -> WriterT Cleanup IO TranscriptOutput +runTranscript tmpDir codebaseName fmt rt args mkTranscript = do + let configFile = tmpDir ".unisonConfig" + codebasePath = tmpDir unCodebaseName codebaseName + let err err = error $ "Parse error: \n" <> show err + cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init + codebase <- lift (Codebase.Init.createCodebase cbInit codebasePath) >>= \case + Left e -> fail $ P.toANSI 80 e + Right (cleanup, c) -> do + Writer.tell . Seq.singleton $ cleanup + pure c + -- parse and run the transcript + flip (either err) (TR.parse "transcript" (stripMargin . unTranscript $ mkTranscript args)) $ \stanzas -> + liftIO . fmap Text.unpack $ + TR.run + (case rt of Runtime1 -> Just False; Runtime2 -> Just True) + codebasePath + configFile + stanzas + codebase + + +-- | remove however many spaces prefix all of the lines of the input +-- e.g. +-- stripMargin [here| +-- def foo: +-- blah blah +-- |] == [here| +-- def foo: +-- blah blah +-- |]T +stripMargin :: Text -> Text +stripMargin str = + let stripLen = + fromMaybe 0 . minimumMay + . map (Text.length . view _1 . Text.span (==' ')) + $ Text.lines str + in Text.unlines . 
map (Text.drop stripLen) $ Text.lines str \ No newline at end of file diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 36784a2f01..dad4418d6a 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -355,6 +355,7 @@ executable tests Unison.Test.Codebase.Causal Unison.Test.Codebase.FileCodebase Unison.Test.Codebase.Path + Unison.Test.Codebase.Sync12 Unison.Test.ColorText Unison.Test.Common Unison.Test.DataDeclaration @@ -375,6 +376,7 @@ executable tests Unison.Test.Typechecker.Components Unison.Test.Typechecker.Context Unison.Test.Typechecker.TypeError + Unison.Test.Ucm Unison.Test.UnisonSources Unison.Test.UriParser Unison.Test.Util.Bytes @@ -406,6 +408,7 @@ executable tests temporary, text, transformers, + unliftio, unison-core1, unison-parser-typechecker, unison-util From 844022a7fe25feeb4e3fd6dba586e2333353ec9f Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 31 Mar 2021 22:48:47 -0700 Subject: [PATCH 146/225] simplify hie.yaml --- hie.yaml | 116 ++----------------------------------------------------- 1 file changed, 4 insertions(+), 112 deletions(-) diff --git a/hie.yaml b/hie.yaml index e81696108e..8b2f3418eb 100644 --- a/hie.yaml +++ b/hie.yaml @@ -39,127 +39,19 @@ cradle: - path: "parser-typechecker/src" component: "unison-parser-typechecker:lib" - - path: "parser-typechecker/unison/Main.hs" - component: "unison-parser-typechecker:exe:unison" - - - path: "parser-typechecker/unison/System/Path.hs" - component: "unison-parser-typechecker:exe:unison" - - - path: "parser-typechecker/unison/Version.hs" + - path: "parser-typechecker/unison/." 
component: "unison-parser-typechecker:exe:unison" - path: "parser-typechecker/prettyprintdemo/Main.hs" component: "unison-parser-typechecker:exe:prettyprintdemo" - - path: "parser-typechecker/tests/Suite.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/ABT.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/ANF.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Cache.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase/Causal.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase/Path.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/ColorText.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Common.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/DataDeclaration.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/FileParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Git.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Lexer.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/IO.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/MCode.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: 
"parser-typechecker/tests/Unison/Test/Range.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Referent.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Term.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/TermParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/TermPrinter.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Type.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/TypePrinter.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker/Components.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker/Context.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/UnisonSources.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/UriParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Util/Bytes.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Util/PinBoard.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Util/Pretty.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Var.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: 
"parser-typechecker/tests/Unison/Test/VersionParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Core/Test/Name.hs" + - path: "parser-typechecker/tests/." component: "unison-parser-typechecker:exe:tests" - - path: "parser-typechecker/transcripts/Transcripts.hs" + - path: "parser-typechecker/transcripts/." component: "unison-parser-typechecker:exe:transcripts" - - path: "parser-typechecker/benchmarks/runtime/Main.hs" + - path: "parser-typechecker/benchmarks/runtime/." component: "unison-parser-typechecker:bench:runtime" - path: "unison-core/src" From cb83bebcc65627ab25dd3d1c6906f841ab7eb7bc Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 1 Apr 2021 08:17:34 -0700 Subject: [PATCH 147/225] U.Util.Text --- codebase2/util/U/Util/Text.hs | 23 +++++++++++++++++++++++ codebase2/util/unison-util.cabal | 2 ++ 2 files changed, 25 insertions(+) create mode 100644 codebase2/util/U/Util/Text.hs diff --git a/codebase2/util/U/Util/Text.hs b/codebase2/util/U/Util/Text.hs new file mode 100644 index 0000000000..70f0a6d4b1 --- /dev/null +++ b/codebase2/util/U/Util/Text.hs @@ -0,0 +1,23 @@ +module U.Util.Text where + +import Data.Maybe (fromMaybe) +import Data.Text (Text) +import qualified Data.Text as Text +import Safe.Foldable (minimumMay) + +-- | remove however many spaces prefix all of the lines of the input +-- e.g. +-- stripMargin [here| +-- def foo: +-- blah blah +-- |] == [here| +-- def foo: +-- blah blah +-- |]T +stripMargin :: Text -> Text +stripMargin str = + let stripLen = + fromMaybe 0 . minimumMay + . map (Text.length . fst . Text.span (== ' ')) + $ Text.lines str + in Text.unlines . 
map (Text.drop stripLen) $ Text.lines str \ No newline at end of file diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal index 39d41f99b3..c62832070e 100644 --- a/codebase2/util/unison-util.cabal +++ b/codebase2/util/unison-util.cabal @@ -25,6 +25,7 @@ library U.Util.Monoid U.Util.Relation U.Util.Set + U.Util.Text -- other-modules: -- other-extensions: build-depends: @@ -36,6 +37,7 @@ library memory, text, unliftio, + safe, sandi hs-source-dirs: . default-language: Haskell2010 From f4d4bde92ac2536e8aebcd1c0e608e2e632dcafc Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 6 Apr 2021 12:25:52 -0600 Subject: [PATCH 148/225] factor codebase upgrade code out of Main.hs --- codebase2/util/U/Util/String.hs | 12 +++ codebase2/util/U/Util/Text.hs | 2 +- codebase2/util/package.yaml | 12 +++ codebase2/util/unison-util.cabal | 82 +++++++++++---------- parser-typechecker/tests/Unison/Test/Ucm.hs | 64 ++++++++-------- 5 files changed, 99 insertions(+), 73 deletions(-) create mode 100644 codebase2/util/U/Util/String.hs create mode 100644 codebase2/util/package.yaml diff --git a/codebase2/util/U/Util/String.hs b/codebase2/util/U/Util/String.hs new file mode 100644 index 0000000000..83b225c3cc --- /dev/null +++ b/codebase2/util/U/Util/String.hs @@ -0,0 +1,12 @@ +module U.Util.String where + +import Data.Maybe (fromMaybe) +import Safe.Foldable (minimumMay) + +stripMargin :: String -> String +stripMargin str = + let stripLen = + fromMaybe 0 . minimumMay + . map (length . fst . span (== ' ')) + $ lines str + in unlines . map (drop stripLen) $ lines str \ No newline at end of file diff --git a/codebase2/util/U/Util/Text.hs b/codebase2/util/U/Util/Text.hs index 70f0a6d4b1..9de6621aba 100644 --- a/codebase2/util/U/Util/Text.hs +++ b/codebase2/util/U/Util/Text.hs @@ -20,4 +20,4 @@ stripMargin str = fromMaybe 0 . minimumMay . map (Text.length . fst . Text.span (== ' ')) $ Text.lines str - in Text.unlines . 
map (Text.drop stripLen) $ Text.lines str \ No newline at end of file + in Text.unlines . map (Text.drop stripLen) $ Text.lines str diff --git a/codebase2/util/package.yaml b/codebase2/util/package.yaml new file mode 100644 index 0000000000..ea0e738398 --- /dev/null +++ b/codebase2/util/package.yaml @@ -0,0 +1,12 @@ +name: unison-util +github: unisonweb/unison + +library: + source-dirs: . + +defaults: + local: ../../defaults.yaml + +dependencies: + - cryptonite + - sandi diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal index c62832070e..bc0396ddd6 100644 --- a/codebase2/util/unison-util.cabal +++ b/codebase2/util/unison-util.cabal @@ -1,43 +1,49 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ +cabal-version: 1.12 -name: unison-util -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development +-- This file has been generated from package.yaml by hpack version 0.33.0. 
+-- +-- see: https://github.com/sol/hpack +-- +-- hash: 1ac1d0a054ed3772371a8196b4e0c33ae78a59ad5bcfc0961648f71c3d137ae6 + +name: unison-util +version: 0.0.0 +homepage: https://github.com/unisonweb/unison#readme +bug-reports: https://github.com/unisonweb/unison/issues +build-type: Simple + +source-repository head + type: git + location: https://github.com/unisonweb/unison library exposed-modules: - U.Util.Base32Hex - U.Util.Cache - U.Util.Components - U.Util.Hash - U.Util.Hashable - U.Util.Lens - U.Util.Map - U.Util.Monoid - U.Util.Relation - U.Util.Set - U.Util.Text - -- other-modules: - -- other-extensions: + U.Util.Base32Hex + U.Util.Cache + U.Util.Components + U.Util.Hash + U.Util.Hashable + U.Util.Lens + U.Util.Map + U.Util.Monoid + U.Util.Relation + U.Util.Set + U.Util.String + U.Util.Text + other-modules: + Paths_unison_util + hs-source-dirs: + ./. build-depends: - base, - bytestring, - containers, - cryptonite, - lens, - memory, - text, - unliftio, - safe, - sandi - hs-source-dirs: . 
- default-language: Haskell2010 + base + , bytestring + , containers + , cryptonite + , extra + , lens + , memory + , safe + , sandi + , text + , unliftio + default-language: Haskell2010 diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index 1b40b8c0ab..18b8d6b537 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -1,39 +1,53 @@ {-# LANGUAGE DerivingVia #-} + module Unison.Test.Ucm where -import Unison.Prelude import Control.Monad.Writer (WriterT) +import qualified Control.Monad.Writer as Writer +import qualified Data.Sequence as Seq import qualified Data.Text as Text -import qualified Unison.Codebase.TranscriptParser as TR import System.FilePath (()) -import Control.Error (minimumMay) -import Control.Lens (view, _1) -import qualified Unison.Util.Pretty as P +import U.Util.Text (stripMargin) +import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.Init as Codebase.Init import qualified Unison.Codebase.SqliteCodebase as SC -import qualified Unison.Codebase.FileCodebase as FC -import qualified Data.Sequence as Seq -import qualified Control.Monad.Writer as Writer +import qualified Unison.Codebase.TranscriptParser as TR +import Unison.Prelude +import qualified Unison.Util.Pretty as P data Runtime = Runtime1 | Runtime2 + data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 -newtype CodebaseName = CodebaseName { unCodebaseName :: String } + +newtype CodebaseName = CodebaseName {unCodebaseName :: String} + type Args = Map String String -newtype Transcript = Transcript { unTranscript :: Text } deriving IsString via Text + +newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text + type TranscriptOutput = String + type Cleanup = Seq (IO ()) -runTranscript :: FilePath -> CodebaseName -> CodebaseFormat -> Runtime -> Args -> (Args -> Transcript) -> WriterT Cleanup IO TranscriptOutput +runTranscript :: + 
FilePath -> + CodebaseName -> + CodebaseFormat -> + Runtime -> + Args -> + (Args -> Transcript) -> + WriterT Cleanup IO TranscriptOutput runTranscript tmpDir codebaseName fmt rt args mkTranscript = do let configFile = tmpDir ".unisonConfig" codebasePath = tmpDir unCodebaseName codebaseName let err err = error $ "Parse error: \n" <> show err cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init - codebase <- lift (Codebase.Init.createCodebase cbInit codebasePath) >>= \case - Left e -> fail $ P.toANSI 80 e - Right (cleanup, c) -> do - Writer.tell . Seq.singleton $ cleanup - pure c + codebase <- + lift (Codebase.Init.createCodebase cbInit codebasePath) >>= \case + Left e -> fail $ P.toANSI 80 e + Right (cleanup, c) -> do + Writer.tell . Seq.singleton $ cleanup + pure c -- parse and run the transcript flip (either err) (TR.parse "transcript" (stripMargin . unTranscript $ mkTranscript args)) $ \stanzas -> liftIO . fmap Text.unpack $ @@ -43,21 +57,3 @@ runTranscript tmpDir codebaseName fmt rt args mkTranscript = do configFile stanzas codebase - - --- | remove however many spaces prefix all of the lines of the input --- e.g. --- stripMargin [here| --- def foo: --- blah blah --- |] == [here| --- def foo: --- blah blah --- |]T -stripMargin :: Text -> Text -stripMargin str = - let stripLen = - fromMaybe 0 . minimumMay - . map (Text.length . view _1 . Text.span (==' ')) - $ Text.lines str - in Text.unlines . 
map (Text.drop stripLen) $ Text.lines str \ No newline at end of file From f4db9f7998b801d891b4b801e4185c50d58cd8c1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 6 Apr 2021 12:28:34 -0600 Subject: [PATCH 149/225] add format tag to watch expression result cache --- .../codebase-sqlite/U/Codebase/Sqlite/Operations.hs | 9 +++++---- .../U/Codebase/Sqlite/Serialization.hs | 12 ++++++++++++ .../codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs | 5 ++++- parser-typechecker/tests/Unison/Test/IO.hs | 1 - 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 9dee69727a..c5b8e3cbd1 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -703,19 +703,20 @@ c2sTerm tm tp = c2xTerm Q.saveText primaryHashToExistingObjectId tm (Just tp) <& listWatches :: EDB m => WatchKind -> m [C.Reference.Id] listWatches k = Q.loadWatchesByWatchKind k >>= traverse s2cReferenceId --- | returns Nothing is the expression isn't cached. +-- | returns Nothing if the expression isn't cached. loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) loadWatch k r = C.Reference.idH Q.saveHashHash r >>= MaybeT . 
Q.loadWatch k - >>= getFromBytesOr (ErrWatch k r) (S.getPair S.getWatchLocalIds S.getTerm) - >>= uncurry w2cTerm + >>= getFromBytesOr (ErrWatch k r) S.getWatchResultFormat + >>= \case + S.Term.WatchResult wlids t -> w2cTerm wlids t saveWatch :: EDB m => WatchKind -> C.Reference.Id -> C.Term Symbol -> m () saveWatch w r t = do rs <- C.Reference.idH Q.saveHashHash r wterm <- c2wTerm t - let bytes = S.putBytes (S.putPair S.putLocalIds S.putTerm) wterm + let bytes = S.putBytes S.putWatchResultFormat (uncurry S.Term.WatchResult wterm) Q.saveWatch w rs bytes c2wTerm :: EDB m => C.Term Symbol -> m (WatchLocalIds, S.Term.Term) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index 09d4b91a2b..d11c708b5f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -152,6 +152,18 @@ putUnit _ = pure () getUnit :: Applicative m => m () getUnit = pure () +putWatchResultFormat :: MonadPut m => TermFormat.WatchResultFormat -> m () +putWatchResultFormat = \case + TermFormat.WatchResult ids t -> do + putWord8 0 + putLocalIds ids + putTerm t + +getWatchResultFormat :: MonadGet m => m TermFormat.WatchResultFormat +getWatchResultFormat = getWord8 >>= \case + 0 -> TermFormat.WatchResult <$> getWatchLocalIds <*> getTerm + other -> unknownTag "getWatchResultFormat" other + putTermFormat :: MonadPut m => TermFormat.TermFormat -> m () putTermFormat = \case TermFormat.Term c -> putWord8 0 *> putTermComponent c diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs index b8ec18a40e..dccbb5f24d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Term/Format.hs @@ -6,7 +6,7 @@ import Data.Vector (Vector) import U.Codebase.Reference (Reference') import U.Codebase.Referent 
(Referent') import U.Codebase.Sqlite.LocalIds - ( LocalIds', LocalTextId, LocalDefnId ) + ( LocalIds', LocalTextId, LocalDefnId, WatchLocalIds ) import U.Codebase.Sqlite.Symbol ( Symbol ) import qualified U.Codebase.Term as Term import qualified U.Core.ABT as ABT @@ -39,3 +39,6 @@ type TypeOfTerm = ABT.Term FTT Symbol () data TermFormat = Term LocallyIndexedComponent + +data WatchResultFormat + = WatchResult WatchLocalIds Term diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs index 6c6bdc2f17..5072033576 100644 --- a/parser-typechecker/tests/Unison/Test/IO.hs +++ b/parser-typechecker/tests/Unison/Test/IO.hs @@ -17,7 +17,6 @@ import Unison.Codebase (Codebase, CodebasePath) import qualified Unison.Codebase.TranscriptParser as TR import Unison.Parser (Ann) import Unison.Symbol (Symbol) -import qualified Unison.Codebase as Codebase import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.FileCodebase as FileCodebase From b6afd46a0539fd128f30e3c264cd5e377bc523b5 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 6 Apr 2021 12:37:46 -0600 Subject: [PATCH 150/225] codebase upgrade function --- .../Unison/Codebase/Conversion/Upgrade12.hs | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs new file mode 100644 index 0000000000..63e2f84e4b --- /dev/null +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs @@ -0,0 +1,52 @@ +{-# LANGUAGE PartialTypeSignatures #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE TypeApplications #-} + +module Unison.Codebase.Conversion.Upgrade12 where + +import Control.Lens (Lens', (&), (.~), (^.)) +import qualified Control.Lens as Lens +import qualified Control.Monad.Reader as Reader +import Control.Monad.State (StateT (StateT, 
runStateT)) +import qualified Control.Monad.State as State +import Control.Monad.Trans (lift) +import qualified U.Codebase.Sync as Sync +import qualified Unison.Codebase as Codebase +import Unison.Codebase.Branch (Branch (Branch)) +import qualified Unison.Codebase.Causal as Causal +import qualified Unison.Codebase.Conversion.Sync12 as Sync12 +import qualified Unison.Codebase.FileCodebase as FC +import qualified Unison.Codebase.Init as Codebase +import qualified Unison.Codebase.SqliteCodebase as SC +import Control.Monad.Except (ExceptT(ExceptT), runExceptT) +import Unison.Codebase (CodebasePath) +import UnliftIO (MonadUnliftIO, liftIO) +import qualified Unison.PrettyTerminal as CT + +upgradeCodebase :: forall m. MonadUnliftIO m => CodebasePath -> m () +upgradeCodebase root = do + either (liftIO . CT.putPrettyLn) pure =<< runExceptT do + (cleanupSrc, srcCB) <- ExceptT $ Codebase.openCodebase FC.init root + (cleanupDest, destCB) <- ExceptT $ Codebase.createCodebase SC.init root + destDB <- SC.unsafeGetConnection root + let env = Sync12.Env srcCB destCB destDB + let initialState = (Sync12.emptyDoneCount, Sync12.emptyErrorCount, Sync12.emptyStatus) + rootEntity <- + lift (Codebase.getRootBranch srcCB) >>= \case + Left e -> error $ "Error loading source codebase root branch: " ++ show e + Right (Branch c) -> pure $ Sync12.C (Causal.currentHash c) (pure c) + flip Reader.runReaderT env . flip State.evalStateT initialState $ do + sync <- Sync12.sync12 (lift . lift . lift) + Sync.sync @_ @(Sync12.Entity _) + (Sync.transformSync (lensStateT Lens._3) sync) + Sync12.simpleProgress + [rootEntity] + lift cleanupSrc + lift cleanupDest + pure () + + where + lensStateT :: forall m s1 s2 a. Monad m => Lens' s2 s1 -> StateT s1 m a -> StateT s2 m a + lensStateT l m = StateT \s2 -> do + (a, s1') <- runStateT m (s2 ^. 
l) + pure (a, s2 & l .~ s1') \ No newline at end of file From e4f76472d6375b1a7c6fcfa2c9daea14f7d92c95 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 6 Apr 2021 12:50:18 -0600 Subject: [PATCH 151/225] making an absolute mess in git --- codebase2-tests/add-type.md | 21 + codebase2-tests/add-type.output.md | 67 ++ codebase2-tests/builtins-merge.md | 3 + codebase2-tests/builtins-merge.output.md | 6 + codebase2-tests/reference-check.md | 8 + codebase2-tests/reference-check.output.md | 29 + codebase2-tests/write-namespaces.md | 26 + codebase2-tests/write-namespaces.output.md | 64 ++ codebase2/util-serialization/package.yaml | 17 + .../unison-util-serialization.cabal | 56 +- defaults.yaml | 10 + parser-typechecker/package.yaml | 203 +++++ parser-typechecker/tests/Unison/Test/Ucm.hs | 110 +-- .../unison-parser-typechecker.cabal | 806 +++++++++--------- questions.md | 2 +- .../transcripts/GitSimple.one-term.output.md | 4 +- .../transcripts/GitSimple.one-term2.output.md | 4 +- .../transcripts/GitSimple.one-type.output.md | 4 +- .../transcripts/GitSimple.patching.output.md | 190 +++++ 19 files changed, 1117 insertions(+), 513 deletions(-) create mode 100644 codebase2-tests/add-type.md create mode 100644 codebase2-tests/add-type.output.md create mode 100644 codebase2-tests/builtins-merge.md create mode 100644 codebase2-tests/builtins-merge.output.md create mode 100644 codebase2-tests/reference-check.md create mode 100644 codebase2-tests/reference-check.output.md create mode 100644 codebase2-tests/write-namespaces.md create mode 100644 codebase2-tests/write-namespaces.output.md create mode 100644 codebase2/util-serialization/package.yaml create mode 100644 defaults.yaml create mode 100644 parser-typechecker/package.yaml create mode 100644 unison-src/transcripts/GitSimple.patching.output.md diff --git a/codebase2-tests/add-type.md b/codebase2-tests/add-type.md new file mode 100644 index 0000000000..3206c7aa5a --- /dev/null +++ b/codebase2-tests/add-type.md @@ -0,0 +1,21 
@@ +```ucm +.> alias.type ##Int Int +``` + +```unison +type Optional a = None | Some a +type Boptional = Bconstructional (Optional ##Int) +``` + +```ucm +.mytypes> add +``` + +```unison +``` + +```ucm +.> names Optional +.> names Boptional +.> find +``` diff --git a/codebase2-tests/add-type.output.md b/codebase2-tests/add-type.output.md new file mode 100644 index 0000000000..d9c4846ff9 --- /dev/null +++ b/codebase2-tests/add-type.output.md @@ -0,0 +1,67 @@ +```ucm +.> alias.type ##Int Int + + Done. + +``` +```unison +type Optional a = None | Some a +type Boptional = Bconstructional (Optional ##Int) +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + type Boptional + type Optional a + +``` +```ucm + ☝️ The namespace .mytypes is empty. + +.mytypes> add + + ⍟ I've added these definitions: + + type Boptional + type Optional a + +``` +```unison +``` + +```ucm + + I loaded scratch.u and didn't find anything. + +``` +```ucm +.> names Optional + + Type + Hash: #5isltsdct9 + Names: mytypes.Optional + +.> names Boptional + + Type + Hash: #5q7ug1s3tb + Names: mytypes.Boptional + +.> find + + 1. builtin type Int + 2. type mytypes.Boptional + 3. mytypes.Boptional.Bconstructional : Optional Int + -> Boptional + 4. type mytypes.Optional a + 5. mytypes.Optional.None : Optional a + 6. 
mytypes.Optional.Some : a -> Optional a + + +``` diff --git a/codebase2-tests/builtins-merge.md b/codebase2-tests/builtins-merge.md new file mode 100644 index 0000000000..547211e82b --- /dev/null +++ b/codebase2-tests/builtins-merge.md @@ -0,0 +1,3 @@ +```ucm +.> builtins.merge +``` \ No newline at end of file diff --git a/codebase2-tests/builtins-merge.output.md b/codebase2-tests/builtins-merge.output.md new file mode 100644 index 0000000000..092ca74462 --- /dev/null +++ b/codebase2-tests/builtins-merge.output.md @@ -0,0 +1,6 @@ +```ucm +.> builtins.merge + + Done. + +``` diff --git a/codebase2-tests/reference-check.md b/codebase2-tests/reference-check.md new file mode 100644 index 0000000000..ed8fd5e344 --- /dev/null +++ b/codebase2-tests/reference-check.md @@ -0,0 +1,8 @@ +```unison +unique type Foo = Foo +``` + +```ucm +.> add +.> find +``` \ No newline at end of file diff --git a/codebase2-tests/reference-check.output.md b/codebase2-tests/reference-check.output.md new file mode 100644 index 0000000000..2973ef48ca --- /dev/null +++ b/codebase2-tests/reference-check.output.md @@ -0,0 +1,29 @@ +```unison +unique type Foo = Foo +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + unique type Foo + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + unique type Foo + +.> find + + 1. unique type Foo + 2. 
Foo.Foo : Foo + + +``` diff --git a/codebase2-tests/write-namespaces.md b/codebase2-tests/write-namespaces.md new file mode 100644 index 0000000000..1c8149df2b --- /dev/null +++ b/codebase2-tests/write-namespaces.md @@ -0,0 +1,26 @@ +```ucm +.> alias.term ##Nat.+ + +``` + +```unison:hide +type Foo = Foo | Bar +a = 3 +b = a + 1 +``` + +```ucm +.foo.bar> add +``` + +```unison:hide +a = 4 +``` + +```ucm +.foo.bar> update +.> find +``` + +```unison +> b +``` \ No newline at end of file diff --git a/codebase2-tests/write-namespaces.output.md b/codebase2-tests/write-namespaces.output.md new file mode 100644 index 0000000000..887b1fbdfd --- /dev/null +++ b/codebase2-tests/write-namespaces.output.md @@ -0,0 +1,64 @@ +```ucm +.> alias.term ##Nat.+ + + + Done. + +``` +```unison +type Foo = Foo | Bar +a = 3 +b = a + 1 +``` + +```ucm + ☝️ The namespace .foo.bar is empty. + +.foo.bar> add + + ⍟ I've added these definitions: + + type Foo + a : ##Nat + b : ##Nat + +``` +```unison +a = 4 +``` + +```ucm +.foo.bar> update + + ⍟ I've updated these names to your new definition: + + a : ##Nat + +.> find + + 1. + : ##Nat -> ##Nat -> ##Nat + 2. type foo.bar.Foo + 3. foo.bar.Foo.Bar : Foo + 4. foo.bar.Foo.Foo : Foo + 5. foo.bar.a : ##Nat + 6. foo.bar.b : ##Nat + + +``` +```unison +> b +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | > b + ⧩ + 5 + +``` diff --git a/codebase2/util-serialization/package.yaml b/codebase2/util-serialization/package.yaml new file mode 100644 index 0000000000..ca5306fee8 --- /dev/null +++ b/codebase2/util-serialization/package.yaml @@ -0,0 +1,17 @@ +name: unison-util-serialization + +library: + source-dirs: . 
+ +dependencies: + - base + - bytes + - bytestring + - containers + - extra + - filepath + - text + - text-short + - unliftio + - vector + - unison-util diff --git a/codebase2/util-serialization/unison-util-serialization.cabal b/codebase2/util-serialization/unison-util-serialization.cabal index f19c84819b..9738bc04f6 100644 --- a/codebase2/util-serialization/unison-util-serialization.cabal +++ b/codebase2/util-serialization/unison-util-serialization.cabal @@ -1,34 +1,32 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ +cabal-version: 1.12 -name: unison-util-serialization -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development +-- This file has been generated from package.yaml by hpack version 0.33.0. +-- +-- see: https://github.com/sol/hpack +-- +-- hash: f313cfc57110dcc277ac24eb79210d1dc8071221e3452971ad26e8a3e1d8abc4 + +name: unison-util-serialization +version: 0.0.0 +build-type: Simple library exposed-modules: - U.Util.Serialization - -- other-modules: - -- other-extensions: + U.Util.Serialization + other-modules: + Paths_unison_util_serialization + hs-source-dirs: + ./. build-depends: - base, - bytes, - bytestring, - containers, - extra, - filepath, - text, - text-short, - unliftio, - vector, - unison-util - hs-source-dirs: . 
- default-language: Haskell2010 + base + , bytes + , bytestring + , containers + , extra + , filepath + , text + , text-short + , unison-util + , unliftio + , vector + default-language: Haskell2010 diff --git a/defaults.yaml b/defaults.yaml new file mode 100644 index 0000000000..b965e931fb --- /dev/null +++ b/defaults.yaml @@ -0,0 +1,10 @@ +dependencies: + - base + - bytestring + - containers + - extra + - lens + - memory + - text + - unliftio + - safe diff --git a/parser-typechecker/package.yaml b/parser-typechecker/package.yaml new file mode 100644 index 0000000000..c8d3b7a0a4 --- /dev/null +++ b/parser-typechecker/package.yaml @@ -0,0 +1,203 @@ +name: unison-parser-typechecker +github: unisonweb/unison + +executables: + unison: + source-dirs: unison + main: Main.hs + ghc-options: -threaded -rtsopts -with-rtsopts=-I0 -optP-Wno-nonportable-include-path + dependencies: + - base + - bytestring + - containers + - configurator + - directory + - errors + - filepath + - lens + - megaparsec + - mtl + - safe + - shellmet + - template-haskell + - temporary + - text + - unison-core1 + - unison-parser-typechecker + - unison-codebase-sync + - uri-encode + when: + - condition: '!os(windows)' + dependencies: unix + + tests: + source-dirs: tests + main: Suite.hs + ghc-options: -W -threaded -rtsopts "-with-rtsopts=-N -T" -v0 + dependencies: + - async + - base + - bytestring + - containers + - directory + - easytest + - errors + - extra + - filepath + - filemanip + - here + - lens + - megaparsec + - mtl + - raw-strings-qq + - stm + - shellmet + - split + - temporary + - text + - transformers + - unliftio + - unison-core1 + - unison-parser-typechecker + - unison-util + + + transcripts: + source-dirs: transcripts + main: Transcripts.hs + ghc-options: -threaded -rtsopts -with-rtsopts=-N -v0 + dependencies: + - base + - directory + - easytest + - filepath + - shellmet + - process + - text + - unison-core1 + - unison-parser-typechecker + +benchmarks: + runtime: + source-dirs: 
benchmarks/runtime + main: Main.hs + dependencies: + - base + - criterion + - containers + - unison-core1 + - unison-parser-typechecker + +library: + source-dirs: src + dependencies: + - aeson + - ansi-terminal + - async + - base + - base16 >= 0.2.1.0 + - base64-bytestring + - basement + - bifunctors + - bytes + - bytestring + - cereal + - containers >= 0.6.3 + - comonad + - concurrent-supply + - configurator + - cryptonite + - data-default + - directory + - either + - guid + - data-memocombinators + - edit-distance + - errors + - exceptions + - extra + - filepath + - filepattern + - fingertree + - free + - fsnotify + - generic-monoid + - hashable + - hashtables + - haskeline + - http-types + - io-streams + - lens + - ListLike + - megaparsec >= 5.0.0 && < 7.0.0 + - memory + - mmorph + - monad-loops + - monad-validate + - mtl + - murmur-hash + - mutable-containers + - natural-transformation + - network + - network-simple + - nonempty-containers + - openapi3 + - pem + - process + - primitive + - random >= 1.2.0 + - raw-strings-qq + - regex-base + - regex-tdfa + - safe + - servant + - servant-docs + - servant-openapi3 + - servant-server + - shellmet + - split + - stm + - strings + - sqlite-simple + - tagged + - temporary + - terminal-size + - text + - time + - tls + - transformers + - unison-core1 + - unliftio + - unliftio-core + - util + - unicode-show + - validation + - vector + - wai + - warp + - unicode-show + - x509 + - x509-store + - x509-system + - unison-core + - unison-codebase + - unison-codebase-sqlite + - unison-codebase-sync + - unison-util + - unison-util-serialization + +default-extensions: + - ApplicativeDo + - BlockArguments + - DeriveFunctor + - DerivingStrategies + - DoAndIfThenElse + - FlexibleContexts + - FlexibleInstances + - LambdaCase + - MultiParamTypeClasses + - ScopedTypeVariables + - TupleSections + - TypeApplications + +ghc-options: -Wall -funbox-strict-fields -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures \ No 
newline at end of file diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index 18b8d6b537..ebba7b59d5 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -2,58 +2,58 @@ module Unison.Test.Ucm where -import Control.Monad.Writer (WriterT) -import qualified Control.Monad.Writer as Writer -import qualified Data.Sequence as Seq -import qualified Data.Text as Text -import System.FilePath (()) -import U.Util.Text (stripMargin) -import qualified Unison.Codebase.FileCodebase as FC -import qualified Unison.Codebase.Init as Codebase.Init -import qualified Unison.Codebase.SqliteCodebase as SC -import qualified Unison.Codebase.TranscriptParser as TR -import Unison.Prelude -import qualified Unison.Util.Pretty as P - -data Runtime = Runtime1 | Runtime2 - -data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 - -newtype CodebaseName = CodebaseName {unCodebaseName :: String} - -type Args = Map String String - -newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text - -type TranscriptOutput = String - -type Cleanup = Seq (IO ()) - -runTranscript :: - FilePath -> - CodebaseName -> - CodebaseFormat -> - Runtime -> - Args -> - (Args -> Transcript) -> - WriterT Cleanup IO TranscriptOutput -runTranscript tmpDir codebaseName fmt rt args mkTranscript = do - let configFile = tmpDir ".unisonConfig" - codebasePath = tmpDir unCodebaseName codebaseName - let err err = error $ "Parse error: \n" <> show err - cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init - codebase <- - lift (Codebase.Init.createCodebase cbInit codebasePath) >>= \case - Left e -> fail $ P.toANSI 80 e - Right (cleanup, c) -> do - Writer.tell . Seq.singleton $ cleanup - pure c - -- parse and run the transcript - flip (either err) (TR.parse "transcript" (stripMargin . unTranscript $ mkTranscript args)) $ \stanzas -> - liftIO . 
fmap Text.unpack $ - TR.run - (case rt of Runtime1 -> Just False; Runtime2 -> Just True) - codebasePath - configFile - stanzas - codebase +-- import Control.Monad.Writer (WriterT) +-- import qualified Control.Monad.Writer as Writer +-- import qualified Data.Sequence as Seq +-- import qualified Data.Text as Text +-- import System.FilePath (()) +-- import U.Util.Text (stripMargin) +-- import qualified Unison.Codebase.FileCodebase as FC +-- import qualified Unison.Codebase.Init as Codebase.Init +-- import qualified Unison.Codebase.SqliteCodebase as SC +-- import qualified Unison.Codebase.TranscriptParser as TR +-- import Unison.Prelude +-- import qualified Unison.Util.Pretty as P + +-- data Runtime = Runtime1 | Runtime2 + +-- data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 + +-- newtype CodebaseName = CodebaseName {unCodebaseName :: String} + +-- -- type Args = Map String String + +-- newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text + +-- type TranscriptOutput = String + +-- type Cleanup = Seq (IO ()) + +-- runTranscript :: +-- FilePath -> +-- CodebaseName -> +-- CodebaseFormat -> +-- Runtime -> +-- Args -> +-- (Args -> Transcript) -> +-- WriterT Cleanup IO TranscriptOutput +-- runTranscript tmpDir codebaseName fmt rt args mkTranscript = do +-- let configFile = tmpDir ".unisonConfig" +-- codebasePath = tmpDir unCodebaseName codebaseName +-- let err err = error $ "Parse error: \n" <> show err +-- cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init +-- codebase <- +-- lift (Codebase.Init.createCodebase cbInit codebasePath) >>= \case +-- Left e -> fail $ P.toANSI 80 e +-- Right (cleanup, c) -> do +-- Writer.tell . Seq.singleton $ cleanup +-- pure c +-- -- parse and run the transcript +-- flip (either err) (TR.parse "transcript" (stripMargin . unTranscript $ mkTranscript args)) $ \stanzas -> +-- liftIO . 
fmap Text.unpack $ +-- TR.run +-- (case rt of Runtime1 -> Just False; Runtime2 -> Just True) +-- codebasePath +-- configFile +-- stanzas +-- codebase diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index dad4418d6a..e3e71d9f77 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -1,444 +1,406 @@ -cabal-version: 2.2 -name: unison-parser-typechecker -category: Compiler -version: 0.1 -license: MIT -license-file: LICENSE -author: Unison Computing, public benefit corp -maintainer: Paul Chiusano , Runar Bjarnason , Arya Irani -stability: provisional -homepage: http://unisonweb.org -bug-reports: https://github.com/unisonweb/unison/issues -copyright: Copyright (C) 2013-2018 Unison Computing, PBC and contributors -synopsis: Parser and typechecker for the Unison language -description: +cabal-version: 1.12 -build-type: Simple -extra-source-files: -data-files: +-- This file has been generated from package.yaml by hpack version 0.33.0. +-- +-- see: https://github.com/sol/hpack +-- +-- hash: 0ae2571ad379b4554048a2fd52c78425b207f9626279fea937c4628afd6fafb0 + +name: unison-parser-typechecker +version: 0.0.0 +homepage: https://github.com/unisonweb/unison#readme +bug-reports: https://github.com/unisonweb/unison/issues +license: MIT +license-file: LICENSE +build-type: Simple source-repository head type: git - location: git://github.com/unisonweb/unison.git - --- `cabal install -foptimized` enables optimizations -flag optimized - manual: True - default: False - -flag quiet - manual: True - default: False - --- NOTE: Keep in sync throughout repo. 
-common unison-common - default-language: Haskell2010 - default-extensions: - ApplicativeDo, - BlockArguments, - DeriveFunctor, - DerivingStrategies, - DoAndIfThenElse, - FlexibleContexts, - FlexibleInstances, - LambdaCase, - MultiParamTypeClasses, - ScopedTypeVariables, - TupleSections, - TypeApplications + location: https://github.com/unisonweb/unison library - import: unison-common - - hs-source-dirs: src - exposed-modules: - Unison.Builtin - Unison.Builtin.Decls - Unison.Builtin.Terms - Unison.Codecs - Unison.Codebase - Unison.Codebase.Branch - Unison.Codebase.BranchDiff - Unison.Codebase.BranchUtil - Unison.Codebase.Causal - Unison.Codebase.Classes - Unison.Codebase.CodeLookup - Unison.Codebase.Conversion.Sync12 - Unison.Codebase.Conversion.Sync12BranchDependencies - Unison.Codebase.Editor.AuthorInfo - Unison.Codebase.Editor.Command - Unison.Codebase.Editor.DisplayObject - Unison.Codebase.Editor.Git - Unison.Codebase.Editor.HandleInput - Unison.Codebase.Editor.HandleCommand - Unison.Codebase.Editor.Input - Unison.Codebase.Editor.Output - Unison.Codebase.Editor.Output.BranchDiff - Unison.Codebase.Editor.Propagate - Unison.Codebase.Editor.RemoteRepo - Unison.Codebase.Editor.SlurpResult - Unison.Codebase.Editor.SlurpComponent - Unison.Codebase.Editor.TodoOutput - Unison.Codebase.Editor.UriParser - Unison.Codebase.Editor.VersionParser - Unison.Codebase.FileCodebase - Unison.Codebase.FileCodebase.Branch.Dependencies - Unison.Codebase.FileCodebase.Common - Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex - Unison.Codebase.GitError - Unison.Codebase.Init - Unison.Codebase.Metadata - Unison.Codebase.NameEdit - Unison.Codebase.Path - Unison.Codebase.Patch - Unison.Codebase.Reflog - Unison.Codebase.Runtime - Unison.Codebase.Serialization - Unison.Codebase.Serialization.PutT - Unison.Codebase.Serialization.V1 - Unison.Codebase.ShortBranchHash - Unison.Codebase.SqliteCodebase - Unison.Codebase.SqliteCodebase.Branch.Dependencies - 
Unison.Codebase.SqliteCodebase.Conversions - Unison.Codebase.SqliteCodebase.SyncEphemeral - Unison.Codebase.SyncMode - Unison.Codebase.TermEdit - Unison.Codebase.TranscriptParser - Unison.Codebase.TypeEdit - Unison.Codebase.Watch - Unison.Codebase.Execute - Unison.Codebase.MainTerm - Unison.CommandLine - Unison.CommandLine.DisplayValues - Unison.CommandLine.InputPattern - Unison.CommandLine.InputPatterns - Unison.CommandLine.Main - Unison.CommandLine.OutputMessages - Unison.DeclPrinter - Unison.FileParser - Unison.FileParsers - Unison.Lexer - Unison.NamePrinter - Unison.Parser - Unison.Parsers - Unison.Path - Unison.PrettyPrintEnv - Unison.PrettyTerminal - Unison.PrintError - Unison.Result - Unison.Runtime.ANF - Unison.Runtime.ANF.Serialize - Unison.Runtime.Builtin - Unison.Runtime.Debug - Unison.Runtime.Decompile - Unison.Runtime.Exception - Unison.Runtime.Foreign - Unison.Runtime.Foreign.Function - Unison.Runtime.Interface - Unison.Runtime.IR - Unison.Runtime.MCode - Unison.Runtime.Machine - Unison.Runtime.Pattern - Unison.Runtime.Rt1 - Unison.Runtime.Rt1IO - Unison.Runtime.IOSource - Unison.Runtime.Vector - Unison.Runtime.SparseVector - Unison.Runtime.Stack - Unison.Server.Backend - Unison.Server.CodebaseServer - Unison.Server.Endpoints.GetDefinitions - Unison.Server.Endpoints.ListNamespace - Unison.Server.Errors - Unison.Server.QueryResult - Unison.Server.SearchResult - Unison.Server.SearchResult' - Unison.Server.Syntax - Unison.Server.Types - Unison.TermParser - Unison.TermPrinter - Unison.TypeParser - Unison.TypePrinter - Unison.Typechecker - Unison.Typechecker.Components - Unison.Typechecker.Context - Unison.Typechecker.Extractor - Unison.Typechecker.TypeError - Unison.Typechecker.TypeLookup - Unison.Typechecker.TypeVar - Unison.UnisonFile - Unison.Util.AnnotatedText - Unison.Util.Bytes - Unison.Util.ColorText - Unison.Util.EnumContainers - Unison.Util.Exception - Unison.Util.Free - Unison.Util.Find - Unison.Util.Less - Unison.Util.Logger - Unison.Util.Map - 
Unison.Util.Menu - Unison.Util.PinBoard - Unison.Util.Pretty - Unison.Util.Range - Unison.Util.Star3 - Unison.Util.SyntaxText - Unison.Util.Timing - Unison.Util.TQueue - Unison.Util.TransitiveClosure - Unison.Util.CycleTable - Unison.Util.CyclicEq - Unison.Util.CyclicOrd - - build-depends: - aeson, - ansi-terminal, - async, - base, - base16 >= 0.2.1.0, - base64-bytestring, - basement, - bifunctors, - bytes, - bytestring, - cereal, - containers >= 0.6.3, - comonad, - concurrent-supply, - configurator, - cryptonite, - data-default, - directory, - either, - guid, - data-memocombinators, - edit-distance, - errors, - exceptions, - extra, - filepath, - filepattern, - fingertree, - free, - fsnotify, - generic-monoid, - hashable, - hashtables, - haskeline, - http-types, - io-streams, - lens, - ListLike, - megaparsec >= 5.0.0 && < 7.0.0, - memory, - mmorph, - monad-loops, - monad-validate, - mtl, - murmur-hash, - mutable-containers, - natural-transformation, - network, - network-simple, - nonempty-containers, - openapi3, - pem, - process, - primitive, - random >= 1.2.0, - raw-strings-qq, - regex-base, - regex-tdfa, - safe, - servant, - servant-docs, - servant-openapi3, - servant-server, - shellmet, - split, - stm, - strings, - sqlite-simple, - tagged, - temporary, - terminal-size, - text, - time, - tls, - transformers, - unison-core1, - unliftio, - unliftio-core, - util, - unicode-show, - validation, - vector, - wai, - warp, - unicode-show, - x509, - x509-store, - x509-system, - -- v2 - unison-core, - unison-codebase, - unison-codebase-sqlite, - unison-codebase-sync, - unison-util, - unison-util-serialization - - ghc-options: -Wall -O0 -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures - - if flag(optimized) - ghc-options: -funbox-strict-fields -O2 - - if flag(quiet) - ghc-options: -v0 - -executable unison - import: unison-common - main-is: Main.hs - hs-source-dirs: unison - ghc-options: -Wall -threaded -fno-warn-name-shadowing 
-fno-warn-missing-pattern-synonym-signatures -rtsopts -with-rtsopts=-I0 -optP-Wno-nonportable-include-path + Unison.Builtin + Unison.Builtin.Decls + Unison.Builtin.Terms + Unison.Codebase + Unison.Codebase.Branch + Unison.Codebase.BranchDiff + Unison.Codebase.BranchUtil + Unison.Codebase.Causal + Unison.Codebase.Classes + Unison.Codebase.CodeLookup + Unison.Codebase.Conversion.Sync12 + Unison.Codebase.Conversion.Sync12BranchDependencies + Unison.Codebase.Conversion.Upgrade12 + Unison.Codebase.Editor.AuthorInfo + Unison.Codebase.Editor.Command + Unison.Codebase.Editor.DisplayObject + Unison.Codebase.Editor.Git + Unison.Codebase.Editor.HandleCommand + Unison.Codebase.Editor.HandleInput + Unison.Codebase.Editor.Input + Unison.Codebase.Editor.Output + Unison.Codebase.Editor.Output.BranchDiff + Unison.Codebase.Editor.Propagate + Unison.Codebase.Editor.RemoteRepo + Unison.Codebase.Editor.SlurpComponent + Unison.Codebase.Editor.SlurpResult + Unison.Codebase.Editor.TodoOutput + Unison.Codebase.Editor.UriParser + Unison.Codebase.Editor.VersionParser + Unison.Codebase.Execute + Unison.Codebase.FileCodebase + Unison.Codebase.FileCodebase.Branch.Dependencies + Unison.Codebase.FileCodebase.Common + Unison.Codebase.FileCodebase.SlimCopyRegenerateIndex + Unison.Codebase.GitError + Unison.Codebase.Init + Unison.Codebase.MainTerm + Unison.Codebase.Metadata + Unison.Codebase.NameEdit + Unison.Codebase.Patch + Unison.Codebase.Path + Unison.Codebase.Reflog + Unison.Codebase.Runtime + Unison.Codebase.Serialization + Unison.Codebase.Serialization.PutT + Unison.Codebase.Serialization.V1 + Unison.Codebase.ShortBranchHash + Unison.Codebase.SqliteCodebase + Unison.Codebase.SqliteCodebase.Branch.Dependencies + Unison.Codebase.SqliteCodebase.Conversions + Unison.Codebase.SqliteCodebase.SyncEphemeral + Unison.Codebase.SyncMode + Unison.Codebase.TermEdit + Unison.Codebase.TranscriptParser + Unison.Codebase.TypeEdit + Unison.Codebase.Watch + Unison.Codecs + Unison.CommandLine + 
Unison.CommandLine.DisplayValues + Unison.CommandLine.InputPattern + Unison.CommandLine.InputPatterns + Unison.CommandLine.Main + Unison.CommandLine.OutputMessages + Unison.DeclPrinter + Unison.FileParser + Unison.FileParsers + Unison.Lexer + Unison.NamePrinter + Unison.Parser + Unison.Parsers + Unison.Path + Unison.PrettyPrintEnv + Unison.PrettyTerminal + Unison.PrintError + Unison.Result + Unison.Runtime.ANF + Unison.Runtime.ANF.Serialize + Unison.Runtime.Builtin + Unison.Runtime.Debug + Unison.Runtime.Decompile + Unison.Runtime.Exception + Unison.Runtime.Foreign + Unison.Runtime.Foreign.Function + Unison.Runtime.Interface + Unison.Runtime.IOSource + Unison.Runtime.IR + Unison.Runtime.Machine + Unison.Runtime.MCode + Unison.Runtime.Pattern + Unison.Runtime.Rt1 + Unison.Runtime.Rt1IO + Unison.Runtime.SparseVector + Unison.Runtime.Stack + Unison.Runtime.Vector + Unison.Server.Backend + Unison.Server.CodebaseServer + Unison.Server.Endpoints.GetDefinitions + Unison.Server.Endpoints.ListNamespace + Unison.Server.Errors + Unison.Server.QueryResult + Unison.Server.SearchResult + Unison.Server.SearchResult' + Unison.Server.Syntax + Unison.Server.Types + Unison.TermParser + Unison.TermPrinter + Unison.Typechecker + Unison.Typechecker.Components + Unison.Typechecker.Context + Unison.Typechecker.Extractor + Unison.Typechecker.TypeError + Unison.Typechecker.TypeLookup + Unison.Typechecker.TypeVar + Unison.TypeParser + Unison.TypePrinter + Unison.UnisonFile + Unison.Util.AnnotatedText + Unison.Util.Bytes + Unison.Util.ColorText + Unison.Util.CycleTable + Unison.Util.CyclicEq + Unison.Util.CyclicOrd + Unison.Util.EnumContainers + Unison.Util.Exception + Unison.Util.Find + Unison.Util.Free + Unison.Util.Less + Unison.Util.Logger + Unison.Util.Map + Unison.Util.Menu + Unison.Util.PinBoard + Unison.Util.Pretty + Unison.Util.Range + Unison.Util.Star3 + Unison.Util.SyntaxText + Unison.Util.Timing + Unison.Util.TQueue + Unison.Util.TransitiveClosure other-modules: - System.Path - 
Version + Paths_unison_parser_typechecker + hs-source-dirs: + src + default-extensions: ApplicativeDo BlockArguments DeriveFunctor DerivingStrategies DoAndIfThenElse FlexibleContexts FlexibleInstances LambdaCase MultiParamTypeClasses ScopedTypeVariables TupleSections TypeApplications + ghc-options: -Wall -funbox-strict-fields -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures build-depends: - base, - bytestring, - containers, - configurator, - directory, - errors, - filepath, - lens, - megaparsec, - mtl, - safe, - shellmet, - template-haskell, - temporary, - text, - unison-core1, - unison-parser-typechecker, - unison-codebase-sync, - uri-encode - if !os(windows) - build-depends: - unix - -executable prettyprintdemo - import: unison-common - main-is: Main.hs - hs-source-dirs: prettyprintdemo - ghc-options: -Wall -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures - build-depends: - base, - safe, - text, - unison-parser-typechecker + ListLike + , aeson + , ansi-terminal + , async + , base + , base16 >=0.2.1.0 + , base64-bytestring + , basement + , bifunctors + , bytes + , bytestring + , cereal + , comonad + , concurrent-supply + , configurator + , containers >=0.6.3 + , cryptonite + , data-default + , data-memocombinators + , directory + , edit-distance + , either + , errors + , exceptions + , extra + , filepath + , filepattern + , fingertree + , free + , fsnotify + , generic-monoid + , guid + , hashable + , hashtables + , haskeline + , http-types + , io-streams + , lens + , megaparsec >=5.0.0 && <7.0.0 + , memory + , mmorph + , monad-loops + , monad-validate + , mtl + , murmur-hash + , mutable-containers + , natural-transformation + , network + , network-simple + , nonempty-containers + , openapi3 + , pem + , primitive + , process + , random >=1.2.0 + , raw-strings-qq + , regex-base + , regex-tdfa + , safe + , servant + , servant-docs + , servant-openapi3 + , servant-server + , shellmet + , split + , sqlite-simple + , stm + , 
strings + , tagged + , temporary + , terminal-size + , text + , time + , tls + , transformers + , unicode-show + , unison-codebase + , unison-codebase-sqlite + , unison-codebase-sync + , unison-core + , unison-core1 + , unison-util + , unison-util-serialization + , unliftio + , unliftio-core + , util + , validation + , vector + , wai + , warp + , x509 + , x509-store + , x509-system + default-language: Haskell2010 executable tests - import: unison-common - main-is: Suite.hs - hs-source-dirs: tests - ghc-options: -W -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts "-with-rtsopts=-N -T" -v0 - build-depends: - base, - easytest + main-is: Suite.hs other-modules: - Unison.Test.ABT - Unison.Test.ANF - Unison.Test.Cache - Unison.Test.Codebase - Unison.Test.Codebase.Causal - Unison.Test.Codebase.FileCodebase - Unison.Test.Codebase.Path - Unison.Test.Codebase.Sync12 - Unison.Test.ColorText - Unison.Test.Common - Unison.Test.DataDeclaration - Unison.Test.FileParser - Unison.Test.Git - Unison.Test.GitSimple - Unison.Test.Lexer - Unison.Test.IO - Unison.Test.MCode - Unison.Test.Range - Unison.Test.Referent - Unison.Test.Term - Unison.Test.TermParser - Unison.Test.TermPrinter - Unison.Test.Type - Unison.Test.TypePrinter - Unison.Test.Typechecker - Unison.Test.Typechecker.Components - Unison.Test.Typechecker.Context - Unison.Test.Typechecker.TypeError - Unison.Test.Ucm - Unison.Test.UnisonSources - Unison.Test.UriParser - Unison.Test.Util.Bytes - Unison.Test.Util.PinBoard - Unison.Test.Util.Pretty - Unison.Test.Var - Unison.Test.VersionParser - Unison.Core.Test.Name - + Unison.Core.Test.Name + Unison.Test.ABT + Unison.Test.ANF + Unison.Test.Cache + Unison.Test.Codebase + Unison.Test.Codebase.Causal + Unison.Test.Codebase.FileCodebase + Unison.Test.Codebase.Path + Unison.Test.Codebase.Sync12 + Unison.Test.ColorText + Unison.Test.Common + Unison.Test.DataDeclaration + Unison.Test.FileParser + Unison.Test.Git + Unison.Test.GitSimple + 
Unison.Test.IO + Unison.Test.Lexer + Unison.Test.MCode + Unison.Test.Range + Unison.Test.Referent + Unison.Test.Term + Unison.Test.TermParser + Unison.Test.TermPrinter + Unison.Test.Type + Unison.Test.Typechecker + Unison.Test.Typechecker.Components + Unison.Test.Typechecker.Context + Unison.Test.Typechecker.TypeError + Unison.Test.TypePrinter + Unison.Test.Ucm + Unison.Test.UnisonSources + Unison.Test.UriParser + Unison.Test.Util.Bytes + Unison.Test.Util.PinBoard + Unison.Test.Util.Pretty + Unison.Test.Var + Unison.Test.VersionParser + Paths_unison_parser_typechecker + hs-source-dirs: + tests + default-extensions: ApplicativeDo BlockArguments DeriveFunctor DerivingStrategies DoAndIfThenElse FlexibleContexts FlexibleInstances LambdaCase MultiParamTypeClasses ScopedTypeVariables TupleSections TypeApplications + ghc-options: -Wall -funbox-strict-fields -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -W -threaded -rtsopts "-with-rtsopts=-N -T" -v0 build-depends: - async, - base, - bytestring, - containers, - directory, - easytest, - errors, - extra, - filepath, - filemanip, - here, - lens, - megaparsec, - mtl, - raw-strings-qq, - stm, - shellmet, - split, - temporary, - text, - transformers, - unliftio, - unison-core1, - unison-parser-typechecker, - unison-util + async + , base + , bytestring + , containers + , directory + , easytest + , errors + , extra + , filemanip + , filepath + , here + , lens + , megaparsec + , mtl + , raw-strings-qq + , shellmet + , split + , stm + , temporary + , text + , transformers + , unison-core1 + , unison-parser-typechecker + , unison-util + , unliftio + default-language: Haskell2010 executable transcripts - import: unison-common - main-is: Transcripts.hs - ghc-options: -W -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts -with-rtsopts=-N -v0 - hs-source-dirs: transcripts + main-is: Transcripts.hs other-modules: + Paths_unison_parser_typechecker + hs-source-dirs: + transcripts 
+ default-extensions: ApplicativeDo BlockArguments DeriveFunctor DerivingStrategies DoAndIfThenElse FlexibleContexts FlexibleInstances LambdaCase MultiParamTypeClasses ScopedTypeVariables TupleSections TypeApplications + ghc-options: -Wall -funbox-strict-fields -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts -with-rtsopts=-N -v0 build-depends: - base, - directory, - easytest, - filepath, - shellmet, - process, - text, - unison-core1, - unison-parser-typechecker + base + , directory + , easytest + , filepath + , process + , shellmet + , text + , unison-core1 + , unison-parser-typechecker + default-language: Haskell2010 + +executable unison + main-is: Main.hs + other-modules: + System.Path + Version + Paths_unison_parser_typechecker + hs-source-dirs: + unison + default-extensions: ApplicativeDo BlockArguments DeriveFunctor DerivingStrategies DoAndIfThenElse FlexibleContexts FlexibleInstances LambdaCase MultiParamTypeClasses ScopedTypeVariables TupleSections TypeApplications + ghc-options: -Wall -funbox-strict-fields -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -threaded -rtsopts -with-rtsopts=-I0 -optP-Wno-nonportable-include-path + build-depends: + base + , bytestring + , configurator + , containers + , directory + , errors + , filepath + , lens + , megaparsec + , mtl + , safe + , shellmet + , template-haskell + , temporary + , text + , unison-codebase-sync + , unison-core1 + , unison-parser-typechecker + , uri-encode + if !os(windows) + build-depends: + unix + default-language: Haskell2010 benchmark runtime - import: unison-common type: exitcode-stdio-1.0 main-is: Main.hs - ghc-options: -O2 - hs-source-dirs: benchmarks/runtime + other-modules: + Paths_unison_parser_typechecker + hs-source-dirs: + benchmarks/runtime + default-extensions: ApplicativeDo BlockArguments DeriveFunctor DerivingStrategies DoAndIfThenElse FlexibleContexts FlexibleInstances LambdaCase MultiParamTypeClasses ScopedTypeVariables 
TupleSections TypeApplications + ghc-options: -Wall -funbox-strict-fields -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures build-depends: - base, - criterion, - containers, - unison-core1, - unison-parser-typechecker + base + , containers + , criterion + , unison-core1 + , unison-parser-typechecker + default-language: Haskell2010 diff --git a/questions.md b/questions.md index 671e3d1dad..21f18d54f2 100644 --- a/questions.md +++ b/questions.md @@ -1,6 +1,6 @@ next steps: -- [ ] add format tag to watch cache expressions? +- [x] add format tag to watch cache expressions? - [x] fix up `Operations.loadBranchByCausalHash`; currently it's getting a single namespace, but we need to somewhere get the causal history. - [x] load a causal, allowing a missing value (C.Branch.Spine) - [x] load a causal and require its value (C.Branch.Causal) diff --git a/unison-src/transcripts/GitSimple.one-term.output.md b/unison-src/transcripts/GitSimple.one-term.output.md index a7606f1fcc..ab1bf491d1 100644 --- a/unison-src/transcripts/GitSimple.one-term.output.md +++ b/unison-src/transcripts/GitSimple.one-term.output.md @@ -24,7 +24,7 @@ c = 3 c : ##Nat -.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-6e0da114c2313a44/repo.git +.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-d431dff9390554a1/repo.git Done. 
@@ -32,7 +32,7 @@ c = 3 ------- ```ucm -.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-6e0da114c2313a44/repo.git +.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-d431dff9390554a1/repo.git Here's what's changed in the current namespace after the merge: diff --git a/unison-src/transcripts/GitSimple.one-term2.output.md b/unison-src/transcripts/GitSimple.one-term2.output.md index d608d66e77..9391f4d9c2 100644 --- a/unison-src/transcripts/GitSimple.one-term2.output.md +++ b/unison-src/transcripts/GitSimple.one-term2.output.md @@ -26,7 +26,7 @@ c = 3 c : ##Nat -.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-6e1967b10dc504d9/repo.git +.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-0b775fa442535bdb/repo.git Done. @@ -36,7 +36,7 @@ c = 3 ```ucm ☝️ The namespace .yourLib is empty. -.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-6e1967b10dc504d9/repo.git +.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-0b775fa442535bdb/repo.git Here's what's changed in the current namespace after the merge: diff --git a/unison-src/transcripts/GitSimple.one-type.output.md b/unison-src/transcripts/GitSimple.one-type.output.md index c13084a8d0..432913db0d 100644 --- a/unison-src/transcripts/GitSimple.one-type.output.md +++ b/unison-src/transcripts/GitSimple.one-type.output.md @@ -26,7 +26,7 @@ type Foo = Foo type Foo -.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-cc52d9c301e40738/repo.git +.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-202b2f75069902c8/repo.git Done. @@ -36,7 +36,7 @@ type Foo = Foo ```ucm ☝️ The namespace .yourLib is empty. 
-.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-cc52d9c301e40738/repo.git +.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-202b2f75069902c8/repo.git Here's what's changed in the current namespace after the merge: diff --git a/unison-src/transcripts/GitSimple.patching.output.md b/unison-src/transcripts/GitSimple.patching.output.md new file mode 100644 index 0000000000..11a4fe3d1c --- /dev/null +++ b/unison-src/transcripts/GitSimple.patching.output.md @@ -0,0 +1,190 @@ +```ucm + ☝️ The namespace .myLib is empty. + +.myLib> alias.term ##Nat.+ + + + Done. + +``` +```unison +improveNat x = x + 3 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + improveNat : ##Nat -> ##Nat + +``` +```ucm +.myLib> add + + ⍟ I've added these definitions: + + improveNat : ##Nat -> ##Nat + +.myLib> ls + + 1. + (##Nat -> ##Nat -> ##Nat) + 2. improveNat (##Nat -> ##Nat) + +.myLib> move.namespace .myLib .workaround1552.myLib.v1 + + Done. + +.workaround1552.myLib> ls + + 1. v1/ (2 definitions) + +.workaround1552.myLib> fork v1 v2 + + Done. + +``` +```unison +improveNat x = x + 100 +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These names already exist. You can `update` them to your + new definition: + + improveNat : ##Nat -> ##Nat + +``` +```ucm +.workaround1552.myLib.v2> update + + ⍟ I've updated these names to your new definition: + + improveNat : ##Nat -> ##Nat + +.workaround1552.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-patching-7fa752c409f898ef/repo.git + + Done. + +``` + +------- +```ucm + ☝️ The namespace .myApp is empty. 
+ +.myApp> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-patching-7fa752c409f898ef/repo.git:.v1 external.yourLib + + Here's what's changed in external.yourLib after the merge: + + Added definitions: + + 1. + : ##Nat -> ##Nat -> ##Nat + 2. improveNat : ##Nat -> ##Nat + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +.myApp> alias.term ##Nat.* * + + Done. + +``` +` +```unison +> greatApp = improveNat 5 * improveNat 6 +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | > greatApp = improveNat 5 * improveNat 6 + ⧩ + 72 + +``` +```ucm +.myApp> add + + + +.myApp> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-patching-7fa752c409f898ef/repo.git:.v2 external.yourLib + + Here's what's changed in external.yourLib after the merge: + + Updates: + + 1. improveNat : ##Nat -> ##Nat + ↓ + 2. improveNat : ##Nat -> ##Nat + + Added definitions: + + 3. patch patch (added 1 updates) + + Tip: You can use `todo` to see if this generated any work to + do in this namespace and `test` to run the tests. Or you + can use `undo` or `reflog` to undo the results of this + merge. + +``` +```unison +> greatApp = improveNat 5 * improveNat 6 +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | > greatApp = improveNat 5 * improveNat 6 + ⧩ + 11130 + +``` +```ucm +.myApp> patch external.yourLib.patch + + 😶 + + This had no effect. Perhaps the patch has already been applied + or it doesn't intersect with the definitions in + the current namespace. + +``` +```unison +> greatApp = improveNat 5 * improveNat 6 +``` + +```ucm + + ✅ + + scratch.u changed. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. 
+ + 1 | > greatApp = improveNat 5 * improveNat 6 + ⧩ + 11130 + +``` From e1c3302e6a352cd28b31694d69bdc18d7873d9f1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 6 Apr 2021 12:52:30 -0600 Subject: [PATCH 152/225] making a mess in git part 2 --- parser-typechecker/src/Unison/Codebase.hs | 2 +- .../src/Unison/Codebase/Conversion/Sync12.hs | 4 + .../src/Unison/Codebase/FileCodebase.hs | 6 +- .../src/Unison/Codebase/SqliteCodebase.hs | 2 +- .../src/Unison/Codebase/Watch.hs | 37 +- .../tests/Unison/Test/Codebase/Sync12.hs | 86 ++++- parser-typechecker/tests/Unison/Test/Git.hs | 6 +- parser-typechecker/tests/Unison/Test/Ucm.hs | 83 ++-- parser-typechecker/unison/Main.hs | 30 +- stack.yaml | 2 +- unison-src/Base.u | 360 +++++++++--------- .../transcripts/GitSimple.one-term.output.md | 4 +- .../transcripts/GitSimple.one-term2.output.md | 4 +- .../transcripts/GitSimple.one-type.output.md | 4 +- 14 files changed, 354 insertions(+), 276 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index de6a6a7f15..f2dec20577 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -70,7 +70,7 @@ data Codebase m v a = , getRootBranch :: m (Either GetRootBranchError (Branch m)) , putRootBranch :: Branch m -> m () - , rootBranchUpdates :: m (m (), m (Set Branch.Hash)) + , rootBranchUpdates :: m (IO (), IO (Set Branch.Hash)) , getBranchForHash :: Branch.Hash -> m (Maybe (Branch m)) , putBranch :: Branch m -> m () , branchExists :: Branch.Hash -> m Bool diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 02ae46e538..7da9319a4e 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -484,8 +484,11 @@ simpleProgress :: MonadState (ProgressState m) n => MonadIO n => Sync.Progress n simpleProgress = 
Sync.Progress need done error allDone where -- ignore need + need e = liftIO $ putStrLn $ "need " ++ show e need _ = pure () done e = do + liftIO $ putStrLn $ "done " ++ show e + case e of C {} -> _1 . doneBranches += 1 T {} -> _1 . doneTerms += 1 @@ -494,6 +497,7 @@ simpleProgress = Sync.Progress need done error allDone printProgress error e = do + liftIO $ putStrLn $ "error " ++ show e case e of C {} -> _2 . errorBranches += 1 T {} -> _2 . errorTerms += 1 diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index 9c33c6ca4b..fe8f481516 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -86,7 +86,7 @@ import qualified Unison.Util.TQueue as TQueue import Unison.Util.Timing (time) import Unison.Var (Var) import UnliftIO (MonadUnliftIO) -import UnliftIO.Concurrent (forkIO, killThread) +import Control.Concurrent (forkIO, killThread) import UnliftIO.Directory (createDirectoryIfMissing, doesDirectoryExist) import UnliftIO.Exception (catchIO) import UnliftIO.STM (atomically) @@ -230,13 +230,13 @@ codebase1' syncToDirectory branchCache fmtV@(S.Format getV putV) fmtA@(S.Format -- watches in `branchHeadDir root` for externally deposited heads; -- parse them, and return them branchHeadUpdates - :: MonadUnliftIO m => CodebasePath -> m (m (), m (Set Branch.Hash)) + :: MonadIO m => CodebasePath -> m (IO (), IO (Set Branch.Hash)) branchHeadUpdates root = do branchHeadChanges <- TQueue.newIO (cancelWatch, watcher) <- Watch.watchDirectory' (branchHeadDir root) -- -- add .ubf file changes to intermediate queue watcher1 <- - forkIO + liftIO . forkIO $ forever $ do -- Q: what does watcher return on a file deletion? 
diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 1773a419d9..659749a944 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -446,7 +446,7 @@ sqliteCodebase root = do . Cv.causalbranch1to2 $ Branch.transform (lift . lift) branch1 - rootBranchUpdates :: MonadIO m => m (m (), m (Set Branch.Hash)) + rootBranchUpdates :: MonadIO m => m (IO (), IO (Set Branch.Hash)) rootBranchUpdates = pure (cleanup, liftIO newRootsDiscovered) where newRootsDiscovered = do diff --git a/parser-typechecker/src/Unison/Codebase/Watch.hs b/parser-typechecker/src/Unison/Codebase/Watch.hs index d1ab5992b2..2c2af76906 100644 --- a/parser-typechecker/src/Unison/Codebase/Watch.hs +++ b/parser-typechecker/src/Unison/Codebase/Watch.hs @@ -1,18 +1,16 @@ {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE TypeApplications #-} + module Unison.Codebase.Watch where import Unison.Prelude -import qualified UnliftIO as UnliftIO -import UnliftIO.Concurrent ( forkIO +import Control.Concurrent ( forkIO , threadDelay , killThread ) -import UnliftIO ( MonadUnliftIO - , withRunInIO - , unliftIO ) import UnliftIO.Directory ( getModificationTime , listDirectory , doesPathExist @@ -40,10 +38,10 @@ untilJust :: Monad m => m (Maybe a) -> m a untilJust act = act >>= maybe (untilJust act) return watchDirectory' - :: forall m. MonadUnliftIO m => FilePath -> m (m (), m (FilePath, UTCTime)) + :: forall m. 
MonadIO m => FilePath -> m (IO (), IO (FilePath, UTCTime)) watchDirectory' d = do mvar <- newEmptyMVar - let handler :: Event -> m () + let handler :: Event -> IO () handler e = case e of Added fp t False -> doIt fp t Modified fp t False -> doIt fp t @@ -57,21 +55,21 @@ watchDirectory' d = do -- we don't like FSNotify's debouncing (it seems to drop later events) -- so we will be doing our own instead let config = FSNotify.defaultConfig { FSNotify.confDebounce = FSNotify.NoDebounce } - cancel <- forkIO $ withRunInIO $ \inIO -> + cancel <- liftIO $ forkIO $ FSNotify.withManagerConf config $ \mgr -> do - cancelInner <- FSNotify.watchDir mgr d (const True) (inIO . handler) <|> (pure (pure ())) + cancelInner <- FSNotify.watchDir mgr d (const True) handler <|> (pure (pure ())) putMVar cleanupRef $ liftIO cancelInner forever $ threadDelay 1000000 - let cleanup :: m () + let cleanup :: IO () cleanup = join (takeMVar cleanupRef) >> killThread cancel pure (cleanup, takeMVar mvar) -collectUntilPause :: forall m a. MonadIO m => TQueue a -> Int -> m [a] +collectUntilPause :: forall a. TQueue a -> Int -> IO [a] collectUntilPause queue minPauseµsec = do -- 1. wait for at least one element in the queue void . atomically $ TQueue.peek queue - let go :: MonadIO m => m [a] + let go :: IO [a] go = do before <- atomically $ TQueue.enqueueCount queue threadDelay minPauseµsec @@ -82,8 +80,8 @@ collectUntilPause queue minPauseµsec = do else go go -watchDirectory :: forall m. MonadUnliftIO m - => FilePath -> (FilePath -> Bool) -> m (m (), m (FilePath, Text)) +watchDirectory :: forall m. 
MonadIO m + => FilePath -> (FilePath -> Bool) -> m (IO (), IO (FilePath, Text)) watchDirectory dir allow = do previousFiles <- newIORef Map.empty (cancelWatch, watcher) <- watchDirectory' dir @@ -94,14 +92,14 @@ watchDirectory dir allow = do filtered <- filterM doesPathExist files let withTime file = (file,) <$> getModificationTime file sortOn snd <$> mapM withTime filtered - process :: MonadIO m => FilePath -> UTCTime -> m (Maybe (FilePath, Text)) + process :: FilePath -> UTCTime -> IO (Maybe (FilePath, Text)) process file t = if allow file then let - handle :: IOException -> m () + handle :: IOException -> IO () handle e = do liftIO $ putStrLn $ "‼ Got an exception while reading: " <> file liftIO $ print (e :: IOException) - go :: MonadUnliftIO m => m (Maybe (FilePath, Text)) + go :: IO (Maybe (FilePath, Text)) go = liftIO $ do contents <- Data.Text.IO.readFile file prevs <- readIORef previousFiles @@ -118,18 +116,17 @@ watchDirectory dir allow = do else return Nothing queue <- TQueue.newIO gate <- liftIO newEmptyMVar - ctx <- UnliftIO.askUnliftIO -- We spawn a separate thread to siphon the file change events -- into a queue, which can be debounced using `collectUntilPause` enqueuer <- liftIO . 
forkIO $ do takeMVar gate -- wait until gate open before starting forever $ do - event@(file, _) <- UnliftIO.unliftIO ctx watcher + event@(file, _) <- watcher when (allow file) $ STM.atomically $ TQueue.enqueue queue event pending <- newIORef =<< existingFiles let - await :: MonadIO m => m (FilePath, Text) + await :: IO (FilePath, Text) await = untilJust $ readIORef pending >>= \case [] -> do -- open the gate diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs index af649d81f3..72f7fb1c11 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs @@ -1,10 +1,86 @@ +{-# LANGUAGE TemplateHaskell #-} +{-# Language QuasiQuotes #-} +{-# LANGUAGE OverloadedStrings #-} + module Unison.Test.Codebase.Sync12 where -import Control.Error (minimumMay) -import Control.Lens (view, _1) -import qualified Data.Char as Char -import Data.Maybe (fromMaybe) -import Unison.Test.Ucm +-- import Control.Error (minimumMay) +-- import Control.Lens (view, _1) +-- import qualified Data.Char as Char +-- import Data.Maybe (fromMaybe) +-- import EasyTest (Test, scope, tests, io) +-- import qualified System.IO.Temp as Temp +-- import Unison.Test.Ucm (CodebaseFormat, Runtime) +-- import qualified Data.Text as Text +-- import qualified U.Util.Text as Text +-- import Data.String.Here.Interpolated (iTrim) +-- import qualified Unison.Test.Ucm as Ucm +-- import Shellmet () +-- import System.FilePath (()) +-- import UnliftIO (MonadIO (liftIO)) + + +-- -- test = scope "Sync" $ tests [typeAlias, topLevelTerm, subNamespace, accessPatch, history] + +-- typeAlias :: Test () +-- typeAlias = makeTest "typeAlias" do +-- tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "typeAlias" +-- c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 tmp +-- runTranscript c1 [iTrim| +-- ```ucm +-- .> alias.type ##Nat builtin.Nat +-- ``` +-- |] +-- c2 <- 
runConversion12 c +-- runTranscript c2 $ Text.stripMargin [iTrim| +-- ```unison +-- x :: Nat +-- x = 3 +-- ``` +-- |] +-- topLevelTerm :: Test () +-- topLevelTerm = makeTest "topLevelTerm" do +-- tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "typeAlias" +-- c1 <- initV1Codebase tmp +-- runTranscript c1 [iTrim| +-- ```unison +-- y = 3 +-- ``` +-- ```ucm +-- .> add +-- ``` +-- |] +-- runTranscript c2 [iTrim| +-- ```ucm +-- .> find +-- ``` +-- ```unison +-- > y +-- ``` +-- |] +-- subNamespace :: Test () +-- subNamespace = makeTest "subNamespace" do +-- tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "subNamespace" +-- runTranscript tmp "src" $ Text.stripMargin [iTrim| +-- ```ucm +-- .> alias.type ##Nat builtin.Nat +-- ``` +-- ```unison +-- type myLib.X = X Nat +-- ``` +-- ```ucm +-- .> push ${repo} myLib +-- ``` +-- |] +-- runTranscript tmp "dest" $ Text.stripMargin [iTrim| +-- ```ucm +-- .> pull ${repo} yourLib +-- .> find +-- ``` +-- ```unison +-- > X 3 +-- ``` +-- |] diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs index 682e8a9704..e4b3f6ae7d 100644 --- a/parser-typechecker/tests/Unison/Test/Git.hs +++ b/parser-typechecker/tests/Unison/Test/Git.hs @@ -30,6 +30,7 @@ import Unison.Parser (Ann) import Unison.Symbol (Symbol) import Unison.Var (Var) import qualified U.Util.Cache as Cache +import U.Util.String (stripMargin) test :: Test () test = scope "git-fc" . 
tests $ @@ -355,7 +356,7 @@ testPush = scope "push" $ do removeDirectoryRecursive tmp where - setupTranscript = [iTrim| + setupTranscript = stripMargin [iTrim| ```ucm .> builtins.merge ``` @@ -393,7 +394,8 @@ testPush = scope "push" $ do .foo.inside> update ``` |] - pushTranscript repo = [iTrim| + + pushTranscript repo = stripMargin [iTrim| ```ucm .foo.inside> push ${repo} ``` diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index ebba7b59d5..1894252b8d 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -1,54 +1,82 @@ +{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DerivingVia #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} module Unison.Test.Ucm where --- import Control.Monad.Writer (WriterT) --- import qualified Control.Monad.Writer as Writer --- import qualified Data.Sequence as Seq --- import qualified Data.Text as Text --- import System.FilePath (()) --- import U.Util.Text (stripMargin) --- import qualified Unison.Codebase.FileCodebase as FC --- import qualified Unison.Codebase.Init as Codebase.Init --- import qualified Unison.Codebase.SqliteCodebase as SC --- import qualified Unison.Codebase.TranscriptParser as TR --- import Unison.Prelude --- import qualified Unison.Util.Pretty as P +import Control.Monad.State (MonadState, StateT) +import qualified Control.Monad.State as State +import Control.Monad.Writer (MonadWriter, WriterT) +import qualified Control.Monad.Writer as Writer +import qualified Data.Sequence as Seq +import qualified Data.Text as Text +import EasyTest +import System.FilePath (()) +import qualified System.IO.Temp as Temp +import U.Util.Text (stripMargin) +import Unison.Codebase (CodebasePath) +import qualified Unison.Codebase.FileCodebase as FC +import qualified Unison.Codebase.Init as Codebase.Init +import qualified Unison.Codebase.SqliteCodebase as SC +import qualified Unison.Codebase.TranscriptParser as TR +import Unison.Prelude 
+import qualified Unison.Util.Pretty as P +import UnliftIO (MonadUnliftIO) --- data Runtime = Runtime1 | Runtime2 +data Runtime = Runtime1 | Runtime2 --- data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 +data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving Show --- newtype CodebaseName = CodebaseName {unCodebaseName :: String} +data Codebase = Codebase CodebasePath CodebaseFormat deriving Show --- -- type Args = Map String String +newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text --- newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text +type TranscriptOutput = String --- type TranscriptOutput = String +type Cleanup m = Seq (m ()) --- type Cleanup = Seq (IO ()) +initCodebase :: (MonadUnliftIO m, MonadWriter (Cleanup m) m) => CodebaseFormat -> m Codebase +initCodebase fmt = do + let cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init + tmp <- + liftIO $ + Temp.getCanonicalTemporaryDirectory + >>= flip Temp.createTempDirectory ("ucm-test") + void $ + Codebase.Init.createCodebase cbInit tmp >>= \case + Left e -> error $ P.toANSI 80 e + Right (cleanup, c) -> do + Writer.tell . Seq.singleton $ cleanup + pure c + pure $ Codebase tmp fmt +upgradeCodebase :: MonadIO m => Codebase -> m Codebase +upgradeCodebase = \case + c@(Codebase _ CodebaseFormat2) -> error $ show c ++ " already in V2 format." 
+ Codebase path CodebaseFormat1 -> undefined + +-- type UcmTest m = (MonadIO m, MonadWriter (Cleanup m) m, MonadState Args m) -- runTranscript :: +-- UcmTest m => -- FilePath -> -- CodebaseName -> -- CodebaseFormat -> -- Runtime -> --- Args -> -- (Args -> Transcript) -> --- WriterT Cleanup IO TranscriptOutput --- runTranscript tmpDir codebaseName fmt rt args mkTranscript = do +-- m TranscriptOutput +-- runTranscript tmpDir codebaseName fmt rt mkTranscript = do -- let configFile = tmpDir ".unisonConfig" -- codebasePath = tmpDir unCodebaseName codebaseName -- let err err = error $ "Parse error: \n" <> show err -- cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init -- codebase <- --- lift (Codebase.Init.createCodebase cbInit codebasePath) >>= \case --- Left e -> fail $ P.toANSI 80 e +-- (Codebase.Init.createCodebase cbInit codebasePath) >>= \case +-- Left e -> error $ P.toANSI 80 e -- Right (cleanup, c) -> do -- Writer.tell . Seq.singleton $ cleanup -- pure c -- -- parse and run the transcript +-- args <- State.get -- flip (either err) (TR.parse "transcript" (stripMargin . unTranscript $ mkTranscript args)) $ \stanzas -> -- liftIO . 
fmap Text.unpack $ -- TR.run @@ -57,3 +85,10 @@ module Unison.Test.Ucm where -- configFile -- stanzas -- codebase + +-- runTests :: UcmTest m => m a -> Test a +-- runTests a = do +-- ((result, cleanup), _args) <- flip State.runStateT mempty $ Writer.runWriterT a +-- sequence_ cleanup +-- pure result +-- pure result diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 973b5a75e2..1ace52edbe 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -62,6 +62,7 @@ import qualified Unison.Server.CodebaseServer as Server import Unison.Symbol (Symbol) import qualified Unison.Util.Pretty as P import qualified Version +import qualified Unison.Codebase.Conversion.Upgrade12 as Upgrade12 usage :: String -> P.Pretty P.ColorText usage executableStr = P.callout "🌻" $ P.lines [ @@ -157,7 +158,7 @@ main = do (mNewRun, restargs1) = case restargs0 of "--new-runtime" : rest -> (Just True, rest) _ -> (Nothing, restargs0) - (fromMaybe False -> newCodebase, restargs) = case restargs1 of + (fromMaybe True -> newCodebase, restargs) = case restargs1 of "--new-codebase" : rest -> (Just True, rest) "--old-codebase" : rest -> (Just False, rest) _ -> (Nothing, restargs1) @@ -219,31 +220,8 @@ main = do Exit.exitWith (Exit.ExitFailure 1) upgradeCodebase :: Maybe Codebase.CodebasePath -> IO () -upgradeCodebase mcodepath = do - (cleanupSrc, srcCB) <- Codebase.getCodebaseOrExit FC.init mcodepath -- FC.init mcodepath - (cleanupDest, destCB) <- Codebase.getCodebaseOrExit SC.init mcodepath -- FC.init mcodepath - destDB <- SC.unsafeGetConnection =<< Codebase.getCodebaseDir mcodepath - let env = Sync12.Env srcCB destCB destDB - let initialState :: Sync12.ProgressState _ = - (Sync12.emptyDoneCount, Sync12.emptyErrorCount, Sync12.emptyStatus) - rootEntity <- - Codebase.getRootBranch srcCB >>= \case - Left e -> error $ "Error loading source codebase root branch: " ++ show e - Right (Branch.Branch c) -> pure $ Sync12.C (Causal.currentHash 
c) (pure c) - let progress = Sync12.simpleProgress @IO - flip Reader.runReaderT env . flip State.evalStateT initialState $ do - sync <- Sync12.sync12 (lift . lift) - Sync.sync @_ @(Sync12.Entity _) - (Sync.transformSync (lensStateT Lens._3) sync) - progress - [rootEntity] - cleanupSrc - cleanupDest - where - lensStateT :: Monad m => Lens' s2 s1 -> StateT s1 m a -> StateT s2 m a - lensStateT l m = StateT \s2 -> do - (a, s1') <- runStateT m (s2 Lens.^. l) - pure (a, s2 & l Lens..~ s1') +upgradeCodebase mcodepath = + Codebase.getCodebaseDir mcodepath >>= Upgrade12.upgradeCodebase prepareTranscriptDir :: Codebase.Init IO Symbol Ann -> Bool -> Maybe FilePath -> IO FilePath prepareTranscriptDir cbInit inFork mcodepath = do diff --git a/stack.yaml b/stack.yaml index 7793f1ab03..2f06fc8ec8 100644 --- a/stack.yaml +++ b/stack.yaml @@ -64,7 +64,7 @@ extra-deps: ghc-options: # All packages - "$locals": -Wall -Wno-name-shadowing -Werror -Wno-type-defaults -Wno-missing-pattern-synonym-signatures -fprint-expanded-synonyms #-freverse-errors + "$locals": -Wall -Wno-name-shadowing -Wno-type-defaults -Wno-missing-pattern-synonym-signatures -fprint-expanded-synonyms #-freverse-errors # See https://github.com/haskell/haskell-language-server/issues/208 "$everything": -haddock diff --git a/unison-src/Base.u b/unison-src/Base.u index 2236899e64..a724a43300 100644 --- a/unison-src/Base.u +++ b/unison-src/Base.u @@ -1,15 +1,13 @@ -namespace Nat where - maxNat = 18446744073709551615 +Nat.maxNat = 18446744073709551615 - (-) : Nat -> Nat -> Int - (-) = Nat.sub +(Nat.-) : Nat -> Nat -> Int +(Nat.-) = Nat.sub -namespace Int where - maxInt = +9223372036854775807 - minInt = -9223372036854775808 +Int.maxInt = +9223372036854775807 +Int.minInt = -9223372036854775808 -use Universal == < > >= -use Optional None Some +-- use Universal == < > >= +-- use Optional None Some -- Function composition dot : (b -> c) -> (a -> b) -> a -> c @@ -24,113 +22,110 @@ const a _ = a use Tuple Cons -namespace 
Tuple where - at1 : Tuple a b -> a - at1 = cases Cons a _ -> a +-- namespace Tuple where +Tuple.at1 : Tuple a b -> a +Tuple.at1 = cases Cons a _ -> a +Tuple.at2 : Tuple a (Tuple b c) -> b +Tuple.at2 = cases Cons _ (Cons b _) -> b +Tuple.at3 : Tuple a (Tuple b (Tuple c d)) -> c +Tuple.at3 = cases Cons _ (Cons _ (Cons c _)) -> c +Tuple.at4 : Tuple a (Tuple b (Tuple c (Tuple d e))) -> d +Tuple.at4 = cases Cons _ (Cons _ (Cons _ (Cons d _))) -> d - at2 : Tuple a (Tuple b c) -> b - at2 = cases Cons _ (Cons b _) -> b +-- namespace List where - at3 : Tuple a (Tuple b (Tuple c d)) -> c - at3 = cases Cons _ (Cons _ (Cons c _)) -> c - - at4 : Tuple a (Tuple b (Tuple c (Tuple d e))) -> d - at4 = cases Cons _ (Cons _ (Cons _ (Cons d _))) -> d - -namespace List where - - map : (a -> b) -> [a] -> [b] - map f a = +List.map : (a -> b) -> [a] -> [b] +List.map f a = go i as acc = match List.at i as with None -> acc Some a -> go (i + 1) as (acc `snoc` f a) go 0 a [] - zip : [a] -> [b] -> [(a,b)] - zip as bs = +List.zip : [a] -> [b] -> [(a,b)] +List.zip as bs = go acc i = match (at i as, at i bs) with (None,_) -> acc (_,None) -> acc (Some a, Some b) -> go (acc `snoc` (a,b)) (i + 1) go [] 0 - insert : Nat -> a -> [a] -> [a] - insert i a as = take i as ++ [a] ++ drop i as +List.insert : Nat -> a -> [a] -> [a] +List.insert i a as = take i as ++ [a] ++ drop i as - replace : Nat -> a -> [a] -> [a] - replace i a as = take i as ++ [a] ++ drop (i + 1) as +List.replace : Nat -> a -> [a] -> [a] +List.replace i a as = take i as ++ [a] ++ drop (i + 1) as - slice : Nat -> Nat -> [a] -> [a] - slice start stopExclusive s = +List.slice : Nat -> Nat -> [a] -> [a] +List.slice start stopExclusive s = take (stopExclusive `Nat.drop` start) (drop start s) - unsafeAt : Nat -> [a] -> a - unsafeAt n as = match at n as with +List.unsafeAt : Nat -> [a] -> a +List.unsafeAt n as = match at n as with Some a -> a None -> Debug.watch "oh noes" (unsafeAt n as) -- Debug.crash "oh noes!" 
- foldl : (b -> a -> b) -> b -> [a] -> b - foldl f b as = +List.foldl : (b -> a -> b) -> b -> [a] -> b +List.foldl f b as = go b i = match List.at i as with None -> b Some a -> go (f b a) (i + 1) go b 0 - foldb : (a -> b) -> (b -> b -> b) -> b -> [a] -> b - foldb f op z as = +List.foldb : (a -> b) -> (b -> b -> b) -> b -> [a] -> b +List.foldb f op z as = if List.size as == 0 then z else if List.size as == 1 then f (unsafeAt 0 as) else match halve as with (left, right) -> foldb f op z left `op` foldb f op z right - reverse : [a] -> [a] - reverse as = foldl (acc a -> List.cons a acc) [] as +List.reverse : [a] -> [a] +List.reverse as = foldl (acc a -> List.cons a acc) [] as - indexed : [a] -> [(a, Nat)] - indexed as = as `zip` range 0 (size as) +List.indexed : [a] -> [(a, Nat)] +List.indexed as = as `zip` range 0 (size as) - sortBy : (a -> b) -> [a] -> [a] - sortBy f as = +List.sortBy : (a -> b) -> [a] -> [a] +List.sortBy f as = tweak p = match p with (p1,p2) -> (f p1, p2, p1) Heap.sort (map tweak (indexed as)) |> map Tuple.at3 - halve : [a] -> ([a], [a]) - halve s = +List.halve : [a] -> ([a], [a]) +List.halve s = n = size s / 2 (take n s, drop n s) - unfold : s -> (s -> Optional (a, s)) -> [a] - unfold s0 f = +List.unfold : s -> (s -> Optional (a, s)) -> [a] +List.unfold s0 f = go f s acc = match f s with None -> acc Some (a, s) -> go f s (acc `snoc` a) go f s0 [] - uncons : [a] -> Optional (a, [a]) - uncons as = match at 0 as with +List.uncons : [a] -> Optional (a, [a]) +List.uncons as = match at 0 as with None -> None Some a -> Some (a, drop 1 as) - unsnoc : [a] -> Optional ([a], a) - unsnoc as = +List.unsnoc : [a] -> Optional ([a], a) +List.unsnoc as = i = size (drop 1 as) match at i as with None -> None Some a -> Some (take i as, a) - join : [[a]] -> [a] - join = foldl (++) [] +List.join : [[a]] -> [a] +List.join = foldl (++) [] - flatMap : (a -> [b]) -> [a] -> [b] - flatMap f as = join (map f as) +List.flatMap : (a -> [b]) -> [a] -> [b] +List.flatMap f as = join 
(map f as) - range : Nat -> Nat -> [Nat] - range start stopExclusive = +List.range : Nat -> Nat -> [Nat] +List.range start stopExclusive = f i = if i < stopExclusive then Some (i, i + 1) else None unfold start f - distinct : [a] -> [a] - distinct as = +List.distinct : [a] -> [a] +List.distinct as = go i seen acc = match List.at i as with None -> acc Some a -> if Set.contains a seen then go (i + 1) seen acc @@ -139,8 +134,8 @@ namespace List where -- Joins a list of lists in a "fair diagonal" fashion. -- Adapted from the Haskell version written by Luke Palmer. - diagonal : [[a]] -> [a] - diagonal = +List.diagonal : [[a]] -> [a] +List.diagonal = let x = 23 stripe = cases @@ -153,33 +148,33 @@ namespace List where (x +: xs, y +: ys) -> cons (cons x y) (zipCons xs ys) List.join `dot` stripe --- > List.foldb "" (t t2 -> "(" ++ t ++ " " ++ t2 ++ ")") (x -> x) ["Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Gerald", "Henry"] +-- -- > List.foldb "" (t t2 -> "(" ++ t ++ " " ++ t2 ++ ")") (x -> x) ["Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Gerald", "Henry"] --- Sorted maps, represented as a pair of sequences --- Use binary search to do lookups and find insertion points --- This relies on the underlying sequence having efficient --- slicing and concatenation +-- -- Sorted maps, represented as a pair of sequences +-- -- Use binary search to do lookups and find insertion points +-- -- This relies on the underlying sequence having efficient +-- -- slicing and concatenation type Map k v = Map [k] [v] -use Map Map +-- use Map Map -namespace Search where +-- namespace Search where - indexOf : a -> [a] -> Optional Nat - indexOf a s = +Search.indexOf : a -> [a] -> Optional Nat +Search.indexOf a s = ao = Some a Search.exact (i -> ao `compare` List.at i s) 0 (size s) - lubIndexOf' : a -> Nat -> [a] -> Nat - lubIndexOf' a start s = +Search.lubIndexOf' : a -> Nat -> [a] -> Nat +Search.lubIndexOf' a start s = ao = Some a Search.lub (i -> ao `compare` List.at i s) start (size s) 
- lubIndexOf : a -> [a] -> Nat - lubIndexOf a s = lubIndexOf' a 0 s +Search.lubIndexOf : a -> [a] -> Nat +Search.lubIndexOf a s = lubIndexOf' a 0 s - lub : (Nat -> Int) -> Nat -> Nat -> Nat - lub hit bot top = +Search.lub : (Nat -> Int) -> Nat -> Nat -> Nat +Search.lub hit bot top = if bot >= top then top else mid = (bot + top) / 2 @@ -188,8 +183,8 @@ namespace Search where -1 -> lub hit bot mid +1 -> lub hit (mid + 1) top - exact : (Nat -> Int) -> Nat -> Nat -> Optional Nat - exact hit bot top = +Search.exact : (Nat -> Int) -> Nat -> Nat -> Optional Nat +Search.exact hit bot top = if bot >= top then None else mid = (bot + top) / 2 @@ -198,9 +193,9 @@ namespace Search where -1 -> exact hit bot mid +1 -> exact hit (mid + 1) top --- > ex = [0,2,4,6,77,192,3838,12000] --- > List.map (e -> indexOf e ex) ex --- > lubIndexOf 193 ex +-- -- > ex = [0,2,4,6,77,192,3838,12000] +-- -- > List.map (e -> indexOf e ex) ex +-- -- > lubIndexOf 193 ex (|>) : a -> (a -> b) -> b @@ -212,40 +207,41 @@ f <| a = f a id : a -> a id a = a -namespace Map where +-- namespace Map where - empty : Map k v - empty = Map [] [] +Map.empty : Map k v +Map.empty = Map [] [] - singleton : k -> v -> Map k v - singleton k v = Map [k] [v] +Map.singleton : k -> v -> Map k v +Map.singleton k v = Map [k] [v] - fromList : [(k,v)] -> Map k v - fromList kvs = +Map.fromList : [(k,v)] -> Map k v +Map.fromList kvs = go acc i = match List.at i kvs with None -> acc - Some (k,v) -> go (insert k v acc) (i + 1) + Some (k,v) -> go (Map.insert k v acc) (i + 1) go empty 0 - toList : Map k v -> [(k,v)] - toList m = List.zip (keys m) (values m) +Map.toList : Map k v -> [(k,v)] +Map.toList m = List.zip (keys m) (values m) - size : Map k v -> Nat - size s = List.size (keys s) +Map.size : Map k v -> Nat +Map.size s = List.size (keys s) - lookup : k -> Map k v -> Optional v - lookup k = cases +Map.lookup : k -> Map k v -> Optional v +Map.lookup k = cases Map ks vs -> match Search.indexOf k ks with None -> None Some i -> at i 
vs - contains : k -> Map k v -> Boolean - contains k cases Map ks _ -> match Search.indexOf k ks with +Map.contains : k -> Map k v -> Boolean +Map.contains k = cases Map ks _ -> + match Search.indexOf k ks with None -> false _ -> true - insert : k -> v -> Map k v -> Map k v - insert k v = cases Map ks vs -> +Map.insert : k -> v -> Map k v -> Map k v +Map.insert k v = cases Map ks vs -> use Search lubIndexOf i = lubIndexOf k ks match at i ks with @@ -254,17 +250,17 @@ namespace Map where else Map (List.insert i k ks) (List.insert i v vs) None -> Map (ks `snoc` k) (vs `snoc` v) - map : (v -> v2) -> Map k v -> Map k v2 - map f m = Map (keys m) (List.map f (values m)) +Map.map : (v -> v2) -> Map k v -> Map k v2 +Map.map f m = Map (keys m) (List.map f (values m)) - mapKeys : (k -> k2) -> Map k v -> Map k2 v - mapKeys f m = Map (List.map f (keys m)) (values m) +Map.mapKeys : (k -> k2) -> Map k v -> Map k2 v +Map.mapKeys f m = Map (List.map f (keys m)) (values m) - union : Map k v -> Map k v -> Map k v - union = unionWith (_ v -> v) +Map.union : Map k v -> Map k v -> Map k v +Map.union = unionWith (_ v -> v) - unionWith : (v -> v -> v) -> Map k v -> Map k v -> Map k v - unionWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> +Map.unionWith : (v -> v -> v) -> Map k v -> Map k v -> Map k v +Map.unionWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> go i j ko vo = match (at i k1, at j k2) with (None, _) -> Map (ko ++ drop j k2) (vo ++ drop j v2) (_, None) -> Map (ko ++ drop i k1) (vo ++ drop i v1) @@ -283,11 +279,11 @@ namespace Map where go i j' (ko ++ slice j j' k2) (vo ++ slice j j' v2) go 0 0 [] [] - intersect : Map k v -> Map k v -> Map k v - intersect = intersectWith (_ v -> v) +Map.intersect : Map k v -> Map k v -> Map k v +Map.intersect = intersectWith (_ v -> v) - intersectWith : (v -> v -> v2) -> Map k v -> Map k v -> Map k v2 - intersectWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> +Map.intersectWith : (v -> v -> v2) -> Map k v 
-> Map k v -> Map k v2 +Map.intersectWith f m1 m2 = match (m1, m2) with (Map k1 v1, Map k2 v2) -> go i j ko vo = match (at i k1, at j k2) with (None, _) -> Map ko vo (_, None) -> Map ko vo @@ -304,141 +300,131 @@ namespace Map where go i j' ko vo go 0 0 [] [] - keys : Map k v -> [k] - keys = cases Map ks _ -> ks - - values : Map k v -> [v] - values = cases Map _ vs -> vs +Map.keys : Map k v -> [k] +Map.keys = cases Map ks _ -> ks -namespace Multimap where +Map.values : Map k v -> [v] +Map.values = cases Map _ vs -> vs - insert : k -> v -> Map k [v] -> Map k [v] - insert k v m = match Map.lookup k m with +Multimap.insert : k -> v -> Map k [v] -> Map k [v] +Multimap.insert k v m = match Map.lookup k m with None -> Map.insert k [v] m Some vs -> Map.insert k (vs `snoc` v) m - lookup : k -> Map k [v] -> [v] - lookup k m = Optional.orDefault [] (Map.lookup k m) +Multimap.lookup : k -> Map k [v] -> [v] +Multimap.lookup k m = Optional.orDefault [] (Map.lookup k m) type Set a = Set (Map a ()) -use Set Set -namespace Set where +Set.empty : Set k +Set.empty = Set Map.empty - empty : Set k - empty = Set Map.empty +Set.underlying : Set k -> Map k () +Set.underlying = cases Set s -> s - underlying : Set k -> Map k () - underlying = cases Set s -> s +Set.toMap : (k -> v) -> Set k -> Map k v +Set.toMap f = cases Set (Map ks vs) -> Map ks (List.map f ks) - toMap : (k -> v) -> Set k -> Map k v - toMap f = cases Set (Map ks vs) -> Map ks (List.map f ks) +Set.fromList : [k] -> Set k +Set.fromList ks = Set (Map.fromList (List.map (k -> (k,())) ks)) - fromList : [k] -> Set k - fromList ks = Set (Map.fromList (List.map (k -> (k,())) ks)) +Set.toList : Set k -> [k] +Set.toList = cases Set (Map ks _) -> ks - toList : Set k -> [k] - toList = cases Set (Map ks _) -> ks +Set.contains : k -> Set k -> Boolean +Set.contains k = cases Set m -> Map.contains k m - contains : k -> Set k -> Boolean - contains k = cases Set m -> Map.contains k m +Set.insert : k -> Set k -> Set k +Set.insert k = cases 
Set s -> Set (Map.insert k () s) - insert : k -> Set k -> Set k - insert k = cases Set s -> Set (Map.insert k () s) +Set.union : Set k -> Set k -> Set k +Set.union s1 s2 = Set (Map.union (underlying s1) (underlying s2)) - union : Set k -> Set k -> Set k - union s1 s2 = Set (Map.union (underlying s1) (underlying s2)) +Set.size : Set k -> Nat +Set.size s = Map.size (underlying s) - size : Set k -> Nat - size s = Map.size (underlying s) - - intersect : Set k -> Set k -> Set k - intersect s1 s2 = Set (Map.intersect (underlying s1) (underlying s2)) +Set.intersect : Set k -> Set k -> Set k +Set.intersect s1 s2 = Set (Map.intersect (underlying s1) (underlying s2)) type Heap k v = Heap Nat k v [Heap k v] -use Heap Heap - -namespace Heap where - singleton : k -> v -> Heap k v - singleton k v = Heap 1 k v [] +Heap.singleton : k -> v -> Heap k v +Heap.singleton k v = Heap 1 k v [] - size : Heap k v -> Nat - size = cases Heap n _ _ _ -> n +Heap.size : Heap k v -> Nat +Heap.size = cases Heap n _ _ _ -> n - union : Heap k v -> Heap k v -> Heap k v - union h1 h2 = match (h1, h2) with +Heap.union : Heap k v -> Heap k v -> Heap k v +Heap.union h1 h2 = match (h1, h2) with (Heap n k1 v1 hs1, Heap m k2 v2 hs2) -> if k1 >= k2 then Heap (n + m) k1 v1 (cons h2 hs1) else Heap (n + m) k2 v2 (cons h1 hs2) - pop : Heap k v -> Optional (Heap k v) - pop h = +Heap.pop : Heap k v -> Optional (Heap k v) +Heap.pop h = go h subs = use List drop size unsafeAt if size subs == 0 then h - else if size subs == 1 then h `union` unsafeAt 0 subs - else union h (unsafeAt 0 subs) `union` go (unsafeAt 1 subs) (drop 2 subs) + else if size subs == 1 then h `Heap.union` unsafeAt 0 subs + else union h (unsafeAt 0 subs) `Heap.union` go (unsafeAt 1 subs) (drop 2 subs) match List.uncons (children h) with None -> None Some (s0, subs) -> Some (go s0 subs) - children : Heap k v -> [Heap k v] - children = cases Heap _ _ _ cs -> cs +Heap.children : Heap k v -> [Heap k v] +Heap.children = cases Heap _ _ _ cs -> cs - max : 
Heap k v -> (k, v) - max = cases Heap _ k v _ -> (k, v) +Heap.max : Heap k v -> (k, v) +Heap.max = cases Heap _ k v _ -> (k, v) - maxKey : Heap k v -> k - maxKey = cases Heap _ k _ _ -> k +Heap.maxKey : Heap k v -> k +Heap.maxKey = cases Heap _ k _ _ -> k - fromList : [(k,v)] -> Optional (Heap k v) - fromList kvs = +Heap.fromList : [(k,v)] -> Optional (Heap k v) +Heap.fromList kvs = op a b = match a with None -> b Some a -> match b with None -> Some a - Some b -> Some (union a b) + Some b -> Some (Heap.union a b) single = cases - (k, v) -> Some (singleton k v) + (k, v) -> Some (Heap.singleton k v) List.foldb single op None kvs - fromKeys : [a] -> Optional (Heap a a) - fromKeys as = fromList (List.map (a -> (a,a)) as) +Heap.fromKeys : [a] -> Optional (Heap a a) +Heap.fromKeys as = fromList (List.map (a -> (a,a)) as) - sortDescending : [a] -> [a] - sortDescending as = +Heap.sortDescending : [a] -> [a] +Heap.sortDescending as = step = cases None -> None Some h -> Some (max h, pop h) List.unfold (fromKeys as) step |> List.map Tuple.at1 - sort : [a] -> [a] - sort as = sortDescending as |> List.reverse - --- > sort [11,9,8,4,5,6,7,3,2,10,1] +Heap.sort : [a] -> [a] +Heap.sort as = sortDescending as |> List.reverse -namespace Optional where +> sort [11,9,8,4,5,6,7,3,2,10,1] - map : (a -> b) -> Optional a -> Optional b - map f = cases +Optional.map : (a -> b) -> Optional a -> Optional b +Optional.map f = cases None -> None Some a -> Some (f a) - orDefault : a -> Optional a -> a - orDefault a = cases +Optional.orDefault : a -> Optional a -> a +Optional.orDefault a = cases None -> a Some a -> a - orElse : Optional a -> Optional a -> Optional a - orElse a b = match a with +Optional.orElse : Optional a -> Optional a -> Optional a +Optional.orElse a b = match a with None -> b Some _ -> a - flatMap : (a -> Optional b) -> Optional a -> Optional b - flatMap f = cases +Optional.flatMap : (a -> Optional b) -> Optional a -> Optional b +Optional.flatMap f = cases None -> None Some a -> 
f a - map2 : (a -> b -> c) -> Optional a -> Optional b -> Optional c - map2 f oa ob = flatMap (a -> map (f a) ob) oa +Optional.map2 : (a -> b -> c) -> Optional a -> Optional b -> Optional c +Optional.map2 f oa ob = flatMap (a -> map (f a) ob) oa diff --git a/unison-src/transcripts/GitSimple.one-term.output.md b/unison-src/transcripts/GitSimple.one-term.output.md index ab1bf491d1..d70017a05b 100644 --- a/unison-src/transcripts/GitSimple.one-term.output.md +++ b/unison-src/transcripts/GitSimple.one-term.output.md @@ -24,7 +24,7 @@ c = 3 c : ##Nat -.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-d431dff9390554a1/repo.git +.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-ca8a9c15aa27005c/repo.git Done. @@ -32,7 +32,7 @@ c = 3 ------- ```ucm -.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-d431dff9390554a1/repo.git +.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-ca8a9c15aa27005c/repo.git Here's what's changed in the current namespace after the merge: diff --git a/unison-src/transcripts/GitSimple.one-term2.output.md b/unison-src/transcripts/GitSimple.one-term2.output.md index 9391f4d9c2..7c4fc3baee 100644 --- a/unison-src/transcripts/GitSimple.one-term2.output.md +++ b/unison-src/transcripts/GitSimple.one-term2.output.md @@ -26,7 +26,7 @@ c = 3 c : ##Nat -.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-0b775fa442535bdb/repo.git +.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-abd7ae5a095da0e1/repo.git Done. @@ -36,7 +36,7 @@ c = 3 ```ucm ☝️ The namespace .yourLib is empty. 
-.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-0b775fa442535bdb/repo.git +.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-abd7ae5a095da0e1/repo.git Here's what's changed in the current namespace after the merge: diff --git a/unison-src/transcripts/GitSimple.one-type.output.md b/unison-src/transcripts/GitSimple.one-type.output.md index 432913db0d..16a76e2876 100644 --- a/unison-src/transcripts/GitSimple.one-type.output.md +++ b/unison-src/transcripts/GitSimple.one-type.output.md @@ -26,7 +26,7 @@ type Foo = Foo type Foo -.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-202b2f75069902c8/repo.git +.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-192da04d6362dbe8/repo.git Done. @@ -36,7 +36,7 @@ type Foo = Foo ```ucm ☝️ The namespace .yourLib is empty. -.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-202b2f75069902c8/repo.git +.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-192da04d6362dbe8/repo.git Here's what's changed in the current namespace after the merge: From a58d51877756a3248a43fd3216096576a45bf019 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 7 Apr 2021 14:33:12 -0600 Subject: [PATCH 153/225] wip --- parser-typechecker/tests/Suite.hs | 2 + .../tests/Unison/Test/Codebase/Sync12.hs | 86 ----------- .../tests/Unison/Test/Codebase/Upgrade12.hs | 133 ++++++++++++++++++ parser-typechecker/tests/Unison/Test/Ucm.hs | 104 ++++++-------- .../unison-parser-typechecker.cabal | 4 +- yaks/easytest/easytest.cabal | 1 + yaks/easytest/src/EasyTest.hs | 9 ++ 7 files changed, 193 insertions(+), 146 deletions(-) delete mode 100644 parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs create mode 100644 parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs diff --git a/parser-typechecker/tests/Suite.hs 
b/parser-typechecker/tests/Suite.hs index 3a984f1b40..8b46cbe734 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -39,6 +39,7 @@ import qualified Unison.Test.MCode as MCode import qualified Unison.Test.VersionParser as VersionParser import qualified Unison.Test.Git as Git import qualified Unison.Test.GitSimple as GitSimple +import qualified Unison.Test.Codebase.Upgrade12 as Upgrade12 test :: Bool -> Test () test rt = tests @@ -69,6 +70,7 @@ test rt = tests , UriParser.test , Context.test -- , Git.test + , Upgrade12.test , GitSimple.test , TestIO.test , Name.test diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs deleted file mode 100644 index 72f7fb1c11..0000000000 --- a/parser-typechecker/tests/Unison/Test/Codebase/Sync12.hs +++ /dev/null @@ -1,86 +0,0 @@ -{-# LANGUAGE TemplateHaskell #-} -{-# Language QuasiQuotes #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Test.Codebase.Sync12 where - --- import Control.Error (minimumMay) --- import Control.Lens (view, _1) --- import qualified Data.Char as Char --- import Data.Maybe (fromMaybe) --- import EasyTest (Test, scope, tests, io) --- import qualified System.IO.Temp as Temp --- import Unison.Test.Ucm (CodebaseFormat, Runtime) --- import qualified Data.Text as Text --- import qualified U.Util.Text as Text --- import Data.String.Here.Interpolated (iTrim) --- import qualified Unison.Test.Ucm as Ucm --- import Shellmet () --- import System.FilePath (()) --- import UnliftIO (MonadIO (liftIO)) - - --- -- test = scope "Sync" $ tests [typeAlias, topLevelTerm, subNamespace, accessPatch, history] - --- typeAlias :: Test () --- typeAlias = makeTest "typeAlias" do --- tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "typeAlias" --- c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 tmp --- runTranscript c1 [iTrim| --- ```ucm --- .> alias.type ##Nat builtin.Nat --- ``` --- |] --- c2 <- 
runConversion12 c --- runTranscript c2 $ Text.stripMargin [iTrim| --- ```unison --- x :: Nat --- x = 3 --- ``` --- |] - --- topLevelTerm :: Test () --- topLevelTerm = makeTest "topLevelTerm" do --- tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "typeAlias" --- c1 <- initV1Codebase tmp --- runTranscript c1 [iTrim| --- ```unison --- y = 3 --- ``` --- ```ucm --- .> add --- ``` --- |] --- runTranscript c2 [iTrim| --- ```ucm --- .> find --- ``` --- ```unison --- > y --- ``` --- |] - --- subNamespace :: Test () --- subNamespace = makeTest "subNamespace" do --- tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "subNamespace" --- runTranscript tmp "src" $ Text.stripMargin [iTrim| --- ```ucm --- .> alias.type ##Nat builtin.Nat --- ``` --- ```unison --- type myLib.X = X Nat --- ``` --- ```ucm --- .> push ${repo} myLib --- ``` --- |] --- runTranscript tmp "dest" $ Text.stripMargin [iTrim| --- ```ucm --- .> pull ${repo} yourLib --- .> find --- ``` --- ```unison --- > X 3 --- ``` --- |] - diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs new file mode 100644 index 0000000000..d9cfc7f043 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -0,0 +1,133 @@ +{-# LANGUAGE TemplateHaskell #-} +{-# Language QuasiQuotes #-} +{-# LANGUAGE OverloadedStrings #-} + +module Unison.Test.Codebase.Upgrade12 (test) where + +import Data.String.Here.Interpolated (iTrim) +import EasyTest (Test, scope, tests) +import Shellmet () +import qualified Unison.Test.Ucm as Ucm + +test :: Test () +test = scope "codebase.upgrade12" $ + tests [typeAlias, topLevelTerm, subNamespace, accessPatch, accessHistory] + +typeAlias :: Test () +typeAlias = scope "typeAlias" do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + 
Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```unison + x :: Nat + x = 3 + ``` + |] + pure () + +topLevelTerm :: Test () +topLevelTerm = scope "topLevelTerm" do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + y = 3 + ``` + ```ucm + .> add + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```ucm + .> find + ``` + ```unison + > y + ``` + |] + pure () + +subNamespace :: Test () +subNamespace = scope "subNamespace" do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + unique type a.b.C = C Nat + ``` + ```ucm + .> add + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```ucm + .> find + ``` + ```unison + > a.b.C 3 + ``` + |] + pure () + +accessPatch :: Test () +accessPatch = scope "accessPatch" do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + unique type A = A Nat + foo = A.A 3 + ``` + ```ucm + .> add + ``` + ```unison + unique type A = A Nat Nat + foo = A.A 3 3 + ``` + ```ucm + .> update + ``` + ```ucm + .> view.patch patch + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```ucm + .> view.patch patch + ``` + |] + pure () + +accessHistory :: Test () +accessHistory = scope "history" do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + foo = 3 + ``` + ```ucm + .> add + ``` + ```unison + foo = 4 + ``` + ```ucm + .> update + .> history + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```ucm + .> history + .> reset-root #ls8 + .> history + ``` + |] + pure () diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index ef7b3cae25..884f6c58db 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -2,94 +2,82 @@ 
{-# LANGUAGE DerivingVia #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} -module Unison.Test.Ucm where +module Unison.Test.Ucm + ( initCodebase, + runTranscript, + upgradeCodebase, + CodebaseFormat (..), + Runtime (..), + ) +where import Control.Monad.Catch (MonadCatch) -import Control.Monad.State (MonadState, StateT) -import qualified Control.Monad.State as State -import Control.Monad.Writer (MonadWriter, WriterT) -import qualified Control.Monad.Writer as Writer -import qualified Data.Sequence as Seq import qualified Data.Text as Text -import EasyTest import System.FilePath (()) import qualified System.IO.Temp as Temp import U.Util.Text (stripMargin) import Unison.Codebase (CodebasePath) +import qualified Unison.Codebase.Conversion.Upgrade12 as Upgrade12 import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.Init as Codebase.Init import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.Codebase.TranscriptParser as TR import Unison.Prelude import qualified Unison.Util.Pretty as P -import UnliftIO (MonadUnliftIO) data Runtime = Runtime1 | Runtime2 -data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving Show +data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving (Show) -data Codebase = Codebase CodebasePath CodebaseFormat deriving Show +data Codebase = Codebase CodebasePath CodebaseFormat deriving (Show) newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text type TranscriptOutput = String -type Cleanup m = Seq (m ()) - -initCodebase :: (MonadIO m, MonadCatch m, MonadWriter (Cleanup m) m) => CodebaseFormat -> m Codebase +initCodebase :: (MonadIO m, MonadCatch m) => CodebaseFormat -> m Codebase initCodebase fmt = do let cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init tmp <- liftIO $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory ("ucm-test") - void $ - Codebase.Init.createCodebase cbInit tmp >>= \case - Left e -> error 
$ P.toANSI 80 e - Right (cleanup, c) -> do - Writer.tell . Seq.singleton $ cleanup - pure c + Codebase.Init.createCodebase cbInit tmp >>= \case + Left e -> error $ P.toANSI 80 e + Right {} -> pure () pure $ Codebase tmp fmt -upgradeCodebase :: MonadIO m => Codebase -> m Codebase +upgradeCodebase :: (MonadIO m, MonadCatch m) => Codebase -> m Codebase upgradeCodebase = \case c@(Codebase _ CodebaseFormat2) -> error $ show c ++ " already in V2 format." - Codebase path CodebaseFormat1 -> undefined + Codebase path CodebaseFormat1 -> do + Upgrade12.upgradeCodebase path + pure $ Codebase path CodebaseFormat2 --- type UcmTest m = (MonadIO m, MonadWriter (Cleanup m) m, MonadState Args m) --- runTranscript :: --- UcmTest m => --- FilePath -> --- CodebaseName -> --- CodebaseFormat -> --- Runtime -> --- (Args -> Transcript) -> --- m TranscriptOutput --- runTranscript tmpDir codebaseName fmt rt mkTranscript = do --- let configFile = tmpDir ".unisonConfig" --- codebasePath = tmpDir unCodebaseName codebaseName --- let err err = error $ "Parse error: \n" <> show err --- cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init --- codebase <- --- (Codebase.Init.createCodebase cbInit codebasePath) >>= \case --- Left e -> error $ P.toANSI 80 e --- Right (cleanup, c) -> do --- Writer.tell . Seq.singleton $ cleanup --- pure c --- -- parse and run the transcript --- args <- State.get --- flip (either err) (TR.parse "transcript" (stripMargin . unTranscript $ mkTranscript args)) $ \stanzas -> --- liftIO . 
fmap Text.unpack $ --- TR.run --- (case rt of Runtime1 -> Just False; Runtime2 -> Just True) --- codebasePath --- configFile --- stanzas --- codebase - --- runTests :: UcmTest m => m a -> Test a --- runTests a = do --- ((result, cleanup), _args) <- flip State.runStateT mempty $ Writer.runWriterT a --- sequence_ cleanup --- pure result --- pure result +runTranscript :: (Monad m, MonadIO m, MonadCatch m) => Codebase -> Runtime -> Transcript -> m TranscriptOutput +runTranscript (Codebase codebasePath fmt) rt transcript = do + -- this configFile ought to be optional + configFile <- do + tmpDir <- + liftIO $ + Temp.getCanonicalTemporaryDirectory + >>= flip Temp.createTempDirectory ("ucm-test") + pure $ tmpDir ".unisonConfig" + let err err = error $ "Parse error: \n" <> show err + cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init + (cleanup, codebase) <- + liftIO (Codebase.Init.createCodebase cbInit codebasePath) >>= \case + Left e -> error $ P.toANSI 80 e + Right x -> pure x + -- parse and run the transcript + output <- liftIO $ + flip (either err) (TR.parse "transcript" (stripMargin $ unTranscript transcript)) $ \stanzas -> + fmap Text.unpack $ + TR.run + (case rt of Runtime1 -> Just False; Runtime2 -> Just True) + codebasePath + configFile + stanzas + codebase + liftIO cleanup + pure output diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index f85bc7b696..8007e86d39 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: cd5dca2b2c73900fb0dce9cf2cb025b7d8e83bfb8017cb867865652b570da44c +-- hash: 61c993e5093ad077fa54b0671d9b39139c1473e3e0434a0ac452cc4076470523 name: unison-parser-typechecker version: 0.0.0 @@ -295,7 +295,7 @@ executable tests Unison.Test.Codebase.Causal Unison.Test.Codebase.FileCodebase 
Unison.Test.Codebase.Path - Unison.Test.Codebase.Sync12 + Unison.Test.Codebase.Upgrade12 Unison.Test.ColorText Unison.Test.Common Unison.Test.DataDeclaration diff --git a/yaks/easytest/easytest.cabal b/yaks/easytest/easytest.cabal index a20f20e5f4..5fd675ce9d 100644 --- a/yaks/easytest/easytest.cabal +++ b/yaks/easytest/easytest.cabal @@ -58,6 +58,7 @@ library build-depends: async >= 2.1.1, base >= 4.3, + exceptions, mtl >= 2.0.1, containers >= 0.4.0, stm >= 2.4, diff --git a/yaks/easytest/src/EasyTest.hs b/yaks/easytest/src/EasyTest.hs index 6f7e8793f8..a3cad2023a 100644 --- a/yaks/easytest/src/EasyTest.hs +++ b/yaks/easytest/src/EasyTest.hs @@ -9,6 +9,8 @@ import Control.Concurrent import Control.Concurrent.STM import Control.Exception import Control.Monad +import Control.Monad.Catch (MonadCatch, MonadThrow(throwM)) +import qualified Control.Monad.Catch as Catch import Control.Monad.IO.Class import Control.Monad.Reader import Data.List @@ -404,6 +406,13 @@ instance Monad Test where instance MonadFail Test where fail = crash +instance MonadThrow Test where + throwM = Test . 
throwM + +instance MonadCatch Test where + catch (Test m) f = + Test $ Catch.catch m (\e -> case f e of Test m' -> m') + instance Functor Test where fmap = liftM From e76df1d7a2bb0ff5a94591cf121385b9b2049504 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 8 Apr 2021 01:01:33 -0600 Subject: [PATCH 154/225] fix 3 upgrade12 bugs --- .../src/Unison/Codebase/Conversion/Sync12.hs | 16 +- .../Unison/Codebase/Conversion/Upgrade12.hs | 6 + .../tests/Unison/Test/Codebase/Upgrade12.hs | 202 +++++++++--------- parser-typechecker/tests/Unison/Test/Ucm.hs | 33 ++- 4 files changed, 135 insertions(+), 122 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 7da9319a4e..868658f60b 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -65,6 +65,7 @@ import qualified Unison.Type as Type import Unison.Util.Relation (Relation) import qualified Unison.Util.Relation as Relation import Unison.Util.Star3 (Star3 (Star3)) +import Data.Functor (($>)) data Env m a = Env { srcCodebase :: Codebase m Symbol a, @@ -138,9 +139,9 @@ trySync :: trySync t _gc e = do Env _ dest _ <- Reader.ask case e of - C h mc -> - (t $ Codebase.branchExists dest h) >>= \case - True -> pure Sync.PreviouslyDone + C h mc -> do + t (Codebase.branchExists dest h) >>= \case + True -> setBranchStatus h BranchOk $> Sync.PreviouslyDone False -> do c <- t mc runValidateT @_ @n (repairBranch c) >>= \case @@ -370,16 +371,16 @@ filterBranchTermStar :: (S m n, V m n) => Metadata.Star Referent NameSegment -> filterBranchTermStar (Star3 _refs names _mdType md) = do names' <- filterTermNames names let refs' = Relation.dom names' - let mdType' = error "Can I get away with not populating the mdType column?" 
mdTypeValues' <- filterMetadata $ Relation.restrictDom refs' md + let mdType' = Relation.mapRan fst mdTypeValues' pure $ Star3 refs' names' mdType' mdTypeValues' filterBranchTypeStar :: (S m n, V m n) => Metadata.Star Reference.Reference NameSegment -> n (Metadata.Star Reference.Reference NameSegment) filterBranchTypeStar (Star3 _refs names _mdType md) = do names' <- filterTypeNames names let refs' = Relation.dom names' - let mdType' = error "Can I get away with not populating the mdType column?" mdTypeValues' <- filterMetadata $ Relation.restrictDom refs' md + let mdType' = Relation.mapRan fst mdTypeValues' pure $ Star3 refs' names mdType' mdTypeValues' filterMetadata :: (S m n, V m n, Ord r) => Relation r (Metadata.Type, Metadata.Value) -> n (Relation r (Metadata.Type, Metadata.Value)) @@ -568,7 +569,7 @@ instance Ord (Entity m) where data BranchStatus' = BranchOk' | BranchReplaced' Branch.Hash - deriving (Eq, Ord) + deriving (Eq, Ord, Show) toBranchStatus' :: BranchStatus m -> BranchStatus' toBranchStatus' = \case @@ -580,3 +581,6 @@ instance Eq (BranchStatus m) where instance Ord (BranchStatus m) where x `compare` y = toBranchStatus' x `compare` toBranchStatus' y + +instance Show (BranchStatus m) where + show = show . toBranchStatus' \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs index 858d857af1..2a27778709 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs @@ -29,6 +29,8 @@ upgradeCodebase root = do either (liftIO . CT.putPrettyLn) pure =<< runExceptT do (cleanupSrc, srcCB) <- ExceptT $ Codebase.openCodebase FC.init root (cleanupDest, destCB) <- ExceptT $ Codebase.createCodebase SC.init root + -- todo: not have to propagate this stuff, because ucm knows about it intrinsically? 
+ lift $ Codebase.installUcmDependencies destCB destDB <- SC.unsafeGetConnection root let env = Sync12.Env srcCB destCB destDB let initialState = (Sync12.emptyDoneCount, Sync12.emptyErrorCount, Sync12.emptyStatus) @@ -42,6 +44,10 @@ upgradeCodebase root = do (Sync.transformSync (lensStateT Lens._3) sync) Sync12.simpleProgress [rootEntity] + case rootEntity of + Sync12.C _h mc -> lift $ Codebase.putRootBranch destCB =<< Branch <$> mc + _ -> error "The root wasn't a causal?" + lift cleanupSrc lift cleanupDest pure () diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index d9cfc7f043..d872d9a69d 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -5,9 +5,10 @@ module Unison.Test.Codebase.Upgrade12 (test) where import Data.String.Here.Interpolated (iTrim) -import EasyTest (Test, scope, tests) +import EasyTest (Test, scope, tests, io, ok) import Shellmet () import qualified Unison.Test.Ucm as Ucm +import Data.Functor (void) test :: Test () test = scope "codebase.upgrade12" $ @@ -15,119 +16,124 @@ test = scope "codebase.upgrade12" $ typeAlias :: Test () typeAlias = scope "typeAlias" do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```ucm - .> alias.type ##Nat builtin.Nat + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```unison + x :: Nat + x = 3 ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```unison - x :: Nat - x = 3 - ``` - |] - pure () + ok topLevelTerm :: Test () -topLevelTerm = scope "topLevelTerm" do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - y = 3 - ``` +topLevelTerm = 
scope "topLevelTerm" $ do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + y = 3 + ``` + ```ucm + .> add + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| ```ucm - .> add + .> find + ``` + ```unison + > y ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```ucm - .> find - ``` - ```unison - > y - ``` - |] - pure () + ok subNamespace :: Test () subNamespace = scope "subNamespace" do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - unique type a.b.C = C Nat - ``` - ```ucm - .> add - ``` + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + unique type a.b.C = C Nat + ``` + ```ucm + .> add + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```ucm + .> find + ``` + ```unison + > a.b.C 3 + ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```ucm - .> find - ``` - ```unison - > a.b.C 3 - ``` - |] - pure () + ok accessPatch :: Test () accessPatch = scope "accessPatch" do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - unique type A = A Nat - foo = A.A 3 - ``` - ```ucm - .> add - ``` - ```unison - unique type A = A Nat Nat - foo = A.A 3 3 - ``` - ```ucm - .> update - ``` - ```ucm - .> view.patch patch - ``` + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + unique type A = A Nat + foo = A.A 3 + ``` + ```ucm + .> add + ``` + ```unison + unique type A = A Nat Nat + foo = A.A 3 3 + ``` + ```ucm + .> update + ``` + ```ucm + .> view.patch patch + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```ucm + .> view.patch patch + ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 
[iTrim| - ```ucm - .> view.patch patch - ``` - |] - pure () + ok accessHistory :: Test () accessHistory = scope "history" do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - foo = 3 - ``` - ```ucm - .> add - ``` - ```unison - foo = 4 - ``` - ```ucm - .> update - .> history - ``` + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| + ```unison + foo = 3 + ``` + ```ucm + .> add + ``` + ```unison + foo = 4 + ``` + ```ucm + .> update + .> history + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| + ```ucm + .> history + .> reset-root #ls8 + .> history + ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```ucm - .> history - .> reset-root #ls8 - .> history - ``` - |] - pure () + ok \ No newline at end of file diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index 884f6c58db..f29f21421d 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -11,7 +11,6 @@ module Unison.Test.Ucm ) where -import Control.Monad.Catch (MonadCatch) import qualified Data.Text as Text import System.FilePath (()) import qualified System.IO.Temp as Temp @@ -35,42 +34,40 @@ newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via T type TranscriptOutput = String -initCodebase :: (MonadIO m, MonadCatch m) => CodebaseFormat -> m Codebase +initCodebase :: CodebaseFormat -> IO Codebase initCodebase fmt = do let cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init tmp <- - liftIO $ - Temp.getCanonicalTemporaryDirectory - >>= flip Temp.createTempDirectory ("ucm-test") + Temp.getCanonicalTemporaryDirectory + >>= flip Temp.createTempDirectory ("ucm-test") Codebase.Init.createCodebase cbInit tmp >>= \case - Left e -> error $ P.toANSI 80 e + Left e -> fail $ P.toANSI 80 e Right {} -> 
pure () pure $ Codebase tmp fmt -upgradeCodebase :: (MonadIO m, MonadCatch m) => Codebase -> m Codebase +upgradeCodebase :: Codebase -> IO Codebase upgradeCodebase = \case - c@(Codebase _ CodebaseFormat2) -> error $ show c ++ " already in V2 format." + c@(Codebase _ CodebaseFormat2) -> fail $ show c ++ " already in V2 format." Codebase path CodebaseFormat1 -> do Upgrade12.upgradeCodebase path pure $ Codebase path CodebaseFormat2 -runTranscript :: (Monad m, MonadIO m, MonadCatch m) => Codebase -> Runtime -> Transcript -> m TranscriptOutput +runTranscript :: Codebase -> Runtime -> Transcript -> IO TranscriptOutput runTranscript (Codebase codebasePath fmt) rt transcript = do -- this configFile ought to be optional configFile <- do tmpDir <- - liftIO $ - Temp.getCanonicalTemporaryDirectory - >>= flip Temp.createTempDirectory ("ucm-test") + Temp.getCanonicalTemporaryDirectory + >>= flip Temp.createTempDirectory ("ucm-test") pure $ tmpDir ".unisonConfig" - let err err = error $ "Parse error: \n" <> show err + let err err = fail $ "Parse error: \n" <> show err cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init - (cleanup, codebase) <- - liftIO (Codebase.Init.createCodebase cbInit codebasePath) >>= \case - Left e -> error $ P.toANSI 80 e + (closeCodebase, codebase) <- + Codebase.Init.openCodebase cbInit codebasePath >>= \case + Left e -> fail $ P.toANSI 80 e Right x -> pure x -- parse and run the transcript - output <- liftIO $ + output <- flip (either err) (TR.parse "transcript" (stripMargin $ unTranscript transcript)) $ \stanzas -> fmap Text.unpack $ TR.run @@ -79,5 +76,5 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do configFile stanzas codebase - liftIO cleanup + closeCodebase pure output From f982d2bb79aeb9eddf60bab418d16b744c79d8aa Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 8 Apr 2021 02:55:37 -0600 Subject: [PATCH 155/225] fix 3 of 5 upgrade tests; stripMargin still wonky --- codebase2/util/U/Util/Text.hs | 29 +++- 
.../src/Unison/Codebase/Conversion/Sync12.hs | 2 +- .../tests/Unison/Test/Codebase/Upgrade12.hs | 140 +++++++++--------- parser-typechecker/tests/Unison/Test/Ucm.hs | 7 +- 4 files changed, 103 insertions(+), 75 deletions(-) diff --git a/codebase2/util/U/Util/Text.hs b/codebase2/util/U/Util/Text.hs index 9de6621aba..d8ad044c18 100644 --- a/codebase2/util/U/Util/Text.hs +++ b/codebase2/util/U/Util/Text.hs @@ -1,9 +1,13 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE QuasiQuotes #-} + module U.Util.Text where import Data.Maybe (fromMaybe) import Data.Text (Text) import qualified Data.Text as Text import Safe.Foldable (minimumMay) +import Debug.Trace (trace, traceShowId) -- | remove however many spaces prefix all of the lines of the input -- e.g. @@ -17,7 +21,26 @@ import Safe.Foldable (minimumMay) stripMargin :: Text -> Text stripMargin str = let stripLen = - fromMaybe 0 . minimumMay - . map (Text.length . fst . Text.span (== ' ')) + Data.Maybe.fromMaybe 0 . minimumMay + . map (Text.length . fst) + . filter (not . Text.null . snd) + . map (Text.span (== ' ')) + . filter (not . Text.null) $ Text.lines str - in Text.unlines . map (Text.drop stripLen) $ Text.lines str + in Text.unlines . traceShowId. 
map (Text.drop $ traceShowId stripLen) $ traceShowId $ Text.lines str + +test :: Bool +test = + stripMargin x + == y + +x' :: Text +x' = stripMargin x +x :: Text +x = " def foo:" <> "\n" <> + " blah blah" <> "\n" <> + " " + +y :: Text +y = "def foo:" <> "\n" <> + " blah blah" <> "\n" \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 868658f60b..3300c14100 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -486,7 +486,7 @@ simpleProgress = Sync.Progress need done error allDone where -- ignore need need e = liftIO $ putStrLn $ "need " ++ show e - need _ = pure () + -- need _ = pure () done e = do liftIO $ putStrLn $ "done " ++ show e diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index d872d9a69d..b3c659c095 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -19,16 +19,15 @@ typeAlias = scope "typeAlias" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```ucm - .> alias.type ##Nat builtin.Nat - ``` +```ucm +.> alias.type ##Nat builtin.Nat +``` |] c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```unison - x :: Nat - x = 3 - ``` +```unison +x = 3 +``` |] ok @@ -37,21 +36,21 @@ topLevelTerm = scope "topLevelTerm" $ do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - y = 3 - ``` - ```ucm - .> add - ``` +```unison +y = 3 +``` +```ucm +.> add +``` |] c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```ucm - .> find - ``` - ```unison - > y - ``` +```ucm +.> find +``` +```unison +> y +``` |] ok @@ -60,21 +59,24 @@ subNamespace = 
scope "subNamespace" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - unique type a.b.C = C Nat - ``` - ```ucm - .> add - ``` +```ucm +.> alias.type ##Nat builtin.Nat +``` +```unison +unique type a.b.C = C Nat +``` +```ucm +.> add +``` |] c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```ucm - .> find - ``` - ```unison - > a.b.C 3 - ``` +```ucm +.> find +``` +```unison +> a.b.C 3 +``` |] ok @@ -83,29 +85,29 @@ accessPatch = scope "accessPatch" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - unique type A = A Nat - foo = A.A 3 - ``` - ```ucm - .> add - ``` - ```unison - unique type A = A Nat Nat - foo = A.A 3 3 - ``` - ```ucm - .> update - ``` - ```ucm - .> view.patch patch - ``` +```unison +unique type A = A Nat +foo = A.A 3 +``` +```ucm +.> add +``` +```unison +unique type A = A Nat Nat +foo = A.A 3 3 +``` +```ucm +.> update +``` +```ucm +.> view.patch patch +``` |] c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```ucm - .> view.patch patch - ``` +```ucm +.> view.patch patch +``` |] ok @@ -114,26 +116,26 @@ accessHistory = scope "history" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| - ```unison - foo = 3 - ``` - ```ucm - .> add - ``` - ```unison - foo = 4 - ``` - ```ucm - .> update - .> history - ``` +```unison +foo = 3 +``` +```ucm +.> add +``` +```unison +foo = 4 +``` +```ucm +.> update +.> history +``` |] c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| - ```ucm - .> history - .> reset-root #ls8 - .> history - ``` +```ucm +.> history +.> reset-root #ls8 +.> history +``` |] ok \ No newline at end of file diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index f29f21421d..bbaf4c8853 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ 
b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -30,7 +30,9 @@ data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving (Show) data Codebase = Codebase CodebasePath CodebaseFormat deriving (Show) -newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text +newtype Transcript = Transcript {unTranscript :: Text} + deriving Show + deriving (IsString) via Text type TranscriptOutput = String @@ -54,6 +56,7 @@ upgradeCodebase = \case runTranscript :: Codebase -> Runtime -> Transcript -> IO TranscriptOutput runTranscript (Codebase codebasePath fmt) rt transcript = do + traceM $ show transcript -- this configFile ought to be optional configFile <- do tmpDir <- @@ -68,7 +71,7 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do Right x -> pure x -- parse and run the transcript output <- - flip (either err) (TR.parse "transcript" (stripMargin $ unTranscript transcript)) $ \stanzas -> + flip (either err) (TR.parse "transcript" ({-stripMargin $-} unTranscript transcript)) $ \stanzas -> fmap Text.unpack $ TR.run (case rt of Runtime1 -> Just False; Runtime2 -> Just True) From 757d4ef9f7857a5f5bf3e1f59cda3ce91634222e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 8 Apr 2021 10:03:01 -0600 Subject: [PATCH 156/225] all 5 Upgrade12 tests passing --- codebase2/codebase-sync/U/Codebase/Sync.hs | 11 ++++- .../src/Unison/Codebase/Conversion/Sync12.hs | 41 +++++++++++++------ .../tests/Unison/Test/Codebase/Upgrade12.hs | 19 +++++++-- parser-typechecker/tests/Unison/Test/Ucm.hs | 7 +++- 4 files changed, 58 insertions(+), 20 deletions(-) diff --git a/codebase2/codebase-sync/U/Codebase/Sync.hs b/codebase2/codebase-sync/U/Codebase/Sync.hs index c1c8c7c442..f02386c4ec 100644 --- a/codebase2/codebase-sync/U/Codebase/Sync.hs +++ b/codebase2/codebase-sync/U/Codebase/Sync.hs @@ -7,7 +7,12 @@ {-# LANGUAGE TypeOperators #-} module U.Codebase.Sync where +import Control.Monad (when) import Data.Foldable (traverse_) +import Debug.Trace (traceM) + 
+debug :: Bool +debug = False data TrySyncResult h = Missing [h] | Done | PreviouslyDone | NonFatalError deriving Show @@ -27,11 +32,13 @@ data Progress m h = Progress transformProgress :: (forall a. m a -> n a) -> Progress m h -> Progress n h transformProgress f (Progress a b c d) = Progress (f . a) (f . b) (f . c) (f d) -sync :: forall m h. Monad m => Sync m h -> Progress m h -> [h] -> m () +-- the Show constraint is just for debugging +sync :: forall m h. (Monad m, Show h) => Sync m h -> Progress m h -> [h] -> m () sync Sync {..} Progress {..} roots = go roots where go :: [h] -> m () - go (h : hs) = + go (h : hs) = do + when debug (traceM $ "Sync.sync.go " ++ (show $ h : hs)) trySync h >>= \case Missing deps -> traverse_ need deps >> go (deps ++ h : hs) Done -> done h >> go hs diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 3300c14100..21e2a9c21f 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -26,6 +26,7 @@ import Control.Natural (type (~>)) import Data.Bifoldable (bitraverse_) import Data.Foldable (traverse_) import qualified Data.Foldable as Foldable +import Data.Functor (($>)) import qualified Data.List as List import Data.Map (Map) import qualified Data.Map as Map @@ -33,6 +34,7 @@ import Data.Maybe (catMaybes) import qualified Data.Set as Set import Data.Traversable (for) import Database.SQLite.Simple (Connection) +import Debug.Trace (traceM) import U.Codebase.Sqlite.DbId (Generation) import qualified U.Codebase.Sqlite.Queries as Q import U.Codebase.Sync (Sync (Sync), TrySyncResult) @@ -65,7 +67,9 @@ import qualified Unison.Type as Type import Unison.Util.Relation (Relation) import qualified Unison.Util.Relation as Relation import Unison.Util.Star3 (Star3 (Star3)) -import Data.Functor (($>)) + +debug :: Bool +debug = False data Env m a = Env { srcCodebase :: Codebase m 
Symbol a, @@ -98,16 +102,19 @@ data TermStatus | TermMissing | TermMissingType | TermMissingDependencies + deriving Show data DeclStatus = DeclOk | DeclMissing | DeclMissingDependencies + deriving Show data PatchStatus = PatchOk | PatchMissing | PatchReplaced Branch.EditHash + deriving Show data Status m = Status { _branchStatus :: Map Branch.Hash (BranchStatus m), @@ -208,16 +215,24 @@ getPatchStatus :: S m n => Hash -> n (Maybe PatchStatus) getPatchStatus h = use (patchStatus . at h) setTermStatus :: S m n => Hash -> TermStatus -> n () -setTermStatus h s = termStatus . at h .= Just s +setTermStatus h s = do + when debug (traceM $ "setTermStatus " ++ take 10 (show h) ++ " " ++ show s) + termStatus . at h .= Just s setDeclStatus :: S m n => Hash -> DeclStatus -> n () -setDeclStatus h s = declStatus . at h .= Just s +setDeclStatus h s = do + when debug (traceM $ "setDeclStatus " ++ take 10 (show h) ++ " " ++ show s) + declStatus . at h .= Just s setPatchStatus :: S m n => Hash -> PatchStatus -> n () -setPatchStatus h s = patchStatus . at h .= Just s +setPatchStatus h s = do + when debug (traceM $ "setPatchStatus " ++ take 10 (show h) ++ " " ++ show s) + patchStatus . at h .= Just s setBranchStatus :: forall m n. S m n => Branch.Hash -> BranchStatus m -> n () -setBranchStatus h s = branchStatus . at h .= Just s +setBranchStatus h s = do + when debug (traceM $ "setBranchStatus " ++ take 10 (show h) ++ " " ++ show s) + branchStatus . at h .= Just s checkTermComponent :: forall m n a. @@ -246,6 +261,7 @@ checkTermComponent t h n = do Nothing -> Validate.dispute . 
Set.singleton $ D h' n' checkTerm = \case Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ _) | h == h' -> pure () Reference.DerivedId (Reference.Id h' _ n') -> getTermStatus h' >>= \case Just TermOk -> pure () @@ -272,6 +288,7 @@ checkDeclComponent t h n = do let deps = DD.declDependencies decl checkDecl = \case Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ _) | h == h' -> pure () Reference.DerivedId (Reference.Id h' _ n') -> getDeclStatus h' >>= \case Just DeclOk -> pure () @@ -408,9 +425,9 @@ validateTypeReference :: (S m n, V m n) => Reference.Reference -> n Bool validateTypeReference = \case Reference.Builtin {} -> pure True Reference.DerivedId (Reference.Id h _i n) -> - getTermStatus h >>= \case - Nothing -> Validate.refute . Set.singleton $ T h n - Just TermOk -> pure True + getDeclStatus h >>= \case + Nothing -> Validate.refute . Set.singleton $ D h n + Just DeclOk -> pure True Just _ -> pure False filterTypeNames :: (S m n, V m n) => Relation Reference.Reference NameSegment -> n (Relation Reference.Reference NameSegment) @@ -531,10 +548,10 @@ simpleProgress = Sync.Progress need done error allDone printProgress = do (DoneCount b t d p, ErrorCount b' t' d' p', _) <- State.get let ways :: [Maybe String] = - [ Monoid.whenM (b > 0 || b' > 0) (Just $ show b ++ " branches" ++ Monoid.whenM (b' > 0) (" (" ++ show b' ++ "errors)")), - Monoid.whenM (t > 0 || t' > 0) (Just $ show t ++ " branches" ++ Monoid.whenM (t' > 0) (" (" ++ show t' ++ "errors)")), - Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " branches" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ "errors)")), - Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " branches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ "errors)")) + [ Monoid.whenM (b > 0 || b' > 0) (Just $ show b ++ " branches" ++ Monoid.whenM (b' > 0) (" (" ++ show b' ++ " errors)")), + Monoid.whenM (t > 0 || t' > 0) (Just $ show t ++ " terms" ++ Monoid.whenM (t' > 0) (" (" ++ show t' ++ " 
errors)")), + Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " types" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ " errors)")), + Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " patches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ " errors)")) ] liftIO . putStr $ "\rSynced " ++ List.intercalate "," (catMaybes ways) diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index b3c659c095..3f9b36ac95 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -26,6 +26,7 @@ typeAlias = scope "typeAlias" do c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| ```unison +x : Nat x = 3 ``` |] @@ -36,7 +37,7 @@ topLevelTerm = scope "topLevelTerm" $ do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| -```unison +```unison:hide y = 3 ``` ```ucm @@ -75,7 +76,7 @@ unique type a.b.C = C Nat .> find ``` ```unison -> a.b.C 3 +> a.b.C.C 3 ``` |] ok @@ -85,18 +86,23 @@ accessPatch = scope "accessPatch" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| -```unison +```ucm +.> alias.type ##Nat builtin.Nat +``` +```unison:hide unique type A = A Nat foo = A.A 3 ``` ```ucm +.> debug.file .> add ``` -```unison +```unison:hide unique type A = A Nat Nat foo = A.A 3 3 ``` ```ucm +.> debug.file .> update ``` ```ucm @@ -111,6 +117,11 @@ foo = A.A 3 3 |] ok +-- #00k3c9bp6m A +-- #6v94dtbfk1 foo +-- #d3bn4dqp1a A' +-- #p3a21bjjl4 foo' + accessHistory :: Test () accessHistory = scope "history" do void $ io do diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index bbaf4c8853..9081029ef1 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -11,17 +11,17 @@ module Unison.Test.Ucm ) where +import Control.Monad 
(when) import qualified Data.Text as Text import System.FilePath (()) import qualified System.IO.Temp as Temp -import U.Util.Text (stripMargin) import Unison.Codebase (CodebasePath) import qualified Unison.Codebase.Conversion.Upgrade12 as Upgrade12 import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.Init as Codebase.Init import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.Codebase.TranscriptParser as TR -import Unison.Prelude +import Unison.Prelude (IsString, Text, traceM) import qualified Unison.Util.Pretty as P data Runtime = Runtime1 | Runtime2 @@ -35,6 +35,8 @@ newtype Transcript = Transcript {unTranscript :: Text} deriving (IsString) via Text type TranscriptOutput = String +debugTranscriptOutput :: Bool +debugTranscriptOutput = False initCodebase :: CodebaseFormat -> IO Codebase initCodebase fmt = do @@ -80,4 +82,5 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do stanzas codebase closeCodebase + when debugTranscriptOutput $ traceM output pure output From 56ce6a4ce8ae2ebc21cc8be2fa013dd8729a9e63 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 8 Apr 2021 11:34:23 -0600 Subject: [PATCH 157/225] fixed up Text.stripMargin --- codebase2/util/U/Util/String.hs | 11 +- codebase2/util/U/Util/Text.hs | 36 ++-- parser-typechecker/tests/Suite.hs | 2 +- .../tests/Unison/Test/Codebase/Upgrade12.hs | 178 +++++++++--------- parser-typechecker/tests/Unison/Test/Git.hs | 93 ++++----- parser-typechecker/tests/Unison/Test/Ucm.hs | 8 +- 6 files changed, 158 insertions(+), 170 deletions(-) diff --git a/codebase2/util/U/Util/String.hs b/codebase2/util/U/Util/String.hs index 83b225c3cc..81ccf74066 100644 --- a/codebase2/util/U/Util/String.hs +++ b/codebase2/util/U/Util/String.hs @@ -1,12 +1,7 @@ module U.Util.String where -import Data.Maybe (fromMaybe) -import Safe.Foldable (minimumMay) +import qualified Data.Text as Text +import qualified U.Util.Text as Text stripMargin :: String -> String -stripMargin str 
= - let stripLen = - fromMaybe 0 . minimumMay - . map (length . fst . span (== ' ')) - $ lines str - in unlines . map (drop stripLen) $ lines str \ No newline at end of file +stripMargin = Text.unpack . Text.stripMargin . Text.pack \ No newline at end of file diff --git a/codebase2/util/U/Util/Text.hs b/codebase2/util/U/Util/Text.hs index d8ad044c18..2a3233095a 100644 --- a/codebase2/util/U/Util/Text.hs +++ b/codebase2/util/U/Util/Text.hs @@ -1,13 +1,15 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE QuasiQuotes #-} module U.Util.Text where +import qualified Data.Char as Char import Data.Maybe (fromMaybe) import Data.Text (Text) import qualified Data.Text as Text import Safe.Foldable (minimumMay) -import Debug.Trace (trace, traceShowId) -- | remove however many spaces prefix all of the lines of the input -- e.g. @@ -22,25 +24,15 @@ stripMargin :: Text -> Text stripMargin str = let stripLen = Data.Maybe.fromMaybe 0 . minimumMay - . map (Text.length . fst) - . filter (not . Text.null . snd) - . map (Text.span (== ' ')) - . filter (not . Text.null) + . map (Text.length . fst . Text.span (== ' ')) + . filter (not . Text.all (Char.isSpace)) $ Text.lines str - in Text.unlines . traceShowId. map (Text.drop $ traceShowId stripLen) $ traceShowId $ Text.lines str - -test :: Bool -test = - stripMargin x - == y - -x' :: Text -x' = stripMargin x -x :: Text -x = " def foo:" <> "\n" <> - " blah blah" <> "\n" <> - " " - -y :: Text -y = "def foo:" <> "\n" <> - " blah blah" <> "\n" \ No newline at end of file + dropFirstIf f = \case + h : t | f h -> t + x -> x + dropLastIf f = reverse . dropFirstIf f . reverse + in Text.unlines + . dropLastIf Text.null + . dropFirstIf Text.null + . 
map (Text.drop stripLen) + $ Text.lines str diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs index 8b46cbe734..a9104a3fa2 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -69,7 +69,7 @@ test rt = tests , Typechecker.test , UriParser.test , Context.test - -- , Git.test + , Git.test , Upgrade12.test , GitSimple.test , TestIO.test diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index 3f9b36ac95..08f99f0ca1 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -4,7 +4,7 @@ module Unison.Test.Codebase.Upgrade12 (test) where -import Data.String.Here.Interpolated (iTrim) +import Data.String.Here.Interpolated (i) import EasyTest (Test, scope, tests, io, ok) import Shellmet () import qualified Unison.Test.Ucm as Ucm @@ -18,17 +18,17 @@ typeAlias :: Test () typeAlias = scope "typeAlias" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| -```ucm -.> alias.type ##Nat builtin.Nat -``` + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` |] c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| -```unison -x : Nat -x = 3 -``` + Ucm.runTranscript c2 Ucm.Runtime1 [i| + ```unison + x : Nat + x = 3 + ``` |] ok @@ -36,22 +36,22 @@ topLevelTerm :: Test () topLevelTerm = scope "topLevelTerm" $ do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| -```unison:hide -y = 3 -``` -```ucm -.> add -``` + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```unison:hide + y = 3 + ``` + ```ucm + .> add + ``` |] c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| -```ucm -.> find -``` -```unison -> y -``` + Ucm.runTranscript c2 Ucm.Runtime1 [i| + ```ucm + .> find + ``` + ```unison + > y + ``` 
|] ok @@ -59,25 +59,25 @@ subNamespace :: Test () subNamespace = scope "subNamespace" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| -```ucm -.> alias.type ##Nat builtin.Nat -``` -```unison -unique type a.b.C = C Nat -``` -```ucm -.> add -``` + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + ```unison + unique type a.b.C = C Nat + ``` + ```ucm + .> add + ``` |] c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| -```ucm -.> find -``` -```unison -> a.b.C.C 3 -``` + Ucm.runTranscript c2 Ucm.Runtime1 [i| + ```ucm + .> find + ``` + ```unison + > a.b.C.C 3 + ``` |] ok @@ -85,35 +85,35 @@ accessPatch :: Test () accessPatch = scope "accessPatch" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| -```ucm -.> alias.type ##Nat builtin.Nat -``` -```unison:hide -unique type A = A Nat -foo = A.A 3 -``` -```ucm -.> debug.file -.> add -``` -```unison:hide -unique type A = A Nat Nat -foo = A.A 3 3 -``` -```ucm -.> debug.file -.> update -``` -```ucm -.> view.patch patch -``` + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + ```unison:hide + unique type A = A Nat + foo = A.A 3 + ``` + ```ucm + .> debug.file + .> add + ``` + ```unison:hide + unique type A = A Nat Nat + foo = A.A 3 3 + ``` + ```ucm + .> debug.file + .> update + ``` + ```ucm + .> view.patch patch + ``` |] c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| -```ucm -.> view.patch patch -``` + Ucm.runTranscript c2 Ucm.Runtime1 [i| + ```ucm + .> view.patch patch + ``` |] ok @@ -126,27 +126,27 @@ accessHistory :: Test () accessHistory = scope "history" do void $ io do c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [iTrim| -```unison -foo = 3 -``` -```ucm -.> add -``` -```unison -foo = 4 -``` -```ucm -.> update -.> history -``` + Ucm.runTranscript c1 Ucm.Runtime1 
[i| + ```unison + foo = 3 + ``` + ```ucm + .> add + ``` + ```unison + foo = 4 + ``` + ```ucm + .> update + .> history + ``` |] c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [iTrim| -```ucm -.> history -.> reset-root #ls8 -.> history -``` - |] + Ucm.runTranscript c2 Ucm.Runtime1 [i| + ```ucm + .> history + .> reset-root #ls8 + .> history + ``` + |] ok \ No newline at end of file diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs index 9daf4691b1..b58bb62774 100644 --- a/parser-typechecker/tests/Unison/Test/Git.hs +++ b/parser-typechecker/tests/Unison/Test/Git.hs @@ -1,13 +1,14 @@ {-# Language OverloadedStrings #-} {-# Language QuasiQuotes #-} +{-# LANGUAGE ViewPatterns #-} module Unison.Test.Git where import EasyTest import Data.List (intercalate) import Data.List.Split (splitOn) import qualified Data.Sequence as Seq -import Data.String.Here (iTrim) +import Data.String.Here (i) import Unison.Prelude import qualified Data.Text as Text import qualified System.IO.Temp as Temp @@ -57,20 +58,20 @@ syncComplete = scope "syncComplete" $ do (_, cleanup, codebase) <- io $ initCodebase tmp "codebase" - runTranscript_ tmp codebase [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ -``` -```unison -pushComplete.a.x = 3 -pushComplete.b.c.y = x + 1 -``` -```ucm -.> add -.> history pushComplete.b -``` -|] + runTranscript_ tmp codebase [i| + ```ucm:hide + .builtin> alias.type ##Nat Nat + .builtin> alias.term ##Nat.+ Nat.+ + ``` + ```unison + pushComplete.a.x = 3 + pushComplete.b.c.y = x + 1 + ``` + ```ucm + .> add + .> history pushComplete.b + ``` + |] -- sync pushComplete.b to targetDir -- observe that pushComplete.b.c and x exist @@ -118,7 +119,7 @@ syncTestResults = scope "syncTestResults" $ do targetDir <- io $ Temp.createTempDirectory tmp "target" (_, cleanup, codebase) <- io $ initCodebase tmp "codebase" - runTranscript_ tmp codebase [iTrim| + runTranscript_ tmp 
codebase [i| ```ucm .> builtins.merge ``` @@ -181,26 +182,26 @@ testPull = scope "pull" $ do io $ "git" ["init", "--bare", "--initial-branch=master", Text.pack repo] -- run author/push transcript - runTranscript_ tmp authorCodebase [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ -``` -```unison -unique type outside.A = A Nat -unique type outside.B = B Nat Nat -outside.c = 3 -outside.d = 4 + runTranscript_ tmp authorCodebase [i| + ```ucm:hide + .builtin> alias.type ##Nat Nat + .builtin> alias.term ##Nat.+ Nat.+ + ``` + ```unison + unique type outside.A = A Nat + unique type outside.B = B Nat Nat + outside.c = 3 + outside.d = 4 -unique type inside.X = X outside.A -inside.y = c + c -``` -```ucm -.myLib> debug.file -.myLib> add -.myLib> push ${repo} -``` -|] + unique type inside.X = X outside.A + inside.y = c + c + ``` + ```ucm + .myLib> debug.file + .myLib> add + .myLib> push ${repo} + ``` + |] -- check out the resulting repo so we can inspect it io $ "git" ["clone", Text.pack repo, Text.pack $ tmp "repo" ] @@ -210,14 +211,14 @@ inside.y = c + c scope (makeTitle path) $ io (doesFileExist $ tmp "repo" path) >>= expect -- run user/pull transcript - runTranscript_ tmp userCodebase [iTrim| -```ucm:hide -.builtin> alias.type ##Nat Nat -.builtin> alias.term ##Nat.+ Nat.+ -``` -```ucm -.yourLib> pull ${repo}:.inside -``` + runTranscript_ tmp userCodebase [i| + ```ucm:hide + .builtin> alias.type ##Nat Nat + .builtin> alias.term ##Nat.+ Nat.+ + ``` + ```ucm + .yourLib> pull ${repo}:.inside + ``` |] -- inspect user codebase @@ -302,7 +303,7 @@ initCodebase tmpDir name = do -- run a transcript on an existing codebase runTranscript_ :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> String -> m () -runTranscript_ tmpDir c transcript = do +runTranscript_ tmpDir c (stripMargin -> transcript) = do let configFile = tmpDir ".unisonConfig" -- transcript runner wants a "current directory" for I guess writing scratch files? 
let cwd = tmpDir "cwd" @@ -356,7 +357,7 @@ testPush = scope "push" $ do removeDirectoryRecursive tmp where - setupTranscript = stripMargin [iTrim| + setupTranscript = [i| ```ucm .> builtins.merge ``` @@ -395,7 +396,7 @@ testPush = scope "push" $ do ``` |] - pushTranscript repo = stripMargin [iTrim| + pushTranscript repo = [i| ```ucm .foo.inside> push ${repo} ``` diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index 9081029ef1..003f275969 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -23,6 +23,7 @@ import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.Codebase.TranscriptParser as TR import Unison.Prelude (IsString, Text, traceM) import qualified Unison.Util.Pretty as P +import U.Util.Text (stripMargin) data Runtime = Runtime1 | Runtime2 @@ -31,10 +32,10 @@ data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving (Show) data Codebase = Codebase CodebasePath CodebaseFormat deriving (Show) newtype Transcript = Transcript {unTranscript :: Text} - deriving Show - deriving (IsString) via Text + deriving (IsString, Show) via Text type TranscriptOutput = String + debugTranscriptOutput :: Bool debugTranscriptOutput = False @@ -58,7 +59,6 @@ upgradeCodebase = \case runTranscript :: Codebase -> Runtime -> Transcript -> IO TranscriptOutput runTranscript (Codebase codebasePath fmt) rt transcript = do - traceM $ show transcript -- this configFile ought to be optional configFile <- do tmpDir <- @@ -73,7 +73,7 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do Right x -> pure x -- parse and run the transcript output <- - flip (either err) (TR.parse "transcript" ({-stripMargin $-} unTranscript transcript)) $ \stanzas -> + flip (either err) (TR.parse "transcript" (stripMargin $ unTranscript transcript)) $ \stanzas -> fmap Text.unpack $ TR.run (case rt of Runtime1 -> Just False; Runtime2 -> Just True) From 
04f5470ef439837b6d59c65bb119834dae4e2b8e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 8 Apr 2021 11:34:38 -0600 Subject: [PATCH 158/225] remove some debug prints --- .../src/Unison/Codebase/Conversion/Sync12.hs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 21e2a9c21f..365b42cba2 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -502,10 +502,10 @@ simpleProgress :: MonadState (ProgressState m) n => MonadIO n => Sync.Progress n simpleProgress = Sync.Progress need done error allDone where -- ignore need - need e = liftIO $ putStrLn $ "need " ++ show e - -- need _ = pure () + -- need e = liftIO $ putStrLn $ "need " ++ show e + need _ = pure () done e = do - liftIO $ putStrLn $ "done " ++ show e + -- liftIO $ putStrLn $ "done " ++ show e case e of C {} -> _1 . doneBranches += 1 @@ -515,7 +515,7 @@ simpleProgress = Sync.Progress need done error allDone printProgress error e = do - liftIO $ putStrLn $ "error " ++ show e + -- liftIO $ putStrLn $ "error " ++ show e case e of C {} -> _2 . errorBranches += 1 T {} -> _2 . 
errorTerms += 1 From e7c9c8d516d3add254125eba6cda2b23ec67d24d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 8 Apr 2021 13:30:50 -0600 Subject: [PATCH 159/225] moved installUcmDependencies + putRootBranch calls --- parser-typechecker/src/Unison/Codebase.hs | 1 - .../Unison/Codebase/Conversion/Upgrade12.hs | 2 -- .../src/Unison/Codebase/FileCodebase.hs | 19 ++++--------------- parser-typechecker/tests/Suite.hs | 2 ++ .../tests/Unison/Test/Codebase/Upgrade12.hs | 16 +++++++++------- parser-typechecker/tests/Unison/Test/Ucm.hs | 10 +++++++++- 6 files changed, 24 insertions(+), 26 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index f2dec20577..947f2e5aca 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -136,7 +136,6 @@ installUcmDependencies c = do [Builtin.builtinTermsSrc Parser.Intrinsic] mempty) addDefsToCodebase c uf - putRootBranch c (Branch.one Branch.empty0) -- Feel free to refactor this to use some other type than TypecheckedUnisonFile -- if it makes sense to later. diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs index 2a27778709..10253da988 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs @@ -29,8 +29,6 @@ upgradeCodebase root = do either (liftIO . CT.putPrettyLn) pure =<< runExceptT do (cleanupSrc, srcCB) <- ExceptT $ Codebase.openCodebase FC.init root (cleanupDest, destCB) <- ExceptT $ Codebase.createCodebase SC.init root - -- todo: not have to propagate this stuff, because ucm knows about it intrinsically? 
- lift $ Codebase.installUcmDependencies destCB destDB <- SC.unsafeGetConnection root let env = Sync12.Env srcCB destCB destDB let initialState = (Sync12.emptyDoneCount, Sync12.emptyErrorCount, Sync12.emptyStatus) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index 971d8956d4..d18f82d806 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -17,9 +17,7 @@ import Control.Exception.Safe (MonadCatch, catchIO) import qualified Data.Set as Set import qualified Data.Text as Text import qualified Data.Text.IO as TextIO -import System.Directory (canonicalizePath, getHomeDirectory) -import System.Environment (getProgName) -import System.Exit (exitFailure, exitSuccess) +import System.Directory (canonicalizePath) import System.FilePath (takeFileName) import Unison.Codebase (BuiltinAnnotation, Codebase (Codebase), CodebasePath) import qualified Unison.Codebase as Codebase @@ -27,17 +25,10 @@ import Unison.Codebase.Branch (Branch) import qualified Unison.Codebase.Branch as Branch import Control.Monad.Except (ExceptT, runExceptT, throwError) import Control.Monad.Extra ((||^)) -import qualified Data.Set as Set -import qualified Data.Text as Text -import qualified Data.Text.IO as TextIO -import System.Directory (canonicalizePath) -import System.FilePath (takeFileName, ()) +import System.FilePath (()) import qualified U.Util.Cache as Cache -import Unison.Codebase (BuiltinAnnotation, Codebase (Codebase), CodebasePath) -import qualified Unison.Codebase as Codebase import qualified Unison.Codebase.Init as Codebase -import Unison.Codebase.Branch (Branch, headHash) -import qualified Unison.Codebase.Branch as Branch +import Unison.Codebase.Branch (headHash) import Unison.Codebase.Editor.Git (gitIn, gitTextIn, pullBranch, withIOError, withStatus) import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace, RemoteRepo (GitRepo), 
printRepo) import Unison.Codebase.FileCodebase.Common @@ -98,8 +89,6 @@ import qualified Unison.Util.Pretty as P import qualified Unison.Util.TQueue as TQueue import Unison.Util.Timing (time) import Unison.Var (Var) -import UnliftIO (MonadUnliftIO) -import Control.Concurrent (forkIO, killThread) import UnliftIO.Directory (createDirectoryIfMissing, doesDirectoryExist) import UnliftIO.STM (atomically) @@ -128,7 +117,7 @@ createCodebase dir = ifM (pure $ Left Codebase.CreateCodebaseAlreadyExists) (do codebase <- codebase1 @m @Symbol @Ann Cache.nullCache V1.formatSymbol formatAnn dir - Codebase.installUcmDependencies codebase + Codebase.putRootBranch codebase Branch.empty pure $ Right (pure (), codebase)) -- builds a `Codebase IO v a`, given serializers for `v` and `a` diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs index a9104a3fa2..9603b5df03 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -40,6 +40,7 @@ import qualified Unison.Test.VersionParser as VersionParser import qualified Unison.Test.Git as Git import qualified Unison.Test.GitSimple as GitSimple import qualified Unison.Test.Codebase.Upgrade12 as Upgrade12 +import qualified Unison.Test.GitSimple2 as GitSimple2 test :: Bool -> Test () test rt = tests @@ -72,6 +73,7 @@ test rt = tests , Git.test , Upgrade12.test , GitSimple.test + , GitSimple2.test , TestIO.test , Name.test , VersionParser.test diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index 08f99f0ca1..8daf29fe69 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -4,11 +4,11 @@ module Unison.Test.Codebase.Upgrade12 (test) where +import Data.Functor (void) import Data.String.Here.Interpolated (i) -import EasyTest (Test, scope, tests, io, ok) +import EasyTest (Test, io, ok, scope, tests) import Shellmet () 
import qualified Unison.Test.Ucm as Ucm -import Data.Functor (void) test :: Test () test = scope "codebase.upgrade12" $ @@ -21,15 +21,17 @@ typeAlias = scope "typeAlias" do Ucm.runTranscript c1 Ucm.Runtime1 [i| ```ucm .> alias.type ##Nat builtin.Nat + .> history + .> history builtin ``` |] c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [i| - ```unison - x : Nat - x = 3 - ``` - |] + ```unison + x : Nat + x = 3 + ``` + |] ok topLevelTerm :: Test () diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index 003f275969..fc8d385e06 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -4,10 +4,12 @@ module Unison.Test.Ucm ( initCodebase, + deleteCodebase, runTranscript, upgradeCodebase, CodebaseFormat (..), Runtime (..), + Transcript (..), ) where @@ -24,6 +26,8 @@ import qualified Unison.Codebase.TranscriptParser as TR import Unison.Prelude (IsString, Text, traceM) import qualified Unison.Util.Pretty as P import U.Util.Text (stripMargin) +import System.Directory (removeDirectoryRecursive) +import qualified Unison.Codebase as Codebase data Runtime = Runtime1 | Runtime2 @@ -32,7 +36,7 @@ data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving (Show) data Codebase = Codebase CodebasePath CodebaseFormat deriving (Show) newtype Transcript = Transcript {unTranscript :: Text} - deriving (IsString, Show) via Text + deriving (IsString, Show, Semigroup) via Text type TranscriptOutput = String @@ -50,6 +54,9 @@ initCodebase fmt = do Right {} -> pure () pure $ Codebase tmp fmt +deleteCodebase :: Codebase -> IO () +deleteCodebase (Codebase path _) = removeDirectoryRecursive path + upgradeCodebase :: Codebase -> IO Codebase upgradeCodebase = \case c@(Codebase _ CodebaseFormat2) -> fail $ show c ++ " already in V2 format." 
@@ -71,6 +78,7 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do Codebase.Init.openCodebase cbInit codebasePath >>= \case Left e -> fail $ P.toANSI 80 e Right x -> pure x + Codebase.installUcmDependencies codebase -- parse and run the transcript output <- flip (either err) (TR.parse "transcript" (stripMargin $ unTranscript transcript)) $ \stanzas -> From 9b95f43c710eeba1dc3ce48d9491e262dfea7ffd Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 8 Apr 2021 23:39:38 -0600 Subject: [PATCH 160/225] consolidate Test/GitSimple2 into GitSimple --- parser-typechecker/tests/Suite.hs | 2 - .../tests/Unison/Test/GitSimple.hs | 353 ++++++++---------- parser-typechecker/tests/Unison/Test/Ucm.hs | 20 +- 3 files changed, 171 insertions(+), 204 deletions(-) diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs index 9603b5df03..a9104a3fa2 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -40,7 +40,6 @@ import qualified Unison.Test.VersionParser as VersionParser import qualified Unison.Test.Git as Git import qualified Unison.Test.GitSimple as GitSimple import qualified Unison.Test.Codebase.Upgrade12 as Upgrade12 -import qualified Unison.Test.GitSimple2 as GitSimple2 test :: Bool -> Test () test rt = tests @@ -73,7 +72,6 @@ test rt = tests , Git.test , Upgrade12.test , GitSimple.test - , GitSimple2.test , TestIO.test , Name.test , VersionParser.test diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 0ef04b8c15..92257b458b 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -3,181 +3,175 @@ module Unison.Test.GitSimple where -import Data.String.Here (iTrim) import qualified Data.Text as Text import EasyTest import Shellmet () import System.Directory (removeDirectoryRecursive) import System.FilePath (()) import qualified System.IO.Temp as Temp -import 
Unison.Codebase (Codebase, CodebasePath) -import qualified Unison.Codebase.FileCodebase as FC -import qualified Unison.Codebase.TranscriptParser as TR -import Unison.Parser (Ann) import Unison.Prelude -import Unison.Symbol (Symbol) -import qualified Unison.Parser as Parser -import qualified Unison.Codebase.Init as Codebase -import qualified Unison.Codebase.SqliteCodebase as SC +import Unison.Test.Ucm (CodebaseFormat, Transcript) +import qualified Unison.Test.Ucm as Ucm +import Data.String.Here.Interpolated (i) writeTranscriptOutput :: Bool writeTranscriptOutput = True test :: Test () -test = scope "git-simple" . tests $ flip map [(FC.init, "fc"), (SC.init, "fc")] - \(cbInit, name) -> scope name $ tests [ - pushPullTest cbInit "one-term" +test = scope "git-simple" . tests $ + flip map [(Ucm.CodebaseFormat1 , "fc"), (Ucm.CodebaseFormat2, "sc")] + \(fmt, name) -> scope name $ tests [ + pushPullTest "one-term" fmt -- simplest-author - (\repo -> [iTrim| -```unison -c = 3 -``` -```ucm -.> debug.file -.> add -.> push ${repo} -``` -|]) + (\repo -> [i| + ```unison + c = 3 + ``` + ```ucm + .> debug.file + .> add + .> push ${repo} + ``` + |]) -- simplest-user - (\repo -> [iTrim| -```ucm -.> pull ${repo} -.> alias.term ##Nat.+ + -``` -```unison -> #msp7bv40rv + 1 -``` -|]) + (\repo -> [i| + ```ucm + .> pull ${repo} + .> alias.term ##Nat.+ + + ``` + ```unison + > #msp7bv40rv + 1 + ``` + |]) , - pushPullTest cbInit "one-term2" + pushPullTest "one-term2" fmt -- simplest-author - (\repo -> [iTrim| -```unison -c = 3 -``` -```ucm -.> debug.file -.myLib> add -.myLib> push ${repo} -``` -|]) + (\repo -> [i| + ```unison + c = 3 + ``` + ```ucm + .> debug.file + .myLib> add + .myLib> push ${repo} + ``` + |]) -- simplest-user - (\repo -> [iTrim| -```ucm -.yourLib> pull ${repo} -``` -```unison -> c -``` -|]) + (\repo -> [i| + ```ucm + .yourLib> pull ${repo} + ``` + ```unison + > c + ``` + |]) , - pushPullTest cbInit "one-type" + pushPullTest "one-type" fmt -- simplest-author - (\repo -> 
[iTrim| -```unison -type Foo = Foo -``` -```ucm -.myLib> debug.file -.myLib> add -.myLib> push ${repo} -``` -|]) + (\repo -> [i| + ```unison + type Foo = Foo + ``` + ```ucm + .myLib> debug.file + .myLib> add + .myLib> push ${repo} + ``` + |]) -- simplest-user - (\repo -> [iTrim| -```ucm -.yourLib> pull ${repo} -``` -```unison -> Foo.Foo -``` -|]) + (\repo -> [i| + ```ucm + .yourLib> pull ${repo} + ``` + ```unison + > Foo.Foo + ``` + |]) , - pushPullTest cbInit "patching" - (\repo -> [iTrim| -```ucm -.myLib> alias.term ##Nat.+ + -``` -```unison -improveNat x = x + 3 -``` -```ucm -.myLib> add -.myLib> ls -.myLib> move.namespace .myLib .workaround1552.myLib.v1 -.workaround1552.myLib> ls -.workaround1552.myLib> fork v1 v2 -.workaround1552.myLib.v2> -``` -```unison -improveNat x = x + 100 -``` -```ucm -.workaround1552.myLib.v2> update -.workaround1552.myLib> push ${repo} -``` + pushPullTest "patching" fmt + (\repo -> [i| + ```ucm + .myLib> alias.term ##Nat.+ + + ``` + ```unison + improveNat x = x + 3 + ``` + ```ucm + .myLib> add + .myLib> ls + .myLib> move.namespace .myLib .workaround1552.myLib.v1 + .workaround1552.myLib> ls + .workaround1552.myLib> fork v1 v2 + .workaround1552.myLib.v2> + ``` + ```unison + improveNat x = x + 100 + ``` + ```ucm + .workaround1552.myLib.v2> update + .workaround1552.myLib> push ${repo} + ``` |]) - (\repo -> [iTrim| -```ucm -.myApp> pull ${repo}:.v1 external.yourLib -.myApp> alias.term ##Nat.* * -```` -```unison -> greatApp = improveNat 5 * improveNat 6 -``` -```ucm -.myApp> add -.myApp> pull ${repo}:.v2 external.yourLib -``` -```unison -> greatApp = improveNat 5 * improveNat 6 -``` -```ucm -.myApp> patch external.yourLib.patch -``` -```unison -> greatApp = improveNat 5 * improveNat 6 -``` + (\repo -> [i| + ```ucm + .myApp> pull ${repo}:.v1 external.yourLib + .myApp> alias.term ##Nat.* * + ```` + ```unison + greatApp = improveNat 5 * improveNat 6 + > greatApp + ``` + ```ucm + .myApp> add + .myApp> pull ${repo}:.v2 external.yourLib + ``` + 
```unison + > greatApp + ``` + ```ucm + .myApp> patch external.yourLib.patch + ``` + ```unison + > greatApp + ``` |]) --- , - --- pushPullTest "regular" --- (\repo -> [iTrim| --- ```ucm:hide --- .builtin> alias.type ##Nat Nat --- .builtin> alias.term ##Nat.+ Nat.+ --- ``` --- ```unison --- unique type outside.A = A Nat --- unique type outside.B = B Nat Nat --- outside.c = 3 --- outside.d = 4 - --- unique type inside.X = X outside.A --- inside.y = c + c --- ``` --- ```ucm --- .myLib> debug.file --- .myLib> add --- .myLib> push ${repo} --- ```|]) - --- (\repo -> [iTrim| --- ```ucm:hide --- .builtin> alias.type ##Nat Nat --- .builtin> alias.term ##Nat.+ Nat.+ --- ``` --- ```ucm --- .yourLib> pull ${repo}:.inside --- ``` --- ```unison --- > y + #msp7bv40rv + 1 --- ``` --- |]) - + -- , + -- pushPullTest "regular" fmt + -- (\repo -> [i| + -- ```ucm:hide + -- .builtin> alias.type ##Nat Nat + -- .builtin> alias.term ##Nat.+ Nat.+ + -- ``` + -- ```unison + -- unique type outside.A = A Nat + -- unique type outside.B = B Nat Nat + -- outside.c = 3 + -- outside.d = 4 + + -- unique type inside.X = X outside.A + -- inside.y = c + c + -- ``` + -- ```ucm + -- .myLib> debug.file + -- .myLib> add + -- .myLib> push ${repo} + -- ``` + -- |]) + + -- (\repo -> [i| + -- ```ucm:hide + -- .builtin> alias.type ##Nat Nat + -- .builtin> alias.term ##Nat.+ Nat.+ + -- ``` + -- ```ucm + -- .yourLib> pull ${repo}:.inside + -- ``` + -- ```unison + -- > y + #msp7bv40rv + 1 + -- ``` + -- |]) ] - -- type inside.X#skinr6rvg7 -- type outside.A#l2fmn9sdbk -- type outside.B#nsgsq4ot5u @@ -191,32 +185,14 @@ improveNat x = x + 100 -- .myLib.outside.A> #0e3g041m56 -- .myLib.outside.B> #j57m94daqi - -pushPullTest :: Codebase.Init IO Symbol Parser.Ann -> String -> (FilePath -> String) -> (FilePath -> String) -> Test () -pushPullTest cbInit name authorScript userScript = scope name $ do - -- put all our junk into here - tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory 
("git-simple-" ++ name) - - -- initialize author and user codebases - (_authorDir, closeAuthor, authorCodebase) <- io $ initCodebase cbInit tmp "author" - (_userDir, closeUser, userCodebase) <- io $ initCodebase cbInit tmp "user" - - -- initialize git repo - let repo = tmp "repo.git" - io $ "git" ["init", "--bare", Text.pack repo] - - -- run author/push transcript - authorOutput <- runTranscript tmp authorCodebase (authorScript repo) - - -- check out the resulting repo so we can inspect it - io $ "git" ["clone", Text.pack repo, Text.pack $ tmp "repo" ] - - -- run user/pull transcript - userOutput <- runTranscript tmp userCodebase (userScript repo) - +pushPullTest :: String -> CodebaseFormat -> (FilePath -> Transcript) -> (FilePath -> Transcript) -> Test () +pushPullTest name fmt authorScript userScript = scope name do io do - closeAuthor - closeUser + repo <- initGitRepo + author <- Ucm.initCodebase fmt + authorOutput <- Ucm.runTranscript author Ucm.Runtime1 (authorScript repo) + user <- Ucm.initCodebase fmt + userOutput <- Ucm.runTranscript user Ucm.Runtime1 (userScript repo) when writeTranscriptOutput $ writeFile ("unison-src""transcripts"("GitSimple." ++ name ++ ".output.md")) @@ -224,24 +200,13 @@ pushPullTest cbInit name authorScript userScript = scope name $ do -- if we haven't crashed, clean up! 
removeDirectoryRecursive repo - removeDirectoryRecursive tmp + Ucm.deleteCodebase author + Ucm.deleteCodebase user ok --- initialize a fresh codebase -initCodebase :: MonadIO m => Codebase.Init m Symbol Ann -> FilePath -> String -> m (CodebasePath, m (), Codebase m Symbol Ann) -initCodebase cbInit tmpDir name = do - let codebaseDir = tmpDir name - (close, c) <- Codebase.openNewUcmCodebaseOrExit cbInit codebaseDir - pure (codebaseDir, close, c) - --- run a transcript on an existing codebase -runTranscript :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> String -> m String -runTranscript tmpDir c transcript = do - let configFile = tmpDir ".unisonConfig" - -- transcript runner wants a "current directory" for I guess writing scratch files? - let cwd = tmpDir "cwd" - let err err = error $ "Parse error: \n" <> show err - - -- parse and run the transcript - flip (either err) (TR.parse "transcript" (Text.pack transcript)) $ \stanzas -> - liftIO . fmap Text.unpack $ TR.run Nothing cwd configFile stanzas c +initGitRepo :: IO FilePath +initGitRepo = do + tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory ("git-simple") + let repo = tmp "repo.git" + "git" ["init", "--bare", Text.pack repo] + pure repo diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index fc8d385e06..7adc1c6509 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -9,25 +9,26 @@ module Unison.Test.Ucm upgradeCodebase, CodebaseFormat (..), Runtime (..), - Transcript (..), + Transcript, + unTranscript, ) where import Control.Monad (when) import qualified Data.Text as Text +import System.Directory (removeDirectoryRecursive) import System.FilePath (()) import qualified System.IO.Temp as Temp +import U.Util.String (stripMargin) import Unison.Codebase (CodebasePath) +import qualified Unison.Codebase as Codebase import qualified Unison.Codebase.Conversion.Upgrade12 as Upgrade12 
import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.Init as Codebase.Init import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.Codebase.TranscriptParser as TR -import Unison.Prelude (IsString, Text, traceM) +import Unison.Prelude (traceM) import qualified Unison.Util.Pretty as P -import U.Util.Text (stripMargin) -import System.Directory (removeDirectoryRecursive) -import qualified Unison.Codebase as Codebase data Runtime = Runtime1 | Runtime2 @@ -35,8 +36,11 @@ data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving (Show) data Codebase = Codebase CodebasePath CodebaseFormat deriving (Show) -newtype Transcript = Transcript {unTranscript :: Text} - deriving (IsString, Show, Semigroup) via Text +-- newtype Transcript = Transcript {unTranscript :: Text} +-- deriving (IsString, Show, Semigroup) via Text +type Transcript = String +unTranscript :: a -> a +unTranscript = id type TranscriptOutput = String @@ -81,7 +85,7 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do Codebase.installUcmDependencies codebase -- parse and run the transcript output <- - flip (either err) (TR.parse "transcript" (stripMargin $ unTranscript transcript)) $ \stanzas -> + flip (either err) (TR.parse "transcript" (Text.pack . 
stripMargin $ unTranscript transcript)) $ \stanzas -> fmap Text.unpack $ TR.run (case rt of Runtime1 -> Just False; Runtime2 -> Just True) From 22ab780e958533441852ca568d6c5e43ce8c5dfa Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 9 Apr 2021 01:40:58 -0600 Subject: [PATCH 161/225] disable writing GitSimple transcript output --- .../tests/Unison/Test/GitSimple.hs | 5 +- .../transcripts/GitSimple.one-term.output.md | 71 ------- .../transcripts/GitSimple.one-term2.output.md | 71 ------- .../transcripts/GitSimple.one-type.output.md | 72 ------- .../transcripts/GitSimple.patching.output.md | 190 ------------------ .../transcripts/emptyCodebase.output.md | 41 ---- 6 files changed, 4 insertions(+), 446 deletions(-) delete mode 100644 unison-src/transcripts/GitSimple.one-term.output.md delete mode 100644 unison-src/transcripts/GitSimple.one-term2.output.md delete mode 100644 unison-src/transcripts/GitSimple.one-type.output.md delete mode 100644 unison-src/transcripts/GitSimple.patching.output.md delete mode 100644 unison-src/transcripts/emptyCodebase.output.md diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 92257b458b..1a0e06cbfe 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -14,8 +14,11 @@ import Unison.Test.Ucm (CodebaseFormat, Transcript) import qualified Unison.Test.Ucm as Ucm import Data.String.Here.Interpolated (i) +-- keep it off for CI, since the random temp dirs it generates show up in the +-- output, which causes the test output to change, and the "no change" check +-- to fail writeTranscriptOutput :: Bool -writeTranscriptOutput = True +writeTranscriptOutput = False test :: Test () test = scope "git-simple" . 
tests $ diff --git a/unison-src/transcripts/GitSimple.one-term.output.md b/unison-src/transcripts/GitSimple.one-term.output.md deleted file mode 100644 index d70017a05b..0000000000 --- a/unison-src/transcripts/GitSimple.one-term.output.md +++ /dev/null @@ -1,71 +0,0 @@ -```unison -c = 3 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - c : ##Nat - -``` -```ucm -.> debug.file - - c#msp7bv40rv - -.> add - - ⍟ I've added these definitions: - - c : ##Nat - -.> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-ca8a9c15aa27005c/repo.git - - Done. - -``` - -------- -```ucm -.> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term-ca8a9c15aa27005c/repo.git - - Here's what's changed in the current namespace after the - merge: - - Added definitions: - - 1. c : ##Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.> alias.term ##Nat.+ + - - Done. - -``` -```unison -> #msp7bv40rv + 1 -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > #msp7bv40rv + 1 - ⧩ - 4 - -``` diff --git a/unison-src/transcripts/GitSimple.one-term2.output.md b/unison-src/transcripts/GitSimple.one-term2.output.md deleted file mode 100644 index 7c4fc3baee..0000000000 --- a/unison-src/transcripts/GitSimple.one-term2.output.md +++ /dev/null @@ -1,71 +0,0 @@ -```unison -c = 3 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - c : ##Nat - -``` -```ucm -.> debug.file - - c#msp7bv40rv - - ☝️ The namespace .myLib is empty. 
- -.myLib> add - - ⍟ I've added these definitions: - - c : ##Nat - -.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-abd7ae5a095da0e1/repo.git - - Done. - -``` - -------- -```ucm - ☝️ The namespace .yourLib is empty. - -.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-term2-abd7ae5a095da0e1/repo.git - - Here's what's changed in the current namespace after the - merge: - - Added definitions: - - 1. c : ##Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -```unison -> c -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > c - ⧩ - 3 - -``` diff --git a/unison-src/transcripts/GitSimple.one-type.output.md b/unison-src/transcripts/GitSimple.one-type.output.md deleted file mode 100644 index 16a76e2876..0000000000 --- a/unison-src/transcripts/GitSimple.one-type.output.md +++ /dev/null @@ -1,72 +0,0 @@ -```unison -type Foo = Foo -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - type Foo - -``` -```ucm - ☝️ The namespace .myLib is empty. - -.myLib> debug.file - - type Foo#568rsi7o3g - -.myLib> add - - ⍟ I've added these definitions: - - type Foo - -.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-192da04d6362dbe8/repo.git - - Done. - -``` - -------- -```ucm - ☝️ The namespace .yourLib is empty. - -.yourLib> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-one-type-192da04d6362dbe8/repo.git - - Here's what's changed in the current namespace after the - merge: - - Added definitions: - - 1. type Foo - 2. 
Foo.Foo : () - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -```unison -> Foo.Foo -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > Foo.Foo - ⧩ - () - -``` diff --git a/unison-src/transcripts/GitSimple.patching.output.md b/unison-src/transcripts/GitSimple.patching.output.md deleted file mode 100644 index 11a4fe3d1c..0000000000 --- a/unison-src/transcripts/GitSimple.patching.output.md +++ /dev/null @@ -1,190 +0,0 @@ -```ucm - ☝️ The namespace .myLib is empty. - -.myLib> alias.term ##Nat.+ + - - Done. - -``` -```unison -improveNat x = x + 3 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - improveNat : ##Nat -> ##Nat - -``` -```ucm -.myLib> add - - ⍟ I've added these definitions: - - improveNat : ##Nat -> ##Nat - -.myLib> ls - - 1. + (##Nat -> ##Nat -> ##Nat) - 2. improveNat (##Nat -> ##Nat) - -.myLib> move.namespace .myLib .workaround1552.myLib.v1 - - Done. - -.workaround1552.myLib> ls - - 1. v1/ (2 definitions) - -.workaround1552.myLib> fork v1 v2 - - Done. - -``` -```unison -improveNat x = x + 100 -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These names already exist. You can `update` them to your - new definition: - - improveNat : ##Nat -> ##Nat - -``` -```ucm -.workaround1552.myLib.v2> update - - ⍟ I've updated these names to your new definition: - - improveNat : ##Nat -> ##Nat - -.workaround1552.myLib> push /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-patching-7fa752c409f898ef/repo.git - - Done. 
- -``` - -------- -```ucm - ☝️ The namespace .myApp is empty. - -.myApp> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-patching-7fa752c409f898ef/repo.git:.v1 external.yourLib - - Here's what's changed in external.yourLib after the merge: - - Added definitions: - - 1. + : ##Nat -> ##Nat -> ##Nat - 2. improveNat : ##Nat -> ##Nat - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -.myApp> alias.term ##Nat.* * - - Done. - -``` -` -```unison -> greatApp = improveNat 5 * improveNat 6 -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > greatApp = improveNat 5 * improveNat 6 - ⧩ - 72 - -``` -```ucm -.myApp> add - - - -.myApp> pull /private/var/folders/gg/lqv4nxmx0nv3_35tg9_8c15r0000gn/T/git-simple-patching-7fa752c409f898ef/repo.git:.v2 external.yourLib - - Here's what's changed in external.yourLib after the merge: - - Updates: - - 1. improveNat : ##Nat -> ##Nat - ↓ - 2. improveNat : ##Nat -> ##Nat - - Added definitions: - - 3. patch patch (added 1 updates) - - Tip: You can use `todo` to see if this generated any work to - do in this namespace and `test` to run the tests. Or you - can use `undo` or `reflog` to undo the results of this - merge. - -``` -```unison -> greatApp = improveNat 5 * improveNat 6 -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > greatApp = improveNat 5 * improveNat 6 - ⧩ - 11130 - -``` -```ucm -.myApp> patch external.yourLib.patch - - 😶 - - This had no effect. Perhaps the patch has already been applied - or it doesn't intersect with the definitions in - the current namespace. - -``` -```unison -> greatApp = improveNat 5 * improveNat 6 -``` - -```ucm - - ✅ - - scratch.u changed. 
- - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > greatApp = improveNat 5 * improveNat 6 - ⧩ - 11130 - -``` diff --git a/unison-src/transcripts/emptyCodebase.output.md b/unison-src/transcripts/emptyCodebase.output.md deleted file mode 100644 index 20b6aa0cd1..0000000000 --- a/unison-src/transcripts/emptyCodebase.output.md +++ /dev/null @@ -1,41 +0,0 @@ -# The empty codebase - -The Unison codebase, when first initialized, contains no definitions in its namespace. - -Not even `Nat` or `+`! - -BEHOLD!!! - -```ucm -.> ls - - nothing to show - -``` -Technically, the definitions all exist, but they have no names. `builtins.merge` brings them into existence, under the current namespace: - -```ucm - ☝️ The namespace .foo is empty. - -.foo> builtins.merge - - Done. - -.foo> ls - - 1. builtin/ (335 definitions) - -``` -And for a limited time, you can get even more builtin goodies: - -```ucm -.foo> builtins.mergeio - - Done. - -.foo> ls - - 1. builtin/ (492 definitions) - -``` -More typically, you'd start out by pulling `base. From a69c3278d6e5a2891de74c3643e811e97d205cdc Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 9 Apr 2021 01:58:00 -0600 Subject: [PATCH 162/225] change encodeFileName because something weird happened pulling from ../base --- parser-typechecker/src/Unison/Codebase/Editor/Git.hs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs index 8950d9845b..2e590e7b58 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/Git.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/Git.hs @@ -24,6 +24,7 @@ type CodebasePath = FilePath -- https://superuser.com/questions/358855/what-characters-are-safe-in-cross-platform-file-names-for-linux-windows-and-os encodeFileName :: String -> FilePath encodeFileName = let + go ('.' 
: rem) = "$dot$" <> go rem go ('$' : rem) = "$$" <> go rem go (c : rem) | elem @[] c "/\\:*?\"<>|" || not (Char.isPrint c && Char.isAscii c) = "$x" <> encodeHex [c] <> "$" <> go rem @@ -32,10 +33,7 @@ encodeFileName = let encodeHex :: String -> String encodeHex = Text.unpack . Text.toUpper . ByteString.encodeBase16 . encodeUtf8 . Text.pack - in \case - "." -> "$dot$" - ".." -> "$dotdot$" - t -> go t + in go tempGitDir :: MonadIO m => Text -> m FilePath tempGitDir url = From 6a050e8e70bfd39ea8bd84a15e8945467aa35c96 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 9 Apr 2021 01:58:08 -0600 Subject: [PATCH 163/225] formatting --- parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 365b42cba2..db7e6f4c04 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -553,7 +553,7 @@ simpleProgress = Sync.Progress need done error allDone Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " types" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ " errors)")), Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " patches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ " errors)")) ] - liftIO . putStr $ "\rSynced " ++ List.intercalate "," (catMaybes ways) + liftIO . 
putStr $ "\rSynced " ++ List.intercalate ", " (catMaybes ways) instance Show (Entity m) where From e50045268aba6338dad9009be600c4c6db2d1810 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 9 Apr 2021 10:59:47 -0600 Subject: [PATCH 164/225] output formatting --- parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index db7e6f4c04..d73f61576f 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -67,6 +67,8 @@ import qualified Unison.Type as Type import Unison.Util.Relation (Relation) import qualified Unison.Util.Relation as Relation import Unison.Util.Star3 (Star3 (Star3)) +import System.IO (stdout) +import System.IO.Extra (hFlush) debug :: Bool debug = False @@ -553,7 +555,9 @@ simpleProgress = Sync.Progress need done error allDone Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " types" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ " errors)")), Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " patches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ " errors)")) ] - liftIO . 
putStr $ "\rSynced " ++ List.intercalate ", " (catMaybes ways) + liftIO do + putStr $ "\rSynced " ++ List.intercalate ", " (catMaybes ways) + hFlush stdout instance Show (Entity m) where From 774d01fedf7971080faed52edf485e9a56ec9706 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 9 Apr 2021 11:00:14 -0600 Subject: [PATCH 165/225] reformat tests and add link test --- .../tests/Unison/Test/Codebase/Upgrade12.hs | 265 ++++++++++-------- 1 file changed, 153 insertions(+), 112 deletions(-) diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index 8daf29fe69..dcbc4b3159 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -11,144 +11,185 @@ import Shellmet () import qualified Unison.Test.Ucm as Ucm test :: Test () -test = scope "codebase.upgrade12" $ - tests [typeAlias, topLevelTerm, subNamespace, accessPatch, accessHistory] +test = scope "codebase.upgrade12" $ tests [ + scope "typeAlias" do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```ucm + .> alias.type ##Nat builtin.Nat + .> history + .> history builtin + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [i| + ```unison + x : Nat + x = 3 + ``` + |] + ok, -typeAlias :: Test () -typeAlias = scope "typeAlias" do - void $ io do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [i| + scope "topLevelTerm" do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```unison:hide + y = 3 + ``` + ```ucm + .> add + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [i| ```ucm - .> alias.type ##Nat builtin.Nat - .> history - .> history builtin + .> find ``` - |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [i| ```unison - x : Nat - x = 3 + > y ``` |] - 
ok + ok, -topLevelTerm :: Test () -topLevelTerm = scope "topLevelTerm" $ do - void $ io do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [i| - ```unison:hide - y = 3 - ``` + scope "metadataForTerm" do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 "" + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```unison:hide + doc = "y is the number 3" + y = 3 + ``` + ```ucm + .> add + .> link doc y + ``` + |] + Ucm.runTranscript c1 Ucm.Runtime1 [i| ```ucm - .> add + .> links y ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [i| - ```ucm - .> find - ``` - ```unison - > y - ``` - |] - ok - -subNamespace :: Test () -subNamespace = scope "subNamespace" do - void $ io do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [i| - ```ucm - .> alias.type ##Nat builtin.Nat - ``` - ```unison - unique type a.b.C = C Nat - ``` + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [i| ```ucm - .> add + .> links y ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [i| - ```ucm - .> find - ``` - ```unison - > a.b.C.C 3 - ``` - |] - ok + ok, -accessPatch :: Test () -accessPatch = scope "accessPatch" do - void $ io do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [i| + scope "metadataForType" do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```unison:hide + doc = "Nat means natural number" + ``` + ```ucm + .> alias.type ##Nat Nat + .> link doc Nat + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [i| ```ucm - .> alias.type ##Nat builtin.Nat - ``` - ```unison:hide - unique type A = A Nat - foo = A.A 3 + .> docs y ``` + |] + ok, + + scope "subNamespace" do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + ```unison 
+ unique type a.b.C = C Nat + ``` + ```ucm + .> add + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [i| ```ucm - .> debug.file - .> add - ``` - ```unison:hide - unique type A = A Nat Nat - foo = A.A 3 3 + .> find ``` - ```ucm - .> debug.file - .> update + ```unison + > a.b.C.C 3 ``` + |] + ok, + + scope "accessPatch" do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + ```unison:hide + unique type A = A Nat + foo = A.A 3 + ``` + ```ucm + .> debug.file + .> add + ``` + ```unison:hide + unique type A = A Nat Nat + foo = A.A 3 3 + ``` + ```ucm + .> debug.file + .> update + ``` + ```ucm + .> view.patch patch + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [i| ```ucm .> view.patch patch ``` |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [i| - ```ucm - .> view.patch patch - ``` - |] - ok + ok, -- #00k3c9bp6m A -- #6v94dtbfk1 foo -- #d3bn4dqp1a A' -- #p3a21bjjl4 foo' -accessHistory :: Test () -accessHistory = scope "history" do - void $ io do - c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 - Ucm.runTranscript c1 Ucm.Runtime1 [i| - ```unison - foo = 3 - ``` - ```ucm - .> add - ``` - ```unison - foo = 4 - ``` - ```ucm - .> update - .> history - ``` - |] - c2 <- Ucm.upgradeCodebase c1 - Ucm.runTranscript c2 Ucm.Runtime1 [i| - ```ucm - .> history - .> reset-root #ls8 - .> history - ``` - |] - ok \ No newline at end of file + scope "history" do + void $ io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 Ucm.Runtime1 [i| + ```unison + foo = 3 + ``` + ```ucm + .> add + ``` + ```unison + foo = 4 + ``` + ```ucm + .> update + .> history + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + Ucm.runTranscript c2 Ucm.Runtime1 [i| + ```ucm + .> history + .> reset-root #ls8 + .> history + ``` + |] + ok + ] \ No newline at end of file From 8c03576c2880f164f69655886f263c5d2b80102e Mon Sep 17 
00:00:00 2001 From: Arya Irani Date: Sat, 10 Apr 2021 00:42:30 -0600 Subject: [PATCH 166/225] move Operations debug trace stuff around --- .../U/Codebase/Sqlite/Operations.hs | 46 ++++++++++--------- .../U/Codebase/Sqlite/Queries.hs | 2 +- .../src/Unison/Codebase/Conversion/Sync12.hs | 16 ++++--- 3 files changed, 34 insertions(+), 30 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index c5b8e3cbd1..231a504f3f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -130,6 +130,8 @@ throwError = if crashOnError then error . show else Except.throwError debug, crashOnError :: Bool debug = False crashOnError = False +-- | crashOnError can be helpful for debugging. +-- If it is False, the errors will be delivered to the user elsewhere. type Err m = (MonadError Error m, HasCallStack) @@ -402,8 +404,8 @@ decodeDeclElement i = getFromBytesOr (ErrDeclElement i) (S.lookupDeclElement i) -- * legacy conversion helpers getCycleLen :: EDB m => H.Hash -> m Word64 -getCycleLen id | debug && trace ("getCycleLen " ++ show id) False = undefined getCycleLen h = do + when debug $ traceM $ "getCycleLen " ++ show h runMaybeT (primaryHashToExistingObjectId h) >>= maybe (throwError $ LegacyUnknownCycleLen h) pure >>= liftQ . Q.loadObjectById @@ -421,8 +423,8 @@ getDeclTypeByReference r@(C.Reference.Id h pos) = >>= pure . C.Decl.declType componentByObjectId :: EDB m => Db.ObjectId -> m [S.Reference.Id] -componentByObjectId id | debug && trace ("componentByObjectId " ++ show id) False = undefined componentByObjectId id = do + when debug . traceM $ "Operations.componentByObjectId " ++ show id len <- (liftQ . Q.loadObjectById) id >>= decodeComponentLengthOnly pure [C.Reference.Id id i | i <- [0 .. 
len - 1]] @@ -431,8 +433,8 @@ componentByObjectId id = do -- ** Saving & loading terms saveTermComponent :: EDB m => H.Hash -> [(C.Term Symbol, C.Term.Type Symbol)] -> m Db.ObjectId -saveTermComponent h _terms | debug && trace ("Operations.saveTermComponent " ++ show h) False = undefined saveTermComponent h terms = do + when debug . traceM $ "Operations.saveTermComponent " ++ show h sTermElements <- traverse (uncurry c2sTerm) terms hashId <- Q.saveHashHash h let li = S.Term.LocallyIndexedComponent $ Vector.fromList sTermElements @@ -594,8 +596,8 @@ loadTermWithTypeByReference (C.Reference.Id h i) = >>= uncurry3 s2cTermWithType loadTermByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term Symbol) -loadTermByReference id | debug && trace ("loadTermByReference " ++ show id) False = undefined -loadTermByReference (C.Reference.Id h i) = +loadTermByReference r@(C.Reference.Id h i) = do + when debug . traceM $ "loadTermByReference " ++ show r MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . Q.loadObjectWithTypeById >>= \case (OT.TermComponent, blob) -> pure blob; _ -> mzero @@ -604,8 +606,8 @@ loadTermByReference (C.Reference.Id h i) = >>= uncurry s2cTerm loadTypeOfTermByTermReference :: EDB m => C.Reference.Id -> MaybeT m (C.Term.Type Symbol) -loadTypeOfTermByTermReference id | debug && trace ("loadTypeOfTermByTermReference " ++ show id) False = undefined -loadTypeOfTermByTermReference (C.Reference.Id h i) = +loadTypeOfTermByTermReference id@(C.Reference.Id h i) = do + when debug . traceM $ "loadTypeOfTermByTermReference " ++ show id MaybeT (primaryHashToMaybeObjectId h) >>= liftQ . Q.loadObjectWithTypeById >>= \case (OT.TermComponent, blob) -> pure blob; _ -> mzero @@ -730,8 +732,8 @@ w2cTerm ids tm = do -- ** Saving & loading type decls saveDeclComponent :: EDB m => H.Hash -> [C.Decl Symbol] -> m Db.ObjectId -saveDeclComponent h _decls | debug && trace ("Operations.saveDeclComponent " ++ show h) False = undefined saveDeclComponent h decls = do + when debug . 
traceM $ "Operations.saveDeclComponent " ++ show h sDeclElements <- traverse (c2sDecl Q.saveText primaryHashToExistingObjectId) decls hashId <- Q.saveHashHash h let li = S.Decl.LocallyIndexedComponent $ Vector.fromList sDeclElements @@ -797,8 +799,8 @@ c2sDecl saveText saveDefn (C.Decl.DataDeclaration dt m b cts) = do pure (ids, decl) loadDeclByReference :: EDB m => C.Reference.Id -> MaybeT m (C.Decl Symbol) -loadDeclByReference id | debug && trace ("loadDeclByReference " ++ show id) False = undefined -loadDeclByReference (C.Reference.Id h i) = do +loadDeclByReference r@(C.Reference.Id h i) = do + when debug . traceM $ "loadDeclByReference " ++ show r -- retrieve the blob (localIds, C.Decl.DataDeclaration dt m b ct) <- MaybeT (primaryHashToMaybeObjectId h) @@ -894,14 +896,14 @@ type BranchSavingMonad m = StateT BranchSavingState (WriterT BranchSavingWriter saveRootBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveRootBranch c = do - when debug $ traceM $ "saveRootBranch " ++ show (C.causalHash c) + when debug $ traceM $ "Operations.saveRootBranch " ++ show (C.causalHash c) (boId, chId) <- saveBranch c Q.setNamespaceRoot chId pure (boId, chId) saveBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveBranch (C.Causal hc he parents me) = do - when debug $ traceM $ "\nsaveBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) + when debug $ traceM $ "\nOperations.saveBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) -- check if we can skip the whole thing, by checking if there are causal parents for hc chId <- liftQ (Q.saveCausalHash hc) parentCausalHashIds <- @@ -1001,14 +1003,14 @@ saveBranch (C.Causal hc he parents me) = do lookupChild = lookup_ Lens._4 Lens._4 LocalBranchChildId startState = mempty @BranchSavingState saveBranchObject :: DB m => Db.BranchHashId -> BranchLocalIds -> S.Branch.Full.LocalBranch 
-> m Db.BranchObjectId - saveBranchObject id li lBranch | debug && trace ("saveBranchObject\n\tid = " ++ show id ++ "\n\tli = " ++ show li ++ "\n\tlBranch = " ++ show lBranch) False = undefined - saveBranchObject (Db.unBranchHashId -> hashId) li lBranch = do + saveBranchObject id@(Db.unBranchHashId -> hashId) li lBranch = do + when debug $ traceM $ "saveBranchObject\n\tid = " ++ show id ++ "\n\tli = " ++ show li ++ "\n\tlBranch = " ++ show lBranch let bytes = S.putBytes S.putBranchFormat $ S.BranchFormat.Full li lBranch oId <- Q.saveObject hashId OT.Namespace bytes pure $ Db.BranchObjectId oId done :: (EDB m, Show a) => (a, BranchSavingWriter) -> m (BranchLocalIds, a) - done (lBranch, written) | debug && trace ("saveBranch.done\n\tlBranch = " ++ show lBranch ++ "\n\twritten = " ++ show written) False = undefined - done (lBranch, (textValues, defnHashes, patchObjectIds, branchCausalIds)) = do + done (lBranch, written@(textValues, defnHashes, patchObjectIds, branchCausalIds)) = do + when debug $ traceM $ "saveBranch.done\n\tlBranch = " ++ show lBranch ++ "\n\twritten = " ++ show written textIds <- liftQ $ traverse Q.saveText textValues defnObjectIds <- traverse primaryHashToExistingObjectId defnHashes let ids = @@ -1067,8 +1069,8 @@ loadBranchByObjectId id = do S.BranchFormat.Diff r li d -> doDiff r [l2sDiff li d] where deserializeBranchObject :: EDB m => Db.BranchObjectId -> m S.BranchFormat - deserializeBranchObject id | debug && trace ("deserializeBranchObject " ++ show id) False = undefined - deserializeBranchObject id = + deserializeBranchObject id = do + when debug $ traceM $ "deserializeBranchObject " ++ show id (liftQ . 
Q.loadObjectById) (Db.unBranchObjectId id) >>= getFromBytesOr (ErrBranch id) S.getBranchFormat @@ -1250,8 +1252,8 @@ s2cPatch (S.Patch termEdits typeEdits) = <*> Map.bitraverse h2cReference (Set.traverse s2cTypeEdit) typeEdits deserializePatchObject :: EDB m => Db.PatchObjectId -> m S.PatchFormat -deserializePatchObject id | debug && trace ("deserializePatchObject " ++ show id) False = undefined -deserializePatchObject id = +deserializePatchObject id = do + when debug $ traceM $ "Operations.deserializePatchObject " ++ show id (liftQ . Q.loadObjectById) (Db.unPatchObjectId id) >>= getFromBytesOr (ErrPatch id) S.getPatchFormat @@ -1326,8 +1328,8 @@ declReferentsByPrefix b32prefix pos cid = do cids = [cid | cid <- [0 :: ConstructorId .. ctorCount - 1], test cid] pure (h, pos, len, dt, cids) getDeclCtorCount :: EDB m => S.Reference.Id -> m (C.Decl.DeclType, Word64, ConstructorId) - getDeclCtorCount id | debug && trace ("getDeclCtorCount " ++ show id) False = undefined - getDeclCtorCount (C.Reference.Id r i) = do + getDeclCtorCount id@(C.Reference.Id r i) = do + when debug $ traceM $ "getDeclCtorCount " ++ show id bs <- liftQ (Q.loadObjectById r) len <- decodeComponentLengthOnly bs (_localIds, decl) <- decodeDeclElement i bs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index fc092770e5..03829aa729 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -94,7 +94,7 @@ noExcept a = Right a -> pure a Left e -> error $ "unexpected error: " ++ show e -orError :: MonadError Integrity m => Integrity -> Maybe b -> m b +orError :: Err m => Integrity -> Maybe b -> m b orError e = maybe (throwError e) pure type TypeHashReference = Reference' TextId HashId diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index d73f61576f..5a0284db97 100644 --- 
a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -503,12 +503,14 @@ type ProgressState m = (DoneCount, ErrorCount, Status m) simpleProgress :: MonadState (ProgressState m) n => MonadIO n => Sync.Progress n (Entity m) simpleProgress = Sync.Progress need done error allDone where + newlines = True + logEntities = True -- ignore need - -- need e = liftIO $ putStrLn $ "need " ++ show e - need _ = pure () - done e = do - -- liftIO $ putStrLn $ "done " ++ show e + need e = + when logEntities $ liftIO $ putStrLn $ "need " ++ show e + done e = do + when logEntities $ liftIO $ putStrLn $ "done " ++ show e case e of C {} -> _1 . doneBranches += 1 T {} -> _1 . doneTerms += 1 @@ -517,7 +519,7 @@ simpleProgress = Sync.Progress need done error allDone printProgress error e = do - -- liftIO $ putStrLn $ "error " ++ show e + when logEntities $ liftIO $ putStrLn $ "error " ++ show e case e of C {} -> _2 . errorBranches += 1 T {} -> _2 . errorTerms += 1 @@ -528,7 +530,7 @@ simpleProgress = Sync.Progress need done error allDone allDone :: MonadState (DoneCount, ErrorCount, Status m) n => MonadIO n => n () allDone = do Status branches terms decls patches <- Lens.use Lens._3 - liftIO $ putStr "Finished." + liftIO $ putStrLn "Finished." Foldable.for_ (Map.toList decls) \(h, s) -> case s of DeclOk -> pure () DeclMissing -> liftIO . putStrLn $ "I couldn't find the decl " ++ show h ++ ", so I filtered it out of the sync." 
@@ -556,7 +558,7 @@ simpleProgress = Sync.Progress need done error allDone Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " patches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ " errors)")) ] liftIO do - putStr $ "\rSynced " ++ List.intercalate ", " (catMaybes ways) + putStr $ "\rSynced " ++ List.intercalate ", " (catMaybes ways) ++ Monoid.whenM newlines "\n" hFlush stdout From b3831a071bd528c21c5ae0874cf331989ab87d3a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Apr 2021 00:43:25 -0600 Subject: [PATCH 167/225] fix bug in referent type lookup --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 2 +- parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 03829aa729..ead0f36e44 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -458,7 +458,7 @@ getTypeReferenceForReferent r = FROM find_type_index WHERE term_referent_object_id = ? AND term_referent_component_index = ? - AND term_referent_constructor_index = ? + AND term_referent_constructor_index IS ? 
|] -- todo: error if no results diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index dcbc4b3159..62cda21fb6 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -63,10 +63,17 @@ test = scope "codebase.upgrade12" $ tests [ y = 3 ``` ```ucm + .> debug.file .> add .> link doc y + .> history ``` |] + -- 8bbb doc + -- mps7 y + -- ttjf post-link + -- 988m pre-link + -- 7asf empty Ucm.runTranscript c1 Ucm.Runtime1 [i| ```ucm .> links y From 1b39da60771605c2c896d3d93d236926ad527d1c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Apr 2021 00:47:30 -0600 Subject: [PATCH 168/225] sync progress debug false --- parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 5a0284db97..df61c71764 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -503,8 +503,8 @@ type ProgressState m = (DoneCount, ErrorCount, Status m) simpleProgress :: MonadState (ProgressState m) n => MonadIO n => Sync.Progress n (Entity m) simpleProgress = Sync.Progress need done error allDone where - newlines = True - logEntities = True + newlines = False + logEntities = False -- ignore need need e = when logEntities $ liftIO $ putStrLn $ "need " ++ show e From 20678bb18ab2beba78bb43cfd59f8cddddc30626 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Apr 2021 12:22:20 -0600 Subject: [PATCH 169/225] fix codebase.upgrade12.metadataForType test --- parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs | 1 + 1 file changed, 1 insertion(+) diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs 
b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index 62cda21fb6..262738c15d 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -95,6 +95,7 @@ test = scope "codebase.upgrade12" $ tests [ doc = "Nat means natural number" ``` ```ucm + .> add .> alias.type ##Nat Nat .> link doc Nat ``` From df5827f9d3213abf3b1f8305fc315fd3248bcb97 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Apr 2021 12:22:41 -0600 Subject: [PATCH 170/225] fix typo in optimized flag --- parser-typechecker/package.yaml | 2 +- parser-typechecker/unison-parser-typechecker.cabal | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/parser-typechecker/package.yaml b/parser-typechecker/package.yaml index 80d4ef3ebc..82f6390eb1 100644 --- a/parser-typechecker/package.yaml +++ b/parser-typechecker/package.yaml @@ -25,7 +25,7 @@ flags: when: - condition: flag(optimized) - ghc-options: -funbox-strict-fields -02 + ghc-options: -funbox-strict-fields -O2 library: source-dirs: src diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 8007e86d39..7a8d49d928 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: 61c993e5093ad077fa54b0671d9b39139c1473e3e0434a0ac452cc4076470523 +-- hash: 7d1ab4745c2b2893bf8e1b5c07c2879f33234bd20788d22a23e0290ef9ca9d3d name: unison-parser-typechecker version: 0.0.0 @@ -264,7 +264,7 @@ library , x509-store , x509-system if flag(optimized) - ghc-options: -funbox-strict-fields -02 + ghc-options: -funbox-strict-fields -O2 default-language: Haskell2010 executable prettyprintdemo @@ -281,7 +281,7 @@ executable prettyprintdemo , text , unison-parser-typechecker if flag(optimized) - ghc-options: -funbox-strict-fields -02 + 
ghc-options: -funbox-strict-fields -O2 default-language: Haskell2010 executable tests @@ -357,7 +357,7 @@ executable tests , unison-util , unliftio if flag(optimized) - ghc-options: -funbox-strict-fields -02 + ghc-options: -funbox-strict-fields -O2 default-language: Haskell2010 executable transcripts @@ -379,7 +379,7 @@ executable transcripts , unison-core1 , unison-parser-typechecker if flag(optimized) - ghc-options: -funbox-strict-fields -02 + ghc-options: -funbox-strict-fields -O2 default-language: Haskell2010 executable unison @@ -413,7 +413,7 @@ executable unison , unison-parser-typechecker , uri-encode if flag(optimized) - ghc-options: -funbox-strict-fields -02 + ghc-options: -funbox-strict-fields -O2 if !os(windows) build-depends: unix @@ -435,5 +435,5 @@ benchmark runtime , unison-core1 , unison-parser-typechecker if flag(optimized) - ghc-options: -funbox-strict-fields -02 + ghc-options: -funbox-strict-fields -O2 default-language: Haskell2010 From 1bc5abcff9bbf086cbdb23339b892dc8e3759535 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 10 Apr 2021 23:20:49 -0600 Subject: [PATCH 171/225] fix upgrade12 --- parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index 262738c15d..d6f700d9c3 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -66,6 +66,7 @@ test = scope "codebase.upgrade12" $ tests [ .> debug.file .> add .> link doc y + .> links y .> history ``` |] @@ -103,7 +104,7 @@ test = scope "codebase.upgrade12" $ tests [ c2 <- Ucm.upgradeCodebase c1 Ucm.runTranscript c2 Ucm.Runtime1 [i| ```ucm - .> docs y + .> links Nat ``` |] ok, From 4d37edeca709989dbbebd637b85c11463396de07 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 12 Apr 2021 10:56:34 -0600 Subject: [PATCH 
172/225] count branch replacement as a warning --- .../src/Unison/Codebase/Conversion/Sync12.hs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index df61c71764..00cc2f3ab9 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -157,11 +157,15 @@ trySync t _gc e = do Left deps -> pure . Sync.Missing $ Foldable.toList deps Right c' -> do let h' = Causal.currentHash c' - if h == h' - then setBranchStatus @m @n h BranchOk - else setBranchStatus h (BranchReplaced h' c') t $ Codebase.putBranch dest (Branch.Branch c') - pure Sync.Done + if h == h' + then do + setBranchStatus @m @n h BranchOk + pure Sync.Done + else do + setBranchStatus h (BranchReplaced h' c') + pure Sync.NonFatalError + T h n -> getTermStatus h >>= \case Just {} -> pure Sync.PreviouslyDone @@ -552,7 +556,7 @@ simpleProgress = Sync.Progress need done error allDone printProgress = do (DoneCount b t d p, ErrorCount b' t' d' p', _) <- State.get let ways :: [Maybe String] = - [ Monoid.whenM (b > 0 || b' > 0) (Just $ show b ++ " branches" ++ Monoid.whenM (b' > 0) (" (" ++ show b' ++ " errors)")), + [ Monoid.whenM (b > 0 || b' > 0) (Just $ show b ++ " branches" ++ Monoid.whenM (b' > 0) (" (+" ++ show b' ++ " repaired)")), Monoid.whenM (t > 0 || t' > 0) (Just $ show t ++ " terms" ++ Monoid.whenM (t' > 0) (" (" ++ show t' ++ " errors)")), Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " types" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ " errors)")), Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " patches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ " errors)")) From 66f410544fe9bdbca43a514ed5a92909a97cfff1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 12 Apr 2021 10:56:54 -0600 Subject: [PATCH 173/225] fix v2 bug for transcript.fork --- 
parser-typechecker/src/Unison/Codebase/Init.hs | 2 +- .../src/Unison/Codebase/SqliteCodebase.hs | 2 +- parser-typechecker/unison/Main.hs | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Init.hs b/parser-typechecker/src/Unison/Codebase/Init.hs index 3b2599ac29..d4a7e9154f 100644 --- a/parser-typechecker/src/Unison/Codebase/Init.hs +++ b/parser-typechecker/src/Unison/Codebase/Init.hs @@ -27,7 +27,7 @@ data Init m v a = Init -- | create a new codebase createCodebase' :: CodebasePath -> m (Either CreateCodebaseError (m (), Codebase m v a)), -- | given a codebase root, and given that the codebase root may have other junk in it, - -- give the path to the "actual" files; e.g. what a forked transcript should clone + -- give the path to the "actual" files; e.g. what a forked transcript should clone. codebasePath :: CodebasePath -> CodebasePath } diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 659749a944..08fc10dc4e 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -101,7 +101,7 @@ debug = False debugProcessBranches = False init :: HasCallStack => MonadIO m => Codebase.Init m Symbol Ann -init = Codebase.Init getCodebaseOrError createCodebaseOrError ( codebasePath) +init = Codebase.Init getCodebaseOrError createCodebaseOrError ( ".unison" "v2") createCodebaseOrError :: MonadIO m => diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 1ace52edbe..36da1a3116 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -226,19 +226,19 @@ upgradeCodebase mcodepath = prepareTranscriptDir :: Codebase.Init IO Symbol Ann -> Bool -> Maybe FilePath -> IO FilePath prepareTranscriptDir cbInit inFork mcodepath = do tmp <- Temp.getCanonicalTemporaryDirectory >>= 
(`Temp.createTempDirectory` "transcript") - unless inFork $ do - PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." - _ <- Codebase.openNewUcmCodebaseOrExit cbInit tmp - pure() - when inFork $ Codebase.getCodebaseOrExit cbInit mcodepath >> do + if inFork then + Codebase.getCodebaseOrExit cbInit mcodepath >> do path <- Codebase.getCodebaseDir mcodepath PT.putPrettyLn $ P.lines [ P.wrap "Transcript will be run on a copy of the codebase at: ", "", P.indentN 2 (P.string path) ] Path.copyDir (Codebase.codebasePath cbInit path) (Codebase.codebasePath cbInit tmp) - + else do + PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." + void $ Codebase.openNewUcmCodebaseOrExit cbInit tmp + traceM $ "Copying codebase to " ++ tmp pure tmp runTranscripts' From 2746c13ce2d38581cef58fd56a13466237e1ff78 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 12 Apr 2021 13:26:10 -0600 Subject: [PATCH 174/225] cache root branch --- .../src/Unison/Codebase/SqliteCodebase.hs | 41 +++++++++++++------ 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 08fc10dc4e..46bc768280 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -23,7 +23,7 @@ import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) import Data.Bifunctor (Bifunctor (bimap, first), second) import qualified Data.Bifunctor as Bifunctor import qualified Data.Either.Combinators as Either -import Data.Foldable (Foldable (toList), traverse_) +import Data.Foldable (Foldable (toList), traverse_, for_) import Data.Functor (void) import qualified Data.List as List import Data.Map (Map) @@ -119,7 +119,6 @@ createCodebaseOrError dir = do $ map (Bifunctor.bimap P.string P.string) schema in Codebase1.CreateCodebaseOther $ prettyError schema Either.mapLeft convertError <$> 
createCodebaseOrError' dir - where data CreateCodebaseError = CreateCodebaseAlreadyExists @@ -242,6 +241,11 @@ sqliteCodebase root = do conn <- unsafeGetConnection root runReaderT Q.checkForMissingSchema conn >>= \case [] -> do + -- + rootBranchCache <- newTVarIO Nothing + -- The v1 codebase interface has operations to read and write individual definitions + -- whereas the v2 codebase writes them as complete components. These two fields buffer + -- the individual definitions until a complete component has been written. termBuffer :: TVar (Map Hash TermBufferEntry) <- newTVarIO Map.empty declBuffer :: TVar (Map Hash DeclBufferEntry) <- newTVarIO Map.empty let getTerm :: MonadIO m => Reference.Id -> m (Maybe (Term Symbol Ann)) @@ -419,13 +423,21 @@ sqliteCodebase root = do tryFlushDeclBuffer h ) - getRootBranch :: MonadIO m => m (Either Codebase1.GetRootBranchError (Branch m)) - getRootBranch = - fmap (Either.mapLeft err) - . runExceptT - . flip runReaderT conn - . fmap (Branch.transform (runDB conn)) - $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal + getRootBranch :: MonadIO m => TVar (Maybe (Branch m)) -> m (Either Codebase1.GetRootBranchError (Branch m)) + getRootBranch rootBranchCache = + readTVarIO rootBranchCache >>= \case + Nothing -> do + b <- fmap (Either.mapLeft err) + . runExceptT + . flip runReaderT conn + . fmap (Branch.transform (runDB conn)) + $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal + for_ b (atomically . writeTVar rootBranchCache . Just) + pure b + Just b -> do + -- todo: check to see if root namespace hash has been externally modified + -- and load it if necessary. But for now, we just return what's present in the cache. 
+ pure (Right b) where err :: Ops.Error -> Codebase1.GetRootBranchError err = \case @@ -438,13 +450,16 @@ sqliteCodebase root = do Codebase1.CouldntLoadRootBranch $ Cv.causalHash2to1 ch e -> error $ show e - putRootBranch :: MonadIO m => Branch m -> m () - putRootBranch branch1 = + putRootBranch :: MonadIO m => TVar (Maybe (Branch m)) -> Branch m -> m () + putRootBranch rootBranchCache branch1 = do + -- todo: check to see if root namespace hash has been externally modified + -- and do something (merge?) it if necessary. But for now, we just overwrite it. runDB conn . void . Ops.saveRootBranch . Cv.causalbranch1to2 $ Branch.transform (lift . lift) branch1 + atomically $ writeTVar rootBranchCache (Just branch1) rootBranchUpdates :: MonadIO m => m (IO (), IO (Set Branch.Hash)) rootBranchUpdates = pure (cleanup, liftIO newRootsDiscovered) @@ -746,8 +761,8 @@ sqliteCodebase root = do getTypeDeclaration putTerm putTypeDeclaration - getRootBranch - putRootBranch + (getRootBranch rootBranchCache) + (putRootBranch rootBranchCache) rootBranchUpdates getBranchForHash putBranch From 9f6f613280bffe1e7aa2c6584ddbf3c2190bc645 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 12 Apr 2021 16:05:02 -0600 Subject: [PATCH 175/225] fix v2 sync display condition and add quiet flag --- .../src/Unison/Codebase/SqliteCodebase.hs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 46bc768280..766342cbdc 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -10,7 +10,7 @@ module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init, unsa import qualified Control.Concurrent import qualified Control.Exception -import Control.Monad (filterM, when, (>=>)) +import Control.Monad (filterM, when, (>=>), unless) import Control.Monad.Except (ExceptT, 
MonadError (throwError), runExceptT) import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM, unlessM, (||^)) @@ -24,7 +24,7 @@ import Data.Bifunctor (Bifunctor (bimap, first), second) import qualified Data.Bifunctor as Bifunctor import qualified Data.Either.Combinators as Either import Data.Foldable (Foldable (toList), traverse_, for_) -import Data.Functor (void) +import Data.Functor (void, (<&>)) import qualified Data.List as List import Data.Map (Map) import qualified Data.Map as Map @@ -815,6 +815,7 @@ emptySyncProgressState = SyncProgressState (Just mempty) (Right mempty) (Right m syncProgress :: MonadState SyncProgressState m => MonadIO m => Sync.Progress m Sync22.Entity syncProgress = Sync.Progress need done warn allDone where + quiet = False maxTrackedHashCount = 1024 * 1024 size :: SyncProgressState -> Int size = \case @@ -824,7 +825,7 @@ syncProgress = Sync.Progress need done warn allDone need, done, warn :: (MonadState SyncProgressState m, MonadIO m) => Sync22.Entity -> m () need h = do - Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" + unless quiet $ Monad.whenM (State.gets size <&> (== 0)) $ liftIO $ putStr "\n" State.get >>= \case SyncProgressState Nothing Left {} Left {} -> pure () SyncProgressState (Just need) (Right done) (Right warn) -> @@ -835,27 +836,27 @@ syncProgress = Sync.Progress need done warn allDone then pure () else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) (Right warn) SyncProgressState _ _ _ -> undefined - State.get >>= liftIO . putStr . renderState ("Synced ") + unless quiet $ State.get >>= liftIO . putStr . 
renderState ("Synced ") done h = do - Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" + unless quiet $ Monad.whenM (State.gets size <&> (== 0)) $ liftIO $ putStr "\n" State.get >>= \case SyncProgressState Nothing (Left done) warn -> State.put $ SyncProgressState Nothing (Left (done + 1)) warn SyncProgressState (Just need) (Right done) warn -> State.put $ SyncProgressState (Just $ Set.delete h need) (Right $ Set.insert h done) warn SyncProgressState _ _ _ -> undefined - State.get >>= liftIO . putStr . renderState ("Synced ") + unless quiet $ State.get >>= liftIO . putStr . renderState ("Synced ") warn h = do - Monad.whenM (fmap (> 0) $ State.gets size) $ liftIO $ putStr "\n" + unless quiet $ Monad.whenM (State.gets size <&> (== 0)) $ liftIO $ putStr "\n" State.get >>= \case SyncProgressState Nothing done (Left warn) -> State.put $ SyncProgressState Nothing done (Left $ warn + 1) SyncProgressState (Just need) done (Right warn) -> State.put $ SyncProgressState (Just $ Set.delete h need) done (Right $ Set.insert h warn) SyncProgressState _ _ _ -> undefined - State.get >>= liftIO . putStr . renderState ("Synced ") + unless quiet $ State.get >>= liftIO . putStr . renderState ("Synced ") allDone = State.get >>= liftIO . putStr . 
renderState ("Done syncing ") From 722748f0b89cc19eac72d900012780f9f81658c2 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 12 Apr 2021 16:05:29 -0600 Subject: [PATCH 176/225] formatting --- .../src/Unison/Codebase/SqliteCodebase.hs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 766342cbdc..fef074dba1 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -23,7 +23,7 @@ import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) import Data.Bifunctor (Bifunctor (bimap, first), second) import qualified Data.Bifunctor as Bifunctor import qualified Data.Either.Combinators as Either -import Data.Foldable (Foldable (toList), traverse_, for_) +import Data.Foldable (Foldable (toList), for_, traverse_) import Data.Functor (void, (<&>)) import qualified Data.List as List import Data.Map (Map) @@ -427,11 +427,12 @@ sqliteCodebase root = do getRootBranch rootBranchCache = readTVarIO rootBranchCache >>= \case Nothing -> do - b <- fmap (Either.mapLeft err) - . runExceptT - . flip runReaderT conn - . fmap (Branch.transform (runDB conn)) - $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal + b <- + fmap (Either.mapLeft err) + . runExceptT + . flip runReaderT conn + . fmap (Branch.transform (runDB conn)) + $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal for_ b (atomically . writeTVar rootBranchCache . Just) pure b Just b -> do @@ -971,5 +972,5 @@ pushGitRootBranch syncToDirectory branch repo syncMode = runExceptT do ++ "from supplying the git treeish `" ++ Text.unpack gitbranch ++ "`!" 
- -- gitIn remotePath ["push", "--quiet", url, gitbranch] + -- gitIn remotePath ["push", "--quiet", url, gitbranch] pure True From 6c91c9050c53ee9c29f6d885054c1834d6dc011c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 12 Apr 2021 18:47:00 -0600 Subject: [PATCH 177/225] redundant imports --- parser-typechecker/unison/Main.hs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 36da1a3116..4bc56853cc 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -17,11 +17,6 @@ import qualified System.Posix.Signals as Sig import Control.Concurrent (mkWeakThreadId, myThreadId) import Control.Error.Safe (rightMay) import Control.Exception (AsyncException (UserInterrupt), throwTo) -import Control.Lens (Lens', (&)) -import qualified Control.Lens as Lens -import qualified Control.Monad.Reader as Reader -import Control.Monad.State (StateT (StateT, runStateT)) -import qualified Control.Monad.State as State import Data.ByteString.Char8 (unpack) import qualified Data.Configurator as Config import Data.Configurator.Types (Config) @@ -36,12 +31,8 @@ import qualified System.IO.Temp as Temp import System.Mem.Weak (deRefWeak) import qualified System.Path as Path import Text.Megaparsec (runParser) -import qualified U.Codebase.Sync as Sync import qualified Unison.Codebase as Codebase import qualified Unison.Codebase.Init as Codebase -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Causal as Causal -import qualified Unison.Codebase.Conversion.Sync12 as Sync12 import qualified Unison.Codebase.Editor.Input as Input import Unison.Codebase.Editor.RemoteRepo (RemoteNamespace) import qualified Unison.Codebase.Editor.VersionParser as VP From 2187c574d4b5e81bcbec3fea81a6d851baea9f8b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 13 Apr 2021 23:18:22 -0600 Subject: [PATCH 178/225] rewrite Ops.saveBranch with smarter short-circuit --- 
.../U/Codebase/Sqlite/Operations.hs | 52 ++++++++----------- .../U/Codebase/Sqlite/Queries.hs | 21 +++++--- codebase2/util/U/Util/Alternative.hs | 8 +++ codebase2/util/unison-util.cabal | 3 +- 4 files changed, 47 insertions(+), 37 deletions(-) create mode 100644 codebase2/util/U/Util/Alternative.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 231a504f3f..c928e4bb1e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -14,9 +14,10 @@ module U.Codebase.Sqlite.Operations where import Control.Lens (Lens') import qualified Control.Lens as Lens -import Control.Monad (MonadPlus (mzero), join, when, (<=<)) +import Control.Monad (MonadPlus (mzero), join, when, (<=<), unless) import Control.Monad.Except (ExceptT, MonadError, runExceptT) import qualified Control.Monad.Except as Except +import qualified Control.Monad.Extra as Monad import Control.Monad.State (MonadState, StateT, evalStateT) import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) @@ -120,7 +121,6 @@ import qualified U.Util.Serialization as S import qualified U.Util.Set as Set import qualified U.Util.Term as TermUtil import qualified U.Util.Type as TypeUtil -import Control.Monad.Extra (ifM) -- * Error handling @@ -904,35 +904,27 @@ saveRootBranch c = do saveBranch :: EDB m => C.Branch.Causal m -> m (Db.BranchObjectId, Db.CausalHashId) saveBranch (C.Causal hc he parents me) = do when debug $ traceM $ "\nOperations.saveBranch \n hc = " ++ show hc ++ ",\n he = " ++ show he ++ ",\n parents = " ++ show (Map.keys parents) - -- check if we can skip the whole thing, by checking if there are causal parents for hc - chId <- liftQ (Q.saveCausalHash hc) - parentCausalHashIds <- - liftQ (Q.loadCausalParents chId) >>= \case - [] -> do - -- no parents means hc maybe hasn't been saved previously, - -- 
so try to save each parent (recursively) before continuing to save hc - for (Map.toList parents) $ \(causalHash, mcausal) -> do - -- check if we can short circuit the parent before loading it, - -- by checking if there are causal parents associated with hc - parentChId <- liftQ (Q.saveCausalHash causalHash) - -- test if the parent has been saved previously: - ifM (liftQ . Q.isCausalHash $ Db.unCausalHashId parentChId) - (pure parentChId) - (do mcausal >>= fmap snd . saveBranch) - parentCausalHashIds -> pure parentCausalHashIds - - boId <- - liftQ (Q.loadBranchObjectIdByCausalHashId chId) >>= \case - Just boId -> pure boId - Nothing -> do - bhId <- liftQ (Q.saveBranchHash he) - (li, lBranch) <- c2lBranch =<< me - boId <- saveBranchObject bhId li lBranch - liftQ (Q.saveCausal chId bhId) - -- save the link between child and parents - liftQ (Q.saveCausalParents chId parentCausalHashIds) - pure boId + (chId, bhId) <- flip Monad.fromMaybeM (liftQ $ Q.loadCausalByCausalHash hc) do + -- if not exist, create these + chId <- liftQ (Q.saveCausalHash hc) + bhId <- liftQ (Q.saveBranchHash he) + liftQ (Q.saveCausal chId bhId) + -- save the link between child and parents + parentCausalHashIds <- + -- so try to save each parent (recursively) before continuing to save hc + for (Map.toList parents) $ \(parentHash, mcausal) -> + -- check if we can short circuit the parent before loading it, + -- by checking if there are causal parents associated with hc + (flip Monad.fromMaybeM) + (liftQ $ Q.loadCausalHashIdByCausalHash parentHash) + (mcausal >>= fmap snd . 
saveBranch) + unless (null parentCausalHashIds) $ + liftQ (Q.saveCausalParents chId parentCausalHashIds) + pure (chId, bhId) + boId <- flip Monad.fromMaybeM (liftQ $ Q.loadBranchObjectIdByCausalHashId chId) do + (li, lBranch) <- c2lBranch =<< me + saveBranchObject bhId li lBranch pure (boId, chId) where c2lBranch :: EDB m => C.Branch.Branch m -> m (BranchLocalIds, S.Branch.Full.LocalBranch) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index ead0f36e44..e8c01f9225 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -30,7 +30,7 @@ import Data.Functor ((<&>)) import qualified Data.List.Extra as List import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as Nel -import Data.Maybe (fromJust, isJust, fromMaybe) +import Data.Maybe (fromJust, fromMaybe) import Data.String (fromString) import Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) @@ -48,10 +48,11 @@ import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent import U.Codebase.WatchKind (WatchKind) import qualified U.Codebase.WatchKind as WatchKind +import qualified U.Util.Alternative as Alternative import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash -import UnliftIO (MonadUnliftIO, throwIO, try, withRunInIO, tryAny) +import UnliftIO (MonadUnliftIO, throwIO, try, tryAny, withRunInIO) -- * types @@ -169,8 +170,15 @@ saveBranchHash :: DB m => BranchHash -> m BranchHashId saveBranchHash = fmap BranchHashId . saveHashHash . unBranchHash loadCausalHashIdByCausalHash :: DB m => CausalHash -> m (Maybe CausalHashId) -loadCausalHashIdByCausalHash = - (fmap . fmap) CausalHashId . loadHashIdByHash . 
unCausalHash +loadCausalHashIdByCausalHash ch = runMaybeT do + hId <- MaybeT $ loadHashIdByHash (unCausalHash ch) + Alternative.whenM (isCausalHash hId) (CausalHashId hId) + +loadCausalByCausalHash :: DB m => CausalHash -> m (Maybe (CausalHashId, BranchHashId)) +loadCausalByCausalHash ch = runMaybeT do + hId <- MaybeT $ loadHashIdByHash (unCausalHash ch) + bhId <- MaybeT $ loadMaybeCausalValueHashId hId + pure (CausalHashId hId, bhId) expectHashIdByHash :: EDB m => Hash -> m HashId expectHashIdByHash h = loadHashIdByHash h >>= orError (UnknownHash h) @@ -330,9 +338,10 @@ loadMaybeCausalValueHashId id = |] isCausalHash :: DB m => HashId -> m Bool -isCausalHash = fmap isJust . loadMaybeCausalValueHashId +isCausalHash = queryOne . queryAtom sql . Only where sql = [here| + SELECT EXISTS (SELECT 1 FROM causal WHERE self_hash_id = ?) + |] --- todo: do a join here loadBranchObjectIdByCausalHashId :: EDB m => CausalHashId -> m (Maybe BranchObjectId) loadBranchObjectIdByCausalHashId id = queryAtom sql (Only id) where sql = [here| SELECT object_id FROM hash_object diff --git a/codebase2/util/U/Util/Alternative.hs b/codebase2/util/U/Util/Alternative.hs new file mode 100644 index 0000000000..c07601667f --- /dev/null +++ b/codebase2/util/U/Util/Alternative.hs @@ -0,0 +1,8 @@ +{-# LANGUAGE ApplicativeDo #-} + +module U.Util.Alternative where + +import Control.Applicative (Alternative (empty)) + +whenM :: (Monad m, Alternative m) => m Bool -> a -> m a +whenM m a = do b <- m; if b then pure a else empty \ No newline at end of file diff --git a/codebase2/util/unison-util.cabal b/codebase2/util/unison-util.cabal index bc0396ddd6..643db6633f 100644 --- a/codebase2/util/unison-util.cabal +++ b/codebase2/util/unison-util.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: 1ac1d0a054ed3772371a8196b4e0c33ae78a59ad5bcfc0961648f71c3d137ae6 +-- hash: eb553f8d44e26dc6e4cf34d13fe54b626164d7c5c60146b417e27d6ece43c03f name: unison-util version: 
0.0.0 @@ -18,6 +18,7 @@ source-repository head library exposed-modules: + U.Util.Alternative U.Util.Base32Hex U.Util.Cache U.Util.Components From 743578bb69063a56865bdabfbb244837e18f9267 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 14 Apr 2021 23:48:28 -0600 Subject: [PATCH 179/225] verify data version and hash before utilizing root branch cache --- .../U/Codebase/Sqlite/Operations.hs | 7 ++- .../U/Codebase/Sqlite/Queries.hs | 8 ++++ .../src/Unison/Codebase/SqliteCodebase.hs | 29 ++++++------ questions.md | 46 +++++++++++++++++-- 4 files changed, 72 insertions(+), 18 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index c928e4bb1e..afba17a98e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -237,8 +237,8 @@ loadValueHashByCausalHashId = loadValueHashById <=< liftQ . Q.loadCausalValueHas loadValueHashById :: EDB m => Db.BranchHashId -> m BranchHash loadValueHashById = fmap (BranchHash . H.fromBase32Hex) . liftQ . Q.loadHashById . 
Db.unBranchHashId --- loadRootCausalHash :: EDB m => m CausalHash --- loadRootCausalHash = loadCausalHashById =<< liftQ Q.loadNamespaceRoot +loadRootCausalHash :: EDB m => m CausalHash +loadRootCausalHash = loadCausalHashById =<< liftQ Q.loadNamespaceRoot -- * Reference transformations @@ -1028,6 +1028,9 @@ lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLoo loadRootCausal :: EDB m => m (C.Branch.Causal m) loadRootCausal = liftQ Q.loadNamespaceRoot >>= loadCausalByCausalHashId +dataVersion :: DB m => m Q.DataVersion +dataVersion = Q.dataVersion + loadCausalBranchByCausalHash :: EDB m => CausalHash -> m (Maybe (C.Branch.Causal m)) loadCausalBranchByCausalHash hc = do Q.loadCausalHashIdByCausalHash hc >>= \case diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index e8c01f9225..fb7e8da888 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -384,6 +384,14 @@ loadCausalParents h = queryAtoms sql (Only h) where sql = [here| SELECT parent_id FROM causal_parent WHERE causal_id = ? |] +newtype DataVersion = DataVersion Int + deriving (Eq, Ord, Show) + deriving FromField via Int +dataVersion :: DB m => m DataVersion +dataVersion = queryOne . fmap (fmap fromOnly) . 
fmap headMay $ query_ [here| + PRAGMA data_version + |] + loadNamespaceRoot :: EDB m => m CausalHashId loadNamespaceRoot = queryAtoms sql () >>= \case [] -> throwError NoNamespaceRoot diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index fef074dba1..f407b8f74c 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -241,7 +241,6 @@ sqliteCodebase root = do conn <- unsafeGetConnection root runReaderT Q.checkForMissingSchema conn >>= \case [] -> do - -- rootBranchCache <- newTVarIO Nothing -- The v1 codebase interface has operations to read and write individual definitions -- whereas the v2 codebase writes them as complete components. These two fields buffer @@ -423,23 +422,27 @@ sqliteCodebase root = do tryFlushDeclBuffer h ) - getRootBranch :: MonadIO m => TVar (Maybe (Branch m)) -> m (Either Codebase1.GetRootBranchError (Branch m)) + getRootBranch :: MonadIO m => TVar (Maybe (Q.DataVersion, Branch m)) -> m (Either Codebase1.GetRootBranchError (Branch m)) getRootBranch rootBranchCache = readTVarIO rootBranchCache >>= \case - Nothing -> do - b <- - fmap (Either.mapLeft err) + Nothing -> forceReload + Just (v, b) -> do + -- check to see if root namespace hash has been externally modified + -- and reload it if necessary + v' <- runDB conn Ops.dataVersion + if v == v' then pure (Right b) else do + newRootHash <- runDB conn Ops.loadRootCausalHash + if Branch.headHash b == Cv.hash2to1 newRootHash then pure (Right b) else forceReload + where + forceReload = do + b <- fmap (Either.mapLeft err) . runExceptT . flip runReaderT conn . fmap (Branch.transform (runDB conn)) $ Cv.causalbranch2to1 getCycleLen getDeclType =<< Ops.loadRootCausal - for_ b (atomically . writeTVar rootBranchCache . Just) + v <- runDB conn Ops.dataVersion + for_ b (atomically . writeTVar rootBranchCache . Just . 
(v,)) pure b - Just b -> do - -- todo: check to see if root namespace hash has been externally modified - -- and load it if necessary. But for now, we just return what's present in the cache. - pure (Right b) - where err :: Ops.Error -> Codebase1.GetRootBranchError err = \case Ops.DatabaseIntegrityError Q.NoNamespaceRoot -> @@ -451,7 +454,7 @@ sqliteCodebase root = do Codebase1.CouldntLoadRootBranch $ Cv.causalHash2to1 ch e -> error $ show e - putRootBranch :: MonadIO m => TVar (Maybe (Branch m)) -> Branch m -> m () + putRootBranch :: MonadIO m => TVar (Maybe (Q.DataVersion, Branch m)) -> Branch m -> m () putRootBranch rootBranchCache branch1 = do -- todo: check to see if root namespace hash has been externally modified -- and do something (merge?) it if necessary. But for now, we just overwrite it. @@ -460,7 +463,7 @@ sqliteCodebase root = do . Ops.saveRootBranch . Cv.causalbranch1to2 $ Branch.transform (lift . lift) branch1 - atomically $ writeTVar rootBranchCache (Just branch1) + atomically $ modifyTVar rootBranchCache (fmap . second $ const branch1) rootBranchUpdates :: MonadIO m => m (IO (), IO (Set Branch.Hash)) rootBranchUpdates = pure (cleanup, liftIO newRootsDiscovered) diff --git a/questions.md b/questions.md index 21f18d54f2..0764a826bc 100644 --- a/questions.md +++ b/questions.md @@ -16,10 +16,50 @@ next steps: - [x] `SqliteCodebase.syncToDirectory` - [ ] do I need to initialize a sqlite codebase in the destination? - [ ] Managing external edit events? - - [ ] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? + - [x] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? - https://www.sqlite.org/pragma.html#pragma_data_version - - https://user-images.githubusercontent.com/538571/111105100-8f0b4a80-8528-11eb-95f6-12bb906f315e.png -- [ ] consider using `causal` table to detect if a causal exists, instead of causal_parent? 
+ - https://user-images.githubusercontent.com/538571/111105100-8f0b4a80-8528-11eb-95f6-12bb906f315e. + - [ ] to get updates notifications, we could watch the sqlite file itself for changes, and check + png +- [x] consider using `causal` table to detect if a causal exists, instead of causal_parent? +- [ ] set-root-branch being called inappropriately on `pull` + +- [ ] weird error message? + C:\Users\arya\unison>stack exec unison + unison.EXE: SQLite3 returned ErrorCan'tOpen while attempting to perform open "C:\\Users\\arya\\.unison\\v2\\unison.sqlite3": unable to open database file + +- [ ] UnknownHashId (HashId 2179) + arya@jrrr unison % stack exec unison -- -codebase /tmp/getbase3 init + Initializing a new codebase in: /private/tmp/getbase3 + arya@jrrr unison % stack exec unison -- -codebase /tmp/getbase3 + + .> pull https://github.com/aryairani/base-v2 ._base + Importing downloaded files into local codebase... + unison: DatabaseIntegrityError (UnknownHashId (HashId 2179)) + CallStack (from HasCallStack): + error, called at src/Unison/Codebase/SqliteCodebase.hs:802:29 in unison-parser-typechecker-0.0.0-HGDWpz1IYHwGxjjnh5rX1K:Unison.Codebase.SqliteCodebase + arya@jrrr unison % stack exec unison -- -codebase /tmp/getbase3 + + .> pull https://github.com/aryairani/base-v2 ._base + + Nothing changed as a result of the merge. + + + 😶 + + ._base was already up-to-date with https://github.com/aryairani/base-v2. + + .> ls + + 1. releases/ (2438 definitions) + 2. series/ (1219 definitions) + 3. trunk/ (1652 definitions) + + .> history + + Note: The most recent namespace hash is immediately below this message. 
+ + ⊙ #agu597jbfn what even are these: From 5635edd9eb8186cc03f79f7b3cf697daa77dcbcf Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 15 Apr 2021 13:21:22 -0600 Subject: [PATCH 180/225] implement something untested for rootBranchUpdates --- .../src/Unison/Codebase/SqliteCodebase.hs | 55 ++++++++++++++++--- .../Codebase/SqliteCodebase/Conversions.hs | 3 + 2 files changed, 49 insertions(+), 9 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index f407b8f74c..4e780f3bb7 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -10,7 +10,7 @@ module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init, unsa import qualified Control.Concurrent import qualified Control.Exception -import Control.Monad (filterM, when, (>=>), unless) +import Control.Monad (filterM, when, (>=>), unless, forever) import Control.Monad.Except (ExceptT, MonadError (throwError), runExceptT) import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM, unlessM, (||^)) @@ -95,13 +95,22 @@ import Unison.Util.Timing (time) import UnliftIO (MonadIO, catchIO, liftIO) import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, doesFileExist) import UnliftIO.STM +import qualified Unison.Util.TQueue as TQueue +import qualified Unison.Codebase.Watch as Watch +import UnliftIO.Concurrent (forkIO, killThread) debug, debugProcessBranches :: Bool debug = False debugProcessBranches = False +codebasePath :: FilePath +codebasePath = ".unison" "v2" "unison.sqlite3" + +v2dir :: FilePath -> FilePath +v2dir root = root ".unison" "v2" + init :: HasCallStack => MonadIO m => Codebase.Init m Symbol Ann -init = Codebase.Init getCodebaseOrError createCodebaseOrError ( ".unison" "v2") +init = Codebase.Init getCodebaseOrError createCodebaseOrError v2dir createCodebaseOrError :: MonadIO m => 
@@ -142,9 +151,6 @@ createCodebaseOrError' path = do (runReaderT Q.createSchema) fmap (Either.mapLeft CreateCodebaseMissingSchema) (sqliteCodebase path) -codebasePath :: FilePath -codebasePath = ".unison" "v2" "unison.sqlite3" - -- get the codebase in dir getCodebaseOrError :: forall m. MonadIO m => CodebasePath -> m (Either Codebase1.Pretty (m (), Codebase m Symbol Ann)) getCodebaseOrError dir = do @@ -432,7 +438,7 @@ sqliteCodebase root = do v' <- runDB conn Ops.dataVersion if v == v' then pure (Right b) else do newRootHash <- runDB conn Ops.loadRootCausalHash - if Branch.headHash b == Cv.hash2to1 newRootHash then pure (Right b) else forceReload + if Branch.headHash b == Cv.branchHash2to1 newRootHash then pure (Right b) else forceReload where forceReload = do b <- fmap (Either.mapLeft err) @@ -465,8 +471,39 @@ sqliteCodebase root = do $ Branch.transform (lift . lift) branch1 atomically $ modifyTVar rootBranchCache (fmap . second $ const branch1) - rootBranchUpdates :: MonadIO m => m (IO (), IO (Set Branch.Hash)) - rootBranchUpdates = pure (cleanup, liftIO newRootsDiscovered) + rootBranchUpdates :: MonadIO m => TVar (Maybe (Q.DataVersion, a)) -> m (IO (), IO (Set Branch.Hash)) + rootBranchUpdates rootBranchCache = do + branchHeadChanges <- TQueue.newIO + (cancelWatch, watcher) <- Watch.watchDirectory' (v2dir root) + watcher1 <- + liftIO . forkIO + $ forever + $ do + -- void ignores the name and time of the changed file, + -- and assume 'unison.sqlite3' has changed + (filename, time) <- watcher + traceM $ "SqliteCodebase.watcher " ++ show (filename, time) + readTVarIO rootBranchCache >>= \case + Nothing -> pure () + Just (v, _) -> do + -- this use of `conn` in a separate thread may be problematic. + -- hopefully sqlite will produce an obvious error message if it is. + v' <- runDB conn Ops.dataVersion + if v /= v' then + atomically + . 
TQueue.enqueue branchHeadChanges =<< runDB conn Ops.loadRootCausalHash + else pure () + + -- case hashFromFilePath filePath of + -- Nothing -> failWith $ CantParseBranchHead filePath + -- Just h -> + -- atomically . TQueue.enqueue branchHeadChanges $ Branch.Hash h + -- smooth out intermediate queue + pure + ( cancelWatch >> killThread watcher1 + , Set.fromList <$> Watch.collectUntilPause branchHeadChanges 400000 + ) + pure (cleanup, liftIO newRootsDiscovered) where newRootsDiscovered = do Control.Concurrent.threadDelay maxBound -- hold off on returning @@ -767,7 +804,7 @@ sqliteCodebase root = do putTypeDeclaration (getRootBranch rootBranchCache) (putRootBranch rootBranchCache) - rootBranchUpdates + (rootBranchUpdates rootBranchCache) getBranchForHash putBranch isCausalHash diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index cb5e8c972e..71a27219e0 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -296,6 +296,9 @@ hash1to2 (V1.Hash bs) = V2.Hash.Hash (SBS.toShort bs) branchHash1to2 :: V1.Branch.Hash -> V2.CausalHash branchHash1to2 = V2.CausalHash . hash1to2 . V1.Causal.unRawHash +branchHash2to1 :: V2.CausalHash -> V1.Branch.Hash +branchHash2to1 = V1.Causal.RawHash . hash2to1 . V2.unCausalHash + patchHash1to2 :: V1.Branch.EditHash -> V2.PatchHash patchHash1to2 = V2.PatchHash . 
hash1to2 From 2851b704ce771d13c140acd5a59b8652bfa1bc46 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 15 Apr 2021 14:01:46 -0600 Subject: [PATCH 181/225] save initial empty branch after creating codebase schema --- codebase2/codebase/U/Codebase/Branch.hs | 2 +- .../src/Unison/Codebase/SqliteCodebase.hs | 8 +++- questions.md | 9 +++- .../transcripts/emptyCodebase.output.md | 41 +++++++++++++++++++ 4 files changed, 57 insertions(+), 3 deletions(-) create mode 100644 unison-src/transcripts/emptyCodebase.output.md diff --git a/codebase2/codebase/U/Codebase/Branch.hs b/codebase2/codebase/U/Codebase/Branch.hs index 0df4710ebc..dcbf73a040 100644 --- a/codebase2/codebase/U/Codebase/Branch.hs +++ b/codebase2/codebase/U/Codebase/Branch.hs @@ -33,7 +33,7 @@ data Patch = Patch } instance Show (Branch m) where - show b = "Branch { terms = " ++ show (fmap Map.keys (terms b)) ++ + show b = "Branch { terms = " ++ show (fmap Map.keys (terms b)) ++ ", types = " ++ show (fmap Map.keys (types b)) ++ ", patches = " ++ show (fmap fst (patches b)) ++ ", children = " ++ show (Map.keys (children b)) \ No newline at end of file diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 4e780f3bb7..f7af8bfbc5 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -148,7 +148,13 @@ createCodebaseOrError' path = do Control.Exception.bracket (unsafeGetConnection path) Sqlite.close - (runReaderT Q.createSchema) + (runReaderT do + Q.createSchema + runExceptT (void . 
Ops.saveRootBranch $ Cv.causalbranch1to2 Branch.empty) >>= \case + Left e -> error $ show e + Right () -> pure () + ) + fmap (Either.mapLeft CreateCodebaseMissingSchema) (sqliteCodebase path) -- get the codebase in dir diff --git a/questions.md b/questions.md index 0764a826bc..3d1bf3722b 100644 --- a/questions.md +++ b/questions.md @@ -22,9 +22,16 @@ next steps: - [ ] to get updates notifications, we could watch the sqlite file itself for changes, and check png - [x] consider using `causal` table to detect if a causal exists, instead of causal_parent? +- [x] no root found, for new codebase? + .> ls + + ❗️ + + I couldn't find the codebase root! + - [ ] set-root-branch being called inappropriately on `pull` -- [ ] weird error message? +- [ ] weird error message when codebase doesn't exist C:\Users\arya\unison>stack exec unison unison.EXE: SQLite3 returned ErrorCan'tOpen while attempting to perform open "C:\\Users\\arya\\.unison\\v2\\unison.sqlite3": unable to open database file diff --git a/unison-src/transcripts/emptyCodebase.output.md b/unison-src/transcripts/emptyCodebase.output.md new file mode 100644 index 0000000000..20b6aa0cd1 --- /dev/null +++ b/unison-src/transcripts/emptyCodebase.output.md @@ -0,0 +1,41 @@ +# The empty codebase + +The Unison codebase, when first initialized, contains no definitions in its namespace. + +Not even `Nat` or `+`! + +BEHOLD!!! + +```ucm +.> ls + + nothing to show + +``` +Technically, the definitions all exist, but they have no names. `builtins.merge` brings them into existence, under the current namespace: + +```ucm + ☝️ The namespace .foo is empty. + +.foo> builtins.merge + + Done. + +.foo> ls + + 1. builtin/ (335 definitions) + +``` +And for a limited time, you can get even more builtin goodies: + +```ucm +.foo> builtins.mergeio + + Done. + +.foo> ls + + 1. builtin/ (492 definitions) + +``` +More typically, you'd start out by pulling `base. 
From 1362cdfaa5729c75f32c93f0230f20164ee65e87 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 15 Apr 2021 14:05:45 -0600 Subject: [PATCH 182/225] removed accidental trace from transcripts --- parser-typechecker/unison/Main.hs | 1 - 1 file changed, 1 deletion(-) diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 4bc56853cc..64b18f9a3f 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -229,7 +229,6 @@ prepareTranscriptDir cbInit inFork mcodepath = do else do PT.putPrettyLn . P.wrap $ "Transcript will be run on a new, empty codebase." void $ Codebase.openNewUcmCodebaseOrExit cbInit tmp - traceM $ "Copying codebase to " ++ tmp pure tmp runTranscripts' From 3f3d74d709c0c506d9695e0e479353ca6c58cb4b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 15 Apr 2021 16:27:09 -0600 Subject: [PATCH 183/225] move setNamespaceRoot call out of syncToDir and into git push --- parser-typechecker/src/Unison/Codebase.hs | 4 +-- .../src/Unison/Codebase/SqliteCodebase.hs | 29 ++++++++++--------- questions.md | 4 +-- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index 947f2e5aca..ee8d611aeb 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -80,11 +80,9 @@ data Codebase m v a = , patchExists :: Branch.EditHash -> m Bool , dependentsImpl :: Reference -> m (Set Reference.Id) - -- This copies all the dependencies of `b` from the specified - -- FileCodebase into this Codebase, and sets our root branch to `b` + -- This copies all the dependencies of `b` from the specified Codebase into this one , syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m () -- This copies all the dependencies of `b` from the this Codebase - -- into the specified FileCodebase, and sets its _head to `b` , syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m () 
, viewRemoteBranch' :: RemoteNamespace -> m (Either GitError (Branch m, CodebasePath)) , pushGitRootBranch :: Branch m -> RemoteRepo -> SyncMode -> m (Either GitError ()) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index f7af8bfbc5..7ccb1a7383 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -696,10 +696,10 @@ sqliteCodebase root = do SyncMode -> Branch m -> m () - syncToDirectory' progress srcPath destPath _mode newRoot = do + syncToDirectory' progress srcPath destPath _mode b = do result <- runExceptT do initSchemaIfNotExist destPath - syncEnv@(Sync22.Env srcConn destConn _) <- + syncEnv@(Sync22.Env srcConn _ _) <- Sync22.Env <$> unsafeGetConnection srcPath <*> unsafeGetConnection destPath @@ -766,16 +766,8 @@ sqliteCodebase root = do processBranches sync progress src dest rest sync <- se . r $ Sync22.sync22 let progress' = Sync.transformProgress (lift . lift) progress - newRootHash = Branch.headHash newRoot - newRootHash2 = Cv.causalHash1to2 newRootHash - se $ processBranches sync progress' src dest [B newRootHash (pure newRoot)] - -- set the root namespace - flip runReaderT destConn $ do - chId <- - (Q.loadCausalHashIdByCausalHash newRootHash2) >>= \case - Nothing -> Except.throwError $ SyncEphemeral.DisappearingBranch newRootHash2 - Just chId -> pure chId - Q.setNamespaceRoot chId + bHash = Branch.headHash b + se $ processBranches sync progress' src dest [B bHash (pure b)] lift closeSrc lift closeDest pure $ Validation.valueOr (error . 
show) result @@ -985,10 +977,21 @@ pushGitRootBranch syncToDirectory branch repo syncMode = runExceptT do (stageAndPush remotePath) (throwError $ GitError.PushDestinationHasNewStuff repo) where + -- | this will bomb if `h` is not a causal in the codebase + setRepoRoot :: MonadIO m => CodebasePath -> Branch.Hash -> m () + setRepoRoot root h = do + conn <- unsafeGetConnection root + let h2 = Cv.causalHash1to2 h + err = error "Called SqliteCodebase.setNamespaceRoot on unknown causal hash" + flip runReaderT conn $ do + chId <- fromMaybe err <$> Q.loadCausalHashIdByCausalHash h2 + Q.setNamespaceRoot chId + stageAndPush remotePath = do let repoString = Text.unpack $ printRepo repo - withStatus ("Staging files for upload to " ++ repoString ++ " ...") $ + withStatus ("Staging files for upload to " ++ repoString ++ " ...") do lift (syncToDirectory remotePath syncMode branch) + setRepoRoot remotePath (Branch.headHash branch) -- push staging area to remote withStatus ("Uploading to " ++ repoString ++ " ...") $ unlessM diff --git a/questions.md b/questions.md index 3d1bf3722b..bd3c071e45 100644 --- a/questions.md +++ b/questions.md @@ -28,9 +28,7 @@ next steps: ❗️ I couldn't find the codebase root! 
- -- [ ] set-root-branch being called inappropriately on `pull` - +- [x] set-root-branch being called inappropriately on `pull` - [ ] weird error message when codebase doesn't exist C:\Users\arya\unison>stack exec unison unison.EXE: SQLite3 returned ErrorCan'tOpen while attempting to perform open "C:\\Users\\arya\\.unison\\v2\\unison.sqlite3": unable to open database file From 401b938a5023363b2863addc16138166386560a3 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 15 Apr 2021 16:38:29 -0600 Subject: [PATCH 184/225] fix typo --- parser-typechecker/src/Unison/Codebase.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs index ee8d611aeb..9e3aa425c2 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -82,7 +82,7 @@ data Codebase m v a = , dependentsImpl :: Reference -> m (Set Reference.Id) -- This copies all the dependencies of `b` from the specified Codebase into this one , syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m () - -- This copies all the dependencies of `b` from the this Codebase + -- This copies all the dependencies of `b` from this Codebase , syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m () , viewRemoteBranch' :: RemoteNamespace -> m (Either GitError (Branch m, CodebasePath)) , pushGitRootBranch :: Branch m -> RemoteRepo -> SyncMode -> m (Either GitError ()) From 2b5708d0133bbf3b185dc4737d6b8bdc063ebad8 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 21 Apr 2021 13:38:44 -0600 Subject: [PATCH 185/225] add more Queries logging options --- .../U/Codebase/Sqlite/Queries.hs | 39 ++++++++++++++----- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index fb7e8da888..96711b5dbb 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs 
+++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -53,6 +53,8 @@ import U.Util.Base32Hex (Base32Hex (..)) import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, throwIO, try, tryAny, withRunInIO) +import UnliftIO.Concurrent (myThreadId) +import qualified Control.Monad.Writer as Writer -- * types @@ -62,8 +64,10 @@ type EDB m = (DB m, Err m) type Err m = (MonadError Integrity m, HasCallStack) -debugQuery :: Bool +debugQuery, debugThread, debugConnection :: Bool debugQuery = False +debugThread = False +debugConnection = False alwaysTraceOnCrash :: Bool alwaysTraceOnCrash = True @@ -625,25 +629,33 @@ queryExists q r = not . null . map (id @SQLData) <$> queryAtoms q r query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] query q r = do c <- ask - liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " query") q r $ SQLite.query c q r + header <- debugHeader + liftIO . queryTrace (header ++ " query") q r $ SQLite.query c q r -- | no input, composite List output query_ :: (DB m, FromRow r, Show r) => SQLite.Query -> m [r] query_ q = do c <- ask - liftIO . queryTrace_ (show (SQLite.connectionHandle c) ++ " query") q $ SQLite.query_ c q + header <- debugHeader + liftIO . queryTrace_ (header ++ " query") q $ SQLite.query_ c q + +debugHeader :: DB m => m String +debugHeader = fmap (List.intercalate ", ") $ Writer.execWriterT do + when debugThread $ Writer.tell . pure . show =<< myThreadId + when debugConnection $ Writer.tell . pure . show . SQLite.connectionHandle =<< ask queryTrace :: (MonadUnliftIO m, Show q, Show a) => String -> SQLite.Query -> q -> m a -> m a -queryTrace title query input m = +queryTrace title query input m = do + let showInput = title ++ " " ++ show query ++ "\n input: " ++ show input if debugQuery || alwaysTraceOnCrash then do try @_ @SQLite.SQLError m >>= \case Right a -> do - when debugQuery . 
traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n output: " ++ show a + when debugQuery . traceM $ showInput ++ "\n output: " ++ show a pure a Left e -> do - traceM $ title ++ " " ++ show query ++ "\n input: " ++ show input ++ "\n(and crashed)\n" + traceM $ showInput ++ "\n(and crashed)\n" throwIO e else m @@ -661,13 +673,22 @@ queryTrace_ title query m = else m execute :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m () -execute q r = do c <- ask; liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " " ++ "execute") q r $ SQLite.execute c q r +execute q r = do + c <- ask + header <- debugHeader + liftIO . queryTrace (header ++ " " ++ "execute") q r $ SQLite.execute c q r execute_ :: DB m => SQLite.Query -> m () -execute_ q = do c <- ask; liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " " ++ "execute_") q "" $ SQLite.execute_ c q +execute_ q = do + c <- ask + header <- debugHeader + liftIO . queryTrace_ (header ++ " " ++ "execute_") q $ SQLite.execute_ c q executeMany :: (DB m, ToRow q, Show q) => SQLite.Query -> [q] -> m () -executeMany q r = do c <- ask; liftIO . queryTrace (show (SQLite.connectionHandle c) ++ " " ++ "executeMany") q r $ SQLite.executeMany c q r +executeMany q r = do + c <- ask + header <- debugHeader + liftIO . 
queryTrace (header ++ " " ++ "executeMany") q r $ SQLite.executeMany c q r -- | transaction that blocks withImmediateTransaction :: (DB m, MonadUnliftIO m) => m a -> m a From 91020f526d0ffa10b4af6c97cda1464ee8e53b8c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 21 Apr 2021 13:53:47 -0600 Subject: [PATCH 186/225] remove explicit db connection cleanup actions while we come up with a better design --- .../Unison/Codebase/Conversion/Upgrade12.hs | 7 +-- .../src/Unison/Codebase/FileCodebase.hs | 8 ++-- .../src/Unison/Codebase/Init.hs | 14 +++--- .../src/Unison/Codebase/SqliteCodebase.hs | 45 +++++++++---------- parser-typechecker/tests/Unison/Test/Git.hs | 33 +++++--------- parser-typechecker/tests/Unison/Test/IO.hs | 12 +++-- parser-typechecker/tests/Unison/Test/Ucm.hs | 3 +- parser-typechecker/unison/Main.hs | 15 +++---- 8 files changed, 56 insertions(+), 81 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs index 10253da988..d241243872 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs @@ -27,8 +27,8 @@ import UnliftIO (MonadIO, liftIO) upgradeCodebase :: forall m. (MonadIO m, MonadCatch m) => CodebasePath -> m () upgradeCodebase root = do either (liftIO . 
CT.putPrettyLn) pure =<< runExceptT do - (cleanupSrc, srcCB) <- ExceptT $ Codebase.openCodebase FC.init root - (cleanupDest, destCB) <- ExceptT $ Codebase.createCodebase SC.init root + srcCB <- ExceptT $ Codebase.openCodebase FC.init root + destCB <- ExceptT $ Codebase.createCodebase SC.init root destDB <- SC.unsafeGetConnection root let env = Sync12.Env srcCB destCB destDB let initialState = (Sync12.emptyDoneCount, Sync12.emptyErrorCount, Sync12.emptyStatus) @@ -45,9 +45,6 @@ upgradeCodebase root = do case rootEntity of Sync12.C _h mc -> lift $ Codebase.putRootBranch destCB =<< Branch <$> mc _ -> error "The root wasn't a causal?" - - lift cleanupSrc - lift cleanupDest pure () where diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index d18f82d806..1371eeb677 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -99,26 +99,26 @@ init = Codebase.Init ( Common.codebasePath) -- get the codebase in dir -openCodebase :: forall m. (MonadIO m, MonadCatch m) => CodebasePath -> m (Either Codebase.Pretty (m (), Codebase m Symbol Ann)) +openCodebase :: forall m. (MonadIO m, MonadCatch m) => CodebasePath -> m (Either Codebase.Pretty (Codebase m Symbol Ann)) openCodebase dir = do prettyDir <- liftIO $ P.string <$> canonicalizePath dir let theCodebase = codebase1 @m @Symbol @Ann Cache.nullCache V1.formatSymbol formatAnn dir ifM (codebaseExists dir) - (Right . (pure (),) <$> theCodebase) + (Right <$> theCodebase) (pure . Left $ "No FileCodebase structure found at " <> prettyDir) createCodebase :: forall m. 
(MonadIO m, MonadCatch m) => CodebasePath -> - m (Either Codebase.CreateCodebaseError (m (), Codebase m Symbol Ann)) + m (Either Codebase.CreateCodebaseError (Codebase m Symbol Ann)) createCodebase dir = ifM (codebaseExists dir) (pure $ Left Codebase.CreateCodebaseAlreadyExists) (do codebase <- codebase1 @m @Symbol @Ann Cache.nullCache V1.formatSymbol formatAnn dir Codebase.putRootBranch codebase Branch.empty - pure $ Right (pure (), codebase)) + pure $ Right codebase) -- builds a `Codebase IO v a`, given serializers for `v` and `a` codebase1 diff --git a/parser-typechecker/src/Unison/Codebase/Init.hs b/parser-typechecker/src/Unison/Codebase/Init.hs index d4a7e9154f..35737a5e37 100644 --- a/parser-typechecker/src/Unison/Codebase/Init.hs +++ b/parser-typechecker/src/Unison/Codebase/Init.hs @@ -23,15 +23,15 @@ data CreateCodebaseError data Init m v a = Init { -- | open an existing codebase - openCodebase :: CodebasePath -> m (Either Pretty (m (), Codebase m v a)), + openCodebase :: CodebasePath -> m (Either Pretty (Codebase m v a)), -- | create a new codebase - createCodebase' :: CodebasePath -> m (Either CreateCodebaseError (m (), Codebase m v a)), + createCodebase' :: CodebasePath -> m (Either CreateCodebaseError (Codebase m v a)), -- | given a codebase root, and given that the codebase root may have other junk in it, -- give the path to the "actual" files; e.g. what a forked transcript should clone. codebasePath :: CodebasePath -> CodebasePath } -createCodebase :: MonadIO m => Init m v a -> CodebasePath -> m (Either Pretty (m (), Codebase m v a)) +createCodebase :: MonadIO m => Init m v a -> CodebasePath -> m (Either Pretty (Codebase m v a)) createCodebase cbInit path = do prettyDir <- P.string <$> canonicalizePath path createCodebase' cbInit path <&> mapLeft \case @@ -50,7 +50,7 @@ createCodebase cbInit path = do -- * compatibility stuff -- | load an existing codebase or exit. 
-getCodebaseOrExit :: MonadIO m => Init m v a -> Maybe CodebasePath -> m (m (), Codebase m v a) +getCodebaseOrExit :: MonadIO m => Init m v a -> Maybe CodebasePath -> m (Codebase m v a) getCodebaseOrExit init mdir = do dir <- Codebase.getCodebaseDir mdir openCodebase init dir >>= \case @@ -81,19 +81,19 @@ getCodebaseOrExit init mdir = do -- previously: initCodebaseOrExit :: CodebasePath -> m (m (), Codebase m v a) -- previously: FileCodebase.initCodebase :: CodebasePath -> m (m (), Codebase m v a) -openNewUcmCodebaseOrExit :: MonadIO m => Init m Symbol Ann -> CodebasePath -> m (m (), Codebase m Symbol Ann) +openNewUcmCodebaseOrExit :: MonadIO m => Init m Symbol Ann -> CodebasePath -> m (Codebase m Symbol Ann) openNewUcmCodebaseOrExit cbInit path = do prettyDir <- P.string <$> canonicalizePath path createCodebase cbInit path >>= \case Left error -> liftIO $ PT.putPrettyLn' error >> exitFailure - Right x@(_, codebase) -> do + Right codebase -> do liftIO $ PT.putPrettyLn' . P.wrap $ "Initializing a new codebase in: " <> prettyDir Codebase.installUcmDependencies codebase - pure x + pure codebase -- | try to init a codebase where none exists and then exit regardless (i.e. 
`ucm -codebase dir init`) initCodebaseAndExit :: MonadIO m => Init m Symbol Ann -> Maybe CodebasePath -> m () diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 7ccb1a7383..74e781a8f3 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -115,7 +115,7 @@ init = Codebase.Init getCodebaseOrError createCodebaseOrError v2dir createCodebaseOrError :: MonadIO m => CodebasePath -> - m (Either Codebase1.CreateCodebaseError (m (), Codebase m Symbol Ann)) + m (Either Codebase1.CreateCodebaseError (Codebase m Symbol Ann)) createCodebaseOrError dir = do prettyDir <- P.string <$> canonicalizePath dir let convertError = \case @@ -137,7 +137,7 @@ data CreateCodebaseError createCodebaseOrError' :: MonadIO m => CodebasePath -> - m (Either CreateCodebaseError (m (), Codebase m Symbol Ann)) + m (Either CreateCodebaseError (Codebase m Symbol Ann)) createCodebaseOrError' path = do ifM (doesFileExist $ path codebasePath) @@ -158,7 +158,7 @@ createCodebaseOrError' path = do fmap (Either.mapLeft CreateCodebaseMissingSchema) (sqliteCodebase path) -- get the codebase in dir -getCodebaseOrError :: forall m. MonadIO m => CodebasePath -> m (Either Codebase1.Pretty (m (), Codebase m Symbol Ann)) +getCodebaseOrError :: forall m. 
MonadIO m => CodebasePath -> m (Either Codebase1.Pretty (Codebase m Symbol Ann)) getCodebaseOrError dir = do prettyDir <- liftIO $ P.string <$> canonicalizePath dir let prettyError :: [(Q.SchemaType, Q.SchemaName)] -> String @@ -185,7 +185,7 @@ codebaseExists root = liftIO do Control.Exception.catch @Sqlite.SQLError ( sqliteCodebase root >>= \case Left _ -> pure False - Right (close, _codebase) -> close >> pure True + Right _ -> pure True ) (const $ pure False) @@ -247,7 +247,7 @@ unsafeGetConnection root = do runReaderT Q.setFlags conn pure conn -sqliteCodebase :: MonadIO m => CodebasePath -> m (Either [(Q.SchemaType, Q.SchemaName)] (m (), Codebase m Symbol Ann)) +sqliteCodebase :: MonadIO m => CodebasePath -> m (Either [(Q.SchemaType, Q.SchemaName)] (Codebase m Symbol Ann)) sqliteCodebase root = do Monad.when debug $ traceM $ "sqliteCodebase " ++ root conn <- unsafeGetConnection root @@ -704,10 +704,10 @@ sqliteCodebase root = do <$> unsafeGetConnection srcPath <*> unsafeGetConnection destPath <*> pure (16 * 1024 * 1024) - (closeSrc, src) <- + src <- lift (sqliteCodebase srcPath) >>= Except.liftEither . Either.mapLeft SyncEphemeral.SrcMissingSchema - (closeDest, dest) <- + dest <- lift (sqliteCodebase destPath) >>= Except.liftEither . Either.mapLeft SyncEphemeral.DestMissingSchema -- we want to use sync22 wherever possible @@ -768,8 +768,6 @@ sqliteCodebase root = do let progress' = Sync.transformProgress (lift . lift) progress bHash = Branch.headHash b se $ processBranches sync progress' src dest [B bHash (pure b)] - lift closeSrc - lift closeDest pure $ Validation.valueOr (error . show) result -- Do we want to include causal hashes here or just namespace hashes? @@ -779,22 +777,20 @@ sqliteCodebase root = do -- primarily with commit hashes. -- Arya leaning towards doing the same for Unison. 
- let finalizer :: MonadIO m => m () - finalizer = do - liftIO $ Sqlite.close conn - decls <- readTVarIO declBuffer - terms <- readTVarIO termBuffer - let printBuffer header b = - liftIO - if b /= mempty - then putStrLn header >> putStrLn "" >> print b - else pure () - printBuffer "Decls:" decls - printBuffer "Terms:" terms + -- let finalizer :: MonadIO m => m () + -- finalizer = do + -- decls <- readTVarIO declBuffer + -- terms <- readTVarIO termBuffer + -- let printBuffer header b = + -- liftIO + -- if b /= mempty + -- then putStrLn header >> putStrLn "" >> print b + -- else pure () + -- printBuffer "Decls:" decls + -- printBuffer "Terms:" terms pure . Right $ - ( finalizer, - Codebase1.Codebase + ( Codebase1.Codebase getTerm getTypeOfTermImpl getTypeDeclaration @@ -926,7 +922,7 @@ viewRemoteBranch' (repo, sbh, path) = runExceptT do ifM (codebaseExists remotePath) ( do - (closeCodebase, codebase) <- + codebase <- lift (sqliteCodebase remotePath) >>= Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . fmap pure -- try to load the requested branch from it @@ -950,7 +946,6 @@ viewRemoteBranch' (repo, sbh, path) = runExceptT do Just b -> pure b Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions - lift closeCodebase pure (Branch.getAt' path branch, remotePath) ) -- else there's no initialized codebase at this repo; we pretend there's an empty one. diff --git a/parser-typechecker/tests/Unison/Test/Git.hs b/parser-typechecker/tests/Unison/Test/Git.hs index b58bb62774..0353faad4b 100644 --- a/parser-typechecker/tests/Unison/Test/Git.hs +++ b/parser-typechecker/tests/Unison/Test/Git.hs @@ -56,7 +56,7 @@ syncComplete = scope "syncComplete" $ do observe title expectation files = scope title . 
for_ files $ \path -> scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expectation - (_, cleanup, codebase) <- io $ initCodebase tmp "codebase" + (_, codebase) <- io $ initCodebase tmp "codebase" runTranscript_ tmp codebase [i| ```ucm:hide @@ -100,9 +100,7 @@ syncComplete = scope "syncComplete" $ do observe "complete" expect files -- if we haven't crashed, clean up! - io do - cleanup - removeDirectoryRecursive tmp + io $ removeDirectoryRecursive tmp where files = @@ -117,7 +115,7 @@ syncTestResults = scope "syncTestResults" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "syncTestResults" targetDir <- io $ Temp.createTempDirectory tmp "target" - (_, cleanup, codebase) <- io $ initCodebase tmp "codebase" + (_, codebase) <- io $ initCodebase tmp "codebase" runTranscript_ tmp codebase [i| ```ucm @@ -151,9 +149,7 @@ test> tests.x = [Ok "Great!"] scope (makeTitle path) $ io (doesFileExist $ targetDir path) >>= expect -- if we haven't crashed, clean up! - io do - cleanup - removeDirectoryRecursive tmp + io $ removeDirectoryRecursive tmp where targetShouldHave = [ ".unison/v1/paths/0bnfrk7cu44q0vvaj7a0osl90huv6nj01nkukplcsbgn3i09h6ggbthhrorm01gpqc088673nom2i491fh9rtbqcc6oud6iqq6oam88.ub" @@ -174,8 +170,8 @@ testPull = scope "pull" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-pull" -- initialize author and user codebases - (_authorDir, cleanupAuthor, authorCodebase) <- io $ initCodebase tmp "author" - (userDir, cleanupUser, userCodebase) <- io $ initCodebase tmp "user" + (_authorDir, authorCodebase) <- io $ initCodebase tmp "author" + (userDir, userCodebase) <- io $ initCodebase tmp "user" -- initialize git repo let repo = tmp "repo.git" @@ -230,10 +226,7 @@ testPull = scope "pull" $ do scope (makeTitle path) $ io (doesFileExist $ userDir path) >>= expect . not -- if we haven't crashed, clean up! 
- io $ do - cleanupAuthor - cleanupUser - removeDirectoryRecursive tmp + io $ removeDirectoryRecursive tmp where gitShouldHave = userShouldHave ++ userShouldNotHave @@ -295,11 +288,11 @@ testPull = scope "pull" $ do ] -- initialize a fresh codebase -initCodebase :: FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) +initCodebase :: FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) initCodebase tmpDir name = do let codebaseDir = tmpDir name - (cleanup, c) <- Codebase.openNewUcmCodebaseOrExit FC.init codebaseDir - pure (codebaseDir, cleanup, c) + c <- Codebase.openNewUcmCodebaseOrExit FC.init codebaseDir + pure (codebaseDir, c) -- run a transcript on an existing codebase runTranscript_ :: MonadIO m => FilePath -> Codebase IO Symbol Ann -> String -> m () @@ -323,7 +316,7 @@ testPush = scope "push" $ do tmp <- io $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory "git-push" -- initialize a fresh codebase named "c" - (codebasePath, cleanup, c) <- io $ initCodebase tmp "c" + (codebasePath, c) <- io $ initCodebase tmp "c" -- Run the "setup transcript" to do the adds and updates; everything short of -- pushing. @@ -352,9 +345,7 @@ testPush = scope "push" $ do io (fmap not . doesFileExist $ tmp implName path) >>= expect -- if we haven't crashed, clean up! 
- io do - cleanup - removeDirectoryRecursive tmp + io $ removeDirectoryRecursive tmp where setupTranscript = [i| diff --git a/parser-typechecker/tests/Unison/Test/IO.hs b/parser-typechecker/tests/Unison/Test/IO.hs index 5072033576..67548be58f 100644 --- a/parser-typechecker/tests/Unison/Test/IO.hs +++ b/parser-typechecker/tests/Unison/Test/IO.hs @@ -79,11 +79,11 @@ main = 'let -- * Utilities -initCodebase :: Codebase.Init IO Symbol Ann -> FilePath -> String -> IO (CodebasePath, IO (), Codebase IO Symbol Ann) +initCodebase :: Codebase.Init IO Symbol Ann -> FilePath -> String -> IO (CodebasePath, Codebase IO Symbol Ann) initCodebase cbInit tmpDir name = do let codebaseDir = tmpDir name - (finalize, c) <- Codebase.openNewUcmCodebaseOrExit cbInit codebaseDir - pure (codebaseDir, finalize, c) + c <- Codebase.openNewUcmCodebaseOrExit cbInit codebaseDir + pure (codebaseDir, c) -- run a transcript on an existing codebase runTranscript_ @@ -107,8 +107,6 @@ runTranscript_ newRt tmpDir c transcript = do withScopeAndTempDir :: String -> (FilePath -> Codebase IO Symbol Ann -> Test ()) -> Test () withScopeAndTempDir name body = scope name $ do tmp <- liftIO $ Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory name - (_, closeCodebase, codebase) <- liftIO $ initCodebase FileCodebase.init tmp "user" + (_, codebase) <- liftIO $ initCodebase FileCodebase.init tmp "user" body tmp codebase - liftIO do - closeCodebase - removeDirectoryRecursive tmp + liftIO $ removeDirectoryRecursive tmp diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index 7adc1c6509..0049779208 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -78,7 +78,7 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do pure $ tmpDir ".unisonConfig" let err err = fail $ "Parse error: \n" <> show err cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init - 
(closeCodebase, codebase) <- + codebase <- Codebase.Init.openCodebase cbInit codebasePath >>= \case Left e -> fail $ P.toANSI 80 e Right x -> pure x @@ -93,6 +93,5 @@ runTranscript (Codebase codebasePath fmt) rt transcript = do configFile stanzas codebase - closeCodebase when debugTranscriptOutput $ traceM output pure output diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 64b18f9a3f..70b8cc7f38 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -161,42 +161,38 @@ main = do Exit.die "Your .unisonConfig could not be loaded. Check that it's correct!" case restargs of [] -> do - (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath + theCodebase <- Codebase.getCodebaseOrExit cbInit mcodepath Server.start theCodebase $ \token port -> do PT.putPrettyLn . P.string $ "I've started a codebase API server at " PT.putPrettyLn . P.string $ "http://127.0.0.1:" <> show port <> "?" <> URI.encode (unpack token) launch currentDir mNewRun config theCodebase [] - closeCodebase [version] | isFlag "version" version -> putStrLn $ progName ++ " version: " ++ Version.gitDescribe [help] | isFlag "help" help -> PT.putPrettyLn (usage progName) ["init"] -> Codebase.initCodebaseAndExit cbInit mcodepath "run" : [mainName] -> do - (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath + theCodebase <- Codebase.getCodebaseOrExit cbInit mcodepath runtime <- join . getStartRuntime mNewRun $ fst config execute theCodebase runtime mainName - closeCodebase "run.file" : file : [mainName] | isDotU file -> do e <- safeReadUtf8 file case e of Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I couldn't find that file or it is for some reason unreadable." 
Right contents -> do - (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath + theCodebase <- Codebase.getCodebaseOrExit cbInit mcodepath let fileEvent = Input.UnisonFileChanged (Text.pack file) contents launch currentDir mNewRun config theCodebase [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] - closeCodebase "run.pipe" : [mainName] -> do e <- safeReadUtf8StdIn case e of Left _ -> PT.putPrettyLn $ P.callout "⚠️" "I had trouble reading this input." Right contents -> do - (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit mcodepath + theCodebase <- Codebase.getCodebaseOrExit cbInit mcodepath let fileEvent = Input.UnisonFileChanged (Text.pack "") contents launch currentDir mNewRun config theCodebase [Left fileEvent, Right $ Input.ExecuteI mainName, Right Input.QuitI] - closeCodebase "transcript" : args' -> case args' of "-save-codebase" : transcripts -> runTranscripts mNewRun cbInit False True mcodepath transcripts @@ -253,9 +249,8 @@ runTranscripts' mNewRun cbInit mcodepath transcriptDir args = do P.indentN 2 $ P.string err]) Right stanzas -> do configFilePath <- getConfigFilePath mcodepath - (closeCodebase, theCodebase) <- Codebase.getCodebaseOrExit cbInit $ Just transcriptDir + theCodebase <- Codebase.getCodebaseOrExit cbInit $ Just transcriptDir mdOut <- TR.run mNewRun transcriptDir configFilePath stanzas theCodebase - closeCodebase let out = currentDir FP. 
FP.addExtension (FP.dropExtension arg ++ ".output") (FP.takeExtension md) From 1900f20138da0ec8da6cee0f259e0dc34d47a7ef Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 21 Apr 2021 14:04:24 -0600 Subject: [PATCH 187/225] copy upgrade12 tests to gitsimple also --- .../tests/Unison/Test/GitSimple.hs | 164 ++++++++++++++++++ 1 file changed, 164 insertions(+) diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 1a0e06cbfe..06e9f7a26c 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -24,6 +24,170 @@ test :: Test () test = scope "git-simple" . tests $ flip map [(Ucm.CodebaseFormat1 , "fc"), (Ucm.CodebaseFormat2, "sc")] \(fmt, name) -> scope name $ tests [ + pushPullTest "typeAlias" fmt + (\repo -> [i| + ```ucm + .> alias.type ##Nat builtin.Nat + .> history + .> history builtin + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + ``` + ```unison + x : Nat + x = 3 + ``` + |]) + , + pushPullTest "topLevelTerm" fmt + (\repo -> [i| + ```unison:hide + y = 3 + ``` + ```ucm + .> add + .> history + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + .> find + ``` + ```unison + > y + ``` + |]) + , + pushPullTest "metadataForTerm" fmt + (\repo -> [i| + ```unison:hide + doc = "y is the number 3" + y = 3 + ``` + ```ucm + .> debug.file + .> add + .> link doc y + .> links y + .> history + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + .> links y + ``` + |]) + , + pushPullTest "metadataForType" fmt + (\repo -> [i| + ```unison:hide + doc = "Nat means natural number" + ``` + ```ucm + .> add + .> alias.type ##Nat Nat + .> link doc Nat + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + .> links Nat + ``` + |]) + , + pushPullTest "subNamespace" fmt + (\repo -> [i| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + ```unison + unique type a.b.C = C Nat + ``` 
+ ```ucm + .> add + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + .> find + ``` + ```unison + > a.b.C.C 3 + ``` + |]) + , + pushPullTest "accessPatch" fmt + (\repo -> [i| + ```ucm + .> alias.type ##Nat builtin.Nat + ``` + ```unison:hide + unique type A = A Nat + foo = A.A 3 + ``` + ```ucm + .> debug.file + .> add + ``` + ```unison:hide + unique type A = A Nat Nat + foo = A.A 3 3 + ``` + ```ucm + .> debug.file + .> update + ``` + ```ucm + .> view.patch patch + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + .> view.patch patch + ``` + |]) + , + pushPullTest "history" fmt + (\repo -> [i| + ```unison + foo = 3 + ``` + ```ucm + .> add + ``` + ```unison + foo = 4 + ``` + ```ucm + .> update + .> history + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + .> history + .> reset-root #ls8 + .> history + ``` + |]) + , + pushPullTest "one-term" fmt -- simplest-author (\repo -> [i| From 722ae5ae1dc9b7ccbbe77728e9eebc05adc2bb6f Mon Sep 17 00:00:00 2001 From: Mitchell Rosen Date: Sun, 25 Apr 2021 20:57:20 -0400 Subject: [PATCH 188/225] indent call to launch --- parser-typechecker/unison/Main.hs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 8fc71a9f07..eb8234fd0b 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -166,8 +166,8 @@ main = do PT.putPrettyLn . P.string $ "I've started a codebase API server at " PT.putPrettyLn . P.string $ "http://127.0.0.1:" <> show port <> "?" <> URI.encode (unpack token) - PT.putPrettyLn' . P.string $ "Now starting the Unison Codebase Manager..." - launch currentDir mNewRun config theCodebase [] + PT.putPrettyLn' . P.string $ "Now starting the Unison Codebase Manager..." 
+ launch currentDir mNewRun config theCodebase [] [version] | isFlag "version" version -> putStrLn $ progName ++ " version: " ++ Version.gitDescribe [help] | isFlag "help" help -> PT.putPrettyLn (usage progName) From a42ef1aa40b8ce0cb909d70dfa2da748fc1f393c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 26 Apr 2021 22:22:12 -0600 Subject: [PATCH 189/225] add traceConnectionFile option --- .../U/Codebase/Sqlite/Queries.hs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 96711b5dbb..a352d37e3d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -55,6 +55,8 @@ import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, throwIO, try, tryAny, withRunInIO) import UnliftIO.Concurrent (myThreadId) import qualified Control.Monad.Writer as Writer +import Data.Functor (void) +import qualified Data.Text as Text -- * types @@ -64,10 +66,11 @@ type EDB m = (DB m, Err m) type Err m = (MonadError Integrity m, HasCallStack) -debugQuery, debugThread, debugConnection :: Bool debugQuery = False debugThread = False debugConnection = False +debugQuery, debugThread, debugConnection, debugFile :: Bool +debugFile = False alwaysTraceOnCrash :: Bool alwaysTraceOnCrash = True @@ -630,6 +633,7 @@ query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r query q r = do c <- ask header <- debugHeader + when debugFile traceConnectionFile liftIO . queryTrace (header ++ " query") q r $ SQLite.query c q r -- | no input, composite List output @@ -637,6 +641,7 @@ query_ :: (DB m, FromRow r, Show r) => SQLite.Query -> m [r] query_ q = do c <- ask header <- debugHeader + when debugFile traceConnectionFile liftIO . 
queryTrace_ (header ++ " query") q $ SQLite.query_ c q debugHeader :: DB m => m String @@ -672,22 +677,32 @@ queryTrace_ title query m = throwIO e else m +traceConnectionFile :: DB m => m () +traceConnectionFile = do + c <- ask + liftIO (SQLite.query_ c "PRAGMA database_list;") >>= \case + [(_seq :: Int, _name :: String, file)] -> traceM file + x -> error $ show x + execute :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m () execute q r = do c <- ask header <- debugHeader + when debugFile traceConnectionFile liftIO . queryTrace (header ++ " " ++ "execute") q r $ SQLite.execute c q r execute_ :: DB m => SQLite.Query -> m () execute_ q = do c <- ask header <- debugHeader + when debugFile traceConnectionFile liftIO . queryTrace_ (header ++ " " ++ "execute_") q $ SQLite.execute_ c q executeMany :: (DB m, ToRow q, Show q) => SQLite.Query -> [q] -> m () executeMany q r = do c <- ask header <- debugHeader + when debugFile traceConnectionFile liftIO . queryTrace (header ++ " " ++ "executeMany") q r $ SQLite.executeMany c q r -- | transaction that blocks From 30c8881c3698207c5fbf8e676b22d8bf46e0be64 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 12:55:53 -0600 Subject: [PATCH 190/225] a few fixes: - try not to continue working out of git cache - turn off cursor during sync22 - after codebase upgrade, set the replaced branch as root instead of the original - disable root branch update watch thing that didn't work - add term to git-simple sub-namespace test --- parser-typechecker/src/Unison/Codebase.hs | 6 +- .../src/Unison/Codebase/Branch.hs | 1 - .../src/Unison/Codebase/Conversion/Sync12.hs | 21 +++++ .../Unison/Codebase/Conversion/Upgrade12.hs | 11 ++- .../src/Unison/Codebase/GitError.hs | 1 + .../src/Unison/Codebase/SqliteCodebase.hs | 83 ++++++++++--------- .../tests/Unison/Test/GitSimple.hs | 3 +- 7 files changed, 83 insertions(+), 43 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase.hs b/parser-typechecker/src/Unison/Codebase.hs 
index 9e3aa425c2..e40919c094 100644 --- a/parser-typechecker/src/Unison/Codebase.hs +++ b/parser-typechecker/src/Unison/Codebase.hs @@ -43,6 +43,7 @@ import Unison.Util.Timing (time) import Unison.Var (Var) import qualified Unison.Var as Var import UnliftIO.Directory (getHomeDirectory) +import qualified Unison.Codebase.GitError as GitError type DataDeclaration v a = DD.DataDeclaration v a @@ -339,7 +340,10 @@ importRemoteBranch codebase ns mode = runExceptT do withStatus "Importing downloaded files into local codebase..." $ time "SyncFromDirectory" $ lift $ syncFromDirectory codebase cacheDir mode branch - pure branch + ExceptT + let h = Branch.headHash branch + err = Left $ GitError.CouldntLoadSyncedBranch h + in getBranchForHash codebase h <&> maybe err Right -- | Pull a git branch and view it from the cache, without syncing into the -- local codebase. diff --git a/parser-typechecker/src/Unison/Codebase/Branch.hs b/parser-typechecker/src/Unison/Codebase/Branch.hs index e2ee885097..8316947b6e 100644 --- a/parser-typechecker/src/Unison/Codebase/Branch.hs +++ b/parser-typechecker/src/Unison/Codebase/Branch.hs @@ -10,7 +10,6 @@ module Unison.Codebase.Branch , UnwrappedBranch , Branch0(..) , MergeMode(..) - , UnwrappedBranch , Raw(..) , Star , Hash diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 00cc2f3ab9..0cf04cb1c8 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -149,6 +149,27 @@ trySync t _gc e = do Env _ dest _ <- Reader.ask case e of C h mc -> do + -- getBranchStatus h >>= \case + -- Just {} -> pure Sync.PreviouslyDone + -- Nothing -> t (Codebase.branchExists dest h) >>= \case + -- True -> setBranchStatus h BranchOk $> Sync.PreviouslyDone + -- False -> do + -- c <- t mc + -- runValidateT @_ @n (repairBranch c) >>= \case + -- Left deps -> pure . 
Sync.Missing $ Foldable.toList deps + -- Right c' -> do + -- let h' = Causal.currentHash c' + -- t $ Codebase.putBranch dest (Branch.Branch c') + -- if h == h' + -- then do + -- setBranchStatus @m @n h BranchOk + -- pure Sync.Done + -- else do + -- setBranchStatus h (BranchReplaced h' c') + -- pure Sync.NonFatalError + + + t (Codebase.branchExists dest h) >>= \case True -> setBranchStatus h BranchOk $> Sync.PreviouslyDone False -> do diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs index d241243872..08eef04da2 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs @@ -2,6 +2,7 @@ {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeApplications #-} +{-# LANGUAGE ViewPatterns #-} module Unison.Codebase.Conversion.Upgrade12 where import Control.Exception.Safe (MonadCatch) @@ -23,6 +24,7 @@ import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.PrettyTerminal as CT import UnliftIO (MonadIO, liftIO) +import qualified Data.Map as Map upgradeCodebase :: forall m. (MonadIO m, MonadCatch m) => CodebasePath -> m () upgradeCodebase root = do @@ -36,14 +38,17 @@ upgradeCodebase root = do lift (Codebase.getRootBranch srcCB) >>= \case Left e -> error $ "Error loading source codebase root branch: " ++ show e Right (Branch c) -> pure $ Sync12.C (Causal.currentHash c) (pure c) - flip Reader.runReaderT env . flip State.evalStateT initialState $ do + (_, _, s) <- flip Reader.runReaderT env . flip State.execStateT initialState $ do sync <- Sync12.sync12 (lift . lift . 
lift) Sync.sync @_ @(Sync12.Entity _) (Sync.transformSync (lensStateT Lens._3) sync) Sync12.simpleProgress [rootEntity] - case rootEntity of - Sync12.C _h mc -> lift $ Codebase.putRootBranch destCB =<< Branch <$> mc + lift $ Codebase.putRootBranch destCB =<< fmap Branch case rootEntity of + Sync12.C h mc -> case Map.lookup h (Sync12._branchStatus s) of + Just Sync12.BranchOk -> mc + Just (Sync12.BranchReplaced _h' c') -> pure c' + Nothing -> error "We didn't sync the root?" _ -> error "The root wasn't a causal?" pure () diff --git a/parser-typechecker/src/Unison/Codebase/GitError.hs b/parser-typechecker/src/Unison/Codebase/GitError.hs index 02593ca213..f083967760 100644 --- a/parser-typechecker/src/Unison/Codebase/GitError.hs +++ b/parser-typechecker/src/Unison/Codebase/GitError.hs @@ -22,4 +22,5 @@ data GitError = NoGit | CouldntParseRootBranch RemoteRepo String | CouldntOpenCodebase RemoteRepo CodebasePath | SomeOtherError String + | CouldntLoadSyncedBranch Branch.Hash deriving Show diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 74e781a8f3..dee16bce26 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -98,6 +98,7 @@ import UnliftIO.STM import qualified Unison.Util.TQueue as TQueue import qualified Unison.Codebase.Watch as Watch import UnliftIO.Concurrent (forkIO, killThread) +import qualified System.Console.ANSI as ANSI debug, debugProcessBranches :: Bool debug = False @@ -444,7 +445,11 @@ sqliteCodebase root = do v' <- runDB conn Ops.dataVersion if v == v' then pure (Right b) else do newRootHash <- runDB conn Ops.loadRootCausalHash - if Branch.headHash b == Cv.branchHash2to1 newRootHash then pure (Right b) else forceReload + if Branch.headHash b == Cv.branchHash2to1 newRootHash + then pure (Right b) + else do + traceM $ "database was externally modified (" ++ show v ++ " -> " ++ show v' ++ 
")" + forceReload where forceReload = do b <- fmap (Either.mapLeft err) @@ -479,36 +484,36 @@ sqliteCodebase root = do rootBranchUpdates :: MonadIO m => TVar (Maybe (Q.DataVersion, a)) -> m (IO (), IO (Set Branch.Hash)) rootBranchUpdates rootBranchCache = do - branchHeadChanges <- TQueue.newIO - (cancelWatch, watcher) <- Watch.watchDirectory' (v2dir root) - watcher1 <- - liftIO . forkIO - $ forever - $ do - -- void ignores the name and time of the changed file, - -- and assume 'unison.sqlite3' has changed - (filename, time) <- watcher - traceM $ "SqliteCodebase.watcher " ++ show (filename, time) - readTVarIO rootBranchCache >>= \case - Nothing -> pure () - Just (v, _) -> do - -- this use of `conn` in a separate thread may be problematic. - -- hopefully sqlite will produce an obvious error message if it is. - v' <- runDB conn Ops.dataVersion - if v /= v' then - atomically - . TQueue.enqueue branchHeadChanges =<< runDB conn Ops.loadRootCausalHash - else pure () - - -- case hashFromFilePath filePath of - -- Nothing -> failWith $ CantParseBranchHead filePath - -- Just h -> - -- atomically . TQueue.enqueue branchHeadChanges $ Branch.Hash h - -- smooth out intermediate queue - pure - ( cancelWatch >> killThread watcher1 - , Set.fromList <$> Watch.collectUntilPause branchHeadChanges 400000 - ) + -- branchHeadChanges <- TQueue.newIO + -- (cancelWatch, watcher) <- Watch.watchDirectory' (v2dir root) + -- watcher1 <- + -- liftIO . forkIO + -- $ forever + -- $ do + -- -- void ignores the name and time of the changed file, + -- -- and assume 'unison.sqlite3' has changed + -- (filename, time) <- watcher + -- traceM $ "SqliteCodebase.watcher " ++ show (filename, time) + -- readTVarIO rootBranchCache >>= \case + -- Nothing -> pure () + -- Just (v, _) -> do + -- -- this use of `conn` in a separate thread may be problematic. + -- -- hopefully sqlite will produce an obvious error message if it is. + -- v' <- runDB conn Ops.dataVersion + -- if v /= v' then + -- atomically + -- . 
TQueue.enqueue branchHeadChanges =<< runDB conn Ops.loadRootCausalHash + -- else pure () + + -- -- case hashFromFilePath filePath of + -- -- Nothing -> failWith $ CantParseBranchHead filePath + -- -- Just h -> + -- -- atomically . TQueue.enqueue branchHeadChanges $ Branch.Hash h + -- -- smooth out intermediate queue + -- pure + -- ( cancelWatch >> killThread watcher1 + -- , Set.fromList <$> Watch.collectUntilPause branchHeadChanges 400000 + -- ) pure (cleanup, liftIO newRootsDiscovered) where newRootsDiscovered = do @@ -871,7 +876,7 @@ syncProgress = Sync.Progress need done warn allDone then pure () else State.put $ SyncProgressState (Just $ Set.insert h need) (Right done) (Right warn) SyncProgressState _ _ _ -> undefined - unless quiet $ State.get >>= liftIO . putStr . renderState ("Synced ") + unless quiet printSynced done h = do unless quiet $ Monad.whenM (State.gets size <&> (== 0)) $ liftIO $ putStr "\n" @@ -881,7 +886,7 @@ syncProgress = Sync.Progress need done warn allDone SyncProgressState (Just need) (Right done) warn -> State.put $ SyncProgressState (Just $ Set.delete h need) (Right $ Set.insert h done) warn SyncProgressState _ _ _ -> undefined - unless quiet $ State.get >>= liftIO . putStr . renderState ("Synced ") + unless quiet printSynced warn h = do unless quiet $ Monad.whenM (State.gets size <&> (== 0)) $ liftIO $ putStr "\n" @@ -891,16 +896,20 @@ syncProgress = Sync.Progress need done warn allDone SyncProgressState (Just need) done (Right warn) -> State.put $ SyncProgressState (Just $ Set.delete h need) done (Right $ Set.insert h warn) SyncProgressState _ _ _ -> undefined - unless quiet $ State.get >>= liftIO . putStr . renderState ("Synced ") + unless quiet printSynced - allDone = + allDone = do State.get >>= liftIO . putStr . renderState ("Done syncing ") + liftIO ANSI.showCursor + + printSynced :: (MonadState SyncProgressState m, MonadIO m) => m () + printSynced = liftIO ANSI.hideCursor >> State.get >>= liftIO . putStr . 
(\s -> renderState "Synced " s) renderState prefix = \case SyncProgressState Nothing (Left done) (Left warn) -> "\r" ++ prefix ++ show done ++ " entities" ++ if warn > 0 then " with " ++ show warn ++ " warnings." else "." - SyncProgressState (Just need) (Right done) (Right warn) -> - "\r" ++ prefix ++ show (Set.size done) ++ "/" ++ show (Set.size done + Set.size need + Set.size warn) + SyncProgressState (Just _need) (Right done) (Right warn) -> + "\r" ++ prefix ++ show (Set.size done + Set.size warn) ++ " entities" ++ if Set.size warn > 0 then " with " ++ show warn ++ " warnings." diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSimple.hs index 06e9f7a26c..ccd191bb51 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSimple.hs @@ -112,6 +112,7 @@ test = scope "git-simple" . tests $ ``` ```unison unique type a.b.C = C Nat + a.b.d = 4 ``` ```ucm .> add @@ -124,7 +125,7 @@ test = scope "git-simple" . 
tests $ .> find ``` ```unison - > a.b.C.C 3 + > a.b.C.C a.b.d ``` |]) , From 8882687d5d5a5795bf1dacd125d1b2491f55726c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 14:00:38 -0600 Subject: [PATCH 191/225] added more error info for getCycleLen --- .../U/Codebase/Sqlite/Queries.hs | 3 ++- .../src/Unison/Codebase/SqliteCodebase.hs | 27 ++++++++++--------- .../Codebase/SqliteCodebase/Conversions.hs | 20 +++++++------- 3 files changed, 27 insertions(+), 23 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index a352d37e3d..c38c0eb80b 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -90,6 +90,7 @@ data Integrity | UnknownHash Hash | UnknownText Text | NoObjectForHashId HashId + | NoObjectForPrimaryHashId HashId | NoNamespaceRoot | MultipleNamespaceRoots [CausalHashId] | NoTypeIndexForTerm Referent.Id @@ -254,7 +255,7 @@ loadObjectWithHashIdAndTypeById oId = queryMaybe sql (Only oId) >>= orError (Unk -- |Not all hashes have corresponding objects; e.g., hashes of term types expectObjectIdForPrimaryHashId :: EDB m => HashId -> m ObjectId expectObjectIdForPrimaryHashId h = - maybeObjectIdForPrimaryHashId h >>= orError (UnknownHashId h) + maybeObjectIdForPrimaryHashId h >>= orError (NoObjectForPrimaryHashId h) maybeObjectIdForPrimaryHashId :: DB m => HashId -> m (Maybe ObjectId) maybeObjectIdForPrimaryHashId h = queryAtom sql (Only h) where sql = [here| diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index dee16bce26..fccccba0b6 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -264,10 +264,13 @@ sqliteCodebase root = do getTerm (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = runDB' conn do term2 <- Ops.loadTermByReference 
(C.Reference.Id h2 i) - Cv.term2to1 h1 getCycleLen getDeclType term2 + Cv.term2to1 h1 (getCycleLen "getTerm") getDeclType term2 - getCycleLen :: EDB m => Hash -> m Reference.Size - getCycleLen = Ops.getCycleLen . Cv.hash1to2 + getCycleLen :: EDB m => String -> Hash -> m Reference.Size + getCycleLen source h = do + (Ops.getCycleLen . Cv.hash1to2) h `Except.catchError` \case + e@(Ops.DatabaseIntegrityError (Q.NoObjectForPrimaryHashId {})) -> error $ show e ++ " in " ++ source + e -> Except.throwError e getDeclType :: EDB m => C.Reference.Reference -> m CT.ConstructorType getDeclType = \case @@ -289,13 +292,13 @@ sqliteCodebase root = do getTypeOfTermImpl (Reference.Id (Cv.hash1to2 -> h2) i _n) = runDB' conn do type2 <- Ops.loadTypeOfTermByTermReference (C.Reference.Id h2 i) - Cv.ttype2to1 getCycleLen type2 + Cv.ttype2to1 (getCycleLen "getTypeOfTermImpl") type2 getTypeDeclaration :: MonadIO m => Reference.Id -> m (Maybe (Decl Symbol Ann)) getTypeDeclaration (Reference.Id h1@(Cv.hash1to2 -> h2) i _n) = runDB' conn do decl2 <- Ops.loadDeclByReference (C.Reference.Id h2 i) - Cv.decl2to1 h1 getCycleLen decl2 + Cv.decl2to1 h1 (getCycleLen "getTypeDeclaration") decl2 putTerm :: MonadIO m => Reference.Id -> Term Symbol Ann -> Type Symbol Ann -> m () putTerm id tm tp | debug && trace (show "SqliteCodebase.putTerm " ++ show id ++ " " ++ show tm ++ " " ++ show tp) False = undefined @@ -566,7 +569,7 @@ sqliteCodebase root = do dependentsImpl :: MonadIO m => Reference -> m (Set Reference.Id) dependentsImpl r = runDB conn $ - Set.traverse (Cv.referenceid2to1 getCycleLen) + Set.traverse (Cv.referenceid2to1 (getCycleLen "dependentsImpl")) =<< Ops.dependents (Cv.reference1to2 r) syncFromDirectory :: MonadIO m => Codebase1.CodebasePath -> SyncMode -> Branch m -> m () @@ -585,14 +588,14 @@ sqliteCodebase root = do watches w = runDB conn $ Ops.listWatches (Cv.watchKind1to2 w) - >>= traverse (Cv.referenceid2to1 getCycleLen) + >>= traverse (Cv.referenceid2to1 (getCycleLen "watches")) 
getWatch :: MonadIO m => UF.WatchKind -> Reference.Id -> m (Maybe (Term Symbol Ann)) getWatch k r@(Reference.Id h _i _n) | elem k standardWatchKinds = runDB' conn $ Ops.loadWatch (Cv.watchKind1to2 k) (Cv.referenceid1to2 r) - >>= Cv.term2to1 h getCycleLen getDeclType + >>= Cv.term2to1 h (getCycleLen "getWatch") getDeclType getWatch _unknownKind _ = pure Nothing standardWatchKinds = [UF.RegularWatch, UF.TestWatch] @@ -638,13 +641,13 @@ sqliteCodebase root = do termsOfTypeImpl r = runDB conn $ Ops.termsHavingType (Cv.reference1to2 r) - >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) + >>= Set.traverse (Cv.referentid2to1 (getCycleLen "termsOfTypeImpl") getDeclType) termsMentioningTypeImpl :: MonadIO m => Reference -> m (Set Referent.Id) termsMentioningTypeImpl r = runDB conn $ Ops.termsMentioningType (Cv.reference1to2 r) - >>= Set.traverse (Cv.referentid2to1 getCycleLen getDeclType) + >>= Set.traverse (Cv.referentid2to1 (getCycleLen "termsMentioningTypeImpl") getDeclType) hashLength :: Applicative m => m Int hashLength = pure 10 @@ -661,7 +664,7 @@ sqliteCodebase root = do >>= traverse (C.Reference.idH Ops.loadHashByObjectId) >>= pure . Set.fromList - Set.fromList <$> traverse (Cv.referenceid2to1 getCycleLen) (Set.toList refs) + Set.fromList <$> traverse (Cv.referenceid2to1 (getCycleLen "defnReferencesByPrefix")) (Set.toList refs) termReferencesByPrefix :: MonadIO m => ShortHash -> m (Set Reference.Id) termReferencesByPrefix = defnReferencesByPrefix OT.TermComponent @@ -674,7 +677,7 @@ sqliteCodebase root = do referentsByPrefix (SH.ShortHash prefix (fmap Cv.shortHashSuffix1to2 -> cycle) cid) = runDB conn do termReferents <- Ops.termReferentsByPrefix prefix cycle - >>= traverse (Cv.referentid2to1 getCycleLen getDeclType) + >>= traverse (Cv.referentid2to1 (getCycleLen "referentsByPrefix") getDeclType) declReferents' <- Ops.declReferentsByPrefix prefix cycle (read . 
Text.unpack <$> cid) let declReferents = [ Referent.Con' (Reference.Id (Cv.hash2to1 h) pos len) (fromIntegral cid) (Cv.decltype2to1 ct) diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs index 71a27219e0..34e9abae35 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/Conversions.hs @@ -415,10 +415,10 @@ type1to2' convertRef = V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) -- | forces loading v1 branches even if they may not exist -causalbranch2to1 :: Monad m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Branch.Causal m -> m (V1.Branch.Branch m) +causalbranch2to1 :: Monad m => (String -> Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Branch.Causal m -> m (V1.Branch.Branch m) causalbranch2to1 lookupSize lookupCT = fmap V1.Branch.Branch . causalbranch2to1' lookupSize lookupCT -causalbranch2to1' :: Monad m => (Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Branch.Causal m -> m (V1.Branch.UnwrappedBranch m) +causalbranch2to1' :: Monad m => (String -> Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Branch.Causal m -> m (V1.Branch.UnwrappedBranch m) causalbranch2to1' lookupSize lookupCT (V2.Causal hc _he (Map.toList -> parents) me) = do let currentHash = causalHash2to1 hc case parents of @@ -502,27 +502,27 @@ causalbranch1to2 (V1.Branch.Branch c) = causal1to2' hash1to2cb hash1to2c branch1 patch2to1 :: forall m. 
Monad m => - (Hash -> m V1.Reference.Size) -> + (String -> Hash -> m V1.Reference.Size) -> V2.Branch.Patch -> m V1.Patch patch2to1 lookupSize (V2.Branch.Patch v2termedits v2typeedits) = do termEdits <- Map.bitraverse referent2to1' (Set.traverse termedit2to1) v2termedits - typeEdits <- Map.bitraverse (reference2to1 lookupSize) (Set.traverse typeedit2to1) v2typeedits + typeEdits <- Map.bitraverse (reference2to1 (lookupSize "patch->old type")) (Set.traverse typeedit2to1) v2typeedits pure $ V1.Patch (Relation.fromMultimap termEdits) (Relation.fromMultimap typeEdits) where referent2to1' :: V2.Referent -> m V1.Reference referent2to1' = \case - V2.Referent.Ref r -> reference2to1 lookupSize r + V2.Referent.Ref r -> reference2to1 (lookupSize "patch->old term") r V2.Referent.Con {} -> error "found referent on LHS when converting patch2to1" termedit2to1 :: V2.TermEdit.TermEdit -> m V1.TermEdit.TermEdit termedit2to1 = \case V2.TermEdit.Replace (V2.Referent.Ref r) t -> - V1.TermEdit.Replace <$> reference2to1 lookupSize r <*> typing2to1 t + V1.TermEdit.Replace <$> reference2to1 (lookupSize "patch->new term") r <*> typing2to1 t V2.TermEdit.Replace {} -> error "found referent on RHS when converting patch2to1" V2.TermEdit.Deprecate -> pure V1.TermEdit.Deprecate typeedit2to1 :: V2.TypeEdit.TypeEdit -> m V1.TypeEdit.TypeEdit typeedit2to1 = \case - V2.TypeEdit.Replace r -> V1.TypeEdit.Replace <$> reference2to1 lookupSize r + V2.TypeEdit.Replace r -> V1.TypeEdit.Replace <$> reference2to1 (lookupSize "patch->new type") r V2.TypeEdit.Deprecate -> pure V1.TypeEdit.Deprecate typing2to1 t = pure $ case t of V2.TermEdit.Same -> V1.TermEdit.Same @@ -561,13 +561,13 @@ namesegment1to2 (V1.NameSegment t) = V2.Branch.NameSegment t branch2to1 :: Monad m => - (Hash -> m V1.Reference.Size) -> + (String -> Hash -> m V1.Reference.Size) -> (V2.Reference -> m CT.ConstructorType) -> V2.Branch.Branch m -> m (V1.Branch.Branch0 m) branch2to1 lookupSize lookupCT (V2.Branch.Branch v2terms v2types v2patches 
v2children) = do - v1terms <- toStar (reference2to1 lookupSize) =<< Map.bitraverse (pure . namesegment2to1) (Map.bitraverse (referent2to1 lookupSize lookupCT) id) v2terms - v1types <- toStar (reference2to1 lookupSize) =<< Map.bitraverse (pure . namesegment2to1) (Map.bitraverse (reference2to1 lookupSize) id) v2types + v1terms <- toStar (reference2to1 $ lookupSize "term metadata") =<< Map.bitraverse (pure . namesegment2to1) (Map.bitraverse (referent2to1 (lookupSize "term") lookupCT) id) v2terms + v1types <- toStar (reference2to1 $ lookupSize "type metadata") =<< Map.bitraverse (pure . namesegment2to1) (Map.bitraverse (reference2to1 (lookupSize "type")) id) v2types v1patches <- Map.bitraverse (pure . namesegment2to1) (bitraverse (pure . edithash2to1) (fmap (patch2to1 lookupSize))) v2patches v1children <- Map.bitraverse (pure . namesegment2to1) (causalbranch2to1 lookupSize lookupCT) v2children pure $ V1.Branch.branch0 v1terms v1types v1children v1patches From bd949334a759d8a931bdbeed0dd0969ddc2b97b7 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 17:28:48 -0600 Subject: [PATCH 192/225] treat patch "old" definitions as full dependencies --- .../U/Codebase/Sqlite/Sync22.hs | 6 ++++ .../src/Unison/Codebase/Conversion/Sync12.hs | 29 ++++++++++++++----- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index 754218dc06..b219b960fd 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -335,6 +335,12 @@ trySync' tCache hCache oCache cCache _gc = \case oIds' <- traverse syncLocalObjectId oIds tIds' <- lift $ traverse syncTextLiteral tIds hIds' <- lift $ traverse syncHashLiteral hIds + + -- workaround for requiring components to compute component lengths for references. 
+ -- this line requires objects in the destination for any hashes referenced in the source, + -- (making those objects dependencies of this patch). See Sync21.filter{Term,Type}Edit + traverse_ syncLocalObjectId =<< traverse (runSrc . Q.expectObjectIdForAnyHashId) hIds + pure $ PL.LocalIds tIds' hIds' oIds' syncBranchLocalIds :: BL.BranchLocalIds -> ValidateT (Set Entity) m BL.BranchLocalIds diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 0cf04cb1c8..b16fc79b48 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -394,22 +394,35 @@ repairPatch (Patch termEdits typeEdits) = do let patch = Patch termEdits' typeEdits' pure (H.accumulate' patch, patch) where - filterTermEdit _ = \case - TermEdit.Deprecate -> pure True - TermEdit.Replace (Reference.Builtin _) _ -> pure True - TermEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) _ -> + -- filtering `old` is part of a workaround for ucm currently + -- requiring the actual component in order to construct a + -- reference to it. See Sync22.syncPatchLocalIds + helpTermEdit = \case + Reference.Builtin _ -> pure True + Reference.DerivedId (Reference.Id h _ n) -> getTermStatus h >>= \case Nothing -> Validate.refute . Set.singleton $ T h n Just TermOk -> pure True Just _ -> pure False - filterTypeEdit _ = \case - TypeEdit.Deprecate -> pure True - TypeEdit.Replace (Reference.Builtin _) -> pure True - TypeEdit.Replace (Reference.DerivedId (Reference.Id h _ n)) -> + helpTypeEdit = \case + Reference.Builtin _ -> pure True + Reference.DerivedId (Reference.Id h _ n) -> getDeclStatus h >>= \case Nothing -> Validate.refute . 
Set.singleton $ D h n Just DeclOk -> pure True Just _ -> pure False + filterTermEdit old new = do + oldOk <- helpTermEdit old + newOk <- case new of + TermEdit.Deprecate -> pure True + TermEdit.Replace new _typing -> helpTermEdit new + pure $ oldOk && newOk + filterTypeEdit old new = do + oldOk <- helpTypeEdit old + newOk <- case new of + TypeEdit.Deprecate -> pure True + TypeEdit.Replace new -> helpTypeEdit new + pure $ oldOk && newOk filterBranchTermStar :: (S m n, V m n) => Metadata.Star Referent NameSegment -> n (Metadata.Star Referent NameSegment) filterBranchTermStar (Star3 _refs names _mdType md) = do From d625245f043af21ef04143e930a1d56ae3303c2d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 17:29:07 -0600 Subject: [PATCH 193/225] correctly record patch replacements during Upgrade12 --- parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index b16fc79b48..87abf340e9 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -226,7 +226,7 @@ trySync t _gc e = do Right (Left deps) -> pure . 
Sync.Missing $ Foldable.toList deps Right (Right (h', patch')) -> do t $ Codebase.putPatch dest h' patch' - setPatchStatus h PatchOk + setPatchStatus h if h == h' then PatchOk else PatchReplaced h' pure Sync.Done getBranchStatus :: S m n => Branch.Hash -> n (Maybe (BranchStatus m)) From 15e6008a0be0071de4da401cd11798d1abf7153b Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 17:29:49 -0600 Subject: [PATCH 194/225] add a flag to disable Config watch thread: useful during +RTS -xc --- parser-typechecker/src/Unison/CommandLine.hs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/parser-typechecker/src/Unison/CommandLine.hs b/parser-typechecker/src/Unison/CommandLine.hs index d96a647e6b..2d02ff37bd 100644 --- a/parser-typechecker/src/Unison/CommandLine.hs +++ b/parser-typechecker/src/Unison/CommandLine.hs @@ -38,6 +38,10 @@ import qualified Unison.Util.Find as Find import qualified Unison.Util.Pretty as P import Unison.Util.TQueue (TQueue) import qualified Unison.Util.TQueue as Q +import qualified Data.Configurator as Config + +disableWatchConfig :: Bool +disableWatchConfig = False allow :: FilePath -> Bool allow p = @@ -46,7 +50,7 @@ allow p = (isSuffixOf ".u" p || isSuffixOf ".uu" p) watchConfig :: FilePath -> IO (Config, IO ()) -watchConfig path = do +watchConfig path = if disableWatchConfig then pure (Config.empty, pure ()) else do (config, t) <- autoReload autoConfig [Optional path] pure (config, killThread t) From 549b05e48d6ca25a481cf0f63f9380b33cac0cfe Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 17:43:58 -0600 Subject: [PATCH 195/225] answered some questions --- questions.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/questions.md b/questions.md index bd3c071e45..585ecdc80a 100644 --- a/questions.md +++ b/questions.md @@ -14,7 +14,7 @@ next steps: - [x] Syncing a remote codebase - [x] `SqliteCodebase.syncFromDirectory` - [x] `SqliteCodebase.syncToDirectory` - - [ ] do I need to 
initialize a sqlite codebase in the destination? + - [x] do I need to initialize a sqlite codebase in the destination? - [ ] Managing external edit events? - [x] `SqliteCodebase.rootBranchUpdates` Is there some Sqlite function for detecting external changes? - https://www.sqlite.org/pragma.html#pragma_data_version @@ -33,7 +33,7 @@ next steps: C:\Users\arya\unison>stack exec unison unison.EXE: SQLite3 returned ErrorCan'tOpen while attempting to perform open "C:\\Users\\arya\\.unison\\v2\\unison.sqlite3": unable to open database file -- [ ] UnknownHashId (HashId 2179) +- [x] UnknownHashId (HashId 2179) arya@jrrr unison % stack exec unison -- -codebase /tmp/getbase3 init Initializing a new codebase in: /private/tmp/getbase3 arya@jrrr unison % stack exec unison -- -codebase /tmp/getbase3 From 89a97521c37001e1e28d8b8724315780e1128a16 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 17:56:44 -0600 Subject: [PATCH 196/225] let Sync12 getBranchStatus before branchExists --- .../src/Unison/Codebase/Conversion/Sync12.hs | 56 ++++++------------- 1 file changed, 18 insertions(+), 38 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 87abf340e9..3289c73281 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -149,44 +149,24 @@ trySync t _gc e = do Env _ dest _ <- Reader.ask case e of C h mc -> do - -- getBranchStatus h >>= \case - -- Just {} -> pure Sync.PreviouslyDone - -- Nothing -> t (Codebase.branchExists dest h) >>= \case - -- True -> setBranchStatus h BranchOk $> Sync.PreviouslyDone - -- False -> do - -- c <- t mc - -- runValidateT @_ @n (repairBranch c) >>= \case - -- Left deps -> pure . 
Sync.Missing $ Foldable.toList deps - -- Right c' -> do - -- let h' = Causal.currentHash c' - -- t $ Codebase.putBranch dest (Branch.Branch c') - -- if h == h' - -- then do - -- setBranchStatus @m @n h BranchOk - -- pure Sync.Done - -- else do - -- setBranchStatus h (BranchReplaced h' c') - -- pure Sync.NonFatalError - - - - t (Codebase.branchExists dest h) >>= \case - True -> setBranchStatus h BranchOk $> Sync.PreviouslyDone - False -> do - c <- t mc - runValidateT @_ @n (repairBranch c) >>= \case - Left deps -> pure . Sync.Missing $ Foldable.toList deps - Right c' -> do - let h' = Causal.currentHash c' - t $ Codebase.putBranch dest (Branch.Branch c') - if h == h' - then do - setBranchStatus @m @n h BranchOk - pure Sync.Done - else do - setBranchStatus h (BranchReplaced h' c') - pure Sync.NonFatalError - + getBranchStatus h >>= \case + Just {} -> pure Sync.PreviouslyDone + Nothing -> t (Codebase.branchExists dest h) >>= \case + True -> setBranchStatus h BranchOk $> Sync.PreviouslyDone + False -> do + c <- t mc + runValidateT (repairBranch c) >>= \case + Left deps -> pure . 
Sync.Missing $ Foldable.toList deps + Right c' -> do + let h' = Causal.currentHash c' + t $ Codebase.putBranch dest (Branch.Branch c') + if h == h' + then do + setBranchStatus h BranchOk + pure Sync.Done + else do + setBranchStatus h (BranchReplaced h' c') + pure Sync.NonFatalError T h n -> getTermStatus h >>= \case Just {} -> pure Sync.PreviouslyDone From c348bcc4002f38de1046673ee4a3195402400a84 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 18:37:36 -0600 Subject: [PATCH 197/225] delete aspirational packages --- codebase-convert-1to2/app/Main.hs | 2 - .../lib/U/Codebase/Convert/SyncV1V2.hs | 1365 ----------------- .../unison-codebase-convert-1to2.cabal | 67 - codebase1/codebase/Unison/Codebase/V1/ABT.hs | 88 -- .../Unison/Codebase/V1/Branch/NameSegment.hs | 13 - .../codebase/Unison/Codebase/V1/Branch/Raw.hs | 34 - .../codebase/Unison/Codebase/V1/Causal/Raw.hs | 26 - .../Unison/Codebase/V1/ConstructorType.hs | 8 - .../Unison/Codebase/V1/DataDeclaration.hs | 72 - .../Unison/Codebase/V1/FileCodebase.hs | 261 ---- .../Unison/Codebase/V1/LabeledDependency.hs | 58 - .../codebase/Unison/Codebase/V1/Patch.hs | 60 - .../Unison/Codebase/V1/Patch/TermEdit.hs | 32 - .../Unison/Codebase/V1/Patch/TypeEdit.hs | 14 - .../codebase/Unison/Codebase/V1/Reference.hs | 63 - .../codebase/Unison/Codebase/V1/Referent.hs | 54 - .../V1/Serialization/Serialization.hs | 43 - .../Unison/Codebase/V1/Serialization/V1.hs | 390 ----- .../codebase/Unison/Codebase/V1/Star3.hs | 214 --- .../codebase/Unison/Codebase/V1/Symbol.hs | 13 - codebase1/codebase/Unison/Codebase/V1/Term.hs | 167 -- .../Unison/Codebase/V1/Term/Pattern.hs | 102 -- codebase1/codebase/Unison/Codebase/V1/Type.hs | 55 - .../codebase/Unison/Codebase/V1/Type/Kind.hs | 3 - codebase1/codebase/unison-codebase1.cabal | 59 - codebase2/editor/U/Editor/Codebase.hs | 57 - codebase2/editor/unison-editor.cabal | 27 - codebase2/language/U/Language/Blank.hs | 25 - codebase2/language/unison-language.cabal | 27 - 
codebase2/runtime/U/Runtime/CodeLookup.hs | 1 - codebase2/runtime/unison-runtime.cabal | 28 - codebase2/syntax/unison-syntax.cabal | 25 - stack.yaml | 7 - 33 files changed, 3460 deletions(-) delete mode 100644 codebase-convert-1to2/app/Main.hs delete mode 100644 codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs delete mode 100644 codebase-convert-1to2/unison-codebase-convert-1to2.cabal delete mode 100644 codebase1/codebase/Unison/Codebase/V1/ABT.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/ConstructorType.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/LabeledDependency.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Patch.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Patch/TermEdit.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Reference.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Referent.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Serialization/Serialization.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Star3.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Symbol.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Term.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Type.hs delete mode 100644 codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs delete mode 100644 codebase1/codebase/unison-codebase1.cabal delete mode 100644 
codebase2/editor/U/Editor/Codebase.hs delete mode 100644 codebase2/editor/unison-editor.cabal delete mode 100644 codebase2/language/U/Language/Blank.hs delete mode 100644 codebase2/language/unison-language.cabal delete mode 100644 codebase2/runtime/U/Runtime/CodeLookup.hs delete mode 100644 codebase2/runtime/unison-runtime.cabal delete mode 100644 codebase2/syntax/unison-syntax.cabal diff --git a/codebase-convert-1to2/app/Main.hs b/codebase-convert-1to2/app/Main.hs deleted file mode 100644 index a1e202107f..0000000000 --- a/codebase-convert-1to2/app/Main.hs +++ /dev/null @@ -1,2 +0,0 @@ -main :: IO () -main = putStrLn "Hello, World!" \ No newline at end of file diff --git a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs b/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs deleted file mode 100644 index a1b1185a60..0000000000 --- a/codebase-convert-1to2/lib/U/Codebase/Convert/SyncV1V2.hs +++ /dev/null @@ -1,1365 +0,0 @@ -{-# LANGUAGE BlockArguments #-} -{-# LANGUAGE DeriveFunctor #-} -{-# LANGUAGE DerivingStrategies #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE EmptyCase #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE PartialTypeSignatures #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE ViewPatterns #-} -{-# OPTIONS_GHC -Wno-incomplete-patterns #-} -{-# OPTIONS_GHC -Wno-unused-imports #-} -{-# OPTIONS_GHC -Wno-unused-do-bind #-} --- {-# OPTIONS_GHC -Wno-unused-local-binds #-} -{-# OPTIONS_GHC -Wno-unused-matches #-} - -module U.Codebase.Convert.SyncV1V2 where - -import Control.Lens (mapMOf, over) -import Control.Monad.Except (MonadError, runExceptT, throwError) -import Control.Monad.Extra ((>=>), ifM) -import Control.Monad.Reader (ReaderT (runReaderT)) -import qualified Control.Monad.State as State -import Control.Monad.State (State) -import Data.Bifunctor (Bifunctor (first), second) -import Data.Bytes.Get (MonadGet) -import Data.Either 
(partitionEithers) -import Data.Either.Extra (mapLeft) -import Data.Foldable (Foldable (foldl', toList), for_, traverse_) -import Data.Functor ((<&>)) -import qualified Data.List as List -import Data.List.Extra (nubOrd) -import Data.Map (Map) -import qualified Data.Map.Strict as Map -import qualified Data.Set as Set -import Data.Text (Text) -import qualified Data.Text as Text -import Data.Traversable (for) -import Data.Tuple (swap) -import Data.Vector (Vector) -import qualified Data.Vector as Vector -import Database.SQLite.Simple (Connection) -import qualified Database.SQLite.Simple as SQLite -import Database.SQLite.Simple.FromField (FromField) -import Database.SQLite.Simple.ToField (ToField) -import qualified U.Util.Term as TermUtil -import qualified U.Util.Type as TypeUtil -import qualified U.Codebase.Decl as V2.Decl -import qualified U.Codebase.Kind as V2.Kind -import qualified U.Codebase.Reference as V2.Reference -import qualified U.Codebase.Referent as V2.Referent -import Data.String.Here.Uninterpolated (here) -import qualified U.Codebase.Sqlite.DbId as Db -import qualified U.Codebase.Sqlite.Queries as Db -import qualified U.Codebase.Sqlite.Reference as V2S.Reference -import qualified U.Codebase.Sqlite.Reference as V2.Sqlite.Reference -import qualified U.Codebase.Sqlite.Referent as V2.Sqlite.Referent -import qualified U.Codebase.Sqlite.Serialization as S.V2 -import qualified U.Codebase.Sqlite.Symbol as V2.Symbol -import qualified U.Codebase.Sqlite.Term.Format as V2.TermFormat -import qualified U.Codebase.Term as V2.Term -import qualified U.Codebase.Sqlite.Decl.Format as V2.DeclFormat -import qualified U.Codebase.Sqlite.LocalIds as V2.LocalIds -import qualified U.Codebase.Sqlite.ObjectType as V2.OT -import U.Codebase.Sqlite.Queries (DB) -import qualified U.Codebase.Type as V2.Type -import qualified U.Core.ABT as V2.ABT -import qualified U.Util.Base32Hex as Base32Hex -import U.Util.Base32Hex (Base32Hex) -import U.Util.Hash (Hash) -import qualified U.Util.Hash as 
Hash -import qualified U.Util.Hashable as H -import U.Util.Monoid (foldMapM) -import qualified U.Util.Serialization as S -import qualified Unison.Codebase.V1.ABT as V1.ABT -import qualified Unison.Codebase.V1.Branch.Raw as V1 -import qualified Unison.Codebase.V1.DataDeclaration as V1.DD -import qualified Unison.Codebase.V1.FileCodebase as V1 -import Unison.Codebase.V1.FileCodebase (CodebasePath) -import qualified Unison.Codebase.V1.FileCodebase as V1.FC -import qualified Unison.Codebase.V1.LabeledDependency as V1.LD -import qualified Unison.Codebase.V1.Reference as V1.Reference -import qualified Unison.Codebase.V1.Referent as V1.Referent -import qualified Unison.Codebase.V1.Serialization.Serialization as V1.S -import qualified Unison.Codebase.V1.Serialization.V1 as V1.S -import qualified Unison.Codebase.V1.Symbol as V1.Symbol -import qualified Unison.Codebase.V1.Term as V1.Term -import qualified Unison.Codebase.V1.Term.Pattern as V1.Pattern -import qualified Unison.Codebase.V1.Type as V1.Type -import qualified Unison.Codebase.V1.Type.Kind as V1.Kind -import UnliftIO (MonadIO, liftIO) -import UnliftIO.Directory (listDirectory) -import Data.Set (Set) -import Data.Bifunctor (Bifunctor(bimap)) - -newtype V1 a = V1 {runV1 :: a} deriving (Eq, Ord, Show) - -newtype V2 a = V2 {runV2 :: a} - deriving (Eq, Ord, Show, Functor) - deriving (FromField, H.Accumulate, H.Hashable) via a - -data V1EntityRef - = Decl1 (V1 Hash) -- this will refer to the whole component - | Term1 (V1 Hash) -- ditto - | Patch1 V1.EditHash - | Branch1 V1.BranchHash - deriving (Eq, Ord, Show) - -v1EntityRefToHash :: V1EntityRef -> V1 Hash -v1EntityRefToHash = \case - Decl1 h -> h - Term1 h -> h - Patch1 (V1.EditHash h) -> V1 h - Branch1 (V1.BranchHash h) -> V1 h - -newtype Base32HexPrefix = Base32HexPrefix Text - deriving (Show) via Text - deriving (ToField) via Text - deriving (FromField) via Text - --- newtype PatchHash h = PatchHash h --- newtype NamespaceHash h = NamespaceHash h -newtype CausalHash h 
= CausalHash h - --- 1. Load a V1 component (Hash, [V1Term]) --- 2. Convert its dependencies before continuing --- 3. Construct the hashable data structure --- 4. Serialize the hashable data structure - --- unhashTermComponent :: V1 Hash -> [V1Term.Term] -> [V2.Term Symbol] - --- -- |things that appear in a deserialized RawBranch --- type V2EntityRef = --- V2EntityRefH --- Hash --- (PatchHash Hash) --- (NamespaceHash Hash) --- (CausalHash Hash) - --- | things that appear in a serialized RawBranch --- type V2EntityRefS = --- V2EntityRef --- Db.ObjectId --- (PatchHash Db.ObjectId) --- (NamespaceHash Db.NamespaceHashId) --- (CausalHash Db.CausalHashId) - --- data V2EntityRef hr hp hn hc --- = Decl2 V2.Reference.Id --- | Term2 V2.Reference.Id --- | Patch2 PatchHash --- | NamespaceHash2 V2NamespaceHash --- | CausalHash2 CausalHash - --- initializeV2DB :: MonadIO m => m () --- initializeV2DB = error "todo" - -data FatalError - = NoRootBranch - | MissingBranch (V1 Hash) - | MissingPatch (V1 Hash) - | MissingTerm V1.Reference.Id - | MissingTermHash (V1 Hash) - | MissingTypeOfTerm V1.Reference.Id - | MissingDecl V1.Reference.Id - | MissingDeclHash (V1 Hash) - | InvalidBranch (V1 Hash) - | InvalidPatch (V1 Hash) - | InvalidTerm V1.Reference.Id - | InvalidTypeOfTerm V1.Reference.Id - | InvalidDecl V1.Reference.Id - - -type V1Type = V1.Type.Type V1.Symbol.Symbol () -type V1Term = V1.Term.Term V1.Symbol.Symbol () - -type V1Decl = V1.DD.Decl V1.Symbol.Symbol () - -type V2HashTerm = V2.Term.Term V2.Symbol.Symbol -type V2HashTypeOfTerm = V2.Type.TypeT V2.Symbol.Symbol - -type V2DiskTypeOfTerm = V2.Type.TypeR V2.Sqlite.Reference.Reference V2.Symbol.Symbol -type V2HashTermComponent = [V2HashTerm] - -type V2DiskTermComponent = V2.TermFormat.LocallyIndexedComponent - - -type V2HashDecl = V2.Decl.Decl V2.Symbol.Symbol -type V2TypeOfConstructor = V2.Type.TypeD V2.Symbol.Symbol - -type V2HashDeclComponent = [V2HashDecl] -type V2DiskDeclComponent = V2.DeclFormat.LocallyIndexedComponent - 
--- type Patch = Patch.Patch V1.Reference - --- -- the H stands for "for hashing" --- -- the S stands for "for serialization" - --- type Term2ComponentH = [Term2 Hash] --- type Term2ComponentS = [Term2 Db.ObjectId] - --- type Decl2ComponentH = [Decl2 (Maybe Hash)] --- type Decl2S = Decl2 Db.ObjectId --- type Decl2ComponentS = [Decl2S] - --- -- these have maybes in them to indicate a self-component reference --- type Term2 h = V2.Term h --- type Decl2 h = DD.DeclR (V2.Reference h) Symbol () - --- -- for indexing --- type Decl2I = DD.DeclR (V2.Reference Db.ObjectId) Symbol () --- type Term2S = Term2 Db.ObjectId --- type Type2S = V2.Type Db.ObjectId - --- -- what about referent types in the index? --- -- type CtorType2S = Type.TypeH Db.ObjectId Symbol Ann --- -- type Term2S = Term.TermH (Maybe Db.ObjectId) Symbol Ann --- type Patch2S = Patch.Patch (V2.Reference Db.ObjectId) - --- --type Term2S = ABT.Term (Term.F' (Maybe TermId) DeclId (Type.TypeH DeclId Symbol ()) Void ()) Symbol () --- --alternative representation if embedded --- --type Term2S = ABT.Term (Term.F' (Maybe TermId) DeclId TypeId Void ()) Symbol () - --- fmtV :: S.Format Symbol --- fmtV = S.V1.formatSymbol - --- getV :: S.Get Symbol --- getV = S.get S.V1.formatSymbol - --- putV :: S.Put Symbol --- putV = S.put fmtV - --- fmtA :: S.Format Ann --- fmtA = V1.formatAnn - --- getA :: S.Get Ann --- getA = S.get fmtA - --- putA :: S.Put Ann --- putA = S.put fmtA - --- todo: this just converts a whole codebase, which we need to do locally \ --- but we also want some code that imports just a particular sub-branch. -syncV1V2 :: forall m. MonadIO m => Connection -> CodebasePath -> m (Either FatalError ()) -syncV1V2 c rootDir = liftIO $ {- SQLite.withTransaction c . -} runExceptT . 
flip runReaderT c $ do - v1RootHash <- getV1RootBranchHash rootDir >>= maybe (throwError NoRootBranch) pure - -- starting from the root namespace, convert all entities you can find - convertEntities [Branch1 v1RootHash] - v2RootHash <- v2CausalHashForV1BranchHash v1RootHash - setV2Root v2RootHash - error "todo: compressEntities and vacuum db" v2RootHash - - -- Incorporating diff construction into the conversion is tough because - -- a) I was thinking we'd represent an older version as a diff against the - -- newer version, but the newer version hasn't been fully constructed - -- until the older versions have been converted and hashed. - -- b) If we just store all the old versions uncompressed, it might be too big. - -- (But no bigger than the v1 db.) But if that is okay, we can compress and - -- vacuum them afterwards. - - pure () - where - setV2Root = error "todo: setV2Root" - v2CausalHashForV1BranchHash = error "todo: v2CausalHashForV1BranchHash" - convertEntities :: - forall m. - DB m => - MonadError FatalError m => - [V1EntityRef] -> - m () - convertEntities [] = pure () - convertEntities all@(h : rest) = do - termDirComponents <- componentMapForDir (V1.termsDir rootDir) - declsDirComponents <- componentMapForDir (V1.typesDir rootDir) - case h of - Term1 h -> - -- if this hash is already associated to an object - ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do - -- load a cycle from disk - e <- loadTerm1 rootDir termDirComponents h - matchTerm1Dependencies h e >>= \case - Left missing -> convertEntities (missing ++ all) - Right (getHash, getObjId, getTextId) -> do - convertTerm1 getHash getObjId getTextId h e - convertEntities rest --- Decl1 h -> --- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do --- d <- loadDecl1 rootDir declsDirComponents h --- matchDecl1Dependencies h d >>= \case --- Left missing -> convertEntities (missing ++ all) --- Right lookup -> do --- convertDecl1 (error "todo: lookup") h d --- convertEntities rest 
--- Patch1 h -> --- ifM (existsObjectWithHash (runV1 h)) (convertEntities rest) $ do --- p <- loadPatch1 rootDir h --- matchPatch1Dependencies ("patch " ++ show h) p >>= \case --- Left missing -> convertEntities (missing ++ all) --- Right lookup -> do --- -- hashId <- Db.saveHashByteString (runV1 h) --- -- savePatch hashId (Patch.hmap (lookup . V1) p) --- error "todo" --- convertEntities rest --- Branch1 (V1.BranchHash h) -> --- ifM (existsObjectWithHash h) (convertEntities rest) $ do --- cb <- loadCausalBranch1 rootDir (V1 h) --- matchCausalBranch1Dependencies ("branch " ++ show h) cb >>= \case --- Left missing -> convertEntities (missing ++ all) --- Right (lookupObject, lookupCausal) -> do --- convertCausalBranch1 lookupObject lookupCausal cb --- convertEntities rest - - --- -- | load a causal branch raw thingo --- loadCausalBranch1 :: --- MonadIO m => --- MonadError FatalError m => --- CodebasePath -> --- V1 Hash -> --- m (V1.Causal.Raw V1.Branch.Raw V1.Branch.Raw) --- loadCausalBranch1 rootDir h = do --- let file = V1.branchPath rootDir (V1.RawHash (runV1 h)) --- ifM --- (doesFileExist file) --- ( S.getFromFile' (S.V1.getCausal0 S.V1.getRawBranch) file >>= \case --- Left err -> throwError $ InvalidBranch h --- Right c0 -> pure c0 --- ) --- (throwError $ MissingBranch h) - --- primaryHashByHash1 :: DB m => V2.ObjectType -> Hash -> m (Maybe Hash) --- primaryHashByHash1 t h = --- Db.query sql (t, Base32Hex.fromHash h) <&> \case --- [Only h] -> Just (Base32Hex.toHash h) --- [] -> Nothing --- hs -> --- error $ --- "hash2ForHash1 " ++ show t ++ " " --- ++ take 10 (show h) --- ++ " = " --- ++ (show . map (take 10 . show)) hs --- where --- sql = --- [here| --- SELECT v2hash.base32 --- FROM hash AS v2hash --- INNER JOIN object ON object.primary_hash_id = v2hash.id --- INNER JOIN hash_object ON object.id = hash_object.object_id --- INNER JOIN hash AS v1hash ON hash_object.hash_id = v1hash.id --- WHERE object.type_id = ? AND v1hash.base32 = ? 
--- |] - --- loadBranch1 :: --- forall m. --- MonadIO m => --- MonadError FatalError m => --- m V1.Branch.Raw --- loadBranch1 = error "todo: loadBranch1" - --- -- ifM (not <$> doesFileExist (V1.branchPath root h)) --- -- (throwError $ MissingBranch h) --- -- (do --- -- branch1 <- loadBranch1 --- -- ) - --- newtype MdValuesR r = MdValues (Set r) - --- deriving via --- (Set r) --- instance --- Hashable r => Hashable (MdValuesR r) - --- -- this is the version we'll hash --- type RawBranch = --- RawBranchH --- (V2.Referent Hash Hash) -- terms --- (V2.Reference Hash) -- types --- (V2.Reference Hash) -- metadata --- (V2 Hash) -- patches --- (V2 Hash) -- children - --- -- this is the version that closely corresponds to the db schema --- type RawBranch2S = --- RawBranchH --- (V2.Referent Db.ObjectId Db.ObjectId) -- terms --- (V2.Reference Db.ObjectId) -- types --- (V2.Reference Db.ObjectId) -- metadata --- Db.ObjectId -- patches --- Db.CausalHashId -- children - --- data RawBranchH termRef typeRef mdRef pRef cRef = RawBranch --- { terms :: Map (NameSegment, termRef) (Set mdRef), --- types :: Map (NameSegment, typeRef) (Set mdRef), --- patches :: Map NameSegment pRef, --- children :: Map NameSegment cRef --- } - --- type RawCausal = RawCausalH Db.CausalHashId Db.NamespaceHashId - --- data RawCausalH hCausal hValue = RawCausal --- { causalHash :: hCausal, --- valueHash :: hValue, --- parents :: [hCausal] --- } - --- instance Hashable RawBranch where --- tokens b = --- [ H.accumulateToken (terms b), --- H.accumulateToken (types b), --- H.accumulateToken (patches b), --- H.accumulateToken (children b) --- ] - --- instance Hashable RawCausal where --- tokens c = --- [ H.accumulateToken (causalHash c), --- H.accumulateToken (valueHash c), --- H.accumulateToken (parents c) --- ] - -getV1RootBranchHash :: MonadIO m => CodebasePath -> m (Maybe V1.BranchHash) -getV1RootBranchHash root = listDirectory (V1.branchHeadDir root) <&> \case - [single] -> Just . V1.BranchHash . 
Hash.fromBase32Hex . Base32Hex.UnsafeBase32Hex $ Text.pack single - _ -> Nothing - --- | Look for an ObjectId corresponding to the provided V1 hash. --- Returns Left if not found. -lookupObject :: DB m => V1EntityRef -> m (Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))) -lookupObject r@(runV1 . v1EntityRefToHash -> h) = - getObjectIdByBase32Hex (Hash.toBase32Hex h) <&> \case - Nothing -> Left r - Just i -> Right (V1 h, i) - --- -- | Look for a CausalHashId corresponding to the provided V1 hash. --- -- Returns Left if not found. --- lookupCausal :: DB m => V1.Branch.Hash -> m (Either (V1 Hash) (V1 Hash, (V2 Hash, Db.CausalHashId))) --- lookupCausal (V1.unRawHash -> h) = --- Db.queryMaybe sql (Only (Base32Hex.fromHash h)) <&> \case --- Nothing -> Left (V1 h) --- Just (v2Hash, id) -> Right (V1 h, (Base32Hex.toHash <$> v2Hash, id)) --- where --- sql = --- [here| --- SELECT new_hash.base32, new_hash_id --- FROM causal_old --- INNER JOIN hash old_hash ON old_hash_id = old_hash.id --- INNER JOIN hash new_hash ON new_hash_id = new_hash.id --- WHERE old_hash.base32 = ? --- |] - -saveTypeBlobForTerm :: DB m => V2.Sqlite.Reference.Id -> V2DiskTypeOfTerm -> m () -saveTypeBlobForTerm r typ = Db.saveTypeOfTerm r blob - where - blob = S.putBytes (S.V2.putType S.V2.putReference S.V2.putSymbol) typ - --- -- | no Maybes here, as all relevant ObjectId can be known in advance --- saveTypeBlobForReferent :: DB m => V2.ReferentId Db.ObjectId -> Type2S -> m () --- saveTypeBlobForReferent r type2s = --- let blob = S.putBytes (S.V1.putTypeR (V2.putReference V2.putObjectId) putV V2.putUnit) type2s --- in Db.saveTypeOfReferent r blob - --- | Multiple hashes can map to a single object! -getObjectIdByBase32Hex :: DB m => Base32Hex -> m (Maybe (V2 Hash, Db.ObjectId)) -getObjectIdByBase32Hex h = - fmap (first (V2 . 
Hash.fromBase32Hex)) <$> Db.objectAndPrimaryHashByAnyHash h - --- augmentLookup :: Ord a => (a -> b) -> Map a b -> a -> b --- augmentLookup f m a = fromMaybe (f a) (Map.lookup a m) - --- Control.Monad (>=>) :: Monad m => (a -> m b) -> (b -> m c) -> a -> m c -saveReferenceAsReference2 :: DB m => V2.Reference.Reference -> m V2.Sqlite.Reference.ReferenceH -saveReferenceAsReference2 = - mapMOf V2.Reference.h Db.saveHashHash >=> - mapMOf V2.Reference.t Db.saveText - --- | load a term component by its hash. --- A v1 term component is split across an arbitrary number of files. --- We have to 1) figure out what all the filenames are (preloaded into --- `termDirComponents`), 2) load them all, -loadTerm1 :: - MonadIO m => - MonadError FatalError m => - CodebasePath -> - Map (V1 Hash) [V1.Reference.Id] -> - V1 Hash -> - m [(V1Term, V1Type)] -loadTerm1 rootDir componentsFromDir h = case Map.lookup h componentsFromDir of - Nothing -> throwError $ MissingTermHash h - Just set -> case toList set of - [] -> error "Just [] shouldn't occur here." - V1.Reference.Id h _i n : _etc -> for [0 .. n -1] \i -> do - let r = V1.Reference.Id h i n - term <- - V1.FC.getTerm rootDir r - >>= maybe (throwError $ MissingTerm r) pure - typeOfTerm <- - V1.FC.getTypeOfTerm rootDir r - >>= maybe (throwError $ MissingTypeOfTerm r) pure - pure (term, typeOfTerm) - --- loadDecl1 :: --- MonadIO m => --- MonadError FatalError m => --- CodebasePath -> --- Map (V1 Hash) [Reference.Id] -> --- V1 Hash -> --- m [Decl] --- loadDecl1 rootDir componentsFromDir h = case Map.lookup h componentsFromDir of --- Nothing -> throwError $ MissingDeclHash h --- Just set -> case toList set of --- [] -> error "Just [] shouldn't occur here." --- Reference.Id h _i n : _etc -> for [0 .. 
n -1] \i -> do --- let r = Reference.Id h i n --- V1.FC.getDecl (S.get fmtV) (S.get fmtA) rootDir r --- >>= maybe (throwError $ MissingDecl r) pure - --- -- | load a patch --- loadPatch1 :: (MonadIO m, MonadError FatalError m) => [Char] -> V1 Hash -> m (Patch.Patch Reference) --- loadPatch1 rootDir h = do --- let file = V1.editsPath rootDir (runV1 h) --- ifM --- (doesFileExist file) --- ( S.getFromFile' S.V1.getEdits file >>= \case --- Left _err -> throwError (InvalidPatch h) --- Right edits -> pure edits --- ) --- (throwError $ MissingPatch h) - --- 3) figure out what their combined dependencies are -matchTerm1Dependencies :: - DB m => - V1 Hash -> - [(V1Term, V1Type)] -> - m (Either [V1EntityRef] (V1 Hash -> V2 Hash, V2 Hash -> Db.ObjectId, Text -> Db.TextId)) -matchTerm1Dependencies componentHash tms = - let -- Get a list of Eithers corresponding to the non-self dependencies of this term. - lookupDependencyObjects :: - DB m => (V1Term, V1Type) -> m [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] - lookupDependencyObjects (term, typeOfTerm) = traverse lookupObject deps - where - (termTypeDeps, termTermDeps) = - partitionEithers - . map V1.LD.toReference - . toList - $ V1.Term.labeledDependencies term - deps = - nubOrd $ - [Decl1 (V1 h) | V1.Reference.Derived h _i _n <- toList $ V1.Type.dependencies typeOfTerm] - <> [Decl1 (V1 h) | V1.Reference.Derived h _i _n <- termTypeDeps] - <> [ Term1 (V1 h) | V1.Reference.Derived h _i _n <- termTermDeps, h /= runV1 componentHash -- don't include self-refs 😬 - ] - in do - -- check the lefts, if empty then everything is on the right; - -- else return left. 
- (missing, found) <- partitionEithers <$> foldMapM lookupDependencyObjects tms - -- pure $ case missing of - -- [] -> Right (makeLookup found $ "term " ++ show componentHash) - -- missing -> Left missing - error "todo" - --- matchDecl1Dependencies :: --- DB m => V1 Hash -> [Decl] -> m (Either [V1EntityRef] (V1 Hash -> Db.ObjectId)) --- matchDecl1Dependencies componentHash decls = error "todo" -- let --- -- lookupDependencyObjects --- -- :: DB m => Decl -> m [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] --- -- lookupDependencyObjects decl = traverse lookupObject . nubOrd $ --- -- [ Decl1 (V1 h) | Reference.Derived h _i _n <- toList (DD.declDependencies decl) --- -- , V1 h /= componentHash ] --- -- in do --- -- (missing, found) <- partitionEithers <$> foldMapM lookupDependencyObjects decls --- -- pure $ case missing of --- -- [] -> Right (makeLookup found $ "decl " ++ show componentHash) --- -- missing -> Left missing - --- matchPatch1Dependencies :: --- DB m => --- String -> --- Patch -> --- m (Either [V1EntityRef] (V1 Hash -> (V2 Hash, Db.ObjectId))) --- matchPatch1Dependencies description (Patch.Patch tms tps) = do --- deps :: [Either V1EntityRef (V1 Hash, (V2 Hash, Db.ObjectId))] <- --- traverse lookupObject . 
nubOrd $ --- [ Term1 (V1 h) | (r, e) <- Relation.toList tms, Reference.Derived h _i _n <- r : TermEdit.references e --- ] --- ++ [ Decl1 (V1 h) | (r, e) <- Relation.toList tps, Reference.Derived h _i _n <- r : TypeEdit.references e --- ] --- let (missing, found) = partitionEithers deps --- pure $ case missing of --- [] -> Right (makeLookup found description) --- missing -> Left missing - --- -- | multiple lookups needed in converting branch --- -- data CBDepLookup --- matchCausalBranch1Dependencies :: --- DB m => --- String -> --- V1.Causal.Raw V1.Branch.Raw V1.Branch.Raw -> --- m (Either [V1EntityRef] (V1 Hash -> Db.ObjectId, V1 Hash -> (V2 Hash, Db.CausalHashId))) --- matchCausalBranch1Dependencies description cb@(V1.Causal.rawHead -> b) = do --- deps <- --- traverse lookupObject . nubOrd $ --- -- history --- [Branch1 h | h <- V1.Causal.rawTails cb] --- ++ --- -- terms --- [ Term1 (V1 h) --- | Referent.Ref (Reference.Derived h _i _n) <- --- (toList . Relation.dom . Star3.d1 . V1.Branch._termsR) b --- ] --- ++ [ Term1 (V1 h) --- | Referent.Ref (Reference.Derived h _i _n) <- --- (toList . Relation.dom . Star3.d1 . V1.Branch._termsR) b --- ] --- ++ --- -- term metadata --- [ Term1 (V1 h) --- | Reference.Derived h _i _n <- --- (map snd . toList . Relation.ran . Star3.d3 . V1.Branch._termsR) b --- ] --- ++ --- -- types --- [ Decl1 (V1 h) --- | Reference.Derived h _i _n <- --- (toList . Relation.dom . Star3.d1 . V1.Branch._typesR) b --- ] --- ++ --- -- type metadata --- [ Term1 (V1 h) --- | Reference.Derived h _i _n <- --- (map snd . toList . Relation.ran . Star3.d3 . 
V1.Branch._typesR) b --- ] --- ++ [Branch1 h | h <- toList (V1.Branch._childrenR b)] --- ++ [Patch1 (V1 h) | h <- toList (V1.Branch._editsR b)] - --- causalParents <- traverse lookupCausal (V1.Causal.rawTails cb) - --- let (missingEntities, foundObjects) = partitionEithers deps --- let (missingParents, foundParents) = partitionEithers causalParents - --- error "todo" - --- -- pure $ case missingEntities of --- -- [] -> Right ( makeLookup foundObjects description --- -- , makeCausalLookup foundParents description ) --- -- missing -> Left missing - --- makeCausalLookup :: [(V1 Hash, (V2 Hash, Db.CausalHashId))] -> String -> V1 Hash -> (V2 Hash, Db.CausalHashId) --- makeCausalLookup l description a = --- let m = Map.fromList l --- in case Map.lookup a m of --- Just b -> b --- Nothing -> --- error $ --- "Somehow I don't have the CausalHashId for " --- ++ show (Base32Hex.fromHash (runV1 a)) --- ++ " in the map for " --- ++ description - -makeLookup :: [(V1 Hash, (V2 Hash, Db.ObjectId))] -> String -> V1 Hash -> (V2 Hash, Db.ObjectId) -makeLookup l lookupDescription a = - let m = Map.fromList l - in case Map.lookup a m of - Just b -> b - Nothing -> - error $ - "Somehow I don't have the ObjectId for " - ++ show (Hash.toBase32Hex (runV1 a)) - ++ " in the map for " - ++ lookupDescription - -createTypeSearchIndicesForReferent :: DB m => V2.Sqlite.Referent.Id -> V2HashTypeOfTerm -> m () -createTypeSearchIndicesForReferent r typ = do - let typeForIndexing = TypeUtil.removeAllEffectVars typ - - -- add the term to the type index - typeReferenceForIndexing :: V2.Sqlite.Reference.ReferenceH <- - saveReferenceAsReference2 (TypeUtil.toReference typeForIndexing) - - Db.addToTypeIndex typeReferenceForIndexing r - - -- add the term to the type mentions index - typeMentionsForIndexing :: [V2.Sqlite.Reference.ReferenceH] <- - traverse - saveReferenceAsReference2 - (toList $ TypeUtil.toReferenceMentions typeForIndexing) - - traverse_ (flip Db.addToTypeMentionsIndex r) 
typeMentionsForIndexing - --- todo: -createDependencyIndexForTerm :: DB m => V2.Sqlite.Reference.Id -> V2DiskTermComponent -> m () -createDependencyIndexForTerm tmRef@(V2.Reference.Id selfId i) (V2.TermFormat.LocallyIndexedComponent c) = --- newtype LocallyIndexedComponent = --- LocallyIndexedComponent (Vector (LocalIds, Term)) - let - -- | get the ith element from the term component - (localIds, localTerm) = c Vector.! fromIntegral i - - -- get the term dependencies as localids - termRefs :: [V2.TermFormat.TermRef] - typeRefs :: [V2.TermFormat.TypeRef] - termLinks :: [V2.Referent.Referent' V2.TermFormat.TermRef V2.TermFormat.TypeRef] - typeLinks :: [V2.TermFormat.TypeRef] - (termRefs, typeRefs, termLinks, typeLinks) = TermUtil.dependencies localTerm - - -- and convert them to Reference' TextId ObjectId - localToDbTextId :: V2.TermFormat.LocalTextId -> Db.TextId - localToDbTextId (V2.TermFormat.LocalTextId n) = - V2.LocalIds.textLookup localIds Vector.! fromIntegral n - localToDbDefnId :: V2.TermFormat.LocalDefnId -> Db.ObjectId - localToDbDefnId (V2.TermFormat.LocalDefnId n)= - V2.LocalIds.objectLookup localIds Vector.! 
fromIntegral n - localToDbTermRef :: V2.TermFormat.TermRef -> V2.Sqlite.Reference.Reference - localToDbTermRef = bimap localToDbTextId (maybe selfId localToDbDefnId) - localToDbTypeRef :: V2.TermFormat.TypeRef -> V2.Sqlite.Reference.Reference - localToDbTypeRef = bimap localToDbTextId localToDbDefnId - localFoo :: V2.Referent.Referent' V2.TermFormat.TermRef V2.TermFormat.TypeRef -> V2.Sqlite.Reference.Reference - localFoo = \case - V2.Referent.Ref tm -> localToDbTermRef tm - V2.Referent.Con tp _ -> localToDbTypeRef tp - dependencies :: [V2.Sqlite.Reference.Reference] - dependencies = map localToDbTermRef termRefs - <> map localToDbTypeRef typeRefs - <> map localFoo termLinks - <> map localToDbTypeRef typeLinks - -- and then add all of these to the dependency index - in traverse_ (flip Db.addToDependentsIndex tmRef) dependencies - -localDefnIdToObjectId :: V2.LocalIds.LocalIds -> V2.TermFormat.LocalDefnId -> Db.ObjectId -localDefnIdToObjectId (V2.LocalIds.LocalIds _t d) (V2.TermFormat.LocalDefnId id) = d Vector.! fromIntegral id - -localTextIdToObjectId :: V2.LocalIds.LocalIds -> V2.TermFormat.LocalTextId -> Db.TextId -localTextIdToObjectId (V2.LocalIds.LocalIds t _d) (V2.TermFormat.LocalTextId id) = t Vector.! fromIntegral id - --- createDependencyIndexForDecl :: DB m => V2.ReferenceId Db.ObjectId -> Decl2S -> m () --- createDependencyIndexForDecl tmRef@(V2.ReferenceId selfId _i) decl = --- traverse_ (Db.addDependencyToIndex tmRef) --- . toList --- . 
DD.declDependencies --- $ DD.rmapDecl (fmap $ fromMaybe selfId) decl - -saveTermComponent :: DB m => V1 Hash -> V2 Hash -> V2DiskTermComponent -> m Db.ObjectId -saveTermComponent h1 h2 component = do - h1Id <- Db.saveHashHash (runV1 h1) - h2Id <- Db.saveHashHash (runV2 h2) - o <- Db.saveObject h2Id V2.OT.TermComponent blob - Db.saveHashObject h1Id o 1 - Db.saveHashObject h2Id o 2 - pure o - where - blob = S.putBytes S.V2.putTermFormat (V2.TermFormat.Term component) - -saveDeclComponent :: DB m => V1 Hash -> V2 Hash -> V2DiskDeclComponent -> m Db.ObjectId -saveDeclComponent h component = error "todo" -- do - -- -- o <- Db.saveObject h V2.DeclComponent blob - -- -- Db.saveHashObject h o 2 - -- -- pure o - -- -- where - -- -- blob = S.putBytes (S.V1.putFoldable (V2.putDecl putObjectId putV putA)) component - --- savePatch :: DB m => Db.HashId -> Patch2S -> m () --- savePatch h p = do --- o <- Db.saveObject h V2.Patch (S.putBytes V2.putEdits p) --- Db.saveHashObject h o 2 - --- -- saveBranch :: DB m => Db.HashId -> - --- | Loads a dir with format /base32-encoded-reference.id... --- into a map from Hash to component references -componentMapForDir :: forall m. MonadIO m => FilePath -> m (Map (V1 Hash) [V1.Reference.Id]) -componentMapForDir root = listDirectory root <&> foldl' insert mempty - where - insert m filename = case V1.componentIdFromString filename of - Nothing -> m -- skip silently - Just r@(V1.Reference.Id h _i _n) -> - Map.unionWith (<>) m (Map.singleton (V1 h) [r]) - -existsObjectWithHash :: DB m => Hash -> m Bool -existsObjectWithHash = Db.objectExistsWithHash . Hash.toBase32Hex - -convertABT :: forall f v a f' v' a'. 
Ord v' => (f (V1.ABT.Term f v a) -> f' (V2.ABT.Term f' v' a')) -> (v -> v') -> (a -> a') -> V1.ABT.Term f v a -> V2.ABT.Term f' v' a' -convertABT ff fv fa = goTerm - where - goTerm :: V1.ABT.Term f v a -> V2.ABT.Term f' v' a' - goTerm (V1.ABT.Term vs a out) = V2.ABT.Term (Set.map fv vs) (fa a) (goABT out) - goABT :: V1.ABT.ABT f v (V1.ABT.Term f v a) -> V2.ABT.ABT f' v' (V2.ABT.Term f' v' a') - goABT = \case - V1.ABT.Var v -> V2.ABT.Var (fv v) - V1.ABT.Cycle t -> V2.ABT.Cycle (goTerm t) - V1.ABT.Abs v t -> V2.ABT.Abs (fv v) (goTerm t) - V1.ABT.Tm ft -> V2.ABT.Tm (ff ft) - -convertABT0 :: Functor f => V1.ABT.Term f v a -> V2.ABT.Term f v a -convertABT0 (V1.ABT.Term vs a out) = V2.ABT.Term vs a (goABT out) where - goABT = \case - V1.ABT.Var v -> V2.ABT.Var v - V1.ABT.Cycle t -> V2.ABT.Cycle (convertABT0 t) - V1.ABT.Abs v t -> V2.ABT.Abs v (convertABT0 t) - V1.ABT.Tm ft -> V2.ABT.Tm (convertABT0 <$> ft) - -convertType1to2 :: (V1.Reference.Reference -> r) -> V1.Type.F a -> V2.Type.F' r a -convertType1to2 fr = \case - V1.Type.Ref r -> V2.Type.Ref (fr r) - V1.Type.Arrow i o -> V2.Type.Arrow i o - V1.Type.Ann a k -> V2.Type.Ann a (convertKind k) - V1.Type.App f x -> V2.Type.App f x - V1.Type.Effect e b -> V2.Type.Effect e b - V1.Type.Effects as -> V2.Type.Effects as - V1.Type.Forall a -> V2.Type.Forall a - V1.Type.IntroOuter a -> V2.Type.IntroOuter a - -convertSymbol :: V1.Symbol.Symbol -> V2.Symbol.Symbol -convertSymbol (V1.Symbol.Symbol id name) = V2.Symbol.Symbol id name - -convertKind :: V1.Kind.Kind -> V2.Kind.Kind -convertKind = \case - V1.Kind.Star -> V2.Kind.Star - V1.Kind.Arrow i o -> V2.Kind.Arrow (convertKind i) (convertKind o) - -type LocalIdState = - (Map Text V2.TermFormat.LocalTextId, Map (V2 Hash) V2.TermFormat.LocalDefnId) - -rewriteType :: - (V2.Reference.Reference -> State.State LocalIdState V2.TermFormat.TypeRef) -> - V2HashTypeOfTerm -> - State LocalIdState V2.TermFormat.Type -rewriteType doRef = V2.ABT.transformM go - where - go :: V2.Type.FT k -> 
State LocalIdState (V2.TermFormat.FT k) - go = \case - V2.Type.Ref r -> (V2.Type.Ref <$> doRef r) - V2.Type.Arrow l r -> pure $ V2.Type.Arrow l r - V2.Type.Ann a kind -> pure $ V2.Type.Ann a kind - V2.Type.Effect e b -> pure $ V2.Type.Effect e b - V2.Type.Effects es -> pure $ V2.Type.Effects es - V2.Type.Forall a -> pure $ V2.Type.Forall a - V2.Type.IntroOuter a -> pure $ V2.Type.IntroOuter a - --- | rewrite Vars and Tms 🙃 -mapTermToVar :: - (Foldable f, Functor f, Ord v2) => - (v -> v2) -> - (a -> f (V2.ABT.Term f v a) -> Maybe (V2.ABT.Term f v2 a)) -> - V2.ABT.Term f v a -> - V2.ABT.Term f v2 a -mapTermToVar fv ft t@(V2.ABT.Term _ a abt) = case abt of - V2.ABT.Var v -> V2.ABT.var a (fv v) - V2.ABT.Cycle body -> V2.ABT.cycle a (mapTermToVar fv ft body) - V2.ABT.Abs x e -> V2.ABT.abs a (fv x) (mapTermToVar fv ft e) - V2.ABT.Tm body -> - case ft a body of - Nothing -> V2.ABT.tm a (mapTermToVar fv ft `fmap` body) - Just t' -> t' - -mapVarToTerm :: - (Foldable f, Functor f, Ord v2) => - (v -> v2) -> - (v -> Either (f (V2.ABT.Term f v2 a)) v2) -> - V2.ABT.Term f v a -> - V2.ABT.Term f v2 a -mapVarToTerm fAbs fVar t@(V2.ABT.Term _ a abt) = case abt of - V2.ABT.Var v -> case fVar v of - Left tm -> V2.ABT.tm a tm - Right v2 -> V2.ABT.var a v2 - V2.ABT.Cycle body -> V2.ABT.cycle a (mapVarToTerm fAbs fVar body) - V2.ABT.Abs x e -> V2.ABT.abs a (fAbs x) (mapVarToTerm fAbs fVar e) - V2.ABT.Tm body -> V2.ABT.tm a (mapVarToTerm fAbs fVar <$> body) - --- | Given a V1 term component, convert and save it to the V2 codebase --- Pre-requisite: all hash-identified entities in the V1 component have --- already been converted and added to the V2 codebase, apart from self- --- references. 
-convertTerm1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> (Text -> Db.TextId) -> V1 Hash -> [(V1Term, V1Type)] -> m () -convertTerm1 lookup1 lookup2 lookupText hash1 v1component = do - -- construct v2 term component for hashing - let - buildTermType2H :: (V1 Hash -> V2 Hash) -> V1Type -> V2HashTypeOfTerm - buildTermType2H lookup - = V2.ABT.transform (convertType1to2 goRef) - . V2.ABT.vmap convertSymbol - . convertABT0 - where - goRef = \case - V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t - V1.Reference.Derived h i _n -> - V2.Reference.ReferenceDerived - (V2.Reference.Id (runV2 . lookup $ V1 h) i) - buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> V1Term -> V2HashTerm - buildTerm2H lookup self = goTerm - where - goTerm = convertABT goTermF convertSymbol (const ()) - goTermF :: V1.Term.F V1.Symbol.Symbol () V1Term -> V2.Term.F V2.Symbol.Symbol V2HashTerm - lookupTermLink = \case - V1.Referent.Ref r -> V2.Referent.Ref (lookupTerm r) - V1.Referent.Con r i _ct -> V2.Referent.Con (lookupType r) (fromIntegral i) - lookupTerm = \case - V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t - V1.Reference.Derived h i _n -> - let h' = if V1 h == self then - Nothing - else Just . runV2 . lookup $ V1 h - in V2.Reference.ReferenceDerived (V2.Reference.Id h' i) - lookupType = \case - V1.Reference.Builtin t -> V2.Reference.ReferenceBuiltin t - V1.Reference.Derived h i _n -> - V2.Reference.ReferenceDerived - (V2.Reference.Id (runV2 . 
lookup $ V1 h) i) - goTermF = \case - V1.Term.Int i -> V2.Term.Int i - V1.Term.Nat n -> V2.Term.Nat n - V1.Term.Float f -> V2.Term.Float f - V1.Term.Boolean b -> V2.Term.Boolean b - V1.Term.Text t -> V2.Term.Text t - V1.Term.Char c -> V2.Term.Char c - V1.Term.Ref r -> V2.Term.Ref (lookupTerm r) - V1.Term.Constructor r i -> - V2.Term.Constructor (lookupType r) (fromIntegral i) - V1.Term.Request r i -> - V2.Term.Request (lookupType r) (fromIntegral i) - V1.Term.Handle b h -> V2.Term.Handle (goTerm b) (goTerm h) - V1.Term.App f a -> V2.Term.App (goTerm f) (goTerm a) - V1.Term.Ann e t -> V2.Term.Ann (goTerm e) (buildTermType2H lookup t) - V1.Term.Sequence as -> V2.Term.Sequence (goTerm <$> as) - V1.Term.If c t f -> V2.Term.If (goTerm c) (goTerm t) (goTerm f) - V1.Term.And a b -> V2.Term.And (goTerm a) (goTerm b) - V1.Term.Or a b -> V2.Term.Or (goTerm a) (goTerm b) - V1.Term.Lam a -> V2.Term.Lam (goTerm a) - V1.Term.LetRec _ bs body -> V2.Term.LetRec (goTerm <$> bs) (goTerm body) - V1.Term.Let _ b body -> V2.Term.Let (goTerm b) (goTerm body) - V1.Term.Match e cases -> V2.Term.Match (goTerm e) (goCase <$> cases) - V1.Term.TermLink r -> V2.Term.TermLink (lookupTermLink r) - V1.Term.TypeLink r -> V2.Term.TypeLink (lookupType r) - goCase (V1.Term.MatchCase p g b) = - V2.Term.MatchCase (goPat p) (goTerm <$> g) (goTerm b) - goPat = \case - V1.Pattern.Unbound -> V2.Term.PUnbound - V1.Pattern.Var -> V2.Term.PVar - V1.Pattern.Boolean b -> V2.Term.PBoolean b - V1.Pattern.Int i -> V2.Term.PInt i - V1.Pattern.Nat n -> V2.Term.PNat n - V1.Pattern.Float d -> V2.Term.PFloat d - V1.Pattern.Text t -> V2.Term.PText t - V1.Pattern.Char c -> V2.Term.PChar c - V1.Pattern.Constructor r i ps -> - V2.Term.PConstructor (lookupType r) i (goPat <$> ps) - V1.Pattern.As p -> V2.Term.PAs (goPat p) - V1.Pattern.EffectPure p -> V2.Term.PEffectPure (goPat p) - V1.Pattern.EffectBind r i ps k -> - V2.Term.PEffectBind (lookupType r) i (goPat <$> ps) (goPat k) - V1.Pattern.SequenceLiteral ps -> 
V2.Term.PSequenceLiteral (goPat <$> ps) - V1.Pattern.SequenceOp p op p2 -> - V2.Term.PSequenceOp (goPat p) (goSeqOp op) (goPat p2) - goSeqOp = \case - V1.Pattern.Cons -> V2.Term.PCons - V1.Pattern.Snoc -> V2.Term.PSnoc - V1.Pattern.Concat -> V2.Term.PConcat - buildTermComponent2S :: - (V2 Hash -> Db.ObjectId) -> V2 Hash -> V2HashTermComponent -> V2DiskTermComponent - buildTermComponent2S getId h0 terms = - let rewrittenTerms :: [(V2.TermFormat.Term, LocalIdState)] = - map (flip State.runState mempty . rewriteTerm) terms - rewriteTerm :: V2HashTerm -> State.State LocalIdState V2.TermFormat.Term - rewriteTerm = V2.ABT.transformM go - where - doText :: Text -> State.State LocalIdState V2.TermFormat.LocalTextId - doText t = do - (textMap, objectMap) <- State.get - case Map.lookup t textMap of - Nothing -> do - let id = - V2.TermFormat.LocalTextId - . fromIntegral - $ Map.size textMap - State.put (Map.insert t id textMap, objectMap) - pure id - Just id -> pure id - doHash :: Hash -> State.State LocalIdState V2.TermFormat.LocalDefnId - doHash (V2 -> h) = do - (textMap, objectMap) <- State.get - case Map.lookup h objectMap of - Nothing -> do - let id = - V2.TermFormat.LocalDefnId - . 
fromIntegral - $ Map.size objectMap - State.put (textMap, Map.insert h id objectMap) - pure id - Just id -> pure id - doRecRef :: V2.Reference.Reference' Text (Maybe Hash) -> State.State LocalIdState V2.TermFormat.TermRef - doRecRef = \case - V2.Reference.ReferenceBuiltin t -> - V2.Reference.ReferenceBuiltin <$> doText t - V2.Reference.ReferenceDerived r -> - V2.Reference.ReferenceDerived <$> case r of - V2.Reference.Id h i -> V2.Reference.Id <$> traverse doHash h <*> pure i - doRef :: V2.Reference.Reference -> State.State LocalIdState V2.TermFormat.TypeRef - doRef = \case - V2.Reference.ReferenceBuiltin t -> - V2.Reference.ReferenceBuiltin <$> doText t - V2.Reference.ReferenceDerived (V2.Reference.Id h i) -> - V2.Reference.ReferenceDerived - <$> (V2.Reference.Id <$> doHash h <*> pure i) - go :: V2.Term.F V2.Symbol.Symbol k -> State LocalIdState (V2.TermFormat.F k) - go = \case - V2.Term.Int i -> pure $ V2.Term.Int i - V2.Term.Nat n -> pure $ V2.Term.Nat n - V2.Term.Float d -> pure $ V2.Term.Float d - V2.Term.Boolean b -> pure $ V2.Term.Boolean b - V2.Term.Text t -> V2.Term.Text <$> doText t - V2.Term.Char c -> pure $ V2.Term.Char c - V2.Term.Ref r -> V2.Term.Ref <$> doRecRef r - V2.Term.Constructor r cid -> - V2.Term.Constructor <$> doRef r <*> pure cid - V2.Term.Request r cid -> V2.Term.Request <$> doRef r <*> pure cid - V2.Term.Handle e h -> pure $ V2.Term.Handle e h - V2.Term.App f a -> pure $ V2.Term.App f a - V2.Term.Ann e typ -> V2.Term.Ann e <$> rewriteType doRef typ - mapToVec :: Ord i => (a -> b) -> Map a i -> Vector b - mapToVec f = Vector.fromList . map (f . fst) . List.sortOn snd . Map.toList - stateToIds :: LocalIdState -> V2.LocalIds.LocalIds - stateToIds (t, o) = - V2.LocalIds.LocalIds (mapToVec lookupText t) (mapToVec lookup2 o) - -- state : (Map Text Int, Map Hash Int) - -- Term.app Nat.+ 7 #8sf73g - -- ["Nat.+"] [#8sf73g] - -- [lookupText "Nat.+"] [lookup #8sf73g] - -- Term.app (Builtin 0) 7 (Hash 0) - in V2.TermFormat.LocallyIndexedComponent - . 
Vector.fromList - . fmap swap - . fmap (second stateToIds) - $ rewrittenTerms - refToVarTerm :: - Ord v => - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a -> - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a - refToVarTerm = mapTermToVar Right \a body -> case body of - V2.Term.Ref (V2.Reference.ReferenceDerived (V2.Reference.Id Nothing i)) -> - Just $ V2.ABT.var a (Left (V1 (fromIntegral i))) - _ -> Nothing - varToRefTerm :: - (Show v, Ord v) => - Map (V1 Int) (V2 Int) -> - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) (Either (V1 Int) v) a -> - V2.ABT.Term (V2.Term.F' text (V2.Reference.Reference' t (Maybe h)) typeRef termLink typeLink vt) v a - varToRefTerm lookup = mapVarToTerm fromLeft $ mapLeft \(V1 i) -> - V2.Term.Ref (V2.Reference.Derived Nothing (fromIntegral i)) - where - fromLeft :: Show a => Either a b -> b - fromLeft = flip either id \r -> - error ("encountered a reference pseudovar " ++ show r ++ " in ABT.Abs") - rehashComponent :: (V1 Hash -> V2 Hash) -> V1 Hash -> [(V1Term, V1Type)] -> (V2 Hash, [V2HashTypeOfTerm], V2HashTermComponent) - rehashComponent lookup1 hash1 (unzip -> (v1terms, v1types)) = - let fromLeft = either id (\x -> error $ "impossibly " ++ show x) - in let indexVars = Left . V1 <$> [0 ..] 
- -- create a [(v, V1Term)] - namedTerms1 :: [(Either (V1 Int) V2.Symbol.Symbol, V1Term)] - namedTerms1 = zip indexVars v1terms - -- convert [(v, V1Term)] to [(v, V2Term)] - namedTerms2 :: [(Either (V1 Int) V2.Symbol.Symbol, V2HashTerm)] - namedTerms2 = fmap (second (buildTerm2H lookup1 hash1)) namedTerms1 - -- convert the previous to a map - namedTermMap :: Map (Either (V1 Int) V2.Symbol.Symbol) V2HashTerm - namedTermMap = Map.fromList namedTerms2 - -- convert the Map v V2Term to (hash, [v, V2Term]) where the list - -- has a new canonical ordering - hash2 :: V2 Hash - v1Index :: [V1 Int] - -- (h, ([2, 0, 1], [t2, t0, t1]) - (hash2, unzip -> (fmap fromLeft -> v1Index, v2Terms)) = - V2.ABT.hashComponent (refToVarTerm <$> namedTermMap) - - -- a mapping from the v1 canonical order to v2 canonical order - -- Q: Why do you need a map from V1 to V2 Ints? - -- A: the `v`s embed the component index of a self-reference, - -- - indexMap :: Map (V1 Int) (V2 Int) - indexMap = Map.fromList (zip v1Index (V2 <$> [0 :: Int ..])) - - -- convert the V1TypeOfTerm to V2TypeOfTerm, - -- and permute their order according to indexMap - convertedTypes, permutedTypes :: [V2HashTypeOfTerm] - convertedTypes = map (buildTermType2H lookup1) v1types - -- the first element of v1Index is the V1 index of the first V2 element - permutedTypes = map (((!!) convertedTypes) . 
runV1) v1Index - -- - in (hash2, permutedTypes, varToRefTerm indexMap <$> v2Terms) - - hash2 :: V2 Hash - v2types :: [V2HashTypeOfTerm] - v2hashComponent :: V2HashTermComponent - (hash2, v2types, v2hashComponent) = rehashComponent lookup1 hash1 v1component - -- construct v2 term component for serializing - v2diskComponent :: V2DiskTermComponent = - buildTermComponent2S lookup2 hash2 v2hashComponent - - -- serialize the v2 term component - componentObjectId :: Db.ObjectId <- saveTermComponent hash1 hash2 v2diskComponent - - -- construct v2 types for each component element, and save the types to the - -- to the indices - for_ (zip [0 ..] v2types) $ \(i, type2) -> do - let r = V2.Reference.Id componentObjectId i - let rt = V2.Referent.RefId r - - saveTypeBlobForTerm r (buildTermType2S lookupText lookup2 type2) - createTypeSearchIndicesForReferent rt type2 - createDependencyIndexForTerm r v2diskComponent - -convertDecl1 :: DB m => (V1 Hash -> V2 Hash) -> (V2 Hash -> Db.ObjectId) -> V1 Hash -> [V1Decl] -> m () -convertDecl1 lookup1 lookup2 hash1 v1component = do - let -- convert constructor type (similar to buildTermType2H) - - -- v2ctorTypes :: [V2TypeOfConstructor] = error "todo" - - -- -- rehash and reorder component - -- hash2 :: V2 Hash - -- v2hashComponent :: V2HashDeclComponent - -- (hash2, v2hashComponent) = rehashComponent lookup1 hash1 v1component - -- where - -- -- take a look at existing DataDeclaration.hashDecls - - -- -- |1. for each decl in a component, convert it to the new abt/functor - -- -- and swap out all its V1 Hashes for V2 Hashes, using `Nothing` for - -- -- a self-reference hash. - -- -- 2. lift the vars so that self-references are Left i - -- -- and local vars are Right Symbol - -- -- 3. call ABT.hashComponent to get a new hash and a new canonical ordering - -- -- 4. 
unlift the vars back, rewrite them to reflect the new canonical ordering - -- rehashComponent :: (V1 Hash -> V2 Hash) -> V1 Hash -> [V1Decl] -> (V2 Hash, V2HashDeclComponent) - -- rehashComponent = error "todo" - - -- convert decl component - -- v2diskComponent :: V2DiskDeclComponent = error "todo" - - -- serialize the v2 decl component - -- componentObjectId :: Db.ObjectId <- saveDeclComponent hash1 hash2 v2diskComponent - - error "todo: create type indices for each decl in the component" - --- let v2componentI :: [Decl2I] = --- map (buildDecl2I hash2) v2hashComponent - --- for_ (zip v2componentI [0..]) $ \(decl2, i) -> do --- let r = V2.ReferenceId componentObjectId i - --- for_ (zip --- (DD.constructorTypes (DD.asDataDecl decl2)) --- [0..]) $ \(type2, j) -> do --- let rt = V2.ReferentIdCon r j --- saveTypeBlobForReferent rt type2 --- createTypeSearchIndicesForReferent rt type1 -- type1 because `find` uses Hashes - --- createDependencyIndexForDecl r decl2 - --- convertCausalBranch1 :: --- DB m => --- (V1 Hash -> Db.ObjectId) -> --- (V1 Hash -> (V2 Hash, Db.CausalHashId)) -> --- -- -> V1 Hash --- V1.Causal.Raw V1.Branch.Raw V1.Branch.Raw -> --- m () --- convertCausalBranch1 lookupObject lookupCausal causalBranch1 = error "todo" -- do --- -- let branch1Hash = V1.currentHash causalBranch1 --- -- rawBranch2 :: RawBranch = convertBranch1 (V1.rawHead causalBranch1) - --- -- -- branch2Id <- Db.saveObject branch1Hash --- -- branch2Hash :: V2 Hash = H.hash rawBranch2 --- -- lookupObject <- pure () --- -- -- rawCausal2 :: RawCausal = convertCausal1 --- -- -- rawBranch2S --- -- -- rawCausal2 :: RawCausal <- convertCausal1 lookup rawBranch2 (V1.rawTails causalBranch1) - --- -- -- rawBranch2S --- -- -- saveBranch2 rawBranch2 --- -- -- saveCausal2 rawCausal2 --- -- error "todo" --- -- -- Due to switching reference types, and reference's hash's having previously --- -- -- incorporated the extra `len` field, definitions and patches will not end up --- -- -- having the same 
hashes as before. :-\ --- -- -- This means we have to hash them again and perhaps store old and new hashes --- -- -- separately. --- -- where --- -- indexBranch2S :: RawBranch -> RawBranch2S --- -- indexBranch2S b = RawBranch --- -- (Map.fromList --- -- [((ns, over Db.referent2Traversal (lookupObject . V1) r), --- -- Set.map (over Db.reference2Traversal (lookupObject . V1)) mds) --- -- |((ns, r), mds) <- Map.toList (terms b)]) --- -- (Map.fromList --- -- [((ns, over Db.reference2Traversal (lookupObject . V1) r), --- -- Set.map (over Db.reference2Traversal (lookupObject . V1)) mds) --- -- |((ns, r), mds) <- Map.toList (types b)]) --- -- (Map.fromList []) --- -- (Map.fromList []) --- -- -- <$> tms <*> tps <*> pchs <*> chn where --- -- -- tms = Map.fromList <$> traverse indexTerm (Map.toList (terms b)) --- -- -- indexTerm :: DB m --- -- -- => ((NameSegment, Db.Referent2 Hash), Set (V2.Reference Hash)) --- -- -- -> m ((NameSegment, Db.Referent2 Db.ObjectId), Set (V2.Reference Db.ObjectId)) --- -- -- indexTerm ((ns, r), mds) = (,) <$> k <*> v where --- -- -- k = (ns, over Db.referentTraversal lookupObject r) --- -- -- v = Set.map - --- -- convertBranch1 :: V1.Branch.Raw -> RawBranch --- -- convertBranch1 b = RawBranch --- -- -- terms --- -- (Map.fromList --- -- [ ((ns, over Db.referentTraversal id r), mdSet) --- -- | (r, ns) <- Relation.toList . Star3.d1 $ V1.Branch._termsR b --- -- , let mdSet :: Set (V2.Reference Hash) --- -- mdSet = Set.fromList --- -- . fmap (over Db.referenceTraversal id . snd) --- -- . Set.toList --- -- . Relation.lookupDom r --- -- . Star3.d3 --- -- $ V1.Branch._termsR b --- -- ]) --- -- -- types --- -- (Map.fromList --- -- [ ((ns, over Db.referenceTraversal id r), mdSet) --- -- | (r, ns) <- Relation.toList . Star3.d1 $ V1.Branch._typesR b --- -- , let mdSet :: Set (V2.Reference Hash) --- -- mdSet = Set.fromList --- -- . fmap (over Db.referenceTraversal id . snd) --- -- . Set.toList --- -- . Relation.lookupDom r --- -- . 
Star3.d3 --- -- $ V1.Branch._typesR b --- -- ]) --- -- -- patches --- -- (V1.Branch._editsR b) --- -- -- children --- -- (runV2 . fst . lookupCausal . V1 . V1.unRawHash <$> V1.Branch._childrenR b) - --- voidTermAnnotations :: --- V1.TermR tmRef tpRef tmLink tpLink (V1.TypeR tpRef vt at) blankRepr ap v a -> --- V1.TermR tmRef tpRef tmLink tpLink (V1.TypeR tpRef vt ()) Void () v () --- voidTermAnnotations = --- void . Term.extraMap id id id id void undefined (const ()) - --- ----- graveyard --- ---- |True if `h` (just the hash!) is interned in the DB --- --knownHash :: DB m => Hash -> m Bool --- --knownHash h = anyExists $ Db.query sql [Base32Hex.fromHash h] where --- -- sql = [here| SELECT 1 FROM hash WHERE base32 = ? |] - --- --saveReference :: DB m => ReferenceH h -> m Db.ReferenceId --- --saveReference r = insert r >> fmap fromJust (loadReference r) where --- -- insert = \case --- -- Reference.Builtin t -> execute sql (Just t, Nothing) --- -- Reference.DerivedId idH -> do --- -- rdId <- saveReferenceDerived idH --- -- Db.execute sql (Nothing, Just rdId) --- -- sql = [here| --- -- INSERT INTO reference (builtin, reference_derived_id) --- -- VALUES (?, ?) --- -- ON CONFLICT DO NOTHING --- -- |] - --- --loadReferenceByHashId :: DB m => ReferenceH HashId -> m (Maybe ReferenceId) --- --loadReferenceByHashId = \case --- -- Reference.Builtin t -> queryMaybe sqlb (Only t) --- -- Reference.DerivedId idH -> --- -- loadReferenceDerivedByHashId idH >>= \case --- -- Nothing -> pure Nothing --- -- Just rdId -> queryMaybe sqld (Only rdId) --- -- where --- -- sqlb = [here| SELECT id FROM reference WHERE builtin = ? |] --- -- sqld = [here| SELECT id FROM reference WHERE reference_derived_id = ? 
|] - --- --saveReferenceDerived :: DB m => Reference.Id -> m Db.ReferenceDerivedId --- --saveReferenceDerived r@(Reference.Id h i _n) = do --- -- hashId <- saveHashByteString h --- -- saveReferenceDerivedByHashId (Reference.IdH hashId i _n) --- -- --- --saveReferenceDerivedByHashId :: DB m => Reference.IdH Db.HashId -> m Db.ReferenceDerivedId --- --saveReferenceDerivedByHashId r@(Reference.IdH hashId i _n) = --- -- insert hashId i >> fmap fromJust (loadReferenceDerivedByHashId r) where --- -- insert h i = liftIO $ execute sql (h, i) where --- -- sql = [here| --- -- INSERT INTO reference_derived (hash_id, component_index) --- -- VALUES (?, ?) --- -- ON CONFLICT DO NOTHING --- -- |] --- -- --- --loadReferenceDerivedByHashId :: DB m => Reference.IdH Db.HashId -> m (Maybe Db.ReferenceDerivedId) --- --loadReferenceDerivedByHashId (Reference.IdH h i _n) = --- -- queryMaybe sql (h, i) where --- -- sql = [here| --- -- SELECT id FROM reference_derived --- -- WHERE hash_id = ? AND component_index = ? --- -- |] - --- --saveReferentDerived :: DB m => Referent.Id -> m ReferentDerivedId --- --saveReferentDerived = error "todo" --- --loadReferentDerived :: DB m => Referent.Id -> m (Maybe ReferentDerivedId) --- --loadReferentDerived = error "todo" --- -- --- --saveReferentDerivedByReferenceDerivedId :: DB m => Referent' ReferenceDerivedId -> m ReferentDerivedId --- --saveReferentDerivedByReferenceDerivedId r = do --- -- liftIO $ execute sql r --- -- fmap fromJust (loadReferenceDerivedByReferenceDerivedId r) --- -- where --- -- sql = [here| --- -- INSERT INTO referent_derived --- -- (reference_derived_id, constructor_id, constructor_type) --- -- VALUES (?, ?, ?) --- -- ON CONFLICT DO NOTHING --- -- |]s --- --loadReferentDerivedByReferenceDerivedId :: DB m => Referent' ReferenceDerivedId -> m (Maybe ReferentDerivedId) --- --loadReferentDerivedByReferenceDerivedId r = queryMaybe . 
query sql r where --- -- sql = [here| --- -- SELECT id FROM referent_derived --- -- WHERE reference_derived_id = ? --- -- AND constructor_id = ? --- -- AND constructor_type = ? --- -- |] - --- buildTerm2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> Term -> Term2 Hash --- buildTerm2H lookup hash1 = --- voidTermAnnotations . Term.rmap --- (over Db.referenceTraversal (fmap runV2 . lookupTerm . V1)) --- (over Db.referenceTraversal (runV2 . lookupType . V1)) --- ( over Db.referent2ConTraversal (runV2 . lookupType . V1) --- . over Db.referentRefTraversal (fmap runV2 . lookupTerm . V1) --- ) --- where --- lookupTerm :: V1 Hash -> Maybe (V2 Hash) --- lookupTerm h | h == hash1 = Nothing --- lookupTerm h = Just (lookup h) --- lookupType :: V1 Hash -> V2 Hash --- lookupType = lookup - --- buildTerm2S :: (V1 Hash -> Db.ObjectId) -> V1 Hash -> Term -> Term2 Db.ObjectId --- buildTerm2S lookup hash1 = --- voidTermAnnotations . Term.rmap --- (over Db.referenceTraversal (lookupTerm . V1)) --- (over Db.referenceTraversal (lookupType . V1)) --- ( over Db.referent2ConTraversal (lookupType . V1) --- . over Db.referentRefTraversal (lookupTerm . V1) --- ) --- where --- lookupTerm :: V1 Hash -> Maybe Db.ObjectId --- lookupTerm h | h == hash1 = Nothing --- lookupTerm h = Just (lookup h) --- lookupType :: V1 Hash -> Db.ObjectId --- lookupType = lookup - -buildTermType2S :: (Text -> Db.TextId) -> (V2 Hash -> Db.ObjectId) -> V2HashTypeOfTerm -> V2DiskTypeOfTerm -buildTermType2S lookupText lookup2 = V2.Type.rmap - (over V2.Reference.t lookupText . over V2.Reference.h (lookup2 . V2)) - --- buildDecl2H :: (V1 Hash -> V2 Hash) -> V1 Hash -> Decl -> Decl2 Hash --- buildDecl2H lookup = --- void . DD.rmapDecl (over Db.referenceTraversal (fmap runV2 . lookup' . 
V1)) --- where --- lookup' :: V1 Hash -> Maybe (V2 hash) --- lookup' h | h == hash1 = Nothing --- lookup' h = Just (lookup h) - --- buildDecl2I :: V2 Hash -> Decl2 Hash -> Decl2I --- buildDecl2I self = --- DD.rmapDecl (over Db.reference2Traversal (fmap runV2 . fromMaybe self . V2)) diff --git a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal b/codebase-convert-1to2/unison-codebase-convert-1to2.cabal deleted file mode 100644 index 214941be7e..0000000000 --- a/codebase-convert-1to2/unison-codebase-convert-1to2.cabal +++ /dev/null @@ -1,67 +0,0 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ - -name: unison-codebase-convert-1to2 -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development - -executable uconvert12 - -- import: unison-common - main-is: Main.hs - hs-source-dirs: app - ghc-options: -Wall -threaded -fno-warn-name-shadowing -fno-warn-missing-pattern-synonym-signatures -rtsopts -with-rtsopts=-I0 -optP-Wno-nonportable-include-path - -- other-modules: - build-depends: - base, - -- containers, - -- configurator, - -- directory, - -- errors, - -- filepath, - lens, - -- megaparsec, - -- safe, - -- shellmet, - -- template-haskell, - -- temporary, - -- text, - unison-codebase-convert-1to2 - -library - hs-source-dirs: lib - exposed-modules: - U.Codebase.Convert.SyncV1V2 - U.Codebase.Convert.TermUtil - U.Codebase.Convert.TypeUtil - -- other-modules: - -- other-extensions: - build-depends: - base, - bytes, - bytestring, - containers, - extra, - here, - lens, - mtl, - safe, - text, - sqlite-simple, - unliftio, - vector, - unison-core, - unison-codebase1, - unison-codebase, - unison-codebase-sqlite, - unison-util, - unison-util-serialization, - unison-util-term - default-language: Haskell2010 
diff --git a/codebase1/codebase/Unison/Codebase/V1/ABT.hs b/codebase1/codebase/Unison/Codebase/V1/ABT.hs deleted file mode 100644 index c8b7d39016..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/ABT.hs +++ /dev/null @@ -1,88 +0,0 @@ -{-# LANGUAGE LambdaCase #-} --- Based on: http://semantic-domain.blogspot.com/2015/03/abstract-binding-trees.html -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE Rank2Types #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.V1.ABT where - -import Data.Maybe (fromMaybe) -import qualified Data.Foldable as Foldable -import qualified Data.Set as Set -import Data.Set (Set) -import Prelude hiding (abs, cycle) --- import U.Util.Hashable (Accumulate, Hashable1) --- import qualified Data.Map as Map --- import qualified U.Util.Hashable as Hashable --- import Data.Functor (void) - -data ABT f v r - = Var v - | Cycle r - | Abs v r - | Tm (f r) - deriving (Functor, Foldable, Traversable) - --- | At each level in the tree, we store the set of free variables and --- a value of type `a`. Variables are of type `v`. -data Term f v a = Term {freeVars :: Set v, annotation :: a, out :: ABT f v (Term f v a)} - --- | A class for variables. --- --- * `Set.notMember (freshIn vs v) vs`: --- `freshIn` returns a variable not used in the `Set` -class Ord v => Var v where - freshIn :: Set v -> v -> v - -extraMap :: Functor g => (forall k . f k -> g k) -> Term f v a -> Term g v a -extraMap p (Term fvs a sub) = Term fvs a (go p sub) where - go :: Functor g => (forall k . 
f k -> g k) -> ABT f v (Term f v a) -> ABT g v (Term g v a) - go p = \case - Var v -> Var v - Cycle r -> Cycle (extraMap p r) - Abs v r -> Abs v (extraMap p r) - Tm x -> Tm (fmap (extraMap p) (p x)) - -var :: a -> v -> Term f v a -var a v = Term (Set.singleton v) a (Var v) - -abs :: Ord v => a -> v -> Term f v a -> Term f v a -abs a v body = Term (Set.delete v (freeVars body)) a (Abs v body) - -tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a -tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) - -cycle :: a -> Term f v a -> Term f v a -cycle a t = Term (freeVars t) a (Cycle t) - --- | `visit f t` applies an effectful function to each subtree of --- `t` and sequences the results. When `f` returns `Nothing`, `visit` --- descends into the children of the current subtree. When `f` returns --- `Just t2`, `visit` replaces the current subtree with `t2`. Thus: --- `visit (const Nothing) t == pure t` and --- `visit (const (Just (pure t2))) t == pure t2` -visit :: - (Traversable f, Applicative g, Ord v) => - (Term f v a -> Maybe (g (Term f v a))) -> - Term f v a -> - g (Term f v a) -visit f t = flip fromMaybe (f t) $ case out t of - Var _ -> pure t - Cycle body -> cycle (annotation t) <$> visit f body - Abs x e -> abs (annotation t) x <$> visit f e - Tm body -> tm (annotation t) <$> traverse (visit f) body - --- | Apply an effectful function to an ABT tree top down, sequencing the results. -visit' :: - (Traversable f, Applicative g, Monad g, Ord v) => - (f (Term f v a) -> g (f (Term f v a))) -> - Term f v a -> - g (Term f v a) -visit' f t = case out t of - Var _ -> pure t - Cycle body -> cycle (annotation t) <$> visit' f body - Abs x e -> abs (annotation t) x <$> visit' f e - Tm body -> f body >>= (fmap (tm (annotation t)) . 
traverse (visit' f)) diff --git a/codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs b/codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs deleted file mode 100644 index 82d895c248..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Branch/NameSegment.hs +++ /dev/null @@ -1,13 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.V1.Branch.NameSegment where - -import qualified Data.Text as Text -import Data.String (IsString(..)) -import Data.Text (Text) - --- Represents the parts of a name between the `.`s -newtype NameSegment = NameSegment {toText :: Text} deriving (Show, Eq, Ord) - -instance IsString NameSegment where - fromString = NameSegment . Text.pack diff --git a/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs b/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs deleted file mode 100644 index ed75abf149..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Branch/Raw.hs +++ /dev/null @@ -1,34 +0,0 @@ -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} -{-# LANGUAGE RankNTypes #-} - -module Unison.Codebase.V1.Branch.Raw where - -import Data.Map (Map) -import qualified U.Util.Hash as Hash -import Unison.Codebase.V1.Star3 -import Unison.Codebase.V1.Reference -import Unison.Codebase.V1.Referent -import Unison.Codebase.V1.Branch.NameSegment (NameSegment) - -type MetadataType = Reference -type MetadataValue = Reference - --- `a` is generally the type of references or hashes --- `n` is generally the the type of name associated with the references --- `Type` is the type of metadata. Duplicate info to speed up certain queries. --- `(Type, Value)` is the metadata value itself along with its type. 
-type Star r n = Star3 r n MetadataType (MetadataType, MetadataValue) - -newtype EditHash = EditHash Hash.Hash deriving (Eq, Ord, Show) -newtype BranchHash = BranchHash Hash.Hash deriving (Eq, Ord, Show) - --- The raw Branch -data Raw = Raw - { terms :: Star Referent NameSegment - , types :: Star Reference NameSegment - , children :: Map NameSegment BranchHash - , edits :: Map NameSegment EditHash - } \ No newline at end of file diff --git a/codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs b/codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs deleted file mode 100644 index 397bd7b9dc..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Causal/Raw.hs +++ /dev/null @@ -1,26 +0,0 @@ -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE RankNTypes #-} -module Unison.Codebase.V1.Causal.Raw where - -import U.Util.Hashable (Hashable) -import U.Util.Hash (Hash) -import Data.Set (Set) -import Data.Foldable (Foldable(toList)) - -newtype RawHash h = RawHash { unRawHash :: Hash } - deriving (Eq, Ord, Hashable) via Hash - -data Raw h e - = RawOne e - | RawCons e (RawHash h) - | RawMerge e (Set (RawHash h)) - -rawHead :: Raw h e -> e -rawHead (RawOne e ) = e -rawHead (RawCons e _) = e -rawHead (RawMerge e _) = e - -rawTails :: Raw h e -> [RawHash h] -rawTails (RawOne _) = [] -rawTails (RawCons _ h) = [h] -rawTails (RawMerge _ hs) = toList hs diff --git a/codebase1/codebase/Unison/Codebase/V1/ConstructorType.hs b/codebase1/codebase/Unison/Codebase/V1/ConstructorType.hs deleted file mode 100644 index 21a2b4b2ae..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/ConstructorType.hs +++ /dev/null @@ -1,8 +0,0 @@ -module Unison.Codebase.V1.ConstructorType where - -import U.Util.Hashable (Hashable, Token(Tag), tokens) - -data ConstructorType = Data | Effect deriving (Eq, Ord, Show, Enum) - -instance Hashable ConstructorType where - tokens b = [Tag . 
fromIntegral $ fromEnum b] diff --git a/codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs b/codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs deleted file mode 100644 index 13ee496050..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/DataDeclaration.hs +++ /dev/null @@ -1,72 +0,0 @@ -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE DeriveAnyClass #-} -{-# Language DeriveFoldable #-} -{-# Language DeriveTraversable #-} -{-# Language OverloadedStrings #-} -{-# Language PatternSynonyms #-} -{-# Language ViewPatterns #-} - -module Unison.Codebase.V1.DataDeclaration where - - -import qualified Data.Set as Set -import Prelude hiding ( cycle ) -import Unison.Codebase.V1.Reference ( Reference ) -import Unison.Codebase.V1.Type ( Type ) -import qualified Unison.Codebase.V1.Type as Type -import qualified Unison.Codebase.V1.ConstructorType as CT -import Data.Text (Text) -import Data.Set (Set) - -type Decl v a = Either (EffectDeclaration v a) (DataDeclaration v a) - -data DeclOrBuiltin v a = - Builtin CT.ConstructorType | Decl (Decl v a) - -asDataDecl :: Decl v a -> DataDeclaration v a -asDataDecl = either toDataDecl id - -declDependencies :: Ord v => Decl v a -> Set Reference -declDependencies = either (dependencies . toDataDecl) dependencies - -constructorType :: Decl v a -> CT.ConstructorType -constructorType = \case - Left{} -> CT.Effect - Right{} -> CT.Data - -data Modifier = Structural | Unique Text - deriving (Eq, Ord, Show) - -data DataDeclaration v a = DataDeclaration { - modifier :: Modifier, - annotation :: a, - bound :: [v], - constructors' :: [(a, v, Type v a)] -} - -newtype EffectDeclaration v a = EffectDeclaration { - toDataDecl :: DataDeclaration v a -} - -withEffectDecl - :: (DataDeclaration v a -> DataDeclaration v' a') - -> (EffectDeclaration v a -> EffectDeclaration v' a') -withEffectDecl f e = EffectDeclaration (f . 
toDataDecl $ e) - -withEffectDeclM :: Functor f - => (DataDeclaration v a -> f (DataDeclaration v' a')) - -> EffectDeclaration v a - -> f (EffectDeclaration v' a') -withEffectDeclM f = fmap EffectDeclaration . f . toDataDecl - -constructors :: DataDeclaration v a -> [(v, Type v a)] -constructors (DataDeclaration _ _ _ ctors) = [(v,t) | (_,v,t) <- ctors ] - -constructorTypes :: DataDeclaration v a -> [Type v a] -constructorTypes = (snd <$>) . constructors - -dependencies :: Ord v => DataDeclaration v a -> Set Reference -dependencies dd = - Set.unions (Type.dependencies <$> constructorTypes dd) diff --git a/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs b/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs deleted file mode 100644 index 4430943869..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/FileCodebase.hs +++ /dev/null @@ -1,261 +0,0 @@ -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.V1.FileCodebase where - -import Control.Error (ExceptT (..), runExceptT) -import Control.Monad.Catch (catch) -import Control.Monad.Extra (ifM) -import Data.Either.Extra (maybeToEither) -import Data.Functor ((<&>)) -import qualified Data.Text as Text -import System.Directory (getHomeDirectory) -import qualified System.Directory -import System.FilePath (()) -import qualified U.Util.Base32Hex as Base32Hex -import qualified U.Util.Hash as Hash -import qualified Unison.Codebase.V1.Branch.Raw as Branch -import Unison.Codebase.V1.Branch.Raw (BranchHash (..), EditHash (..)) -import qualified Unison.Codebase.V1.Causal.Raw as Causal -import qualified Unison.Codebase.V1.DataDeclaration as DD -import Unison.Codebase.V1.Patch (Patch (..)) -import Unison.Codebase.V1.Reference (Reference) -import qualified Unison.Codebase.V1.Reference as Reference -import qualified 
Unison.Codebase.V1.Serialization.Serialization as S -import qualified Unison.Codebase.V1.Serialization.V1 as V1 -import qualified Unison.Codebase.V1.Term (Term) -import qualified Unison.Codebase.V1.Type (Type) -import UnliftIO (MonadIO) -import UnliftIO (IOException) -import UnliftIO (MonadIO (liftIO)) -import UnliftIO.Directory (createDirectoryIfMissing, doesDirectoryExist, doesFileExist) -import Data.Text (Text) -import Data.Maybe (fromMaybe) -import Data.Char (isDigit) -import Unison.Codebase.V1.Symbol (Symbol) - -type CodebasePath = FilePath -type Term = Unison.Codebase.V1.Term.Term Symbol () -type Type = Unison.Codebase.V1.Type.Type Symbol () - -data WatchKind = RegularWatch | TestWatch deriving (Eq, Ord, Show) - -getCodebaseDir :: Maybe FilePath -> IO FilePath -getCodebaseDir = maybe getHomeDirectory pure - -data Err - = InvalidBranchFile FilePath String - | InvalidEditsFile FilePath String - | NoBranchHead FilePath - | CantParseBranchHead FilePath - | AmbiguouslyTypeAndTerm Reference.Id - | UnknownTypeOrTerm Reference - deriving (Show) - -codebasePath :: FilePath -codebasePath = ".unison" "v1" - -termsDir, typesDir, branchesDir, branchHeadDir, editsDir :: CodebasePath -> FilePath -termsDir root = root codebasePath "terms" -typesDir root = root codebasePath "types" -branchesDir root = root codebasePath "paths" -branchHeadDir root = branchesDir root "_head" -editsDir root = root codebasePath "patches" - -termDir, declDir :: CodebasePath -> Reference.Id -> FilePath -termDir root r = termsDir root componentIdToString r -declDir root r = typesDir root componentIdToString r - -watchesDir :: CodebasePath -> WatchKind -> FilePath -watchesDir root k = - root codebasePath "watches" case k of - RegularWatch -> "_cache" - TestWatch -> "test" - -watchPath :: CodebasePath -> WatchKind -> Reference.Id -> FilePath -watchPath root kind id = - watchesDir root kind componentIdToString id <> ".ub" - -termPath, typePath, declPath :: CodebasePath -> Reference.Id -> FilePath 
-termPath path r = termDir path r "compiled.ub" -typePath path r = termDir path r "type.ub" -declPath path r = declDir path r "compiled.ub" - -branchPath :: CodebasePath -> BranchHash -> FilePath -branchPath root (BranchHash h) = branchesDir root hashToString h ++ ".ub" - -editsPath :: CodebasePath -> EditHash -> FilePath -editsPath root (EditHash h) = editsDir root hashToString h ++ ".up" - -reflogPath :: CodebasePath -> FilePath -reflogPath root = root codebasePath "reflog" - --- checks if `path` looks like a unison codebase -minimalCodebaseStructure :: CodebasePath -> [FilePath] -minimalCodebaseStructure root = [branchHeadDir root] - --- checks if a minimal codebase structure exists at `path` -codebaseExists :: MonadIO m => CodebasePath -> m Bool -codebaseExists root = - and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) - -deserializeEdits :: MonadIO m => CodebasePath -> Branch.EditHash -> m Patch -deserializeEdits root h = - let file = editsPath root h - in S.getFromFile' V1.getEdits file >>= \case - Left err -> failWith $ InvalidEditsFile file err - Right edits -> pure edits - -data GetRootBranchError - = NoRootBranch - | CouldntParseRootBranch FilePath - | CouldntLoadRootBranch BranchHash - | ConflictedRootBranch [FilePath] - deriving (Show) - -getRootBranch :: - forall m. 
- MonadIO m => - CodebasePath -> - m (Either GetRootBranchError (Causal.Raw Branch.Raw Branch.Raw)) -getRootBranch root = - ifM - (codebaseExists root) - (listDirectory (branchHeadDir root) >>= filesToBranch) - (pure $ Left NoRootBranch) - where - filesToBranch :: [FilePath] -> m (Either GetRootBranchError (Causal.Raw Branch.Raw Branch.Raw)) - filesToBranch = \case - [] -> pure $ Left NoRootBranch - [single] -> runExceptT $ fileToBranch single - conflict -> pure $ Left $ ConflictedRootBranch conflict - fileToBranch :: FilePath -> ExceptT GetRootBranchError m (Causal.Raw Branch.Raw Branch.Raw) - fileToBranch single = ExceptT $ case hashFromString single of - Nothing -> pure . Left $ CouldntParseRootBranch single - Just (BranchHash -> h) -> - branchFromFiles root h - <&> maybeToEither (CouldntLoadRootBranch h) - branchFromFiles :: MonadIO m => CodebasePath -> BranchHash -> m (Maybe (Causal.Raw Branch.Raw Branch.Raw)) - branchFromFiles rootDir h = do - fileExists <- doesFileExist (branchPath rootDir h) - if fileExists - then Just <$> deserializeRawBranch rootDir h - else pure Nothing - where - deserializeRawBranch :: - MonadIO m => CodebasePath -> BranchHash -> m (Causal.Raw Branch.Raw Branch.Raw) - deserializeRawBranch root h = do - let ubf = branchPath root h - S.getFromFile' (V1.getCausal0 V1.getRawBranch) ubf >>= \case - Left err -> failWith $ InvalidBranchFile ubf err - Right c0 -> pure c0 - --- here -hashFromString :: String -> Maybe Hash.Hash -hashFromString = fmap (Hash.fromBase32Hex . Base32Hex.fromByteString) . Base32Hex.textToByteString . Text.pack - --- here -hashToString :: Hash.Hash -> String -hashToString = Text.unpack . Base32Hex.toText . Hash.toBase32Hex - --- hashFromFilePath :: FilePath -> Maybe Hash.Hash --- hashFromFilePath = hashFromString . takeBaseName - --- here -componentIdToString :: Reference.Id -> String -componentIdToString = Text.unpack . Reference.toText . 
Reference.DerivedId - --- here -componentIdFromString :: String -> Maybe Reference.Id -componentIdFromString = idFromText . Text.pack where - idFromText :: Text.Text -> Maybe Reference.Id - idFromText s = case fromText s of - Left _ -> Nothing - Right (Reference.Builtin _) -> Nothing - Right (Reference.DerivedId id) -> pure id - --- examples: --- `##Text.take` — builtins don’t have cycles --- `#2tWjVAuc7` — derived, no cycle --- `#y9ycWkiC1.y9` — derived, part of cycle --- todo: take a (Reference -> CycleSize) so that `readSuffix` doesn't have to parse the size from the text. -fromText :: Text -> Either String Reference -fromText t = case Text.split (=='#') t of - [_, "", b] -> Right (Reference.Builtin b) - [_, h] -> case Text.split (=='.') h of - [hash] -> Right (derivedBase32Hex hash 0 1) - [hash, suffix] -> uncurry (derivedBase32Hex hash) <$> readSuffix suffix - _ -> bail - _ -> bail - where - bail = Left $ "couldn't parse a Reference from " <> Text.unpack t - derivedBase32Hex :: Text -> Reference.Pos -> Reference.Size -> Reference - derivedBase32Hex b32Hex i n = Reference.DerivedId (Reference.Id (fromMaybe msg h) i n) - where - msg = error $ "Reference.derivedBase32Hex " <> show h - h = Hash.fromBase32Hex <$> Base32Hex.fromText b32Hex - readSuffix :: Text -> Either String (Reference.Pos, Reference.Size) - readSuffix t = case Text.breakOn "c" t of - (pos, Text.drop 1 -> size) | Text.all isDigit pos && Text.all isDigit size -> - Right (read (Text.unpack pos), read (Text.unpack size)) - _ -> Left "suffix decoding error" - - --- here --- referentFromString :: String -> Maybe Referent --- referentFromString = Referent.fromText . 
Text.pack - --- referentIdFromString :: String -> Maybe Referent.Id --- referentIdFromString s = referentFromString s >>= \case --- Referent.Ref (Reference.DerivedId r) -> Just $ Referent.Ref' r --- Referent.Con (Reference.DerivedId r) i t -> Just $ Referent.Con' r i t --- _ -> Nothing - --- here --- referentToString :: Referent -> String --- referentToString = Text.unpack . Referent.toText - -getTerm :: MonadIO m => CodebasePath -> Reference.Id -> m (Maybe Term) -getTerm path h = S.getFromFile V1.getTerm (termPath path h) - -getTypeOfTerm :: MonadIO m => CodebasePath -> Reference.Id -> m (Maybe Type) -getTypeOfTerm path h = S.getFromFile V1.getType (typePath path h) - -getDecl :: - MonadIO m => - CodebasePath -> - Reference.Id -> - m (Maybe (DD.Decl Symbol ())) -getDecl root h = - S.getFromFile - (V1.getEither V1.getEffectDeclaration V1.getDataDeclaration) - (declPath root h) - -getWatch :: - MonadIO m => - CodebasePath -> - WatchKind -> - Reference.Id -> - m (Maybe Term) -getWatch path k id = do - let wp = watchesDir path k - createDirectoryIfMissing True wp - S.getFromFile V1.getTerm (watchPath path k id) - -failWith :: MonadIO m => Err -> m a -failWith = liftIO . fail . show - --- | A version of listDirectory that returns mempty if the directory doesn't exist -listDirectory :: MonadIO m => FilePath -> m [FilePath] -listDirectory dir = - liftIO $ - System.Directory.listDirectory dir `catch` (\(_ :: IOException) -> pure mempty) - --- -- | delete all the elements of a given reference component from a set --- deleteComponent :: Reference.Id -> Set Reference -> Set Reference --- deleteComponent r rs = Set.difference rs --- (Reference.members . Reference.componentFor . 
Reference.DerivedId $ r) diff --git a/codebase1/codebase/Unison/Codebase/V1/LabeledDependency.hs b/codebase1/codebase/Unison/Codebase/V1/LabeledDependency.hs deleted file mode 100644 index f9ae793d52..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/LabeledDependency.hs +++ /dev/null @@ -1,58 +0,0 @@ -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.V1.LabeledDependency - ( derivedTerm - , derivedType - , termRef - , typeRef - , referent - , dataConstructor - , effectConstructor - , fold - , referents - , toReference - , LabeledDependency - , partition - ) where - -import Unison.Codebase.V1.ConstructorType (ConstructorType(Data, Effect)) -import Unison.Codebase.V1.Reference (Reference(DerivedId), Id) -import Unison.Codebase.V1.Referent (Referent, pattern Ref, pattern Con, Referent'(Ref', Con')) -import qualified Data.Set as Set -import Data.Set (Set) -import Data.Foldable (Foldable(toList)) -import Data.Either (partitionEithers) - --- dumb constructor name is private -newtype LabeledDependency = X (Either Reference Referent) deriving (Eq, Ord, Show) - -derivedType, derivedTerm :: Id -> LabeledDependency -typeRef, termRef :: Reference -> LabeledDependency -referent :: Referent -> LabeledDependency -dataConstructor :: Reference -> Int -> LabeledDependency -effectConstructor :: Reference -> Int -> LabeledDependency - -derivedType = X . Left . DerivedId -derivedTerm = X . Right . Ref . DerivedId -typeRef = X . Left -termRef = X . Right . Ref -referent = X . Right -dataConstructor r cid = X . Right $ Con r cid Data -effectConstructor r cid = X . Right $ Con r cid Effect - -referents :: Foldable f => f Referent -> Set LabeledDependency -referents rs = Set.fromList (map referent $ toList rs) - -fold :: (Reference -> a) -> (Referent -> a) -> LabeledDependency -> a -fold f g (X e) = either f g e - -partition :: Foldable t => t LabeledDependency -> ([Reference], [Referent]) -partition = partitionEithers . map (\(X e) -> e) . 
toList - --- | Left TypeRef | Right TermRef -toReference :: LabeledDependency -> Either Reference Reference -toReference = \case - X (Left r) -> Left r - X (Right (Ref' r)) -> Right r - X (Right (Con' r _ _)) -> Left r diff --git a/codebase1/codebase/Unison/Codebase/V1/Patch.hs b/codebase1/codebase/Unison/Codebase/V1/Patch.hs deleted file mode 100644 index 92e7f5495f..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Patch.hs +++ /dev/null @@ -1,60 +0,0 @@ -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.Codebase.V1.Patch where - -import Control.Lens hiding (children, cons, transform) -import Data.Foldable (Foldable (toList)) -import qualified Data.Set as Set -import Data.Set (Set) -import qualified U.Util.Relation as R -import U.Util.Relation (Relation) -import qualified Unison.Codebase.V1.LabeledDependency as LD -import Unison.Codebase.V1.LabeledDependency (LabeledDependency) -import Unison.Codebase.V1.Patch.TermEdit (TermEdit) -import qualified Unison.Codebase.V1.Patch.TermEdit as TermEdit -import Unison.Codebase.V1.Patch.TypeEdit (TypeEdit) -import qualified Unison.Codebase.V1.Patch.TypeEdit as TypeEdit -import Unison.Codebase.V1.Reference (Reference) - -data Patch = Patch - { _termEdits :: Relation Reference TermEdit, - _typeEdits :: Relation Reference TypeEdit - } - deriving (Eq, Ord, Show) - -makeLenses ''Patch - -labeledDependencies :: Patch -> Set LabeledDependency -labeledDependencies Patch {..} = - Set.map LD.termRef (R.dom _termEdits) - <> Set.fromList - (fmap LD.termRef $ TermEdit.references =<< toList (R.ran _termEdits)) - <> Set.map LD.typeRef (R.dom _typeEdits) - <> Set.fromList - (fmap LD.typeRef $ TypeEdit.references =<< toList (R.ran _typeEdits)) - -allReferences :: Patch -> Set Reference -allReferences p = typeReferences p <> termReferences p - where - typeReferences p = - Set.fromList - [ r | (old, TypeEdit.Replace new) <- R.toList 
(_typeEdits p), r <- [old, new] - ] - termReferences p = - Set.fromList - [ r | (old, TermEdit.Replace new _) <- R.toList (_termEdits p), r <- [old, new] - ] - --- | Returns the set of references which are the target of an arrow in the patch -allReferenceTargets :: Patch -> Set Reference -allReferenceTargets p = typeReferences p <> termReferences p - where - typeReferences p = - Set.fromList - [new | (_, TypeEdit.Replace new) <- R.toList (_typeEdits p)] - termReferences p = - Set.fromList - [new | (_, TermEdit.Replace new _) <- R.toList (_termEdits p)] diff --git a/codebase1/codebase/Unison/Codebase/V1/Patch/TermEdit.hs b/codebase1/codebase/Unison/Codebase/V1/Patch/TermEdit.hs deleted file mode 100644 index f3955ab09b..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Patch/TermEdit.hs +++ /dev/null @@ -1,32 +0,0 @@ -module Unison.Codebase.V1.Patch.TermEdit where - -import Unison.Codebase.V1.Reference (Reference) - -data TermEdit = Replace Reference Typing | Deprecate - deriving (Eq, Ord, Show) - -references :: TermEdit -> [Reference] -references (Replace r _) = [r] -references Deprecate = [] - --- Replacements with the Same type can be automatically propagated. --- Replacements with a Subtype can be automatically propagated but may result in dependents getting more general types, so requires re-inference. --- Replacements of a Different type need to be manually propagated by the programmer. 
-data Typing = Same | Subtype | Different - deriving (Eq, Ord, Show) - -toReference :: TermEdit -> Maybe Reference -toReference (Replace r _) = Just r -toReference Deprecate = Nothing - -isTypePreserving :: TermEdit -> Bool -isTypePreserving e = case e of - Replace _ Same -> True - Replace _ Subtype -> True - _ -> False - -isSame :: TermEdit -> Bool -isSame e = case e of - Replace _ Same -> True - _ -> False - diff --git a/codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs b/codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs deleted file mode 100644 index afb911cd8d..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Patch/TypeEdit.hs +++ /dev/null @@ -1,14 +0,0 @@ -module Unison.Codebase.V1.Patch.TypeEdit where - -import Unison.Codebase.V1.Reference (Reference) - -data TypeEdit = Replace Reference | Deprecate - deriving (Eq, Ord, Show) - -references :: TypeEdit -> [Reference] -references (Replace r) = [r] -references Deprecate = [] - -toReference :: TypeEdit -> Maybe Reference -toReference (Replace r) = Just r -toReference Deprecate = Nothing diff --git a/codebase1/codebase/Unison/Codebase/V1/Reference.hs b/codebase1/codebase/Unison/Codebase/V1/Reference.hs deleted file mode 100644 index 500d69e93f..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Reference.hs +++ /dev/null @@ -1,63 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.V1.Reference where - -import Data.Char (isDigit) -import qualified Data.Text as Text -import Data.Word (Word64) -import Data.Text (Text) -import U.Util.Base32Hex (Base32Hex) -import qualified U.Util.Hash as H -import qualified U.Util.Base32Hex as Base32Hex -import qualified Data.Set as Set -import Data.Set (Set) - -data Reference - = Builtin Text.Text - | -- `Derived` can be part of a strongly connected component. 
- -- The `Pos` refers to a particular element of the component - -- and the `Size` is the number of elements in the component. - -- Using an ugly name so no one tempted to use this - DerivedId Id - deriving (Eq, Ord, Show) - -pattern Derived :: H.Hash -> Pos -> Size -> Reference -pattern Derived h i n = DerivedId (Id h i n) - -{-# COMPLETE Builtin, Derived #-} - -type Pos = Word64 -type Size = Word64 - --- todo: don't read or return size; must also update showSuffix and fromText -data Id = Id H.Hash Pos Size deriving (Eq, Ord, Show) - -readSuffix :: Text -> Either String (Pos, Size) -readSuffix t = case Text.breakOn "c" t of - (pos, Text.drop 1 -> size) - | Text.all isDigit pos && Text.all isDigit size -> - Right (read (Text.unpack pos), read (Text.unpack size)) - _ -> Left "suffix decoding error" - -toText :: Reference -> Text -toText (Builtin b) = "##" <> b -toText (DerivedId (Id h i n)) = - "#" <> (Base32Hex.toText . H.toBase32Hex) h - <> "." - <> (Text.pack . show) i - <> "c" - <> (Text.pack . 
show) n - -newtype Component = Component {members :: Set Reference} - --- Gives the component (dependency cycle) that the reference is a part of -componentFor :: Reference -> Component -componentFor b@(Builtin _) = Component (Set.singleton b) -componentFor (DerivedId (Id h _ n)) = - Component (Set.fromList [DerivedId (Id h i n) | i <- take (fromIntegral n) [0 ..]]) - -derivedBase32Hex :: Base32Hex -> Pos -> Size -> Reference -derivedBase32Hex h i n = DerivedId (Id (H.fromBase32Hex h) i n) diff --git a/codebase1/codebase/Unison/Codebase/V1/Referent.hs b/codebase1/codebase/Unison/Codebase/V1/Referent.hs deleted file mode 100644 index 07c338790a..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Referent.hs +++ /dev/null @@ -1,54 +0,0 @@ -{-# LANGUAGE DeriveFunctor #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.V1.Referent where - -import Unison.Codebase.V1.Reference (Reference) -import qualified Unison.Codebase.V1.Reference as R - -import Unison.Codebase.V1.ConstructorType (ConstructorType) -import Data.Word (Word64) - --- Slightly odd naming. This is the "referent of term name in the codebase", --- rather than the target of a Reference. 
-type Referent = Referent' Reference -pattern Ref :: Reference -> Referent -pattern Ref r = Ref' r -pattern Con :: Reference -> Int -> ConstructorType -> Referent -pattern Con r i t = Con' r i t -{-# COMPLETE Ref, Con #-} - -type Id = Referent' R.Id - -data Referent' r = Ref' r | Con' r Int ConstructorType - deriving (Show, Ord, Eq, Functor) - -type Pos = Word64 -type Size = Word64 - -isConstructor :: Referent -> Bool -isConstructor Con{} = True -isConstructor _ = False - -toTermReference :: Referent -> Maybe Reference -toTermReference = \case - Ref r -> Just r - _ -> Nothing - -toReference :: Referent -> Reference -toReference = toReference' - -toReference' :: Referent' r -> r -toReference' = \case - Ref' r -> r - Con' r _i _t -> r - -fromId :: Id -> Referent -fromId = fmap R.DerivedId - -toTypeReference :: Referent -> Maybe Reference -toTypeReference = \case - Con r _i _t -> Just r - _ -> Nothing diff --git a/codebase1/codebase/Unison/Codebase/V1/Serialization/Serialization.hs b/codebase1/codebase/Unison/Codebase/V1/Serialization/Serialization.hs deleted file mode 100644 index f2a8f7a19a..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Serialization/Serialization.hs +++ /dev/null @@ -1,43 +0,0 @@ -{-# LANGUAGE RankNTypes #-} - -module Unison.Codebase.V1.Serialization.Serialization where - -import Data.Bytes.Get (MonadGet, runGetS) -import Data.Bytes.Put (MonadPut, runPutS) -import Data.ByteString (ByteString, readFile, writeFile) -import UnliftIO.Directory (doesFileExist, createDirectoryIfMissing) -import System.FilePath (takeDirectory) -import Prelude hiding (readFile, writeFile) -import UnliftIO (MonadIO, liftIO) - -type Get a = forall m . MonadGet m => m a -type Put a = forall m . MonadPut m => a -> m () - --- todo: do we use this? 
-data Format a = Format { - get :: Get a, - put :: Put a -} - -getFromBytes :: Get a -> ByteString -> Maybe a -getFromBytes getA bytes = - case runGetS getA bytes of Left _ -> Nothing; Right a -> Just a - -getFromFile :: MonadIO m => Get a -> FilePath -> m (Maybe a) -getFromFile getA file = do - b <- doesFileExist file - if b then getFromBytes getA <$> liftIO (readFile file) else pure Nothing - -getFromFile' :: MonadIO m => Get a -> FilePath -> m (Either String a) -getFromFile' getA file = do - b <- doesFileExist file - if b then runGetS getA <$> liftIO (readFile file) - else pure . Left $ "No such file: " ++ file - -putBytes :: Put a -> a -> ByteString -putBytes put a = runPutS (put a) - -putWithParentDirs :: MonadIO m => Put a -> FilePath -> a -> m () -putWithParentDirs putA file a = do - createDirectoryIfMissing True (takeDirectory file) - liftIO . writeFile file $ putBytes putA a diff --git a/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs b/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs deleted file mode 100644 index 224bb10495..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Serialization/V1.hs +++ /dev/null @@ -1,390 +0,0 @@ -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE Strict #-} -{-# LANGUAGE RankNTypes #-} - -module Unison.Codebase.V1.Serialization.V1 where - -import Control.Applicative (Applicative (liftA2), liftA3) -import Control.Monad (replicateM) -import Data.Bits (Bits) -import qualified Data.ByteString as B -import Data.Bytes.Get -import Data.Bytes.Serial (deserialize, deserializeBE) -import Data.Bytes.Signed (Unsigned) -import Data.Bytes.VarInt (VarInt (..)) -import Data.Int (Int64) -import qualified Data.Map as Map -import Data.Map (Map) -import qualified Data.Sequence as Sequence -import qualified Data.Set as Set -import Data.Text (Text) -import Data.Text.Encoding (decodeUtf8) -import Data.Word (Word64) -import U.Util.Hash (Hash) -import qualified U.Util.Hash as Hash -import 
U.Util.Relation (Relation) -import qualified U.Util.Relation as Relation -import qualified Unison.Codebase.V1.ABT as ABT -import Unison.Codebase.V1.Branch.NameSegment as NameSegment -import qualified Unison.Codebase.V1.Branch.Raw as Branch -import Unison.Codebase.V1.Causal.Raw (Raw (..), RawHash (..)) -import qualified Unison.Codebase.V1.Causal.Raw as Causal -import qualified Unison.Codebase.V1.ConstructorType as CT -import qualified Unison.Codebase.V1.DataDeclaration as DataDeclaration -import Unison.Codebase.V1.DataDeclaration (DataDeclaration, EffectDeclaration) -import Unison.Codebase.V1.Patch (Patch (..)) -import Unison.Codebase.V1.Patch.TermEdit (TermEdit) -import qualified Unison.Codebase.V1.Patch.TermEdit as TermEdit -import Unison.Codebase.V1.Patch.TypeEdit (TypeEdit) -import qualified Unison.Codebase.V1.Patch.TypeEdit as TypeEdit -import Unison.Codebase.V1.Reference (Reference) -import qualified Unison.Codebase.V1.Reference as Reference -import Unison.Codebase.V1.Referent (Referent) -import qualified Unison.Codebase.V1.Referent as Referent -import Unison.Codebase.V1.Star3 (Star3) -import qualified Unison.Codebase.V1.Star3 as Star3 -import Unison.Codebase.V1.Symbol (Symbol (..)) -import Unison.Codebase.V1.Term (Term) -import qualified Unison.Codebase.V1.Term as Term -import qualified Unison.Codebase.V1.Term.Pattern as Pattern -import Unison.Codebase.V1.Term.Pattern (Pattern, SeqOp) -import qualified Unison.Codebase.V1.Type as Type -import Unison.Codebase.V1.Type (Type) -import Unison.Codebase.V1.Type.Kind (Kind) -import qualified Unison.Codebase.V1.Type.Kind as Kind -import Prelude hiding (getChar, putChar) - --- ABOUT THIS FORMAT: --- --- A serialization format for uncompiled Unison syntax trees. --- --- Finalized: No --- --- If Finalized: Yes, don't modify this file in a way that affects serialized form. --- Instead, create a new file, V(n + 1). --- This ensures that we have a well-defined serialized form and can read --- and write old versions. 
- -unknownTag :: (MonadGet m, Show a) => String -> a -> m x -unknownTag msg tag = - fail $ - "unknown tag " ++ show tag - ++ " while deserializing: " - ++ msg - -getCausal0 :: MonadGet m => m a -> m (Causal.Raw h a) -getCausal0 getA = getWord8 >>= \case - 0 -> RawOne <$> getA - 1 -> flip RawCons <$> (RawHash <$> getHash) <*> getA - 2 -> flip RawMerge . Set.fromList <$> getList (RawHash <$> getHash) <*> getA - x -> unknownTag "Causal0" x - -getLength :: - ( MonadGet m, - Integral n, - Integral (Unsigned n), - Bits n, - Bits (Unsigned n) - ) => - m n -getLength = unVarInt <$> deserialize - -getText :: MonadGet m => m Text -getText = do - len <- getLength - bs <- B.copy <$> getBytes len - pure $ decodeUtf8 bs - -getFloat :: MonadGet m => m Double -getFloat = deserializeBE - -getNat :: MonadGet m => m Word64 -getNat = getWord64be - -getInt :: MonadGet m => m Int64 -getInt = deserializeBE - -getBoolean :: MonadGet m => m Bool -getBoolean = go =<< getWord8 - where - go 0 = pure False - go 1 = pure True - go t = unknownTag "Boolean" t - -getHash :: MonadGet m => m Hash -getHash = do - len <- getLength - bs <- B.copy <$> getBytes len - pure $ Hash.fromBytes bs - -getReference :: MonadGet m => m Reference -getReference = do - tag <- getWord8 - case tag of - 0 -> Reference.Builtin <$> getText - 1 -> Reference.DerivedId <$> (Reference.Id <$> getHash <*> getLength <*> getLength) - _ -> unknownTag "Reference" tag - -getReferent :: MonadGet m => m Referent -getReferent = do - tag <- getWord8 - case tag of - 0 -> Referent.Ref <$> getReference - 1 -> Referent.Con <$> getReference <*> getLength <*> getConstructorType - _ -> unknownTag "getReferent" tag - -getConstructorType :: MonadGet m => m CT.ConstructorType -getConstructorType = getWord8 >>= \case - 0 -> pure CT.Data - 1 -> pure CT.Effect - t -> unknownTag "getConstructorType" t - -getMaybe :: MonadGet m => m a -> m (Maybe a) -getMaybe getA = getWord8 >>= \tag -> case tag of - 0 -> pure Nothing - 1 -> Just <$> getA - _ -> 
unknownTag "Maybe" tag - --- getFolded :: MonadGet m => (b -> a -> b) -> b -> m a -> m b --- getFolded f z a = --- foldl' f z <$> getList a - -getList :: MonadGet m => m a -> m [a] -getList a = getLength >>= (`replicateM` a) - -getABT :: - (MonadGet m, Foldable f, Functor f, Ord v) => - m v -> - m a -> - (forall x. m x -> m (f x)) -> - m (ABT.Term f v a) -getABT getVar getA getF = getList getVar >>= go [] - where - go env fvs = do - a <- getA - tag <- getWord8 - case tag of - 0 -> do - tag <- getWord8 - case tag of - 0 -> ABT.var a . (env !!) <$> getLength - 1 -> ABT.var a . (fvs !!) <$> getLength - _ -> unknownTag "getABT.Var" tag - 1 -> ABT.tm a <$> getF (go env fvs) - 2 -> do - v <- getVar - body <- go (v : env) fvs - pure $ ABT.abs a v body - 3 -> ABT.cycle a <$> go env fvs - _ -> unknownTag "getABT" tag - -getKind :: MonadGet m => m Kind -getKind = getWord8 >>= \tag -> case tag of - 0 -> pure Kind.Star - 1 -> Kind.Arrow <$> getKind <*> getKind - _ -> unknownTag "getKind" tag - -getType :: MonadGet m => m (Type Symbol ()) -getType = getType' getSymbol (pure ()) - -getType' :: - (MonadGet m, Ord v) => - m v -> - m a -> - m (Type v a) -getType' getVar getA = getABT getVar getA go - where - go getChild = getWord8 >>= \tag -> case tag of - 0 -> Type.Ref <$> getReference - 1 -> Type.Arrow <$> getChild <*> getChild - 2 -> Type.Ann <$> getChild <*> getKind - 3 -> Type.App <$> getChild <*> getChild - 4 -> Type.Effect <$> getChild <*> getChild - 5 -> Type.Effects <$> getList getChild - 6 -> Type.Forall <$> getChild - 7 -> Type.IntroOuter <$> getChild - _ -> unknownTag "getType" tag - -getSymbol :: MonadGet m => m Symbol -getSymbol = Symbol <$> getLength <*> getText - -getSeqOp :: MonadGet m => m SeqOp -getSeqOp = getWord8 >>= \case - 0 -> pure Pattern.Cons - 1 -> pure Pattern.Snoc - 2 -> pure Pattern.Concat - tag -> unknownTag "SeqOp" tag - -getPattern :: MonadGet m => m a -> m Pattern -getPattern getA = getWord8 >>= \tag -> case tag of - 0 -> Pattern.Unbound <$ getA - 
1 -> Pattern.Var <$ getA - 2 -> Pattern.Boolean <$ getA <*> getBoolean - 3 -> Pattern.Int <$ getA <*> getInt - 4 -> Pattern.Nat <$ getA <*> getNat - 5 -> Pattern.Float <$ getA <*> getFloat - 6 -> - Pattern.Constructor <$ getA <*> getReference <*> getLength - <*> getList - (getPattern getA) - 7 -> Pattern.As <$ getA <*> getPattern getA - 8 -> Pattern.EffectPure <$ getA <*> getPattern getA - 9 -> - Pattern.EffectBind - <$ getA - <*> getReference - <*> getLength - <*> getList (getPattern getA) - <*> getPattern getA - 10 -> Pattern.SequenceLiteral <$ getA <*> getList (getPattern getA) - 11 -> - Pattern.SequenceOp - <$ getA - <*> getPattern getA - <*> getSeqOp - <*> getPattern getA - 12 -> Pattern.Text <$ getA <*> getText - 13 -> Pattern.Char <$ getA <*> getChar - _ -> unknownTag "Pattern" tag - -getTerm :: MonadGet m => m (Term Symbol ()) -getTerm = getTerm' getSymbol (pure ()) - -getTerm' :: - (MonadGet m, Ord v) => - m v -> - m a -> - m (Term v a) -getTerm' getVar getA = getABT getVar getA go - where - go getChild = getWord8 >>= \tag -> case tag of - 0 -> Term.Int <$> getInt - 1 -> Term.Nat <$> getNat - 2 -> Term.Float <$> getFloat - 3 -> Term.Boolean <$> getBoolean - 4 -> Term.Text <$> getText - 5 -> Term.Ref <$> getReference - 6 -> Term.Constructor <$> getReference <*> getLength - 7 -> Term.Request <$> getReference <*> getLength - 8 -> Term.Handle <$> getChild <*> getChild - 9 -> Term.App <$> getChild <*> getChild - 10 -> Term.Ann <$> getChild <*> getType' getVar getA - 11 -> Term.List . 
Sequence.fromList <$> getList getChild - 12 -> Term.If <$> getChild <*> getChild <*> getChild - 13 -> Term.And <$> getChild <*> getChild - 14 -> Term.Or <$> getChild <*> getChild - 15 -> Term.Lam <$> getChild - 16 -> Term.LetRec False <$> getList getChild <*> getChild - 17 -> Term.Let False <$> getChild <*> getChild - 18 -> - Term.Match <$> getChild - <*> getList (Term.MatchCase <$> getPattern getA <*> getMaybe getChild <*> getChild) - 19 -> Term.Char <$> getChar - 20 -> Term.TermLink <$> getReferent - 21 -> Term.TypeLink <$> getReference - _ -> unknownTag "getTerm" tag - -getPair :: MonadGet m => m a -> m b -> m (a, b) -getPair = liftA2 (,) - -getTuple3 :: MonadGet m => m a -> m b -> m c -> m (a, b, c) -getTuple3 = liftA3 (,,) - -getRelation :: (MonadGet m, Ord a, Ord b) => m a -> m b -> m (Relation a b) -getRelation getA getB = Relation.fromList <$> getList (getPair getA getB) - -getMap :: (MonadGet m, Ord a) => m a -> m b -> m (Map a b) -getMap getA getB = Map.fromList <$> getList (getPair getA getB) - -getTermEdit :: MonadGet m => m TermEdit -getTermEdit = getWord8 >>= \case - 1 -> - TermEdit.Replace <$> getReference - <*> ( getWord8 >>= \case - 1 -> pure TermEdit.Same - 2 -> pure TermEdit.Subtype - 3 -> pure TermEdit.Different - t -> unknownTag "TermEdit.Replace" t - ) - 2 -> pure TermEdit.Deprecate - t -> unknownTag "TermEdit" t - -getTypeEdit :: MonadGet m => m TypeEdit -getTypeEdit = getWord8 >>= \case - 1 -> TypeEdit.Replace <$> getReference - 2 -> pure TypeEdit.Deprecate - t -> unknownTag "TypeEdit" t - -getStar3 :: - (MonadGet m, Ord fact, Ord d1, Ord d2, Ord d3) => - m fact -> - m d1 -> - m d2 -> - m d3 -> - m (Star3 fact d1 d2 d3) -getStar3 getF getD1 getD2 getD3 = - Star3.Star3 - <$> (Set.fromList <$> getList getF) - <*> getRelation getF getD1 - <*> getRelation getF getD2 - <*> getRelation getF getD3 - -getBranchStar :: (Ord a, Ord n, MonadGet m) => m a -> m n -> m (Branch.Star a n) -getBranchStar getA getN = getStar3 getA getN getMetadataType 
(getPair getMetadataType getMetadataValue) - -getChar :: MonadGet m => m Char -getChar = toEnum . unVarInt <$> deserialize - -getNameSegment :: MonadGet m => m NameSegment -getNameSegment = NameSegment <$> getText - -getMetadataType :: MonadGet m => m Branch.MetadataType -getMetadataType = getReference - -getMetadataValue :: MonadGet m => m Branch.MetadataValue -getMetadataValue = getReference - -getRawBranch :: MonadGet m => m Branch.Raw -getRawBranch = - Branch.Raw - <$> getBranchStar getReferent getNameSegment - <*> getBranchStar getReference getNameSegment - <*> getMap getNameSegment (Branch.BranchHash <$> getHash) - <*> getMap getNameSegment (Branch.EditHash <$> getHash) - -getDataDeclaration :: MonadGet m => m (DataDeclaration Symbol ()) -getDataDeclaration = getDataDeclaration' getSymbol (pure ()) - -getDataDeclaration' :: (MonadGet m, Ord v) => m v -> m a -> m (DataDeclaration v a) -getDataDeclaration' getV getA = - DataDeclaration.DataDeclaration - <$> getModifier - <*> getA - <*> getList getV - <*> getList (getTuple3 getA getV (getType' getV getA)) - -getModifier :: MonadGet m => m DataDeclaration.Modifier -getModifier = getWord8 >>= \case - 0 -> pure DataDeclaration.Structural - 1 -> DataDeclaration.Unique <$> getText - tag -> unknownTag "DataDeclaration.Modifier" tag - -getEffectDeclaration :: MonadGet m => m (EffectDeclaration Symbol ()) -getEffectDeclaration = - DataDeclaration.EffectDeclaration <$> getDataDeclaration - -getEffectDeclaration' :: (MonadGet m, Ord v) => m v -> m a -> m (EffectDeclaration v a) -getEffectDeclaration' getV getA = - DataDeclaration.EffectDeclaration <$> getDataDeclaration' getV getA - -getEither :: MonadGet m => m a -> m b -> m (Either a b) -getEither getL getR = getWord8 >>= \case - 0 -> Left <$> getL - 1 -> Right <$> getR - tag -> unknownTag "Either" tag - -getEdits :: MonadGet m => m Patch -getEdits = - Patch <$> getRelation getReference getTermEdit - <*> getRelation getReference getTypeEdit diff --git 
a/codebase1/codebase/Unison/Codebase/V1/Star3.hs b/codebase1/codebase/Unison/Codebase/V1/Star3.hs deleted file mode 100644 index 9b7a994033..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Star3.hs +++ /dev/null @@ -1,214 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} - -module Unison.Codebase.V1.Star3 where - -import U.Util.Relation (Relation) -import qualified Data.Set as Set -import Data.Set (Set) -import Data.Foldable (Foldable(foldl')) -import qualified U.Util.Hashable as H -import qualified U.Util.Relation as R - --- Represents a set of (fact, d1, d2, d3), but indexed using a star schema so --- it can be efficiently queried from any of the dimensions. -data Star3 fact d1 d2 d3 - = Star3 { fact :: Set fact - , d1 :: Relation fact d1 - , d2 :: Relation fact d2 - , d3 :: Relation fact d3 } deriving (Eq,Ord,Show) - --- Produce the cross-product across all the dimensions -toList :: (Ord fact, Ord d1, Ord d2, Ord d3) - => Star3 fact d1 d2 d3 - -> [(fact, d1, d2, d3)] -toList s = [ (f, x, y, z) | f <- Set.toList (fact s) - , x <- Set.toList (R.lookupDom f (d1 s)) - , y <- Set.toList (R.lookupDom f (d2 s)) - , z <- Set.toList (R.lookupDom f (d3 s)) ] - --- `difference a b` contains only the facts from `a` that are absent from `b` --- or differ along any of the dimensions `d1..d3`. 
-difference - :: (Ord fact, Ord d1, Ord d2, Ord d3) - => Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -difference a b = Star3 facts d1s d2s d3s - where - d1s = R.difference (d1 a) (d1 b) - d2s = R.difference (d2 a) (d2 b) - d3s = R.difference (d3 a) (d3 b) - facts = R.dom d1s <> R.dom d2s <> R.dom d3s - -d23s :: (Ord fact, Ord d2, Ord d3) - => Star3 fact d1 d2 d3 - -> [(fact, d2, d3)] -d23s s = [ (f, x, y) | f <- Set.toList (fact s) - , x <- Set.toList (R.lookupDom f (d2 s)) - , y <- Set.toList (R.lookupDom f (d3 s)) ] - -d23s' :: (Ord fact, Ord d2, Ord d3) - => Star3 fact d1 d2 d3 - -> [(d2, d3)] -d23s' s = [ (x, y) | f <- Set.toList (fact s) - , x <- Set.toList (R.lookupDom f (d2 s)) - , y <- Set.toList (R.lookupDom f (d3 s)) ] - -d12s :: (Ord fact, Ord d1, Ord d2) - => Star3 fact d1 d2 d3 - -> [(fact, d1, d2)] -d12s s = [ (f, x, y) | f <- Set.toList (fact s) - , x <- Set.toList (R.lookupDom f (d1 s)) - , y <- Set.toList (R.lookupDom f (d2 s)) ] - -d13s :: (Ord fact, Ord d1, Ord d3) - => Star3 fact d1 d2 d3 - -> [(fact, d1, d3)] -d13s s = [ (f, x, y) | f <- Set.toList (fact s) - , x <- Set.toList (R.lookupDom f (d1 s)) - , y <- Set.toList (R.lookupDom f (d3 s)) ] - -mapD1 :: (Ord fact, Ord d1, Ord d1a) => (d1 -> d1a) -> Star3 fact d1 d2 d3 -> Star3 fact d1a d2 d3 -mapD1 f s = s { d1 = R.mapRan f (d1 s) } - -mapD2 :: (Ord fact, Ord d2, Ord d2a) => (d2 -> d2a) -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2a d3 -mapD2 f s = s { d2 = R.mapRan f (d2 s) } - -mapD3 :: (Ord fact, Ord d3, Ord d3a) => (d3 -> d3a) -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3a -mapD3 f s = s { d3 = R.mapRan f (d3 s) } - -fromList :: (Ord fact, Ord d1, Ord d2, Ord d3) - => [(fact, d1, d2, d3)] -> Star3 fact d1 d2 d3 -fromList = foldl' (flip insert) mempty - -selectFact - :: (Ord fact, Ord d1, Ord d2, Ord d3) - => Set fact - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -selectFact fs s = Star3 fact' d1' d2' d3' where - fact' = Set.intersection fs (fact s) - d1' = fs 
R.<| d1 s - d2' = fs R.<| d2 s - d3' = fs R.<| d3 s - -select1D3 - :: (Ord fact, Ord d1, Ord d2, Ord d3) - => d3 -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 -select1D3 = selectD3 . Set.singleton - -selectD3 - :: (Ord fact, Ord d1, Ord d2, Ord d3) - => Set d3 - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -selectD3 d3s s = Star3 fact' d1' d2' d3' where - fact' = Set.intersection (R.dom d3') (fact s) - d1' = R.dom d3' R.<| d1 s - d2' = R.dom d3' R.<| d2 s - d3' = d3 s R.|> d3s - --- Deletes tuples of the form (fact, d1, _, _). --- If no other (fact, dk, _, _) tuples exist for any other dk, then --- `fact` is removed from the `fact` set and from the other dimensions as well, --- that is, (fact, d1) is treated as a primary key. -deletePrimaryD1 :: (Ord fact, Ord d1, Ord d2, Ord d3) - => (fact, d1) -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 -deletePrimaryD1 (f, x) s = let - d1' = R.delete f x (d1 s) - otherX = R.lookupDom f d1' - in if Set.null otherX then - Star3 (Set.delete f (fact s)) d1' (R.deleteDom f (d2 s)) (R.deleteDom f (d3 s)) - else s { d1 = d1' } - -lookupD1 :: (Ord fact, Ord d1) => d1 -> Star3 fact d1 d2 d3 -> Set fact -lookupD1 x s = R.lookupRan x (d1 s) - -insertD1 - :: (Ord fact, Ord d1) - => (fact, d1) - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -insertD1 (f,x) s = s { fact = Set.insert f (fact s) - , d1 = R.insert f x (d1 s) } - -memberD1 :: (Ord fact, Ord d1) => (fact,d1) -> Star3 fact d1 d2 d3 -> Bool -memberD1 (f, x) s = R.member f x (d1 s) - -memberD2 :: (Ord fact, Ord d2) => (fact,d2) -> Star3 fact d1 d2 d3 -> Bool -memberD2 (f, x) s = R.member f x (d2 s) - -memberD3 :: (Ord fact, Ord d3) => (fact,d3) -> Star3 fact d1 d2 d3 -> Bool -memberD3 (f, x) s = R.member f x (d3 s) - -insert :: (Ord fact, Ord d1, Ord d2, Ord d3) - => (fact, d1, d2, d3) - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -insert (f, d1i, d2i, d3i) s = Star3 fact' d1' d2' d3' where - fact' = Set.insert f (fact s) - d1' = R.insert f d1i (d1 s) - d2' = R.insert f 
d2i (d2 s) - d3' = R.insert f d3i (d3 s) - -insertD23 :: (Ord fact, Ord d1, Ord d2, Ord d3) - => (fact, d2, d3) - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -insertD23 (f, x, y) s = Star3 fact' (d1 s) d2' d3' where - fact' = Set.insert f (fact s) - d2' = R.insert f x (d2 s) - d3' = R.insert f y (d3 s) - -deleteD3 :: (Ord fact, Ord d1, Ord d2, Ord d3) - => (fact, d3) - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -deleteD3 (f, x) s = Star3 (fact s) (d1 s) (d2 s) d3' where - d3' = R.delete f x (d3 s) - -deleteD2 :: (Ord fact, Ord d1, Ord d2, Ord d3) - => (fact, d2) - -> Star3 fact d1 d2 d3 - -> Star3 fact d1 d2 d3 -deleteD2 (f, x) s = Star3 (fact s) (d1 s) d2' (d3 s) where - d2' = R.delete f x (d2 s) - -deleteFact :: (Ord fact, Ord d1, Ord d2, Ord d3) - => Set fact -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 -deleteFact facts Star3{..} = - Star3 (fact `Set.difference` facts) - (facts R.<|| d1) - (facts R.<|| d2) - (facts R.<|| d3) - -replaceFact :: (Ord fact, Ord d1, Ord d2, Ord d3) - => fact -> fact -> Star3 fact d1 d2 d3 -> Star3 fact d1 d2 d3 -replaceFact f f' Star3{..} = - let updateFact fact = - if Set.member f fact - then (Set.insert f' . 
Set.delete f) fact - else fact - in Star3 (updateFact fact) - (R.replaceDom f f' d1) - (R.replaceDom f f' d2) - (R.replaceDom f f' d3) - -instance (Ord fact, Ord d1, Ord d2, Ord d3) => Semigroup (Star3 fact d1 d2 d3) where - (<>) = mappend - -instance (Ord fact, Ord d1, Ord d2, Ord d3) => Monoid (Star3 fact d1 d2 d3) where - mempty = Star3 mempty mempty mempty mempty - s1 `mappend` s2 = Star3 fact' d1' d2' d3' where - fact' = fact s1 <> fact s2 - d1' = d1 s1 <> d1 s2 - d2' = d2 s1 <> d2 s2 - d3' = d3 s1 <> d3 s2 - -instance (H.Hashable fact, H.Hashable d1, H.Hashable d2, H.Hashable d3) - => H.Hashable (Star3 fact d1 d2 d3) where - tokens s = - [ H.accumulateToken (fact s) - , H.accumulateToken (d1 s) - , H.accumulateToken (d2 s) - , H.accumulateToken (d3 s) ] diff --git a/codebase1/codebase/Unison/Codebase/V1/Symbol.hs b/codebase1/codebase/Unison/Codebase/V1/Symbol.hs deleted file mode 100644 index e2169c53a3..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Symbol.hs +++ /dev/null @@ -1,13 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE OverloadedStrings #-} - -module Unison.Codebase.V1.Symbol where - -import Data.Word (Word64) -import Data.Text (Text) - -data Symbol = Symbol !Word64 !Text deriving (Eq, Ord) - --- instance Show Symbol where --- show (Symbol 0 n) = show n --- show (Symbol id n) = show n ++ "-" ++ show id diff --git a/codebase1/codebase/Unison/Codebase/V1/Term.hs b/codebase1/codebase/Unison/Codebase/V1/Term.hs deleted file mode 100644 index 9c13ad9b12..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Term.hs +++ /dev/null @@ -1,167 +0,0 @@ -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE Rank2Types #-} -{-# LANGUAGE UnicodeSyntax #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.V1.Term where - -import qualified 
Control.Monad.Writer.Strict as Writer -import Data.Foldable (Foldable (toList), traverse_) -import Data.Functor (($>)) -import Data.Int (Int64) -import Data.Maybe (mapMaybe) -import qualified Data.Set as Set -import Data.Sequence (Seq) -import Data.Set (Set) -import Data.Text (Text) -import Data.Word (Word64) -import qualified Unison.Codebase.V1.ABT as ABT -import qualified Unison.Codebase.V1.ConstructorType as CT -import qualified Unison.Codebase.V1.LabeledDependency as LD -import Unison.Codebase.V1.LabeledDependency (LabeledDependency) -import Unison.Codebase.V1.Reference (Reference) -import Unison.Codebase.V1.Referent (Referent) -import qualified Unison.Codebase.V1.Referent as Referent -import Unison.Codebase.V1.Term.Pattern (Pattern) -import qualified Unison.Codebase.V1.Term.Pattern as Pattern -import Unison.Codebase.V1.Type (Type) -import qualified Unison.Codebase.V1.Type as Type - --- This gets reexported; should maybe live somewhere other than Pattern, though. -type ConstructorId = Pattern.ConstructorId - -data MatchCase a = MatchCase Pattern (Maybe a) a - deriving (Foldable, Functor, Traversable) - --- | Base functor for terms in the Unison language --- We need `typeVar` because the term and type variables may differ. -data F typeVar typeAnn a - = Int Int64 - | Nat Word64 - | Float Double - | Boolean Bool - | Text Text - | Char Char - | Ref Reference - | -- First argument identifies the data type, - -- second argument identifies the constructor - Constructor Reference ConstructorId - | Request Reference ConstructorId - | Handle a a - | App a a - | Ann a (Type typeVar typeAnn) - | List (Seq a) - | If a a a - | And a a - | Or a a - | Lam a - | -- Note: let rec blocks have an outer ABT.Cycle which introduces as many - -- variables as there are bindings - LetRec IsTop [a] a - | -- Note: first parameter is the binding, second is the expression which may refer - -- to this let bound variable. 
Constructed as `Let b (abs v e)` - Let IsTop a a - | -- Pattern matching / eliminating data types, example: - -- case x of - -- Just n -> rhs1 - -- Nothing -> rhs2 - -- - -- translates to - -- - -- Match x - -- [ (Constructor 0 [Var], ABT.abs n rhs1) - -- , (Constructor 1 [], rhs2) ] - Match a [MatchCase a] - | TermLink Referent - | TypeLink Reference - deriving (Foldable, Functor, Traversable) - -type IsTop = Bool - --- | Like `Term v`, but with an annotation of type `a` at every level in the tree -type Term v a = ABT.Term (F v a) v a - --- Dependencies including referenced data and effect decls -dependencies :: Ord v => Term v a -> Set Reference -dependencies t = Set.map (LD.fold id Referent.toReference) (labeledDependencies t) - -typeDependencies :: Ord v => Term v a -> Set Reference -typeDependencies = - Set.fromList . mapMaybe (LD.fold Just (const Nothing)) . toList . labeledDependencies - --- Gets the types to which this term contains references via patterns and --- data constructors. -constructorDependencies :: - Ord v => Term v a -> Set Reference -constructorDependencies = - Set.unions - . generalizedDependencies - (const mempty) - (const mempty) - Set.singleton - (const . Set.singleton) - Set.singleton - (const . Set.singleton) - Set.singleton - -generalizedDependencies :: - (Ord v, Ord r) => - (Reference -> r) -> - (Reference -> r) -> - (Reference -> r) -> - (Reference -> ConstructorId -> r) -> - (Reference -> r) -> - (Reference -> ConstructorId -> r) -> - (Reference -> r) -> - Term v a -> - Set r -generalizedDependencies termRef typeRef literalType dataConstructor dataType effectConstructor effectType = - Set.fromList . Writer.execWriter . 
ABT.visit' f - where - f t@(Ref r) = Writer.tell [termRef r] $> t - f t@(TermLink r) = case r of - Referent.Ref r -> Writer.tell [termRef r] $> t - Referent.Con r id CT.Data -> Writer.tell [dataConstructor r id] $> t - Referent.Con r id CT.Effect -> Writer.tell [effectConstructor r id] $> t - f t@(TypeLink r) = Writer.tell [typeRef r] $> t - f t@(Ann _ typ) = - Writer.tell (map typeRef . toList $ Type.dependencies typ) $> t - f t@(Nat _) = Writer.tell [literalType Type.natRef] $> t - f t@(Int _) = Writer.tell [literalType Type.intRef] $> t - f t@(Float _) = Writer.tell [literalType Type.floatRef] $> t - f t@(Boolean _) = Writer.tell [literalType Type.booleanRef] $> t - f t@(Text _) = Writer.tell [literalType Type.textRef] $> t - f t@(List _) = Writer.tell [literalType Type.listRef] $> t - f t@(Constructor r cid) = - Writer.tell [dataType r, dataConstructor r cid] $> t - f t@(Request r cid) = - Writer.tell [effectType r, effectConstructor r cid] $> t - f t@(Match _ cases) = traverse_ goPat cases $> t - f t = pure t - goPat (MatchCase pat _ _) = - Writer.tell . 
toList $ - Pattern.generalizedDependencies - literalType - dataConstructor - dataType - effectConstructor - effectType - pat - -labeledDependencies :: - Ord v => Term v a -> Set LabeledDependency -labeledDependencies = - generalizedDependencies - LD.termRef - LD.typeRef - LD.typeRef - LD.dataConstructor - LD.typeRef - LD.effectConstructor - LD.typeRef diff --git a/codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs b/codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs deleted file mode 100644 index 43e9c3304b..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Term/Pattern.hs +++ /dev/null @@ -1,102 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} - -module Unison.Codebase.V1.Term.Pattern where - -import Data.Int (Int64) -import qualified Data.Set as Set -import qualified Unison.Codebase.V1.LabeledDependency as LD -import Unison.Codebase.V1.LabeledDependency (LabeledDependency) -import Data.Set (Set) -import Data.Text (Text) -import Data.Word (Word64) -import Unison.Codebase.V1.Reference (Reference) -import qualified Unison.Codebase.V1.Type as Type - -type ConstructorId = Int - -data Pattern - = Unbound - | Var - | Boolean !Bool - | Int !Int64 - | Nat !Word64 - | Float !Double - | Text !Text - | Char !Char - | Constructor !Reference !Int [Pattern] - | As Pattern - | EffectPure Pattern - | EffectBind !Reference !Int [Pattern] Pattern - | SequenceLiteral [Pattern] - | SequenceOp Pattern !SeqOp Pattern - deriving (Eq, Ord, Show) - -data SeqOp - = Cons - | Snoc - | Concat - deriving (Eq, Ord, Show) - -application :: Pattern -> Bool -application (Constructor _ _ (_ : _)) = True -application _ = False - -foldMap' :: Monoid m => (Pattern -> m) -> Pattern -> m -foldMap' f p = case p of - Unbound -> f p - Var -> f p - Boolean _ -> f p - Int _ -> f p - Nat _ -> f p - Float _ -> f p - Text _ -> f p - Char _ -> f p - Constructor _ _ ps -> f p <> 
foldMap (foldMap' f) ps - As p' -> f p <> foldMap' f p' - EffectPure p' -> f p <> foldMap' f p' - EffectBind _ _ ps p' -> f p <> foldMap (foldMap' f) ps <> foldMap' f p' - SequenceLiteral ps -> f p <> foldMap (foldMap' f) ps - SequenceOp p1 _ p2 -> f p <> foldMap' f p1 <> foldMap' f p2 - -generalizedDependencies :: - Ord r => - (Reference -> r) -> - (Reference -> ConstructorId -> r) -> - (Reference -> r) -> - (Reference -> ConstructorId -> r) -> - (Reference -> r) -> - Pattern -> - Set r -generalizedDependencies literalType dataConstructor dataType effectConstructor effectType = - Set.fromList - . foldMap' - ( \case - Unbound -> mempty - Var -> mempty - As _ -> mempty - Constructor r cid _ -> [dataType r, dataConstructor r cid] - EffectPure _ -> [effectType Type.effectRef] - EffectBind r cid _ _ -> - [effectType Type.effectRef, effectType r, effectConstructor r cid] - SequenceLiteral _ -> [literalType Type.listRef] - SequenceOp {} -> [literalType Type.listRef] - Boolean _ -> [literalType Type.booleanRef] - Int _ -> [literalType Type.intRef] - Nat _ -> [literalType Type.natRef] - Float _ -> [literalType Type.floatRef] - Text _ -> [literalType Type.textRef] - Char _ -> [literalType Type.charRef] - ) - -labeledDependencies :: Pattern -> Set LabeledDependency -labeledDependencies = - generalizedDependencies - LD.typeRef - LD.dataConstructor - LD.typeRef - LD.effectConstructor - LD.typeRef diff --git a/codebase1/codebase/Unison/Codebase/V1/Type.hs b/codebase1/codebase/Unison/Codebase/V1/Type.hs deleted file mode 100644 index e1ed9d507a..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Type.hs +++ /dev/null @@ -1,55 +0,0 @@ -{-# LANGUAGE DeriveFoldable #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE DeriveTraversable #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE Rank2Types #-} -{-# LANGUAGE ViewPatterns #-} - -module Unison.Codebase.V1.Type where - -import qualified Control.Monad.Writer 
as Writer -import Data.Functor (($>)) -import Data.Set (Set) -import qualified Data.Set as Set -import qualified Unison.Codebase.V1.ABT as ABT -import Unison.Codebase.V1.Reference (Reference) -import qualified Unison.Codebase.V1.Reference as Reference -import qualified Unison.Codebase.V1.Type.Kind as K - --- | Base functor for types in the Unison language -data F a - = Ref Reference - | Arrow a a - | Ann a K.Kind - | App a a - | Effect a a - | Effects [a] - | Forall a - | IntroOuter a -- binder like ∀, used to introduce variables that are - -- bound by outer type signatures, to support scoped type - -- variables - deriving (Foldable, Functor, Traversable) - --- | Types are represented as ABTs over the base functor F, with variables in `v` -type Type v a = ABT.Term F v a - -dependencies :: Ord v => Type v a -> Set Reference -dependencies t = Set.fromList . Writer.execWriter $ ABT.visit' f t - where - f t@(Ref r) = Writer.tell [r] $> t - f t = pure t - -intRef, natRef, floatRef, booleanRef, textRef, charRef, listRef, bytesRef, effectRef, termLinkRef, typeLinkRef :: Reference -intRef = Reference.Builtin "Int" -natRef = Reference.Builtin "Nat" -floatRef = Reference.Builtin "Float" -booleanRef = Reference.Builtin "Boolean" -textRef = Reference.Builtin "Text" -charRef = Reference.Builtin "Char" -listRef = Reference.Builtin "Sequence" -bytesRef = Reference.Builtin "Bytes" -effectRef = Reference.Builtin "Effect" -termLinkRef = Reference.Builtin "Link.Term" -typeLinkRef = Reference.Builtin "Link.Type" diff --git a/codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs b/codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs deleted file mode 100644 index fbddc81306..0000000000 --- a/codebase1/codebase/Unison/Codebase/V1/Type/Kind.hs +++ /dev/null @@ -1,3 +0,0 @@ -module Unison.Codebase.V1.Type.Kind where - -data Kind = Star | Arrow Kind Kind deriving (Eq,Ord,Show) \ No newline at end of file diff --git a/codebase1/codebase/unison-codebase1.cabal 
b/codebase1/codebase/unison-codebase1.cabal deleted file mode 100644 index b8d9f5c57e..0000000000 --- a/codebase1/codebase/unison-codebase1.cabal +++ /dev/null @@ -1,59 +0,0 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ - -name: unison-codebase1 -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development - -library - exposed-modules: - Unison.Codebase.V1.ABT - Unison.Codebase.V1.Branch.Raw - Unison.Codebase.V1.Branch.NameSegment - Unison.Codebase.V1.Causal.Raw - Unison.Codebase.V1.ConstructorType - Unison.Codebase.V1.DataDeclaration - Unison.Codebase.V1.FileCodebase - Unison.Codebase.V1.LabeledDependency - Unison.Codebase.V1.Patch - Unison.Codebase.V1.Patch.TermEdit - Unison.Codebase.V1.Patch.TypeEdit - Unison.Codebase.V1.Term - Unison.Codebase.V1.Term.Pattern - Unison.Codebase.V1.Type - Unison.Codebase.V1.Type.Kind - Unison.Codebase.V1.Reference - Unison.Codebase.V1.Referent - Unison.Codebase.V1.Serialization.Serialization - Unison.Codebase.V1.Serialization.V1 - Unison.Codebase.V1.Symbol - Unison.Codebase.V1.Star3 - -- other-modules: - -- other-extensions: - build-depends: - base, - base16, - bytes, - bytestring, - containers, - directory, - errors, - exceptions, - extra, - filepath, - lens, - mtl, - text, - unliftio, - unison-core, - unison-util - hs-source-dirs: . 
- default-language: Haskell2010 diff --git a/codebase2/editor/U/Editor/Codebase.hs b/codebase2/editor/U/Editor/Codebase.hs deleted file mode 100644 index 7c9ec5a6f1..0000000000 --- a/codebase2/editor/U/Editor/Codebase.hs +++ /dev/null @@ -1,57 +0,0 @@ -module U.Editor.Codebase where - --- data Codebase m v a = Codebase { --- getTerm :: Reference.Id -> m (Maybe (Term v a)), --- getTypeOfTermImpl :: Reference.Id -> m (Maybe (Type v a)), --- getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v a)), - --- putTerm :: Reference.Id -> Term v a -> Type v a -> m (), --- putTypeDeclaration :: Reference.Id -> Decl v a -> m (), - --- getBranch :: Branch.Hash -> m (Maybe (Branch m)), --- getRootBranch :: m (Either GetRootBranchError (Branch m)), --- putRootBranch :: Branch m -> m (), - --- rootBranchUpdates :: m (m (), m (Set Branch.Hash), --- getBranchForCausal :: Branch.CausalHash -> m (Maybe (Branch m)), - --- -- --|Supports syncing from a current or older codebase format --- syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), --- -- -- |Only writes the latest codebase format --- syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), --- -- -- ^ maybe return type needs to reflect failure if remote codebase has an old version - --- -- -- |Watch expressions are part of the codebase, the `Reference.Id` is --- -- the hash of the source of the watch expression, and the `Term v a` --- -- is the evaluated result of the expression, decompiled to a term. 
--- watches :: UF.WatchKind -> m [Reference.Id], --- getWatch :: UF.WatchKind -> Reference.Id -> m (Maybe (Term v a)), --- putWatch :: UF.WatchKind -> Reference.Id -> Term v a -> m (), - --- getReflog :: m [Reflog.Entry], --- appendReflog :: Text -> Branch m -> Branch m -> m (), - --- -- -- |the nicely-named versions will utilize these, and add builtins to the result set --- termsHavingType_impl :: Reference -> m (Set Referent.Id), --- termsMentioningType_impl :: Reference -> m (Set Referent.Id), - --- -- -- |number of base58 characters needed to distinguish any two hashes in the codebase; --- -- we don't have to compute it separately for different namespaces --- hashLength :: m Int, --- termReferencesByPrefix :: ShortHash -> m (Set Reference.Id), --- typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id), --- termReferentsByPrefix :: ShortHash -> m (Set Referent.Id), --- branchHashesByPrefix :: ShortBranchHash -> m (Set Branch.Hash), - --- -- --- lca :: [Causal m Branch.Raw e] -> m (Maybe Branch.Hash), --- dependentsImpl :: Reference -> m (Maybe (Set Reference.Id)), --- termDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), --- declDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), --- -- -- |terms, types, patches, and branches --- branchDependencies :: --- Branch.Hash -> m (Maybe (Branch.CausalHash, BD.Dependencies)), --- -- -- |the "new" terms and types mentioned in a patch --- patchDependencies :: EditHash -> m (Set Reference, Set Reference) - --- } \ No newline at end of file diff --git a/codebase2/editor/unison-editor.cabal b/codebase2/editor/unison-editor.cabal deleted file mode 100644 index cac5476005..0000000000 --- a/codebase2/editor/unison-editor.cabal +++ /dev/null @@ -1,27 +0,0 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. 
For further documentation, see --- http://haskell.org/cabal/users-guide/ - -name: unison-editor -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development - -library - exposed-modules: - -- U.Editor.Codebase - -- other-modules: - -- other-extensions: - build-depends: - base, - unison-codebase, - unison-language, - unison-runtime - hs-source-dirs: . - default-language: Haskell2010 diff --git a/codebase2/language/U/Language/Blank.hs b/codebase2/language/U/Language/Blank.hs deleted file mode 100644 index 0832dda35b..0000000000 --- a/codebase2/language/U/Language/Blank.hs +++ /dev/null @@ -1,25 +0,0 @@ -{-# LANGUAGE DeriveFunctor #-} --- |This is clearly an aspect of type-checking only, but included as a --- dependency of Term.F -module U.Language.Blank where - -loc :: Recorded loc -> loc -loc (Placeholder loc _) = loc -loc (Resolve loc _) = loc - -nameb :: Blank loc -> Maybe String -nameb (Recorded (Placeholder _ n)) = Just n -nameb (Recorded (Resolve _ n)) = Just n -nameb _ = Nothing - -data Recorded loc - -- A user-provided named placeholder - = Placeholder loc String - -- A name to be resolved with type-directed name resolution. - | Resolve loc String - deriving (Show, Eq, Ord, Functor) - -data Blank loc = Blank | Recorded (Recorded loc) - deriving (Show, Eq, Ord, Functor) - - diff --git a/codebase2/language/unison-language.cabal b/codebase2/language/unison-language.cabal deleted file mode 100644 index 28d319759f..0000000000 --- a/codebase2/language/unison-language.cabal +++ /dev/null @@ -1,27 +0,0 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. 
For further documentation, see --- http://haskell.org/cabal/users-guide/ - -name: unison-language -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development - -library - exposed-modules: - U.Language.Blank - -- U.Core.Referent.Short - -- other-modules: - -- other-extensions: - build-depends: - base, - unison-core, - unison-syntax - hs-source-dirs: . - default-language: Haskell2010 diff --git a/codebase2/runtime/U/Runtime/CodeLookup.hs b/codebase2/runtime/U/Runtime/CodeLookup.hs deleted file mode 100644 index a0878ffb72..0000000000 --- a/codebase2/runtime/U/Runtime/CodeLookup.hs +++ /dev/null @@ -1 +0,0 @@ -module U.Runtime.CodeLookup where \ No newline at end of file diff --git a/codebase2/runtime/unison-runtime.cabal b/codebase2/runtime/unison-runtime.cabal deleted file mode 100644 index 5d6572c756..0000000000 --- a/codebase2/runtime/unison-runtime.cabal +++ /dev/null @@ -1,28 +0,0 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ - -name: unison-runtime -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development - -library - exposed-modules: - U.Runtime.CodeLookup - -- other-modules: - -- other-extensions: - build-depends: - base, - containers, - text, - unison-core - -- unison-util - hs-source-dirs: . - default-language: Haskell2010 diff --git a/codebase2/syntax/unison-syntax.cabal b/codebase2/syntax/unison-syntax.cabal deleted file mode 100644 index bd05aa8040..0000000000 --- a/codebase2/syntax/unison-syntax.cabal +++ /dev/null @@ -1,25 +0,0 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. 
For further documentation, see --- http://haskell.org/cabal/users-guide/ - -name: unison-syntax -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development - -library - exposed-modules: - -- U.Syntax2 - -- other-modules: - -- other-extensions: - build-depends: - base, - unison-core - hs-source-dirs: . - default-language: Haskell2010 diff --git a/stack.yaml b/stack.yaml index 9e8453987f..dcac75c6be 100644 --- a/stack.yaml +++ b/stack.yaml @@ -11,17 +11,10 @@ packages: - yaks/easytest - parser-typechecker - unison-core - -# - codebase-convert-1to2 -- codebase1/codebase - codebase2/codebase - codebase2/codebase-sqlite - codebase2/codebase-sync - codebase2/core -- codebase2/language -- codebase2/runtime -- codebase2/editor -- codebase2/syntax - codebase2/util - codebase2/util-serialization - codebase2/util-term From fae69197e5cac0c30420cc15a9370e5a307b8bc5 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 18:55:28 -0600 Subject: [PATCH 198/225] Nicer message for upgrade --- parser-typechecker/unison/Main.hs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index eb8234fd0b..0428eb1ee8 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -209,7 +209,11 @@ main = do upgradeCodebase :: Maybe Codebase.CodebasePath -> IO () upgradeCodebase mcodepath = - Codebase.getCodebaseDir mcodepath >>= Upgrade12.upgradeCodebase + Codebase.getCodebaseDir mcodepath >>= \root -> do + putStrLn $ "I'm upgrading the codebase in '" ++ root ++ "', but it will take a while." + Upgrade12.upgradeCodebase root + putStrLn $ "\nTry it out and once you're satisfied, you may delete the old version from\n\n\t'" + ++ Codebase.codebasePath (FC.init @IO) root ++ "';\n\nbut there's no rush." 
prepareTranscriptDir :: Codebase.Init IO Symbol Ann -> Bool -> Maybe FilePath -> IO FilePath prepareTranscriptDir cbInit inFork mcodepath = do From df2b048b34bed69b02a07ffd80f23093d15e741e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 27 Apr 2021 19:21:31 -0600 Subject: [PATCH 199/225] cleanup --- codebase2-tests/add-type.md | 21 --- codebase2-tests/add-type.output.md | 67 ---------- codebase2-tests/builtins-merge.md | 3 - codebase2-tests/builtins-merge.output.md | 6 - codebase2-tests/reference-check.md | 8 -- codebase2-tests/reference-check.output.md | 29 ---- codebase2-tests/write-namespaces.md | 26 ---- codebase2-tests/write-namespaces.output.md | 64 --------- codebase2/CHANGELOG.md | 5 - .../U/Codebase/Sqlite/Branch/MetadataSet.hs | 6 - .../U/Codebase/Sqlite/Causal.hs | 1 - codebase2/codebase-sqlite/package.yaml | 27 ++++ .../unison-codebase-sqlite.cabal | 125 +++++++++--------- codebase2/codebase-sync/package.yaml | 26 ++++ .../codebase-sync/unison-codebase-sync.cabal | 61 ++++++--- codebase2/codebase/package.yaml | 14 ++ codebase2/codebase/unison-codebase.cabal | 82 ++++++------ codebase2/core/package.yaml | 13 ++ codebase2/core/unison-core.cabal | 59 ++++----- codebase2/util-term/package.yaml | 12 ++ codebase2/util-term/unison-util-term.cabal | 52 ++++---- codebase2/util/package.yaml | 12 +- defaults.yaml | 10 -- 23 files changed, 305 insertions(+), 424 deletions(-) delete mode 100644 codebase2-tests/add-type.md delete mode 100644 codebase2-tests/add-type.output.md delete mode 100644 codebase2-tests/builtins-merge.md delete mode 100644 codebase2-tests/builtins-merge.output.md delete mode 100644 codebase2-tests/reference-check.md delete mode 100644 codebase2-tests/reference-check.output.md delete mode 100644 codebase2-tests/write-namespaces.md delete mode 100644 codebase2-tests/write-namespaces.output.md delete mode 100644 codebase2/CHANGELOG.md delete mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs delete mode 100644 
codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs create mode 100644 codebase2/codebase-sqlite/package.yaml create mode 100644 codebase2/codebase-sync/package.yaml create mode 100644 codebase2/codebase/package.yaml create mode 100644 codebase2/core/package.yaml create mode 100644 codebase2/util-term/package.yaml delete mode 100644 defaults.yaml diff --git a/codebase2-tests/add-type.md b/codebase2-tests/add-type.md deleted file mode 100644 index 3206c7aa5a..0000000000 --- a/codebase2-tests/add-type.md +++ /dev/null @@ -1,21 +0,0 @@ -```ucm -.> alias.type ##Int Int -``` - -```unison -type Optional a = None | Some a -type Boptional = Bconstructional (Optional ##Int) -``` - -```ucm -.mytypes> add -``` - -```unison -``` - -```ucm -.> names Optional -.> names Boptional -.> find -``` diff --git a/codebase2-tests/add-type.output.md b/codebase2-tests/add-type.output.md deleted file mode 100644 index d9c4846ff9..0000000000 --- a/codebase2-tests/add-type.output.md +++ /dev/null @@ -1,67 +0,0 @@ -```ucm -.> alias.type ##Int Int - - Done. - -``` -```unison -type Optional a = None | Some a -type Boptional = Bconstructional (Optional ##Int) -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - type Boptional - type Optional a - -``` -```ucm - ☝️ The namespace .mytypes is empty. - -.mytypes> add - - ⍟ I've added these definitions: - - type Boptional - type Optional a - -``` -```unison -``` - -```ucm - - I loaded scratch.u and didn't find anything. - -``` -```ucm -.> names Optional - - Type - Hash: #5isltsdct9 - Names: mytypes.Optional - -.> names Boptional - - Type - Hash: #5q7ug1s3tb - Names: mytypes.Boptional - -.> find - - 1. builtin type Int - 2. type mytypes.Boptional - 3. mytypes.Boptional.Bconstructional : Optional Int - -> Boptional - 4. type mytypes.Optional a - 5. mytypes.Optional.None : Optional a - 6. 
mytypes.Optional.Some : a -> Optional a - - -``` diff --git a/codebase2-tests/builtins-merge.md b/codebase2-tests/builtins-merge.md deleted file mode 100644 index 547211e82b..0000000000 --- a/codebase2-tests/builtins-merge.md +++ /dev/null @@ -1,3 +0,0 @@ -```ucm -.> builtins.merge -``` \ No newline at end of file diff --git a/codebase2-tests/builtins-merge.output.md b/codebase2-tests/builtins-merge.output.md deleted file mode 100644 index 092ca74462..0000000000 --- a/codebase2-tests/builtins-merge.output.md +++ /dev/null @@ -1,6 +0,0 @@ -```ucm -.> builtins.merge - - Done. - -``` diff --git a/codebase2-tests/reference-check.md b/codebase2-tests/reference-check.md deleted file mode 100644 index ed8fd5e344..0000000000 --- a/codebase2-tests/reference-check.md +++ /dev/null @@ -1,8 +0,0 @@ -```unison -unique type Foo = Foo -``` - -```ucm -.> add -.> find -``` \ No newline at end of file diff --git a/codebase2-tests/reference-check.output.md b/codebase2-tests/reference-check.output.md deleted file mode 100644 index 2973ef48ca..0000000000 --- a/codebase2-tests/reference-check.output.md +++ /dev/null @@ -1,29 +0,0 @@ -```unison -unique type Foo = Foo -``` - -```ucm - - I found and typechecked these definitions in scratch.u. If you - do an `add` or `update`, here's how your codebase would - change: - - ⍟ These new definitions are ok to `add`: - - unique type Foo - -``` -```ucm -.> add - - ⍟ I've added these definitions: - - unique type Foo - -.> find - - 1. unique type Foo - 2. 
Foo.Foo : Foo - - -``` diff --git a/codebase2-tests/write-namespaces.md b/codebase2-tests/write-namespaces.md deleted file mode 100644 index 1c8149df2b..0000000000 --- a/codebase2-tests/write-namespaces.md +++ /dev/null @@ -1,26 +0,0 @@ -```ucm -.> alias.term ##Nat.+ + -``` - -```unison:hide -type Foo = Foo | Bar -a = 3 -b = a + 1 -``` - -```ucm -.foo.bar> add -``` - -```unison:hide -a = 4 -``` - -```ucm -.foo.bar> update -.> find -``` - -```unison -> b -``` \ No newline at end of file diff --git a/codebase2-tests/write-namespaces.output.md b/codebase2-tests/write-namespaces.output.md deleted file mode 100644 index 887b1fbdfd..0000000000 --- a/codebase2-tests/write-namespaces.output.md +++ /dev/null @@ -1,64 +0,0 @@ -```ucm -.> alias.term ##Nat.+ + - - Done. - -``` -```unison -type Foo = Foo | Bar -a = 3 -b = a + 1 -``` - -```ucm - ☝️ The namespace .foo.bar is empty. - -.foo.bar> add - - ⍟ I've added these definitions: - - type Foo - a : ##Nat - b : ##Nat - -``` -```unison -a = 4 -``` - -```ucm -.foo.bar> update - - ⍟ I've updated these names to your new definition: - - a : ##Nat - -.> find - - 1. + : ##Nat -> ##Nat -> ##Nat - 2. type foo.bar.Foo - 3. foo.bar.Foo.Bar : Foo - 4. foo.bar.Foo.Foo : Foo - 5. foo.bar.a : ##Nat - 6. foo.bar.b : ##Nat - - -``` -```unison -> b -``` - -```ucm - - ✅ - - scratch.u changed. - - Now evaluating any watch expressions (lines starting with - `>`)... Ctrl+C cancels. - - 1 | > b - ⧩ - 5 - -``` diff --git a/codebase2/CHANGELOG.md b/codebase2/CHANGELOG.md deleted file mode 100644 index ce1c7792ea..0000000000 --- a/codebase2/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -# Revision history for unison-codebase2-core - -## 0.1.0.0 -- YYYY-mm-dd - -* First version. Released on an unsuspecting world. 
diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs deleted file mode 100644 index a7628d0c70..0000000000 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Branch/MetadataSet.hs +++ /dev/null @@ -1,6 +0,0 @@ -module U.Codebase.Sqlite.Branch.MetadataSet where - --- import Data.Set (Set) --- import U.Codebase.Sqlite.Reference (Reference) - --- data MetadataSetFormat = Inline (Set Reference) \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs deleted file mode 100644 index 7996277b1f..0000000000 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Causal.hs +++ /dev/null @@ -1 +0,0 @@ -module U.Codebase.Sqlite.Causal where diff --git a/codebase2/codebase-sqlite/package.yaml b/codebase2/codebase-sqlite/package.yaml new file mode 100644 index 0000000000..f26a17fc5e --- /dev/null +++ b/codebase2/codebase-sqlite/package.yaml @@ -0,0 +1,27 @@ +name: unison-codebase-sqlite +github: unisonweb/unison + +library: + source-dirs: . + +dependencies: + - base + - bytes + - bytestring + - containers + - extra + - here + - lens + - monad-validate + - mtl + - sqlite-simple + - text + - transformers + - unliftio + - vector + - unison-codebase + - unison-codebase-sync + - unison-core + - unison-util + - unison-util-serialization + - unison-util-term \ No newline at end of file diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index 6e53604d71..cd34874dbd 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -1,67 +1,68 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. 
For further documentation, see --- http://haskell.org/cabal/users-guide/ +cabal-version: 1.12 -name: unison-codebase-sqlite -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development +-- This file has been generated from package.yaml by hpack version 0.33.0. +-- +-- see: https://github.com/sol/hpack +-- +-- hash: 5ed07b4204b545e00ed90d14cf65b106bfccb97b635adafa701389e440b29e04 + +name: unison-codebase-sqlite +version: 0.0.0 +homepage: https://github.com/unisonweb/unison#readme +bug-reports: https://github.com/unisonweb/unison/issues +build-type: Simple + +source-repository head + type: git + location: https://github.com/unisonweb/unison library exposed-modules: - U.Codebase.Sqlite.Branch.Format - U.Codebase.Sqlite.Branch.Full - U.Codebase.Sqlite.Branch.Diff - U.Codebase.Sqlite.Branch.MetadataSet - U.Codebase.Sqlite.Decl.Format - U.Codebase.Sqlite.Causal - U.Codebase.Sqlite.DbId - U.Codebase.Sqlite.LocalIds - U.Codebase.Sqlite.ObjectType - U.Codebase.Sqlite.Operations - U.Codebase.Sqlite.Patch.Format - U.Codebase.Sqlite.Patch.Full - U.Codebase.Sqlite.Patch.Diff - U.Codebase.Sqlite.Patch.TermEdit - U.Codebase.Sqlite.Patch.TypeEdit - U.Codebase.Sqlite.Queries - U.Codebase.Sqlite.Reference - U.Codebase.Sqlite.Referent - U.Codebase.Sqlite.Serialization - U.Codebase.Sqlite.Symbol - U.Codebase.Sqlite.Sync22 - U.Codebase.Sqlite.SyncEntity - U.Codebase.Sqlite.Term.Format - U.Codebase.Sqlite.Types - - -- other-modules: - -- other-extensions: + U.Codebase.Sqlite.Branch.Diff + U.Codebase.Sqlite.Branch.Format + U.Codebase.Sqlite.Branch.Full + U.Codebase.Sqlite.DbId + U.Codebase.Sqlite.Decl.Format + U.Codebase.Sqlite.LocalIds + U.Codebase.Sqlite.ObjectType + U.Codebase.Sqlite.Operations + U.Codebase.Sqlite.Patch.Diff + U.Codebase.Sqlite.Patch.Format + U.Codebase.Sqlite.Patch.Full + U.Codebase.Sqlite.Patch.TermEdit + 
U.Codebase.Sqlite.Patch.TypeEdit + U.Codebase.Sqlite.Queries + U.Codebase.Sqlite.Reference + U.Codebase.Sqlite.Referent + U.Codebase.Sqlite.Serialization + U.Codebase.Sqlite.Symbol + U.Codebase.Sqlite.Sync22 + U.Codebase.Sqlite.SyncEntity + U.Codebase.Sqlite.Term.Format + U.Codebase.Sqlite.Types + other-modules: + Paths_unison_codebase_sqlite + hs-source-dirs: + ./. build-depends: - base, - bytes, - bytestring, - containers, - extra, - here, - lens, - monad-validate, - mtl, - sqlite-simple, - text, - transformers, - unliftio, - vector, - unison-codebase, - unison-codebase-sync, - unison-core, - unison-util, - unison-util-serialization, - unison-util-term - hs-source-dirs: . - default-language: Haskell2010 + base + , bytes + , bytestring + , containers + , extra + , here + , lens + , monad-validate + , mtl + , sqlite-simple + , text + , transformers + , unison-codebase + , unison-codebase-sync + , unison-core + , unison-util + , unison-util-serialization + , unison-util-term + , unliftio + , vector + default-language: Haskell2010 diff --git a/codebase2/codebase-sync/package.yaml b/codebase2/codebase-sync/package.yaml new file mode 100644 index 0000000000..000341ca41 --- /dev/null +++ b/codebase2/codebase-sync/package.yaml @@ -0,0 +1,26 @@ +name: unison-codebase-sync +github: unisonweb/unison + +library: + source-dirs: . 
+ +dependencies: + - base + - bytes + - bytestring + - containers + - extra + - here + - lens + - monad-validate + - mtl + - sqlite-simple + - text + - transformers + - unison-codebase + - unison-core + - unison-util + - unison-util-serialization + - unison-util-term + - unliftio + - vector \ No newline at end of file diff --git a/codebase2/codebase-sync/unison-codebase-sync.cabal b/codebase2/codebase-sync/unison-codebase-sync.cabal index a9381c0886..51dc8b4f4b 100644 --- a/codebase2/codebase-sync/unison-codebase-sync.cabal +++ b/codebase2/codebase-sync/unison-codebase-sync.cabal @@ -1,25 +1,46 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ +cabal-version: 1.12 -name: unison-codebase-sync -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development +-- This file has been generated from package.yaml by hpack version 0.33.0. +-- +-- see: https://github.com/sol/hpack +-- +-- hash: 5aecf5175bad0ac9cc461173d87d0fc00be36065974348d8541bf14ff451370c + +name: unison-codebase-sync +version: 0.0.0 +homepage: https://github.com/unisonweb/unison#readme +bug-reports: https://github.com/unisonweb/unison/issues +build-type: Simple + +source-repository head + type: git + location: https://github.com/unisonweb/unison library - hs-source-dirs: . exposed-modules: - U.Codebase.Sync - -- other-modules: - -- other-extensions: + U.Codebase.Sync + other-modules: + Paths_unison_codebase_sync + hs-source-dirs: + ./. 
build-depends: - base, - extra, - default-language: Haskell2010 + base + , bytes + , bytestring + , containers + , extra + , here + , lens + , monad-validate + , mtl + , sqlite-simple + , text + , transformers + , unison-codebase + , unison-core + , unison-util + , unison-util-serialization + , unison-util-term + , unliftio + , vector + default-language: Haskell2010 diff --git a/codebase2/codebase/package.yaml b/codebase2/codebase/package.yaml new file mode 100644 index 0000000000..4159de809d --- /dev/null +++ b/codebase2/codebase/package.yaml @@ -0,0 +1,14 @@ +name: unison-codebase +github: unisonweb/unison + +library: + source-dirs: . + +dependencies: + - base + - containers + - lens + - mtl + - text + - unison-core + - unison-util \ No newline at end of file diff --git a/codebase2/codebase/unison-codebase.cabal b/codebase2/codebase/unison-codebase.cabal index 45bb0355bf..3f37375c76 100644 --- a/codebase2/codebase/unison-codebase.cabal +++ b/codebase2/codebase/unison-codebase.cabal @@ -1,44 +1,48 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ +cabal-version: 1.12 -name: unison-codebase -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development +-- This file has been generated from package.yaml by hpack version 0.33.0. 
+-- +-- see: https://github.com/sol/hpack +-- +-- hash: 6bc4dfe7298f08ffbdf912c194bdaa7b5d82d6239249779de15a536ce062e383 + +name: unison-codebase +version: 0.0.0 +homepage: https://github.com/unisonweb/unison#readme +bug-reports: https://github.com/unisonweb/unison/issues +build-type: Simple + +source-repository head + type: git + location: https://github.com/unisonweb/unison library exposed-modules: - U.Codebase.Branch - U.Codebase.Causal - U.Codebase.Codebase - U.Codebase.Decl - U.Codebase.HashTags - U.Codebase.Kind - U.Codebase.Reference - U.Codebase.Referent - U.Codebase.Reflog - U.Codebase.ShortHash - U.Codebase.Term - U.Codebase.TermEdit - U.Codebase.Type - U.Codebase.TypeEdit - U.Codebase.WatchKind - -- other-modules: - -- other-extensions: + U.Codebase.Branch + U.Codebase.Causal + U.Codebase.Codebase + U.Codebase.Decl + U.Codebase.HashTags + U.Codebase.Kind + U.Codebase.Reference + U.Codebase.Referent + U.Codebase.Reflog + U.Codebase.ShortHash + U.Codebase.Term + U.Codebase.TermEdit + U.Codebase.Type + U.Codebase.TypeEdit + U.Codebase.WatchKind + other-modules: + Paths_unison_codebase + hs-source-dirs: + ./. build-depends: - base, - containers, - lens, - mtl, - text, - unison-core, - unison-util - hs-source-dirs: . - default-language: Haskell2010 + base + , containers + , lens + , mtl + , text + , unison-core + , unison-util + default-language: Haskell2010 diff --git a/codebase2/core/package.yaml b/codebase2/core/package.yaml new file mode 100644 index 0000000000..8fbd66d244 --- /dev/null +++ b/codebase2/core/package.yaml @@ -0,0 +1,13 @@ +name: unison-core +github: unisonweb/unison + +library: + source-dirs: . 
+ +dependencies: + - base + - containers + - text + - vector + - prelude-extras # deprecated in favor of base + - unison-util \ No newline at end of file diff --git a/codebase2/core/unison-core.cabal b/codebase2/core/unison-core.cabal index 3a4e1dfd5b..16b15c4a8e 100644 --- a/codebase2/core/unison-core.cabal +++ b/codebase2/core/unison-core.cabal @@ -1,35 +1,34 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ +cabal-version: 1.12 -name: unison-core -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development +-- This file has been generated from package.yaml by hpack version 0.33.0. +-- +-- see: https://github.com/sol/hpack +-- +-- hash: 40c5460ecd0a04d3f1012c9848d442e889805f339ee3f9a20d46932e843d116d + +name: unison-core +version: 0.0.0 +homepage: https://github.com/unisonweb/unison#readme +bug-reports: https://github.com/unisonweb/unison/issues +build-type: Simple + +source-repository head + type: git + location: https://github.com/unisonweb/unison library exposed-modules: - U.Core.ABT - U.Core.ABT.Var - -- U.Core.Reference - -- U.Core.Referent - -- U.Core.Term - -- U.Core2.Referent.Short - -- other-modules: - -- other-extensions: + U.Core.ABT + U.Core.ABT.Var + other-modules: + Paths_unison_core + hs-source-dirs: + ./. build-depends: - base, - containers, - text, - vector, - -- prelude-extras is deprecated in favor of base - prelude-extras, - unison-util - hs-source-dirs: . 
- default-language: Haskell2010 + base + , containers + , prelude-extras + , text + , unison-util + , vector + default-language: Haskell2010 diff --git a/codebase2/util-term/package.yaml b/codebase2/util-term/package.yaml new file mode 100644 index 0000000000..95dc2a5108 --- /dev/null +++ b/codebase2/util-term/package.yaml @@ -0,0 +1,12 @@ +name: unison-util-term +github: unisonweb/unison + +library: + source-dirs: . + +dependencies: + - base + - containers + - mtl + - unison-core + - unison-codebase \ No newline at end of file diff --git a/codebase2/util-term/unison-util-term.cabal b/codebase2/util-term/unison-util-term.cabal index c56407e009..39fd34bf50 100644 --- a/codebase2/util-term/unison-util-term.cabal +++ b/codebase2/util-term/unison-util-term.cabal @@ -1,29 +1,33 @@ -cabal-version: 2.2 --- Initial package description 'unison-codebase2-core.cabal' generated by --- 'cabal init'. For further documentation, see --- http://haskell.org/cabal/users-guide/ +cabal-version: 1.12 -name: unison-util-term -version: 0.1.0.0 --- synopsis: --- description: -homepage: https://github.com/unisonweb/unison --- bug-reports: -license: MIT -copyright: Unison Computing, PBC -category: Development +-- This file has been generated from package.yaml by hpack version 0.33.0. +-- +-- see: https://github.com/sol/hpack +-- +-- hash: 4ffe80ec3d93d2c069c06e4713c6a630d11e01d99b29ce073a6ae0b79fffaa5e + +name: unison-util-term +version: 0.0.0 +homepage: https://github.com/unisonweb/unison#readme +bug-reports: https://github.com/unisonweb/unison/issues +build-type: Simple + +source-repository head + type: git + location: https://github.com/unisonweb/unison library exposed-modules: - U.Util.Term - U.Util.Type - -- other-modules: - -- other-extensions: + U.Util.Term + U.Util.Type + other-modules: + Paths_unison_util_term + hs-source-dirs: + ./. build-depends: - base, - containers, - mtl, - unison-core, - unison-codebase - hs-source-dirs: . 
- default-language: Haskell2010 + base + , containers + , mtl + , unison-codebase + , unison-core + default-language: Haskell2010 diff --git a/codebase2/util/package.yaml b/codebase2/util/package.yaml index ea0e738398..c1e1373dc1 100644 --- a/codebase2/util/package.yaml +++ b/codebase2/util/package.yaml @@ -4,9 +4,15 @@ github: unisonweb/unison library: source-dirs: . -defaults: - local: ../../defaults.yaml - dependencies: + - base + - bytestring + - containers - cryptonite + - extra + - lens + - memory + - safe - sandi + - text + - unliftio diff --git a/defaults.yaml b/defaults.yaml deleted file mode 100644 index b965e931fb..0000000000 --- a/defaults.yaml +++ /dev/null @@ -1,10 +0,0 @@ -dependencies: - - base - - bytestring - - containers - - extra - - lens - - memory - - text - - unliftio - - safe From 0b57929fd19d10a2fb862d64c68434c06dc24d5f Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 00:09:14 -0600 Subject: [PATCH 200/225] delete and clean up stuff --- .../U/Codebase/Sqlite/Operations.hs | 40 +--- .../U/Codebase/Sqlite/Serialization.hs | 84 +-------- .../U/Codebase/Sqlite/Sync22.hs | 27 +-- .../U/Codebase/Sqlite/SyncEntity.hs | 43 ----- .../U/Codebase/Sqlite/Types.hs | 8 - codebase2/codebase-sqlite/package.yaml | 2 +- .../codebase-sqlite/sql/create-index.sql | 2 +- .../unison-codebase-sqlite.cabal | 4 +- codebase2/codebase/U/Codebase/Codebase.hs | 74 -------- codebase2/codebase/package.yaml | 2 +- codebase2/codebase/unison-codebase.cabal | 3 +- codebase2/core/package.yaml | 2 +- codebase2/notes.txt | 3 - .../U/Util/Serialization.hs | 2 - codebase2/util-term/package.yaml | 2 +- codebase2/util/U/Util/Alternative.hs | 2 +- codebase2/util/U/Util/Set.hs | 2 +- codebase2/util/U/Util/String.hs | 2 +- hie.yaml | 173 +++++++++++++++--- .../src/Unison/Codebase/Causal.hs | 6 +- .../src/Unison/Codebase/Conversion/Sync12.hs | 19 +- .../Unison/Codebase/Conversion/Upgrade12.hs | 2 +- .../src/Unison/Codebase/Editor/HandleInput.hs | 28 --- 
.../src/Unison/Codebase/Editor/Output.hs | 2 - .../src/Unison/Codebase/SqliteCodebase.hs | 53 ++---- .../src/Unison/CommandLine/OutputMessages.hs | 16 -- parser-typechecker/unison/Main.hs | 13 +- stack.yaml | 2 +- 28 files changed, 207 insertions(+), 411 deletions(-) delete mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs delete mode 100644 codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs delete mode 100644 codebase2/codebase/U/Codebase/Codebase.hs delete mode 100644 codebase2/notes.txt diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index afba17a98e..69d2feef1d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -101,7 +101,6 @@ import qualified U.Codebase.Sqlite.Referent as S import qualified U.Codebase.Sqlite.Referent as S.Referent import qualified U.Codebase.Sqlite.Serialization as S import U.Codebase.Sqlite.Symbol (Symbol) -import qualified U.Codebase.Sqlite.SyncEntity as SE import qualified U.Codebase.Sqlite.Term.Format as S.Term import qualified U.Codebase.Term as C import qualified U.Codebase.Term as C.Term @@ -184,11 +183,6 @@ lookupTextId t = loadTextById :: EDB m => Db.TextId -> m Text loadTextById = liftQ . Q.loadTextById --- | Q: Any Hash that UCM gets ahold of should already exist in the DB? --- because it came from a sync or from a save --- hashToHashId :: EDB m => H.Hash -> m Db.HashId --- hashToHashId = liftQ . Q.expectHashIdByHash - -- | look up an existing object by its primary hash primaryHashToExistingObjectId :: EDB m => H.Hash -> m Db.ObjectId primaryHashToExistingObjectId h = do @@ -214,8 +208,6 @@ primaryHashToMaybeBranchObjectId :: DB m => BranchHash -> m (Maybe Db.BranchObje primaryHashToMaybeBranchObjectId = (fmap . fmap) Db.BranchObjectId . primaryHashToMaybeObjectId . unBranchHash --- (fmap . fmap) Db.BranchObjectId . liftQ . 
Q.maybeObjectIdPrimaryHashId . unBranchHash - objectExistsForHash :: DB m => H.Hash -> m Bool objectExistsForHash h = isJust <$> runMaybeT do @@ -409,10 +401,11 @@ getCycleLen h = do runMaybeT (primaryHashToExistingObjectId h) >>= maybe (throwError $ LegacyUnknownCycleLen h) pure >>= liftQ . Q.loadObjectById - -- todo: decodeComponentLengthOnly is unintentionally a hack that relies on the - -- fact the two things that have cycles (term and decl components) have the same basic - -- serialized structure: first a format byte that is always 0 for now, followed by - -- a framed array representing the component. :grimace: + -- todo: decodeComponentLengthOnly is unintentionally a hack that relies on + -- the fact the two things that references can refer to (term and decl + -- components) have the same basic serialized structure: first a format + -- byte that is always 0 for now, followed by a framed array representing + -- the strongly-connected component. :grimace: >>= decodeComponentLengthOnly >>= pure . 
fromIntegral @@ -1360,27 +1353,4 @@ derivedDependencies cid = do cids <- traverse s2cReferenceId sids pure $ Set.fromList cids --- * Sync-related dependency queries - -objectDependencies :: EDB m => Db.ObjectId -> m SE.SyncEntitySeq -objectDependencies oid = do - (ot, bs) <- liftQ $ Q.loadObjectWithTypeById oid - let getOrError = getFromBytesOr (ErrObjectDependencies ot oid) - case ot of - OT.TermComponent -> getOrError S.getComponentSyncEntities bs - OT.DeclComponent -> getOrError S.getComponentSyncEntities bs - OT.Namespace -> getOrError S.getBranchSyncEntities bs - OT.Patch -> getOrError S.getPatchSyncEntities bs - --- branchDependencies :: --- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), --- -- |the "new" terms and types mentioned in a patch - --- patchDependencies :: EditHash -> m (Maybe (Set DefnHash)) --- patchDependencies h = error "todo" - --- getBranchByAnyHash :: --- getBranchByBranchHash :: DB m => BranchHash -> m (Maybe (Branch m)) --- getBranchByCausalHash :: DB m => CausalHash -> m (Maybe (Branch m)) - -- lca :: (forall he e. 
[Causal m CausalHash he e] -> m (Maybe BranchHash)), diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index d11c708b5f..aab8b48b98 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -34,7 +34,7 @@ import qualified U.Codebase.Sqlite.Branch.Diff as BranchDiff import U.Codebase.Sqlite.Branch.Format (BranchLocalIds) import qualified U.Codebase.Sqlite.Branch.Format as BranchFormat import qualified U.Codebase.Sqlite.Branch.Full as BranchFull -import U.Codebase.Sqlite.DbId (BranchObjectId, ObjectId, PatchObjectId, unBranchObjectId, unPatchObjectId) +import U.Codebase.Sqlite.DbId (ObjectId) import qualified U.Codebase.Sqlite.Decl.Format as DeclFormat import U.Codebase.Sqlite.LocalIds (LocalIds, LocalIds' (..), LocalTextId, WatchLocalIds) import qualified U.Codebase.Sqlite.Patch.Diff as PatchDiff @@ -43,8 +43,7 @@ import qualified U.Codebase.Sqlite.Patch.Format as PatchFormat import qualified U.Codebase.Sqlite.Patch.Full as PatchFull import qualified U.Codebase.Sqlite.Patch.TermEdit as TermEdit import qualified U.Codebase.Sqlite.Patch.TypeEdit as TypeEdit -import U.Codebase.Sqlite.Symbol -import qualified U.Codebase.Sqlite.SyncEntity as SE +import U.Codebase.Sqlite.Symbol (Symbol (..)) import qualified U.Codebase.Sqlite.Term.Format as TermFormat import qualified U.Codebase.Term as Term import qualified U.Codebase.Type as Type @@ -115,26 +114,6 @@ getABT getVar getA getF = getList getVar >>= go [] 3 -> ABT.cycle a <$> go env fvs _ -> unknownTag "getABT" tag -{- -put/get/write/read -- [x][x][ ][ ] term component -- [x][x][ ][ ] types of terms -- [x][x][ ][ ] decl component -- [-][-][ ][ ] causal -- [x][x][ ][ ] BranchFormat -- [x][ ][ ][ ] full branch -- [x][ ][ ][ ] diff branch -- [x][ ][ ][ ] PatchFormat -- [x][ ][ ][ ] full patch -- [x][ ][ ][ ] diff patch -- [ ] O(1) framed array access? 
-- [ ] tests for framed array access - -- [ ] add to dependents index -- [ ] add to type index -- [ ] add to type mentions index --} - putLocalIds :: (MonadPut m, Integral t, Bits t, Integral d, Bits d) => LocalIds' t d -> m () putLocalIds LocalIds {..} = do putFoldable putVarInt textLookup @@ -659,41 +638,9 @@ getBranchLocalIds = <*> getVector getVarInt <*> getVector (getPair getVarInt getVarInt) -getBranchSyncEntities :: MonadGet m => m SE.SyncEntitySeq -getBranchSyncEntities = - getWord8 >>= \case - -- Full - 0 -> getDeps - -- Diff - 1 -> do - id <- getVarInt @_ @BranchObjectId - SE.addObjectId (unBranchObjectId id) <$> getDeps - x -> unknownTag "getBranchSyncEntities" x - where - getDeps = localIdsToDeps <$> getBranchLocalIds - localIdsToDeps (BranchFormat.LocalIds ts os ps bcs) = - SE.SyncEntity - (vec2seq ts) - ( vec2seq os - <> vec2seq (Vector.map unPatchObjectId ps) - <> vec2seq (Vector.map unBranchObjectId bos) - ) - mempty - (vec2seq chs) - where - (bos, chs) = Vector.unzip bcs - vec2seq :: Vector a -> Seq a vec2seq v = Seq.fromFunction (length v) (v Vector.!) 
-localIdsToLocalDeps :: LocalIds -> SE.SyncEntitySeq -localIdsToLocalDeps (LocalIds ts os) = - SE.SyncEntity (vec2seq ts) (vec2seq os) mempty mempty - -watchLocalIdsToLocalDeps :: WatchLocalIds -> SE.SyncEntitySeq -watchLocalIdsToLocalDeps (LocalIds ts hs) = - SE.SyncEntity (vec2seq ts) mempty (vec2seq hs) mempty - decomposeComponent :: MonadGet m => m [(LocalIds, BS.ByteString)] decomposeComponent = do offsets <- getList (getVarInt @_ @Int) @@ -739,28 +686,6 @@ recomposeBranchFull li bs = putBranchLocalIds li *> putByteString bs recomposeBranchDiff :: MonadPut m => ObjectId -> BranchLocalIds -> BS.ByteString -> m () recomposeBranchDiff id li bs = putVarInt id *> putBranchLocalIds li *> putByteString bs --- the same implementation currently works for term component and type component -getComponentSyncEntities :: MonadGet m => m SE.SyncEntitySeq -getComponentSyncEntities = - foldMap (localIdsToLocalDeps . fst) <$> decomposeComponent - -getPatchSyncEntities :: MonadGet m => m SE.SyncEntitySeq -getPatchSyncEntities = - getWord8 >>= \case - 0 -> getDeps - 1 -> do - id <- getVarInt @_ @PatchObjectId - SE.addObjectId (unPatchObjectId id) <$> getDeps - x -> unknownTag "getPatchSyncEntities" x - where - getDeps = localIdsToDeps <$> getPatchLocalIds - localIdsToDeps (PatchFormat.LocalIds ts hs os) = - SE.SyncEntity - (vec2seq ts) - (vec2seq os) - (vec2seq hs) - mempty - getSymbol :: MonadGet m => m Symbol getSymbol = Symbol <$> getVarInt <*> getText @@ -928,7 +853,4 @@ getMaybe getA = unknownTag :: (MonadGet m, Show a) => String -> a -> m x unknownTag msg tag = - fail $ - "unknown tag " ++ show tag - ++ " while deserializing: " - ++ msg + fail $ "unknown tag " ++ show tag ++ " while deserializing: " ++ msg diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index b219b960fd..a5dbd3c66d 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ 
b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -57,9 +57,7 @@ data DbTag = SrcDb | DestDb data DecodeError = ErrTermComponent | ErrDeclComponent - | -- | ErrTermFormat - -- | ErrDeclFormat - ErrBranchFormat + | ErrBranchFormat | ErrPatchFormat | ErrBranchBody Word8 | ErrPatchBody Word8 @@ -113,23 +111,7 @@ trySync :: Generation -> Entity -> m (TrySyncResult Entity) -trySync t h o c _gc e = do - -- traceM $ "trySync " ++ show e ++ "..." - result <- trySync' t h o c _gc e - -- traceM $ "trySync " ++ show e ++ " = " ++ show result - pure result - -trySync' :: - forall m. - (MonadIO m, MonadError Error m, MonadReader Env m) => - Cache m TextId TextId -> - Cache m HashId HashId -> - Cache m ObjectId ObjectId -> - Cache m CausalHashId CausalHashId -> - Generation -> - Entity -> - m (TrySyncResult Entity) -trySync' tCache hCache oCache cCache _gc = \case +trySync tCache hCache oCache cCache _gc = \case -- for causals, we need to get the value_hash_id of the thingo -- - maybe enqueue their parents -- - enqueue the self_ and value_ hashes @@ -438,8 +420,3 @@ runDB conn action = Except.runExceptT (Reader.runReaderT action conn) >>= \case Left e -> throwError (DbIntegrity e) Right a -> pure a - --- syncs coming from git: --- - pull a specified remote causal (Maybe CausalHash) into the local database --- - and then maybe do some stuff --- syncs coming from \ No newline at end of file diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs deleted file mode 100644 index 755f060b28..0000000000 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/SyncEntity.hs +++ /dev/null @@ -1,43 +0,0 @@ -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE TypeSynonymInstances #-} -{-# LANGUAGE RankNTypes #-} - -module U.Codebase.Sqlite.SyncEntity where - -import Data.Sequence (Seq) -import qualified Data.Sequence as Seq -import qualified U.Codebase.Sqlite.DbId as Db - --- | Stuff you'll need to sync -data 
SyncEntity' f = SyncEntity - { -- | strings that need to be synced - text :: f Db.TextId, - -- | objects that need to be synced - objects :: f Db.ObjectId, - -- | hashes that need to be synced (comparable to weak refs) - hashes :: f Db.HashId, - -- | causals that need to be synced (these are not weak refs). - -- causals are relational instead of objects because we - -- ... wanted to use sqlite for LCA?? - causals :: f Db.CausalHashId - } - -type SyncEntitySeq = SyncEntity' Seq - -addObjectId :: Db.ObjectId -> SyncEntitySeq -> SyncEntitySeq -addObjectId id s = s {objects = id Seq.<| objects s} - -append :: (forall a. f a -> f a -> f a) -> SyncEntity' f -> SyncEntity' f -> SyncEntity' f -append (<>) a b = - SyncEntity - (text a <> text b) - (objects a <> objects b) - (hashes a <> hashes b) - (causals a <> causals b) - -instance Semigroup SyncEntitySeq where - (<>) = append (<>) - -instance Monoid SyncEntitySeq where - mempty = SyncEntity mempty mempty mempty mempty - mappend = (<>) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs deleted file mode 100644 index 5cbc509c60..0000000000 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Types.hs +++ /dev/null @@ -1,8 +0,0 @@ -module U.Codebase.Sqlite.Types where - -import U.Codebase.Sqlite.DbId -import U.Codebase.Referent (Referent') -import U.Codebase.Reference (Reference') - -type Reference = Reference' TextId ObjectId -type Referent = Referent' Reference Reference diff --git a/codebase2/codebase-sqlite/package.yaml b/codebase2/codebase-sqlite/package.yaml index f26a17fc5e..fd779fb4c0 100644 --- a/codebase2/codebase-sqlite/package.yaml +++ b/codebase2/codebase-sqlite/package.yaml @@ -24,4 +24,4 @@ dependencies: - unison-core - unison-util - unison-util-serialization - - unison-util-term \ No newline at end of file + - unison-util-term diff --git a/codebase2/codebase-sqlite/sql/create-index.sql b/codebase2/codebase-sqlite/sql/create-index.sql index 
86743a0da1..a054e0dadc 100644 --- a/codebase2/codebase-sqlite/sql/create-index.sql +++ b/codebase2/codebase-sqlite/sql/create-index.sql @@ -73,4 +73,4 @@ CREATE INDEX dependents_by_dependency ON dependents_index ( CREATE INDEX dependencies_by_dependent ON dependents_index ( dependent_object_id, dependent_component_index -) \ No newline at end of file +) diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index cd34874dbd..e3c6b36c1e 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: 5ed07b4204b545e00ed90d14cf65b106bfccb97b635adafa701389e440b29e04 +-- hash: e1cd44afa84ce86b33e3573d3ec12f6238df82991290e25f9b13a65d5ad98b04 name: unison-codebase-sqlite version: 0.0.0 @@ -37,9 +37,7 @@ library U.Codebase.Sqlite.Serialization U.Codebase.Sqlite.Symbol U.Codebase.Sqlite.Sync22 - U.Codebase.Sqlite.SyncEntity U.Codebase.Sqlite.Term.Format - U.Codebase.Sqlite.Types other-modules: Paths_unison_codebase_sqlite hs-source-dirs: diff --git a/codebase2/codebase/U/Codebase/Codebase.hs b/codebase2/codebase/U/Codebase/Codebase.hs deleted file mode 100644 index 5cb35dfa96..0000000000 --- a/codebase2/codebase/U/Codebase/Codebase.hs +++ /dev/null @@ -1,74 +0,0 @@ -{-# LANGUAGE RankNTypes #-} - -module U.Codebase.Codebase where - -import Data.Set (Set) -import Data.Text (Text) -import U.Codebase.Branch (Branch, Patch) -import U.Codebase.Causal (Causal) -import U.Codebase.Decl (Decl) -import U.Codebase.HashTags (BranchHash, CausalHash, PatchHash) -import U.Codebase.Reference (Reference) -import qualified U.Codebase.Reference as Reference -import qualified U.Codebase.Referent as Referent -import qualified U.Codebase.Reflog as Reflog -import U.Codebase.ShortHash (ShortBranchHash, ShortHash) -import U.Codebase.Term (Term) -import U.Codebase.Type 
(TypeT) -import U.Codebase.WatchKind (WatchKind) -import U.Util.Hash (Hash) - -newtype CodebasePath = CodebasePath FilePath - -data SyncMode = SyncShortCircuit | SyncComplete - -data Codebase m v = Codebase - { getTerm :: Reference.Id -> m (Maybe (Term v)), - getTypeOfTerm :: Reference.Id -> m (Maybe (TypeT v)), - getTypeDeclaration :: Reference.Id -> m (Maybe (Decl v)), - putTerm :: Reference.Id -> Term v -> TypeT v -> m (), - putTypeDeclaration :: Reference.Id -> Decl v -> m (), - getPatch :: PatchHash -> m Patch, - putPatch :: PatchHash -> Patch -> m (), - getBranch :: BranchHash -> m (Maybe (Branch m)), - getRootBranch :: m (Either GetRootBranchError (Branch m)), - putRootBranch :: Branch m -> m (), - getBranchForCausal :: CausalHash -> m (Maybe (Branch m)), - -- | Supports syncing from a current or older codebase format - syncFromDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), - -- | Only writes the latest codebase format - syncToDirectory :: CodebasePath -> SyncMode -> Branch m -> m (), - -- | Watch expressions are part of the codebase, the `Reference.Id` is - -- the hash of the source of the watch expression, and the `Term v a` - -- is the evaluated result of the expression, decompiled to a term. 
- watches :: WatchKind -> m [Reference.Id], - getWatch :: WatchKind -> Reference.Id -> m (Maybe (Term v)), - putWatch :: WatchKind -> Reference.Id -> Term v -> m (), - getReflog :: m [Reflog.Entry], - appendReflog :: Text -> Branch m -> Branch m -> m (), - -- | the nicely-named versions will utilize these, and add builtins to the result set - termsHavingType :: Reference -> m (Set Referent.Id), - termsMentioningType :: Reference -> m (Set Referent.Id), - -- | number of base32 characters needed to distinguish any two hashes in the codebase; - -- we don't have to compute it separately for different namespaces - hashLength :: m Int, - termReferencesByPrefix :: ShortHash -> m (Set Reference.Id), - typeReferencesByPrefix :: ShortHash -> m (Set Reference.Id), - termReferentsByPrefix :: ShortHash -> m (Set Referent.Id), - branchHashesByPrefix :: ShortBranchHash -> m (Set BranchHash), - -- - lca :: (forall he e. [Causal m CausalHash he e] -> m (Maybe BranchHash)), - dependents :: Reference -> m (Maybe (Set Reference.Id)), - termDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)), - declDependencies :: Reference.Id -> m (Maybe (Set Reference.Id)) --, - -- -- |terms, types, patches, and branches - -- branchDependencies :: - -- Branch.Hash -> m (Maybe (CausalHash, BD.Dependencies)), - -- -- |the "new" terms and types mentioned in a patch - -- patchDependencies :: EditHash -> m (Set Reference, Set Reference) - } - -data GetRootBranchError - = NoRootBranch - | CouldntLoadRootBranch Hash - deriving (Show) diff --git a/codebase2/codebase/package.yaml b/codebase2/codebase/package.yaml index 4159de809d..d995f56cb0 100644 --- a/codebase2/codebase/package.yaml +++ b/codebase2/codebase/package.yaml @@ -11,4 +11,4 @@ dependencies: - mtl - text - unison-core - - unison-util \ No newline at end of file + - unison-util diff --git a/codebase2/codebase/unison-codebase.cabal b/codebase2/codebase/unison-codebase.cabal index 3f37375c76..c39fa1ffe8 100644 --- 
a/codebase2/codebase/unison-codebase.cabal +++ b/codebase2/codebase/unison-codebase.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: 6bc4dfe7298f08ffbdf912c194bdaa7b5d82d6239249779de15a536ce062e383 +-- hash: 143e303e5857e697f54b99302f3b1ddf2c44f27d5373dad588243be2034caeb2 name: unison-codebase version: 0.0.0 @@ -20,7 +20,6 @@ library exposed-modules: U.Codebase.Branch U.Codebase.Causal - U.Codebase.Codebase U.Codebase.Decl U.Codebase.HashTags U.Codebase.Kind diff --git a/codebase2/core/package.yaml b/codebase2/core/package.yaml index 8fbd66d244..a58fafa365 100644 --- a/codebase2/core/package.yaml +++ b/codebase2/core/package.yaml @@ -10,4 +10,4 @@ dependencies: - text - vector - prelude-extras # deprecated in favor of base - - unison-util \ No newline at end of file + - unison-util diff --git a/codebase2/notes.txt b/codebase2/notes.txt deleted file mode 100644 index 83d3d50993..0000000000 --- a/codebase2/notes.txt +++ /dev/null @@ -1,3 +0,0 @@ -Decision: - Try to reuse ABT.hashComponent stuff as-is? - Or rejigger it to be more general/lovely/etc.? <-- choosing \ No newline at end of file diff --git a/codebase2/util-serialization/U/Util/Serialization.hs b/codebase2/util-serialization/U/Util/Serialization.hs index 436b8dbec7..7354037556 100644 --- a/codebase2/util-serialization/U/Util/Serialization.hs +++ b/codebase2/util-serialization/U/Util/Serialization.hs @@ -40,8 +40,6 @@ import UnliftIO.Directory (createDirectoryIfMissing, doesFileExist) import Prelude hiding (readFile, writeFile) import Debug.Trace (traceM) --- import qualified U.Util.Monoid as Monoid - type Get a = forall m. MonadGet m => m a type Put a = forall m. 
MonadPut m => a -> m () diff --git a/codebase2/util-term/package.yaml b/codebase2/util-term/package.yaml index 95dc2a5108..b3ca704347 100644 --- a/codebase2/util-term/package.yaml +++ b/codebase2/util-term/package.yaml @@ -9,4 +9,4 @@ dependencies: - containers - mtl - unison-core - - unison-codebase \ No newline at end of file + - unison-codebase diff --git a/codebase2/util/U/Util/Alternative.hs b/codebase2/util/U/Util/Alternative.hs index c07601667f..a8068d3dbe 100644 --- a/codebase2/util/U/Util/Alternative.hs +++ b/codebase2/util/U/Util/Alternative.hs @@ -5,4 +5,4 @@ module U.Util.Alternative where import Control.Applicative (Alternative (empty)) whenM :: (Monad m, Alternative m) => m Bool -> a -> m a -whenM m a = do b <- m; if b then pure a else empty \ No newline at end of file +whenM m a = do b <- m; if b then pure a else empty diff --git a/codebase2/util/U/Util/Set.hs b/codebase2/util/U/Util/Set.hs index 788d2a2f9f..cefb6ff9e3 100644 --- a/codebase2/util/U/Util/Set.hs +++ b/codebase2/util/U/Util/Set.hs @@ -9,4 +9,4 @@ traverse :: (Applicative f, Ord b) => (a -> f b) -> Set a -> f (Set b) traverse f = fmap Set.fromList . T.traverse f . Set.toList mapMaybe :: Ord b => (a -> Maybe b) -> Set a -> Set b -mapMaybe f = Set.fromList . Maybe.mapMaybe f . Set.toList \ No newline at end of file +mapMaybe f = Set.fromList . Maybe.mapMaybe f . Set.toList diff --git a/codebase2/util/U/Util/String.hs b/codebase2/util/U/Util/String.hs index 81ccf74066..887c006975 100644 --- a/codebase2/util/U/Util/String.hs +++ b/codebase2/util/U/Util/String.hs @@ -4,4 +4,4 @@ import qualified Data.Text as Text import qualified U.Util.Text as Text stripMargin :: String -> String -stripMargin = Text.unpack . Text.stripMargin . Text.pack \ No newline at end of file +stripMargin = Text.unpack . Text.stripMargin . Text.pack diff --git a/hie.yaml b/hie.yaml index 8b2f3418eb..13aea48e7d 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,57 +1,174 @@ cradle: stack: - - path: "codebase1/codebase/." 
- component: "unison-codebase1:lib" - - - path: "codebase2/codebase/." + - path: "codebase2/codebase/./." component: "unison-codebase:lib" - - path: "codebase2/codebase-sqlite/." + - path: "codebase2/codebase-sqlite/./." component: "unison-codebase-sqlite:lib" - - path: "codebase2/codebase-sync/." + - path: "codebase2/codebase-sync/./." component: "unison-codebase-sync:lib" - - path: "codebase2/core/." + - path: "codebase2/core/./." component: "unison-core:lib" - - path: "codebase2/editor/." - component: "unison-editor:lib" - - - path: "codebase2/language/." - component: "unison-language:lib" - - - path: "codebase2/runtime/." - component: "unison-runtime:lib" - - - path: "codebase2/syntax/." - component: "unison-syntax:lib" - - - path: "codebase2/util/." + - path: "codebase2/util/./." component: "unison-util:lib" - - path: "codebase2/util-serialization/." + - path: "codebase2/util-serialization/./." component: "unison-util-serialization:lib" - - path: "codebase2/util-term/." + - path: "codebase2/util-term/./." component: "unison-util-term:lib" - path: "parser-typechecker/src" component: "unison-parser-typechecker:lib" - - path: "parser-typechecker/unison/." - component: "unison-parser-typechecker:exe:unison" - - path: "parser-typechecker/prettyprintdemo/Main.hs" component: "unison-parser-typechecker:exe:prettyprintdemo" - - path: "parser-typechecker/tests/." 
+ - path: "parser-typechecker/prettyprintdemo/Paths_unison_parser_typechecker.hs" + component: "unison-parser-typechecker:exe:prettyprintdemo" + + - path: "parser-typechecker/tests/Suite.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Core/Test/Name.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/ABT.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/ANF.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Cache.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase/Causal.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase/Path.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/ColorText.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Common.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/DataDeclaration.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/FileParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Git.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/GitSimple.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: 
"parser-typechecker/tests/Unison/Test/IO.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Lexer.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/MCode.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Range.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Referent.hs" component: "unison-parser-typechecker:exe:tests" - - path: "parser-typechecker/transcripts/." + - path: "parser-typechecker/tests/Unison/Test/Term.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/TermParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/TermPrinter.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Type.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Typechecker.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Typechecker/Components.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Typechecker/Context.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/TypePrinter.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Ucm.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/UnisonSources.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/UriParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - 
path: "parser-typechecker/tests/Unison/Test/Util/Bytes.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Util/PinBoard.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Util/Pretty.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/Var.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Unison/Test/VersionParser.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/tests/Paths_unison_parser_typechecker.hs" + component: "unison-parser-typechecker:exe:tests" + + - path: "parser-typechecker/transcripts/Transcripts.hs" component: "unison-parser-typechecker:exe:transcripts" - - path: "parser-typechecker/benchmarks/runtime/." + - path: "parser-typechecker/transcripts/Paths_unison_parser_typechecker.hs" + component: "unison-parser-typechecker:exe:transcripts" + + - path: "parser-typechecker/unison/Main.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/unison/System/Path.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/unison/Version.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/unison/Paths_unison_parser_typechecker.hs" + component: "unison-parser-typechecker:exe:unison" + + - path: "parser-typechecker/benchmarks/runtime/Main.hs" + component: "unison-parser-typechecker:bench:runtime" + + - path: "parser-typechecker/benchmarks/runtime/Paths_unison_parser_typechecker.hs" component: "unison-parser-typechecker:bench:runtime" - path: "unison-core/src" diff --git a/parser-typechecker/src/Unison/Codebase/Causal.hs b/parser-typechecker/src/Unison/Codebase/Causal.hs index bdd3dd7d76..db017e760a 100644 --- a/parser-typechecker/src/Unison/Codebase/Causal.hs +++ b/parser-typechecker/src/Unison/Codebase/Causal.hs @@ -15,11 +15,7 @@ 
import qualified U.Util.Cache as Cache import Unison.Hash (Hash) import Unison.Hashable (Hashable) import qualified Unison.Hashable as Hashable -import Prelude hiding - ( head, - read, - tail, - ) +import Prelude hiding (head, read, tail) {- `Causal a` has 5 operations, specified algebraically here: diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 3289c73281..5df439284f 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -138,6 +138,14 @@ sync12 t = do gc <- runDest' Q.getNurseryGeneration pure $ Sync (trySync t (succ gc)) +-- For each entity, we have to check to see +-- a) if it exists (if not, mark as missing in Status) +-- b) if any of its dependencies have not yet been synced +-- (if so, note as validation) +-- c) if any of its dependencies are missing from the source codebase +-- (if so, then filter them if possible, otherwise give this entity an +-- error Status) + trySync :: forall m n a. 
(R m n a, S m n, Applicative m) => @@ -486,13 +494,6 @@ runDest' ma = Reader.reader destConnection >>= flip runDB ma runDB :: Connection -> ReaderT Connection m a -> m a runDB conn action = Reader.runReaderT action conn --- each entity has to check to see --- a) if it exists (if not, mark as missing in Status) --- b) if any of its dependencies have not yet been synced --- (if so, note as validation) --- c) if any of its dependencies are missing from the source codebase --- (if so, then filter them if possible, otherwise give this entity an error Status) - data DoneCount = DoneCount { _doneBranches :: Int, _doneTerms :: Int, @@ -570,7 +571,7 @@ simpleProgress = Sync.Progress need done error allDone printProgress = do (DoneCount b t d p, ErrorCount b' t' d' p', _) <- State.get let ways :: [Maybe String] = - [ Monoid.whenM (b > 0 || b' > 0) (Just $ show b ++ " branches" ++ Monoid.whenM (b' > 0) (" (+" ++ show b' ++ " repaired)")), + [ Monoid.whenM (b > 0 || b' > 0) (Just $ show (b + b') ++ " branches" ++ Monoid.whenM (b' > 0) (" (" ++ show b' ++ " repaired)")), Monoid.whenM (t > 0 || t' > 0) (Just $ show t ++ " terms" ++ Monoid.whenM (t' > 0) (" (" ++ show t' ++ " errors)")), Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " types" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ " errors)")), Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " patches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ " errors)")) @@ -624,4 +625,4 @@ instance Ord (BranchStatus m) where x `compare` y = toBranchStatus' x `compare` toBranchStatus' y instance Show (BranchStatus m) where - show = show . toBranchStatus' \ No newline at end of file + show = show . 
toBranchStatus' diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs index 08eef04da2..542ecd1775 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs @@ -56,4 +56,4 @@ upgradeCodebase root = do lensStateT :: forall m s1 s2 a. Monad m => Lens' s2 s1 -> StateT s1 m a -> StateT s2 m a lensStateT l m = StateT \s2 -> do (a, s1') <- runStateT m (s2 ^. l) - pure (a, s2 & l .~ s1') \ No newline at end of file + pure (a, s2 & l .~ s1') diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs index a50abb24d6..7057be2ca3 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs @@ -1813,34 +1813,6 @@ loop = do prettyDefn renderR (r, (Foldable.toList -> names, Foldable.toList -> links)) = P.lines (P.shown <$> if null names then [NameSegment ""] else names) <> P.newline <> prettyLinks renderR r links void . eval . Eval . flip State.execStateT mempty $ goCausal [getCausal root'] - -- DebugDumpNamespacesI -> do - -- let seen h = State.gets (Map.member h) - -- set h d = State.modify (Map.insert h d) - -- getCausal b = (Branch.headHash b, pure $ Branch._history b) - -- goCausal :: forall m. Monad m => [(Branch.Hash, m (Branch.UnwrappedBranch m))] -> StateT (Map Branch.Hash Output.DN.DumpNamespace) m () - -- goCausal [] = pure () - -- goCausal ((h, mc) : queue) = do - -- ifM (seen h) (goCausal queue) do - -- traceShowM =<< State.gets Map.size - -- lift mc >>= \case - -- Causal.One h b -> goBranch h b mempty queue - -- Causal.Cons h b tail -> goBranch h b [fst tail] (tail : queue) - -- Causal.Merge h b (Map.toList -> tails) -> goBranch h b (map fst tails) (tails ++ queue) - -- goBranch :: forall m. 
Monad m => Branch.Hash -> Branch0 m -> [Branch.Hash] -> [(Branch.Hash, m (Branch.UnwrappedBranch m))] -> StateT (Map Branch.Hash Output.DN.DumpNamespace) m () - -- goBranch h b (Set.fromList -> causalParents) queue = case b of - -- Branch0 terms0 types0 children0 patches0 _ _ _ _ _ _ -> let - -- wrangleMetadata :: (Ord r, Ord n) => Metadata.Star r n -> r -> (r, (Set n, Set Metadata.Value)) - -- wrangleMetadata s r = - -- (r, (R.lookupDom r $ Star3.d1 s, Set.map snd . R.lookupDom r $ Star3.d3 s)) - -- terms = Map.fromList . map (wrangleMetadata terms0) . Foldable.toList $ Star3.fact terms0 - -- types = Map.fromList . map (wrangleMetadata types0) . Foldable.toList $ Star3.fact types0 - -- patches = fmap fst patches0 - -- children = fmap Branch.headHash children0 - -- in do - -- set h $ Output.DN.DumpNamespace terms types patches children causalParents - -- goCausal (map getCausal (Foldable.toList children0) ++ queue) - -- m <- eval . Eval . flip State.execStateT mempty $ goCausal [getCausal root'] - -- eval . 
Notify $ Output.DumpNamespace m DeprecateTermI {} -> notImplemented DeprecateTypeI {} -> notImplemented RemoveTermReplacementI from patchPath -> diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Output.hs b/parser-typechecker/src/Unison/Codebase/Editor/Output.hs index 341059fa8a..ac53ed6b64 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/Output.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/Output.hs @@ -206,7 +206,6 @@ data Output v | DumpNumberedArgs NumberedArgs | DumpBitBooster Branch.Hash (Map Branch.Hash [Branch.Hash]) | DumpUnisonFileHashes Int [(Name, Reference.Id)] [(Name, Reference.Id)] [(Name, Reference.Id)] - | DumpNamespace (Map Branch.Hash DumpNamespace) | BadName String | DefaultMetadataNotification | BadRootBranch GetRootBranchError @@ -327,7 +326,6 @@ isFailure o = case o of ListDependents{} -> False TermMissingType{} -> True DumpUnisonFileHashes _ x y z -> x == mempty && y == mempty && z == mempty - DumpNamespace _ -> False isNumberedFailure :: NumberedOutput v -> Bool isNumberedFailure = \case diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index fccccba0b6..3c5246ea09 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -10,7 +10,7 @@ module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init, unsa import qualified Control.Concurrent import qualified Control.Exception -import Control.Monad (filterM, when, (>=>), unless, forever) +import Control.Monad (filterM, when, (>=>), unless) import Control.Monad.Except (ExceptT, MonadError (throwError), runExceptT) import qualified Control.Monad.Except as Except import Control.Monad.Extra (ifM, unlessM, (||^)) @@ -95,9 +95,6 @@ import Unison.Util.Timing (time) import UnliftIO (MonadIO, catchIO, liftIO) import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, 
doesFileExist) import UnliftIO.STM -import qualified Unison.Util.TQueue as TQueue -import qualified Unison.Codebase.Watch as Watch -import UnliftIO.Concurrent (forkIO, killThread) import qualified System.Console.ANSI as ANSI debug, debugProcessBranches :: Bool @@ -190,8 +187,6 @@ codebaseExists root = liftIO do ) (const $ pure False) --- and <$> traverse doesDirectoryExist (minimalCodebaseStructure root) - -- 1) buffer up the component -- 2) in the event that the component is complete, then what? -- * can write component provided all of its dependency components are complete. @@ -486,7 +481,7 @@ sqliteCodebase root = do atomically $ modifyTVar rootBranchCache (fmap . second $ const branch1) rootBranchUpdates :: MonadIO m => TVar (Maybe (Q.DataVersion, a)) -> m (IO (), IO (Set Branch.Hash)) - rootBranchUpdates rootBranchCache = do + rootBranchUpdates _rootBranchCache = do -- branchHeadChanges <- TQueue.newIO -- (cancelWatch, watcher) <- Watch.watchDirectory' (v2dir root) -- watcher1 <- @@ -742,11 +737,10 @@ sqliteCodebase root = do when debugProcessBranches $ traceM $ "processBranches B " ++ take 10 (show h) ifM @(ExceptT Sync22.Error m) (lift $ Codebase1.branchExists dest h) - ( do + do when debugProcessBranches $ traceM " already exists in dest db" processBranches sync progress src dest rest - ) - ( do + do when debugProcessBranches $ traceM " doesn't exist in dest db" let h2 = CausalHash . 
Cv.hash1to2 $ Causal.unRawHash h lift (flip runReaderT srcConn (Q.loadCausalHashIdByCausalHash h2)) >>= \case @@ -764,7 +758,6 @@ sqliteCodebase root = do let bs = map (uncurry B) branchDeps os = map O (es <> ts <> ds) in processBranches @m sync progress src dest (os ++ bs ++ B h mb : rest) - ) processBranches sync progress src dest (O h : rest) = do when debugProcessBranches $ traceM $ "processBranches O " ++ take 10 (show h) (runExceptT $ flip runReaderT srcConn (Q.expectHashIdByHash (Cv.hash1to2 h) >>= Q.expectObjectIdForAnyHashId)) >>= \case @@ -778,24 +771,19 @@ sqliteCodebase root = do se $ processBranches sync progress' src dest [B bHash (pure b)] pure $ Validation.valueOr (error . show) result - -- Do we want to include causal hashes here or just namespace hashes? - -- Could we expose just one or the other of them to the user? - -- Git uses commit hashes and tree hashes (analogous to causal hashes - -- and namespace hashes, respectively), but the user is presented - -- primarily with commit hashes. - -- Arya leaning towards doing the same for Unison. - - -- let finalizer :: MonadIO m => m () - -- finalizer = do - -- decls <- readTVarIO declBuffer - -- terms <- readTVarIO termBuffer - -- let printBuffer header b = - -- liftIO - -- if b /= mempty - -- then putStrLn header >> putStrLn "" >> print b - -- else pure () - -- printBuffer "Decls:" decls - -- printBuffer "Terms:" terms + -- we don't currently have any good opportunity to call this sanity check; + -- at ucm shutdown + let _finalizer :: MonadIO m => m () + _finalizer = do + decls <- readTVarIO declBuffer + terms <- readTVarIO termBuffer + let printBuffer header b = + liftIO + if b /= mempty + then putStrLn header >> putStrLn "" >> print b + else pure () + printBuffer "Decls:" decls + printBuffer "Terms:" terms pure . 
Right $ ( Codebase1.Codebase @@ -933,7 +921,7 @@ viewRemoteBranch' (repo, sbh, path) = runExceptT do remotePath <- time "Git fetch" $ pullBranch repo ifM (codebaseExists remotePath) - ( do + do codebase <- lift (sqliteCodebase remotePath) >>= Validation.valueOr (\_missingSchema -> throwError $ GitError.CouldntOpenCodebase repo remotePath) . fmap pure @@ -959,7 +947,6 @@ viewRemoteBranch' (repo, sbh, path) = runExceptT do Nothing -> throwError $ GitError.NoRemoteNamespaceWithHash repo sbh _ -> throwError $ GitError.RemoteNamespaceHashAmbiguous repo sbh branchCompletions pure (Branch.getAt' path branch, remotePath) - ) -- else there's no initialized codebase at this repo; we pretend there's an empty one. (pure (Branch.empty, remotePath)) @@ -976,9 +963,7 @@ pushGitRootBranch syncToDirectory branch repo syncMode = runExceptT do -- Pull the remote repo into a staging directory (remoteRoot, remotePath) <- Except.ExceptT $ viewRemoteBranch' (repo, Nothing, Path.empty) ifM - ( pure (remoteRoot == Branch.empty) - ||^ lift (remoteRoot `Branch.before` branch) - ) + (pure (remoteRoot == Branch.empty) ||^ lift (remoteRoot `Branch.before` branch)) -- ours is newer 👍, meaning this is a fast-forward push, -- so sync branch to staging area (stageAndPush remotePath) diff --git a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs index 88a64abd1b..35ed685156 100644 --- a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs +++ b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs @@ -1036,22 +1036,6 @@ notifyUser dir o = case o of "", "Paste that output into http://bit-booster.com/graph.html" ] - -- DumpNamespace m -> pure $ P.shown m - DumpNamespace m -> let - prettyDump (h, DN.DumpNamespace terms types patches children causalParents) = - P.lit "Namespace " <> P.shown h <> P.newline <> (P.indentN 2 $ P.linesNonEmpty [ - Monoid.unlessM (null causalParents) $ P.lit "Causal Parents:" <> P.newline <> 
P.indentN 2 (P.lines (map P.shown $ Set.toList causalParents)) - , Monoid.unlessM (null terms) $ P.lit "Terms:" <> P.newline <> P.indentN 2 (P.lines (map (prettyDefn Referent.toText) $ Map.toList terms)) - , Monoid.unlessM (null types) $ P.lit "Types:" <> P.newline <> P.indentN 2 (P.lines (map (prettyDefn Reference.toText) $ Map.toList types)) - , Monoid.unlessM (null patches) $ P.lit "Patches:" <> P.newline <> P.indentN 2 (P.column2 (map (bimap P.shown P.shown) $ Map.toList patches)) - , Monoid.unlessM (null children) $ P.lit "Children:" <> P.newline <> P.indentN 2 (P.column2 (map (bimap P.shown P.shown) $ Map.toList children)) - ]) - where - prettyLinks renderR r [] = P.indentN 2 $ P.text (renderR r) - prettyLinks renderR r links = P.indentN 2 (P.lines (P.text (renderR r) : (links <&> \r -> "+ " <> P.text (Reference.toText r)))) - prettyDefn renderR (r, (Foldable.toList -> names, Foldable.toList -> links)) = - P.lines (P.shown <$> if null names then [NameSegment ""] else names) <> P.newline <> prettyLinks renderR r links - in pure $ P.lines (map prettyDump $ Map.toList m) ListDependents hqLength ld names missing -> pure $ if names == mempty && missing == mempty then c (prettyLabeledDependency hqLength ld) <> " doesn't have any dependents." diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 0428eb1ee8..364b5ddd31 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -210,10 +210,17 @@ main = do upgradeCodebase :: Maybe Codebase.CodebasePath -> IO () upgradeCodebase mcodepath = Codebase.getCodebaseDir mcodepath >>= \root -> do - putStrLn $ "I'm upgrading the codebase in '" ++ root ++ "', but it will take a while." + PT.putPrettyLn $ + "I'm upgrading the codebase in '" <> P.string root <> "', but it will take a while." 
Upgrade12.upgradeCodebase root - putStrLn $ "\nTry it out and once you're satisfied, you may delete the old version from\n\n\t'" - ++ Codebase.codebasePath (FC.init @IO) root ++ "';\n\nbut there's no rush." + PT.putPrettyLn + $ P.newline + <> "Try it out and once you're satisfied, you can safely(?) delete the old version from" + <> P.newline + <> P.indentN 2 (P.string $ Codebase.codebasePath (FC.init @IO) root) + <> P.newline + <> "but there's no rush. You can access the old codebase again by passing the" + <> P.backticked "--old-codebase" <> "flag at startup." prepareTranscriptDir :: Codebase.Init IO Symbol Ann -> Bool -> Maybe FilePath -> IO FilePath prepareTranscriptDir cbInit inFork mcodepath = do diff --git a/stack.yaml b/stack.yaml index dcac75c6be..e69f827a92 100644 --- a/stack.yaml +++ b/stack.yaml @@ -58,7 +58,7 @@ extra-deps: ghc-options: # All packages - "$locals": -Wall -Wno-name-shadowing -Wno-type-defaults -Wno-missing-pattern-synonym-signatures -fprint-expanded-synonyms #-freverse-errors + "$locals": -Wall -Werror -Wno-name-shadowing -Wno-type-defaults -Wno-missing-pattern-synonym-signatures -fprint-expanded-synonyms -fwrite-ide-info #-freverse-errors # See https://github.com/haskell/haskell-language-server/issues/208 "$everything": -haddock From 380a44d82f4b823615ea8ea2d08a2374a6e2c66a Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 01:27:06 -0600 Subject: [PATCH 201/225] fix warnings --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 2 -- parser-typechecker/src/Unison/Codebase/Editor/Output.hs | 1 - parser-typechecker/src/Unison/CommandLine/OutputMessages.hs | 5 ++--- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index c38c0eb80b..098fd8f7bd 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -55,8 +55,6 @@ 
import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, throwIO, try, tryAny, withRunInIO) import UnliftIO.Concurrent (myThreadId) import qualified Control.Monad.Writer as Writer -import Data.Functor (void) -import qualified Data.Text as Text -- * types diff --git a/parser-typechecker/src/Unison/Codebase/Editor/Output.hs b/parser-typechecker/src/Unison/Codebase/Editor/Output.hs index ac53ed6b64..a338d70e7f 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/Output.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/Output.hs @@ -56,7 +56,6 @@ import Unison.ShortHash (ShortHash) import Unison.Codebase.ShortBranchHash (ShortBranchHash) import Unison.Codebase.Editor.RemoteRepo import Unison.Codebase.Editor.Output.BranchDiff (BranchDiffOutput) -import Unison.Codebase.Editor.Output.DumpNamespace (DumpNamespace) import Unison.LabeledDependency (LabeledDependency) type ListDetailed = Bool diff --git a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs index 35ed685156..7ae1ecf929 100644 --- a/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs +++ b/parser-typechecker/src/Unison/CommandLine/OutputMessages.hs @@ -23,7 +23,6 @@ import Unison.Server.Backend (ShallowListEntry(..), TermEntry(..), TypeEntry(..) 
import Control.Lens import qualified Control.Monad.State.Strict as State import Data.Bifunctor (first, second) -import qualified Data.Foldable as Foldable import Data.List (sort, stripPrefix) import Data.List.Extra (nubOrdOn, nubOrd, notNull) import qualified Data.Map as Map @@ -63,7 +62,6 @@ import qualified Unison.HashQualified as HQ import qualified Unison.HashQualified' as HQ' import Unison.Name (Name) import qualified Unison.Name as Name -import Unison.NameSegment (NameSegment(NameSegment)) import Unison.NamePrinter (prettyHashQualified, prettyReference, prettyReferent, prettyLabeledDependency, @@ -103,7 +101,6 @@ import Unison.Var (Var) import qualified Unison.Var as Var import qualified Unison.Codebase.Editor.SlurpResult as SlurpResult import Unison.Codebase.Editor.DisplayObject (DisplayObject(MissingObject, BuiltinObject, UserObject)) -import qualified Unison.Codebase.Editor.Output.DumpNamespace as DN import qualified Unison.Codebase.Editor.Input as Input import qualified Unison.Hash as Hash import qualified Unison.Codebase.Causal as Causal @@ -671,6 +668,8 @@ notifyUser dir o = case o of CouldntParseRootBranch repo s -> P.wrap $ "I couldn't parse the string" <> P.red (P.string s) <> "into a namespace hash, when opening the repository at" <> P.group (prettyRepoBranch repo <> ".") + CouldntLoadSyncedBranch h -> P.wrap $ "I just finished importing the branch" + <> P.red (P.shown h) <> "but now I can't find it." NoGit -> P.wrap $ "I couldn't find git. Make sure it's installed and on your path." 
CloneException repo msg -> P.wrap $ From bfe4e8175e2a78490e1eaa65ccf8a25d58fdb6dd Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 03:50:04 -0600 Subject: [PATCH 202/225] deleting unused stuff --- .../U/Codebase/Sqlite/Operations.hs | 20 ----- .../U/Codebase/Sqlite/Patch/Diff.hs | 8 -- .../U/Codebase/Sqlite/Queries.hs | 3 - .../Conversion/Sync12BranchDependencies.hs | 88 ------------------- .../src/Unison/Codebase/Editor/HandleInput.hs | 3 - .../unison-parser-typechecker.cabal | 3 +- 6 files changed, 1 insertion(+), 124 deletions(-) delete mode 100644 parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index 69d2feef1d..ad0d493029 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -196,18 +196,10 @@ primaryHashToMaybeObjectId h = do Just hashId -> Q.maybeObjectIdForPrimaryHashId hashId Nothing -> pure Nothing -primaryHashToExistingPatchObjectId :: EDB m => PatchHash -> m Db.PatchObjectId -primaryHashToExistingPatchObjectId = - fmap Db.PatchObjectId . primaryHashToExistingObjectId . unPatchHash - primaryHashToMaybePatchObjectId :: EDB m => PatchHash -> m (Maybe Db.PatchObjectId) primaryHashToMaybePatchObjectId = (fmap . fmap) Db.PatchObjectId . primaryHashToMaybeObjectId . unPatchHash -primaryHashToMaybeBranchObjectId :: DB m => BranchHash -> m (Maybe Db.BranchObjectId) -primaryHashToMaybeBranchObjectId = - (fmap . fmap) Db.BranchObjectId . primaryHashToMaybeObjectId . 
unBranchHash - objectExistsForHash :: DB m => H.Hash -> m Bool objectExistsForHash h = isJust <$> runMaybeT do @@ -1006,18 +998,6 @@ saveBranch (C.Causal hc he parents me) = do (Vector.fromList (Foldable.toList branchCausalIds)) pure (ids, lBranch) -lookupBranchLocalText :: S.BranchLocalIds -> LocalTextId -> Db.TextId -lookupBranchLocalText li (LocalTextId w) = S.BranchFormat.branchTextLookup li Vector.! fromIntegral w - -lookupBranchLocalDefn :: S.BranchLocalIds -> LocalDefnId -> Db.ObjectId -lookupBranchLocalDefn li (LocalDefnId w) = S.BranchFormat.branchDefnLookup li Vector.! fromIntegral w - -lookupBranchLocalPatch :: BranchLocalIds -> LocalPatchObjectId -> Db.PatchObjectId -lookupBranchLocalPatch li (LocalPatchObjectId w) = S.BranchFormat.branchPatchLookup li Vector.! fromIntegral w - -lookupBranchLocalChild :: BranchLocalIds -> LocalBranchChildId -> (Db.BranchObjectId, Db.CausalHashId) -lookupBranchLocalChild li (LocalBranchChildId w) = S.BranchFormat.branchChildLookup li Vector.! fromIntegral w - loadRootCausal :: EDB m => m (C.Branch.Causal m) loadRootCausal = liftQ Q.loadNamespaceRoot >>= loadCausalByCausalHashId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index 613f70a5a4..077a18c8de 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -30,14 +30,6 @@ data PatchDiff' t h d = PatchDiff } deriving (Eq, Ord, Show) --- | the number of dbids in the patch, an approximation to disk size -idcount :: PatchDiff' t h d -> Int -idcount (PatchDiff atm atp rtm rtp) = - go atm + go atp + go rtm + go rtp - where - go :: Foldable f => f (Set a) -> Int - go fsa = (Monoid.getSum . foldMap (Monoid.Sum . 
Set.size)) fsa + length fsa - trimap :: (Ord t', Ord h', Ord d') => (t -> t') -> diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 098fd8f7bd..54a65f0379 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -375,9 +375,6 @@ loadOldCausalValueHash id = WHERE old_hash_id = ? |] -saveCausalParent :: DB m => CausalHashId -> CausalHashId -> m () -saveCausalParent child parent = saveCausalParents child [parent] - saveCausalParents :: DB m => CausalHashId -> [CausalHashId] -> m () saveCausalParents child parents = executeMany sql $ (child,) <$> parents where sql = [here| diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs deleted file mode 100644 index 6e1a3acb1c..0000000000 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12BranchDependencies.hs +++ /dev/null @@ -1,88 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE PatternSynonyms #-} -{-# LANGUAGE RecordWildCards #-} - -module Unison.Codebase.Conversion.Sync12BranchDependencies where - -import Data.Foldable (toList) -import Data.Map (Map) -import qualified Data.Map as Map -import Data.Monoid.Generic (GenericMonoid (..), GenericSemigroup (..)) -import Data.Set (Set) -import qualified Data.Set as Set -import GHC.Generics (Generic) -import Unison.Codebase.Branch (Branch (Branch), Branch0, EditHash) -import qualified Unison.Codebase.Branch as Branch -import qualified Unison.Codebase.Causal as Causal -import Unison.Codebase.Patch (Patch) -import Unison.NameSegment (NameSegment) -import Unison.Reference (Reference, pattern Derived) -import Unison.Referent (Referent) -import qualified Unison.Referent as Referent -import qualified Unison.Util.Relation as R -import qualified Unison.Util.Star3 as Star3 -import 
Unison.Hash (Hash) -import qualified Unison.Reference as Reference - -type Branches m = [(Branch.Hash, m (Branch m))] - -data Dependencies = Dependencies - { patches :: Set EditHash - , terms :: Map Hash Reference.Size - , decls :: Map Hash Reference.Size - } - deriving Show - deriving Generic - deriving Semigroup via GenericSemigroup Dependencies - deriving Monoid via GenericMonoid Dependencies - -data Dependencies' = Dependencies' - { patches' :: [EditHash] - , terms' :: [(Hash, Reference.Size)] - , decls' :: [(Hash, Reference.Size)] - } - deriving (Eq, Show) - deriving Generic - deriving Semigroup via GenericSemigroup Dependencies' - deriving Monoid via GenericMonoid Dependencies' - - -to' :: Dependencies -> Dependencies' -to' Dependencies{..} = - Dependencies' (toList patches) (Map.toList terms) (Map.toList decls) - -fromBranch :: Applicative m => Branch m -> (Branches m, Dependencies) -fromBranch (Branch c) = case c of - Causal.One _hh e -> fromBranch0 e - Causal.Cons _hh e (h, m) -> fromBranch0 e <> fromTails (Map.singleton h m) - Causal.Merge _hh e tails -> fromBranch0 e <> fromTails tails - where - fromTails m = ([(h, Branch <$> mc) | (h, mc) <- Map.toList m], mempty) - -fromBranch0 :: Applicative m => Branch0 m -> (Branches m, Dependencies) -fromBranch0 b = - ( fromChildren (Branch._children b) - , fromTermsStar (Branch._terms b) - <> fromTypesStar (Branch._types b) - <> fromEdits (Branch._edits b) ) - where - fromChildren :: Applicative m => Map NameSegment (Branch m) -> Branches m - fromChildren m = [ (Branch.headHash b, pure b) | b <- toList m ] - references :: Branch.Star r NameSegment -> [r] - references = toList . R.dom . Star3.d1 - mdValues :: Branch.Star r NameSegment -> [Reference] - mdValues = fmap snd . toList . R.ran . 
Star3.d3 - fromTermsStar :: Branch.Star Referent NameSegment -> Dependencies - fromTermsStar s = Dependencies mempty terms decls where - terms = Map.fromList $ - [ (h, n) | Referent.Ref (Derived h _ n) <- references s] ++ - [ (h, n) | (Derived h _ n) <- mdValues s] - decls = Map.fromList $ - [ (h, n) | Referent.Con (Derived h _i n) _ _ <- references s ] - fromTypesStar :: Branch.Star Reference NameSegment -> Dependencies - fromTypesStar s = Dependencies mempty terms decls where - terms = Map.fromList [ (h, n) | (Derived h _ n) <- mdValues s ] - decls = Map.fromList [ (h, n) | (Derived h _ n) <- references s ] - fromEdits :: Map NameSegment (EditHash, m Patch) -> Dependencies - fromEdits m = Dependencies (Set.fromList . fmap fst $ toList m) mempty mempty diff --git a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs index 7057be2ca3..9274c53474 100644 --- a/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs +++ b/parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs @@ -144,9 +144,6 @@ type F m i v = Free (Command m i v) -- type (Action m i v) a type Action m i v = MaybeT (StateT (LoopState m v) (F m i v)) -_liftToAction :: m a -> Action m i v a -_liftToAction = lift . lift . Free.eval . 
Eval - data LoopState m v = LoopState { _root :: Branch m diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 2013ab7069..2b38bf2a8d 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: c6e26ae011df5a4dd154f8c552bf57f686a830cb49e5794f295d593d81348551 +-- hash: f4014930fb5c946ea852d2c0b1ba1fb08245009446089637a92d986501586ca7 name: unison-parser-typechecker version: 0.0.0 @@ -36,7 +36,6 @@ library Unison.Codebase.Classes Unison.Codebase.CodeLookup Unison.Codebase.Conversion.Sync12 - Unison.Codebase.Conversion.Sync12BranchDependencies Unison.Codebase.Conversion.Upgrade12 Unison.Codebase.Editor.AuthorInfo Unison.Codebase.Editor.Command From f31782a4c29991886a1b0553828b7080a83d1cb4 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 08:09:41 -0600 Subject: [PATCH 203/225] cleanup --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs | 1 - 1 file changed, 1 deletion(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs index 077a18c8de..03ed70109e 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Patch/Diff.hs @@ -11,7 +11,6 @@ import U.Codebase.Sqlite.LocalIds (LocalDefnId, LocalHashId, LocalTextId) import U.Codebase.Sqlite.Patch.TermEdit (TermEdit') import U.Codebase.Sqlite.Patch.TypeEdit (TypeEdit') import qualified U.Util.Map as Map -import qualified Data.Monoid as Monoid type PatchDiff = PatchDiff' Db.TextId Db.HashId Db.ObjectId From c4f0727a3ef37c0d161aad279ace133026686ce6 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 08:10:14 -0600 Subject: [PATCH 204/225] cleanup hie.yaml --- hie.yaml | 151 
++++--------------------------------------------------- 1 file changed, 11 insertions(+), 140 deletions(-) diff --git a/hie.yaml b/hie.yaml index 13aea48e7d..91af04b86d 100644 --- a/hie.yaml +++ b/hie.yaml @@ -1,24 +1,24 @@ cradle: stack: - - path: "codebase2/codebase/./." + - path: "codebase2/codebase" component: "unison-codebase:lib" - - path: "codebase2/codebase-sqlite/./." + - path: "codebase2/codebase-sqlite" component: "unison-codebase-sqlite:lib" - - path: "codebase2/codebase-sync/./." + - path: "codebase2/codebase-sync" component: "unison-codebase-sync:lib" - - path: "codebase2/core/./." + - path: "codebase2/core" component: "unison-core:lib" - - path: "codebase2/util/./." + - path: "codebase2/util" component: "unison-util:lib" - - path: "codebase2/util-serialization/./." + - path: "codebase2/util-serialization" component: "unison-util-serialization:lib" - - path: "codebase2/util-term/./." + - path: "codebase2/util-term" component: "unison-util-term:lib" - path: "parser-typechecker/src" @@ -30,145 +30,16 @@ cradle: - path: "parser-typechecker/prettyprintdemo/Paths_unison_parser_typechecker.hs" component: "unison-parser-typechecker:exe:prettyprintdemo" - - path: "parser-typechecker/tests/Suite.hs" + - path: "parser-typechecker/tests" component: "unison-parser-typechecker:exe:tests" - - path: "parser-typechecker/tests/Unison/Core/Test/Name.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/ABT.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/ANF.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Cache.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase/Causal.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: 
"parser-typechecker/tests/Unison/Test/Codebase/FileCodebase.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase/Path.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/ColorText.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Common.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/DataDeclaration.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/FileParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Git.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/GitSimple.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/IO.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Lexer.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/MCode.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Range.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Referent.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Term.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/TermParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/TermPrinter.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: 
"parser-typechecker/tests/Unison/Test/Type.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker/Components.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker/Context.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Typechecker/TypeError.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/TypePrinter.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Ucm.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/UnisonSources.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/UriParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Util/Bytes.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Util/PinBoard.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Util/Pretty.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/Var.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Unison/Test/VersionParser.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/tests/Paths_unison_parser_typechecker.hs" - component: "unison-parser-typechecker:exe:tests" - - - path: "parser-typechecker/transcripts/Transcripts.hs" + - path: "parser-typechecker/transcripts" component: "unison-parser-typechecker:exe:transcripts" - - path: "parser-typechecker/transcripts/Paths_unison_parser_typechecker.hs" - 
component: "unison-parser-typechecker:exe:transcripts" - - - path: "parser-typechecker/unison/Main.hs" - component: "unison-parser-typechecker:exe:unison" - - - path: "parser-typechecker/unison/System/Path.hs" - component: "unison-parser-typechecker:exe:unison" - - - path: "parser-typechecker/unison/Version.hs" - component: "unison-parser-typechecker:exe:unison" - - - path: "parser-typechecker/unison/Paths_unison_parser_typechecker.hs" + - path: "parser-typechecker/unison" component: "unison-parser-typechecker:exe:unison" - - path: "parser-typechecker/benchmarks/runtime/Main.hs" - component: "unison-parser-typechecker:bench:runtime" - - - path: "parser-typechecker/benchmarks/runtime/Paths_unison_parser_typechecker.hs" + - path: "parser-typechecker/benchmarks/runtime" component: "unison-parser-typechecker:bench:runtime" - path: "unison-core/src" From 4e6303aea1624142784378b13d92f157b225bb1f Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 20:32:10 -0600 Subject: [PATCH 205/225] add failing test for test watch sync --- .../src/Unison/Codebase/FileCodebase.hs | 4 +- parser-typechecker/tests/Suite.hs | 4 +- .../tests/Unison/Test/Codebase/Upgrade12.hs | 40 ++++++++- .../Unison/Test/{GitSimple.hs => GitSync.hs} | 90 +++++++++++++++++-- parser-typechecker/tests/Unison/Test/Ucm.hs | 10 +++ .../unison-parser-typechecker.cabal | 4 +- 6 files changed, 140 insertions(+), 12 deletions(-) rename parser-typechecker/tests/Unison/Test/{GitSimple.hs => GitSync.hs} (74%) diff --git a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs index 1371eeb677..35911266a9 100644 --- a/parser-typechecker/src/Unison/Codebase/FileCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/FileCodebase.hs @@ -18,7 +18,7 @@ import qualified Data.Set as Set import qualified Data.Text as Text import qualified Data.Text.IO as TextIO import System.Directory (canonicalizePath) -import System.FilePath (takeFileName) +import 
System.FilePath (dropExtension) import Unison.Codebase (BuiltinAnnotation, Codebase (Codebase), CodebasePath) import qualified Unison.Codebase as Codebase import Unison.Codebase.Branch (Branch) @@ -209,7 +209,7 @@ codebase1' syncToDirectory branchCache fmtV@(S.Format getV putV) fmtA@(S.Format let wp = watchesDir path (Text.pack k) createDirectoryIfMissing True wp ls <- listDirectory wp - pure $ ls >>= (toList . componentIdFromString . takeFileName) + pure $ ls >>= (toList . componentIdFromString . dropExtension) getReflog :: m [Reflog.Entry] getReflog = liftIO diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs index 133ff16968..03c596b4b9 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -37,7 +37,7 @@ import qualified Unison.Test.ANF as ANF import qualified Unison.Test.MCode as MCode import qualified Unison.Test.VersionParser as VersionParser import qualified Unison.Test.Git as Git -import qualified Unison.Test.GitSimple as GitSimple +import qualified Unison.Test.GitSync as GitSync import qualified Unison.Test.Codebase.Upgrade12 as Upgrade12 test :: Test () @@ -70,7 +70,7 @@ test = tests , Context.test , Git.test , Upgrade12.test - , GitSimple.test + , GitSync.test , Name.test , VersionParser.test , Pretty.test diff --git a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs index 87c436a79d..e6ed7dff7d 100644 --- a/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs +++ b/parser-typechecker/tests/Unison/Test/Codebase/Upgrade12.hs @@ -1,14 +1,19 @@ {-# LANGUAGE TemplateHaskell #-} {-# Language QuasiQuotes #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} + module Unison.Test.Codebase.Upgrade12 (test) where import Data.Functor (void) import Data.String.Here.Interpolated (i) -import EasyTest (Test, io, ok, scope, tests) +import EasyTest (Test, expectJust, io, ok, scope, tests) import Shellmet () 
+import qualified Unison.Codebase as Codebase import qualified Unison.Test.Ucm as Ucm +import Unison.UnisonFile (pattern TestWatch) +import Debug.Trace (traceShowM) test :: Test () test = scope "codebase.upgrade12" $ tests [ @@ -200,5 +205,38 @@ test = scope "codebase.upgrade12" $ tests [ .> history ``` |] + ok, + + scope "test-watches" do + (watchTerms1, watchTerms2) <- io do + c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + Ucm.runTranscript c1 [i| + ```ucm + .> builtins.merge + ``` + ```unison + test> pass = [Ok "Passed"] + ``` + ```ucm + .> add + ``` + |] + c1' <- Ucm.lowLevel c1 + watches1@(_:_) <- Codebase.watches c1' TestWatch + watchTerms1 <- traverse (Codebase.getWatch c1' TestWatch) watches1 + Ucm.runTranscript c1 [i| + ```unison + test> pass = [Ok "Passed"] + ``` + |] + c2 <- Ucm.upgradeCodebase c1 + c2' <- Ucm.lowLevel c2 + watchTerms2 <- traverse (Codebase.getWatch c2' TestWatch) watches1 + traceShowM watches1 + traceShowM watchTerms1 + traceShowM watchTerms2 + pure (watchTerms1, watchTerms2) + expectJust (sequence watchTerms1) + expectJust (sequence watchTerms2) ok ] diff --git a/parser-typechecker/tests/Unison/Test/GitSimple.hs b/parser-typechecker/tests/Unison/Test/GitSync.hs similarity index 74% rename from parser-typechecker/tests/Unison/Test/GitSimple.hs rename to parser-typechecker/tests/Unison/Test/GitSync.hs index edda57149e..048a1fc99c 100644 --- a/parser-typechecker/tests/Unison/Test/GitSimple.hs +++ b/parser-typechecker/tests/Unison/Test/GitSync.hs @@ -1,18 +1,25 @@ {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE QuasiQuotes #-} -module Unison.Test.GitSimple where +module Unison.Test.GitSync where +import Data.Maybe (fromJust) +import Data.String.Here.Interpolated (i) import qualified Data.Text as Text import EasyTest import Shellmet () import System.Directory (removeDirectoryRecursive) import System.FilePath (()) import qualified System.IO.Temp as Temp +import qualified Unison.Codebase as Codebase +import 
Unison.Codebase (Codebase) +import Unison.Parser (Ann) import Unison.Prelude +import Unison.Symbol (Symbol) import Unison.Test.Ucm (CodebaseFormat, Transcript) import qualified Unison.Test.Ucm as Ucm -import Data.String.Here.Interpolated (i) +import Unison.UnisonFile (pattern TestWatch) -- keep it off for CI, since the random temp dirs it generates show up in the -- output, which causes the test output to change, and the "no change" check @@ -21,7 +28,7 @@ writeTranscriptOutput :: Bool writeTranscriptOutput = False test :: Test () -test = scope "git-simple" . tests $ +test = scope "gitsync22" . tests $ flip map [(Ucm.CodebaseFormat1 , "fc"), (Ucm.CodebaseFormat2, "sc")] \(fmt, name) -> scope name $ tests [ pushPullTest "typeAlias" fmt @@ -303,7 +310,36 @@ test = scope "git-simple" . tests $ > greatApp ``` |]) - -- , + , + watchPushPullTest "test-watches" fmt + (\repo -> [i| + ```ucm + .> builtins.merge + ``` + ```unison + test> pass = [Ok "Passed"] + ``` + ```ucm + .> add + .> push ${repo} + ``` + |]) + (\repo -> [i| + ```ucm + .> pull ${repo} + ``` + |]) + (\cb -> do + void . fmap (fromJust . sequence) $ + traverse (Codebase.getWatch cb TestWatch) =<< + Codebase.watches cb TestWatch) + + -- m [Reference.Id] + +-- traverse :: (Traversable t, Applicative f) => (a -> f b) -> t a -> f (t b) +-- watches :: UF.WatchKind -> m [Reference.Id] +-- getWatch :: UF.WatchKind -> Reference.Id -> m (Maybe (Term v a)) + -- pushPullTest "regular" fmt -- (\repo -> [i| -- ```ucm:hide @@ -363,7 +399,7 @@ pushPullTest name fmt authorScript userScript = scope name do userOutput <- Ucm.runTranscript user (userScript repo) when writeTranscriptOutput $ writeFile - ("unison-src""transcripts"("GitSimple." ++ name ++ ".output.md")) + ("unison-src""transcripts"("GitSync22." ++ name ++ ".output.md")) (authorOutput <> "\n-------\n" <> userOutput) -- if we haven't crashed, clean up! 
@@ -372,6 +408,50 @@ pushPullTest name fmt authorScript userScript = scope name do Ucm.deleteCodebase user ok +watchPushPullTest :: String -> CodebaseFormat -> (FilePath -> Transcript) -> (FilePath -> Transcript) -> (Codebase IO Symbol Ann -> IO ()) -> Test () +watchPushPullTest name fmt authorScript userScript codebaseCheck = scope name do + io do + repo <- initGitRepo + author <- Ucm.initCodebase fmt + authorOutput <- Ucm.runTranscript author (authorScript repo) + user <- Ucm.initCodebase fmt + userOutput <- Ucm.runTranscript user (userScript repo) + user' <- Ucm.lowLevel user + codebaseCheck user' + + when writeTranscriptOutput $ writeFile + ("unison-src""transcripts"("GitSync22." ++ name ++ ".output.md")) + (authorOutput <> "\n-------\n" <> userOutput) + + -- if we haven't crashed, clean up! + removeDirectoryRecursive repo + Ucm.deleteCodebase author + Ucm.deleteCodebase user + ok + + + -- scope "test-watches" do + -- (watchTerms1, watchTerms2) <- io do + -- c1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + -- Ucm.runTranscript c1 [i| + -- ```unison + -- test> x = 4 + -- ``` + -- ```ucm + -- .> add + -- ``` + -- |] + -- c1' <- Ucm.lowLevel c1 + -- watches1 <- Codebase.watches c1' TestWatch + -- watchTerms1 <- traverse (Codebase.getWatch c1' TestWatch) watches1 + -- c2 <- Ucm.upgradeCodebase c1 + -- c2' <- Ucm.lowLevel c2 + -- watchTerms2 <- traverse (Codebase.getWatch c2' TestWatch) watches1 + -- pure (watchTerms1, watchTerms2) + -- expectJust watchTerms1 + -- expectJust watchTerms2 + -- ok + initGitRepo :: IO FilePath initGitRepo = do tmp <- Temp.getCanonicalTemporaryDirectory >>= flip Temp.createTempDirectory ("git-simple") diff --git a/parser-typechecker/tests/Unison/Test/Ucm.hs b/parser-typechecker/tests/Unison/Test/Ucm.hs index c0786e9a84..8f02bb75bd 100644 --- a/parser-typechecker/tests/Unison/Test/Ucm.hs +++ b/parser-typechecker/tests/Unison/Test/Ucm.hs @@ -7,6 +7,7 @@ module Unison.Test.Ucm deleteCodebase, runTranscript, upgradeCodebase, + lowLevel, 
CodebaseFormat (..), Transcript, unTranscript, @@ -28,6 +29,9 @@ import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.Codebase.TranscriptParser as TR import Unison.Prelude (traceM) import qualified Unison.Util.Pretty as P +import Unison.Parser (Ann) +import Unison.Symbol (Symbol) +import qualified Data.Either as Either data CodebaseFormat = CodebaseFormat1 | CodebaseFormat2 deriving (Show) @@ -91,3 +95,9 @@ runTranscript (Codebase codebasePath fmt) transcript = do codebase when debugTranscriptOutput $ traceM output pure output + +lowLevel :: Codebase -> IO (Codebase.Codebase IO Symbol Ann) +lowLevel (Codebase root fmt) = do + let cbInit = case fmt of CodebaseFormat1 -> FC.init; CodebaseFormat2 -> SC.init + Either.fromRight (error "This really should have loaded") <$> + Codebase.Init.openCodebase cbInit root diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 2b38bf2a8d..818afc82c4 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: f4014930fb5c946ea852d2c0b1ba1fb08245009446089637a92d986501586ca7 +-- hash: 6773e97d0e1646a1218127f5065794946877513acef8b5c10126c3a88552966d name: unison-parser-typechecker version: 0.0.0 @@ -301,7 +301,7 @@ executable tests Unison.Test.DataDeclaration Unison.Test.FileParser Unison.Test.Git - Unison.Test.GitSimple + Unison.Test.GitSync Unison.Test.Lexer Unison.Test.MCode Unison.Test.Range From b3c26f1eab590c0c7bf05384f6f67b997b7cf151 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 20:32:29 -0600 Subject: [PATCH 206/225] improve upgrade help message --- parser-typechecker/unison/Main.hs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index f9f798a26d..8818596075 100644 --- 
a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -205,7 +205,9 @@ upgradeCodebase :: Maybe Codebase.CodebasePath -> IO () upgradeCodebase mcodepath = Codebase.getCodebaseDir mcodepath >>= \root -> do PT.putPrettyLn $ - "I'm upgrading the codebase in '" <> P.string root <> "', but it will take a while." + "I'm upgrading the codebase in '" <> P.string root <> "', but it will" + <> "take a while, and may even run out of memory. If you're having" + <> "trouble, contact us on #alphatesting and we'll try to help." Upgrade12.upgradeCodebase root PT.putPrettyLn $ P.newline From 11856b71c057fe1a9c943f39ebd60076f1fe63c5 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 28 Apr 2021 20:33:11 -0600 Subject: [PATCH 207/225] ignore .hie files created by probably -fwrite-ide-info --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 04f76c36f9..bf9b574196 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,6 @@ scratch.u # Stack .stack-work stack.yaml.lock + +# GHC +*.hie From e94ee9c9bc0f45f50e8049dd1eff41919637f1c1 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Fri, 30 Apr 2021 10:57:22 -0600 Subject: [PATCH 208/225] comments on schema --- codebase2/codebase-sqlite/sql/create.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index f625546d85..5985bdf46a 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -11,6 +11,7 @@ CREATE TABLE text ( id INTEGER PRIMARY KEY, text TEXT UNIQUE NOT NULL ); + -- just came up with this, a layer of indirection to allow multiple hash_ids to -- reference the same object. 
-- so: SELECT object.id, bytes FROM object @@ -68,6 +69,7 @@ CREATE TABLE namespace_root ( causal_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT namespace_root_fk1 REFERENCES causal(self_hash_id) ); +-- LCA computations only need to look at this table CREATE TABLE causal_parent ( causal_id INTEGER NOT NULL CONSTRAINT causal_parent_fk1 REFERENCES causal(self_hash_id), parent_id INTEGER NOT NULL CONSTRAINT causal_parent_fk2 REFERENCES causal(self_hash_id), From 9fe89794d4a8c5e7d50942a9a3913664d27abd59 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 1 May 2021 03:35:05 -0600 Subject: [PATCH 209/225] upgrade test watches --- .../src/Unison/Codebase/Conversion/Sync12.hs | 118 ++++++++++++++---- .../Unison/Codebase/Conversion/Upgrade12.hs | 31 +++-- 2 files changed, 117 insertions(+), 32 deletions(-) diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 5df439284f..453ea11fa9 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -35,6 +35,8 @@ import qualified Data.Set as Set import Data.Traversable (for) import Database.SQLite.Simple (Connection) import Debug.Trace (traceM) +import System.IO (stdout) +import System.IO.Extra (hFlush) import U.Codebase.Sqlite.DbId (Generation) import qualified U.Codebase.Sqlite.Queries as Q import U.Codebase.Sync (Sync (Sync), TrySyncResult) @@ -64,11 +66,10 @@ import Unison.Term (Term) import qualified Unison.Term as Term import Unison.Type (Type) import qualified Unison.Type as Type +import Unison.UnisonFile (WatchKind) import Unison.Util.Relation (Relation) import qualified Unison.Util.Relation as Relation import Unison.Util.Star3 (Star3 (Star3)) -import System.IO (stdout) -import System.IO.Extra (hFlush) debug :: Bool debug = False @@ -82,6 +83,7 @@ data Env m a = Env data Entity m = C Branch.Hash (m (UnwrappedBranch m)) | T Hash Reference.Size + | W WatchKind 
Reference.Id | D Hash Reference.Size | P Branch.EditHash @@ -104,29 +106,36 @@ data TermStatus | TermMissing | TermMissingType | TermMissingDependencies - deriving Show + deriving (Show) + +data WatchStatus + = WatchOk + | WatchNotCached + | WatchMissingDependencies + deriving (Show) data DeclStatus = DeclOk | DeclMissing | DeclMissingDependencies - deriving Show + deriving (Show) data PatchStatus = PatchOk | PatchMissing | PatchReplaced Branch.EditHash - deriving Show + deriving (Show) data Status m = Status { _branchStatus :: Map Branch.Hash (BranchStatus m), _termStatus :: Map Hash TermStatus, _declStatus :: Map Hash DeclStatus, - _patchStatus :: Map Branch.EditHash PatchStatus + _patchStatus :: Map Branch.EditHash PatchStatus, + _watchStatus :: Map (WatchKind, Reference.Id) WatchStatus } emptyStatus :: Status m -emptyStatus = Status mempty mempty mempty mempty +emptyStatus = Status mempty mempty mempty mempty mempty makeLenses ''Status @@ -190,6 +199,20 @@ trySync t _gc e = do t $ Codebase.putTerm dest (Reference.Id h i n) term typ setTermStatus h TermOk pure Sync.Done + W k r -> + getWatchStatus k r >>= \case + Just {} -> pure Sync.PreviouslyDone + Nothing -> do + runExceptT (runValidateT (checkWatchComponent (lift . lift . t) k r)) >>= \case + Left status -> do + setWatchStatus k r status + pure Sync.NonFatalError + Right (Left deps) -> + pure . Sync.Missing $ Foldable.toList deps + Right (Right watchResult) -> do + t $ Codebase.putWatch dest k r watchResult + setWatchStatus k r WatchOk + pure Sync.Done D h n -> getDeclStatus h >>= \case Just {} -> pure Sync.PreviouslyDone @@ -229,6 +252,9 @@ getDeclStatus h = use (declStatus . at h) getPatchStatus :: S m n => Hash -> n (Maybe PatchStatus) getPatchStatus h = use (patchStatus . at h) +getWatchStatus :: S m n => WatchKind -> Reference.Id -> n (Maybe WatchStatus) +getWatchStatus w r = use (watchStatus . 
at (w, r)) + setTermStatus :: S m n => Hash -> TermStatus -> n () setTermStatus h s = do when debug (traceM $ "setTermStatus " ++ take 10 (show h) ++ " " ++ show s) @@ -244,11 +270,16 @@ setPatchStatus h s = do when debug (traceM $ "setPatchStatus " ++ take 10 (show h) ++ " " ++ show s) patchStatus . at h .= Just s -setBranchStatus :: forall m n. S m n => Branch.Hash -> BranchStatus m -> n () +setBranchStatus :: S m n => Branch.Hash -> BranchStatus m -> n () setBranchStatus h s = do when debug (traceM $ "setBranchStatus " ++ take 10 (show h) ++ " " ++ show s) branchStatus . at h .= Just s +setWatchStatus :: S m n => WatchKind -> Reference.Id -> WatchStatus -> n () +setWatchStatus k r@(Reference.Id h i _) s = do + when debug (traceM $ "setWatchStatus " ++ show k ++ " " ++ take 10 (show h) ++ " " ++ show i) + watchStatus . at (k, r) .= Just s + checkTermComponent :: forall m n a. (RS m n a, V m n, E TermStatus n) => @@ -275,8 +306,11 @@ checkTermComponent t h n = do Just _ -> Except.throwError TermMissingDependencies Nothing -> Validate.dispute . Set.singleton $ D h' n' checkTerm = \case - Reference.Builtin {} -> pure () - Reference.DerivedId (Reference.Id h' _ _) | h == h' -> pure () + Reference.Builtin {} -> + pure () + Reference.DerivedId (Reference.Id h' _ _) + | h == h' -> + pure () -- ignore self-references Reference.DerivedId (Reference.Id h' _ n') -> getTermStatus h' >>= \case Just TermOk -> pure () @@ -286,6 +320,40 @@ checkTermComponent t h n = do traverse_ checkDecl typeDeps pure (term, typ) +checkWatchComponent :: + forall m n a. 
+ (RS m n a, V m n, E WatchStatus n) => + (m ~> n) -> + WatchKind -> + Reference.Id -> + n (Term Symbol a) +checkWatchComponent t k r@(Reference.Id h _ _) = do + Env src _ _ <- Reader.ask + (t $ Codebase.getWatch src k r) >>= \case + Nothing -> Except.throwError WatchNotCached + Just watchResult -> do + let deps = Term.labeledDependencies watchResult + let checkDecl = \case + Reference.Builtin {} -> pure () + Reference.DerivedId (Reference.Id h' _ n') -> + getDeclStatus h' >>= \case + Just DeclOk -> pure () + Just _ -> Except.throwError WatchMissingDependencies + Nothing -> Validate.dispute . Set.singleton $ D h' n' + checkTerm = \case + Reference.Builtin {} -> + pure () + Reference.DerivedId (Reference.Id h' _ _) + | h == h' -> + pure () -- ignore self-references + Reference.DerivedId (Reference.Id h' _ n') -> + getTermStatus h' >>= \case + Just TermOk -> pure () + Just _ -> Except.throwError WatchMissingDependencies + Nothing -> Validate.dispute . Set.singleton $ T h' n' + traverse_ (bitraverse_ checkDecl checkTerm . LD.toReference) deps + pure watchResult + checkDeclComponent :: forall m n a. (RS m n a, E DeclStatus n, V m n) => @@ -498,21 +566,23 @@ data DoneCount = DoneCount { _doneBranches :: Int, _doneTerms :: Int, _doneDecls :: Int, - _donePatches :: Int + _donePatches :: Int, + _doneWatches :: Int } data ErrorCount = ErrorCount { _errorBranches :: Int, _errorTerms :: Int, _errorDecls :: Int, - _errorPatches :: Int + _errorPatches :: Int, + _errorWatches :: Int } emptyDoneCount :: DoneCount -emptyDoneCount = DoneCount 0 0 0 0 +emptyDoneCount = DoneCount 0 0 0 0 0 emptyErrorCount :: ErrorCount -emptyErrorCount = ErrorCount 0 0 0 0 +emptyErrorCount = ErrorCount 0 0 0 0 0 makeLenses ''DoneCount makeLenses ''ErrorCount @@ -535,6 +605,7 @@ simpleProgress = Sync.Progress need done error allDone T {} -> _1 . doneTerms += 1 D {} -> _1 . doneDecls += 1 P {} -> _1 . donePatches += 1 + W {} -> _1 . 
doneWatches += 1 printProgress error e = do @@ -544,11 +615,12 @@ simpleProgress = Sync.Progress need done error allDone T {} -> _2 . errorTerms += 1 D {} -> _2 . errorDecls += 1 P {} -> _2 . errorPatches += 1 + W {} -> _2 . errorWatches += 1 printProgress allDone :: MonadState (DoneCount, ErrorCount, Status m) n => MonadIO n => n () allDone = do - Status branches terms decls patches <- Lens.use Lens._3 + Status branches terms decls patches watches <- Lens.use Lens._3 liftIO $ putStrLn "Finished." Foldable.for_ (Map.toList decls) \(h, s) -> case s of DeclOk -> pure () @@ -559,6 +631,10 @@ simpleProgress = Sync.Progress need done error allDone TermMissing -> liftIO . putStrLn $ "I couldn't find the term " ++ show h ++ "so I filtered it out of the sync." TermMissingType -> liftIO . putStrLn $ "The type of term " ++ show h ++ " was missing, so I filtered it out of the sync." TermMissingDependencies -> liftIO . putStrLn $ "One or more dependencies of term " ++ show h ++ " were missing, so I filtered it out of the sync." + Foldable.for_ (Map.toList watches) \((k, r), s) -> case s of + WatchOk -> pure () + WatchNotCached -> pure () + WatchMissingDependencies -> liftIO . putStrLn $ "One or more dependencies of watch expression " ++ show (k, r) ++ " were missing, so I skipped it." Foldable.for_ (Map.toList patches) \(h, s) -> case s of PatchOk -> pure () PatchMissing -> liftIO . putStrLn $ "I couldn't find the patch " ++ show h ++ ", so I filtered it out of the sync." 
@@ -569,10 +645,11 @@ simpleProgress = Sync.Progress need done error allDone printProgress :: MonadState (ProgressState m) n => MonadIO n => n () printProgress = do - (DoneCount b t d p, ErrorCount b' t' d' p', _) <- State.get + (DoneCount b t d p w, ErrorCount b' t' d' p' w', _) <- State.get let ways :: [Maybe String] = [ Monoid.whenM (b > 0 || b' > 0) (Just $ show (b + b') ++ " branches" ++ Monoid.whenM (b' > 0) (" (" ++ show b' ++ " repaired)")), Monoid.whenM (t > 0 || t' > 0) (Just $ show t ++ " terms" ++ Monoid.whenM (t' > 0) (" (" ++ show t' ++ " errors)")), + Monoid.whenM (w > 0 || w' > 0) (Just $ show w ++ " test results" ++ Monoid.whenM (w' > 0) (" (" ++ show w' ++ " errors)")), Monoid.whenM (d > 0 || d' > 0) (Just $ show d ++ " types" ++ Monoid.whenM (d' > 0) (" (" ++ show d' ++ " errors)")), Monoid.whenM (p > 0 || p' > 0) (Just $ show p ++ " patches" ++ Monoid.whenM (p' > 0) (" (" ++ show p' ++ " errors)")) ] @@ -580,19 +657,15 @@ simpleProgress = Sync.Progress need done error allDone putStr $ "\rSynced " ++ List.intercalate ", " (catMaybes ways) ++ Monoid.whenM newlines "\n" hFlush stdout - instance Show (Entity m) where - show = \case - C h _ -> "C " ++ show h - T h len -> "T " ++ show h ++ " " ++ show len - D h len -> "D " ++ show h ++ " " ++ show len - P h -> "P " ++ show h + show = show . 
toEntity' data Entity' = C' Branch.Hash | T' Hash | D' Hash | P' Branch.EditHash + | W' WatchKind Reference.Id deriving (Eq, Ord, Show) toEntity' :: Entity m -> Entity' @@ -601,6 +674,7 @@ toEntity' = \case T h _ -> T' h D h _ -> D' h P h -> P' h + W k r -> W' k r instance Eq (Entity m) where x == y = toEntity' x == toEntity' y diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs index 542ecd1775..87966c3046 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Upgrade12.hs @@ -1,8 +1,8 @@ {-# LANGUAGE PartialTypeSignatures #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeApplications #-} - {-# LANGUAGE ViewPatterns #-} + module Unison.Codebase.Conversion.Upgrade12 where import Control.Exception.Safe (MonadCatch) @@ -13,6 +13,7 @@ import qualified Control.Monad.Reader as Reader import Control.Monad.State (StateT (StateT, runStateT)) import qualified Control.Monad.State as State import Control.Monad.Trans (lift) +import qualified Data.Map as Map import qualified U.Codebase.Sync as Sync import Unison.Codebase (CodebasePath) import qualified Unison.Codebase as Codebase @@ -23,8 +24,12 @@ import qualified Unison.Codebase.FileCodebase as FC import qualified Unison.Codebase.Init as Codebase import qualified Unison.Codebase.SqliteCodebase as SC import qualified Unison.PrettyTerminal as CT +import Unison.UnisonFile (WatchKind) +import qualified Unison.UnisonFile as WK import UnliftIO (MonadIO, liftIO) -import qualified Data.Map as Map + +syncWatchKinds :: [WatchKind] +syncWatchKinds = [WK.TestWatch] upgradeCodebase :: forall m. 
(MonadIO m, MonadCatch m) => CodebasePath -> m () upgradeCodebase root = do @@ -38,20 +43,26 @@ upgradeCodebase root = do lift (Codebase.getRootBranch srcCB) >>= \case Left e -> error $ "Error loading source codebase root branch: " ++ show e Right (Branch c) -> pure $ Sync12.C (Causal.currentHash c) (pure c) + watchResults <- + lift $ + concat + <$> traverse + (\k -> fmap (Sync12.W k) <$> Codebase.watches srcCB k) + syncWatchKinds (_, _, s) <- flip Reader.runReaderT env . flip State.execStateT initialState $ do sync <- Sync12.sync12 (lift . lift . lift) Sync.sync @_ @(Sync12.Entity _) (Sync.transformSync (lensStateT Lens._3) sync) Sync12.simpleProgress - [rootEntity] - lift $ Codebase.putRootBranch destCB =<< fmap Branch case rootEntity of - Sync12.C h mc -> case Map.lookup h (Sync12._branchStatus s) of - Just Sync12.BranchOk -> mc - Just (Sync12.BranchReplaced _h' c') -> pure c' - Nothing -> error "We didn't sync the root?" - _ -> error "The root wasn't a causal?" + (rootEntity : watchResults) + lift $ + Codebase.putRootBranch destCB =<< fmap Branch case rootEntity of + Sync12.C h mc -> case Map.lookup h (Sync12._branchStatus s) of + Just Sync12.BranchOk -> mc + Just (Sync12.BranchReplaced _h' c') -> pure c' + Nothing -> error "We didn't sync the root?" + _ -> error "The root wasn't a causal?" pure () - where lensStateT :: forall m s1 s2 a. 
Monad m => Lens' s2 s1 -> StateT s1 m a -> StateT s2 m a lensStateT l m = StateT \s2 -> do From 6ba3e84d31a99b92ce2b13c005439b2cd4e447dd Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 1 May 2021 03:35:14 -0600 Subject: [PATCH 210/225] phrasing --- parser-typechecker/unison/Main.hs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 8818596075..2932228c41 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -204,12 +204,12 @@ main = do upgradeCodebase :: Maybe Codebase.CodebasePath -> IO () upgradeCodebase mcodepath = Codebase.getCodebaseDir mcodepath >>= \root -> do - PT.putPrettyLn $ - "I'm upgrading the codebase in '" <> P.string root <> "', but it will" - <> "take a while, and may even run out of memory. If you're having" + PT.putPrettyLn . P.wrap $ + "I'm upgrading the codebase in " <> P.backticked' (P.string root) "," <> "but it will" + <> "take a while, and may even run out of memory. If you have" <> "trouble, contact us on #alphatesting and we'll try to help." Upgrade12.upgradeCodebase root - PT.putPrettyLn + PT.putPrettyLn . P.wrap $ P.newline <> "Try it out and once you're satisfied, you can safely(?) 
delete the old version from" <> P.newline From ca7d7924359b9043a4bef3f22cd367f6fb94f767 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Sat, 1 May 2021 03:35:29 -0600 Subject: [PATCH 211/225] schema version --- codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 3 ++- codebase2/codebase-sqlite/sql/create.sql | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 54a65f0379..a5201e39b4 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -127,7 +127,8 @@ checkForMissingSchema = filterM missing schema missing (t, n) = null @[] @(Only Int) <$> query sql (t, n) sql = "SELECT 1 FROM sqlite_master WHERE type = ? and name = ?" schema = - [ ("table", "hash"), + [ ("table", "schema_version"), + ("table", "hash"), ("index", "hash_base32"), ("table", "text"), ("table", "hash_object"), diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 5985bdf46a..2fd62db563 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -1,3 +1,7 @@ +CREATE TABLE schema_version ( + version INTEGER NOT NULL +); + -- actually stores the 512-byte hashes CREATE TABLE hash ( id INTEGER PRIMARY KEY, From 0828b0535dae0f57fce964a0f8d0f63aee78fd9e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Mon, 3 May 2021 21:46:42 -0600 Subject: [PATCH 212/225] fix watch saving functions and syncing --- .../U/Codebase/Sqlite/Operations.hs | 5 ++- .../U/Codebase/Sqlite/Queries.hs | 25 +++++++++-- .../U/Codebase/Sqlite/Sync22.hs | 43 ++++++++++++------- .../src/Unison/Codebase/SqliteCodebase.hs | 16 ++++--- parser-typechecker/tests/Suite.hs | 2 + .../Unison/Test/BaseUpgradePushPullTest.hs | 43 +++++++++++++++++++ .../unison-parser-typechecker.cabal | 3 +- 7 files changed, 110 insertions(+), 27 deletions(-) 
create mode 100644 parser-typechecker/tests/Unison/Test/BaseUpgradePushPullTest.hs diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs index ad0d493029..11e06b3a26 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Operations.hs @@ -240,6 +240,9 @@ c2sReferenceId = C.Reference.idH primaryHashToExistingObjectId s2cReferenceId :: EDB m => S.Reference.Id -> m C.Reference.Id s2cReferenceId = C.Reference.idH loadHashByObjectId +h2cReferenceId :: EDB m => S.Reference.IdH -> m C.Reference.Id +h2cReferenceId = C.Reference.idH loadHashByHashId + h2cReference :: EDB m => S.ReferenceH -> m C.Reference h2cReference = bitraverse loadTextById loadHashByHashId @@ -688,7 +691,7 @@ c2sTerm tm tp = c2xTerm Q.saveText primaryHashToExistingObjectId tm (Just tp) <& -- *** Watch expressions listWatches :: EDB m => WatchKind -> m [C.Reference.Id] -listWatches k = Q.loadWatchesByWatchKind k >>= traverse s2cReferenceId +listWatches k = Q.loadWatchesByWatchKind k >>= traverse h2cReferenceId -- | returns Nothing if the expression isn't cached. 
loadWatch :: EDB m => WatchKind -> C.Reference.Id -> MaybeT m (C.Term Symbol) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index a5201e39b4..fb6d7f46b4 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -24,9 +24,11 @@ import Control.Monad.IO.Class (MonadIO) import Control.Monad.Reader (MonadReader (ask)) import Control.Monad.Trans (MonadIO (liftIO)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT), runMaybeT) +import qualified Control.Monad.Writer as Writer import Data.ByteString (ByteString) import Data.Foldable (traverse_) import Data.Functor ((<&>)) +import Data.Int (Int8) import qualified Data.List.Extra as List import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as Nel @@ -36,7 +38,7 @@ import Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (..), (:.) (..)) import qualified Database.SQLite.Simple as SQLite -import Database.SQLite.Simple.FromField (FromField) +import Database.SQLite.Simple.FromField (FromField (..)) import Database.SQLite.Simple.ToField (ToField (..)) import Debug.Trace (trace, traceM) import GHC.Stack (HasCallStack) @@ -54,7 +56,6 @@ import U.Util.Hash (Hash) import qualified U.Util.Hash as Hash import UnliftIO (MonadUnliftIO, throwIO, try, tryAny, withRunInIO) import UnliftIO.Concurrent (myThreadId) -import qualified Control.Monad.Writer as Writer -- * types @@ -437,9 +438,19 @@ loadWatch k r = queryAtom sql (Only k :. r) where sql = [here| AND watch.component_index = ? 
|] -loadWatchesByWatchKind :: DB m => WatchKind -> m [Reference.Id] +loadWatchKindsByReference :: DB m => Reference.IdH -> m [WatchKind] +loadWatchKindsByReference r = queryAtoms sql r where sql = [here| + SELECT watch_kind_id FROM watch_result + INNER JOIN watch + ON watch_result.hash_id = watch.hash_id + AND watch_result.component_index = watch.component_index + WHERE watch.hash_id = ? + AND watch.component_index = ? + |] + +loadWatchesByWatchKind :: DB m => WatchKind -> m [Reference.IdH] loadWatchesByWatchKind k = query sql (Only k) where sql = [here| - SELECT object_id, component_index FROM watch WHERE watch_kind_id = ? + SELECT hash_id, component_index FROM watch WHERE watch_kind_id = ? |] -- * Index-building @@ -723,3 +734,9 @@ instance ToField WatchKind where WatchKind.RegularWatch -> SQLite.SQLInteger 0 WatchKind.TestWatch -> SQLite.SQLInteger 1 +instance FromField WatchKind where + fromField = fromField @Int8 <&> fmap \case + 0 -> WatchKind.RegularWatch + 1 -> WatchKind.TestWatch + tag -> error $ "Unknown WatchKind id " ++ show tag + diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index a5dbd3c66d..b869de8958 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -50,7 +50,11 @@ import qualified U.Codebase.WatchKind as WK import U.Util.Cache (Cache) import qualified U.Util.Cache as Cache -data Entity = O ObjectId | C CausalHashId deriving (Eq, Ord, Show) +data Entity + = O ObjectId + | C CausalHashId + | W WK.WatchKind Sqlite.Reference.IdH + deriving (Eq, Ord, Show) data DbTag = SrcDb | DestDb @@ -171,20 +175,8 @@ trySync tCache hCache oCache cCache _gc = \case -- copy reference-specific stuff lift $ for_ [0 .. 
length localIds - 1] \(fromIntegral -> idx) -> do -- sync watch results - for_ [WK.RegularWatch, WK.TestWatch] \wk -> do - let refH = Reference.Id hId idx - refH' = Reference.Id hId' idx - runSrc (Q.loadWatch wk refH) >>= traverse_ \blob -> do - (L.LocalIds tIds hIds, termBytes) <- - case runGetS S.decomposeWatchResult blob of - Right x -> pure x - Left s -> throwError $ DecodeError ErrWatchResult blob s - tIds' <- traverse syncTextLiteral tIds - hIds' <- traverse syncHashLiteral hIds - when debug $ traceM $ "LocalIds for Source watch result " ++ show refH ++ ": " ++ show (tIds, hIds) - when debug $ traceM $ "LocalIds for Dest watch result " ++ show refH' ++ ": " ++ show (tIds', hIds') - let blob' = runPutS (S.recomposeWatchResult (L.LocalIds tIds' hIds', termBytes)) - runDest (Q.saveWatch wk refH' blob') + for_ [WK.TestWatch] \wk -> + syncWatch wk (Reference.Id hId idx) -- sync dependencies index let ref = Reference.Id oId idx ref' = Reference.Id oId' idx @@ -290,6 +282,7 @@ trySync tCache hCache oCache cCache _gc = \case when debug $ traceM $ "Source " ++ show (hId, oId) ++ " becomes Dest " ++ show (hId', oId') Cache.insert oCache oId oId' pure Sync.Done + W k r -> ifM (syncWatch k r) (pure Sync.Done) (pure Sync.NonFatalError) where syncLocalObjectId :: ObjectId -> ValidateT (Set Entity) m ObjectId syncLocalObjectId oId = @@ -334,6 +327,7 @@ trySync tCache hCache oCache cCache _gc = \case pure $ BL.LocalIds tIds' oIds' poIds' chboIds' syncTypeIndexRow oId' = bitraverse syncHashReference (pure . 
rewriteTypeIndexReferent oId') + rewriteTypeIndexReferent :: ObjectId -> Sqlite.Referent.Id -> Sqlite.Referent.Id rewriteTypeIndexReferent oId' = bimap (const oId') (const oId') @@ -376,6 +370,25 @@ trySync tCache hCache oCache cCache _gc = \case srcParents <- runSrc $ Q.loadCausalParents chId traverse syncCausal srcParents + syncWatch :: WK.WatchKind -> Sqlite.Reference.IdH -> m Bool + syncWatch wk r = do + r' <- traverse syncHashLiteral r + doneKinds <- runDest (Q.loadWatchKindsByReference r') + if (wk `elem` doneKinds) then do + runSrc (Q.loadWatch wk r) >>= traverse \blob -> do + (L.LocalIds tIds hIds, termBytes) <- + case runGetS S.decomposeWatchResult blob of + Right x -> pure x + Left s -> throwError $ DecodeError ErrWatchResult blob s + tIds' <- traverse syncTextLiteral tIds + hIds' <- traverse syncHashLiteral hIds + when debug $ traceM $ "LocalIds for Source watch result " ++ show r ++ ": " ++ show (tIds, hIds) + when debug $ traceM $ "LocalIds for Dest watch result " ++ show r' ++ ": " ++ show (tIds', hIds') + let blob' = runPutS (S.recomposeWatchResult (L.LocalIds tIds' hIds', termBytes)) + runDest (Q.saveWatch wk r' blob') + pure True + else pure False + syncSecondaryHashes oId oId' = runSrc (Q.hashIdWithVersionForObject oId) >>= traverse_ (go oId') where diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index 3c5246ea09..ad70b9fa5b 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -10,7 +10,7 @@ module Unison.Codebase.SqliteCodebase (Unison.Codebase.SqliteCodebase.init, unsa import qualified Control.Concurrent import qualified Control.Exception -import Control.Monad (filterM, when, (>=>), unless) +import Control.Monad (filterM, unless, when, (>=>)) import Control.Monad.Except (ExceptT, MonadError (throwError), runExceptT) import qualified Control.Monad.Except as Except import 
Control.Monad.Extra (ifM, unlessM, (||^)) @@ -33,11 +33,13 @@ import qualified Data.Set as Set import Data.Text (Text) import qualified Data.Text as Text import qualified Data.Text.IO as TextIO +import Data.Traversable (for) import qualified Data.Validation as Validation import Data.Word (Word64) import Database.SQLite.Simple (Connection) import qualified Database.SQLite.Simple as Sqlite import GHC.Stack (HasCallStack) +import qualified System.Console.ANSI as ANSI import System.FilePath (()) import qualified System.FilePath as FilePath import U.Codebase.HashTags (CausalHash (CausalHash, unCausalHash)) @@ -48,6 +50,7 @@ import qualified U.Codebase.Sqlite.Operations as Ops import qualified U.Codebase.Sqlite.Queries as Q import qualified U.Codebase.Sqlite.Sync22 as Sync22 import qualified U.Codebase.Sync as Sync +import qualified U.Codebase.WatchKind as WK import qualified U.Util.Hash as H2 import qualified U.Util.Monoid as Monoid import qualified U.Util.Set as Set @@ -95,7 +98,6 @@ import Unison.Util.Timing (time) import UnliftIO (MonadIO, catchIO, liftIO) import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, doesFileExist) import UnliftIO.STM -import qualified System.Console.ANSI as ANSI debug, debugProcessBranches :: Bool debug = False @@ -624,10 +626,9 @@ sqliteCodebase root = do appendReflog :: MonadIO m => Text -> Branch m -> Branch m -> m () appendReflog reason old new = - let t = - Reflog.toText $ - Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason - in liftIO $ TextIO.appendFile (reflogPath root) (t <> "\n") + liftIO $ TextIO.appendFile (reflogPath root) (t <> "\n") + where + t = Reflog.toText $ Reflog.Entry (Branch.headHash old) (Branch.headHash new) reason reflogPath :: CodebasePath -> FilePath reflogPath root = root "reflog" @@ -769,6 +770,9 @@ sqliteCodebase root = do let progress' = Sync.transformProgress (lift . 
lift) progress bHash = Branch.headHash b se $ processBranches sync progress' src dest [B bHash (pure b)] + testWatchRefs <- lift . fmap concat $ for [WK.TestWatch] \wk -> + fmap (Sync22.W wk) <$> flip runReaderT srcConn (Q.loadWatchesByWatchKind wk) + se . r $ Sync.sync sync progress' testWatchRefs pure $ Validation.valueOr (error . show) result -- we don't currently have any good opportunity to call this sanity check; diff --git a/parser-typechecker/tests/Suite.hs b/parser-typechecker/tests/Suite.hs index 03c596b4b9..f3d8e2372f 100644 --- a/parser-typechecker/tests/Suite.hs +++ b/parser-typechecker/tests/Suite.hs @@ -39,6 +39,7 @@ import qualified Unison.Test.VersionParser as VersionParser import qualified Unison.Test.Git as Git import qualified Unison.Test.GitSync as GitSync import qualified Unison.Test.Codebase.Upgrade12 as Upgrade12 +-- import qualified Unison.Test.BaseUpgradePushPullTest as BaseUpgradePushPullTest test :: Test () test = tests @@ -71,6 +72,7 @@ test = tests , Git.test , Upgrade12.test , GitSync.test + -- , BaseUpgradePushPullTest.test -- slowwwwww test involving upgrading base, hard-coded to arya's filesystem , Name.test , VersionParser.test , Pretty.test diff --git a/parser-typechecker/tests/Unison/Test/BaseUpgradePushPullTest.hs b/parser-typechecker/tests/Unison/Test/BaseUpgradePushPullTest.hs new file mode 100644 index 0000000000..a3b833f2d2 --- /dev/null +++ b/parser-typechecker/tests/Unison/Test/BaseUpgradePushPullTest.hs @@ -0,0 +1,43 @@ +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE QuasiQuotes #-} + +module Unison.Test.BaseUpgradePushPullTest where + +import Data.String.Here.Interpolated (i) +import EasyTest +import Shellmet () +import qualified Unison.Test.Ucm as Ucm +import Unison.Test.GitSync (initGitRepo) + +-- keep it off for CI, since the random temp dirs it generates show up in the +-- output, which causes the test output to change, and the "no change" check +-- to fail +writeTranscriptOutput 
:: Bool +writeTranscriptOutput = False + +test :: Test () +test = scope "base-upgrade-push-pull-test" do + io do + v1 <- Ucm.initCodebase Ucm.CodebaseFormat1 + putStrLn =<< Ucm.runTranscript v1 [i| + ```ucm + .> pull /Users/arya/base _base + ``` + |] + v2 <- Ucm.upgradeCodebase v1 + repo <- initGitRepo + putStrLn =<< Ucm.runTranscript v2 [i| + ```ucm + .> push ${repo} _base + ``` + |] + v2' <- Ucm.initCodebase Ucm.CodebaseFormat2 + putStrLn $ show v2' + putStrLn =<< Ucm.runTranscript v2' [i| + ```ucm + .> pull ${repo} _base + .> test + ``` + |] + ok diff --git a/parser-typechecker/unison-parser-typechecker.cabal b/parser-typechecker/unison-parser-typechecker.cabal index 818afc82c4..7748ac3ee7 100644 --- a/parser-typechecker/unison-parser-typechecker.cabal +++ b/parser-typechecker/unison-parser-typechecker.cabal @@ -4,7 +4,7 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: 6773e97d0e1646a1218127f5065794946877513acef8b5c10126c3a88552966d +-- hash: 645fe830bb452a7952eee335a9718791601155e96b1cf61346796106377a6ad7 name: unison-parser-typechecker version: 0.0.0 @@ -290,6 +290,7 @@ executable tests Unison.Core.Test.Name Unison.Test.ABT Unison.Test.ANF + Unison.Test.BaseUpgradePushPullTest Unison.Test.Cache Unison.Test.Codebase Unison.Test.Codebase.Causal From d073384295e749e6b58c04be8890570b1bf4c2bb Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Tue, 4 May 2021 11:35:11 -0600 Subject: [PATCH 213/225] add schema_version, commit flag, causal_metadata --- codebase2/codebase-sqlite/sql/create.sql | 10 +++ parser-typechecker/unison/Main.hs | 2 +- unison-src/transcripts/watch-expressions.md | 20 ++++++ .../transcripts/watch-expressions.output.md | 69 +++++++++++++++++++ 4 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 unison-src/transcripts/watch-expressions.md create mode 100644 unison-src/transcripts/watch-expressions.output.md diff --git a/codebase2/codebase-sqlite/sql/create.sql 
b/codebase2/codebase-sqlite/sql/create.sql index 2fd62db563..e738854993 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -1,6 +1,7 @@ CREATE TABLE schema_version ( version INTEGER NOT NULL ); +INSERT INTO schema_version (version) VALUES (2); -- actually stores the 512-byte hashes CREATE TABLE hash ( @@ -62,6 +63,7 @@ CREATE INDEX object_type_id ON object(type_id); CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id), + commit_flag INTEGER NOT NULL DEFAULT 0, gc_generation INTEGER NOT NULL ); CREATE INDEX causal_value_hash_id ON causal(value_hash_id); @@ -82,6 +84,14 @@ CREATE TABLE causal_parent ( CREATE INDEX causal_parent_causal_id ON causal_parent(causal_id); CREATE INDEX causal_parent_parent_id ON causal_parent(parent_id); +CREATE TABLE causal_metadata ( + causal_id INTEGER NOT NULL REFERENCES causal(self_hash_id), + metadata_object_id INTEGER NOT NULL REFERENCES object(id), + metadata_component_index INTEGER NOT NULL, + PRIMARY KEY (causal_id, metadata_object_id, metadata_component_index) +); +CREATE INDEX causal_metadata_causal_id ON causal_metadata(causal_id); + -- associate old (e.g. v1) causal hashes with new causal hashes CREATE TABLE causal_old ( old_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_old_fk1 REFERENCES hash(id), diff --git a/parser-typechecker/unison/Main.hs b/parser-typechecker/unison/Main.hs index 6fd2770fa8..a71de540d1 100644 --- a/parser-typechecker/unison/Main.hs +++ b/parser-typechecker/unison/Main.hs @@ -201,7 +201,7 @@ main = do , P.string $ "http://127.0.0.1:" <> show port <> "/ui?" <> URI.encode (unpack token)] PT.putPrettyLn . P.string $ "Now starting the Unison Codebase Manager..." 
- launch currentDir config theCodebase branchCache [] + launch currentDir config theCodebase [] upgradeCodebase :: Maybe Codebase.CodebasePath -> IO () upgradeCodebase mcodepath = diff --git a/unison-src/transcripts/watch-expressions.md b/unison-src/transcripts/watch-expressions.md new file mode 100644 index 0000000000..0402c82e3b --- /dev/null +++ b/unison-src/transcripts/watch-expressions.md @@ -0,0 +1,20 @@ +```ucm +.> builtins.merge +``` + +```unison +test> pass = [Ok "Passed"] +``` + +```ucm +.> add +``` + +```unison +test> pass = [Ok "Passed"] +``` + +```ucm +.> add +.> test +``` \ No newline at end of file diff --git a/unison-src/transcripts/watch-expressions.output.md b/unison-src/transcripts/watch-expressions.output.md new file mode 100644 index 0000000000..e24ab3a361 --- /dev/null +++ b/unison-src/transcripts/watch-expressions.output.md @@ -0,0 +1,69 @@ +```ucm +.> builtins.merge + + Done. + +``` +```unison +test> pass = [Ok "Passed"] +``` + +```ucm + + I found and typechecked these definitions in scratch.u. If you + do an `add` or `update`, here's how your codebase would + change: + + ⍟ These new definitions are ok to `add`: + + pass : [Result] + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | test> pass = [Ok "Passed"] + + ✅ Passed Passed + +``` +```ucm +.> add + + ⍟ I've added these definitions: + + pass : [Result] + +``` +```unison +test> pass = [Ok "Passed"] +``` + +```ucm + + I found and typechecked the definitions in scratch.u. This + file has been previously added to the codebase. + + Now evaluating any watch expressions (lines starting with + `>`)... Ctrl+C cancels. + + 1 | test> pass = [Ok "Passed"] + + ✅ Passed Passed (cached) + +``` +```ucm +.> add + + ⊡ Ignored previously added definitions: pass + +.> test + + Cached test results (`help testcache` to learn more) + + ◉ pass Passed + + ✅ 1 test(s) passing + + Tip: Use view pass to view the source of a test. 
+ +``` From e840a0911e4ebf9b17338c1e5be5db33af1a2c84 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 5 May 2021 03:38:55 -0600 Subject: [PATCH 214/225] started some schema comments --- .../U/Codebase/Sqlite/Queries.hs | 4 +- codebase2/codebase-sqlite/package.yaml | 3 + .../codebase-sqlite/sql/create-index.sql | 76 ----------- codebase2/codebase-sqlite/sql/create.sql | 127 +++++++++++++++--- .../unison-codebase-sqlite.cabal | 4 +- 5 files changed, 119 insertions(+), 95 deletions(-) delete mode 100644 codebase2/codebase-sqlite/sql/create-index.sql diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index fb6d7f46b4..846a6d5a45 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -108,12 +108,10 @@ orError e = maybe (throwError e) pure type TypeHashReference = Reference' TextId HashId -- * main squeeze - createSchema :: (DB m, MonadUnliftIO m) => m () createSchema = do withImmediateTransaction . traverse_ (execute_ . fromString) $ List.splitOn ";" [hereFile|sql/create.sql|] - <> List.splitOn ";" [hereFile|sql/create-index.sql|] setFlags :: DB m => m () setFlags = execute_ "PRAGMA foreign_keys = ON;" @@ -146,7 +144,7 @@ checkForMissingSchema = filterM missing schema ("table", "causal_parent"), ("index", "causal_parent_causal_id"), ("index", "causal_parent_parent_id"), - -- ,("table", "causal_old") + ("table", "causal_old_hash"), ("table", "watch_result"), ("table", "watch"), ("index", "watch_kind"), diff --git a/codebase2/codebase-sqlite/package.yaml b/codebase2/codebase-sqlite/package.yaml index fd779fb4c0..c7e31011cb 100644 --- a/codebase2/codebase-sqlite/package.yaml +++ b/codebase2/codebase-sqlite/package.yaml @@ -4,6 +4,9 @@ github: unisonweb/unison library: source-dirs: . 
+extra-source-files: + - sql/* + dependencies: - base - bytes diff --git a/codebase2/codebase-sqlite/sql/create-index.sql b/codebase2/codebase-sqlite/sql/create-index.sql deleted file mode 100644 index a054e0dadc..0000000000 --- a/codebase2/codebase-sqlite/sql/create-index.sql +++ /dev/null @@ -1,76 +0,0 @@ --- find type index uses hash-based references instead of component-based --- references, because they may be arbitrary types, not just the head --- types that are stored in the codebase. -CREATE TABLE find_type_index ( - type_reference_builtin INTEGER NULL CONSTRAINT find_type_index_fk1 REFERENCES text(id), - type_reference_hash_id INTEGER NULL CONSTRAINT find_type_index_fk2 REFERENCES hash(id), - type_reference_component_index INTEGER NULL, - term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_index_fk3 REFERENCES object(id), - term_referent_component_index INTEGER NOT NULL, - term_referent_constructor_index INTEGER NULL, - CONSTRAINT find_type_index_c1 UNIQUE ( - term_referent_object_id, - term_referent_component_index, - term_referent_constructor_index - ), - CONSTRAINT find_type_index_c2 CHECK ( - (type_reference_builtin IS NULL) = - (type_reference_hash_id IS NOT NULL) - ), - CONSTRAINT find_type_index_c3 CHECK ( - (type_reference_hash_id IS NULL) = - (type_reference_component_index IS NULL) - ) -); -CREATE INDEX find_type_index_type ON find_type_index ( - type_reference_builtin, - type_reference_hash_id, - type_reference_component_index -); - -CREATE TABLE find_type_mentions_index ( - type_reference_builtin INTEGER NULL CONSTRAINT find_type_mentions_index_fk1 REFERENCES text(id), - type_reference_hash_id INTEGER NULL CONSTRAINT find_type_mentions_index_fk2 REFERENCES hash(id), - type_reference_component_index INTEGER NULL, - term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_mentions_index_fk3 REFERENCES object(id), - term_referent_component_index INTEGER NOT NULL, - term_referent_constructor_index INTEGER NULL, - CONSTRAINT 
find_type_mentions_index_c1 CHECK ( - (type_reference_builtin IS NULL) = - (type_reference_hash_id IS NOT NULL) - ), - CONSTRAINT find_type_mentions_index_c2 CHECK ( - (type_reference_hash_id IS NULL) = - (type_reference_component_index IS NULL) - ) -); -CREATE INDEX find_type_mentions_index_type ON find_type_mentions_index ( - type_reference_builtin, - type_reference_hash_id, - type_reference_component_index -); - -CREATE TABLE dependents_index ( - dependency_builtin INTEGER NULL CONSTRAINT dependents_index_fk1 REFERENCES text(id), - dependency_object_id INTEGER NULL CONSTRAINT dependents_index_fk2 REFERENCES object(id), - dependency_component_index INTEGER NULL, - dependent_object_id INTEGER NOT NULL CONSTRAINT dependents_index_fk3 REFERENCES object(id), - dependent_component_index INTEGER NOT NULL, - CONSTRAINT dependents_index_c1 CHECK ( - (dependency_builtin IS NULL) = - (dependency_object_id IS NOT NULL) - ), - CONSTRAINT dependents_index_c2 CHECK ( - (dependency_object_id IS NULL) = - (dependency_component_index IS NULL) - ) -); -CREATE INDEX dependents_by_dependency ON dependents_index ( - dependency_builtin, - dependency_object_id, - dependency_component_index -); -CREATE INDEX dependencies_by_dependent ON dependents_index ( - dependent_object_id, - dependent_component_index -) diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 2fd62db563..988eee521c 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -1,10 +1,12 @@ +-- v2 codebase schema + CREATE TABLE schema_version ( version INTEGER NOT NULL ); -- actually stores the 512-byte hashes CREATE TABLE hash ( - id INTEGER PRIMARY KEY, + id INTEGER PRIMARY KEY NOT NULL, -- this would be the full hash, represented as base32 instead of bytes, -- to optimize for looking them up by prefix. 
base32 TEXT UNIQUE NOT NULL @@ -12,13 +14,17 @@ CREATE TABLE hash ( CREATE INDEX hash_base32 ON hash(base32); CREATE TABLE text ( - id INTEGER PRIMARY KEY, + id INTEGER PRIMARY KEY NOT NULL, text TEXT UNIQUE NOT NULL ); --- just came up with this, a layer of indirection to allow multiple hash_ids to --- reference the same object. --- so: SELECT object.id, bytes FROM object +-- The `hash_object` table is a layer of indirection that allows multiple +-- hashes to be associated to the same object. For example, if the hashing +-- algorithm for the object is changed. +-- I could imagine a UNIQUE (object_id, hash_version) constraint +-- or a UNIQUE (hash_id, hash_version) constraint, or both, but I'm not sure +-- if that will cause trouble later? +-- So: SELECT object.id, bytes FROM object -- INNER JOIN hash_object ON object_id = object.id -- INNER JOIN hash ON hash_id = hash.id -- WHERE base32 LIKE 'a1b2c3%' @@ -29,28 +35,40 @@ CREATE TABLE hash_object ( object_id INTEGER NOT NULL CONSTRAINT hash_object_fk2 REFERENCES object(id), hash_version INTEGER NOT NULL ); +-- efficient look up of objects by hash CREATE INDEX hash_object_hash_id ON hash_object(hash_id); +-- efficient lookup of hashes by objects CREATE INDEX hash_object_object_id ON hash_object(object_id); --- this table is just documentation, it won't be used for joins. 
+-- This table is just for diagnostic queries, not for normal ucm operation, +-- by joining `ON object.type_id = object_type_description.id` CREATE TABLE object_type_description ( id INTEGER UNIQUE NOT NULL, description TEXT UNIQUE NOT NULL ); INSERT INTO object_type_description (id, description) VALUES - (0, "Term Component"), -- foo x = x + 1 + (0, "Term Component"), -- ping x = pong (x + 1), pong x = ping (x + 1) (1, "Decl Component"), -- unique type Animal = Cat | Dog | Mouse - (2, "Namespace"), -- a one-level slice + (2, "Namespace"), -- a one-level slice with no history (3, "Patch") -- replace term #abc with term #def ; +-- `object` stores binary blobs that are uniquely identified by hash (alone). +-- Within the database, objects (including terms, decls, namespaces) are primarily +-- referenced by their object_ids, not hash ids. (The use of object_id-based +-- is meant to prove that the referenced object actually exists in the database, +-- whereas if hash_ids were used, we could only prove that the hash was in the +-- database.) +-- The `hash_object` table allows us to associate multiple hashes to objects, +-- but when we want to display a hash back to the user, we have to choose one. +-- The `primary_hash_id` tells us which one we'll use. CREATE TABLE object ( id INTEGER PRIMARY KEY NOT NULL, primary_hash_id INTEGER UNIQUE NOT NULL CONSTRAINT object_fk1 REFERENCES hash(id), type_id INTEGER NOT NULL CONSTRAINT object_fk2 REFERENCES object_type_description(id), bytes BLOB NOT NULL ); --- Error: near "INTEGER": syntax error +-- look up objects by primary_hash_id. 
CREATE INDEX object_hash_id ON object(primary_hash_id); CREATE INDEX object_type_id ON object(type_id); @@ -77,13 +95,13 @@ CREATE TABLE namespace_root ( CREATE TABLE causal_parent ( causal_id INTEGER NOT NULL CONSTRAINT causal_parent_fk1 REFERENCES causal(self_hash_id), parent_id INTEGER NOT NULL CONSTRAINT causal_parent_fk2 REFERENCES causal(self_hash_id), - UNIQUE (causal_id, parent_id) -); + PRIMARY KEY (causal_id, parent_id) +) WITHOUT ROWID; CREATE INDEX causal_parent_causal_id ON causal_parent(causal_id); CREATE INDEX causal_parent_parent_id ON causal_parent(parent_id); -- associate old (e.g. v1) causal hashes with new causal hashes -CREATE TABLE causal_old ( +CREATE TABLE causal_old_hash ( old_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_old_fk1 REFERENCES hash(id), new_hash_id INTEGER NOT NULL CONSTRAINT causal_old_fk2 REFERENCES hash(id) ); @@ -93,20 +111,99 @@ CREATE TABLE watch_result ( component_index INTEGER NOT NULL, result BLOB NOT NULL, PRIMARY KEY (hash_id, component_index) -); +) WITHOUT ROWID; CREATE TABLE watch ( hash_id INTEGER NOT NULL CONSTRAINT watch_fk1 REFERENCES hash(id), component_index INTEGER NOT NULL, watch_kind_id INTEGER NOT NULL CONSTRAINT watch_fk2 REFERENCES watch_kind_description(id), PRIMARY KEY (hash_id, component_index, watch_kind_id) -); +) WITHOUT ROWID; CREATE INDEX watch_kind ON watch(watch_kind_id); CREATE TABLE watch_kind_description ( - id INTEGER PRIMARY KEY UNIQUE NOT NULL, + id INTEGER PRIMARY KEY NOT NULL, description TEXT UNIQUE NOT NULL ); INSERT INTO watch_kind_description (id, description) VALUES (0, "Regular"), -- won't be synced (1, "Test") -- will be synced + ; + +-- find type index uses hash-based references instead of component-based +-- references, because they may be arbitrary types, not just the head +-- types that are stored in the codebase. 
+CREATE TABLE find_type_index ( + type_reference_builtin INTEGER NULL CONSTRAINT find_type_index_fk1 REFERENCES text(id), + type_reference_hash_id INTEGER NULL CONSTRAINT find_type_index_fk2 REFERENCES hash(id), + type_reference_component_index INTEGER NULL, + term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_index_fk3 REFERENCES object(id), + term_referent_component_index INTEGER NOT NULL, + term_referent_constructor_index INTEGER NULL, + CONSTRAINT find_type_index_c1 UNIQUE ( + term_referent_object_id, + term_referent_component_index, + term_referent_constructor_index + ), + CONSTRAINT find_type_index_c2 CHECK ( + (type_reference_builtin IS NULL) = + (type_reference_hash_id IS NOT NULL) + ), + CONSTRAINT find_type_index_c3 CHECK ( + (type_reference_hash_id IS NULL) = + (type_reference_component_index IS NULL) + ) +); +CREATE INDEX find_type_index_type ON find_type_index ( + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index +); + +CREATE TABLE find_type_mentions_index ( + type_reference_builtin INTEGER NULL CONSTRAINT find_type_mentions_index_fk1 REFERENCES text(id), + type_reference_hash_id INTEGER NULL CONSTRAINT find_type_mentions_index_fk2 REFERENCES hash(id), + type_reference_component_index INTEGER NULL, + term_referent_object_id INTEGER NOT NULL CONSTRAINT find_type_mentions_index_fk3 REFERENCES object(id), + term_referent_component_index INTEGER NOT NULL, + term_referent_constructor_index INTEGER NULL, + CONSTRAINT find_type_mentions_index_c1 CHECK ( + (type_reference_builtin IS NULL) = + (type_reference_hash_id IS NOT NULL) + ), + CONSTRAINT find_type_mentions_index_c2 CHECK ( + (type_reference_hash_id IS NULL) = + (type_reference_component_index IS NULL) + ) +); +CREATE INDEX find_type_mentions_index_type ON find_type_mentions_index ( + type_reference_builtin, + type_reference_hash_id, + type_reference_component_index +); + +CREATE TABLE dependents_index ( + dependency_builtin INTEGER NULL CONSTRAINT 
dependents_index_fk1 REFERENCES text(id), + dependency_object_id INTEGER NULL CONSTRAINT dependents_index_fk2 REFERENCES object(id), + dependency_component_index INTEGER NULL, + dependent_object_id INTEGER NOT NULL CONSTRAINT dependents_index_fk3 REFERENCES object(id), + dependent_component_index INTEGER NOT NULL, + CONSTRAINT dependents_index_c1 CHECK ( + (dependency_builtin IS NULL) = + (dependency_object_id IS NOT NULL) + ), + CONSTRAINT dependents_index_c2 CHECK ( + (dependency_object_id IS NULL) = + (dependency_component_index IS NULL) + ) +); +CREATE INDEX dependents_by_dependency ON dependents_index ( + dependency_builtin, + dependency_object_id, + dependency_component_index +); +CREATE INDEX dependencies_by_dependent ON dependents_index ( + dependent_object_id, + dependent_component_index +) +-- semicolon intentionally omitted diff --git a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal index e3c6b36c1e..cbffce97f2 100644 --- a/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal +++ b/codebase2/codebase-sqlite/unison-codebase-sqlite.cabal @@ -4,13 +4,15 @@ cabal-version: 1.12 -- -- see: https://github.com/sol/hpack -- --- hash: e1cd44afa84ce86b33e3573d3ec12f6238df82991290e25f9b13a65d5ad98b04 +-- hash: 08b634642723a41ec97773c7481d4d7af6a31e527b06c0e800001677e64a955d name: unison-codebase-sqlite version: 0.0.0 homepage: https://github.com/unisonweb/unison#readme bug-reports: https://github.com/unisonweb/unison/issues build-type: Simple +extra-source-files: + sql/create.sql source-repository head type: git From 3fe8ab9d6a958a17226a9342f02b98048897561d Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 5 May 2021 12:12:52 -0600 Subject: [PATCH 215/225] schema index fixes and docs --- .../U/Codebase/Sqlite/Queries.hs | 10 +++- codebase2/codebase-sqlite/sql/create.sql | 59 ++++++++++++------- 2 files changed, 47 insertions(+), 22 deletions(-) diff --git 
a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 846a6d5a45..73fdba33dc 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -131,7 +131,6 @@ checkForMissingSchema = filterM missing schema ("index", "hash_base32"), ("table", "text"), ("table", "hash_object"), - ("index", "hash_object_hash_id"), ("index", "hash_object_object_id"), ("table", "object_type_description"), ("table", "object"), @@ -148,7 +147,14 @@ checkForMissingSchema = filterM missing schema ("table", "watch_result"), ("table", "watch"), ("index", "watch_kind"), - ("table", "watch_kind_description") + ("table", "watch_kind_description"), + ("table", "find_type_index"), + ("index", "find_type_index_type"), + ("table", "find_type_mentions_index"), + ("index", "find_type_mentions_index_type"), + ("table", "dependents_index"), + ("index", "dependents_by_dependency"), + ("index", "dependencies_by_dependent") ] {- ORMOLU_DISABLE -} diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index f6504f8f72..a1b4a0cf3d 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -10,9 +10,21 @@ CREATE TABLE hash ( id INTEGER PRIMARY KEY NOT NULL, -- this would be the full hash, represented as base32 instead of bytes, -- to optimize for looking them up by prefix. - base32 TEXT UNIQUE NOT NULL -); -CREATE INDEX hash_base32 ON hash(base32); + base32 TEXT NOT NULL +); +-- Per https://sqlite.org/optoverview.html#the_like_optimization, +-- we need COLLATE NOCASE to enable prefix scanning with `LIKE`. +-- If we want LIKE to be case sensitive (defaults to no) then +-- see that link. +-- We want: + -- sqlite> explain query plan select id from hash where base32 like 'a1b2c3%' + -- QUERY PLAN + -- `--SEARCH TABLE hash USING COVERING INDEX hash_base32 (base32>? 
AND base32 explain query plan select id from hash where base32 like 'a1b2c3%' + -- QUERY PLAN + -- `--SCAN TABLE hash +CREATE INDEX hash_base32 ON hash(base32 COLLATE NOCASE); CREATE TABLE text ( id INTEGER PRIMARY KEY NOT NULL, @@ -36,15 +48,13 @@ CREATE TABLE hash_object ( object_id INTEGER NOT NULL CONSTRAINT hash_object_fk2 REFERENCES object(id), hash_version INTEGER NOT NULL ); --- efficient look up of objects by hash -CREATE INDEX hash_object_hash_id ON hash_object(hash_id); -- efficient lookup of hashes by objects CREATE INDEX hash_object_object_id ON hash_object(object_id); -- This table is just for diagnostic queries, not for normal ucm operation, -- by joining `ON object.type_id = object_type_description.id` CREATE TABLE object_type_description ( - id INTEGER UNIQUE NOT NULL, + id INTEGER PRIMARY KEY NOT NULL, description TEXT UNIQUE NOT NULL ); INSERT INTO object_type_description (id, description) VALUES @@ -56,8 +66,8 @@ INSERT INTO object_type_description (id, description) VALUES -- `object` stores binary blobs that are uniquely identified by hash (alone). -- Within the database, objects (including terms, decls, namespaces) are primarily --- referenced by their object_ids, not hash ids. (The use of object_id-based --- is meant to prove that the referenced object actually exists in the database, +-- referenced by their object_ids, not hash ids. (The use of object_id is +-- meant to prove that the referenced object actually exists in the database, -- whereas if hash_ids were used, we could only prove that the hash was in the -- database.) -- The `hash_object` table allows us to associate multiple hashes to objects, @@ -65,19 +75,18 @@ INSERT INTO object_type_description (id, description) VALUES -- The `primary_hash_id` tells us which one we'll use. 
CREATE TABLE object ( id INTEGER PRIMARY KEY NOT NULL, - primary_hash_id INTEGER UNIQUE NOT NULL CONSTRAINT object_fk1 REFERENCES hash(id), + primary_hash_id INTEGER NOT NULL CONSTRAINT object_fk1 REFERENCES hash(id), type_id INTEGER NOT NULL CONSTRAINT object_fk2 REFERENCES object_type_description(id), bytes BLOB NOT NULL ); -- look up objects by primary_hash_id. -CREATE INDEX object_hash_id ON object(primary_hash_id); +CREATE UNIQUE INDEX object_hash_id ON object(primary_hash_id); +-- filter objects by CREATE INDEX object_type_id ON object(type_id); -- `causal` references value hash ids instead of value ids, in case you want -- to be able to drop values and keep just the causal spine. --- This implementation keeps the hash of the dropped values, although I could --- see an argument to drop them too and just use NULL, but I thought it better --- to not lose their identities. +-- `commit_flag` and `gc_generation` are basically unused at the moment. CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id), @@ -87,13 +96,13 @@ CREATE TABLE causal ( CREATE INDEX causal_value_hash_id ON causal(value_hash_id); CREATE INDEX causal_gc_generation ON causal(gc_generation); +-- We expect exactly 1 row, which we overwrite when we setRootNamespace. CREATE TABLE namespace_root ( - -- a dummy pk because - -- id INTEGER PRIMARY KEY NOT NULL, causal_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT namespace_root_fk1 REFERENCES causal(self_hash_id) ); -- LCA computations only need to look at this table +-- A causal can have many parents, and a parent may be a parent to many causals. 
CREATE TABLE causal_parent ( causal_id INTEGER NOT NULL CONSTRAINT causal_parent_fk1 REFERENCES causal(self_hash_id), parent_id INTEGER NOT NULL CONSTRAINT causal_parent_fk2 REFERENCES causal(self_hash_id), @@ -102,12 +111,13 @@ CREATE TABLE causal_parent ( CREATE INDEX causal_parent_causal_id ON causal_parent(causal_id); CREATE INDEX causal_parent_parent_id ON causal_parent(parent_id); +-- links reference.id to causals CREATE TABLE causal_metadata ( causal_id INTEGER NOT NULL REFERENCES causal(self_hash_id), metadata_object_id INTEGER NOT NULL REFERENCES object(id), metadata_component_index INTEGER NOT NULL, PRIMARY KEY (causal_id, metadata_object_id, metadata_component_index) -); +) WITHOUT ROWID; CREATE INDEX causal_metadata_causal_id ON causal_metadata(causal_id); -- associate old (e.g. v1) causal hashes with new causal hashes @@ -140,9 +150,10 @@ INSERT INTO watch_kind_description (id, description) VALUES (1, "Test") -- will be synced ; --- find type index uses hash-based references instead of component-based --- references, because they may be arbitrary types, not just the head --- types that are stored in the codebase. +-- Related to the discussion at the `object` table, `find_type_index` indexes +-- the types by hash-based references instead of object-based references, because +-- they may be arbitrary types, not just the head types that are stored in the +-- codebase. The terms having these types are indexed by object-based referents. CREATE TABLE find_type_index ( type_reference_builtin INTEGER NULL CONSTRAINT find_type_index_fk1 REFERENCES text(id), type_reference_hash_id INTEGER NULL CONSTRAINT find_type_index_fk2 REFERENCES hash(id), @@ -192,6 +203,7 @@ CREATE INDEX find_type_mentions_index_type ON find_type_mentions_index ( type_reference_component_index ); +-- dependents and dependencies are all in the codebase, so they use object-based references. 
CREATE TABLE dependents_index ( dependency_builtin INTEGER NULL CONSTRAINT dependents_index_fk1 REFERENCES text(id), dependency_object_id INTEGER NULL CONSTRAINT dependents_index_fk2 REFERENCES object(id), @@ -216,4 +228,11 @@ CREATE INDEX dependencies_by_dependent ON dependents_index ( dependent_object_id, dependent_component_index ) --- semicolon intentionally omitted +-- Semicolon intentionally omitted, for the same reason +-- semicolons in comments will blow up codebase initialization. +-- (oops, almost used a semicolon at the end of that last phrase!) +-- Sqlite doesn't let us submit multiple statements in the same +-- command, so we are using Haskell code to segment the statements +-- by splitting on semicolons. It doesn't know to ignore comments, +-- though I guess that wouldn't be hard to implement. Should have +-- done it from the start. From 985ffb2e4b758b648dc09ab63a92dcad211f19f9 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 5 May 2021 21:15:01 -0600 Subject: [PATCH 216/225] add Committed newtype --- codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 5 +++++ codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 6 +++--- codebase2/codebase-sqlite/sql/create.sql | 2 +- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 4ccfbc6b82..e01f281e21 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -45,6 +45,11 @@ newtype Generation = Generation { unGeneration :: Word64 } deriving (Eq, Ord, Show) deriving (Enum, FromField, ToField) via Word64 +-- |Also I guess garbage-collection related? 
🤔 +newtype Committed = Committed { unCommitted :: Bool } + deriving (Eq, Ord, Show) + deriving (Enum, FromField, ToField) via Bool + instance Show PatchObjectId where show h = "PatchObjectId (" ++ show (unPatchObjectId h) ++ ")" diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 73fdba33dc..8a8076ddbf 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -321,9 +321,9 @@ updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| -- |Maybe we would generalize this to something other than NamespaceHash if we -- end up wanting to store other kinds of Causals here too. saveCausal :: DB m => CausalHashId -> BranchHashId -> m () -saveCausal self value = execute sql (self, value, Generation 0) where sql = [here| - INSERT INTO causal (self_hash_id, value_hash_id, gc_generation) - VALUES (?, ?, ?) +saveCausal self value = execute sql (self, value, Committed True, Generation 0) where sql = [here| + INSERT INTO causal (self_hash_id, value_hash_id, commit_flag, gc_generation) + VALUES (?, ?, ?, ?) 
ON CONFLICT DO NOTHING |] diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index a1b4a0cf3d..1f43a137c4 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -90,7 +90,7 @@ CREATE INDEX object_type_id ON object(type_id); CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id), - commit_flag INTEGER NOT NULL DEFAULT 0, + commit_flag INTEGER NOT NULL, gc_generation INTEGER NOT NULL ); CREATE INDEX causal_value_hash_id ON causal(value_hash_id); From 5ec0dba1c91cef503a0a43f2b510773a39e45b54 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 5 May 2021 21:26:20 -0600 Subject: [PATCH 217/225] remove causal_old_hash until we're ready to use it. --- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 2 -- .../U/Codebase/Sqlite/Queries.hs | 33 ++++++------------- codebase2/codebase-sqlite/sql/create.sql | 6 ---- 3 files changed, 10 insertions(+), 31 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index e01f281e21..471923cd7a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -34,8 +34,6 @@ newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, O newtype CausalHashId = CausalHashId { unCausalHashId :: HashId } deriving (Eq, Ord) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId -newtype CausalOldHashId = CausalOldHashId HashId deriving Show deriving (Hashable, FromField, ToField) via HashId - newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via ObjectId newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, 
ToField) via ObjectId diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 8a8076ddbf..0b4b3686eb 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -44,7 +44,16 @@ import Debug.Trace (trace, traceM) import GHC.Stack (HasCallStack) import U.Codebase.HashTags (BranchHash, CausalHash, unBranchHash, unCausalHash) import U.Codebase.Reference (Reference') -import U.Codebase.Sqlite.DbId (BranchHashId (..), BranchObjectId (..), CausalHashId (..), CausalOldHashId, Generation (..), HashId (..), ObjectId (..), TextId) +import U.Codebase.Sqlite.DbId + ( BranchHashId (..), + BranchObjectId (..), + CausalHashId (..), + Committed (..), + Generation (..), + HashId (..), + ObjectId (..), + TextId, + ) import U.Codebase.Sqlite.ObjectType (ObjectType) import qualified U.Codebase.Sqlite.Reference as Reference import qualified U.Codebase.Sqlite.Referent as Referent @@ -85,7 +94,6 @@ data Integrity | UnknownTextId TextId | UnknownObjectId ObjectId | UnknownCausalHashId CausalHashId - | UnknownCausalOldHashId CausalOldHashId | UnknownHash Hash | UnknownText Text | NoObjectForHashId HashId @@ -143,7 +151,6 @@ checkForMissingSchema = filterM missing schema ("table", "causal_parent"), ("index", "causal_parent_causal_id"), ("index", "causal_parent_parent_id"), - ("table", "causal_old_hash"), ("table", "watch_result"), ("table", "watch"), ("index", "watch_kind"), @@ -361,26 +368,6 @@ loadBranchObjectIdByCausalHashId id = queryAtom sql (Only id) where sql = [here| WHERE causal.self_hash_id = ? |] -saveCausalOld :: DB m => HashId -> CausalHashId -> m () -saveCausalOld v1 v2 = execute sql (v1, v2) where sql = [here| - INSERT INTO causal_old (old_hash_id, new_hash_id) VALUES (?, ?) 
- ON CONFLICT DO NOTHING -|] - -loadCausalHashIdByCausalOldHash :: EDB m => CausalOldHashId -> m CausalHashId -loadCausalHashIdByCausalOldHash id = - queryAtom sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| - SELECT new_hash_id FROM causal_old where old_hash_id = ? -|] - -loadOldCausalValueHash :: EDB m => CausalOldHashId -> m BranchHashId -loadOldCausalValueHash id = - queryAtom sql (Only id) >>= orError (UnknownCausalOldHashId id) where sql = [here| - SELECT value_hash_id FROM causal - INNER JOIN causal_old ON self_hash_id = new_hash_id - WHERE old_hash_id = ? -|] - saveCausalParents :: DB m => CausalHashId -> [CausalHashId] -> m () saveCausalParents child parents = executeMany sql $ (child,) <$> parents where sql = [here| diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 1f43a137c4..f9e1efbb91 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -120,12 +120,6 @@ CREATE TABLE causal_metadata ( ) WITHOUT ROWID; CREATE INDEX causal_metadata_causal_id ON causal_metadata(causal_id); --- associate old (e.g. 
v1) causal hashes with new causal hashes -CREATE TABLE causal_old_hash ( - old_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_old_fk1 REFERENCES hash(id), - new_hash_id INTEGER NOT NULL CONSTRAINT causal_old_fk2 REFERENCES hash(id) -); - CREATE TABLE watch_result ( hash_id INTEGER NOT NULL CONSTRAINT watch_result_fk1 REFERENCES hash(id), component_index INTEGER NOT NULL, From 844fd203ade051b6a20198458d6e1bf1545cd533 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 5 May 2021 21:26:45 -0600 Subject: [PATCH 218/225] restore missing uniqueness constraint on hash table --- codebase2/codebase-sqlite/sql/create.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index f9e1efbb91..8f67559076 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -12,6 +12,7 @@ CREATE TABLE hash ( -- to optimize for looking them up by prefix. base32 TEXT NOT NULL ); +CREATE UNIQUE INDEX hash_base32 ON hash(base32 COLLATE NOCASE); -- Per https://sqlite.org/optoverview.html#the_like_optimization, -- we need COLLATE NOCASE to enable prefix scanning with `LIKE`. 
-- If we want LIKE to be case sensitive (defaults to no) then @@ -24,7 +25,6 @@ CREATE TABLE hash ( -- sqlite> explain query plan select id from hash where base32 like 'a1b2c3%' -- QUERY PLAN -- `--SCAN TABLE hash -CREATE INDEX hash_base32 ON hash(base32 COLLATE NOCASE); CREATE TABLE text ( id INTEGER PRIMARY KEY NOT NULL, From 3276114fe23ccfc1d50963725d07f3b78bb6e0ff Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 5 May 2021 21:26:55 -0600 Subject: [PATCH 219/225] comments/formatting --- .../codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs | 4 +++- codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs | 9 ++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs index bd050c495e..b2601b6fc5 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/ObjectType.hs @@ -4,7 +4,9 @@ import Database.SQLite.Simple.FromField (FromField(..)) import Database.SQLite.Simple.ToField (ToField(..)) import Database.SQLite.Simple (SQLData(SQLInteger)) --- |Don't reorder these, they are part of the database +-- |Don't reorder these, they are part of the database, +-- and the ToField and FromField implementation currently +-- depends on the derived Enum implementation. data ObjectType = TermComponent -- 0 | DeclComponent -- 1 diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 0b4b3686eb..e69de296da 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -36,7 +36,14 @@ import Data.Maybe (fromJust, fromMaybe) import Data.String (fromString) import Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) -import Database.SQLite.Simple (Connection, FromRow, Only (..), SQLData, ToRow (..), (:.) 
(..)) +import Database.SQLite.Simple + ( Connection, + FromRow, + Only (..), + SQLData, + ToRow (..), + (:.) (..), + ) import qualified Database.SQLite.Simple as SQLite import Database.SQLite.Simple.FromField (FromField (..)) import Database.SQLite.Simple.ToField (ToField (..)) From cfbc676fa29dbfb35ce02ddd484e34ad7811ed54 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Wed, 5 May 2021 21:27:07 -0600 Subject: [PATCH 220/225] initial v2 format writeup --- docs/repoformats/v2.markdown | 229 +++++++++++++++++++++++++++++++++++ 1 file changed, 229 insertions(+) create mode 100644 docs/repoformats/v2.markdown diff --git a/docs/repoformats/v2.markdown b/docs/repoformats/v2.markdown new file mode 100644 index 0000000000..4a2590c030 --- /dev/null +++ b/docs/repoformats/v2.markdown @@ -0,0 +1,229 @@ +# Overview + +The v2 codebase format is a sqlite3 database with some Unison objects stored relationally, and others stored as binary blobs. + +There are a few important concepts for understanding the implementation. + +**The `hash` and `text` tables** +`hash` stores Unison hashes in base32hex format. It is indexed to let us look up hashes efficiently by case-insensitive prefix in log time. Hashes are not stored anywhere else in the database. + +`text` stores all of the strings in the codebase. This includes definition names, internal names of ucm builtins, and user-defined strings. Though it doesn't include local variable definition names. Possibly an oversight. 😬 It is indexed to let us look up strings by exact match in logarithmic time, for deduplication purposes. Indeed, it does not contain any duplicates. + +**The `object` table** + +The object table stores things that are identified by hash and represented with binary blobs instead of relationally. Currently that includes patches and namespace slices, along with term and decl components ("component" ambiguously meaning the whole cycle, a strongly-connected component of definitions). 
Note that the entire component is identified by a single hash, like patches and namespace slices are. There could be new object types in the future. The object type of each object is given by the `type_id` field. (It's an integer, but the description is given in the `object_type_description` table for convenience, and in `ObjectType.hs`.) + +**The `causal` and `causal_parents` tables** + +```sql +CREATE TABLE causal ( + self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), + value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id), + commit_flag INTEGER NOT NULL, + gc_generation INTEGER NOT NULL +); + +CREATE TABLE causal_parent ( + causal_id INTEGER NOT NULL REFERENCES causal(self_hash_id), + parent_id INTEGER NOT NULL REFERENCES causal(self_hash_id), +); +``` + +Although causals are indexed by `hash.id`, an entry in the `causal` table is required, to qualify as a real causal. This also tells us the hash of its root namespace slice (which may or may not be present), whether it's been committed, and its `gc_generation`, which will be `True` and `0` until a commit/squash feature is implemented. + +**Hash-based vs Object-based References** + +To avoid referencing an object that is expected to be in the database but isn't actually in the database (e.g. a patch, a term dependency), most objects in the v2 codebase format are referenced by `object.id`. + +The Haskell reference type used internally by the v2 codebase code is parameterized over the text and hash types: +```haskell +data Reference' t h + = ReferenceBuiltin t + | ReferenceDerived (Id' h) + deriving (Eq, Ord, Show) + +data Id' h = Id h Pos +type Pos = Word64 + +-- e.g. +type Reference = Reference' TextId ObjectId +type ReferenceH = Reference' TextId HashId +``` + +where the former references objects that must exist in the database and the latter references objects that may not. 
+ + +### Serialization formats for codebase objects + +Each of the object blob formats begins with a varint format id. For a given object type, different format ids indicate different ways of representing the same object -- not a way of representing a slightly different type of object. This enables us to make different storage-lager efficiency tradeoffs based on an object's contents or a user's access patterns. + +These different formats are enumerated in `U.Codebase.Sqlite.{Term,Decl,Branch,Patch}.Format.hs` respectively, and collected below: + +```haskell +data DeclFormat + = Decl DeclFormat.LocallyIndexedComponent + +data TermFormat + = Term TermFormat.LocallyIndexedComponent + +data WatchResultFormat + = WatchResult WatchLocalIds Term + +data BranchFormat + = Full BranchLocalIds LocalBranch + | Diff BranchObjectId BranchLocalIds LocalDiff + +data PatchFormat + = Full PatchLocalIds LocalPatch + | Diff PatchObjectId PatchLocalIds LocalPatchDiff +``` + +Only one format is currently defined for decls, terms, and watch expression results; two are defined---though only the first is currently in use and tested---for branches and patches. + +Here things get a little hairy, and we'll dive into each one individually. + +#### DeclFormat.Decl + +To start with, the ABT base functor for the Haskell type which directly backs the codebase blob representation of unison types is parameterized by a reference type `r`: +```haskell +data F' r a + = Ref r + | Arrow a a + | Ann a Kind + | App a a + | Effect a a + | Effects [a] + | Forall a + | IntroOuter a +``` + +Different parameterizations are used in different contexts, but for storing type declarations, we have `r` as `Reference' LocalTextId (Maybe LocalDefnId)` which is a parameterization of reference we haven't talked about yet. 
+ +In the previous section, we saw `type Reference = Reference' TextId ObjectId`, but we avoid using this type of reference in the binary blobs because one database will number its `TextId`s and `ObjectId`s differently from another database + +In order to avoid fully rewriting the ABT to update `TextId` and `ObjectId` replace when transferring definitions from one codebase to another, each definition is bundled with a lookup array for `TextId` and `ObjectId` values. The references in the ABT are indexed by indexes into these _lookup array_, and when a definition is transferred between codebases, only the lookup array is rewritten, not the ABT itself. And hopefully that's better. + +An example: + +```unison +type Tree = Branch Tree Tree | INode (Optional ##Int) | BNode Boolean +``` +This gives us a decl with two constructor types: +- `Tree -> Tree -> Tree` +- `Optional Int -> Tree` +- `Boolean -> Tree` + +First we can imagine it represented using `r = Reference' TextId (Maybe ObjectId)`, where `Nothing` would represent the current object being constructed, which does not yet have an `ObjectId`. 
+ +Supposing the builtin type `##Int` has `TextId 5`, `##Boolean` has `TextId 4`, and the derived type `Optional` has `ObjectId 3`, the types would look something like: + +`Branch`: +``` +Ref (ReferenceDerived Nothing (Pos 0)) -> +Ref (ReferenceDerived Nothing (Pos 0)) -> +Ref (ReferenceDerived Nothing (Pos 0))`` +``` +`INode`: +``` +App (Ref (ReferenceDerived (Just (ObjectId 3)) (Pos 0))) + (Ref (ReferenceBuiltin (TextId 5))) -> +Ref (ReferenceDerived Nothing (Pos 0)) +``` +`BNode`: +``` +Ref (ReferenceBuiltin (TextId 4)) -> +Ref (ReferenceDerived Nothing (Pos 0))`` +``` + +From the decl containing these types, we create lookup arrays for `TextId`s `ObjectId`s, while substituting their indices into the constructor types: + +"LocalIds": +``` + textLookup +0| TextId 5 +1| TextId 4 + + defnLookup +0'| ObjectId 3 +``` + +`Branch`: +``` +Ref (ReferenceDerived Nothing (Pos 0)) -> +Ref (ReferenceDerived Nothing (Pos 0)) -> +Ref (ReferenceDerived Nothing (Pos 0))`` +``` +`INode`: +``` +App (Ref (ReferenceDerived (Just (LocalDefnId 0')) (Pos 0))) + (Ref (ReferenceBuiltin (LocalTextId 0))) -> +Ref (ReferenceDerived Nothing (Pos 0)) +``` +`BNode`: +``` +Ref (ReferenceBuiltin (LocalTextId 1)) -> +Ref (ReferenceDerived Nothing (Pos 0))`` +``` + + +Then on a sync, we look up the corresponding ids in the source and destination codebases (creating them at the destination if necessary), and construct a new "LocalIds" lookup table: + +`TextId 5` -> `"##Nat"` -> `TextId 60` +`TextId 4` -> `"##Boolean"` -> `TextId 2` +`ObjectId 3` -> `HashId 14` -> `#asodcj3` -> `HashId 16` -> `ObjectId 2` + +"LocalIds" at destination: +``` + textLookup +0| TextId 50 +1| TextId 2 + + defnLookup +0'| ObjectId 2 +``` + +```haskell +-- | Add new formats here +data DeclFormat = Decl (Vector (LocalIds, Decl Symbol)) +``` + +From the above definition, you can see that each `Decl` is paired with its own `LocalIds` structure, as opposed to having a single `LocalIds` that spans the entire component. 
+ +### TermFormat.Term + +This works the same way! + +### WatchResultFormat.WatchResult + +This works the same way, except that because watch expression results can reference definitions that don't exist in your codebase, but only in your watch file, other terms and types are referenced by `HashId` instead of `ObjectId`, and the `LocalIds` table contains `HashId`s instead of `ObjectId`s. + +### PatchFormat.Full + +This works the same way, except that "PatchLocalIds' includes all three of `TextId`s, `HashId`s, and `ObjectId`s. This is because while the target of a replacement is considered mandatory in the codebase, the old version of a definition doesn't have to be available to use the patch. The point of the patch is to stop using the old definition, after all! + +### BranchFormat.Full + +These namespace slices are represented in a similar way: +```haskell +data Branch' t h p c = Branch + { terms :: Map t (Map (Referent'' t h) (MetadataSetFormat' t h)), + types :: Map t (Map (Reference' t h) (MetadataSetFormat' t h)), + patches :: Map t p, + children :: Map t c + } + +data MetadataSetFormat' t h = Inline (Set (Reference' t h)) + +data BranchLocalIds = LocalIds + { branchTextLookup :: Vector TextId, + branchDefnLookup :: Vector ObjectId, + branchPatchLookup :: Vector PatchObjectId, + branchChildLookup :: Vector (BranchObjectId, CausalHashId) + } +``` + +`TextId`s are used to represent namesegments in a namespace slice +`ObjectId`s are used to reference types and terms in the namespace. Note that they are not `HashId`s, because the namespace shouldn't be able to refer to definitions that aren't in the database. +`PatchObjectIds` reference the object ids of patch objects, as you might imagine. + +`branchChildLookup` contains two fields: a `CausalHashId` which points to the history of the child, and the `BranchObjectId` which proves that the relevant namespace slice is also present. 
In general, a codebase may not have the namespace slice corresponding to every causal id, but it ought to have them for the children of another namespace slice it does have (thus, the `BranchObjectId` is used). The causal relationship stored relationally rather than as blobs, and the `CausalHashId` is a useful index into the `causal_parents` table. \ No newline at end of file From ec87b8e6f49ce0a2e8d9d93823ab9ceb592b490e Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 6 May 2021 21:21:52 -0600 Subject: [PATCH 221/225] purge unused schema features --- .../codebase-sqlite/U/Codebase/Sqlite/DbId.hs | 15 +------ .../U/Codebase/Sqlite/Queries.hs | 40 ++++++++++--------- .../U/Codebase/Sqlite/Sync22.hs | 6 +-- codebase2/codebase-sqlite/sql/create.sql | 5 +-- docs/repoformats/v2.markdown | 10 ++--- .../src/Unison/Codebase/Conversion/Sync12.hs | 9 +---- 6 files changed, 32 insertions(+), 53 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs index 471923cd7a..c83ba9aa1a 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/DbId.hs @@ -34,19 +34,8 @@ newtype BranchHashId = BranchHashId { unBranchHashId :: HashId } deriving (Eq, O newtype CausalHashId = CausalHashId { unCausalHashId :: HashId } deriving (Eq, Ord) deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via HashId -newtype TypeId = TypeId ObjectId deriving Show deriving (FromField, ToField) via ObjectId -newtype TermId = TermCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId -newtype DeclId = DeclCycleId ObjectId deriving Show deriving (FromField, ToField) via ObjectId - --- |For generational garbage-collection; 0 is the oldest generation. -newtype Generation = Generation { unGeneration :: Word64 } - deriving (Eq, Ord, Show) - deriving (Enum, FromField, ToField) via Word64 - --- |Also I guess garbage-collection related? 
🤔 -newtype Committed = Committed { unCommitted :: Bool } - deriving (Eq, Ord, Show) - deriving (Enum, FromField, ToField) via Bool +newtype SchemaVersion = SchemaVersion Word64 deriving (Eq, Ord, Show) + deriving (Num, Real, Enum, Integral, Bits, Hashable, FromField, ToField) via Word64 instance Show PatchObjectId where show h = "PatchObjectId (" ++ show (unPatchObjectId h) ++ ")" diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index e69de296da..1380cfb36f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -32,7 +32,7 @@ import Data.Int (Int8) import qualified Data.List.Extra as List import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as Nel -import Data.Maybe (fromJust, fromMaybe) +import Data.Maybe (fromJust) import Data.String (fromString) import Data.String.Here.Uninterpolated (here, hereFile) import Data.Text (Text) @@ -55,8 +55,6 @@ import U.Codebase.Sqlite.DbId ( BranchHashId (..), BranchObjectId (..), CausalHashId (..), - Committed (..), - Generation (..), HashId (..), ObjectId (..), TextId, @@ -153,7 +151,6 @@ checkForMissingSchema = filterM missing schema ("index", "object_type_id"), ("table", "causal"), ("index", "causal_value_hash_id"), - ("index", "causal_gc_generation"), ("table", "namespace_root"), ("table", "causal_parent"), ("index", "causal_parent_causal_id"), @@ -335,23 +332,28 @@ updateObjectBlob oId bs = execute sql (oId, bs) where sql = [here| -- |Maybe we would generalize this to something other than NamespaceHash if we -- end up wanting to store other kinds of Causals here too. saveCausal :: DB m => CausalHashId -> BranchHashId -> m () -saveCausal self value = execute sql (self, value, Committed True, Generation 0) where sql = [here| - INSERT INTO causal (self_hash_id, value_hash_id, commit_flag, gc_generation) - VALUES (?, ?, ?, ?) 
+saveCausal self value = execute sql (self, value) where sql = [here| + INSERT INTO causal (self_hash_id, value_hash_id) + VALUES (?, ?) ON CONFLICT DO NOTHING |] - --- maybe: look at whether parent causal is "committed"; if so, then increment; --- otherwise, don't. -getNurseryGeneration :: DB m => m Generation -getNurseryGeneration = query_ sql <&> \case - [] -> Generation 0 - [fromOnly -> g] -> Generation $ fromMaybe 0 g - (fmap fromOnly -> gs) -> - error $ "How did I get multiple values out of a MAX()? " ++ show gs - where sql = [here| - SELECT MAX(gc_generation) FROM causal; - |] +-- saveCausal self value = execute sql (self, value, Committed True, Generation 0) where sql = [here| +-- INSERT INTO causal (self_hash_id, value_hash_id, commit_flag, gc_generation) +-- VALUES (?, ?, ?, ?) +-- ON CONFLICT DO NOTHING +-- |] + +-- -- maybe: look at whether parent causal is "committed"; if so, then increment; +-- -- otherwise, don't. +-- getNurseryGeneration :: DB m => m Generation +-- getNurseryGeneration = query_ sql <&> \case +-- [] -> Generation 0 +-- [fromOnly -> g] -> Generation $ fromMaybe 0 g +-- (fmap fromOnly -> gs) -> +-- error $ "How did I get multiple values out of a MAX()? " ++ show gs +-- where sql = [here| +-- SELECT MAX(gc_generation) FROM causal; +-- |] loadCausalValueHashId :: EDB m => CausalHashId -> m BranchHashId loadCausalValueHashId chId@(CausalHashId id) = diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs index b869de8958..fc2b51ddcd 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Sync22.hs @@ -102,8 +102,7 @@ sync22 = do hCache <- Cache.semispaceCache size oCache <- Cache.semispaceCache size cCache <- Cache.semispaceCache size - gc <- runDest $ Q.getNurseryGeneration - pure $ Sync (trySync tCache hCache oCache cCache (succ gc)) + pure $ Sync (trySync tCache hCache oCache cCache) trySync :: forall m. 
@@ -112,10 +111,9 @@ trySync :: Cache m HashId HashId -> Cache m ObjectId ObjectId -> Cache m CausalHashId CausalHashId -> - Generation -> Entity -> m (TrySyncResult Entity) -trySync tCache hCache oCache cCache _gc = \case +trySync tCache hCache oCache cCache = \case -- for causals, we need to get the value_hash_id of the thingo -- - maybe enqueue their parents -- - enqueue the self_ and value_ hashes diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 8f67559076..0203457219 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -89,12 +89,9 @@ CREATE INDEX object_type_id ON object(type_id); -- `commit_flag` and `gc_generation` are basically unused at the moment. CREATE TABLE causal ( self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), - value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id), - commit_flag INTEGER NOT NULL, - gc_generation INTEGER NOT NULL + value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id) ); CREATE INDEX causal_value_hash_id ON causal(value_hash_id); -CREATE INDEX causal_gc_generation ON causal(gc_generation); -- We expect exactly 1 row, which we overwrite when we setRootNamespace. 
CREATE TABLE namespace_root ( diff --git a/docs/repoformats/v2.markdown b/docs/repoformats/v2.markdown index 4a2590c030..b626d0b915 100644 --- a/docs/repoformats/v2.markdown +++ b/docs/repoformats/v2.markdown @@ -17,19 +17,17 @@ The object table stores things that are identified by hash and represented with ```sql CREATE TABLE causal ( - self_hash_id INTEGER PRIMARY KEY NOT NULL CONSTRAINT causal_fk1 REFERENCES hash(id), - value_hash_id INTEGER NOT NULL CONSTRAINT causal_fk2 REFERENCES hash(id), - commit_flag INTEGER NOT NULL, - gc_generation INTEGER NOT NULL + self_hash_id INTEGER PRIMARY KEY NOT NULL REFERENCES hash(id), + value_hash_id INTEGER NOT NULL REFERENCES hash(id) ); CREATE TABLE causal_parent ( causal_id INTEGER NOT NULL REFERENCES causal(self_hash_id), - parent_id INTEGER NOT NULL REFERENCES causal(self_hash_id), + parent_id INTEGER NOT NULL REFERENCES causal(self_hash_id) ); ``` -Although causals are indexed by `hash.id`, an entry in the `causal` table is required, to qualify as a real causal. This also tells us the hash of its root namespace slice (which may or may not be present), whether it's been committed, and its `gc_generation`, which will be `True` and `0` until a commit/squash feature is implemented. +Although causals are indexed by `hash.id`, an entry in the `causal` table is required, to qualify as a real causal. This also tells us the hash of its root namespace slice (which may or may not be present). 
**Hash-based vs Object-based References** diff --git a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs index 453ea11fa9..3c9112ef40 100644 --- a/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs +++ b/parser-typechecker/src/Unison/Codebase/Conversion/Sync12.hs @@ -37,8 +37,6 @@ import Database.SQLite.Simple (Connection) import Debug.Trace (traceM) import System.IO (stdout) import System.IO.Extra (hFlush) -import U.Codebase.Sqlite.DbId (Generation) -import qualified U.Codebase.Sqlite.Queries as Q import U.Codebase.Sync (Sync (Sync), TrySyncResult) import qualified U.Codebase.Sync as Sync import qualified U.Util.Monoid as Monoid @@ -143,9 +141,7 @@ sync12 :: (MonadIO f, MonadReader (Env p x) f, RS m n a, Applicative m) => (m ~> n) -> f (Sync n (Entity m)) -sync12 t = do - gc <- runDest' Q.getNurseryGeneration - pure $ Sync (trySync t (succ gc)) +sync12 t = pure $ Sync (trySync t) -- For each entity, we have to check to see -- a) if it exists (if not, mark as missing in Status) @@ -159,10 +155,9 @@ trySync :: forall m n a. 
(R m n a, S m n, Applicative m) => (m ~> n) -> - Generation -> Entity m -> n (TrySyncResult (Entity m)) -trySync t _gc e = do +trySync t e = do Env _ dest _ <- Reader.ask case e of C h mc -> do From 64f129b1cb0121229db3107389a2a25de7ab9e02 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 6 May 2021 21:22:11 -0600 Subject: [PATCH 222/225] let schema version start at 1 instead of 2 --- codebase2/codebase-sqlite/sql/create.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codebase2/codebase-sqlite/sql/create.sql b/codebase2/codebase-sqlite/sql/create.sql index 0203457219..ee86544131 100644 --- a/codebase2/codebase-sqlite/sql/create.sql +++ b/codebase2/codebase-sqlite/sql/create.sql @@ -3,7 +3,7 @@ CREATE TABLE schema_version ( version INTEGER NOT NULL ); -INSERT INTO schema_version (version) VALUES (2); +INSERT INTO schema_version (version) VALUES (1); -- actually stores the 512-byte hashes CREATE TABLE hash ( From 60bf31ab8e5df0f556dbcad04f1d1bf60c78a156 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 6 May 2021 21:22:34 -0600 Subject: [PATCH 223/225] add more sql to v2.markdown --- docs/repoformats/v2.markdown | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docs/repoformats/v2.markdown b/docs/repoformats/v2.markdown index b626d0b915..dfaa71c445 100644 --- a/docs/repoformats/v2.markdown +++ b/docs/repoformats/v2.markdown @@ -5,12 +5,35 @@ The v2 codebase format is a sqlite3 database with some Unison objects stored rel There are a few important concepts for understanding the implementation. **The `hash` and `text` tables** + +```sql +CREATE TABLE hash ( + id INTEGER PRIMARY KEY NOT NULL, + base32 TEXT UNIQUE NOT NULL COLLATE NOCASE +); +``` + `hash` stores Unison hashes in base32hex format. It is indexed to let us look up hashes efficiently by case-insensitive prefix in log time. Hashes are not stored anywhere else in the database. 
+```sql +CREATE TABLE text ( + id INTEGER PRIMARY KEY NOT NULL, + text TEXT UNIQUE NOT NULL +); +``` + `text` stores all of the strings in the codebase. This includes definition names, internal names of ucm builtins, and user-defined strings. Though it doesn't include local variable definition names. Possibly an oversight. 😬 It is indexed to let us look up strings by exact match in logarithmic time, for deduplication purposes. Indeed, it does not contain any duplicates. **The `object` table** +```sql +CREATE TABLE object ( + id INTEGER PRIMARY KEY NOT NULL, + primary_hash_id INTEGER NOT NULL REFERENCES hash(id), + type_id INTEGER NOT NULL REFERENCES object_type_description(id), + bytes BLOB NOT NULL +); +``` The object table stores things that are identified by hash and represented with binary blobs instead of relationally. Currently that includes patches and namespace slices, along with term and decl components ("component" ambiguously meaning the whole cycle, a strongly-connected component of definitions). Note that the entire component is identified by a single hash, like patches and namespace slices are. There could be new object types in the future. The object type of each object is given by the `type_id` field. (It's an integer, but the description is given in the `object_type_description` table for convenience, and in `ObjectType.hs`.) 
**The `causal` and `causal_parents` tables** From c6d1a8d923b13630b3fd84aa66a7d162c0f0c637 Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 6 May 2021 22:03:07 -0600 Subject: [PATCH 224/225] replace schema inventory with schema_version check --- .../U/Codebase/Sqlite/Queries.hs | 49 ++++--------------- .../src/Unison/Codebase/SqliteCodebase.hs | 38 ++++++-------- .../Codebase/SqliteCodebase/SyncEphemeral.hs | 6 +-- 3 files changed, 28 insertions(+), 65 deletions(-) diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 1380cfb36f..71e6ab5b5f 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -57,6 +57,7 @@ import U.Codebase.Sqlite.DbId CausalHashId (..), HashId (..), ObjectId (..), + SchemaVersion, TextId, ) import U.Codebase.Sqlite.ObjectType (ObjectType) @@ -105,6 +106,8 @@ data Integrity | NoObjectForPrimaryHashId HashId | NoNamespaceRoot | MultipleNamespaceRoots [CausalHashId] + | NoSchemaVersion + | MultipleSchemaVersions [SchemaVersion] | NoTypeIndexForTerm Referent.Id deriving (Show) @@ -129,46 +132,14 @@ createSchema = do setFlags :: DB m => m () setFlags = execute_ "PRAGMA foreign_keys = ON;" -type SchemaType = String - -type SchemaName = String - -checkForMissingSchema :: DB m => m [(SchemaType, SchemaName)] -checkForMissingSchema = filterM missing schema - where - missing (t, n) = null @[] @(Only Int) <$> query sql (t, n) - sql = "SELECT 1 FROM sqlite_master WHERE type = ? and name = ?" 
- schema = - [ ("table", "schema_version"), - ("table", "hash"), - ("index", "hash_base32"), - ("table", "text"), - ("table", "hash_object"), - ("index", "hash_object_object_id"), - ("table", "object_type_description"), - ("table", "object"), - ("index", "object_hash_id"), - ("index", "object_type_id"), - ("table", "causal"), - ("index", "causal_value_hash_id"), - ("table", "namespace_root"), - ("table", "causal_parent"), - ("index", "causal_parent_causal_id"), - ("index", "causal_parent_parent_id"), - ("table", "watch_result"), - ("table", "watch"), - ("index", "watch_kind"), - ("table", "watch_kind_description"), - ("table", "find_type_index"), - ("index", "find_type_index_type"), - ("table", "find_type_mentions_index"), - ("index", "find_type_mentions_index_type"), - ("table", "dependents_index"), - ("index", "dependents_by_dependency"), - ("index", "dependencies_by_dependent") - ] - {- ORMOLU_DISABLE -} +schemaVersion :: DB m => m SchemaVersion +schemaVersion = queryAtoms sql () >>= \case + [] -> error $ show NoSchemaVersion + [v] -> pure v + vs -> error $ show (MultipleSchemaVersions vs) + where sql = "SELECT version from schema_version;" + saveHash :: DB m => Base32Hex -> m HashId saveHash base32 = execute sql (Only base32) >> queryOne (loadHashId base32) where sql = [here| diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs index ad70b9fa5b..0998345f73 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase.hs @@ -21,7 +21,6 @@ import qualified Control.Monad.State as State import Control.Monad.Trans (MonadTrans (lift)) import Control.Monad.Trans.Maybe (MaybeT (MaybeT)) import Data.Bifunctor (Bifunctor (bimap, first), second) -import qualified Data.Bifunctor as Bifunctor import qualified Data.Either.Combinators as Either import Data.Foldable (Foldable (toList), for_, traverse_) import Data.Functor (void, (<&>)) 
@@ -92,12 +91,12 @@ import qualified Unison.Term as Term import Unison.Type (Type) import qualified Unison.Type as Type import qualified Unison.UnisonFile as UF -import qualified Unison.Util.ColorText as ColorText import qualified Unison.Util.Pretty as P import Unison.Util.Timing (time) import UnliftIO (MonadIO, catchIO, liftIO) import UnliftIO.Directory (canonicalizePath, createDirectoryIfMissing, doesDirectoryExist, doesFileExist) import UnliftIO.STM +import U.Codebase.Sqlite.DbId (SchemaVersion(SchemaVersion)) debug, debugProcessBranches :: Bool debug = False @@ -120,18 +119,15 @@ createCodebaseOrError dir = do prettyDir <- P.string <$> canonicalizePath dir let convertError = \case CreateCodebaseAlreadyExists -> Codebase1.CreateCodebaseAlreadyExists - CreateCodebaseMissingSchema schema -> - let prettyError :: [(Q.SchemaType, Q.SchemaName)] -> Codebase1.Pretty - prettyError schema = - (("Missing SqliteCodebase structure in " <> prettyDir <> ".") <>) - . P.column2Header "Schema Type" "Name" - $ map (Bifunctor.bimap P.string P.string) schema - in Codebase1.CreateCodebaseOther $ prettyError schema + CreateCodebaseUnknownSchemaVersion v -> Codebase1.CreateCodebaseOther $ prettyError v + prettyError :: SchemaVersion -> Codebase1.Pretty + prettyError v = P.wrap $ + "I don't know how to handle " <> P.shown v <> "in" <> P.backticked' prettyDir "." Either.mapLeft convertError <$> createCodebaseOrError' dir data CreateCodebaseError = CreateCodebaseAlreadyExists - | CreateCodebaseMissingSchema [(Q.SchemaType, Q.SchemaName)] + | CreateCodebaseUnknownSchemaVersion SchemaVersion deriving (Show) createCodebaseOrError' :: @@ -155,18 +151,14 @@ createCodebaseOrError' path = do Right () -> pure () ) - fmap (Either.mapLeft CreateCodebaseMissingSchema) (sqliteCodebase path) + fmap (Either.mapLeft CreateCodebaseUnknownSchemaVersion) (sqliteCodebase path) -- get the codebase in dir getCodebaseOrError :: forall m. 
MonadIO m => CodebasePath -> m (Either Codebase1.Pretty (Codebase m Symbol Ann)) getCodebaseOrError dir = do prettyDir <- liftIO $ P.string <$> canonicalizePath dir - let prettyError :: [(Q.SchemaType, Q.SchemaName)] -> String - prettyError schema = - ColorText.toANSI . P.render 80 . (("Missing SqliteCodebase structure in " <> prettyDir <> ".") <>) - . P.column2Header "Schema Type" "Name" - $ map (Bifunctor.bimap P.string P.string) schema - fmap (Either.mapLeft $ P.string . prettyError) (sqliteCodebase dir) + let prettyError v = P.wrap $ "I don't know how to handle " <> P.shown v <> "in" <> P.backticked' prettyDir "." + fmap (Either.mapLeft prettyError) (sqliteCodebase dir) initSchemaIfNotExist :: MonadIO m => FilePath -> m () initSchemaIfNotExist path = liftIO do @@ -245,12 +237,12 @@ unsafeGetConnection root = do runReaderT Q.setFlags conn pure conn -sqliteCodebase :: MonadIO m => CodebasePath -> m (Either [(Q.SchemaType, Q.SchemaName)] (Codebase m Symbol Ann)) +sqliteCodebase :: MonadIO m => CodebasePath -> m (Either SchemaVersion (Codebase m Symbol Ann)) sqliteCodebase root = do Monad.when debug $ traceM $ "sqliteCodebase " ++ root conn <- unsafeGetConnection root - runReaderT Q.checkForMissingSchema conn >>= \case - [] -> do + runReaderT Q.schemaVersion conn >>= \case + SchemaVersion 1 -> do rootBranchCache <- newTVarIO Nothing -- The v1 codebase interface has operations to read and write individual definitions -- whereas the v2 codebase writes them as complete components. These two fields buffer @@ -710,10 +702,10 @@ sqliteCodebase root = do <*> pure (16 * 1024 * 1024) src <- lift (sqliteCodebase srcPath) - >>= Except.liftEither . Either.mapLeft SyncEphemeral.SrcMissingSchema + >>= Except.liftEither . Either.mapLeft SyncEphemeral.SrcWrongSchema dest <- lift (sqliteCodebase destPath) - >>= Except.liftEither . Either.mapLeft SyncEphemeral.DestMissingSchema + >>= Except.liftEither . 
Either.mapLeft SyncEphemeral.DestWrongSchema -- we want to use sync22 wherever possible -- so for each branch, we'll check if it exists in the destination branch -- or if it exists in the source branch, then we can sync22 it @@ -824,7 +816,7 @@ sqliteCodebase root = do branchHashLength branchHashesByPrefix ) - missingSchema -> pure . Left $ missingSchema + v -> pure . Left $ v runDB' :: MonadIO m => Connection -> MaybeT (ReaderT Connection (ExceptT Ops.Error m)) a -> m (Maybe a) runDB' conn = runDB conn . runMaybeT diff --git a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs index 11b08ab98d..2c958e5cbd 100644 --- a/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs +++ b/parser-typechecker/src/Unison/Codebase/SqliteCodebase/SyncEphemeral.hs @@ -4,7 +4,7 @@ module Unison.Codebase.SqliteCodebase.SyncEphemeral where import Data.Set (Set) import U.Codebase.HashTags (CausalHash) -import qualified U.Codebase.Sqlite.Queries as Q +import U.Codebase.Sqlite.DbId (SchemaVersion) import qualified U.Codebase.Sqlite.Sync22 as Sync22 import Unison.Hash (Hash) @@ -15,8 +15,8 @@ data Dependencies = Dependencies data Error = Sync22Error Sync22.Error - | SrcMissingSchema [(Q.SchemaType, Q.SchemaName)] - | DestMissingSchema [(Q.SchemaType, Q.SchemaName)] + | SrcWrongSchema SchemaVersion + | DestWrongSchema SchemaVersion | DisappearingBranch CausalHash deriving (Show) From 6050e622898cd09e1d5681ae62af6ab93b3c895c Mon Sep 17 00:00:00 2001 From: Arya Irani Date: Thu, 6 May 2021 23:19:11 -0600 Subject: [PATCH 225/225] pull some weeds --- .../U/Codebase/Sqlite/Queries.hs | 35 ++---------------- .../U/Codebase/Sqlite/Serialization.hs | 7 ---- codebase2/codebase/U/Codebase/Type.hs | 5 --- codebase2/codebase/U/Codebase/TypeEdit.hs | 8 ----- codebase2/core/U/Core/ABT.hs | 36 ------------------- codebase2/util/U/Util/Base32Hex.hs | 6 ---- codebase2/util/U/Util/Cache.hs | 3 -- 
weeder.dhall | 1 + 8 files changed, 3 insertions(+), 98 deletions(-) create mode 100644 weeder.dhall diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs index 71e6ab5b5f..93a1cd9065 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Queries.hs @@ -17,8 +17,8 @@ {-# LANGUAGE TypeOperators #-} module U.Codebase.Sqlite.Queries where -import Control.Monad (filterM, when) -import Control.Monad.Except (ExceptT, MonadError, runExceptT) +import Control.Monad (when) +import Control.Monad.Except (MonadError) import qualified Control.Monad.Except as Except import Control.Monad.IO.Class (MonadIO) import Control.Monad.Reader (MonadReader (ask)) @@ -40,7 +40,6 @@ import Database.SQLite.Simple ( Connection, FromRow, Only (..), - SQLData, ToRow (..), (:.) (..), ) @@ -101,7 +100,6 @@ data Integrity | UnknownObjectId ObjectId | UnknownCausalHashId CausalHashId | UnknownHash Hash - | UnknownText Text | NoObjectForHashId HashId | NoObjectForPrimaryHashId HashId | NoNamespaceRoot @@ -111,13 +109,6 @@ data Integrity | NoTypeIndexForTerm Referent.Id deriving (Show) --- | discard errors that you're sure are impossible -noExcept :: (Monad m, Show e) => ExceptT e m a -> m a -noExcept a = - runExceptT a >>= \case - Right a -> pure a - Left e -> error $ "unexpected error: " ++ show e - orError :: Err m => Integrity -> Maybe b -> m b orError e = maybe (throwError e) pure @@ -189,9 +180,6 @@ loadText :: DB m => Text -> m (Maybe TextId) loadText t = queryAtom sql (Only t) where sql = [here| SELECT id FROM text WHERE text = ? |] -expectText :: EDB m => Text -> m TextId -expectText t = loadText t >>= orError (UnknownText t) - loadTextById :: EDB m => TextId -> m Text loadTextById h = queryAtom sql (Only h) >>= orError (UnknownTextId h) where sql = [here| SELECT text FROM text WHERE id = ? 
|] @@ -266,21 +254,6 @@ loadPrimaryHashByObjectId oId = queryAtom sql (Only oId) >>= orError (UnknownObj WHERE object.id = ? |] -objectAndPrimaryHashByAnyHash :: EDB m => Base32Hex -> m (Maybe (Base32Hex, ObjectId)) -objectAndPrimaryHashByAnyHash h = runMaybeT do - hashId <- MaybeT $ loadHashId h -- hash may not exist - oId <- MaybeT $ maybeObjectIdForAnyHashId hashId -- hash may not correspond to any object - base32 <- loadPrimaryHashByObjectId oId - pure (base32, oId) - -objectExistsWithHash :: DB m => Base32Hex -> m Bool -objectExistsWithHash h = queryExists sql (Only h) where - sql = [here| - SELECT 1 - FROM hash INNER JOIN hash_object ON hash.id = hash_object.hash_id - WHERE base32 = ? - |] - hashIdsForObject :: DB m => ObjectId -> m (NonEmpty HashId) hashIdsForObject oId = do primaryHashId <- queryOne $ queryAtom sql1 (Only oId) @@ -603,10 +576,6 @@ queryAtom q r = fmap fromOnly <$> queryMaybe q r queryOne :: Functor f => f (Maybe b) -> f b queryOne = fmap fromJust --- | composite input, Boolean output -queryExists :: (DB m, ToRow q, Show q) => SQLite.Query -> q -> m Bool -queryExists q r = not . null . 
map (id @SQLData) <$> queryAtoms q r - -- | composite input, composite List output query :: (DB m, ToRow q, FromRow r, Show q, Show r) => SQLite.Query -> q -> m [r] query q r = do diff --git a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs index aab8b48b98..e391a60521 100644 --- a/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs +++ b/codebase2/codebase-sqlite/U/Codebase/Sqlite/Serialization.hs @@ -16,11 +16,7 @@ import Data.Bytes.Serial (SerialEndian (serializeBE), deserialize, deserializeBE import Data.Bytes.VarInt (VarInt (VarInt), unVarInt) import Data.Int (Int64) import Data.List (elemIndex) -import Data.Sequence (Seq) -import qualified Data.Sequence as Seq import qualified Data.Set as Set -import Data.Vector (Vector) -import qualified Data.Vector as Vector import Data.Word (Word64) import Debug.Trace (trace) import qualified U.Codebase.Decl as Decl @@ -638,9 +634,6 @@ getBranchLocalIds = <*> getVector getVarInt <*> getVector (getPair getVarInt getVarInt) -vec2seq :: Vector a -> Seq a -vec2seq v = Seq.fromFunction (length v) (v Vector.!) 
- decomposeComponent :: MonadGet m => m [(LocalIds, BS.ByteString)] decomposeComponent = do offsets <- getList (getVarInt @_ @Int) diff --git a/codebase2/codebase/U/Codebase/Type.hs b/codebase2/codebase/U/Codebase/Type.hs index 1a8a70f395..4d01bcb68b 100644 --- a/codebase2/codebase/U/Codebase/Type.hs +++ b/codebase2/codebase/U/Codebase/Type.hs @@ -58,11 +58,6 @@ rmap f = ABT.transform \case Ref r -> Ref (f r) x -> unsafeCoerce x -rtraverse :: (Monad g, Ord v) => (r -> g r') -> ABT.Term (F' r) v a -> g (ABT.Term (F' r') v a) -rtraverse g = ABT.transformM \case - Ref r -> Ref <$> g r - x -> pure $ unsafeCoerce x - typeD2T :: Ord v => Hash -> TypeD v -> TypeT v typeD2T h = rmap $ bimap id $ Maybe.fromMaybe h diff --git a/codebase2/codebase/U/Codebase/TypeEdit.hs b/codebase2/codebase/U/Codebase/TypeEdit.hs index 2ca0889a04..e24a3d5c34 100644 --- a/codebase2/codebase/U/Codebase/TypeEdit.hs +++ b/codebase2/codebase/U/Codebase/TypeEdit.hs @@ -7,14 +7,6 @@ import qualified U.Util.Hashable as H data TypeEdit = Replace Reference | Deprecate deriving (Eq, Ord, Show) -references :: TypeEdit -> [Reference] -references (Replace r) = [r] -references Deprecate = [] - instance Hashable TypeEdit where tokens (Replace r) = H.Tag 0 : H.tokens r tokens Deprecate = [H.Tag 1] - -toReference :: TypeEdit -> Maybe Reference -toReference (Replace r) = Just r -toReference Deprecate = Nothing diff --git a/codebase2/core/U/Core/ABT.hs b/codebase2/core/U/Core/ABT.hs index ec6810f41a..cdc4f9a105 100644 --- a/codebase2/core/U/Core/ABT.hs +++ b/codebase2/core/U/Core/ABT.hs @@ -18,10 +18,7 @@ import qualified Data.Set as Set import qualified Data.Foldable as Foldable import Prelude hiding (abs,cycle) import U.Util.Hashable (Accumulate, Hashable1) -import Data.Map (Map) -import qualified Data.Map as Map import qualified U.Util.Hashable as Hashable -import Data.Functor (void) import qualified Data.List as List import qualified Data.Vector as Vector import Control.Monad (join) @@ -55,13 +52,6 @@ vmap f 
(Term _ a out) = case out of Cycle r -> cycle a (vmap f r) Abs v body -> abs a (f v) (vmap f body) -vtraverse :: (Traversable f, Applicative g, Ord v') => (v -> g v') -> Term f v a -> g (Term f v' a) -vtraverse g (Term _ a out) = case out of - Var v -> var a <$> g v - Cycle r -> cycle a <$> vtraverse g r - Abs v r -> abs a <$> g v <*> vtraverse g r - Tm fa -> tm a <$> traverse (vtraverse g) fa - transform :: (Ord v, Foldable g, Functor g) => (forall a. f a -> g a) -> Term f v a -> Term g v a transform f t = case out t of @@ -87,12 +77,6 @@ var a v = Term (Set.singleton v) a (Var v) cycle :: a -> Term f v a -> Term f v a cycle a t = Term (freeVars t) a (Cycle t) -absChain' :: Ord v => [v] -> Term f v () -> Term f v () -absChain' vs t = foldr (\v t -> abs () v t) t vs - -absCycle' :: Ord v => [v] -> Term f v () -> Term f v () -absCycle' vs t = cycle () $ absChain' vs t - tm :: (Foldable f, Ord v) => a -> f (Term f v a) -> Term f v a tm a t = Term (Set.unions (fmap freeVars (Foldable.toList t))) a (Tm t) @@ -125,26 +109,6 @@ hash = hash' [] where env -> (map (hash' env) ts', hash' env) hashCycle env ts = (map (hash' env) ts, hash' env) --- Hash a strongly connected component and sort its definitions into a canonical order. -hashComponent :: - (Functor f, Hashable1 f, Foldable f, Eq v, Show v, Ord v, Ord h, Accumulate h) - => Map v (Term f v a) -> (h, [(v, Term f v a)]) -hashComponent byName = let - ts = Map.toList byName - embeds = [ (v, void (transform Embed t)) | (v,t) <- ts ] - vs = fst <$> ts - -- make closed terms for each element of the component - -- [ let x = ..., y = ..., in x - -- , let x = ..., y = ..., in y ] - -- so that we can then hash them (closed terms can be hashed) - -- so that we can sort them by hash. this is the "canonical, name-agnostic" - -- hash that yields the canonical ordering of the component. 
- tms = [ (v, absCycle' vs (tm () $ Component (snd <$> embeds) (var () v))) | v <- vs ] - hashed = [ ((v,t), hash t) | (v,t) <- tms ] - sortedHashed = List.sortOn snd hashed - overallHash = Hashable.accumulate (Hashable.Hashed . snd <$> sortedHashed) - in (overallHash, [ (v, t) | ((v, _),_) <- sortedHashed, Just t <- [Map.lookup v byName] ]) - -- Implementation detail of hashComponent data Component f a = Component [a] a | Embed (f a) deriving (Functor, Traversable, Foldable) instance (Hashable1 f, Functor f) => Hashable1 (Component f) where diff --git a/codebase2/util/U/Util/Base32Hex.hs b/codebase2/util/U/Util/Base32Hex.hs index 6a973e418a..e81dbbb987 100644 --- a/codebase2/util/U/Util/Base32Hex.hs +++ b/codebase2/util/U/Util/Base32Hex.hs @@ -27,12 +27,6 @@ toByteString :: Base32Hex -> ByteString toByteString = fromMaybe err . textToByteString . toText where err = "invalid base32Hex presumably created via \"unsafe\" constructors" -fromText :: Text -> Maybe Base32Hex -fromText = fmap fromByteString . textToByteString - -unsafeFromText :: Text -> Base32Hex -unsafeFromText = UnsafeBase32Hex - -- | Produce a 'Hash' from a base32hex-encoded version of its binary representation textToByteString :: Text -> Maybe ByteString textToByteString txt = diff --git a/codebase2/util/U/Util/Cache.hs b/codebase2/util/U/Util/Cache.hs index 70dcdde575..b853acc7bd 100644 --- a/codebase2/util/U/Util/Cache.hs +++ b/codebase2/util/U/Util/Cache.hs @@ -15,9 +15,6 @@ data Cache m k v = , insert :: k -> v -> m () } -transform :: (forall a. m a -> n a) -> Cache m k v -> Cache n k v -transform f Cache {..} = Cache (f . lookup) ((f .) . insert) - -- Create a cache of unbounded size. cache :: (MonadIO m, Ord k) => m (Cache m k v) cache = do diff --git a/weeder.dhall b/weeder.dhall new file mode 100644 index 0000000000..6c2fe1de6f --- /dev/null +++ b/weeder.dhall @@ -0,0 +1 @@ +{ roots = [ "^Main.main$", "^Paths_.*" ], type-class-roots = True }